Initial commit with translated description

This commit is contained in:
2026-03-29 08:33:08 +08:00
commit a9c97c9562
98 changed files with 23152 additions and 0 deletions

13
src/canary.js Normal file
View File

@@ -0,0 +1,13 @@
// Canary script: run in a forked child process to verify index.js loads
// without crashing. Exit 0 = safe, non-zero = broken.
//
// This is the last safety net before solidify commits an evolution.
// If a patch broke index.js (syntax error, missing require, etc.),
// the canary catches it BEFORE the daemon restarts with broken code.
try {
  // A clean require proves index.js parses and its module-level code runs
  // without throwing.
  require('../index.js');
  process.exit(0); // exit 0 = safe
} catch (e) {
  // Surface a truncated failure reason on stderr for the solidify log.
  process.stderr.write(String(e.message || e).slice(0, 500));
  process.exit(1); // non-zero = broken; abort the evolution commit
}

2043
src/evolve.js Normal file

File diff suppressed because it is too large Load Diff

173
src/gep/a2a.js Normal file
View File

@@ -0,0 +1,173 @@
const fs = require('fs');
const { readAllEvents } = require('./assetStore');
const { computeAssetId, SCHEMA_VERSION } = require('./contentHash');
const { unwrapAssetFromMessage } = require('./a2aProtocol');
// Current timestamp as an ISO-8601 UTC string.
function nowIso() {
  return new Date().toISOString();
}
// True when obj is an object whose type is one of the asset kinds
// accepted over A2A: Gene, Capsule, or EvolutionEvent.
function isAllowedA2AAsset(obj) {
  if (!obj || typeof obj !== 'object') return false;
  return ['Gene', 'Capsule', 'EvolutionEvent'].indexOf(obj.type) !== -1;
}
// Coerce x to a finite number; return fallback (default null) when the
// coercion yields NaN or +/-Infinity.
function safeNumber(x, fallback) {
  var fb = fallback === undefined ? null : fallback;
  var n = Number(x);
  if (Number.isFinite(n)) return n;
  return fb;
}
// Blast-radius caps for externally received patches, overridable via the
// A2A_MAX_FILES / A2A_MAX_LINES env vars; defaults are 5 files / 200 lines.
// safeNumber already returns the fallback for any non-finite input, so the
// previous second Number.isFinite pass was dead code and has been removed.
function getBlastRadiusLimits() {
  return {
    maxFiles: safeNumber(process.env.A2A_MAX_FILES, 5),
    maxLines: safeNumber(process.env.A2A_MAX_LINES, 200),
  };
}
// True when the given blast radius fits within the configured limits.
// Missing or non-numeric files/lines counts are treated as 0 (safe).
function isBlastRadiusSafe(blastRadius) {
  var limits = getBlastRadiusLimits();
  var count = function (v) {
    var n = Number(v);
    return Number.isFinite(n) ? Math.max(0, n) : 0;
  };
  var files = blastRadius ? count(blastRadius.files) : 0;
  var lines = blastRadius ? count(blastRadius.lines) : 0;
  return files <= limits.maxFiles && lines <= limits.maxLines;
}
// Clamp n into [0, 1]; any non-finite input clamps to 0.
function clamp01(n) {
  var x = Number(n);
  if (!Number.isFinite(x)) return 0;
  if (x < 0) return 0;
  if (x > 1) return 1;
  return x;
}
// Deep-copy an externally received asset and discount its confidence by
// `factor` (default 0.6). Returns null when the asset is not an allowed
// A2A type. The clone is tagged under .a2a with provenance (source,
// received_at, factor) and marked 'external_candidate' so it is never
// trusted as-is; schema_version and asset_id are stamped if missing.
function lowerConfidence(asset, opts) {
  var options = opts || {};
  var factor = Number.isFinite(Number(options.factor)) ? Number(options.factor) : 0.6;
  var clone = JSON.parse(JSON.stringify(asset || {}));
  if (!isAllowedA2AAsset(clone)) return null;
  // Only Capsules carry a confidence score; anything non-null is coerced
  // (Number(x) is a no-op for numbers) and clamped after scaling.
  if (clone.type === 'Capsule' && clone.confidence != null) {
    clone.confidence = clamp01(Number(clone.confidence) * factor);
  }
  if (!clone.a2a || typeof clone.a2a !== 'object') clone.a2a = {};
  clone.a2a.status = 'external_candidate';
  clone.a2a.source = options.source || 'external';
  clone.a2a.received_at = options.received_at || nowIso();
  clone.a2a.confidence_factor = factor;
  if (!clone.schema_version) clone.schema_version = SCHEMA_VERSION;
  if (!clone.asset_id) {
    try { clone.asset_id = computeAssetId(clone); } catch (e) { /* best-effort */ }
  }
  return clone;
}
// All persisted EvolutionEvent assets from the event store; [] when the
// store yields nothing usable.
function readEvolutionEvents() {
  var all = readAllEvents();
  if (!Array.isArray(all)) return [];
  return all.filter(function (ev) { return ev && ev.type === 'EvolutionEvent'; });
}
// Coerce an events argument to an array; non-arrays become [].
function normalizeEventsList(events) {
  if (Array.isArray(events)) return events;
  return [];
}
// Count consecutive 'success' EvolutionEvents for the given capsule,
// scanning from the newest event backwards. Events for other capsules are
// skipped; the scan stops at the first non-success outcome. Returns 0
// when no capsule id is given.
function computeCapsuleSuccessStreak(params) {
  var id = params.capsuleId ? String(params.capsuleId) : '';
  if (!id) return 0;
  var list = normalizeEventsList(params.events || readEvolutionEvents());
  var streak = 0;
  for (var idx = list.length - 1; idx >= 0; idx--) {
    var ev = list[idx];
    if (!ev || ev.type !== 'EvolutionEvent') continue;
    if (!ev.capsule_id || String(ev.capsule_id) !== id) continue;
    var status = ev.outcome && ev.outcome.status ? String(ev.outcome.status) : 'unknown';
    if (status !== 'success') break;
    streak += 1;
  }
  return streak;
}
// Gate for sharing a Capsule with other nodes: requires an outcome score
// >= 0.7, a blast radius within configured limits, and a streak of at
// least 2 consecutive successful EvolutionEvents for this capsule.
function isCapsuleBroadcastEligible(capsule, opts) {
  var options = opts || {};
  if (!capsule || capsule.type !== 'Capsule') return false;
  var score = null;
  if (capsule.outcome && capsule.outcome.score != null) {
    score = safeNumber(capsule.outcome.score, null);
  }
  if (score == null || score < 0.7) return false;
  var blast = capsule.blast_radius
    || (capsule.outcome && capsule.outcome.blast_radius)
    || null;
  if (!isBlastRadiusSafe(blast)) return false;
  var events = Array.isArray(options.events) ? options.events : readEvolutionEvents();
  return computeCapsuleSuccessStreak({ capsuleId: capsule.id, events: events }) >= 2;
}
// Filter capsules down to broadcast-eligible ones and stamp each with
// schema_version / asset_id when missing. NOTE: the stamps mutate the
// caller's capsule objects in place (the filtered array shares refs).
function exportEligibleCapsules(params) {
  var p = params || {};
  var capsules = Array.isArray(p.capsules) ? p.capsules : [];
  var events = Array.isArray(p.events) ? p.events : readEvolutionEvents();
  var eligible = capsules.filter(function (cap) {
    return isCapsuleBroadcastEligible(cap, { events: events });
  });
  eligible.forEach(function (cap) {
    if (!cap.schema_version) cap.schema_version = SCHEMA_VERSION;
    if (!cap.asset_id) {
      try { cap.asset_id = computeAssetId(cap); } catch (e) { /* best-effort */ }
    }
  });
  return eligible;
}
// Gate for sharing a Gene: must be a Gene with a non-empty string id and
// non-empty strategy and validation arrays.
function isGeneBroadcastEligible(gene) {
  if (!gene || gene.type !== 'Gene') return false;
  if (typeof gene.id !== 'string' || !gene.id) return false;
  var hasStrategy = Array.isArray(gene.strategy) && gene.strategy.length > 0;
  var hasValidation = Array.isArray(gene.validation) && gene.validation.length > 0;
  return hasStrategy && hasValidation;
}
// Filter genes down to broadcast-eligible ones and stamp each with
// schema_version / asset_id when missing (mutates the caller's objects).
function exportEligibleGenes(params) {
  var p = params || {};
  var genes = Array.isArray(p.genes) ? p.genes : [];
  var eligible = genes.filter(isGeneBroadcastEligible);
  eligible.forEach(function (gene) {
    if (!gene.schema_version) gene.schema_version = SCHEMA_VERSION;
    if (!gene.asset_id) {
      try { gene.asset_id = computeAssetId(gene); } catch (e) { /* best-effort */ }
    }
  });
  return eligible;
}
// Parse A2A input text into a list of asset objects. Accepts either a
// single JSON document (object or array) or JSONL (one JSON value per
// line). Protocol messages are unwrapped to their inner asset when
// possible; unparseable lines are skipped.
function parseA2AInput(text) {
  var raw = String(text || '').trim();
  if (!raw) return [];
  try {
    var parsed = JSON.parse(raw);
    if (Array.isArray(parsed)) {
      return parsed
        .map(function (item) { return unwrapAssetFromMessage(item) || item; })
        .filter(Boolean);
    }
    if (parsed && typeof parsed === 'object') {
      var asset = unwrapAssetFromMessage(parsed);
      return asset ? [asset] : [parsed];
    }
  } catch (e) { /* fall through to JSONL parsing */ }
  var out = [];
  raw.split('\n').forEach(function (line) {
    var trimmed = line.trim();
    if (!trimmed) return;
    try {
      var obj = JSON.parse(trimmed);
      out.push(unwrapAssetFromMessage(obj) || obj);
    } catch (e) { /* skip malformed line */ }
  });
  return out;
}
// Read a file as UTF-8, returning '' when the path is falsy, missing, or
// unreadable. Never throws.
function readTextIfExists(filePath) {
  if (!filePath) return '';
  try {
    return fs.existsSync(filePath) ? fs.readFileSync(filePath, 'utf8') : '';
  } catch (e) {
    return '';
  }
}
// Public API: asset gating, eligibility checks, export helpers, and A2A
// input parsing. Internal helpers (nowIso, clamp01, safeNumber, ...) stay
// module-private.
module.exports = {
  isAllowedA2AAsset, lowerConfidence, isBlastRadiusSafe,
  computeCapsuleSuccessStreak, isCapsuleBroadcastEligible,
  exportEligibleCapsules, isGeneBroadcastEligible,
  exportEligibleGenes, parseA2AInput, readTextIfExists,
};

887
src/gep/a2aProtocol.js Normal file
View File

@@ -0,0 +1,887 @@
// GEP A2A Protocol - Standard message types and pluggable transport layer.
//
// Protocol messages:
// hello - capability advertisement and node discovery
// publish - broadcast an eligible asset (Capsule/Gene)
// fetch - request a specific asset by id or content hash
// report - send a ValidationReport for a received asset
// decision - accept/reject/quarantine decision on a received asset
// revoke - withdraw a previously published asset
//
// Transport interface:
// send(message, opts) - send a protocol message
// receive(opts) - receive pending messages
// list(opts) - list available message files/streams
//
// Default transport: FileTransport (reads/writes JSONL to a2a/ directory).
const fs = require('fs');
const path = require('path');
const crypto = require('crypto');
const { getGepAssetsDir, getEvolverLogPath } = require('./paths');
const { computeAssetId } = require('./contentHash');
const { captureEnvFingerprint } = require('./envFingerprint');
const os = require('os');
const { getDeviceId } = require('./deviceId');
const PROTOCOL_NAME = 'gep-a2a';
const PROTOCOL_VERSION = '1.0.0';
// Message types accepted by buildMessage() / isValidProtocolMessage().
const VALID_MESSAGE_TYPES = ['hello', 'publish', 'fetch', 'report', 'decision', 'revoke'];
// Node identity: 'node_' + 12 hex chars. Persisted under ~/.evomap/node_id,
// with a repo-local fallback file when the home directory is not writable.
const NODE_ID_RE = /^node_[a-f0-9]{12}$/;
const NODE_ID_DIR = path.join(os.homedir(), '.evomap');
const NODE_ID_FILE = path.join(NODE_ID_DIR, 'node_id');
const LOCAL_NODE_ID_FILE = path.resolve(__dirname, '..', '..', '.evomap_node_id');
let _cachedNodeId = null; // process-lifetime cache for getNodeId()
// Load a previously persisted node id, preferring the home-dir file and
// falling back to the repo-local file. Returns null when neither holds a
// well-formed id; read errors just move on to the next location.
function _loadPersistedNodeId() {
  var candidates = [NODE_ID_FILE, LOCAL_NODE_ID_FILE];
  for (var i = 0; i < candidates.length; i++) {
    try {
      if (!fs.existsSync(candidates[i])) continue;
      var id = fs.readFileSync(candidates[i], 'utf8').trim();
      if (id && NODE_ID_RE.test(id)) return id;
    } catch (e) { /* try next location */ }
  }
  return null;
}
// Persist the node id so it survives restarts: try ~/.evomap/node_id
// first (file 0600, dir 0700), then the repo-local fallback. Previously a
// failure of BOTH attempts was swallowed silently; now it is logged so a
// node that cannot persist its identity is diagnosable.
function _persistNodeId(id) {
  try {
    if (!fs.existsSync(NODE_ID_DIR)) {
      fs.mkdirSync(NODE_ID_DIR, { recursive: true, mode: 0o700 });
    }
    fs.writeFileSync(NODE_ID_FILE, id, { encoding: 'utf8', mode: 0o600 });
    return;
  } catch (e) { /* fall through to repo-local fallback */ }
  try {
    fs.writeFileSync(LOCAL_NODE_ID_FILE, id, { encoding: 'utf8', mode: 0o600 });
    return;
  } catch (e) {
    console.warn('[a2aProtocol] Failed to persist node id:', e && e.message || e);
  }
}
// Unique message id: millisecond timestamp plus 8 hex chars of randomness.
function generateMessageId() {
  const suffix = crypto.randomBytes(4).toString('hex');
  return ['msg', Date.now(), suffix].join('_');
}
// Resolve this node's identity, in priority order:
//   1. A2A_NODE_ID env var (set after registering at evomap.ai),
//   2. a previously persisted id (~/.evomap/node_id or repo-local file),
//   3. a deterministic id derived from device id + agent name + cwd,
//      which is then persisted for future runs.
// The result is cached for the process lifetime.
function getNodeId() {
  if (_cachedNodeId) return _cachedNodeId;
  if (process.env.A2A_NODE_ID) {
    _cachedNodeId = String(process.env.A2A_NODE_ID);
    return _cachedNodeId;
  }
  const persisted = _loadPersistedNodeId();
  if (persisted) {
    _cachedNodeId = persisted;
    return _cachedNodeId;
  }
  console.warn('[a2aProtocol] A2A_NODE_ID is not set. Computing node ID from device fingerprint. ' +
    'This ID may change across machines or environments. ' +
    'Set A2A_NODE_ID after registering at https://evomap.ai to use a stable identity.');
  const deviceId = getDeviceId();
  const agentName = process.env.AGENT_NAME || 'default';
  // NOTE(review): including cwd means the same agent launched from a
  // different directory derives a different id — presumably intentional.
  const raw = deviceId + '|' + agentName + '|' + process.cwd();
  const computed = 'node_' + crypto.createHash('sha256').update(raw).digest('hex').slice(0, 12);
  _persistNodeId(computed);
  _cachedNodeId = computed;
  return _cachedNodeId;
}
// --- Base message builder ---
// Construct a base protocol envelope for any message type. Throws on a
// missing params object or an unknown message type; sender_id defaults to
// this node's id.
function buildMessage(params) {
  if (!params || typeof params !== 'object') {
    throw new Error('buildMessage requires a params object');
  }
  const messageType = params.messageType;
  if (!VALID_MESSAGE_TYPES.includes(messageType)) {
    throw new Error('Invalid message type: ' + messageType + '. Valid: ' + VALID_MESSAGE_TYPES.join(', '));
  }
  const envelope = {
    protocol: PROTOCOL_NAME,
    protocol_version: PROTOCOL_VERSION,
    message_type: messageType,
    message_id: generateMessageId(),
    sender_id: params.senderId || getNodeId(),
    timestamp: new Date().toISOString(),
    payload: params.payload || {},
  };
  return envelope;
}
// --- Typed message builders ---
// 'hello' message: advertises capabilities, asset counts, and the local
// environment fingerprint for node discovery.
function buildHello(opts) {
  const o = opts || {};
  const geneCount = typeof o.geneCount === 'number' ? o.geneCount : null;
  const capsuleCount = typeof o.capsuleCount === 'number' ? o.capsuleCount : null;
  return buildMessage({
    messageType: 'hello',
    senderId: o.nodeId,
    payload: {
      capabilities: o.capabilities || {},
      gene_count: geneCount,
      capsule_count: capsuleCount,
      env_fingerprint: captureEnvFingerprint(),
    },
  });
}
// 'publish' message for a single asset. The payload carries the asset plus
// an HMAC-SHA256 signature of its content hash.
// NOTE(review): when A2A_NODE_SECRET is unset the HMAC key falls back to
// the (non-secret) node id, so the signature is integrity-only.
function buildPublish(opts) {
  const o = opts || {};
  const asset = o.asset;
  if (!asset || !asset.type || !asset.id) {
    throw new Error('publish: asset must have type and id');
  }
  const assetIdVal = asset.asset_id || computeAssetId(asset);
  const key = process.env.A2A_NODE_SECRET || getNodeId();
  const signature = crypto.createHmac('sha256', key).update(assetIdVal).digest('hex');
  const payload = {
    asset_type: asset.type,
    asset_id: assetIdVal,
    local_id: asset.id,
    asset: asset,
    signature: signature,
  };
  return buildMessage({ messageType: 'publish', senderId: o.nodeId, payload: payload });
}
// Build a bundle publish message containing Gene + Capsule (+ optional EvolutionEvent).
// Hub requires payload.assets = [Gene, Capsule] since bundle enforcement was added.
//
// NOTE: mutates the passed-in gene/capsule/event objects — model_name may
// be stamped and asset_id is always recomputed — so callers observe the
// stamped assets after this returns. The signature is an HMAC-SHA256 over
// the sorted gene/capsule asset ids (keyed by A2A_NODE_SECRET, falling
// back to the non-secret node id).
function buildPublishBundle(opts) {
  const o = opts || {};
  const gene = o.gene;
  const capsule = o.capsule;
  const event = o.event || null;
  if (!gene || gene.type !== 'Gene' || !gene.id) {
    throw new Error('publishBundle: gene must be a valid Gene with type and id');
  }
  if (!capsule || capsule.type !== 'Capsule' || !capsule.id) {
    throw new Error('publishBundle: capsule must be a valid Capsule with type and id');
  }
  if (o.modelName && typeof o.modelName === 'string') {
    gene.model_name = o.modelName;
    capsule.model_name = o.modelName;
  }
  // Recompute content hashes after any model_name stamping above.
  gene.asset_id = computeAssetId(gene);
  capsule.asset_id = computeAssetId(capsule);
  const geneAssetId = gene.asset_id;
  const capsuleAssetId = capsule.asset_id;
  const nodeSecret = process.env.A2A_NODE_SECRET || getNodeId();
  // Sorted so the signature is independent of gene/capsule ordering.
  const signatureInput = [geneAssetId, capsuleAssetId].sort().join('|');
  const signature = crypto.createHmac('sha256', nodeSecret).update(signatureInput).digest('hex');
  const assets = [gene, capsule];
  if (event && event.type === 'EvolutionEvent') {
    if (o.modelName && typeof o.modelName === 'string') {
      event.model_name = o.modelName;
    }
    event.asset_id = computeAssetId(event);
    assets.push(event);
  }
  const publishPayload = {
    assets: assets,
    signature: signature,
  };
  if (o.chainId && typeof o.chainId === 'string') {
    publishPayload.chain_id = o.chainId;
  }
  return buildMessage({
    messageType: 'publish',
    senderId: o.nodeId,
    payload: publishPayload,
  });
}
// 'fetch' request: query by asset type, local id, or content hash;
// optionally search-only, restricted to an explicit id list, or ranked by
// the given signals.
function buildFetch(opts) {
  const o = opts || {};
  const payload = {
    asset_type: o.assetType || null,
    local_id: o.localId || null,
    content_hash: o.contentHash || null,
  };
  if (Array.isArray(o.signals) && o.signals.length > 0) payload.signals = o.signals;
  if (o.searchOnly === true) payload.search_only = true;
  if (Array.isArray(o.assetIds) && o.assetIds.length > 0) payload.asset_ids = o.assetIds;
  return buildMessage({ messageType: 'fetch', senderId: o.nodeId, payload: payload });
}
// 'report' message: attach a ValidationReport to a received asset.
function buildReport(opts) {
  const o = opts || {};
  const payload = {
    target_asset_id: o.assetId || null,
    target_local_id: o.localId || null,
    validation_report: o.validationReport || null,
  };
  return buildMessage({ messageType: 'report', senderId: o.nodeId, payload: payload });
}
// 'decision' message: record accept/reject/quarantine for a received
// asset. Throws on any other decision string.
function buildDecision(opts) {
  const o = opts || {};
  const validDecisions = ['accept', 'reject', 'quarantine'];
  if (validDecisions.indexOf(o.decision) === -1) {
    throw new Error('decision must be one of: ' + validDecisions.join(', '));
  }
  const payload = {
    target_asset_id: o.assetId || null,
    target_local_id: o.localId || null,
    decision: o.decision,
    reason: o.reason || null,
  };
  return buildMessage({ messageType: 'decision', senderId: o.nodeId, payload: payload });
}
// 'revoke' message: withdraw a previously published asset.
function buildRevoke(opts) {
  const o = opts || {};
  const payload = {
    target_asset_id: o.assetId || null,
    target_local_id: o.localId || null,
    reason: o.reason || null,
  };
  return buildMessage({ messageType: 'revoke', senderId: o.nodeId, payload: payload });
}
// --- Validation ---
// Structural check for an incoming protocol message: correct protocol
// tag, known message type, and non-empty string message_id / timestamp.
function isValidProtocolMessage(msg) {
  if (!msg || typeof msg !== 'object') return false;
  if (msg.protocol !== PROTOCOL_NAME) return false;
  const typeOk = !!msg.message_type && VALID_MESSAGE_TYPES.includes(msg.message_type);
  if (!typeOk) return false;
  const idOk = typeof msg.message_id === 'string' && msg.message_id !== '';
  const tsOk = typeof msg.timestamp === 'string' && msg.timestamp !== '';
  return idOk && tsOk;
}
// Extract a raw asset from either a protocol 'publish' message or a plain
// asset object (Gene/Capsule/EvolutionEvent); null for anything else.
// Keeps ingestion backward compatible with old- and new-format payloads.
function unwrapAssetFromMessage(input) {
  if (!input || typeof input !== 'object') return null;
  if (input.protocol === PROTOCOL_NAME && input.message_type === 'publish') {
    const payload = input.payload;
    const asset = payload && payload.asset;
    return asset && typeof asset === 'object' ? asset : null;
  }
  const plainTypes = ['Gene', 'Capsule', 'EvolutionEvent'];
  return plainTypes.includes(input.type) ? input : null;
}
// --- File Transport ---
// mkdir -p that never throws; failures are logged and ignored.
function ensureDir(dir) {
  try {
    if (fs.existsSync(dir)) return;
    fs.mkdirSync(dir, { recursive: true });
  } catch (e) {
    console.warn('[a2aProtocol] ensureDir failed:', dir, e && e.message || e);
  }
}
// Root directory for file-transport message exchange; A2A_DIR overrides
// the default <gep assets>/a2a location.
function defaultA2ADir() {
  if (process.env.A2A_DIR) return process.env.A2A_DIR;
  return path.join(getGepAssetsDir(), 'a2a');
}
// Append a message to outbox/<message_type>.jsonl under the transport dir.
function fileTransportSend(message, opts) {
  const baseDir = (opts && opts.dir) || defaultA2ADir();
  const outbox = path.join(baseDir, 'outbox');
  ensureDir(outbox);
  const filePath = path.join(outbox, message.message_type + '.jsonl');
  fs.appendFileSync(filePath, JSON.stringify(message) + '\n', 'utf8');
  return { ok: true, path: filePath };
}
// Read every *.jsonl file in inbox/ and return all parseable lines whose
// protocol tag matches ours. Malformed lines and unreadable files are
// logged and skipped; a missing inbox yields [].
function fileTransportReceive(opts) {
  const baseDir = (opts && opts.dir) || defaultA2ADir();
  const inbox = path.join(baseDir, 'inbox');
  if (!fs.existsSync(inbox)) return [];
  const messages = [];
  const files = fs.readdirSync(inbox).filter(function (f) { return f.endsWith('.jsonl'); });
  files.forEach(function (file) {
    let raw;
    try {
      raw = fs.readFileSync(path.join(inbox, file), 'utf8');
    } catch (e) {
      console.warn('[a2aProtocol] Failed to read inbox file:', file, e && e.message || e);
      return;
    }
    const lines = raw.split('\n').map(function (l) { return l.trim(); }).filter(Boolean);
    lines.forEach(function (line, idx) {
      try {
        const msg = JSON.parse(line);
        if (msg && msg.protocol === PROTOCOL_NAME) messages.push(msg);
      } catch (e) {
        console.warn('[a2aProtocol] Malformed JSON line in inbox file ' + file + ' (line ' + (idx + 1) + '):', e && e.message || e);
      }
    });
  });
  return messages;
}
// List the outbox message files ([] when the outbox does not exist yet).
function fileTransportList(opts) {
  const baseDir = (opts && opts.dir) || defaultA2ADir();
  const outbox = path.join(baseDir, 'outbox');
  if (!fs.existsSync(outbox)) return [];
  return fs.readdirSync(outbox).filter(function (f) { return f.endsWith('.jsonl'); });
}
// --- HTTP Transport (connects to evomap-hub) ---
// POST a protocol message to the hub endpoint matching its type.
// Always returns a Promise resolving to { ok, response } or
// { ok:false, error }. (Previously the missing-URL case returned a plain
// object while every other path returned a Promise, so callers could not
// uniformly await/chain the result.)
function httpTransportSend(message, opts) {
  const hubUrl = (opts && opts.hubUrl) || process.env.A2A_HUB_URL;
  if (!hubUrl) return Promise.resolve({ ok: false, error: 'A2A_HUB_URL not set' });
  const endpoint = hubUrl.replace(/\/+$/, '') + '/a2a/' + message.message_type;
  return fetch(endpoint, {
    method: 'POST',
    headers: buildHubHeaders(),
    body: JSON.stringify(message),
  })
    .then(function (res) { return res.json(); })
    .then(function (data) { return { ok: true, response: data }; })
    .catch(function (err) { return { ok: false, error: err.message }; });
}
// Pull assets from the hub by issuing a fetch message. Resolves to the
// payload.results array, or [] when no hub is configured or on any
// failure (which is logged).
function httpTransportReceive(opts) {
  const hubUrl = (opts && opts.hubUrl) || process.env.A2A_HUB_URL;
  if (!hubUrl) return Promise.resolve([]);
  const fetchMsg = buildFetch({
    assetType: (opts && opts.assetType) || null,
    signals: (opts && Array.isArray(opts.signals)) ? opts.signals : null,
  });
  const endpoint = hubUrl.replace(/\/+$/, '') + '/a2a/fetch';
  return fetch(endpoint, {
    method: 'POST',
    headers: buildHubHeaders(),
    body: JSON.stringify(fetchMsg),
  })
    .then(function (res) { return res.json(); })
    .then(function (data) {
      const results = data && data.payload && data.payload.results;
      return Array.isArray(results) ? results : [];
    })
    .catch(function (err) {
      console.warn('[a2aProtocol] httpTransportReceive failed:', err && err.message || err);
      return [];
    });
}
// HTTP transport has no file listing; report a single logical stream.
function httpTransportList() {
  return ['http'];
}
// --- Heartbeat ---
// Module-level heartbeat state; read through the exported getters and
// consumers below. All state is process-local.
let _heartbeatTimer = null; // pending setTimeout handle for the next beat
let _heartbeatStartedAt = null; // epoch ms when startHeartbeat() ran
let _heartbeatConsecutiveFailures = 0; // reset to 0 on any success
let _heartbeatTotalSent = 0;
let _heartbeatTotalFailed = 0;
let _heartbeatFpSent = false; // env fingerprint is sent at most once per process
let _latestAvailableWork = []; // work items from the last heartbeat response
let _latestOverdueTasks = [];
let _latestSkillStoreHint = null;
let _latestNoveltyHint = null;
let _latestCapabilityGaps = [];
let _pendingCommitmentUpdates = []; // queued by queueCommitmentUpdate(), drained into heartbeat meta
let _latestHubEvents = []; // events buffered by _fetchHubEvents()
let _pollInflight = false; // guards against overlapping event polls
let _cachedHubNodeSecret = null;
let _cachedHubNodeSecretAt = 0; // epoch ms of the cache entry above
const _SECRET_CACHE_TTL_MS = 60000;
let _heartbeatIntervalMs = 0;
let _heartbeatRunning = false;
const NODE_SECRET_FILE = path.join(NODE_ID_DIR, 'node_secret');
// Load the hub-issued node secret (64 hex chars) from disk, or null when
// missing, malformed, or unreadable.
function _loadPersistedNodeSecret() {
  try {
    if (!fs.existsSync(NODE_SECRET_FILE)) return null;
    const secret = fs.readFileSync(NODE_SECRET_FILE, 'utf8').trim();
    return secret && /^[a-f0-9]{64}$/i.test(secret) ? secret : null;
  } catch (e) {
    return null;
  }
}
// Persist the hub-issued node secret to ~/.evomap/node_secret (dir 0700,
// file 0600). Failure is logged but non-fatal: the secret remains usable
// from the in-memory cache for this process.
function _persistNodeSecret(secret) {
  try {
    if (!fs.existsSync(NODE_ID_DIR)) {
      fs.mkdirSync(NODE_ID_DIR, { recursive: true, mode: 0o700 });
    }
    fs.writeFileSync(NODE_SECRET_FILE, secret, { encoding: 'utf8', mode: 0o600 });
  } catch (e) {
    console.warn('[a2aProtocol] Failed to persist node secret:', e && e.message || e);
  }
}
// Hub base URL from the environment (A2A_HUB_URL preferred, EVOMAP_HUB_URL
// as fallback); '' when neither is set.
function getHubUrl() {
  const primary = process.env.A2A_HUB_URL;
  if (primary) return primary;
  return process.env.EVOMAP_HUB_URL || '';
}
// JSON content-type plus a Bearer Authorization header when a node secret
// is available (see getHubNodeSecret for the resolution order).
function buildHubHeaders() {
  const headers = { 'Content-Type': 'application/json' };
  const secret = getHubNodeSecret();
  if (secret) headers['Authorization'] = 'Bearer ' + secret;
  return headers;
}
// Register this node with the hub via POST /a2a/hello (15s timeout).
// If the response carries a 64-hex-char node_secret (at payload.node_secret
// or top-level), it is cached in memory and persisted to disk for Bearer
// auth on later requests. Resolves to { ok, response } or
// { ok:false, error }; never rejects.
function sendHelloToHub() {
  const hubUrl = getHubUrl();
  if (!hubUrl) return Promise.resolve({ ok: false, error: 'no_hub_url' });
  const endpoint = hubUrl.replace(/\/+$/, '') + '/a2a/hello';
  const nodeId = getNodeId();
  const msg = buildHello({ nodeId: nodeId, capabilities: {} });
  msg.sender_id = nodeId;
  return fetch(endpoint, {
    method: 'POST',
    headers: buildHubHeaders(),
    body: JSON.stringify(msg),
    signal: AbortSignal.timeout(15000),
  })
    .then(function (res) { return res.json(); })
    .then(function (data) {
      const secret = (data && data.payload && data.payload.node_secret)
        || (data && data.node_secret)
        || null;
      if (secret && /^[a-f0-9]{64}$/i.test(secret)) {
        _cachedHubNodeSecret = secret;
        _cachedHubNodeSecretAt = Date.now();
        _persistNodeSecret(secret);
      }
      return { ok: true, response: data };
    })
    .catch(function (err) { return { ok: false, error: err.message }; });
}
// Resolve the secret used for hub Bearer auth, in priority order:
//   1. A2A_NODE_SECRET env var,
//   2. in-memory cache refreshed within the last 60s,
//   3. secret persisted on disk by sendHelloToHub (re-cached on hit),
//   4. legacy A2A_HUB_TOKEN env var.
// Returns null when none is available (requests then go unauthenticated).
// NOTE(review): a persisted secret takes precedence over A2A_HUB_TOKEN —
// confirm that ordering is intended.
function getHubNodeSecret() {
  if (process.env.A2A_NODE_SECRET) return process.env.A2A_NODE_SECRET;
  const now = Date.now();
  if (_cachedHubNodeSecret && (now - _cachedHubNodeSecretAt) < _SECRET_CACHE_TTL_MS) {
    return _cachedHubNodeSecret;
  }
  const persisted = _loadPersistedNodeSecret();
  if (persisted) {
    _cachedHubNodeSecret = persisted;
    _cachedHubNodeSecretAt = now;
    return persisted;
  }
  if (process.env.A2A_HUB_TOKEN) return process.env.A2A_HUB_TOKEN;
  return null;
}
// (Re)arm the heartbeat timer. Each tick fires sendHeartbeat() and then
// re-arms at the default interval; rate-limit backoff works by calling
// this again with a longer delay, which replaces the pending timer. The
// timer is unref'd so it never keeps the process alive on its own.
function _scheduleNextHeartbeat(delayMs) {
  if (!_heartbeatRunning) return;
  if (_heartbeatTimer) clearTimeout(_heartbeatTimer);
  const delay = delayMs || _heartbeatIntervalMs;
  _heartbeatTimer = setTimeout(function () {
    if (!_heartbeatRunning) return;
    sendHeartbeat().catch(function (err) {
      console.warn('[Heartbeat] Scheduled heartbeat failed:', err && err.message || err);
    });
    // Re-arms immediately, not after sendHeartbeat() settles — beats are
    // spaced by the interval regardless of request duration.
    _scheduleNextHeartbeat();
  }, delay);
  if (_heartbeatTimer.unref) _heartbeatTimer.unref();
}
// Send one heartbeat to the hub and process its response. Beyond
// liveness, this: backs off when rate-limited, re-registers (hello) when
// the hub reports unknown_node, caches work/overdue/skill-store/novelty/
// capability-gap hints into module state for the getters below, triggers
// an event poll when has_pending_events is set, and touches the evolver
// loop log so external monitors see liveness. Resolves to { ok, ... };
// network failures are counted and logged at escalating thresholds, never
// thrown.
function sendHeartbeat() {
  const hubUrl = getHubUrl();
  if (!hubUrl) return Promise.resolve({ ok: false, error: 'no_hub_url' });
  const endpoint = hubUrl.replace(/\/+$/, '') + '/a2a/heartbeat';
  const nodeId = getNodeId();
  const bodyObj = {
    node_id: nodeId,
    sender_id: nodeId,
    version: PROTOCOL_VERSION,
    uptime_ms: _heartbeatStartedAt ? Date.now() - _heartbeatStartedAt : 0,
    timestamp: new Date().toISOString(),
  };
  // Optional meta: worker capability advertisement, queued commitment
  // updates, and (once per process) the environment fingerprint.
  const meta = {};
  if (process.env.WORKER_ENABLED === '1') {
    const domains = (process.env.WORKER_DOMAINS || '').split(',').map(function (s) { return s.trim(); }).filter(Boolean);
    meta.worker_enabled = true;
    meta.worker_domains = domains;
    meta.max_load = Math.max(1, Number(process.env.WORKER_MAX_LOAD) || 5);
  }
  if (_pendingCommitmentUpdates.length > 0) {
    // splice(0) drains the queue atomically into this heartbeat.
    meta.commitment_updates = _pendingCommitmentUpdates.splice(0);
  }
  if (!_heartbeatFpSent) {
    try {
      const fp = captureEnvFingerprint();
      if (fp && fp.evolver_version) {
        meta.env_fingerprint = fp;
        _heartbeatFpSent = true;
      }
    } catch (e) {
      console.warn('[a2aProtocol] Failed to capture env fingerprint:', e && e.message || e);
    }
  }
  if (Object.keys(meta).length > 0) {
    bodyObj.meta = meta;
  }
  const body = JSON.stringify(bodyObj);
  _heartbeatTotalSent++;
  return fetch(endpoint, {
    method: 'POST',
    headers: buildHubHeaders(),
    body: body,
    signal: AbortSignal.timeout(10000),
  })
    .then(function (res) { return res.json(); })
    .then(function (data) {
      // Rate limited: reschedule the next beat past the hub's window.
      if (data && (data.error === 'rate_limited' || data.status === 'rate_limited')) {
        const retryMs = Number(data.retry_after_ms) || 0;
        const policy = data.policy || {};
        const windowMs = Number(policy.window_ms) || 0;
        const backoff = retryMs > 0 ? retryMs + 5000 : (windowMs > 0 ? windowMs + 5000 : _heartbeatIntervalMs);
        if (backoff > _heartbeatIntervalMs) {
          console.warn('[Heartbeat] Rate limited by hub. Next attempt in ' + Math.round(backoff / 1000) + 's. ' +
            'Consider increasing HEARTBEAT_INTERVAL_MS to >= ' + (windowMs || backoff) + 'ms.');
          _scheduleNextHeartbeat(backoff);
        }
        return { ok: false, error: 'rate_limited', retryMs: backoff };
      }
      // Hub lost our registration: re-send hello and report the outcome.
      if (data && data.status === 'unknown_node') {
        console.warn('[Heartbeat] Node not registered on hub. Sending hello to re-register...');
        return sendHelloToHub().then(function (helloResult) {
          if (helloResult.ok) {
            console.log('[Heartbeat] Re-registered with hub successfully.');
            _heartbeatConsecutiveFailures = 0;
          } else {
            console.warn('[Heartbeat] Re-registration failed: ' + (helloResult.error || 'unknown'));
          }
          return { ok: helloResult.ok, response: data, reregistered: helloResult.ok };
        });
      }
      // Cache response hints into module state for the getters/consumers.
      // NOTE(review): if the hub returns a null JSON body, the property
      // reads below throw and the beat is counted as a failure by .catch.
      if (Array.isArray(data.available_work)) {
        _latestAvailableWork = data.available_work;
      }
      if (Array.isArray(data.overdue_tasks) && data.overdue_tasks.length > 0) {
        _latestOverdueTasks = data.overdue_tasks;
        console.warn('[Commitment] ' + data.overdue_tasks.length + ' overdue task(s) detected via heartbeat.');
      }
      if (data.skill_store) {
        _latestSkillStoreHint = data.skill_store;
        if (data.skill_store.eligible && data.skill_store.published_skills === 0) {
          console.log('[Skill Store] ' + data.skill_store.hint);
        }
      }
      if (data.novelty && typeof data.novelty === 'object') {
        _latestNoveltyHint = data.novelty;
      }
      if (Array.isArray(data.capability_gaps) && data.capability_gaps.length > 0) {
        _latestCapabilityGaps = data.capability_gaps;
      }
      if (data.circle_experience && typeof data.circle_experience === 'object') {
        console.log('[EvolutionCircle] Active circle: ' + (data.circle_experience.circle_id || '?') + ' (' + (data.circle_experience.member_count || 0) + ' members)');
      }
      if (data.has_pending_events) {
        // Fire-and-forget event poll; results land in _latestHubEvents.
        _fetchHubEvents().catch(function (err) {
          console.warn('[Events] Poll failed:', err && err.message || err);
        });
      }
      _heartbeatConsecutiveFailures = 0;
      // Touch evolver_loop.log (creating it if absent) so external
      // liveness monitors see a fresh mtime on every successful beat.
      try {
        const logPath = getEvolverLogPath();
        fs.mkdirSync(path.dirname(logPath), { recursive: true });
        const now = new Date();
        try {
          fs.utimesSync(logPath, now, now);
        } catch (e) {
          if (e && e.code === 'ENOENT') {
            try {
              const fd = fs.openSync(logPath, 'a');
              fs.closeSync(fd);
              fs.utimesSync(logPath, now, now);
            } catch (innerErr) {
              console.warn('[Heartbeat] Failed to create evolver_loop.log: ' + innerErr.message);
            }
          } else {
            console.warn('[Heartbeat] Failed to touch evolver_loop.log: ' + e.message);
          }
        }
      } catch (outerErr) {
        console.warn('[Heartbeat] Failed to ensure evolver_loop.log: ' + outerErr.message);
      }
      return { ok: true, response: data };
    })
    .catch(function (err) {
      // Escalating warnings at 3, 10, then every 50 consecutive failures.
      _heartbeatConsecutiveFailures++;
      _heartbeatTotalFailed++;
      if (_heartbeatConsecutiveFailures === 3) {
        console.warn('[Heartbeat] 3 consecutive failures. Network issue? Last error: ' + err.message);
      } else if (_heartbeatConsecutiveFailures === 10) {
        console.warn('[Heartbeat] 10 consecutive failures. Hub may be unreachable. (' + err.message + ')');
      } else if (_heartbeatConsecutiveFailures % 50 === 0) {
        console.warn('[Heartbeat] ' + _heartbeatConsecutiveFailures + ' consecutive failures. (' + err.message + ')');
      }
      return { ok: false, error: err.message };
    });
}
// Latest available_work from the most recent heartbeat (peek; not cleared).
function getLatestAvailableWork() {
  return _latestAvailableWork;
}
// Return and clear the buffered available_work items.
function consumeAvailableWork() {
  const work = _latestAvailableWork;
  _latestAvailableWork = [];
  return work;
}
// Overdue tasks reported by the hub (peek; not cleared).
function getOverdueTasks() {
  return _latestOverdueTasks;
}
// Most recent skill-store hint from a heartbeat, or null.
function getSkillStoreHint() {
  return _latestSkillStoreHint;
}
// Return and clear the buffered overdue tasks.
function consumeOverdueTasks() {
  const tasks = _latestOverdueTasks;
  _latestOverdueTasks = [];
  return tasks;
}
// Most recent novelty hint from a heartbeat, or null.
function getNoveltyHint() {
  return _latestNoveltyHint;
}
// Capability gaps reported by the hub (peek; not cleared).
function getCapabilityGaps() {
  return _latestCapabilityGaps;
}
/**
 * Fetch pending high-priority events from the hub via long-poll.
 * Called automatically when heartbeat returns has_pending_events: true.
 * Results are stored in _latestHubEvents and can be consumed via consumeHubEvents().
 * A module-level in-flight flag prevents overlapping polls; resolves to
 * the fetched events ([] on failure or when a poll is already running).
 * 60s timeout via AbortSignal.
 */
function _fetchHubEvents() {
  if (_pollInflight) return Promise.resolve([]);
  const hubUrl = getHubUrl();
  if (!hubUrl) return Promise.resolve([]);
  _pollInflight = true;
  const nodeId = getNodeId();
  const endpoint = hubUrl.replace(/\/+$/, '') + '/a2a/events/poll';
  const body = JSON.stringify({
    protocol: 'gep-a2a',
    protocol_version: PROTOCOL_VERSION,
    message_type: 'events_poll',
    message_id: 'poll_' + Date.now(),
    timestamp: new Date().toISOString(),
    sender_id: nodeId,
    payload: {},
  });
  return fetch(endpoint, {
    method: 'POST',
    headers: buildHubHeaders(),
    body: body,
    signal: AbortSignal.timeout(60000),
  })
    .then(function (res) { return res.json(); })
    .then(function (data) {
      // Accept either a top-level events array or payload.events.
      const events = (data && Array.isArray(data.events))
        ? data.events
        : (data && data.payload && Array.isArray(data.payload.events))
          ? data.payload.events
          : [];
      if (events.length > 0) {
        _latestHubEvents = _latestHubEvents.concat(events);
        console.log('[Events] Received ' + events.length + ' pending event(s): ' +
          events.map(function (e) { return e.type; }).join(', '));
      }
      return events;
    })
    .catch(function (err) {
      console.warn('[Events] Poll error:', err && err.message || err);
      return [];
    })
    .finally(function () {
      _pollInflight = false;
    });
}
/**
 * Returns all buffered hub events (does not clear the buffer).
 */
function getHubEvents() {
  return _latestHubEvents;
}
/**
 * Returns and clears all buffered hub events.
 */
function consumeHubEvents() {
  const events = _latestHubEvents;
  _latestHubEvents = [];
  return events;
}
/**
 * Queue a commitment deadline update to be sent with the next heartbeat.
 * Silently ignored when either required argument is missing.
 * @param {string} taskId
 * @param {string} deadlineIso - ISO-8601 deadline
 * @param {boolean} [isAssignment] - true if this is a WorkAssignment
 */
function queueCommitmentUpdate(taskId, deadlineIso, isAssignment) {
  if (!taskId || !deadlineIso) return;
  _pendingCommitmentUpdates.push({
    task_id: taskId,
    deadline: deadlineIso,
    assignment: !!isAssignment,
  });
}
// Start the heartbeat loop (no-op if already running). Interval comes
// from the argument, then HEARTBEAT_INTERVAL_MS, then a 6-minute default.
// Sends hello first to register with the hub, then schedules the first
// heartbeat at least 30s out to stay clear of the hub's rate limit.
function startHeartbeat(intervalMs) {
  if (_heartbeatRunning) return;
  _heartbeatIntervalMs = intervalMs || Number(process.env.HEARTBEAT_INTERVAL_MS) || 360000; // default 6min
  _heartbeatStartedAt = Date.now();
  _heartbeatRunning = true;
  sendHelloToHub().then(function (r) {
    if (r.ok) console.log('[Heartbeat] Registered with hub. Node: ' + getNodeId());
    else console.warn('[Heartbeat] Hello failed (will retry via heartbeat): ' + (r.error || 'unknown'));
  }).catch(function (err) {
    console.warn('[Heartbeat] Hello during startup failed:', err && err.message || err);
  }).then(function () {
    if (!_heartbeatRunning) return;
    // First heartbeat after hello completes, with enough gap to avoid rate limit
    _scheduleNextHeartbeat(Math.max(30000, _heartbeatIntervalMs));
  });
}
// Stop the heartbeat loop and cancel any pending timer.
function stopHeartbeat() {
  _heartbeatRunning = false;
  if (!_heartbeatTimer) return;
  clearTimeout(_heartbeatTimer);
  _heartbeatTimer = null;
}
// Snapshot of heartbeat counters for diagnostics.
function getHeartbeatStats() {
  const uptimeMs = _heartbeatStartedAt ? Date.now() - _heartbeatStartedAt : 0;
  return {
    running: _heartbeatRunning,
    uptimeMs: uptimeMs,
    totalSent: _heartbeatTotalSent,
    totalFailed: _heartbeatTotalFailed,
    consecutiveFailures: _heartbeatConsecutiveFailures,
  };
}
// --- Transport registry ---
// Built-in transports keyed by name; extendable at runtime via
// registerTransport(). Each entry implements send/receive/list.
const transports = {
  file: {
    send: fileTransportSend,
    receive: fileTransportReceive,
    list: fileTransportList,
  },
  http: {
    send: httpTransportSend,
    receive: httpTransportReceive,
    list: httpTransportList,
  },
};
// Resolve a transport by name (or the A2A_TRANSPORT env var, defaulting
// to 'file'); throws with the list of known transports on an unknown name.
function getTransport(name) {
  const key = String(name || process.env.A2A_TRANSPORT || 'file').toLowerCase();
  const transport = transports[key];
  if (transport) return transport;
  throw new Error('Unknown A2A transport: ' + key + '. Available: ' + Object.keys(transports).join(', '));
}
/**
 * Register (or replace) a custom transport implementation.
 * @param {string} name - registry key.
 * @param {{send: Function, receive: Function}} impl - must implement send() and receive().
 * @throws {Error} on a missing name or an incomplete implementation.
 */
function registerTransport(name, impl) {
  if (typeof name !== 'string' || !name) throw new Error('transport name required');
  const hasSend = impl && typeof impl.send === 'function';
  const hasReceive = impl && typeof impl.receive === 'function';
  if (!hasSend || !hasReceive) {
    throw new Error('transport must implement send() and receive()');
  }
  transports[name] = impl;
}
// Public API surface of the A2A protocol module.
module.exports = {
  // Protocol constants
  PROTOCOL_NAME,
  PROTOCOL_VERSION,
  VALID_MESSAGE_TYPES,
  // Message construction / validation
  getNodeId,
  buildMessage,
  buildHello,
  buildPublish,
  buildPublishBundle,
  buildFetch,
  buildReport,
  buildDecision,
  buildRevoke,
  isValidProtocolMessage,
  unwrapAssetFromMessage,
  // Transport registry and built-in transports
  getTransport,
  registerTransport,
  fileTransportSend,
  fileTransportReceive,
  fileTransportList,
  httpTransportSend,
  httpTransportReceive,
  httpTransportList,
  // Heartbeat lifecycle
  sendHeartbeat,
  sendHelloToHub,
  startHeartbeat,
  stopHeartbeat,
  getHeartbeatStats,
  // Hub work queues / hints / events
  getLatestAvailableWork,
  consumeAvailableWork,
  getOverdueTasks,
  consumeOverdueTasks,
  getSkillStoreHint,
  queueCommitmentUpdate,
  getHubUrl,
  getHubNodeSecret,
  buildHubHeaders,
  getNoveltyHint,
  getCapabilityGaps,
  getHubEvents,
  consumeHubEvents,
};

35
src/gep/analyzer.js Normal file
View File

@@ -0,0 +1,35 @@
const fs = require('fs');
const path = require('path');
// Innovation: Self-Correction Analyzer
// Analyze past failures to suggest better future mutations
// Pattern: Meta-learning
// Scan MEMORY.md (in cwd) for recorded fix rows shaped like
// "| **F1** | Fix | summary | **detail** (date) |" and return up to three
// of them as context for future mutations (meta-learning).
function analyzeFailures() {
  const memoryPath = path.join(process.cwd(), 'MEMORY.md');
  if (!fs.existsSync(memoryPath)) return { status: 'skipped', reason: 'no_memory' };
  const content = fs.readFileSync(memoryPath, 'utf8');
  // Fresh /g literal per call, so lastIndex state never leaks between runs.
  const failureRegex = /\|\s*\*\*F\d+\*\*\s*\|\s*Fix\s*\|\s*(.*?)\s*\|\s*\*\*(.*?)\*\*\s*\((.*?)\)\s*\|/g;
  const failures = [];
  for (let match = failureRegex.exec(content); match !== null; match = failureRegex.exec(content)) {
    failures.push({
      summary: match[1].trim(),
      detail: match[2].trim(),
    });
  }
  return {
    status: 'success',
    count: failures.length,
    failures: failures.slice(0, 3), // Return top 3 for prompt context
  };
}
// CLI entry point: `node src/gep/analyzer.js` prints the analysis as JSON.
if (require.main === module) {
  console.log(JSON.stringify(analyzeFailures(), null, 2));
}
module.exports = { analyzeFailures };

130
src/gep/assetCallLog.js Normal file
View File

@@ -0,0 +1,130 @@
// Append-only asset call log for tracking Hub asset interactions per evolution run.
// Log file: {evolution_dir}/asset_call_log.jsonl
const fs = require('fs');
const path = require('path');
const { getEvolutionDir } = require('./paths');
// Resolve the current run's log location: {evolution_dir}/asset_call_log.jsonl
function getLogPath() {
  return path.join(getEvolutionDir(), 'asset_call_log.jsonl');
}
// Create the parent directory of filePath (and ancestors) if missing.
function ensureDir(filePath) {
  const parent = path.dirname(filePath);
  if (fs.existsSync(parent)) return;
  fs.mkdirSync(parent, { recursive: true });
}
/**
* Append a single asset call record to the log.
*
* @param {object} entry
* @param {string} entry.run_id
* @param {string} entry.action - hub_search_hit | hub_search_miss | asset_reuse | asset_reference | asset_publish | asset_publish_skip
* @param {string} [entry.asset_id]
* @param {string} [entry.asset_type]
* @param {string} [entry.source_node_id]
* @param {string} [entry.chain_id]
* @param {number} [entry.score]
* @param {string} [entry.mode] - direct | reference
* @param {string[]} [entry.signals]
* @param {string} [entry.reason]
* @param {object} [entry.extra]
*/
function logAssetCall(entry) {
  if (!entry || typeof entry !== 'object') return;
  try {
    const logPath = getLogPath();
    ensureDir(logPath);
    // Timestamp first; caller-provided fields may not override it being present.
    const record = { timestamp: new Date().toISOString(), ...entry };
    const line = JSON.stringify(record) + '\n';
    fs.appendFileSync(logPath, line, 'utf8');
  } catch (e) {
    // Non-fatal: never block evolution for logging failure
  }
}
/**
* Read asset call log entries with optional filters.
*
* @param {object} [opts]
* @param {string} [opts.run_id] - filter by run_id
* @param {string} [opts.action] - filter by action type
* @param {number} [opts.last] - only return last N entries
* @param {string} [opts.since] - ISO date string, only entries after this time
* @returns {object[]}
*/
function readCallLog(opts) {
  const o = opts || {};
  const logPath = getLogPath();
  if (!fs.existsSync(logPath)) return [];
  // Parse each JSONL line; corrupt lines become null and are dropped.
  let entries = fs
    .readFileSync(logPath, 'utf8')
    .split('\n')
    .filter(Boolean)
    .map((line) => {
      try { return JSON.parse(line); } catch (e) { return null; }
    })
    .filter(Boolean);
  if (o.since) {
    const sinceTs = new Date(o.since).getTime();
    if (Number.isFinite(sinceTs)) {
      entries = entries.filter((e) => new Date(e.timestamp).getTime() >= sinceTs);
    }
  }
  if (o.run_id) entries = entries.filter((e) => e.run_id === o.run_id);
  if (o.action) entries = entries.filter((e) => e.action === o.action);
  if (o.last && Number.isFinite(o.last) && o.last > 0) {
    entries = entries.slice(-o.last);
  }
  return entries;
}
/**
* Summarize asset call log (for CLI display).
*
* @param {object} [opts] - same filters as readCallLog
* @returns {object} summary with totals and per-action counts
*/
function summarizeCallLog(opts) {
  const entries = readCallLog(opts);
  const byAction = {};
  const assets = new Set();
  const runs = new Set();
  entries.forEach((e) => {
    const action = e.action || 'unknown';
    byAction[action] = (byAction[action] || 0) + 1;
    if (e.asset_id) assets.add(e.asset_id);
    if (e.run_id) runs.add(e.run_id);
  });
  return {
    total_entries: entries.length,
    unique_assets: assets.size,
    unique_runs: runs.size,
    by_action: byAction,
    entries,
  };
}
// Public API: write (logAssetCall), read (readCallLog), aggregate
// (summarizeCallLog), and locate (getLogPath) the asset call log.
module.exports = {
  logAssetCall,
  readCallLog,
  summarizeCallLog,
  getLogPath,
};

369
src/gep/assetStore.js Normal file
View File

@@ -0,0 +1,369 @@
const fs = require('fs');
const path = require('path');
const { getGepAssetsDir } = require('./paths');
const { computeAssetId, SCHEMA_VERSION } = require('./contentHash');
// Create a directory (and any missing ancestors) if it does not exist.
function ensureDir(dir) {
  if (fs.existsSync(dir)) return;
  fs.mkdirSync(dir, { recursive: true });
}
// Parse a JSON file, returning `fallback` when the file is missing,
// blank, or unparseable (read/parse failures are logged, not thrown).
function readJsonIfExists(filePath, fallback) {
  try {
    if (!fs.existsSync(filePath)) return fallback;
    const raw = fs.readFileSync(filePath, 'utf8');
    if (raw.trim() === '') return fallback;
    return JSON.parse(raw);
  } catch (e) {
    console.warn('[AssetStore] Failed to read ' + filePath + ':', e && e.message || e);
    return fallback;
  }
}
// Write pretty-printed JSON via a temp file in the same directory, then
// rename over the target so readers never observe a half-written file.
function writeJsonAtomic(filePath, obj) {
  ensureDir(path.dirname(filePath));
  const tmp = `${filePath}.tmp`;
  const body = JSON.stringify(obj, null, 2) + '\n';
  fs.writeFileSync(tmp, body, 'utf8');
  fs.renameSync(tmp, filePath);
}
// Build a validation command using repo-root-relative paths.
// runValidations() executes with cwd=repoRoot, so require('./src/...')
// resolves correctly without embedding machine-specific absolute paths.
function buildValidationCmd(relModules) {
  const args = relModules.map((m) => `./${m}`).join(' ');
  return `node scripts/validate-modules.js ${args}`;
}
// Built-in seed genes used when genes.json does not exist yet.
// Each Gene declares when it applies (signals_match), how to proceed
// (strategy), its limits (constraints), and how to verify (validation).
function getDefaultGenes() {
  return {
    version: 1,
    genes: [
      // Repair gene: selected when error-like signals appear in logs/instructions.
      {
        type: 'Gene', id: 'gene_gep_repair_from_errors', category: 'repair',
        signals_match: ['error', 'exception', 'failed', 'unstable'],
        preconditions: ['signals contains error-related indicators'],
        strategy: [
          'Extract structured signals from logs and user instructions',
          'Select an existing Gene by signals match (no improvisation)',
          'Estimate blast radius (files, lines) before editing',
          'Apply smallest reversible patch',
          'Validate using declared validation steps; rollback on failure',
          'Solidify knowledge: append EvolutionEvent, update Gene/Capsule store',
        ],
        constraints: { max_files: 12, forbidden_paths: ['.git', 'node_modules'] },
        validation: [
          buildValidationCmd(['src/evolve', 'src/gep/solidify', 'src/gep/policyCheck', 'src/gep/selector', 'src/gep/memoryGraph', 'src/gep/assetStore']),
          'node scripts/validate-suite.js',
        ],
      },
      // Optimization gene: tightens the evolution protocol and its outputs.
      {
        type: 'Gene', id: 'gene_gep_optimize_prompt_and_assets', category: 'optimize',
        signals_match: ['protocol', 'gep', 'prompt', 'audit', 'reusable'],
        preconditions: ['need stricter, auditable evolution protocol outputs'],
        strategy: [
          'Extract signals and determine selection rationale via Selector JSON',
          'Prefer reusing existing Gene/Capsule; only create if no match exists',
          'Refactor prompt assembly to embed assets (genes, capsules, parent event)',
          'Reduce noise and ambiguity; enforce strict output schema',
          'Validate by running node index.js run and ensuring no runtime errors',
          'Solidify: record EvolutionEvent, update Gene definitions, create Capsule on success',
        ],
        constraints: { max_files: 20, forbidden_paths: ['.git', 'node_modules'] },
        validation: [
          buildValidationCmd(['src/evolve', 'src/gep/prompt', 'src/gep/contentHash', 'src/gep/skillDistiller']),
          'node scripts/validate-suite.js',
        ],
      },
      // Integrity gene: counteracts agents bypassing registered tools.
      {
        type: 'Gene', id: 'gene_tool_integrity', category: 'repair',
        signals_match: ['tool_bypass'],
        preconditions: ['agent used shell/exec to perform an action that a registered tool can handle'],
        strategy: [
          'Always prefer registered tools over ad-hoc scripts or shell workarounds',
          'If a registered tool fails, report the actual error honestly and attempt to fix the root cause',
          'Never fabricate explanations -- describe actual actions transparently',
          'Do not create temporary scripts in extension or project directories',
        ],
        constraints: { max_files: 4, forbidden_paths: ['.git', 'node_modules'] },
        validation: [
          'node scripts/validate-suite.js',
        ],
        anti_patterns: ['tool_bypass'],
      },
    ],
  };
}
// Empty capsule store shape used when capsules.json is missing.
function getDefaultCapsules() { return { version: 1, capsules: [] }; }
// Canonical locations of the asset-store files under the GEP assets dir.
function genesPath() { return path.join(getGepAssetsDir(), 'genes.json'); }
function capsulesPath() { return path.join(getGepAssetsDir(), 'capsules.json'); }
function capsulesJsonlPath() { return path.join(getGepAssetsDir(), 'capsules.jsonl'); }
function eventsPath() { return path.join(getGepAssetsDir(), 'events.jsonl'); }
function candidatesPath() { return path.join(getGepAssetsDir(), 'candidates.jsonl'); }
function externalCandidatesPath() { return path.join(getGepAssetsDir(), 'external_candidates.jsonl'); }
function failedCapsulesPath() { return path.join(getGepAssetsDir(), 'failed_capsules.json'); }
// Load genes from both stores: legacy genes.json plus append-only
// genes.jsonl. Results are deduplicated by id; JSONL entries are merged
// after JSON, so a duplicate id from JSONL replaces the JSON one.
function loadGenes() {
  const jsonGenes = readJsonIfExists(genesPath(), getDefaultGenes()).genes || [];
  const jsonlGenes = [];
  try {
    const jsonlFile = path.join(getGepAssetsDir(), 'genes.jsonl');
    if (fs.existsSync(jsonlFile)) {
      for (const line of fs.readFileSync(jsonlFile, 'utf8').split('\n')) {
        if (!line.trim()) continue;
        try {
          const parsed = JSON.parse(line);
          if (parsed && parsed.type === 'Gene') jsonlGenes.push(parsed);
        } catch (e) {}
      }
    }
  } catch (e) {
    console.warn('[AssetStore] Failed to read genes.jsonl:', e && e.message || e);
  }
  const byId = new Map();
  for (const gene of [...jsonGenes, ...jsonlGenes]) {
    if (gene && gene.id) byId.set(String(gene.id), gene);
  }
  return Array.from(byId.values());
}
// Load capsules from legacy capsules.json plus append-only capsules.jsonl,
// deduplicated by id (later JSONL entries win on collision).
function loadCapsules() {
  const legacy = readJsonIfExists(capsulesPath(), getDefaultCapsules()).capsules || [];
  const fromJsonl = [];
  try {
    const p = capsulesJsonlPath();
    if (fs.existsSync(p)) {
      for (const line of fs.readFileSync(p, 'utf8').split('\n')) {
        if (!line.trim()) continue;
        try { fromJsonl.push(JSON.parse(line)); } catch (e) {}
      }
    }
  } catch (e) {
    console.warn('[AssetStore] Failed to read capsules.jsonl:', e && e.message || e);
  }
  const byId = new Map();
  for (const capsule of [...legacy, ...fromJsonl]) {
    if (capsule && capsule.id) byId.set(String(capsule.id), capsule);
  }
  return Array.from(byId.values());
}
// Return the id of the most recent event in events.jsonl, or null when the
// file is missing, empty, or the last line is malformed.
function getLastEventId() {
  try {
    const p = eventsPath();
    if (!fs.existsSync(p)) return null;
    const lines = fs.readFileSync(p, 'utf8').split('\n').map((l) => l.trim()).filter(Boolean);
    if (lines.length === 0) return null;
    const last = JSON.parse(lines[lines.length - 1]);
    return last && typeof last.id === 'string' ? last.id : null;
  } catch (e) {
    console.warn('[AssetStore] Failed to read last event ID:', e && e.message || e);
    return null;
  }
}
// Read every parseable event from events.jsonl; corrupt lines are dropped.
function readAllEvents() {
  try {
    const p = eventsPath();
    if (!fs.existsSync(p)) return [];
    const events = [];
    for (const line of fs.readFileSync(p, 'utf8').split('\n')) {
      const trimmed = line.trim();
      if (!trimmed) continue;
      try { events.push(JSON.parse(trimmed)); } catch (e) {}
    }
    return events;
  } catch (e) {
    console.warn('[AssetStore] Failed to read events.jsonl:', e && e.message || e);
    return [];
  }
}
// Append-only JSONL writers: one JSON object per line, directory created
// on demand. Events, internal candidates, and external (A2A) candidates.
function appendEventJsonl(eventObj) {
  const dir = getGepAssetsDir(); ensureDir(dir);
  fs.appendFileSync(eventsPath(), JSON.stringify(eventObj) + '\n', 'utf8');
}
function appendCandidateJsonl(candidateObj) {
  const dir = getGepAssetsDir(); ensureDir(dir);
  fs.appendFileSync(candidatesPath(), JSON.stringify(candidateObj) + '\n', 'utf8');
}
function appendExternalCandidateJsonl(obj) {
  const dir = getGepAssetsDir(); ensureDir(dir);
  fs.appendFileSync(externalCandidatesPath(), JSON.stringify(obj) + '\n', 'utf8');
}
// Return the last `limit` parseable records from candidates.jsonl.
// Files under 1MB are read whole; larger files are tail-read so a
// long-lived append-only log can never cause an OOM.
function readRecentCandidates(limit = 20) {
  try {
    const p = candidatesPath();
    if (!fs.existsSync(p)) return [];
    const stat = fs.statSync(p);
    if (stat.size < 1024 * 1024) {
      const raw = fs.readFileSync(p, 'utf8');
      const lines = raw.split('\n').map(l => l.trim()).filter(Boolean);
      return lines.slice(-limit).map(l => {
        try { return JSON.parse(l); } catch { return null; }
      }).filter(Boolean);
    }
    // Large file (>1MB): only read the tail to avoid OOM.
    const fd = fs.openSync(p, 'r');
    try {
      // Heuristic: budget ~4KB per record when sizing the tail chunk.
      const chunkSize = Math.min(stat.size, limit * 4096);
      const buf = Buffer.alloc(chunkSize);
      fs.readSync(fd, buf, 0, chunkSize, stat.size - chunkSize);
      // First line of the chunk may be a partial record; JSON.parse fails
      // on it and the null is filtered out below.
      const lines = buf.toString('utf8').split('\n').map(l => l.trim()).filter(Boolean);
      return lines.slice(-limit).map(l => {
        try { return JSON.parse(l); } catch { return null; }
      }).filter(Boolean);
    } finally {
      fs.closeSync(fd);
    }
  } catch (e) {
    console.warn('[AssetStore] Failed to read candidates.jsonl:', e && e.message || e);
    return [];
  }
}
// Same tail-reading strategy as readRecentCandidates, applied to the
// external (A2A-sourced) candidates log.
function readRecentExternalCandidates(limit = 50) {
  try {
    const p = externalCandidatesPath();
    if (!fs.existsSync(p)) return [];
    const stat = fs.statSync(p);
    if (stat.size < 1024 * 1024) {
      const raw = fs.readFileSync(p, 'utf8');
      const lines = raw.split('\n').map(l => l.trim()).filter(Boolean);
      return lines.slice(-limit).map(l => {
        try { return JSON.parse(l); } catch { return null; }
      }).filter(Boolean);
    }
    // Large file (>1MB): tail-read only; a leading partial record is
    // dropped by the JSON.parse/filter step.
    const fd = fs.openSync(p, 'r');
    try {
      const chunkSize = Math.min(stat.size, limit * 4096);
      const buf = Buffer.alloc(chunkSize);
      fs.readSync(fd, buf, 0, chunkSize, stat.size - chunkSize);
      const lines = buf.toString('utf8').split('\n').map(l => l.trim()).filter(Boolean);
      return lines.slice(-limit).map(l => {
        try { return JSON.parse(l); } catch { return null; }
      }).filter(Boolean);
    } finally {
      fs.closeSync(fd);
    }
  } catch (e) {
    console.warn('[AssetStore] Failed to read external_candidates.jsonl:', e && e.message || e);
    return [];
  }
}
// Safety net: ensure schema_version and asset_id are present before writing.
// Mutates obj in place; hashing failures are logged, not thrown.
function ensureSchemaFields(obj) {
  if (!obj || typeof obj !== 'object') return obj;
  if (!obj.schema_version) obj.schema_version = SCHEMA_VERSION;
  if (obj.asset_id) return obj;
  try {
    obj.asset_id = computeAssetId(obj);
  } catch (e) {
    console.warn('[AssetStore] Failed to compute asset ID:', e && e.message || e);
  }
  return obj;
}
// Insert or replace a Gene in genes.json, keyed by id.
function upsertGene(geneObj) {
  ensureSchemaFields(geneObj);
  const current = readJsonIfExists(genesPath(), getDefaultGenes());
  const genes = Array.isArray(current.genes) ? current.genes : [];
  const idx = genes.findIndex(g => g && g.id === geneObj.id);
  if (idx >= 0) genes[idx] = geneObj; else genes.push(geneObj);
  writeJsonAtomic(genesPath(), { version: current.version || 1, genes });
}
// Append a Capsule to capsules.json unconditionally (no dedup by id).
function appendCapsule(capsuleObj) {
  ensureSchemaFields(capsuleObj);
  const current = readJsonIfExists(capsulesPath(), getDefaultCapsules());
  const capsules = Array.isArray(current.capsules) ? current.capsules : [];
  capsules.push(capsuleObj);
  writeJsonAtomic(capsulesPath(), { version: current.version || 1, capsules });
}
// Insert or replace a Capsule in capsules.json, keyed by id; silently
// ignores objects that are not well-formed Capsules.
function upsertCapsule(capsuleObj) {
  if (!capsuleObj || capsuleObj.type !== 'Capsule' || !capsuleObj.id) return;
  ensureSchemaFields(capsuleObj);
  const current = readJsonIfExists(capsulesPath(), getDefaultCapsules());
  const capsules = Array.isArray(current.capsules) ? current.capsules : [];
  const idx = capsules.findIndex(c => c && c.type === 'Capsule' && String(c.id) === String(capsuleObj.id));
  if (idx >= 0) capsules[idx] = capsuleObj; else capsules.push(capsuleObj);
  writeJsonAtomic(capsulesPath(), { version: current.version || 1, capsules });
}
// Failed-capsule history is bounded: once it exceeds MAX entries it is
// trimmed to the most recent TRIM_TO, so the file cannot grow unbounded.
const FAILED_CAPSULES_MAX = 200;
const FAILED_CAPSULES_TRIM_TO = 100;
function getDefaultFailedCapsules() { return { version: 1, failed_capsules: [] }; }
// Record a failed capsule (oldest entries dropped on overflow).
function appendFailedCapsule(capsuleObj) {
  if (!capsuleObj || typeof capsuleObj !== 'object') return;
  ensureSchemaFields(capsuleObj);
  const current = readJsonIfExists(failedCapsulesPath(), getDefaultFailedCapsules());
  let list = Array.isArray(current.failed_capsules) ? current.failed_capsules : [];
  list.push(capsuleObj);
  if (list.length > FAILED_CAPSULES_MAX) {
    list = list.slice(list.length - FAILED_CAPSULES_TRIM_TO);
  }
  writeJsonAtomic(failedCapsulesPath(), { version: current.version || 1, failed_capsules: list });
}
// Return the most recent `limit` failed capsules (defaults to 50).
function readRecentFailedCapsules(limit) {
  const n = Number.isFinite(Number(limit)) && Number(limit) > 0 ? Number(limit) : 50;
  try {
    const current = readJsonIfExists(failedCapsulesPath(), getDefaultFailedCapsules());
    const list = Array.isArray(current.failed_capsules) ? current.failed_capsules : [];
    return list.slice(Math.max(0, list.length - n));
  } catch (e) {
    console.warn('[AssetStore] Failed to read failed_capsules.json:', e && e.message || e);
    return [];
  }
}
// Ensure all expected asset files exist on startup.
// Creates empty files for optional append-only stores so that
// external grep/read commands never fail with "No such file or directory".
function ensureAssetFiles() {
  const dir = getGepAssetsDir();
  ensureDir(dir);
  // JSON stores get their default document; JSONL stores start empty.
  const files = [
    { path: genesPath(), defaultContent: JSON.stringify(getDefaultGenes(), null, 2) + '\n' },
    { path: capsulesPath(), defaultContent: JSON.stringify(getDefaultCapsules(), null, 2) + '\n' },
    { path: path.join(dir, 'genes.jsonl'), defaultContent: '' },
    { path: eventsPath(), defaultContent: '' },
    { path: candidatesPath(), defaultContent: '' },
    { path: failedCapsulesPath(), defaultContent: JSON.stringify(getDefaultFailedCapsules(), null, 2) + '\n' },
  ];
  for (const f of files) {
    if (!fs.existsSync(f.path)) {
      try {
        fs.writeFileSync(f.path, f.defaultContent, 'utf8');
      } catch (e) {
        // Non-fatal: log but continue
        console.error(`[AssetStore] Failed to create ${f.path}: ${e.message}`);
      }
    }
  }
}
// Public API of the asset store.
module.exports = {
  loadGenes, loadCapsules, readAllEvents, getLastEventId,
  appendEventJsonl, appendCandidateJsonl, appendExternalCandidateJsonl,
  readRecentCandidates, readRecentExternalCandidates,
  upsertGene, appendCapsule, upsertCapsule,
  appendFailedCapsule, readRecentFailedCapsules,
  genesPath, capsulesPath, eventsPath, candidatesPath, externalCandidatesPath, failedCapsulesPath,
  ensureAssetFiles, buildValidationCmd,
};

36
src/gep/assets.js Normal file
View File

@@ -0,0 +1,36 @@
const { computeAssetId, SCHEMA_VERSION } = require('./contentHash');
/**
 * Format asset preview for prompt inclusion.
 * Strings that parse to a non-empty JSON array are pretty-printed;
 * any other string passes through untouched; non-strings are
 * pretty-printed as JSON. Falsy input renders as "(none)".
 */
function formatAssetPreview(preview) {
  if (!preview) return '(none)';
  if (typeof preview !== 'string') return JSON.stringify(preview, null, 2);
  let parsed;
  try {
    parsed = JSON.parse(preview);
  } catch (e) {
    return preview; // not JSON: keep as-is
  }
  const isNonEmptyArray = Array.isArray(parsed) && parsed.length > 0;
  return isNonEmptyArray ? JSON.stringify(parsed, null, 2) : preview;
}
/**
 * Validate and normalize an asset object in place: fills in the current
 * schema version and a content-addressed asset_id when they are absent.
 * Non-objects are returned unchanged; hashing failures are swallowed.
 */
function normalizeAsset(asset) {
  if (!asset || typeof asset !== 'object') return asset;
  if (!asset.schema_version) asset.schema_version = SCHEMA_VERSION;
  if (asset.asset_id) return asset;
  try {
    asset.asset_id = computeAssetId(asset);
  } catch (e) {
    // best-effort: leave asset_id unset when hashing fails
  }
  return asset;
}
// Public API: preview formatting + asset normalization helpers.
module.exports = { formatAssetPreview, normalizeAsset };

71
src/gep/bridge.js Normal file
View File

@@ -0,0 +1,71 @@
const fs = require('fs');
const path = require('path');
// Best-effort recursive mkdir; failures are deliberately swallowed.
function ensureDir(dir) {
  try {
    if (fs.existsSync(dir)) return;
    fs.mkdirSync(dir, { recursive: true });
  } catch (e) {}
}
// Current wall-clock time as an ISO-8601 UTC string.
function nowIso() {
  const now = new Date();
  return now.toISOString();
}
// Clip text to roughly maxChars, appending a truncation marker when cut.
// A non-numeric or non-positive maxChars disables clipping entirely.
function clip(text, maxChars) {
  const s = String(text || '');
  const limit = Number(maxChars);
  const unlimited = !Number.isFinite(limit) || limit <= 0;
  if (unlimited || s.length <= limit) return s;
  return s.slice(0, Math.max(0, limit - 40)) + '\n...[TRUNCATED]...\n';
}
// Persist a GEP prompt and a JSON sidecar describing it, as
// gep_prompt_{cycle}_{run}.txt / .json under memoryDir.
// @returns {{promptPath: string, metaPath: string}}
// @throws {Error} when memoryDir is empty/blank.
function writePromptArtifact({ memoryDir, cycleId, runId, prompt, meta }) {
  const dir = String(memoryDir || '').trim();
  if (!dir) throw new Error('bridge: missing memoryDir');
  ensureDir(dir);
  // Sanitize identifiers so they are filesystem-safe inside a filename.
  const safeCycle = String(cycleId || 'cycle').replace(/[^a-zA-Z0-9_\-#]/g, '_');
  const safeRun = String(runId || Date.now()).replace(/[^a-zA-Z0-9_\-]/g, '_');
  const base = `gep_prompt_${safeCycle}_${safeRun}`;
  const promptPath = path.join(dir, base + '.txt');
  const metaPath = path.join(dir, base + '.json');
  fs.writeFileSync(promptPath, String(prompt || ''), 'utf8');
  fs.writeFileSync(
    metaPath,
    JSON.stringify(
      {
        type: 'GepPromptArtifact',
        at: nowIso(),
        cycle_id: cycleId || null,
        run_id: runId || null,
        prompt_path: promptPath,
        meta: meta && typeof meta === 'object' ? meta : null,
      },
      null,
      2
    ) + '\n',
    'utf8'
  );
  return { promptPath, metaPath };
}
// Render a sessions_spawn(...) call whose argument is valid JSON so
// wrappers can extract the task via lastIndexOf('sessions_spawn(') +
// JSON.parse instead of fragile regexes.
// @throws {Error} when the task is empty after trimming.
function renderSessionsSpawnCall({ task, agentId, label, cleanup }) {
  const trimmedTask = String(task || '').trim();
  if (!trimmedTask) throw new Error('bridge: missing task');
  const payload = {
    task: trimmedTask,
    agentId: String(agentId || 'main'),
    cleanup: cleanup ? String(cleanup) : 'delete',
    label: String(label || 'gep_bridge'),
  };
  return `sessions_spawn(${JSON.stringify(payload)})`;
}
// Public API. ensureDir/nowIso stay module-private helpers.
module.exports = {
  clip,
  writePromptArtifact,
  renderSessionsSpawnCall,
};

92
src/gep/candidateEval.js Normal file
View File

@@ -0,0 +1,92 @@
// Candidate evaluation logic extracted from evolve.js for maintainability.
// Handles capability candidate extraction, persistence, and preview building.
const {
readRecentCandidates,
readRecentExternalCandidates,
readRecentFailedCapsules,
appendCandidateJsonl,
} = require('./assetStore');
const { extractCapabilityCandidates, renderCandidatesPreview } = require('./candidates');
const { matchPatternToSignals } = require('./selector');
// Build the candidate previews embedded in the evolution prompt:
//  1) extract fresh capability candidates from the transcript + signals
//     and persist them (best-effort) to candidates.jsonl;
//  2) render a preview of the most recent local candidates;
//  3) render a preview of external (A2A) Genes/Capsules whose trigger
//     patterns match the current signals.
// @returns {{capabilityCandidatesPreview: string, externalCandidatesPreview: string, newCandidates: object[]}}
function buildCandidatePreviews({ signals, recentSessionTranscript }) {
  const newCandidates = extractCapabilityCandidates({
    recentSessionTranscript: recentSessionTranscript || '',
    signals,
    recentFailedCapsules: readRecentFailedCapsules(50),
  });
  for (const c of newCandidates) {
    try {
      appendCandidateJsonl(c);
    } catch (e) {
      // Persistence is best-effort; preview building continues regardless.
      console.warn('[Candidates] Failed to persist candidate:', e && e.message || e);
    }
  }
  const recentCandidates = readRecentCandidates(20);
  const capabilityCandidatesPreview = renderCandidatesPreview(recentCandidates.slice(-8), 1600);
  let externalCandidatesPreview = '(none)';
  try {
    const external = readRecentExternalCandidates(50);
    const list = Array.isArray(external) ? external : [];
    const capsulesOnly = list.filter(x => x && x.type === 'Capsule');
    const genesOnly = list.filter(x => x && x.type === 'Gene');
    // Rank external genes by how many of their signal patterns match,
    // keeping at most the top 3.
    const matchedExternalGenes = genesOnly
      .map(g => {
        const pats = Array.isArray(g.signals_match) ? g.signals_match : [];
        const hit = pats.reduce((acc, p) => (matchPatternToSignals(p, signals) ? acc + 1 : acc), 0);
        return { gene: g, hit };
      })
      .filter(x => x.hit > 0)
      .sort((a, b) => b.hit - a.hit)
      .slice(0, 3)
      .map(x => x.gene);
    // Same ranking for external capsules, scored by matching triggers.
    const matchedExternalCapsules = capsulesOnly
      .map(c => {
        const triggers = Array.isArray(c.trigger) ? c.trigger : [];
        const score = triggers.reduce((acc, t) => (matchPatternToSignals(t, signals) ? acc + 1 : acc), 0);
        return { capsule: c, score };
      })
      .filter(x => x.score > 0)
      .sort((a, b) => b.score - a.score)
      .slice(0, 3)
      .map(x => x.capsule);
    if (matchedExternalGenes.length || matchedExternalCapsules.length) {
      // Only a projection of each asset is embedded, keeping the prompt small.
      externalCandidatesPreview = `\`\`\`json\n${JSON.stringify(
        [
          ...matchedExternalGenes.map(g => ({
            type: g.type,
            id: g.id,
            category: g.category || null,
            signals_match: g.signals_match || [],
            a2a: g.a2a || null,
          })),
          ...matchedExternalCapsules.map(c => ({
            type: c.type,
            id: c.id,
            trigger: c.trigger,
            gene: c.gene,
            summary: c.summary,
            confidence: c.confidence,
            blast_radius: c.blast_radius || null,
            outcome: c.outcome || null,
            success_streak: c.success_streak || null,
            a2a: c.a2a || null,
          })),
        ],
        null,
        2
      )}\n\`\`\``;
    }
  } catch (e) {
    console.warn('[ExternalCandidates] Preview build failed (non-fatal):', e && e.message || e);
  }
  return { capabilityCandidatesPreview, externalCandidatesPreview, newCandidates };
}
module.exports = { buildCandidatePreviews };

208
src/gep/candidates.js Normal file
View File

@@ -0,0 +1,208 @@
const { expandSignals } = require('./learningSignals');
// Deterministic lightweight hash (FNV-1a over UTF-16 code units; not
// cryptographic), rendered as 8 lowercase hex characters.
function stableHash(input) {
  const s = String(input || '');
  let h = 2166136261;
  for (let i = 0; i < s.length; i += 1) {
    h = Math.imul(h ^ s.charCodeAt(i), 16777619);
  }
  return (h >>> 0).toString(16).padStart(8, '0');
}
// Clip text to maxChars, appending a short truncation marker when cut.
// A falsy maxChars disables clipping.
function clip(text, maxChars) {
  const s = String(text || '');
  const withinLimit = !maxChars || s.length <= maxChars;
  return withinLimit ? s : s.slice(0, Math.max(0, maxChars - 20)) + ' ...[TRUNCATED]';
}
// Split text into non-empty lines with trailing whitespace removed
// (leading whitespace is preserved).
function toLines(text) {
  const lines = String(text || '').split('\n');
  return lines.map((l) => l.trimEnd()).filter((l) => l.length > 0);
}
// Pull tool-call names out of a transcript, supporting two line formats:
//   OpenClaw: "[TOOL: Shell]"    Cursor: "[Tool call] Shell"
function extractToolCalls(transcript) {
  const calls = [];
  for (const line of toLines(transcript)) {
    const openClaw = line.match(/\[TOOL:\s*([^\]]+)\]/i);
    if (openClaw && openClaw[1]) {
      calls.push(openClaw[1].trim());
      continue;
    }
    const cursor = line.match(/\[Tool call\]\s+(\S+)/i);
    if (cursor && cursor[1]) calls.push(cursor[1].trim());
  }
  return calls;
}
// Count occurrences of each item, preserving first-seen insertion order.
function countFreq(items) {
  const counts = new Map();
  for (const item of items) {
    counts.set(item, (counts.get(item) || 0) + 1);
  }
  return counts;
}
// Build the "five questions" capability shape. The prose is a fixed
// template, not an inference from the evidence; only title, params and
// evidence vary per candidate.
function buildFiveQuestionsShape({ title, signals, evidence }) {
  const params = `Signals: ${Array.isArray(signals) ? signals.join(', ') : ''}`.trim();
  return {
    title: String(title || '').slice(0, 120),
    input: 'Recent session transcript + memory snippets + user instructions',
    output: 'A safe, auditable evolution patch guided by GEP assets',
    invariants: 'Protocol order, small reversible patches, validation, append-only events',
    params: params || 'Signals: (none)',
    failure_points: 'Missing signals, over-broad changes, skipped validation, missing knowledge solidification',
    evidence: clip(evidence, 240),
  };
}
// Derive CapabilityCandidate records from three sources:
//  1) transcript: tools invoked 3+ times become "repeated usage" candidates;
//  2) signals: known defensive/opportunity signals map to fixed titles;
//  3) failed capsules: recurring failure groups (2+ with similar learning
//     tags) become "learn from failure" candidates.
// Results are deduplicated by id (stableHash of the grouping key).
function extractCapabilityCandidates({ recentSessionTranscript, signals, recentFailedCapsules }) {
  const candidates = [];
  const signalList = Array.isArray(signals) ? signals : [];
  const expandedTags = expandSignals(signalList, recentSessionTranscript);
  const toolCalls = extractToolCalls(recentSessionTranscript);
  const freq = countFreq(toolCalls);
  for (const [tool, count] of freq.entries()) {
    // Threshold: fewer than 3 invocations is not a pattern worth recording.
    if (count < 3) continue;
    const title = `Repeated tool usage: ${tool}`;
    const evidence = `Observed ${count} occurrences of tool call marker for ${tool}.`;
    const shape = buildFiveQuestionsShape({ title, signals, evidence });
    candidates.push({
      type: 'CapabilityCandidate',
      id: `cand_${stableHash(title)}`,
      title,
      source: 'transcript',
      created_at: new Date().toISOString(),
      signals: signalList,
      tags: expandedTags,
      shape,
    });
  }
  // Signals-as-candidates: capture recurring pain points as reusable capability shapes.
  const signalCandidates = [
    // Defensive signals
    { signal: 'log_error', title: 'Repair recurring runtime errors' },
    { signal: 'protocol_drift', title: 'Prevent protocol drift and enforce auditable outputs' },
    { signal: 'windows_shell_incompatible', title: 'Avoid platform-specific shell assumptions (Windows compatibility)' },
    { signal: 'session_logs_missing', title: 'Harden session log detection and fallback behavior' },
    // Opportunity signals (innovation)
    { signal: 'user_feature_request', title: 'Implement user-requested feature' },
    { signal: 'user_improvement_suggestion', title: 'Apply user improvement suggestion' },
    { signal: 'perf_bottleneck', title: 'Resolve performance bottleneck' },
    { signal: 'capability_gap', title: 'Fill capability gap' },
    { signal: 'stable_success_plateau', title: 'Explore new strategies during stability plateau' },
    { signal: 'external_opportunity', title: 'Evaluate external A2A asset for local adoption' },
  ];
  for (const sc of signalCandidates) {
    // A signal matches exactly or as a "signal:detail" prefixed variant.
    if (!signalList.some(s => s === sc.signal || s.startsWith(sc.signal + ':'))) continue;
    const evidence = `Signal present: ${sc.signal}`;
    const shape = buildFiveQuestionsShape({ title: sc.title, signals, evidence });
    candidates.push({
      type: 'CapabilityCandidate',
      id: `cand_${stableHash(sc.signal)}`,
      title: sc.title,
      source: 'signals',
      created_at: new Date().toISOString(),
      signals: signalList,
      tags: expandedTags,
      shape,
    });
  }
  // Group recent failed capsules by their dominant problem tag (or first
  // area:/risk: tag when no problem: tag applies).
  var failedCapsules = Array.isArray(recentFailedCapsules) ? recentFailedCapsules : [];
  var groups = {};
  var problemPriority = [
    'problem:performance',
    'problem:protocol',
    'problem:reliability',
    'problem:stagnation',
    'problem:capability',
  ];
  for (var i = 0; i < failedCapsules.length; i++) {
    var fc = failedCapsules[i];
    // Skip entries that actually succeeded.
    if (!fc || fc.outcome && fc.outcome.status === 'success') continue;
    var reason = String(fc.failure_reason || '').trim();
    var failureTags = expandSignals((fc.trigger || []).concat(signalList), reason).filter(function (t) {
      return t.indexOf('problem:') === 0 || t.indexOf('risk:') === 0 || t.indexOf('area:') === 0 || t.indexOf('action:') === 0;
    });
    if (failureTags.length === 0) continue;
    var dominantProblem = null;
    for (var p = 0; p < problemPriority.length; p++) {
      if (failureTags.indexOf(problemPriority[p]) !== -1) {
        dominantProblem = problemPriority[p];
        break;
      }
    }
    var groupingTags = dominantProblem
      ? [dominantProblem]
      : failureTags.filter(function (tag) { return tag.indexOf('area:') === 0 || tag.indexOf('risk:') === 0; }).slice(0, 1);
    var key = groupingTags.join('|');
    if (!groups[key]) groups[key] = { count: 0, tags: failureTags, reasons: [], gene: fc.gene || null };
    groups[key].count += 1;
    if (reason) groups[key].reasons.push(reason);
  }
  // Only groups with 2+ failures become candidates; the title reflects
  // the dominant problem family.
  Object.keys(groups).forEach(function (key) {
    var group = groups[key];
    if (!group || group.count < 2) return;
    var title = 'Learn from recurring failed evolution paths';
    if (group.tags.indexOf('problem:performance') !== -1) title = 'Resolve recurring performance regressions';
    else if (group.tags.indexOf('problem:protocol') !== -1) title = 'Prevent recurring protocol and validation regressions';
    else if (group.tags.indexOf('problem:reliability') !== -1) title = 'Repair recurring reliability failures';
    else if (group.tags.indexOf('problem:stagnation') !== -1) title = 'Break repeated stagnation loops with a new strategy';
    else if (group.tags.indexOf('area:orchestration') !== -1) title = 'Stabilize task and orchestration behavior';
    var evidence = 'Observed ' + group.count + ' recent failed evolutions with similar learning tags. ' +
      (group.reasons[0] ? 'Latest reason: ' + clip(group.reasons[0], 180) : '');
    candidates.push({
      type: 'CapabilityCandidate',
      id: 'cand_' + stableHash('failed:' + key),
      title: title,
      source: 'failed_capsules',
      created_at: new Date().toISOString(),
      signals: signalList,
      tags: group.tags,
      shape: buildFiveQuestionsShape({ title: title, signals: signalList, evidence: evidence }),
    });
  });
  // Dedup by id
  const seen = new Set();
  return candidates.filter(c => {
    if (!c || !c.id) return false;
    if (seen.has(c.id)) return false;
    seen.add(c.id);
    return true;
  });
}
// Render a short, human-readable preview of capability candidates.
// Each candidate contributes a title line plus its five-questions shape
// fields; the combined text is clipped to `maxChars` characters.
function renderCandidatesPreview(candidates, maxChars = 1400) {
  const items = Array.isArray(candidates) ? candidates : [];
  const out = [];
  items.forEach((c) => {
    const shape = (c && c.shape) || {};
    out.push(`- ${c.id}: ${c.title}`);
    out.push(`  - input: ${shape.input || ''}`);
    out.push(`  - output: ${shape.output || ''}`);
    out.push(`  - invariants: ${shape.invariants || ''}`);
    out.push(`  - params: ${shape.params || ''}`);
    out.push(`  - failure_points: ${shape.failure_points || ''}`);
    if (shape.evidence) out.push(`  - evidence: ${shape.evidence}`);
  });
  return clip(out.join('\n'), maxChars);
}
// Public API: capability-candidate extraction, preview rendering, and
// signal expansion helpers.
module.exports = {
  extractCapabilityCandidates,
  renderCandidatesPreview,
  expandSignals,
};

65
src/gep/contentHash.js Normal file
View File

@@ -0,0 +1,65 @@
// Content-addressable hashing for GEP assets.
// Provides canonical JSON serialization and SHA-256 based asset IDs.
// This enables deduplication, tamper detection, and cross-node consistency.
const crypto = require('crypto');
// Schema version for all GEP asset types.
// Bump MINOR for additive fields; MAJOR for breaking changes.
const SCHEMA_VERSION = '1.6.0';
// Canonical JSON: deterministic serialization with sorted keys at all levels.
// Arrays preserve order; non-finite numbers become null; undefined becomes null.
function canonicalize(obj) {
  if (obj === null || obj === undefined) return 'null';
  switch (typeof obj) {
    case 'boolean':
      return obj ? 'true' : 'false';
    case 'number':
      return Number.isFinite(obj) ? String(obj) : 'null';
    case 'string':
      return JSON.stringify(obj);
    case 'object': {
      if (Array.isArray(obj)) {
        return '[' + obj.map(canonicalize).join(',') + ']';
      }
      const body = Object.keys(obj)
        .sort()
        .map((key) => JSON.stringify(key) + ':' + canonicalize(obj[key]))
        .join(',');
      return '{' + body + '}';
    }
    default:
      // Functions, symbols, bigints: serialized as null, like undefined.
      return 'null';
  }
}
// Compute a content-addressable asset ID.
// Excludes self-referential fields (asset_id itself) from the hash input.
// Returns "sha256:<hex>", or null for non-object input.
function computeAssetId(obj, excludeFields) {
  if (!obj || typeof obj !== 'object') return null;
  const skip = new Set(Array.isArray(excludeFields) ? excludeFields : ['asset_id']);
  const filtered = {};
  Object.keys(obj).forEach((key) => {
    if (!skip.has(key)) filtered[key] = obj[key];
  });
  const digest = crypto
    .createHash('sha256')
    .update(canonicalize(filtered), 'utf8')
    .digest('hex');
  return 'sha256:' + digest;
}
// Verify that an object's asset_id matches its content.
function verifyAssetId(obj) {
  if (!obj || typeof obj !== 'object') return false;
  const claimed = obj.asset_id;
  const hasClaim = typeof claimed === 'string' && claimed.length > 0;
  return hasClaim && claimed === computeAssetId(obj);
}
// Public API: canonical serialization plus content-hash computation/verification.
module.exports = {
  SCHEMA_VERSION,
  canonicalize,
  computeAssetId,
  verifyAssetId,
};

163
src/gep/curriculum.js Normal file
View File

@@ -0,0 +1,163 @@
'use strict';
const fs = require('fs');
const path = require('path');
const { getEvolutionDir, getMemoryDir } = require('./paths');
// Tunable curriculum thresholds.
var MASTERY_THRESHOLD = 0.8;   // success rate at/above which a key can count as mastered
var MASTERY_MIN_ATTEMPTS = 3;  // minimum attempts before mastery can be declared
var FAILURE_THRESHOLD = 0.3;   // success rate at/below which a key counts as failing
var MAX_CURRICULUM_SIGNALS = 2; // cap on curriculum signals emitted per cycle
// Absolute path of the persisted curriculum state file.
function curriculumStatePath() {
  var stateFile = 'curriculum_state.json';
  return path.join(getEvolutionDir(), stateFile);
}
// Read and parse a JSON file, returning `fallback` when the file is
// missing, blank, or unparseable. Never throws.
function readJsonSafe(filePath, fallback) {
  try {
    if (!fs.existsSync(filePath)) return fallback;
    var text = fs.readFileSync(filePath, 'utf8');
    return text.trim() ? JSON.parse(text) : fallback;
  } catch (_) {
    return fallback;
  }
}
// Atomically persist `obj` as pretty-printed JSON: write to a temp file in
// the same directory, then rename over the target. Best-effort; errors are
// swallowed so persistence never crashes the caller.
function writeJsonAtomic(filePath, obj) {
  try {
    var parentDir = path.dirname(filePath);
    if (!fs.existsSync(parentDir)) fs.mkdirSync(parentDir, { recursive: true });
    var payload = JSON.stringify(obj, null, 2) + '\n';
    var tmpPath = filePath + '.tmp';
    fs.writeFileSync(tmpPath, payload, 'utf8');
    fs.renameSync(tmpPath, filePath);
  } catch (_) {}
}
// Load persisted curriculum state, falling back to a fresh level-1 state.
function loadCurriculumState() {
  var freshState = {
    level: 1,
    current_targets: [],
    completed: [],
    updated_at: null,
  };
  return readJsonSafe(curriculumStatePath(), freshState);
}
// Stamp and persist curriculum state. Note: mutates `state` in place by
// refreshing its updated_at timestamp before writing.
function saveCurriculumState(state) {
  var stampedAt = new Date().toISOString();
  state.updated_at = stampedAt;
  writeJsonAtomic(curriculumStatePath(), state);
}
// Tally success/fail outcome events per signal key from the memory-graph
// JSONL file. Only the most recent 200 lines are considered; malformed
// lines are ignored.
function aggregateOutcomes(memoryGraphPath) {
  var tally = {};
  try {
    if (!fs.existsSync(memoryGraphPath)) return tally;
    var allLines = fs.readFileSync(memoryGraphPath, 'utf8').trim().split('\n').filter(Boolean);
    allLines.slice(-200).forEach(function (line) {
      try {
        var ev = JSON.parse(line);
        if (ev.kind !== 'outcome' || !ev.outcome) return;
        var key = ev.signal_key || ev.key || '';
        if (!key) return;
        var bucket = tally[key] || (tally[key] = { success: 0, fail: 0, total: 0 });
        if (ev.outcome.status === 'success') bucket.success++;
        else if (ev.outcome.status === 'failed') bucket.fail++;
        bucket.total++;
      } catch (_) {}
    });
  } catch (_) {}
  return tally;
}
// Partition per-key outcome stats into mastered / failing / frontier sets.
// Keys with fewer than 2 attempts are ignored. The frontier is sorted so
// keys closest to a 50% success rate come first.
function identifyFrontier(outcomes) {
  var result = { mastered: [], failing: [], frontier: [] };
  Object.keys(outcomes).forEach(function (key) {
    var stats = outcomes[key];
    if (stats.total < 2) return;
    var rate = stats.success / stats.total;
    var entry = { key: key, rate: rate, total: stats.total };
    if (rate >= MASTERY_THRESHOLD && stats.total >= MASTERY_MIN_ATTEMPTS) {
      result.mastered.push(entry);
    } else if (rate <= FAILURE_THRESHOLD && stats.total >= 2) {
      result.failing.push(entry);
    } else {
      result.frontier.push(entry);
    }
  });
  result.frontier.sort(function (a, b) {
    return Math.abs(a.rate - 0.5) - Math.abs(b.rate - 0.5);
  });
  return result;
}
// Produce up to MAX_CURRICULUM_SIGNALS learning-target signals for the next
// evolution cycle, prioritizing (1) the first unmastered capability gap and
// (2) the frontier key closest to a 50% success rate. Chosen targets are
// persisted into curriculum state. Best-effort: any error yields [].
// opts: { capabilityGaps?: string[], memoryGraphPath?: string, personality?: object }
function generateCurriculumSignals(opts) {
  var capabilityGaps = Array.isArray(opts.capabilityGaps) ? opts.capabilityGaps : [];
  var memoryGraphPath = opts.memoryGraphPath || '';
  var personality = opts.personality || {}; // read but not used below; reserved
  var signals = [];
  try {
    var outcomes = aggregateOutcomes(memoryGraphPath);
    var analysis = identifyFrontier(outcomes);
    var state = loadCurriculumState();
    // Priority 1: first declared capability gap, unless some mastered key
    // already covers it (substring match).
    if (capabilityGaps.length > 0) {
      var gapTarget = capabilityGaps[0];
      var alreadyMastered = analysis.mastered.some(function (m) {
        return m.key.indexOf(gapTarget) >= 0;
      });
      if (!alreadyMastered) {
        signals.push('curriculum_target:gap:' + String(gapTarget).slice(0, 60));
      }
    }
    // Priority 2: best frontier key, if not already covered by the gap signal.
    if (signals.length < MAX_CURRICULUM_SIGNALS && analysis.frontier.length > 0) {
      var best = analysis.frontier[0];
      var alreadyTargeted = signals.some(function (s) { return s.indexOf(best.key) >= 0; });
      if (!alreadyTargeted) {
        signals.push('curriculum_target:frontier:' + String(best.key).slice(0, 60));
      }
    }
    // Persist the chosen targets; clamp level into [1, 5].
    if (signals.length > 0) {
      state.current_targets = signals.slice();
      state.level = Math.max(1, Math.min(5, state.level));
      saveCurriculumState(state);
    }
  } catch (_) {}
  return signals.slice(0, MAX_CURRICULUM_SIGNALS);
}
// Record the outcome of a curriculum-targeted evolution, keep only the most
// recent 50 entries, and level up after every 5th cumulative success (capped
// at level 5). Best-effort: errors are swallowed.
function markCurriculumProgress(signal, outcome) {
  try {
    var state = loadCurriculumState();
    var completed = Array.isArray(state.completed) ? state.completed : [];
    completed.push({
      signal: String(signal).slice(0, 100),
      outcome: String(outcome).slice(0, 20),
      at: new Date().toISOString(),
    });
    if (completed.length > 50) completed = completed.slice(-50);
    state.completed = completed;
    var successCount = 0;
    completed.forEach(function (c) {
      if (c.outcome === 'success') successCount++;
    });
    if (successCount > 0 && successCount % 5 === 0 && state.level < 5) {
      state.level++;
    }
    saveCurriculumState(state);
  } catch (_) {}
}
// Public API: signal generation, progress tracking, and state loading.
module.exports = {
  generateCurriculumSignals: generateCurriculumSignals,
  markCurriculumProgress: markCurriculumProgress,
  loadCurriculumState: loadCurriculumState,
};

209
src/gep/deviceId.js Normal file
View File

@@ -0,0 +1,209 @@
// Stable device identifier for node identity.
// Generates a hardware-based fingerprint that persists across directory changes,
// reboots, and evolver upgrades. Used by getNodeId() and env_fingerprint.
//
// Priority chain:
// 1. EVOMAP_DEVICE_ID env var (explicit override, recommended for containers)
// 2. ~/.evomap/device_id file (persisted from previous run)
// 3. <project>/.evomap_device_id (fallback persist path for containers w/o $HOME)
// 4. /etc/machine-id (Linux, set at OS install)
// 5. IOPlatformUUID (macOS hardware UUID)
// 6. Docker/OCI container ID (from /proc/self/cgroup or /proc/self/mountinfo)
// 7. hostname + MAC addresses (network-based fallback)
// 8. random 128-bit hex (last resort, persisted immediately)
const os = require('os');
const fs = require('fs');
const path = require('path');
const crypto = require('crypto');
const DEVICE_ID_DIR = path.join(os.homedir(), '.evomap');
const DEVICE_ID_FILE = path.join(DEVICE_ID_DIR, 'device_id');
const LOCAL_DEVICE_ID_FILE = path.resolve(__dirname, '..', '..', '.evomap_device_id');
let _cachedDeviceId = null;
const DEVICE_ID_RE = /^[a-f0-9]{16,64}$/;
// Heuristic container detection: Docker/Podman marker files, or
// container-related tokens in PID 1's cgroup.
function isContainer() {
  const markerFiles = ['/.dockerenv', '/run/.containerenv'];
  for (const marker of markerFiles) {
    try {
      if (fs.existsSync(marker)) return true;
    } catch {}
  }
  try {
    const cgroup = fs.readFileSync('/proc/1/cgroup', 'utf8');
    if (/docker|kubepods|containerd|cri-o|lxc|ecs/i.test(cgroup)) return true;
  } catch {}
  return false;
}
// OS-level machine identifier: /etc/machine-id on Linux, IOPlatformUUID via
// ioreg on macOS. Returns null when neither source is available.
function readMachineId() {
  try {
    const machineId = fs.readFileSync('/etc/machine-id', 'utf8').trim();
    if (machineId.length >= 16) return machineId;
  } catch {}
  if (process.platform !== 'darwin') return null;
  try {
    const { execFileSync } = require('child_process');
    const ioregOut = execFileSync('ioreg', ['-rd1', '-c', 'IOPlatformExpertDevice'], {
      encoding: 'utf8',
      timeout: 3000,
      stdio: ['ignore', 'pipe', 'ignore'],
    });
    const uuidMatch = ioregOut.match(/"IOPlatformUUID"\s*=\s*"([^"]+)"/);
    if (uuidMatch && uuidMatch[1]) return uuidMatch[1];
  } catch {}
  return null;
}
// Extract Docker/OCI container ID from cgroup or mountinfo.
// The container ID is 64-char hex and stable for the lifetime of the container.
// Returns null on non-container hosts or if parsing fails.
function readContainerId() {
  // /proc/self/cgroup covers cgroup v1 / most Docker setups;
  // /proc/self/mountinfo covers cgroup v2 / containerd.
  const procSources = ['/proc/self/cgroup', '/proc/self/mountinfo'];
  for (const source of procSources) {
    try {
      const text = fs.readFileSync(source, 'utf8');
      const hex = text.match(/[a-f0-9]{64}/);
      if (hex) return hex[0];
    } catch {}
  }
  // Docker sets the hostname to the short container ID by default.
  if (isContainer()) {
    const shortId = os.hostname();
    if (/^[a-f0-9]{12,64}$/.test(shortId)) return shortId;
  }
  return null;
}
// Collect MAC addresses of all external network interfaces, sorted for
// deterministic fingerprinting. Internal and all-zero MACs are skipped.
function getMacAddresses() {
  const interfaces = os.networkInterfaces();
  const macs = [];
  Object.values(interfaces).forEach((addrs) => {
    addrs.forEach((addr) => {
      const usable = !addr.internal && addr.mac && addr.mac !== '00:00:00:00:00:00';
      if (usable) macs.push(addr.mac);
    });
  });
  return macs.sort();
}
// Derive a 32-hex device ID. Preference order: machine ID, container ID,
// hostname + MAC addresses; falls back to random bytes when no stable
// fingerprint source exists.
function generateDeviceId() {
  const hash32 = (seed) =>
    crypto.createHash('sha256').update(seed).digest('hex').slice(0, 32);
  const machineId = readMachineId();
  if (machineId) return hash32('evomap:' + machineId);
  // Container ID: stable for the container's lifetime, but changes on re-create.
  // Still better than random for keeping identity within a single deployment.
  const containerId = readContainerId();
  if (containerId) return hash32('evomap:container:' + containerId);
  const macs = getMacAddresses();
  if (macs.length > 0) {
    return hash32('evomap:' + os.hostname() + '|' + macs.join(','));
  }
  return crypto.randomBytes(16).toString('hex');
}
// Persist the device ID, preferring ~/.evomap/device_id and falling back to
// a project-local file (useful when $HOME is ephemeral but the project
// directory is a mounted volume). Warns when neither location is writable.
function persistDeviceId(id) {
  const attempts = [
    () => {
      if (!fs.existsSync(DEVICE_ID_DIR)) {
        fs.mkdirSync(DEVICE_ID_DIR, { recursive: true, mode: 0o700 });
      }
      fs.writeFileSync(DEVICE_ID_FILE, id, { encoding: 'utf8', mode: 0o600 });
    },
    () => {
      fs.writeFileSync(LOCAL_DEVICE_ID_FILE, id, { encoding: 'utf8', mode: 0o600 });
    },
  ];
  for (const attempt of attempts) {
    try {
      attempt();
      return;
    } catch {}
  }
  console.error(
    '[evolver] WARN: failed to persist device_id to ' + DEVICE_ID_FILE +
    ' or ' + LOCAL_DEVICE_ID_FILE +
    ' -- node identity may change on restart.' +
    ' Set EVOMAP_DEVICE_ID env var for stable identity in containers.'
  );
}
// Load a previously persisted device ID from either storage location,
// validating its format against DEVICE_ID_RE. Returns null when nothing
// valid is found.
function loadPersistedDeviceId() {
  const candidates = [DEVICE_ID_FILE, LOCAL_DEVICE_ID_FILE];
  for (const file of candidates) {
    try {
      if (!fs.existsSync(file)) continue;
      const id = fs.readFileSync(file, 'utf8').trim();
      if (id && DEVICE_ID_RE.test(id)) return id;
    } catch {}
  }
  return null;
}
// Resolve this node's stable device ID, caching the result for the process
// lifetime. Resolution order: validated EVOMAP_DEVICE_ID env var, previously
// persisted file, freshly generated fingerprint (which is then persisted).
// An env var that fails DEVICE_ID_RE validation is silently ignored and the
// chain falls through to the persisted/generated sources.
function getDeviceId() {
  if (_cachedDeviceId) return _cachedDeviceId;
  // 1. Env var override (validated)
  if (process.env.EVOMAP_DEVICE_ID) {
    const envId = String(process.env.EVOMAP_DEVICE_ID).trim().toLowerCase();
    if (DEVICE_ID_RE.test(envId)) {
      _cachedDeviceId = envId;
      return _cachedDeviceId;
    }
  }
  // 2. Previously persisted (checks both ~/.evomap/ and project-local)
  const persisted = loadPersistedDeviceId();
  if (persisted) {
    _cachedDeviceId = persisted;
    return _cachedDeviceId;
  }
  // 3. Generate from hardware / container metadata and persist
  const inContainer = isContainer();
  const generated = generateDeviceId();
  persistDeviceId(generated);
  _cachedDeviceId = generated;
  if (inContainer && !process.env.EVOMAP_DEVICE_ID) {
    console.error(
      '[evolver] NOTE: running in a container without EVOMAP_DEVICE_ID.' +
      ' A device_id was auto-generated and persisted, but for guaranteed' +
      ' cross-restart stability, set EVOMAP_DEVICE_ID as an env var' +
      ' or mount a persistent volume at ~/.evomap/'
    );
  }
  return _cachedDeviceId;
}
// Public API: stable device identity plus container detection helper.
module.exports = { getDeviceId, isContainer };

84
src/gep/envFingerprint.js Normal file
View File

@@ -0,0 +1,84 @@
// Environment fingerprint capture for GEP assets.
// Records the runtime environment so that cross-environment diffusion
// success rates (GDI) can be measured scientifically.
const os = require('os');
const fs = require('fs');
const path = require('path');
const crypto = require('crypto');
const { getRepoRoot } = require('./paths');
const { getDeviceId, isContainer } = require('./deviceId');
// Capture a structured environment fingerprint.
// This is embedded into Capsules, EvolutionEvents, and ValidationReports.
function captureEnvFingerprint() {
  const repoRoot = getRepoRoot();
  // Read evolver's own package.json via __dirname so that npm-installed
  // deployments report the correct evolver version. getRepoRoot() walks
  // up to the nearest .git directory, which resolves to the HOST project
  // when evolver is an npm dependency -- producing a wrong name/version.
  const readPkg = (pkgPath) => {
    try {
      const pkg = JSON.parse(fs.readFileSync(pkgPath, 'utf8'));
      return {
        version: pkg && pkg.version ? String(pkg.version) : null,
        name: pkg && pkg.name ? String(pkg.name) : null,
      };
    } catch (e) {
      return null;
    }
  };
  let info = readPkg(path.resolve(__dirname, '..', '..', 'package.json'));
  if (!info || !info.version) {
    const hostInfo = readPkg(path.join(repoRoot, 'package.json'));
    if (hostInfo) info = hostInfo;
  }
  const pkgVersion = info ? info.version : null;
  const pkgName = info ? info.name : null;
  const region = (process.env.EVOLVER_REGION || '').trim().toLowerCase().slice(0, 5) || undefined;
  // Hostname and cwd are hashed, not sent raw.
  const shortHash = (value) =>
    crypto.createHash('sha256').update(value).digest('hex').slice(0, 12);
  return {
    device_id: getDeviceId(),
    node_version: process.version,
    platform: process.platform,
    arch: process.arch,
    os_release: os.release(),
    hostname: shortHash(os.hostname()),
    evolver_version: pkgVersion,
    client: pkgName || 'evolver',
    client_version: pkgVersion,
    region: region,
    cwd: shortHash(process.cwd()),
    container: isContainer(),
    captured_at: new Date().toISOString(),
  };
}
// Compute a short fingerprint key for comparison and grouping.
// Two nodes with the same key are considered "same environment class".
function envFingerprintKey(fp) {
  if (!fp || typeof fp !== 'object') return 'unknown';
  const fields = [
    fp.device_id,
    fp.node_version,
    fp.platform,
    fp.arch,
    fp.hostname,
    fp.client || fp.evolver_version,
    fp.client_version || fp.evolver_version,
  ];
  const joined = fields.map((v) => v || '').join('|');
  return crypto.createHash('sha256').update(joined, 'utf8').digest('hex').slice(0, 16);
}
// Check if two fingerprints are from the same environment class.
function isSameEnvClass(fpA, fpB) {
  const keyA = envFingerprintKey(fpA);
  const keyB = envFingerprintKey(fpB);
  return keyA === keyB;
}
// Public API: fingerprint capture, grouping key, and class comparison.
module.exports = {
  captureEnvFingerprint,
  envFingerprintKey,
  isSameEnvClass,
};

201
src/gep/executionTrace.js Normal file
View File

@@ -0,0 +1,201 @@
// Execution Trace: structured, desensitized evolution execution summary.
// Built during solidify and optionally shared with Hub via EvolutionEvent payload.
//
// Desensitization rules (applied locally, never on Hub):
// - File paths: basename + extension only (src/utils/retry.js -> retry.js)
// - Code content: never sent, only statistical metrics (lines, files)
// - Error messages: type signature only (TypeError: x is not a function -> TypeError)
// - Environment variables, secrets, user data: stripped entirely
// - Configurable via EVOLVER_TRACE_LEVEL: none | minimal | standard (default: minimal)
const path = require('path');
// Recognized trace verbosity levels, in increasing order of detail.
const TRACE_LEVELS = { none: 0, minimal: 1, standard: 2 };
// Resolve the active trace level from EVOLVER_TRACE_LEVEL, defaulting to
// 'minimal' for unset or unrecognized values.
function getTraceLevel() {
  const requested = String(process.env.EVOLVER_TRACE_LEVEL || 'minimal').toLowerCase().trim();
  const known = TRACE_LEVELS[requested] != null;
  return known ? requested : 'minimal';
}
// Reduce a file path to its basename (or extension) so directory structure
// is never leaked. Returns null for empty or non-string input.
function desensitizeFilePath(filePath) {
  if (typeof filePath !== 'string' || filePath.length === 0) return null;
  return path.basename(filePath) || path.extname(filePath) || 'unknown';
}
// Collapse an error message to a coarse, non-sensitive signature:
// JS error class name, errno-style code, HTTP 4xx/5xx status, or a short
// leading capitalized word. Returns null for empty/non-string input.
function extractErrorSignature(errorText) {
  if (!errorText || typeof errorText !== 'string') return null;
  const text = errorText.trim();
  // Ordered matchers: first hit wins.
  const jsErrorMatch = text.match(/^((?:[A-Z][a-zA-Z]*)?Error)\b/);
  if (jsErrorMatch) return jsErrorMatch[1];
  const errnoMatch = text.match(/\b(E[A-Z]{2,})\b/);
  if (errnoMatch) return errnoMatch[1];
  const httpMatch = text.match(/\b((?:4|5)\d{2})\b/);
  if (httpMatch) return 'HTTP_' + httpMatch[1];
  // Fallback: a short leading capitalized token is treated as an error type.
  const leadingWord = text.split(/[\s:]/)[0];
  if (leadingWord && leadingWord.length <= 40 && /^[A-Z]/.test(leadingWord)) return leadingWord;
  return 'UnknownError';
}
// Infer which tool categories an evolution used, from validation commands
// and whether any files were edited. Order reflects discovery order.
function inferToolChain(validationResults, blast) {
  const tools = new Set();
  if (blast && blast.files > 0) tools.add('file_edit');
  const results = Array.isArray(validationResults) ? validationResults : [];
  results.forEach((result) => {
    const cmd = String(result.cmd || '').trim();
    if (cmd.startsWith('npm test') || cmd.includes('jest') || cmd.includes('mocha')) {
      tools.add('test_run');
    } else if (cmd.includes('lint') || cmd.includes('eslint')) {
      tools.add('lint_check');
    } else if (cmd.includes('validate') || cmd.includes('check')) {
      tools.add('validation_run');
    } else if (cmd.startsWith('node ')) {
      tools.add('node_exec');
    }
  });
  return [...tools];
}
// Bucket a blast radius into low/medium/high by file and line churn;
// 'unknown' when no blast data is available.
function classifyBlastLevel(blast) {
  if (!blast) return 'unknown';
  const files = Number(blast.files) || 0;
  const lines = Number(blast.lines) || 0;
  if (files <= 3 && lines <= 50) return 'low';
  return files <= 10 && lines <= 200 ? 'medium' : 'high';
}
// Build a desensitized execution-trace object for one evolution cycle, or
// null when EVOLVER_TRACE_LEVEL=none. 'minimal' includes only aggregate
// metrics; 'standard' adds file-type histograms, validation commands,
// error signatures, inferred tool chain, duration, and canary result.
// NOTE(review): lines_added/lines_removed are heuristic splits of total
// churn (blast.lines), not exact diff stats.
function buildExecutionTrace({
  gene,
  mutation,
  signals,
  blast,
  constraintCheck,
  validation,
  canary,
  outcomeStatus,
  startedAt,
}) {
  const level = getTraceLevel();
  if (level === 'none') return null;
  const trace = {
    gene_id: gene && gene.id ? String(gene.id) : null,
    mutation_category: (mutation && mutation.category) || (gene && gene.category) || null,
    signals_matched: Array.isArray(signals) ? signals.slice(0, 10) : [],
    outcome: outcomeStatus || 'unknown',
  };
  // Minimal level: core metrics only
  trace.files_changed_count = blast ? Number(blast.files) || 0 : 0;
  trace.lines_added = 0;
  trace.lines_removed = 0;
  // Compute added/removed from blast if available
  if (blast && blast.lines) {
    // blast.lines is total churn (added + deleted); split heuristically
    const total = Number(blast.lines) || 0;
    if (outcomeStatus === 'success') {
      trace.lines_added = Math.round(total * 0.6);
      trace.lines_removed = total - trace.lines_added;
    } else {
      trace.lines_added = Math.round(total * 0.5);
      trace.lines_removed = total - trace.lines_added;
    }
  }
  trace.validation_result = validation && validation.ok ? 'pass' : 'fail';
  trace.blast_radius = classifyBlastLevel(blast);
  // Standard level: richer context
  if (level === 'standard') {
    // Desensitized file list (basenames only)
    if (blast && Array.isArray(blast.changed_files)) {
      trace.file_types = {};
      for (const f of blast.changed_files) {
        const ext = path.extname(f) || '.unknown';
        trace.file_types[ext] = (trace.file_types[ext] || 0) + 1;
      }
    }
    // Validation commands (already safe -- node/npm/npx only)
    if (validation && Array.isArray(validation.results)) {
      trace.validation_commands = validation.results.map(r => String(r.cmd || '').slice(0, 100));
    }
    // Error signatures (desensitized)
    trace.error_signatures = [];
    if (constraintCheck && Array.isArray(constraintCheck.violations)) {
      for (const v of constraintCheck.violations) {
        // Constraint violations have known prefixes; classify directly
        const vStr = String(v);
        if (vStr.startsWith('max_files')) trace.error_signatures.push('max_files_exceeded');
        else if (vStr.startsWith('forbidden_path')) trace.error_signatures.push('forbidden_path');
        else if (vStr.startsWith('HARD CAP')) trace.error_signatures.push('hard_cap_breach');
        else if (vStr.startsWith('CRITICAL')) trace.error_signatures.push('critical_overrun');
        else if (vStr.startsWith('critical_path')) trace.error_signatures.push('critical_path_modified');
        else if (vStr.startsWith('canary_failed')) trace.error_signatures.push('canary_failed');
        else if (vStr.startsWith('ethics:')) trace.error_signatures.push('ethics_violation');
        else {
          const sig = extractErrorSignature(v);
          if (sig) trace.error_signatures.push(sig);
        }
      }
    }
    // Failed validation commands contribute deduplicated signatures too.
    if (validation && Array.isArray(validation.results)) {
      for (const r of validation.results) {
        if (!r.ok && r.err) {
          const sig = extractErrorSignature(r.err);
          if (sig && !trace.error_signatures.includes(sig)) {
            trace.error_signatures.push(sig);
          }
        }
      }
    }
    trace.error_signatures = trace.error_signatures.slice(0, 10);
    // Tool chain inference
    trace.tool_chain = inferToolChain(
      validation && validation.results ? validation.results : [],
      blast
    );
    // Duration
    if (validation && validation.startedAt && validation.finishedAt) {
      trace.validation_duration_ms = validation.finishedAt - validation.startedAt;
    }
    // Canary result
    if (canary && !canary.skipped) {
      trace.canary_ok = !!canary.ok;
    }
  }
  // Timestamp
  trace.created_at = new Date().toISOString();
  return trace;
}
// Public API: trace construction plus its desensitization helpers.
module.exports = {
  buildExecutionTrace,
  desensitizeFilePath,
  extractErrorSignature,
  inferToolChain,
  classifyBlastLevel,
  getTraceLevel,
};

230
src/gep/gitOps.js Normal file
View File

@@ -0,0 +1,230 @@
// Git operations extracted from solidify.js for maintainability.
// All functions that directly invoke git CLI or manage rollback live here.
const fs = require('fs');
const path = require('path');
const { execSync } = require('child_process');
const { getRepoRoot } = require('./paths');
// Run a shell command synchronously and return its stdout as a string.
// Defaults: repo-root cwd, 120s timeout. Throws on non-zero exit/timeout.
function runCmd(cmd, opts = {}) {
  const cwd = opts.cwd || getRepoRoot();
  const requested = Number(opts.timeoutMs);
  const timeoutMs = Number.isFinite(requested) ? requested : 120000;
  return execSync(cmd, {
    cwd,
    encoding: 'utf8',
    stdio: ['ignore', 'pipe', 'pipe'],
    timeout: timeoutMs,
    windowsHide: true,
  });
}
// Like runCmd but never throws: returns { ok, out, err }, capturing any
// stdout/stderr produced by a failed command.
function tryRunCmd(cmd, opts = {}) {
  try {
    return { ok: true, out: runCmd(cmd, opts), err: '' };
  } catch (e) {
    const stdout = e && e.stdout ? String(e.stdout) : '';
    const stderr = e && e.stderr ? String(e.stderr) : '';
    const fallbackMsg = e && e.message ? String(e.message) : 'command_failed';
    return { ok: false, out: stdout, err: stderr || fallbackMsg };
  }
}
// Normalize a repo-relative path: forward slashes, no leading "./", trimmed.
function normalizeRelPath(relPath) {
  const raw = String(relPath || '');
  return raw.replace(/\\/g, '/').replace(/^\.\/+/, '').trim();
}
// Count the number of lines in a file.
// A trailing newline terminates the final line rather than starting a new
// empty one, so "a\nb\n" counts as 2 lines (the previous implementation
// initialized the counter at 1 and so reported 3 for newline-terminated
// files). Returns 0 for a missing or empty file, or on any read error.
function countFileLines(absPath) {
  try {
    if (!fs.existsSync(absPath)) return 0;
    const buf = fs.readFileSync(absPath);
    if (!buf || buf.length === 0) return 0;
    let n = 0;
    for (let i = 0; i < buf.length; i++) if (buf[i] === 10) n++;
    // A final line without a terminating '\n' still counts.
    if (buf[buf.length - 1] !== 10) n++;
    return n;
  } catch {
    return 0;
  }
}
// List all files touched relative to HEAD: unstaged, staged, and untracked.
// Results are deduplicated; failed git commands contribute nothing.
function gitListChangedFiles({ repoRoot }) {
  const commands = [
    'git diff --name-only',
    'git diff --cached --name-only',
    'git ls-files --others --exclude-standard',
  ];
  const files = new Set();
  for (const cmd of commands) {
    const res = tryRunCmd(cmd, { cwd: repoRoot, timeoutMs: 60000 });
    if (!res.ok) continue;
    String(res.out)
      .split('\n')
      .map((l) => l.trim())
      .filter(Boolean)
      .forEach((f) => files.add(f));
  }
  return Array.from(files);
}
// List untracked (new, non-ignored) files, or [] when git fails.
function gitListUntrackedFiles(repoRoot) {
  const res = tryRunCmd('git ls-files --others --exclude-standard', { cwd: repoRoot, timeoutMs: 60000 });
  if (!res.ok) return [];
  return String(res.out)
    .split('\n')
    .map((l) => l.trim())
    .filter(Boolean);
}
// Cap on the diff snapshot size embedded into assets.
const DIFF_SNAPSHOT_MAX_CHARS = 8000;
// Capture a combined unstaged + staged diff, truncated to the cap above.
// Returns '' when there is no diff or git is unavailable.
function captureDiffSnapshot(repoRoot) {
  const sections = [];
  for (const cmd of ['git diff', 'git diff --cached']) {
    const res = tryRunCmd(cmd, { cwd: repoRoot, timeoutMs: 30000 });
    if (res.ok && res.out) sections.push(String(res.out));
  }
  let combined = sections.join('\n');
  if (combined.length > DIFF_SNAPSHOT_MAX_CHARS) {
    combined = combined.slice(0, DIFF_SNAPSHOT_MAX_CHARS) + '\n... [TRUNCATED]';
  }
  return combined || '';
}
// True when `dir` is inside a git work tree and git itself is available;
// false on any failure (no repo, no git binary, timeout).
function isGitRepo(dir) {
  try {
    execSync('git rev-parse --git-dir', {
      cwd: dir,
      encoding: 'utf8',
      stdio: ['ignore', 'pipe', 'pipe'],
      timeout: 5000,
    });
    return true;
  } catch (_) {
    return false;
  }
}
// Skill directories that rollback must never delete.
const CRITICAL_PROTECTED_PREFIXES = [
  'skills/feishu-evolver-wrapper/',
  'skills/feishu-common/',
  'skills/feishu-post/',
  'skills/feishu-card/',
  'skills/feishu-doc/',
  'skills/skill-tools/',
  'skills/clawhub/',
  'skills/clawhub-batch-undelete/',
  'skills/git-sync/',
  'skills/evolver/',
];
// Top-level identity/config files that rollback must never delete.
const CRITICAL_PROTECTED_FILES = [
  'MEMORY.md',
  'SOUL.md',
  'IDENTITY.md',
  'AGENTS.md',
  'USER.md',
  'HEARTBEAT.md',
  'RECENT_EVENTS.md',
  'TOOLS.md',
  'TROUBLESHOOTING.md',
  'openclaw.json',
  '.env',
  'package.json',
];
// True when relPath falls inside a protected skill directory or names one
// of the protected top-level files (after normalization).
function isCriticalProtectedPath(relPath) {
  const rel = normalizeRelPath(relPath);
  if (!rel) return false;
  const inProtectedDir = CRITICAL_PROTECTED_PREFIXES.some((prefix) => {
    const p = prefix.replace(/\/+$/, '');
    return rel === p || rel.startsWith(p + '/');
  });
  if (inProtectedDir) return true;
  return CRITICAL_PROTECTED_FILES.includes(rel);
}
// Revert tracked-file changes in the repo, honoring EVOLVER_ROLLBACK_MODE:
//   none  - leave the working tree untouched
//   stash - stash changes (including untracked) for later recovery; falls
//           back to restore + hard reset when stashing fails
//   hard  - (default) restore + hard reset, discarding all changes
function rollbackTracked(repoRoot) {
  const mode = String(process.env.EVOLVER_ROLLBACK_MODE || 'hard').toLowerCase();
  if (mode === 'none') {
    console.log('[Rollback] EVOLVER_ROLLBACK_MODE=none, skipping rollback');
    return;
  }
  if (mode === 'stash') {
    // Timestamped ref so operators can identify which rollback created it.
    const stashRef = 'evolver-rollback-' + Date.now();
    const result = tryRunCmd('git stash push -m "' + stashRef + '" --include-untracked', { cwd: repoRoot, timeoutMs: 60000 });
    if (result.ok) {
      console.log('[Rollback] Changes stashed with ref: ' + stashRef + '. Recover with "git stash list" and "git stash pop".');
    } else {
      console.log('[Rollback] Stash failed or no changes, using hard reset');
      tryRunCmd('git restore --staged --worktree .', { cwd: repoRoot, timeoutMs: 60000 });
      tryRunCmd('git reset --hard', { cwd: repoRoot, timeoutMs: 60000 });
    }
    return;
  }
  console.log('[Rollback] EVOLVER_ROLLBACK_MODE=hard, resetting tracked files in: ' + repoRoot);
  tryRunCmd('git restore --staged --worktree .', { cwd: repoRoot, timeoutMs: 60000 });
  tryRunCmd('git reset --hard', { cwd: repoRoot, timeoutMs: 60000 });
}
// Delete untracked files created since `baselineUntracked` was captured,
// then prune directories left empty by those deletions. Safety rails:
//   - critical protected paths are skipped (and reported)
//   - deletions are confined to repoRoot via a resolved-path prefix check
//   - top-level directories are never removed, only nested ones
// Returns { deleted, skipped, removedDirs } with repo-relative paths.
function rollbackNewUntrackedFiles({ repoRoot, baselineUntracked }) {
  const baseline = new Set((Array.isArray(baselineUntracked) ? baselineUntracked : []).map(String));
  const current = gitListUntrackedFiles(repoRoot);
  // Only files that were NOT untracked at baseline are candidates.
  const toDelete = current.filter(f => !baseline.has(String(f)));
  const skipped = [];
  const deleted = [];
  for (const rel of toDelete) {
    const safeRel = String(rel || '').replace(/\\/g, '/').replace(/^\.\/+/, '');
    if (!safeRel) continue;
    if (isCriticalProtectedPath(safeRel)) {
      skipped.push(safeRel);
      continue;
    }
    const abs = path.join(repoRoot, safeRel);
    // Path-escape guard: never touch anything outside repoRoot.
    const normRepo = path.resolve(repoRoot);
    const normAbs = path.resolve(abs);
    if (!normAbs.startsWith(normRepo + path.sep) && normAbs !== normRepo) continue;
    try {
      if (fs.existsSync(normAbs) && fs.statSync(normAbs).isFile()) {
        fs.unlinkSync(normAbs);
        deleted.push(safeRel);
      }
    } catch (e) {
      console.warn('[evolver] rollbackNewUntrackedFiles unlink failed:', safeRel, e && e.message || e);
    }
  }
  if (skipped.length > 0) {
    console.log(`[Rollback] Skipped ${skipped.length} critical protected file(s): ${skipped.slice(0, 5).join(', ')}`);
  }
  // Collect ancestor directories of deleted files; the `includes('/')`
  // check leaves top-level directories out of the removal set.
  const dirsToCheck = new Set();
  for (let di = 0; di < deleted.length; di++) {
    let dir = path.dirname(deleted[di]);
    while (dir && dir !== '.' && dir !== '/') {
      const normalized = dir.replace(/\\/g, '/');
      if (!normalized.includes('/')) break;
      dirsToCheck.add(dir);
      dir = path.dirname(dir);
    }
  }
  // Deepest-first (longest path first) so nested empties vacate parents.
  const sortedDirs = Array.from(dirsToCheck).sort(function (a, b) { return b.length - a.length; });
  const removedDirs = [];
  for (let si = 0; si < sortedDirs.length; si++) {
    if (isCriticalProtectedPath(sortedDirs[si] + '/')) continue;
    const dirAbs = path.join(repoRoot, sortedDirs[si]);
    try {
      const entries = fs.readdirSync(dirAbs);
      if (entries.length === 0) {
        fs.rmdirSync(dirAbs);
        removedDirs.push(sortedDirs[si]);
      }
    } catch (e) {
      console.warn('[evolver] rollbackNewUntrackedFiles rmdir failed:', sortedDirs[si], e && e.message || e);
    }
  }
  if (removedDirs.length > 0) {
    console.log('[Rollback] Removed ' + removedDirs.length + ' empty director' + (removedDirs.length === 1 ? 'y' : 'ies') + ': ' + removedDirs.slice(0, 5).join(', '));
  }
  return { deleted, skipped, removedDirs };
}
// Public API: command helpers, git queries, protection rules, and rollback.
module.exports = {
  runCmd,
  tryRunCmd,
  normalizeRelPath,
  countFileLines,
  gitListChangedFiles,
  gitListUntrackedFiles,
  captureDiffSnapshot,
  DIFF_SNAPSHOT_MAX_CHARS,
  isGitRepo,
  isCriticalProtectedPath,
  CRITICAL_PROTECTED_PREFIXES,
  CRITICAL_PROTECTED_FILES,
  rollbackTracked,
  rollbackNewUntrackedFiles,
};

206
src/gep/hubReview.js Normal file
View File

@@ -0,0 +1,206 @@
// Hub Asset Review: submit usage-verified reviews after solidify.
//
// When an evolution cycle reuses a Hub asset (source_type = 'reused' or 'reference'),
// we submit a review to POST /a2a/assets/:assetId/reviews after solidify completes.
// Rating is derived from outcome: success -> 4-5, failure -> 1-2.
// Reviews are non-blocking; errors never affect the solidify result.
// Duplicate prevention: a local file tracks reviewed assetIds to avoid re-reviewing.
const fs = require('fs');
const path = require('path');
const { getNodeId, getHubNodeSecret } = require('./a2aProtocol');
const { logAssetCall } = require('./assetCallLog');
const REVIEW_HISTORY_FILE = path.join(
require('./paths').getEvolutionDir(),
'hub_review_history.json'
);
const REVIEW_HISTORY_MAX_ENTRIES = 500;
// Load the reviewed-asset history map ({ assetId: { at, rating, success } }).
// Returns {} when the file is absent, blank, or corrupt.
function _loadReviewHistory() {
  try {
    if (!fs.existsSync(REVIEW_HISTORY_FILE)) return {};
    const raw = fs.readFileSync(REVIEW_HISTORY_FILE, 'utf8');
    return raw.trim() ? JSON.parse(raw) : {};
  } catch {
    return {};
  }
}
// Persist review history atomically (temp file + rename), pruning the
// oldest entries beyond the cap first. Mutates `history` in place when
// pruning. Best-effort: errors are swallowed.
function _saveReviewHistory(history) {
  try {
    const dir = path.dirname(REVIEW_HISTORY_FILE);
    if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true });
    const keys = Object.keys(history);
    const excess = keys.length - REVIEW_HISTORY_MAX_ENTRIES;
    if (excess > 0) {
      const oldestFirst = keys
        .map(k => ({ k, t: history[k].at || 0 }))
        .sort((a, b) => a.t - b.t);
      for (const entry of oldestFirst.slice(0, excess)) delete history[entry.k];
    }
    const tmp = REVIEW_HISTORY_FILE + '.tmp';
    fs.writeFileSync(tmp, JSON.stringify(history, null, 2) + '\n', 'utf8');
    fs.renameSync(tmp, REVIEW_HISTORY_FILE);
  } catch {}
}
// True when a review for this asset was already submitted from this node.
function _alreadyReviewed(assetId) {
  return Boolean(_loadReviewHistory()[assetId]);
}
// Record a submitted review so the same asset is never reviewed twice.
function _markReviewed(assetId, rating, success) {
  const history = _loadReviewHistory();
  const entry = { at: Date.now(), rating: rating, success: success };
  history[assetId] = entry;
  _saveReviewHistory(history);
}
// Map an evolution outcome to a 1-5 star rating:
//   success with score >= 0.85 -> 5, other success -> 4,
//   failure with constraint violations -> 1, any other failure -> 2.
function _deriveRating(outcome, constraintCheck) {
  if (outcome && outcome.status === 'success') {
    return (Number(outcome.score) || 0) >= 0.85 ? 5 : 4;
  }
  const violations = constraintCheck && constraintCheck.violations;
  if (Array.isArray(violations) && violations.length > 0) return 1;
  return 2;
}
// Assemble the plain-text review body from cycle metadata.
// Optional sections (gene, signals, blast radius) are included only when
// present; the result is capped at 2000 characters.
function _buildReviewContent({ outcome, gene, signals, blast, sourceType }) {
  const status = (outcome && outcome.status) ? outcome.status : 'unknown';
  const scoreNum = outcome ? Number(outcome.score) : NaN;
  const score = Number.isFinite(scoreNum) ? scoreNum.toFixed(2) : '?';
  const lines = [
    `Outcome: ${status} (score: ${score})`,
    `Reuse mode: ${sourceType || 'unknown'}`,
  ];
  if (gene && gene.id) {
    lines.push(`Gene: ${gene.id} (${gene.category || 'unknown'})`);
  }
  if (Array.isArray(signals) && signals.length > 0) {
    lines.push(`Signals: ${signals.slice(0, 6).join(', ')}`);
  }
  if (blast) {
    lines.push(`Blast radius: ${blast.files || 0} file(s), ${blast.lines || 0} line(s)`);
  }
  lines.push(status === 'success'
    ? 'The fetched asset was successfully applied and solidified.'
    : 'The fetched asset did not lead to a successful evolution cycle.');
  return lines.join('\n').slice(0, 2000);
}
// Hub base URL from the environment, with trailing slashes stripped.
function getHubUrl() {
  const raw = process.env.A2A_HUB_URL || '';
  return raw.replace(/\/+$/, '');
}
// Submit a usage-verified review for a Hub asset after a solidify cycle.
// Rating is derived from the outcome (success -> 4-5, failure -> 1-2) and the
// review text is assembled from cycle metadata. Entirely non-blocking: every
// failure path returns a result object and nothing throws.
//
// Returns { submitted: boolean } plus reason / rating / asset_id / error
// fields depending on the path taken.
async function submitHubReview({
  reusedAssetId,
  sourceType,
  outcome,
  gene,
  signals,
  blast,
  constraintCheck,
  runId,
}) {
  var hubUrl = getHubUrl();
  if (!hubUrl) return { submitted: false, reason: 'no_hub_url' };
  if (!reusedAssetId || typeof reusedAssetId !== 'string') {
    return { submitted: false, reason: 'no_reused_asset_id' };
  }
  // Only assets actually sourced from the Hub may be reviewed.
  if (sourceType !== 'reused' && sourceType !== 'reference') {
    return { submitted: false, reason: 'not_hub_sourced' };
  }
  // Local duplicate guard (the Hub also rejects duplicates server-side).
  if (_alreadyReviewed(reusedAssetId)) {
    return { submitted: false, reason: 'already_reviewed' };
  }
  var rating = _deriveRating(outcome, constraintCheck);
  var content = _buildReviewContent({ outcome, gene, signals, blast, sourceType });
  var senderId = getNodeId();
  var endpoint = hubUrl + '/a2a/assets/' + encodeURIComponent(reusedAssetId) + '/reviews';
  var headers = { 'Content-Type': 'application/json', 'Accept': 'application/json' };
  var secret = getHubNodeSecret();
  if (secret) {
    headers['Authorization'] = 'Bearer ' + secret;
  }
  var body = JSON.stringify({
    sender_id: senderId,
    rating: rating,
    content: content,
  });
  // BUGFIX: abort() must be called WITHOUT a reason argument. Aborting with a
  // string made fetch reject with that bare string (no .name property), so
  // timeouts fell through to 'fetch_error' below and logged an undefined
  // message. A plain abort() rejects with a proper AbortError.
  var controller = new AbortController();
  var timer = setTimeout(function () { controller.abort(); }, 10000);
  try {
    var res = await fetch(endpoint, {
      method: 'POST',
      headers: headers,
      body: body,
      signal: controller.signal,
    });
    if (res.ok) {
      _markReviewed(reusedAssetId, rating, true);
      console.log(
        '[HubReview] Submitted review for ' + reusedAssetId + ': rating=' + rating + ', outcome=' + (outcome && outcome.status)
      );
      logAssetCall({
        run_id: runId || null,
        action: 'hub_review_submitted',
        asset_id: reusedAssetId,
        extra: { rating: rating, outcome_status: outcome && outcome.status },
      });
      return { submitted: true, rating: rating, asset_id: reusedAssetId };
    }
    var errData = await res.json().catch(function () { return {}; });
    var errCode = errData.error || errData.code || ('http_' + res.status);
    if (errCode === 'already_reviewed') {
      // The Hub already has this review (e.g. local state was lost);
      // record it locally so we stop retrying.
      _markReviewed(reusedAssetId, rating, false);
    }
    console.log('[HubReview] Hub rejected review for ' + reusedAssetId + ': ' + errCode);
    logAssetCall({
      run_id: runId || null,
      action: 'hub_review_rejected',
      asset_id: reusedAssetId,
      extra: { rating: rating, error: errCode },
    });
    return { submitted: false, reason: errCode, rating: rating };
  } catch (err) {
    var reason = err && err.name === 'AbortError' ? 'timeout' : 'fetch_error';
    console.log('[HubReview] Failed (non-fatal, ' + reason + '): ' + err.message);
    logAssetCall({
      run_id: runId || null,
      action: 'hub_review_failed',
      asset_id: reusedAssetId,
      extra: { rating: rating, reason: reason, error: err.message },
    });
    return { submitted: false, reason: reason, error: err.message };
  } finally {
    // BUGFIX: previously only the success path cleared the timer, leaking a
    // live 10-second timeout whenever fetch rejected.
    clearTimeout(timer);
  }
}
// Public API: fire-and-forget review submission (errors never propagate).
module.exports = {
  submitHubReview,
};

407
src/gep/hubSearch.js Normal file
View File

@@ -0,0 +1,407 @@
// Hub Search-First Evolution: query evomap-hub for reusable solutions before local solve.
//
// Flow: extractSignals() -> hubSearch(signals) -> if hit: reuse; if miss: normal evolve
// Two modes: direct (skip local reasoning) | reference (inject into prompt as strong hint)
//
// Two-phase search-then-fetch to minimize credit cost:
// Phase 1: POST /a2a/fetch with signals + search_only=true (free, metadata only)
// Phase 2: POST /a2a/fetch with asset_ids=[selected] (pays for 1 asset only)
//
// Caching layers:
// 1. Search cache: signal fingerprint -> Phase 1 results (avoids repeat searches)
// 2. Payload cache: asset_id -> full payload (avoids repeat Phase 2 fetches)
const { getNodeId, buildFetch, getHubNodeSecret } = require('./a2aProtocol');
const { logAssetCall } = require('./assetCallLog');
const DEFAULT_MIN_REUSE_SCORE = 0.72;
const DEFAULT_REUSE_MODE = 'reference'; // 'direct' | 'reference'
const MAX_STREAK_CAP = 5;
const SEARCH_CACHE_TTL_MS = 5 * 60 * 1000;
const SEARCH_CACHE_MAX = 200;
const PAYLOAD_CACHE_MAX = 100;
const MIN_PHASE2_MS = 500;
const SEMANTIC_TIMEOUT_MS = 3000;
const SEMANTIC_SIMILARITY_BONUS = 0.3;
// --- In-memory caches (per-process lifetime, bounded) ---
const _searchCache = new Map(); // cacheKey -> { ts, value: results[] }
const _payloadCache = new Map(); // asset_id -> full payload object
// Order-insensitive fingerprint of a signal list (sorted, pipe-joined).
function _cacheKey(signals) {
  return [...signals].sort().join('|');
}
// Return cached Phase 1 results for a key, or null when absent or expired.
// Expired entries are evicted on read.
function _getSearchCache(key) {
  const hit = _searchCache.get(key);
  if (!hit) return null;
  const expired = Date.now() - hit.ts > SEARCH_CACHE_TTL_MS;
  if (expired) {
    _searchCache.delete(key);
    return null;
  }
  return hit.value;
}
// Store Phase 1 results under a signal fingerprint; evicts the oldest
// (first-inserted) entry once the cache is at capacity.
function _setSearchCache(key, value) {
  if (_searchCache.size >= SEARCH_CACHE_MAX) {
    _searchCache.delete(_searchCache.keys().next().value);
  }
  _searchCache.set(key, { ts: Date.now(), value });
}
// Cached full payload for an asset id, or null when not present.
function _getPayloadCache(assetId) {
  const cached = _payloadCache.get(assetId);
  return cached || null;
}
// Store a fetched full payload; evicts the oldest (first-inserted) entry
// once the cache is at capacity.
function _setPayloadCache(assetId, payload) {
  if (_payloadCache.size >= PAYLOAD_CACHE_MAX) {
    _payloadCache.delete(_payloadCache.keys().next().value);
  }
  _payloadCache.set(assetId, payload);
}
// Drop both in-memory caches (used by tests and forced refreshes).
function clearCaches() {
  for (const cache of [_searchCache, _payloadCache]) cache.clear();
}
// --- Config helpers ---
// Hub base URL from the environment, with trailing slashes stripped.
function getHubUrl() {
  const raw = process.env.A2A_HUB_URL || '';
  return raw.replace(/\/+$/, '');
}
// Reuse mode from env: 'direct' when explicitly requested, otherwise
// 'reference' (any unrecognized value falls back to 'reference').
function getReuseMode() {
  const requested = String(process.env.EVOLVER_REUSE_MODE || DEFAULT_REUSE_MODE).toLowerCase();
  if (requested === 'direct') return 'direct';
  return 'reference';
}
// Minimum reuse-score threshold from env; falls back to the default for
// missing, non-numeric, or non-positive values.
function getMinReuseScore() {
  const parsed = Number(process.env.EVOLVER_MIN_REUSE_SCORE);
  if (Number.isFinite(parsed) && parsed > 0) return parsed;
  return DEFAULT_MIN_REUSE_SCORE;
}
// JSON request headers with Bearer auth: the node secret wins when
// available, otherwise A2A_HUB_TOKEN; no auth header when neither is set.
function _buildHeaders() {
  const headers = { 'Content-Type': 'application/json', 'Accept': 'application/json' };
  const credential = getHubNodeSecret() || process.env.A2A_HUB_TOKEN;
  if (credential) headers['Authorization'] = `Bearer ${credential}`;
  return headers;
}
// Semantic search is on by default; disabled only when HUBSEARCH_SEMANTIC
// is exactly 'false' or '0'.
function isSemanticEnabled() {
  const flag = process.env.HUBSEARCH_SEMANTIC;
  return flag !== 'false' && flag !== '0';
}
// Build a free-text semantic query from signals: drop raw error-signature
// signals, strip short "prefix:" tags, keep at most 12 non-empty terms.
function buildSemanticQuery(signals) {
  const terms = [];
  for (const signal of signals) {
    if (signal.startsWith('errsig:') || signal.startsWith('errsig_norm:')) continue;
    const sep = signal.indexOf(':');
    const term = (sep > 0 && sep < 30) ? signal.slice(sep + 1).trim() : signal;
    if (!term) continue;
    terms.push(term);
    if (terms.length === 12) break;
  }
  return terms.join(' ');
}
// GET the Hub's semantic-search endpoint for Gene assets matching a
// free-text query built from the signals. Returns [] on any failure,
// non-OK status, or timeout; each hit is annotated in place with its
// numeric similarity under _semantic_similarity.
async function fetchSemanticResults(hubUrl, headers, signalList, timeoutMs) {
  const query = buildSemanticQuery(signalList);
  if (!query || query.length < 3) return [];
  const url = `${hubUrl}/a2a/assets/semantic-search?q=${encodeURIComponent(query)}&type=Gene&limit=10`;
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutMs);
  try {
    const res = await fetch(url, { method: 'GET', headers: headers, signal: controller.signal });
    clearTimeout(timer);
    if (!res.ok) return [];
    const body = await res.json();
    const assets = Array.isArray(body && body.assets) ? body.assets : [];
    for (const asset of assets) {
      asset._semantic_similarity = Number(asset.similarity) || 0;
    }
    return assets;
  } catch {
    clearTimeout(timer);
    return [];
  }
}
// Merge keyword-fetch and semantic-search results, de-duplicating by asset
// id. Fetch results win: a duplicate semantic hit only contributes its
// similarity score to the already-present entry. Entries without an id
// cannot be de-duplicated and are always kept.
//
// Perf fix: the original located duplicates with merged.find() inside the
// semantic loop (O(n*m)); a Map of id -> first entry makes this O(n+m).
function mergeResults(fetchResults, semanticResults) {
  const merged = [];
  const byId = new Map(); // asset id -> first entry pushed into `merged`
  for (const asset of fetchResults) {
    const id = asset.asset_id || asset.assetId || '';
    if (id && !byId.has(id)) byId.set(id, asset);
    merged.push(asset);
  }
  for (const candidate of semanticResults) {
    const id = candidate.asset_id || candidate.assetId || '';
    if (id && byId.has(id)) {
      // Duplicate: annotate the existing entry with the semantic similarity.
      byId.get(id)._semantic_similarity = candidate._semantic_similarity || 0;
      continue;
    }
    if (id) byId.set(id, candidate);
    merged.push(candidate);
  }
  return merged;
}
/**
 * Score a hub asset for local reuse quality.
 * rank = confidence * clamp(success_streak, 1..MAX_STREAK_CAP) * (reputation / 100)
 * The streak is capped to prevent unbounded score inflation; a missing
 * reputation defaults to a neutral 50. Assets that arrived via semantic
 * search additionally earn similarity * SEMANTIC_SIMILARITY_BONUS.
 */
function scoreHubResult(asset) {
  const confidence = Number(asset.confidence) || 0;
  const rawStreak = Number(asset.success_streak) || 0;
  const streak = Math.min(Math.max(rawStreak, 1), MAX_STREAK_CAP);
  const repValue = Number(asset.reputation_score);
  const reputation = Number.isFinite(repValue) ? repValue : 50;
  let score = confidence * streak * (reputation / 100);
  const similarity = Number(asset._semantic_similarity) || 0;
  if (similarity > 0) score += similarity * SEMANTIC_SIMILARITY_BONUS;
  return score;
}
/**
 * Pick the highest-scoring asset above the threshold.
 * Assets with a status other than 'promoted' are skipped.
 * Returns { match, score, mode } (score rounded to 3 decimal places)
 * or null when nothing qualifies.
 */
function pickBestMatch(results, threshold) {
  if (!Array.isArray(results) || results.length === 0) return null;
  let winner = null;
  let winnerScore = 0;
  for (const candidate of results) {
    if (candidate.status && candidate.status !== 'promoted') continue;
    const score = scoreHubResult(candidate);
    if (score > winnerScore) {
      winnerScore = score;
      winner = candidate;
    }
  }
  if (!winner || winnerScore < threshold) return null;
  return {
    match: winner,
    score: Math.round(winnerScore * 1000) / 1000,
    mode: getReuseMode(),
  };
}
/**
* Search the hub for reusable assets matching the given signals.
*
* Two-phase flow to minimize credit cost:
* Phase 1: search_only=true -> get candidate metadata (free, no credit cost)
* Phase 2: asset_ids=[best_match] -> fetch full payload for the selected asset only
*
* Caching:
* - Phase 1 results are cached by signal fingerprint for 5 minutes.
* - Phase 2 payloads are cached by asset_id indefinitely (bounded LRU).
* - Both caches reduce Hub load and eliminate redundant network round-trips.
*
* Timeout: a single deadline spans both phases; Phase 2 is skipped if insufficient
* time remains (< 500ms).
*
* Returns { hit: true, match, score, mode } or { hit: false }.
*/
async function hubSearch(signals, opts) {
  const hubUrl = getHubUrl();
  if (!hubUrl) return { hit: false, reason: 'no_hub_url' };
  // Normalize input to a clean list of non-empty, trimmed string signals.
  const signalList = Array.isArray(signals)
    ? signals.map(s => typeof s === 'string' ? s.trim() : '').filter(Boolean)
    : [];
  if (signalList.length === 0) return { hit: false, reason: 'no_signals' };
  const threshold = (opts && Number.isFinite(opts.threshold)) ? opts.threshold : getMinReuseScore();
  const timeoutMs = (opts && Number.isFinite(opts.timeoutMs)) ? opts.timeoutMs : 8000;
  // One wall-clock deadline shared by Phase 1 and Phase 2.
  const deadline = Date.now() + timeoutMs;
  const runId = (opts && opts.run_id) || null;
  try {
    const endpoint = hubUrl + '/a2a/fetch';
    const headers = _buildHeaders();
    const cacheKey = _cacheKey(signalList);
    // --- Phase 1: search_only (free) + optional parallel semantic search ---
    let results = _getSearchCache(cacheKey);
    let cacheHit = !!results;
    var semanticUsed = false;
    if (!results) {
      // Keyword search via POST /a2a/fetch with search_only=true.
      // NOTE(review): if the deadline has nearly elapsed, the computed
      // setTimeout delay can be <= 0 and the request aborts immediately.
      var fetchPromise = (async function () {
        const searchMsg = buildFetch({ signals: signalList, searchOnly: true });
        const controller = new AbortController();
        const timer = setTimeout(() => controller.abort(), deadline - Date.now());
        try {
          const res = await fetch(endpoint, {
            method: 'POST',
            headers,
            body: JSON.stringify(searchMsg),
            signal: controller.signal,
          });
          clearTimeout(timer);
          if (!res.ok) return { ok: false, status: res.status, results: [] };
          const data = await res.json();
          return {
            ok: true,
            results: (data && data.payload && Array.isArray(data.payload.results)) ? data.payload.results : [],
          };
        } catch (e) {
          clearTimeout(timer);
          return { ok: false, error: e.message, results: [] };
        }
      })();
      // Semantic search runs concurrently; it has its own shorter timeout
      // and degrades to [] on any failure.
      var semanticPromise = isSemanticEnabled()
        ? fetchSemanticResults(hubUrl, headers, signalList, SEMANTIC_TIMEOUT_MS)
        : Promise.resolve([]);
      var settled = await Promise.allSettled([fetchPromise, semanticPromise]);
      var fetchResult = settled[0].status === 'fulfilled' ? settled[0].value : { ok: false, results: [] };
      var semanticResults = settled[1].status === 'fulfilled' ? settled[1].value : [];
      // Hard miss only when BOTH channels produced nothing usable.
      if (!fetchResult.ok && semanticResults.length === 0) {
        logAssetCall({
          run_id: runId, action: 'hub_search_miss', signals: signalList,
          reason: fetchResult.status ? `hub_http_${fetchResult.status}` : 'fetch_error',
          via: 'search_then_fetch',
        });
        return { hit: false, reason: fetchResult.status ? `hub_http_${fetchResult.status}` : 'fetch_error' };
      }
      results = mergeResults(fetchResult.results || [], semanticResults);
      if (semanticResults.length > 0) semanticUsed = true;
      _setSearchCache(cacheKey, results);
    }
    if (results.length === 0) {
      logAssetCall({
        run_id: runId, action: 'hub_search_miss', signals: signalList,
        reason: 'no_results', via: 'search_then_fetch',
      });
      return { hit: false, reason: 'no_results' };
    }
    const pick = pickBestMatch(results, threshold);
    if (!pick) {
      logAssetCall({
        run_id: runId, action: 'hub_search_miss', signals: signalList,
        reason: 'below_threshold',
        extra: { candidates: results.length, threshold },
        via: 'search_then_fetch',
      });
      return { hit: false, reason: 'below_threshold', candidates: results.length };
    }
    // --- Phase 2: fetch full payload (paid, but free if already purchased) ---
    const selectedAssetId = pick.match.asset_id;
    if (selectedAssetId) {
      const cachedPayload = _getPayloadCache(selectedAssetId);
      if (cachedPayload) {
        // Shallow-merge: payload fields overwrite the Phase 1 metadata.
        pick.match = { ...pick.match, ...cachedPayload };
      } else {
        const remaining = deadline - Date.now();
        if (remaining > MIN_PHASE2_MS) {
          // Phase 2 failures are non-fatal: the Phase 1 metadata match is
          // still returned as a hit, just without the full payload.
          try {
            const fetchMsg = buildFetch({ assetIds: [selectedAssetId] });
            const controller2 = new AbortController();
            const timer2 = setTimeout(() => controller2.abort(), remaining);
            const res2 = await fetch(endpoint, {
              method: 'POST',
              headers,
              body: JSON.stringify(fetchMsg),
              signal: controller2.signal,
            });
            clearTimeout(timer2);
            if (res2.ok) {
              const data2 = await res2.json();
              const fullResults = (data2 && data2.payload && Array.isArray(data2.payload.results))
                ? data2.payload.results
                : [];
              if (fullResults.length > 0) {
                _setPayloadCache(selectedAssetId, fullResults[0]);
                pick.match = { ...pick.match, ...fullResults[0] };
              }
            }
          } catch (fetchErr) {
            console.log(`[HubSearch] Phase 2 fetch failed (non-fatal): ${fetchErr.message}`);
          }
        } else {
          console.log(`[HubSearch] Phase 2 skipped: ${remaining}ms remaining < ${MIN_PHASE2_MS}ms threshold`);
        }
      }
    }
    var viaLabel = cacheHit ? 'search_cached' : (semanticUsed ? 'search+semantic' : 'search_then_fetch');
    console.log(`[HubSearch] Hit via ${viaLabel}: ${pick.match.asset_id || 'unknown'} (score=${pick.score}, mode=${pick.mode}${pick.match._semantic_similarity ? ', sim=' + pick.match._semantic_similarity : ''})`);
    logAssetCall({
      run_id: runId,
      action: 'hub_search_hit',
      asset_id: pick.match.asset_id || null,
      asset_type: pick.match.asset_type || pick.match.type || null,
      source_node_id: pick.match.source_node_id || null,
      chain_id: pick.match.chain_id || null,
      score: pick.score,
      mode: pick.mode,
      signals: signalList,
      via: viaLabel,
    });
    return {
      hit: true,
      match: pick.match,
      score: pick.score,
      mode: pick.mode,
      asset_id: pick.match.asset_id || null,
      source_node_id: pick.match.source_node_id || null,
      chain_id: pick.match.chain_id || null,
    };
  } catch (err) {
    // Unexpected failure anywhere above; never propagates to the caller.
    const reason = err.name === 'AbortError' ? 'timeout' : 'fetch_error';
    console.log(`[HubSearch] Failed (non-fatal, ${reason}): ${err.message}`);
    logAssetCall({
      run_id: runId,
      action: 'hub_search_miss',
      signals: signalList,
      reason,
      extra: { error: err.message },
      via: 'search_then_fetch',
    });
    return { hit: false, reason, error: err.message };
  }
}
// Public API; scoreHubResult/pickBestMatch/clearCaches are exported mainly
// for testability.
module.exports = {
  hubSearch,
  scoreHubResult,
  pickBestMatch,
  getReuseMode,
  getMinReuseScore,
  getHubUrl,
  clearCaches,
};

157
src/gep/idleScheduler.js Normal file
View File

@@ -0,0 +1,157 @@
'use strict';
// OMLS-inspired idle scheduler: detects user inactivity windows and recommends
// evolution intensity levels. Monitors system idle time on supported platforms.
// When idle, the evolver can run more aggressive operations (distillation,
// reflection); when busy, it only collects signals.
const { execSync } = require('child_process');
const path = require('path');
const fs = require('fs');
const { getEvolutionDir } = require('./paths');
const IDLE_THRESHOLD_SECONDS = parseInt(process.env.OMLS_IDLE_THRESHOLD || '300', 10) || 300;
const DEEP_IDLE_THRESHOLD_SECONDS = parseInt(process.env.OMLS_DEEP_IDLE_THRESHOLD || '1800', 10) || 1800;
// Best-effort probe of OS-level user idle time, in whole seconds.
// Returns -1 when the platform is unsupported or the probe fails; callers
// treat -1 as "unknown" (determineIntensity maps it to 'normal').
function getSystemIdleSeconds() {
  const platform = process.platform;
  try {
    if (platform === 'win32') {
      // Inline C# P/Invoke of user32!GetLastInputInfo, compiled and invoked
      // by PowerShell; prints idle seconds on stdout.
      const psCode = [
        'Add-Type -TypeDefinition @"',
        'using System;',
        'using System.Runtime.InteropServices;',
        'public struct LASTINPUTINFO { public uint cbSize; public uint dwTime; }',
        'public class IdleTime {',
        ' [DllImport("user32.dll")] static extern bool GetLastInputInfo(ref LASTINPUTINFO plii);',
        ' public static uint Get() {',
        ' LASTINPUTINFO lii = new LASTINPUTINFO();',
        ' lii.cbSize = (uint)Marshal.SizeOf(lii);',
        ' GetLastInputInfo(ref lii);',
        ' return ((uint)Environment.TickCount - lii.dwTime) / 1000;',
        ' }',
        '}',
        '"@',
        '[IdleTime]::Get()',
      ].join('\n');
      // NOTE(review): fixed temp-file name — two concurrent evolver
      // processes could race on this path; confirm single-instance usage.
      const tmpPs = path.join(require('os').tmpdir(), 'evolver_idle_check.ps1');
      require('fs').writeFileSync(tmpPs, psCode, 'utf8');
      const result = execSync('powershell -NoProfile -ExecutionPolicy Bypass -File "' + tmpPs + '"', { timeout: 10000, encoding: 'utf8' }).trim();
      try { require('fs').unlinkSync(tmpPs); } catch (e) {}
      const seconds = parseInt(result, 10);
      return Number.isFinite(seconds) ? seconds : -1;
    } else if (platform === 'darwin') {
      // HIDIdleTime is reported in nanoseconds; convert to seconds.
      const result = execSync('ioreg -c IOHIDSystem | grep HIDIdleTime', { timeout: 5000, encoding: 'utf8' });
      const match = result.match(/(\d+)/);
      if (match) {
        return Math.floor(parseInt(match[1], 10) / 1000000000);
      }
    } else if (platform === 'linux') {
      // xprintidle (X11 only) reports milliseconds; prints -1 when absent.
      try {
        const result = execSync('xprintidle 2>/dev/null || echo -1', { timeout: 5000, encoding: 'utf8' }).trim();
        const ms = parseInt(result, 10);
        if (Number.isFinite(ms) && ms >= 0) return Math.floor(ms / 1000);
      } catch (e) {}
    }
  } catch (e) {}
  // Unsupported platform or failed probe.
  return -1;
}
// Intensity levels:
//   'signal_only' - only collect signals, minimal CPU
//   'normal'      - standard evolution cycle
//   'aggressive'  - run distillation, reflection, deeper analysis
//   'deep'        - extended operations (future: RL, fine-tuning triggers)
// Map measured idle seconds to a level; unknown idle (< 0) is 'normal'.
function determineIntensity(idleSeconds) {
  if (idleSeconds < 0) return 'normal';
  if (idleSeconds >= DEEP_IDLE_THRESHOLD_SECONDS) return 'deep';
  return idleSeconds >= IDLE_THRESHOLD_SECONDS ? 'aggressive' : 'normal';
}
// Load the persisted scheduler state; {} when missing or unreadable.
function readScheduleState() {
  const statePath = path.join(getEvolutionDir(), 'idle_schedule_state.json');
  try {
    if (!fs.existsSync(statePath)) return {};
    return JSON.parse(fs.readFileSync(statePath, 'utf8'));
  } catch (e) {
    return {};
  }
}
// Persist scheduler state atomically (tmp file + rename); errors ignored.
function writeScheduleState(state) {
  try {
    const dir = getEvolutionDir();
    if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true });
    const statePath = path.join(dir, 'idle_schedule_state.json');
    const tmpPath = statePath + '.tmp';
    fs.writeFileSync(tmpPath, JSON.stringify(state, null, 2) + '\n', 'utf8');
    fs.renameSync(tmpPath, statePath);
  } catch (e) {}
}
// Returns scheduling recommendation with sleep multiplier and action hints.
// Reads the system idle time, maps it to an intensity level, persists the
// observation, and returns flags the evolver loop uses to decide what to
// run this cycle. OMLS_ENABLED=false disables the whole feature and always
// yields a neutral 'normal' recommendation.
function getScheduleRecommendation() {
  const enabled = String(process.env.OMLS_ENABLED || 'true').toLowerCase() !== 'false';
  if (!enabled) {
    return {
      enabled: false,
      idle_seconds: -1,
      intensity: 'normal',
      sleep_multiplier: 1,
      should_distill: false,
      should_reflect: false,
      should_deep_evolve: false,
    };
  }
  const idleSeconds = getSystemIdleSeconds();
  const intensity = determineIntensity(idleSeconds);
  const state = readScheduleState();
  const now = Date.now();
  let sleepMultiplier = 1;
  let shouldDistill = false;
  let shouldReflect = false;
  let shouldDeepEvolve = false;
  if (intensity === 'aggressive') {
    // User idle: shorten sleeps and enable heavier maintenance work.
    sleepMultiplier = 0.5;
    shouldDistill = true;
    shouldReflect = true;
  } else if (intensity === 'deep') {
    // Long idle window: run everything, including deep evolution.
    sleepMultiplier = 0.25;
    shouldDistill = true;
    shouldReflect = true;
    shouldDeepEvolve = true;
  } else if (intensity === 'signal_only') {
    // NOTE(review): determineIntensity in this file never returns
    // 'signal_only', so this branch appears unreachable — confirm whether
    // another caller injects that level or this is dead code.
    sleepMultiplier = 3;
  }
  // Record the observation for diagnostics (best-effort write).
  state.last_check = new Date().toISOString();
  state.last_idle_seconds = idleSeconds;
  state.last_intensity = intensity;
  writeScheduleState(state);
  return {
    enabled: true,
    idle_seconds: idleSeconds,
    intensity: intensity,
    sleep_multiplier: sleepMultiplier,
    should_distill: shouldDistill,
    should_reflect: shouldReflect,
    should_deep_evolve: shouldDeepEvolve,
  };
}
// Public API plus the tuning thresholds (exported for tests/diagnostics).
module.exports = {
  getSystemIdleSeconds: getSystemIdleSeconds,
  determineIntensity: determineIntensity,
  getScheduleRecommendation: getScheduleRecommendation,
  readScheduleState: readScheduleState,
  writeScheduleState: writeScheduleState,
  IDLE_THRESHOLD_SECONDS: IDLE_THRESHOLD_SECONDS,
  DEEP_IDLE_THRESHOLD_SECONDS: DEEP_IDLE_THRESHOLD_SECONDS,
};

262
src/gep/issueReporter.js Normal file
View File

@@ -0,0 +1,262 @@
// Automatic GitHub issue reporter for recurring evolver failures.
// When the evolver hits persistent errors (failure streaks, recurring errors),
// this module files a GitHub issue with sanitized logs and environment info.
const fs = require('fs');
const path = require('path');
const crypto = require('crypto');
const { getEvolutionDir } = require('./paths');
const { captureEnvFingerprint } = require('./envFingerprint');
const { redactString } = require('./sanitize');
const { getNodeId } = require('./a2aProtocol');
const STATE_FILE_NAME = 'issue_reporter_state.json';
const DEFAULT_REPO = 'autogame-17/capability-evolver';
const DEFAULT_COOLDOWN_MS = 24 * 60 * 60 * 1000;
const DEFAULT_MIN_STREAK = 5;
const MAX_LOG_CHARS = 2000;
const MAX_EVENTS = 5;
// Reporting configuration from env, or null when auto-reporting is
// disabled via EVOLVER_AUTO_ISSUE=false/0.
function getConfig() {
  const flag = String(process.env.EVOLVER_AUTO_ISSUE || 'true').toLowerCase();
  if (flag === 'false' || flag === '0') return null;
  return {
    repo: process.env.EVOLVER_ISSUE_REPO || DEFAULT_REPO,
    cooldownMs: Number(process.env.EVOLVER_ISSUE_COOLDOWN_MS) || DEFAULT_COOLDOWN_MS,
    minStreak: Number(process.env.EVOLVER_ISSUE_MIN_STREAK) || DEFAULT_MIN_STREAK,
  };
}
// First available GitHub token from the conventional env vars, else ''.
function getGithubToken() {
  const candidates = [process.env.GITHUB_TOKEN, process.env.GH_TOKEN, process.env.GITHUB_PAT];
  return candidates.find(Boolean) || '';
}
// Absolute path of the reporter's persisted state file.
function getStatePath() {
  const dir = getEvolutionDir();
  return path.join(dir, STATE_FILE_NAME);
}
// Load reporter state from disk; default empty shape when missing/corrupt.
function readState() {
  try {
    const statePath = getStatePath();
    if (fs.existsSync(statePath)) {
      return JSON.parse(fs.readFileSync(statePath, 'utf8'));
    }
  } catch (_) {}
  return { lastReportedAt: null, recentIssueKeys: [] };
}
// Persist reporter state; failures are ignored (best-effort only).
function writeState(state) {
  try {
    const dir = getEvolutionDir();
    if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true });
    const serialized = JSON.stringify(state, null, 2) + '\n';
    fs.writeFileSync(getStatePath(), serialized);
  } catch (_) {}
}
// Shorten a node id for public display (privacy): first 10 chars + '...'.
// Non-string or empty input yields 'unknown'.
function truncateNodeId(nodeId) {
  if (typeof nodeId !== 'string' || nodeId === '') return 'unknown';
  return nodeId.length > 10 ? nodeId.slice(0, 10) + '...' : nodeId;
}
// Stable 16-hex-char key derived from the loop/ban-related signals only,
// used to deduplicate issues for the same recurring failure pattern.
// Sorting makes the key order-insensitive; no relevant signals hash to a
// fixed 'unknown' key.
function computeErrorKey(signals) {
  const exactMatches = new Set(['recurring_error', 'failure_loop_detected', 'high_failure_ratio']);
  const relevant = signals
    .filter((s) => s.startsWith('recurring_errsig') || s.startsWith('ban_gene:') || exactMatches.has(s))
    .sort()
    .join('|');
  return crypto.createHash('sha256').update(relevant || 'unknown').digest('hex').slice(0, 16);
}
// Human-readable one-line summary of the dominant failure signal:
// recurring error signature > banned gene > generic fallback.
function extractErrorSignature(signals) {
  const recurring = signals.find((s) => s.startsWith('recurring_errsig'));
  if (recurring) {
    return recurring.replace(/^recurring_errsig\(\d+x\):/, '').trim().slice(0, 200);
  }
  const bannedGene = signals.find((s) => s.startsWith('ban_gene:'));
  if (bannedGene) {
    return 'Repeated failures with gene: ' + bannedGene.replace('ban_gene:', '');
  }
  return 'Persistent evolution failure';
}
// Parse N from the first parseable 'consecutive_failure_streak_N' signal;
// returns 0 when none is present.
function extractStreakCount(signals) {
  const PREFIX = 'consecutive_failure_streak_';
  for (const signal of signals) {
    if (!signal.startsWith(PREFIX)) continue;
    const parsed = parseInt(signal.slice(PREFIX.length), 10);
    if (Number.isFinite(parsed)) return parsed;
  }
  return 0;
}
// Render the last few failed events as a markdown table; reasons are
// truncated to 80 chars and passed through redactString for sanitization.
function formatRecentEvents(events) {
  if (!Array.isArray(events) || events.length === 0) return '_No recent events available._';
  const failures = events.filter((e) => e && e.outcome && e.outcome.status === 'failed');
  const rows = failures.slice(-MAX_EVENTS).map((e, idx) => {
    const intent = e.intent || '-';
    const gene = (Array.isArray(e.genes_used) && e.genes_used[0]) || '-';
    const outcome = (e.outcome && e.outcome.status) || '-';
    let reason = (e.outcome && e.outcome.reason) || '';
    if (reason.length > 80) reason = reason.slice(0, 80) + '...';
    reason = redactString(reason);
    return `| ${idx + 1} | ${intent} | ${gene} | ${outcome} | ${reason} |`;
  });
  if (rows.length === 0) return '_No failed events in recent history._';
  const header = '| # | Intent | Gene | Outcome | Reason |\n|---|--------|------|---------|--------|';
  return header + '\n' + rows.join('\n');
}
// Assemble the markdown body for an auto-filed GitHub issue.
// All free-text content (error signature, event reasons, session log) is
// passed through redactString before inclusion; the node id is truncated
// so no full device identifier leaves the machine.
// opts: { envFingerprint?, signals?, recentEvents?, sessionLog? }
function buildIssueBody(opts) {
  const fp = opts.envFingerprint || captureEnvFingerprint();
  const signals = opts.signals || [];
  const recentEvents = opts.recentEvents || [];
  const sessionLog = opts.sessionLog || '';
  const streakCount = extractStreakCount(signals);
  const errorSig = extractErrorSignature(signals);
  const nodeId = truncateNodeId(getNodeId());
  // Only the failure-classification signals are surfaced in the issue.
  const failureSignals = signals.filter(function (s) {
    return s.startsWith('recurring_') ||
      s.startsWith('consecutive_failure') ||
      s.startsWith('failure_loop') ||
      s.startsWith('high_failure') ||
      s.startsWith('ban_gene:') ||
      s === 'force_innovation_after_repair_loop';
  }).join(', ');
  // Keep only the tail of the session log, then sanitize it.
  const sanitizedLog = redactString(
    typeof sessionLog === 'string' ? sessionLog.slice(-MAX_LOG_CHARS) : ''
  );
  const eventsTable = formatRecentEvents(recentEvents);
  // Short unique id so a given report can be correlated with local logs.
  const reportId = crypto.createHash('sha256')
    .update(nodeId + '|' + Date.now() + '|' + errorSig)
    .digest('hex').slice(0, 12);
  const body = [
    '## Environment',
    '- **Evolver Version:** ' + (fp.evolver_version || 'unknown'),
    '- **Node.js:** ' + (fp.node_version || process.version),
    '- **Platform:** ' + (fp.platform || process.platform) + ' ' + (fp.arch || process.arch),
    '- **Container:** ' + (fp.container ? 'yes' : 'no'),
    '',
    '## Failure Summary',
    '- **Consecutive failures:** ' + (streakCount || 'N/A'),
    '- **Failure signals:** ' + (failureSignals || 'none'),
    '',
    '## Error Signature',
    '```',
    redactString(errorSig),
    '```',
    '',
    '## Recent Evolution Events (sanitized)',
    eventsTable,
    '',
    '## Session Log Excerpt (sanitized)',
    '```',
    sanitizedLog || '_No session log available._',
    '```',
    '',
    '---',
    '_This issue was automatically created by evolver v' + (fp.evolver_version || 'unknown') + '._',
    '_Device: ' + nodeId + ' | Report ID: ' + reportId + '_',
  ];
  return body.join('\n');
}
// Decide whether the current failure signals warrant filing an issue.
// Requires (a) a failure loop, or recurring error + high failure ratio,
// (b) any reported streak at or above the configured minimum, and
// (c) no duplicate of the same error key within the cooldown window.
function shouldReport(signals, config) {
  if (!config) return false;
  const looping = signals.includes('failure_loop_detected') ||
    (signals.includes('recurring_error') && signals.includes('high_failure_ratio'));
  if (!looping) return false;
  const streak = extractStreakCount(signals);
  if (streak > 0 && streak < config.minStreak) return false;
  const state = readState();
  if (!state.lastReportedAt) return true;
  const elapsed = Date.now() - new Date(state.lastReportedAt).getTime();
  if (elapsed >= config.cooldownMs) return true;
  const recentKeys = Array.isArray(state.recentIssueKeys) ? state.recentIssueKeys : [];
  return !recentKeys.includes(computeErrorKey(signals));
}
// POST a new issue to the GitHub REST API (15s timeout).
// Resolves { number, url } on success; throws an Error carrying the HTTP
// status and a 200-char excerpt of the response on failure.
async function createGithubIssue(repo, title, body, token) {
  const endpoint = 'https://api.github.com/repos/' + repo + '/issues';
  const response = await fetch(endpoint, {
    method: 'POST',
    headers: {
      'Authorization': 'Bearer ' + token,
      'Accept': 'application/vnd.github+json',
      'Content-Type': 'application/json',
      'X-GitHub-Api-Version': '2022-11-28',
    },
    body: JSON.stringify({ title, body }),
    signal: AbortSignal.timeout(15000),
  });
  if (response.ok) {
    const issue = await response.json();
    return { number: issue.number, url: issue.html_url };
  }
  let detail = '';
  try { detail = await response.text(); } catch (_) {}
  throw new Error('GitHub API ' + response.status + ': ' + detail.slice(0, 200));
}
// Entry point: file a GitHub issue for the current failure pattern when
// reporting is enabled, the signals qualify, and a token is available.
// Never throws; creation failures are logged and swallowed. On success the
// error key and timestamp are persisted to enforce the cooldown/dedupe.
// opts: same shape as buildIssueBody's opts (signals, recentEvents, ...).
async function maybeReportIssue(opts) {
  const config = getConfig();
  if (!config) return;
  const signals = opts.signals || [];
  if (!shouldReport(signals, config)) return;
  const token = getGithubToken();
  if (!token) {
    console.log('[IssueReporter] No GitHub token available. Skipping auto-report.');
    return;
  }
  const errorSig = extractErrorSignature(signals);
  const titleSig = errorSig.slice(0, 80);
  const title = '[Auto] Recurring failure: ' + titleSig;
  const body = buildIssueBody(opts);
  try {
    const result = await createGithubIssue(config.repo, title, body, token);
    console.log('[IssueReporter] Created GitHub issue #' + result.number + ': ' + result.url);
    // Remember this error key (bounded to the last 20) so the same pattern
    // is not re-reported within the cooldown window.
    const state = readState();
    const errorKey = computeErrorKey(signals);
    let recentKeys = Array.isArray(state.recentIssueKeys) ? state.recentIssueKeys : [];
    recentKeys.push(errorKey);
    if (recentKeys.length > 20) recentKeys = recentKeys.slice(-20);
    writeState({
      lastReportedAt: new Date().toISOString(),
      recentIssueKeys: recentKeys,
      lastIssueUrl: result.url,
      lastIssueNumber: result.number,
    });
  } catch (e) {
    console.log('[IssueReporter] Failed to create issue (non-fatal): ' + (e && e.message ? e.message : String(e)));
  }
}
module.exports = { maybeReportIssue, buildIssueBody, shouldReport };

View File

@@ -0,0 +1,89 @@
// Structured learning signal expansion: raw signals -> categorized tags for gene selection and evolution feedback.
// De-duplicate a list as trimmed strings, dropping falsy inputs and
// entries that trim to empty; insertion order of first occurrence is kept.
function unique(items) {
  const seen = new Set();
  const list = Array.isArray(items) ? items : [];
  for (const item of list) {
    if (!item) continue;
    const trimmed = String(item).trim();
    if (trimmed) seen.add(trimmed);
  }
  return [...seen];
}
// Append the trimmed string form of value to tags; falsy values are skipped.
function add(tags, value) {
  if (value) tags.push(String(value).trim());
}
// Expand raw signals into categorized tags: every signal (and its short
// "prefix" before the first colon) is kept, then a rule table of patterns
// over the combined lowercase text derives problem/action/area/risk tags.
// The regexes are preserved verbatim from the original classification rules.
function expandSignals(signals, extraText) {
  const raw = Array.isArray(signals) ? signals.map((s) => String(s)) : [];
  const tags = [];
  for (const signal of raw) {
    add(tags, signal);
    const prefix = signal.split(':')[0];
    if (prefix && prefix !== signal) add(tags, prefix);
  }
  const text = (raw.join(' ') + ' ' + String(extraText || '')).toLowerCase();
  const rules = [
    [/(error|exception|failed|unstable|log_error|runtime|429)/, ['problem:reliability', 'action:repair']],
    [/(protocol|prompt|audit|gep|schema|drift)/, ['problem:protocol', 'action:optimize', 'area:prompt']],
    [/(perf|performance|bottleneck|latency|slow|throughput)/, ['problem:performance', 'action:optimize']],
    [/(feature|capability_gap|user_feature_request|external_opportunity|stagnation recommendation)/, ['problem:capability', 'action:innovate']],
    [/(stagnation|plateau|steady_state|saturation|empty_cycle_loop|loop_detected|recurring)/, ['problem:stagnation', 'action:innovate']],
    [/(task|worker|heartbeat|hub|commitment|assignment|orchestration)/, ['area:orchestration']],
    [/(memory|narrative|reflection)/, ['area:memory']],
    [/(skill|dashboard)/, ['area:skills']],
    [/(validation|canary|rollback|constraint|blast radius|destructive)/, ['risk:validation']],
  ];
  for (const [pattern, derived] of rules) {
    if (pattern.test(text)) {
      for (const tag of derived) add(tags, tag);
    }
  }
  return unique(tags);
}
// Derive the tag set for a gene from its category, matching signals, id and
// summary. Returns [] for anything that is not an object.
function geneTags(gene) {
  if (!gene || typeof gene !== 'object') return [];
  const inputs = [];
  if (gene.category) inputs.push('action:' + String(gene.category).toLowerCase());
  if (Array.isArray(gene.signals_match)) {
    for (const s of gene.signals_match) inputs.push(s);
  }
  if (typeof gene.id === 'string') inputs.push(gene.id);
  if (typeof gene.summary === 'string') inputs.push(gene.summary);
  return expandSignals(inputs, '');
}
// Count how many of the gene's derived tags also appear among the tags
// derived from the current signals. Higher means a better match; 0 when
// either side has no tags.
function scoreTagOverlap(gene, signals) {
  const signalTags = expandSignals(signals, '');
  const candidateTags = geneTags(gene);
  if (signalTags.length === 0 || candidateTags.length === 0) return 0;
  const wanted = new Set(signalTags);
  return candidateTags.reduce((count, tag) => (wanted.has(tag) ? count + 1 : count), 0);
}
// Public API of the signal-expansion module.
module.exports = { expandSignals, geneTags, scoreTagOverlap };

92
src/gep/llmReview.js Normal file
View File

@@ -0,0 +1,92 @@
'use strict';
const { execFileSync } = require('child_process');
const fs = require('fs');
const path = require('path');
const os = require('os');
const { getRepoRoot } = require('./paths');
// Env var that gates the LLM review step; only the literal value "true"
// (any letter case) enables it.
const REVIEW_ENABLED_KEY = 'EVOLVER_LLM_REVIEW';
// Hard cap on how long the review subprocess may run.
const REVIEW_TIMEOUT_MS = 30000;
/**
 * @returns {boolean} true only when the gating env var is set to "true"
 * (case-insensitive); unset or any other value disables review.
 */
function isLlmReviewEnabled() {
  const raw = process.env[REVIEW_ENABLED_KEY] || '';
  return String(raw).toLowerCase() === 'true';
}
/**
 * Build the review prompt shown to the (future) reviewing LLM.
 * Inputs are defensively truncated: rationale to 500 chars, signals to 8
 * entries, diff to 6000 chars.
 */
function buildReviewPrompt({ diff, gene, signals, mutation }) {
  const geneLabel = gene && gene.id ? gene.id : '(unknown)';
  const categoryLabel = (mutation && mutation.category) || (gene && gene.category) || 'unknown';
  const rationaleText = mutation && mutation.rationale ? String(mutation.rationale).slice(0, 500) : '(none)';
  const signalsText = Array.isArray(signals) ? signals.slice(0, 8).join(', ') : '(none)';
  const diffText = String(diff || '').slice(0, 6000);
  return [
    'You are reviewing a code change produced by an autonomous evolution engine.',
    '## Context',
    `- Gene: ${geneLabel} (${categoryLabel})`,
    `- Signals: [${signalsText}]`,
    `- Rationale: ${rationaleText}`,
    '## Diff',
    '```diff',
    diffText,
    '```',
    '## Review Criteria',
    '1. Does this change address the stated signals?',
    '2. Are there any obvious regressions or bugs introduced?',
    '3. Is the blast radius proportionate to the problem?',
    '4. Are there any security or safety concerns?',
    '## Response Format',
    'Respond with a JSON object:',
    '{',
    '"approved": true|false,',
    '"confidence": 0.0-1.0,',
    '"concerns": ["..."],',
    '"summary": "one-line review summary"',
    '}',
  ].join('\n');
}
/**
 * Run the LLM review gate over a proposed change.
 *
 * Returns null when review is disabled via env; otherwise a verdict object
 * { approved, confidence, concerns, summary }. All failure paths (spawn
 * error, timeout, unparseable output) deliberately fail OPEN: they return an
 * approved verdict with lowered confidence so the gate never hard-blocks
 * evolution.
 *
 * NOTE(review): the spawned child is currently a stub — it reads the prompt
 * file but always prints an auto-approval; no external LLM is contacted yet.
 */
function runLlmReview({ diff, gene, signals, mutation }) {
  if (!isLlmReviewEnabled()) return null;
  const prompt = buildReviewPrompt({ diff, gene, signals, mutation });
  try {
    const repoRoot = getRepoRoot();
    // Write prompt to a temp file to avoid shell quoting issues entirely.
    // NOTE(review): the filename is keyed only by pid — concurrent reviews in
    // one process would collide; assumes callers are serial (confirm).
    const tmpFile = path.join(os.tmpdir(), 'evolver_review_prompt_' + process.pid + '.txt');
    fs.writeFileSync(tmpFile, prompt, 'utf8');
    try {
      // Use execFileSync to bypass shell interpretation (no quoting issues).
      // The child receives the prompt path as process.argv[1] (node -e shifts args).
      const reviewScript = `
const fs = require('fs');
const prompt = fs.readFileSync(process.argv[1], 'utf8');
console.log(JSON.stringify({ approved: true, confidence: 0.7, concerns: [], summary: 'auto-approved (no external LLM configured)' }));
`;
      const result = execFileSync(process.execPath, ['-e', reviewScript, tmpFile], {
        cwd: repoRoot,
        encoding: 'utf8',
        timeout: REVIEW_TIMEOUT_MS,
        stdio: ['ignore', 'pipe', 'pipe'],
        windowsHide: true,
      });
      try {
        return JSON.parse(result.trim());
      } catch (_) {
        // Malformed child output: fail open with reduced confidence.
        return { approved: true, confidence: 0.5, concerns: ['failed to parse review response'], summary: 'review parse error' };
      }
    } finally {
      // Best-effort cleanup of the prompt temp file.
      try { fs.unlinkSync(tmpFile); } catch (_) {}
    }
  } catch (e) {
    // Covers prompt-file write errors, spawn failures and timeouts.
    console.log('[LLMReview] Execution failed (non-fatal): ' + (e && e.message ? e.message : e));
    return { approved: true, confidence: 0.5, concerns: ['review execution failed'], summary: 'review timeout or error' };
  }
}
// Public API of the LLM review gate.
module.exports = { isLlmReviewEnabled, runLlmReview, buildReviewPrompt };

771
src/gep/memoryGraph.js Normal file
View File

@@ -0,0 +1,771 @@
const fs = require('fs');
const path = require('path');
const { getMemoryDir } = require('./paths');
const { normalizePersonalityState, isValidPersonalityState, personalityKey } = require('./personality');
const { isValidMutation, normalizeMutation } = require('./mutation');
// Best-effort recursive mkdir: creates `dir` (and parents) when missing and
// swallows any error so callers can treat directory creation as advisory.
function ensureDir(dir) {
  try {
    if (fs.existsSync(dir)) return;
    fs.mkdirSync(dir, { recursive: true });
  } catch (e) {}
}
// FNV-1a 32-bit hash of the input's string form, rendered as exactly 8 hex
// characters. Deterministic across runs/processes; null/undefined hash as ''.
function stableHash(input) {
  const text = String(input || '');
  let hash = 0x811c9dc5; // FNV-1a 32-bit offset basis (2166136261)
  for (let i = 0; i < text.length; i += 1) {
    hash = Math.imul(hash ^ text.charCodeAt(i), 16777619); // FNV prime
  }
  return (hash >>> 0).toString(16).padStart(8, '0');
}
// Current wall-clock time as an ISO-8601 UTC string.
function nowIso() {
  const now = new Date();
  return now.toISOString();
}
// Collapse a raw error string into a stable signature: paths, hex literals
// and numbers become placeholders so the same logical error matches across
// runs. Returns null for blank input; result is capped at 220 chars.
function normalizeErrorSignature(text) {
  const trimmed = String(text || '').trim();
  if (!trimmed) return null;
  let sig = trimmed.toLowerCase();
  sig = sig.replace(/[a-z]:\\[^ \n\r\t]+/gi, '<path>'); // Windows paths
  sig = sig.replace(/\/[^ \n\r\t]+/g, '<path>'); // Unix paths
  sig = sig.replace(/\b0x[0-9a-f]+\b/gi, '<hex>');
  sig = sig.replace(/\b\d+\b/g, '<n>');
  sig = sig.replace(/\s+/g, ' ');
  return sig.slice(0, 220);
}
// Normalize a signal list for similarity matching: blanks are dropped and
// raw "errsig:<text>" entries are replaced with a stable hashed form so
// noisy error signatures (paths, numbers) compare equal across runs.
function normalizeSignalsForMatching(signals) {
  const input = Array.isArray(signals) ? signals : [];
  const normalized = [];
  for (const raw of input) {
    const signal = String(raw || '').trim();
    if (!signal) continue;
    if (signal.startsWith('errsig:')) {
      const sig = normalizeErrorSignature(signal.slice('errsig:'.length));
      if (sig) normalized.push('errsig_norm:' + stableHash(sig));
      continue;
    }
    normalized.push(signal);
  }
  return normalized;
}
// Build a stable, order-insensitive key for a signal set. Signals are
// normalized first (noisy error signatures hashed), deduped, sorted, then
// joined with '|'; an empty set maps to '(none)'.
function computeSignalKey(signals) {
  const normalized = normalizeSignalsForMatching(signals);
  const distinct = Array.from(new Set(normalized.filter(Boolean)));
  distinct.sort();
  return distinct.join('|') || '(none)';
}
// Return the normalized signature of the first "errsig:<raw>" entry, or
// null when the list carries no error signature.
function extractErrorSignatureFromSignals(signals) {
  if (!Array.isArray(signals)) return null;
  for (const raw of signals) {
    const signal = String(raw || '');
    if (signal.startsWith('errsig:')) {
      return normalizeErrorSignature(signal.slice('errsig:'.length));
    }
  }
  return null;
}
// Resolve the append-only memory graph JSONL file; the MEMORY_GRAPH_PATH env
// var overrides the default location under the evolution directory.
function memoryGraphPath() {
  const { getEvolutionDir } = require('./paths');
  const defaultPath = path.join(getEvolutionDir(), 'memory_graph.jsonl');
  return process.env.MEMORY_GRAPH_PATH || defaultPath;
}
// Mutable companion state file (last-action snapshot) beside the graph.
function memoryGraphStatePath() {
  const { getEvolutionDir } = require('./paths');
  const evoDir = getEvolutionDir();
  return path.join(evoDir, 'memory_graph_state.json');
}
// Append one object as a single JSON line to a JSONL file, creating the
// parent directory on demand.
function appendJsonl(filePath, obj) {
  const parent = path.dirname(filePath);
  ensureDir(parent);
  const line = JSON.stringify(obj) + '\n';
  fs.appendFileSync(filePath, line, 'utf8');
}
// Read and parse a JSON file; return `fallback` when the file is missing,
// blank, or unparseable (never throws).
function readJsonIfExists(filePath, fallback) {
  try {
    if (!fs.existsSync(filePath)) return fallback;
    const raw = fs.readFileSync(filePath, 'utf8');
    return raw.trim() ? JSON.parse(raw) : fallback;
  } catch (e) {
    return fallback;
  }
}
// Write JSON via a temp file + rename so readers never observe a torn file.
// Output is pretty-printed (2-space) with a trailing newline.
function writeJsonAtomic(filePath, obj) {
  const dir = path.dirname(filePath);
  try {
    if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true });
  } catch (e) {}
  const tmpPath = `${filePath}.tmp`;
  const payload = JSON.stringify(obj, null, 2) + '\n';
  fs.writeFileSync(tmpPath, payload, 'utf8');
  fs.renameSync(tmpPath, filePath);
}
// Read up to the last `limitLines` JSONL events from the memory graph file.
// Unparseable lines are skipped; any I/O failure degrades to [] because the
// memory graph is strictly advisory.
function tryReadMemoryGraphEvents(limitLines = 2000) {
  try {
    const graphFile = memoryGraphPath();
    if (!fs.existsSync(graphFile)) return [];
    const lines = fs
      .readFileSync(graphFile, 'utf8')
      .split('\n')
      .map((line) => line.trim())
      .filter(Boolean);
    const tail = lines.slice(Math.max(0, lines.length - limitLines));
    const events = [];
    for (const line of tail) {
      try {
        events.push(JSON.parse(line));
      } catch (e) {}
    }
    return events;
  } catch (e) {
    return [];
  }
}
// Jaccard similarity over normalized signal sets. Two empty sets count as
// identical (1); exactly one empty set yields 0.
function jaccard(aList, bList) {
  const left = new Set(normalizeSignalsForMatching(aList).map(String));
  const right = new Set(normalizeSignalsForMatching(bList).map(String));
  if (left.size === 0 && right.size === 0) return 1;
  if (left.size === 0 || right.size === 0) return 0;
  let shared = 0;
  for (const item of left) {
    if (right.has(item)) shared += 1;
  }
  const union = left.size + right.size - shared;
  return union === 0 ? 0 : shared / union;
}
// Exponential half-life decay weight for a timestamp: 0.5^(ageDays/halfLife).
// Returns 1 (no decay) for a non-positive/invalid half-life, an unparseable
// timestamp, or a timestamp that is not in the past.
function decayWeight(updatedAtIso, halfLifeDays) {
  const halfLife = Number(halfLifeDays);
  if (!Number.isFinite(halfLife) || halfLife <= 0) return 1;
  const updatedMs = Date.parse(updatedAtIso);
  if (!Number.isFinite(updatedMs)) return 1;
  const ageDays = (Date.now() - updatedMs) / 86400000; // ms per day
  if (!Number.isFinite(ageDays) || ageDays <= 0) return 1;
  return Math.pow(0.5, ageDays / halfLife);
}
/**
 * Aggregate outcome events into (signal_key, gene_id) edge stats.
 *
 * Only events with type 'MemoryGraphEvent' and kind 'outcome' count. Each
 * edge tracks success/fail tallies plus the newest timestamp and its score.
 * Laplace smoothing is applied downstream (edgeExpectedSuccess), so raw zero
 * tallies are fine here.
 *
 * Robustness fix: timestamps are compared numerically with NaN guarded.
 * Previously `Date.parse(ts) > Date.parse(cur.last_ts)` was always false once
 * an unparseable `last_ts` had been stored (NaN comparison), freezing
 * last_ts/last_score forever; unparseable timestamps are now ignored and can
 * never become `last_ts`.
 *
 * @param {Array<object>} events - memory graph events (filtered internally)
 * @returns {Map<string, {signalKey, geneId, success, fail, last_ts, last_score}>}
 */
function aggregateEdges(events) {
  const map = new Map();
  for (const ev of events) {
    if (!ev || ev.type !== 'MemoryGraphEvent') continue;
    if (ev.kind !== 'outcome') continue;
    const signalKey = ev.signal && ev.signal.key ? String(ev.signal.key) : '(none)';
    const geneId = ev.gene && ev.gene.id ? String(ev.gene.id) : null;
    if (!geneId) continue;
    const k = `${signalKey}::${geneId}`;
    const cur = map.get(k) || { signalKey, geneId, success: 0, fail: 0, last_ts: null, last_score: null };
    const status = ev.outcome && ev.outcome.status ? String(ev.outcome.status) : 'unknown';
    if (status === 'success') cur.success += 1;
    else if (status === 'failed') cur.fail += 1;
    const ts = ev.ts || ev.created_at || ev.at;
    const t = ts ? Date.parse(ts) : NaN;
    if (Number.isFinite(t)) {
      // An unparseable previous timestamp never blocks a valid newer one.
      const prev = cur.last_ts ? Date.parse(cur.last_ts) : NaN;
      if (!Number.isFinite(prev) || t > prev) {
        cur.last_ts = ts;
        cur.last_score =
          ev.outcome && Number.isFinite(Number(ev.outcome.score)) ? Number(ev.outcome.score) : cur.last_score;
      }
    }
    map.set(k, cur);
  }
  return map;
}
/**
 * Aggregate outcome events into per-gene stats keyed by gene_id alone,
 * giving a signal-independent success prior per gene.
 *
 * Robustness fix: timestamps are compared numerically with NaN guarded — an
 * unparseable stored `last_ts` previously made every later comparison false
 * (NaN), freezing last_ts/last_score; unparseable event timestamps are now
 * skipped entirely.
 *
 * @param {Array<object>} events - memory graph events (filtered internally)
 * @returns {Map<string, {geneId, success, fail, last_ts, last_score}>}
 */
function aggregateGeneOutcomes(events) {
  const map = new Map();
  for (const ev of events) {
    if (!ev || ev.type !== 'MemoryGraphEvent') continue;
    if (ev.kind !== 'outcome') continue;
    const geneId = ev.gene && ev.gene.id ? String(ev.gene.id) : null;
    if (!geneId) continue;
    const cur = map.get(geneId) || { geneId, success: 0, fail: 0, last_ts: null, last_score: null };
    const status = ev.outcome && ev.outcome.status ? String(ev.outcome.status) : 'unknown';
    if (status === 'success') cur.success += 1;
    else if (status === 'failed') cur.fail += 1;
    const ts = ev.ts || ev.created_at || ev.at;
    const t = ts ? Date.parse(ts) : NaN;
    if (Number.isFinite(t)) {
      // An unparseable previous timestamp never blocks a valid newer one.
      const prev = cur.last_ts ? Date.parse(cur.last_ts) : NaN;
      if (!Number.isFinite(prev) || t > prev) {
        cur.last_ts = ts;
        cur.last_score =
          ev.outcome && Number.isFinite(Number(ev.outcome.score)) ? Number(ev.outcome.score) : cur.last_score;
      }
    }
    map.set(geneId, cur);
  }
  return map;
}
// Laplace-smoothed success probability for an edge, decayed by recency.
// Returns { p, w, total, value }: smoothed probability, decay weight,
// attempt count, and their product (the usable score).
function edgeExpectedSuccess(edge, opts) {
  const stats = edge || { success: 0, fail: 0, last_ts: null };
  const successes = Number(stats.success) || 0;
  const failures = Number(stats.fail) || 0;
  const attempts = successes + failures;
  const p = (successes + 1) / (attempts + 2); // Laplace smoothing
  const halfLife = opts && Number.isFinite(Number(opts.half_life_days)) ? Number(opts.half_life_days) : 30;
  const w = decayWeight(stats.last_ts || '', halfLife);
  return { p, w, total: attempts, value: p * w };
}
/**
 * Derive gene-selection advice from past outcomes in the memory graph.
 *
 * Reads the last 2000 graph events, aggregates (signal_key, gene) edges and
 * per-gene outcome priors, then scores each candidate gene against the
 * current signal set: the exact signal key plus any historical key whose
 * Jaccard similarity is >= 0.34.
 *
 * @returns {{currentSignalKey: string, preferredGeneId: string|null,
 *            bannedGeneIds: Set<string>, explanation: string[]}}
 *   Top-scoring gene, a suppression set for historically poor genes
 *   (bypassed when driftEnabled), and human-readable explanation tags.
 */
function getMemoryAdvice({ signals, genes, driftEnabled }) {
  const events = tryReadMemoryGraphEvents(2000);
  const edges = aggregateEdges(events);
  const geneOutcomes = aggregateGeneOutcomes(events);
  const curSignals = Array.isArray(signals) ? signals : [];
  const curKey = computeSignalKey(curSignals);
  const bannedGeneIds = new Set();
  const scoredGeneIds = [];
  // Similarity: consider exact key first, then any key with overlap.
  const seenKeys = new Set();
  const candidateKeys = [];
  candidateKeys.push({ key: curKey, sim: 1 });
  seenKeys.add(curKey);
  for (const ev of events) {
    if (!ev || ev.type !== 'MemoryGraphEvent') continue;
    const k = ev.signal && ev.signal.key ? String(ev.signal.key) : '(none)';
    if (seenKeys.has(k)) continue;
    const sigs = ev.signal && Array.isArray(ev.signal.signals) ? ev.signal.signals : [];
    const sim = jaccard(curSignals, sigs);
    // 0.34 ~ "at least a third of the union overlaps" relatedness cutoff.
    if (sim >= 0.34) {
      candidateKeys.push({ key: k, sim });
      seenKeys.add(k);
    }
  }
  const byGene = new Map();
  for (const ck of candidateKeys) {
    for (const g of Array.isArray(genes) ? genes : []) {
      if (!g || g.type !== 'Gene' || !g.id) continue;
      const k = `${ck.key}::${g.id}`;
      const edge = edges.get(k);
      const cur = byGene.get(g.id) || { geneId: g.id, best: 0, attempts: 0, prior: 0, prior_attempts: 0 };
      // Signal->Gene edge score (if available)
      if (edge) {
        const ex = edgeExpectedSuccess(edge, { half_life_days: 30 });
        const weighted = ex.value * ck.sim;
        if (weighted > cur.best) cur.best = weighted;
        cur.attempts = Math.max(cur.attempts, ex.total);
      }
      // Gene->Outcome prior (independent of signal): stabilizer when signal edges are sparse.
      const gEdge = geneOutcomes.get(String(g.id));
      if (gEdge) {
        // Longer half-life (45d) than edges: the global prior should fade slower.
        const gx = edgeExpectedSuccess(gEdge, { half_life_days: 45 });
        cur.prior = Math.max(cur.prior, gx.value);
        cur.prior_attempts = Math.max(cur.prior_attempts, gx.total);
      }
      byGene.set(g.id, cur);
    }
  }
  for (const [geneId, info] of byGene.entries()) {
    // Signal-matched evidence dominates; the global prior adds a small bonus
    // (x0.12) when edge evidence exists, or a discounted stand-in (x0.4) when not.
    const combined = info.best > 0 ? info.best + info.prior * 0.12 : info.prior * 0.4;
    scoredGeneIds.push({ geneId, score: combined, attempts: info.attempts, prior: info.prior });
    // Low-efficiency path suppression (unless drift is explicit).
    if (!driftEnabled && info.attempts >= 2 && info.best < 0.18) {
      bannedGeneIds.add(geneId);
    }
    // Also suppress genes with consistently poor global outcomes when signal edges are sparse.
    if (!driftEnabled && info.attempts < 2 && info.prior_attempts >= 3 && info.prior < 0.12) {
      bannedGeneIds.add(geneId);
    }
  }
  scoredGeneIds.sort((a, b) => b.score - a.score);
  const preferredGeneId = scoredGeneIds.length ? scoredGeneIds[0].geneId : null;
  const explanation = [];
  if (preferredGeneId) explanation.push(`memory_prefer:${preferredGeneId}`);
  if (bannedGeneIds.size) explanation.push(`memory_ban:${Array.from(bannedGeneIds).slice(0, 6).join(',')}`);
  if (preferredGeneId) {
    const top = scoredGeneIds.find(x => x && x.geneId === preferredGeneId);
    if (top && Number.isFinite(Number(top.prior)) && top.prior > 0) explanation.push(`gene_prior:${top.prior.toFixed(3)}`);
  }
  if (driftEnabled) explanation.push('random_drift:enabled');
  return {
    currentSignalKey: curKey,
    preferredGeneId,
    bannedGeneIds,
    explanation,
  };
}
// Append a bare 'signal' snapshot event (no gene/action attached) to the
// graph and return the appended event.
function recordSignalSnapshot({ signals, observations }) {
  const signalKey = computeSignalKey(signals);
  const ts = nowIso();
  const errorSignature = extractErrorSignatureFromSignals(signals);
  const event = {
    type: 'MemoryGraphEvent',
    kind: 'signal',
    id: `mge_${Date.now()}_${stableHash(`${signalKey}|signal|${ts}`)}`,
    ts,
    signal: {
      key: signalKey,
      signals: Array.isArray(signals) ? signals : [],
      error_signature: errorSignature || null,
    },
    observed: observations && typeof observations === 'object' ? observations : null,
  };
  appendJsonl(memoryGraphPath(), event);
  return event;
}
// Render the one-line natural-language hypothesis for a gene selection.
function buildHypothesisText({ signalKey, signals, geneId, geneCategory, driftEnabled }) {
  const signalCount = Array.isArray(signals) ? signals.length : 0;
  const mode = driftEnabled ? 'drift' : 'directed';
  let geneLabel = '(none)';
  if (geneId) {
    geneLabel = geneCategory ? `${geneId}(${geneCategory})` : `${geneId}`;
  }
  return `Given signal_key=${signalKey} with ${signalCount} signals, selecting gene=${geneLabel} under mode=${mode} is expected to reduce repeated errors and improve stability.`;
}
/**
 * Append a 'hypothesis' event: the prediction that the selected gene will
 * improve the situation described by the current signals.
 *
 * Mutation and personality payloads are validated and normalized before
 * embedding; invalid payloads are stored as null rather than rejected.
 * Returns { hypothesisId, signalKey } so later attempt/outcome events can
 * link back to this hypothesis.
 */
function recordHypothesis({
  signals,
  mutation,
  personality_state,
  selectedGene,
  selector,
  driftEnabled,
  selectedBy,
  capsulesUsed,
  observations,
}) {
  const signalKey = computeSignalKey(signals);
  const geneId = selectedGene && selectedGene.id ? String(selectedGene.id) : null;
  const geneCategory = selectedGene && selectedGene.category ? String(selectedGene.category) : null;
  const ts = nowIso();
  const errsig = extractErrorSignatureFromSignals(signals);
  // Id mixes wall-clock with a stable hash of (key, gene, ts) for uniqueness.
  const hypothesisId = `hyp_${Date.now()}_${stableHash(`${signalKey}|${geneId || 'none'}|${ts}`)}`;
  const personalityState = personality_state || null;
  const mutNorm = mutation && isValidMutation(mutation) ? normalizeMutation(mutation) : null;
  const psNorm = personalityState && isValidPersonalityState(personalityState) ? normalizePersonalityState(personalityState) : null;
  const ev = {
    type: 'MemoryGraphEvent',
    kind: 'hypothesis',
    id: `mge_${Date.now()}_${stableHash(`${hypothesisId}|${ts}`)}`,
    ts,
    signal: { key: signalKey, signals: Array.isArray(signals) ? signals : [], error_signature: errsig || null },
    hypothesis: {
      id: hypothesisId,
      text: buildHypothesisText({ signalKey, signals, geneId, geneCategory, driftEnabled }),
      // Predictions are left open; the matching outcome event fills reality in.
      predicted_outcome: { status: null, score: null },
    },
    mutation: mutNorm
      ? {
          id: mutNorm.id,
          category: mutNorm.category,
          trigger_signals: mutNorm.trigger_signals,
          target: mutNorm.target,
          expected_effect: mutNorm.expected_effect,
          risk_level: mutNorm.risk_level,
        }
      : null,
    personality: psNorm
      ? {
          key: personalityKey(psNorm),
          state: psNorm,
        }
      : null,
    gene: { id: geneId, category: geneCategory },
    action: {
      drift: !!driftEnabled,
      selected_by: selectedBy || 'selector',
      selector: selector || null,
    },
    capsules: {
      used: Array.isArray(capsulesUsed) ? capsulesUsed.map(String).filter(Boolean) : [],
    },
    observed: observations && typeof observations === 'object' ? observations : null,
  };
  appendJsonl(memoryGraphPath(), ev);
  return { hypothesisId, signalKey };
}
// True only when the exact signal 'log_error' is present in the list.
function hasErrorSignal(signals) {
  return Array.isArray(signals) && signals.some((s) => s === 'log_error');
}
/**
 * Append an 'attempt' event (the action actually taken) and snapshot it into
 * the mutable state file as `last_action` so the next cycle can close the
 * loop via recordOutcomeFromState.
 *
 * The JSONL graph stays append-only; only the small state JSON is rewritten
 * (atomically). Returns { actionId, signalKey }.
 */
function recordAttempt({
  signals,
  mutation,
  personality_state,
  selectedGene,
  selector,
  driftEnabled,
  selectedBy,
  hypothesisId,
  capsulesUsed,
  observations,
}) {
  const signalKey = computeSignalKey(signals);
  const geneId = selectedGene && selectedGene.id ? String(selectedGene.id) : null;
  const geneCategory = selectedGene && selectedGene.category ? String(selectedGene.category) : null;
  const ts = nowIso();
  const errsig = extractErrorSignatureFromSignals(signals);
  const actionId = `act_${Date.now()}_${stableHash(`${signalKey}|${geneId || 'none'}|${ts}`)}`;
  const personalityState = personality_state || null;
  // Invalid mutation/personality payloads are embedded as null, not rejected.
  const mutNorm = mutation && isValidMutation(mutation) ? normalizeMutation(mutation) : null;
  const psNorm = personalityState && isValidPersonalityState(personalityState) ? normalizePersonalityState(personalityState) : null;
  const ev = {
    type: 'MemoryGraphEvent',
    kind: 'attempt',
    id: `mge_${Date.now()}_${stableHash(actionId)}`,
    ts,
    signal: { key: signalKey, signals: Array.isArray(signals) ? signals : [], error_signature: errsig || null },
    mutation: mutNorm
      ? {
          id: mutNorm.id,
          category: mutNorm.category,
          trigger_signals: mutNorm.trigger_signals,
          target: mutNorm.target,
          expected_effect: mutNorm.expected_effect,
          risk_level: mutNorm.risk_level,
        }
      : null,
    personality: psNorm
      ? {
          key: personalityKey(psNorm),
          state: psNorm,
        }
      : null,
    gene: { id: geneId, category: geneCategory },
    hypothesis: hypothesisId ? { id: String(hypothesisId) } : null,
    action: {
      id: actionId,
      drift: !!driftEnabled,
      selected_by: selectedBy || 'selector',
      selector: selector || null,
    },
    capsules: {
      used: Array.isArray(capsulesUsed) ? capsulesUsed.map(String).filter(Boolean) : [],
    },
    observed: observations && typeof observations === 'object' ? observations : null,
  };
  appendJsonl(memoryGraphPath(), ev);
  // State is mutable; graph is append-only.
  const statePath = memoryGraphStatePath();
  const state = readJsonIfExists(statePath, { last_action: null });
  // last_action carries everything the outcome step needs: identifiers for
  // linkage, the error baseline, and the pre-action observations.
  state.last_action = {
    action_id: actionId,
    signal_key: signalKey,
    signals: Array.isArray(signals) ? signals : [],
    mutation_id: mutNorm ? mutNorm.id : null,
    mutation_category: mutNorm ? mutNorm.category : null,
    mutation_risk_level: mutNorm ? mutNorm.risk_level : null,
    personality_key: psNorm ? personalityKey(psNorm) : null,
    personality_state: psNorm || null,
    gene_id: geneId,
    gene_category: geneCategory,
    hypothesis_id: hypothesisId ? String(hypothesisId) : null,
    capsules_used: Array.isArray(capsulesUsed) ? capsulesUsed.map(String).filter(Boolean) : [],
    had_error: hasErrorSignal(signals),
    created_at: ts,
    outcome_recorded: false,
    baseline_observed: observations && typeof observations === 'object' ? observations : null,
  };
  writeJsonAtomic(statePath, state);
  return { actionId, signalKey };
}
// Map the (error-before, error-after) pair to a coarse outcome verdict with
// a fixed score and note per transition.
function inferOutcomeFromSignals({ prevHadError, currentHasError }) {
  if (prevHadError) {
    return currentHasError
      ? { status: 'failed', score: 0.2, note: 'error_persisted' }
      : { status: 'success', score: 0.85, note: 'error_cleared' };
  }
  return currentHasError
    ? { status: 'failed', score: 0.15, note: 'new_error_appeared' }
    : { status: 'success', score: 0.6, note: 'stable_no_error' };
}
// Clamp a numeric value into [0, 1]; non-numeric input maps to 0.
function clamp01(x) {
  const value = Number(x);
  if (!Number.isFinite(value)) return 0;
  if (value < 0) return 0;
  if (value > 1) return 1;
  return value;
}
// Scan the tail (last 400 lines, newest first) of evidence text for an
// EvolutionEvent JSON line and derive its outcome. Missing fields are filled
// from each other: status from score (>= 0.5 is success), score from status
// (0.75 success / 0.25 failed). Returns null when nothing usable is found.
function tryParseLastEvolutionEventOutcome(evidenceText) {
  const text = String(evidenceText || '');
  if (!text) return null;
  const tail = text.split('\n').slice(-400);
  for (let i = tail.length - 1; i >= 0; i--) {
    const line = tail[i].trim();
    // Cheap substring pre-filter before paying for JSON.parse.
    if (!line || !line.includes('"type"') || !line.includes('EvolutionEvent')) continue;
    let parsed;
    try {
      parsed = JSON.parse(line);
    } catch (e) {
      continue;
    }
    if (!parsed || parsed.type !== 'EvolutionEvent') continue;
    const outcome = parsed.outcome && typeof parsed.outcome === 'object' ? parsed.outcome : null;
    if (!outcome) continue;
    const status = outcome.status === 'success' || outcome.status === 'failed' ? outcome.status : null;
    const score = Number.isFinite(Number(outcome.score)) ? clamp01(Number(outcome.score)) : null;
    if (!status && score == null) continue;
    return {
      status: status || (score != null && score >= 0.5 ? 'success' : 'failed'),
      score: score != null ? score : status === 'success' ? 0.75 : 0.25,
      note: 'evolutionevent_observed',
    };
  }
  return null;
}
/**
 * Infer the outcome of the last action, preferring hard evidence over heuristics.
 *
 * Priority:
 * 1) An EvolutionEvent outcome parsed from the log-tail evidence wins outright.
 * 2) Otherwise start from the error-presence heuristic and nudge its score by
 *    the error-count delta (capped at +/-0.12, 50 errors ~ full swing) and the
 *    scan-time improvement ratio (capped at +/-0.06), clamped into [0, 1].
 */
function inferOutcomeEnhanced({ prevHadError, currentHasError, baselineObserved, currentObserved }) {
  const evidence =
    currentObserved &&
    currentObserved.evidence &&
    (currentObserved.evidence.recent_session_tail || currentObserved.evidence.today_log_tail)
      ? currentObserved.evidence
      : null;
  const combinedEvidence = evidence
    ? `${String(evidence.recent_session_tail || '')}\n${String(evidence.today_log_tail || '')}`
    : '';
  const observed = tryParseLastEvolutionEventOutcome(combinedEvidence);
  if (observed) return observed;
  const base = inferOutcomeFromSignals({ prevHadError, currentHasError });
  const prevErrCount =
    baselineObserved && Number.isFinite(Number(baselineObserved.recent_error_count))
      ? Number(baselineObserved.recent_error_count)
      : null;
  const curErrCount =
    currentObserved && Number.isFinite(Number(currentObserved.recent_error_count))
      ? Number(currentObserved.recent_error_count)
      : null;
  let score = base.score;
  if (prevErrCount != null && curErrCount != null) {
    // Fewer errors than baseline raises the score, more lowers it.
    const delta = prevErrCount - curErrCount;
    score += Math.max(-0.12, Math.min(0.12, delta / 50));
  }
  const prevScan =
    baselineObserved && Number.isFinite(Number(baselineObserved.scan_ms)) ? Number(baselineObserved.scan_ms) : null;
  const curScan =
    currentObserved && Number.isFinite(Number(currentObserved.scan_ms)) ? Number(currentObserved.scan_ms) : null;
  if (prevScan != null && curScan != null && prevScan > 0) {
    // Relative scan-time improvement contributes a small secondary nudge.
    const ratio = (prevScan - curScan) / prevScan;
    score += Math.max(-0.06, Math.min(0.06, ratio));
  }
  return { status: base.status, score: clamp01(score), note: `${base.note}|heuristic_delta` };
}
/**
 * Build (but do not append) a 'confidence_edge' snapshot event for one
 * (signal_key -> gene) edge. Re-reads the graph tail so the stats include
 * any outcome event already appended; derived_from links the snapshot to the
 * outcome event it was computed after.
 */
function buildConfidenceEdgeEvent({ signalKey, signals, geneId, geneCategory, outcomeEventId, halfLifeDays }) {
  const events = tryReadMemoryGraphEvents(2000);
  const edges = aggregateEdges(events);
  const k = `${signalKey}::${geneId}`;
  // Missing edge -> zero tallies; edgeExpectedSuccess smooths this to p=0.5.
  const edge = edges.get(k) || { success: 0, fail: 0, last_ts: null };
  const ex = edgeExpectedSuccess(edge, { half_life_days: halfLifeDays });
  const ts = nowIso();
  return {
    type: 'MemoryGraphEvent',
    kind: 'confidence_edge',
    id: `mge_${Date.now()}_${stableHash(`${signalKey}|${geneId}|confidence|${ts}`)}`,
    ts,
    signal: { key: signalKey, signals: Array.isArray(signals) ? signals : [] },
    gene: { id: geneId, category: geneCategory || null },
    edge: { signal_key: signalKey, gene_id: geneId },
    stats: {
      success: Number(edge.success) || 0,
      fail: Number(edge.fail) || 0,
      attempts: Number(ex.total) || 0,
      p: ex.p,
      decay_weight: ex.w,
      value: ex.value,
      half_life_days: halfLifeDays,
      updated_at: ts,
    },
    derived_from: { outcome_event_id: outcomeEventId || null },
  };
}
/**
 * Build (but do not append) a 'confidence_gene_outcome' snapshot event: the
 * signal-independent success prior for one gene, computed from the graph
 * tail. derived_from links it to the outcome event it was computed after.
 */
function buildGeneOutcomeConfidenceEvent({ geneId, geneCategory, outcomeEventId, halfLifeDays }) {
  const events = tryReadMemoryGraphEvents(2000);
  const geneOutcomes = aggregateGeneOutcomes(events);
  // Missing gene -> zero tallies; edgeExpectedSuccess smooths this to p=0.5.
  const edge = geneOutcomes.get(String(geneId)) || { success: 0, fail: 0, last_ts: null };
  const ex = edgeExpectedSuccess(edge, { half_life_days: halfLifeDays });
  const ts = nowIso();
  return {
    type: 'MemoryGraphEvent',
    kind: 'confidence_gene_outcome',
    id: `mge_${Date.now()}_${stableHash(`${geneId}|gene_outcome|confidence|${ts}`)}`,
    ts,
    gene: { id: String(geneId), category: geneCategory || null },
    edge: { gene_id: String(geneId) },
    stats: {
      success: Number(edge.success) || 0,
      fail: Number(edge.fail) || 0,
      attempts: Number(ex.total) || 0,
      p: ex.p,
      decay_weight: ex.w,
      value: ex.value,
      half_life_days: halfLifeDays,
      updated_at: ts,
    },
    derived_from: { outcome_event_id: outcomeEventId || null },
  };
}
/**
 * Close the loop on the last recorded attempt: infer its outcome from the
 * current signals/observations, append an 'outcome' event linked back to the
 * stored action/hypothesis, then append confidence snapshots and mark the
 * state's last_action as consumed.
 *
 * Returns the appended outcome event, or null when there is no pending
 * last_action (or its outcome was already recorded) — making repeated calls
 * idempotent per attempt.
 */
function recordOutcomeFromState({ signals, observations }) {
  const statePath = memoryGraphStatePath();
  const state = readJsonIfExists(statePath, { last_action: null });
  const last = state && state.last_action ? state.last_action : null;
  if (!last || !last.action_id) return null;
  if (last.outcome_recorded) return null;
  const currentHasError = hasErrorSignal(signals);
  const inferred = inferOutcomeEnhanced({
    prevHadError: !!last.had_error,
    currentHasError,
    baselineObserved: last.baseline_observed || null,
    currentObserved: observations || null,
  });
  const ts = nowIso();
  const errsig = extractErrorSignatureFromSignals(signals);
  const ev = {
    type: 'MemoryGraphEvent',
    kind: 'outcome',
    id: `mge_${Date.now()}_${stableHash(`${last.action_id}|outcome|${ts}`)}`,
    ts,
    // Signal context is the one captured at attempt time, not the current one.
    signal: {
      key: String(last.signal_key || '(none)'),
      signals: Array.isArray(last.signals) ? last.signals : [],
      error_signature: errsig || null,
    },
    mutation:
      last.mutation_id || last.mutation_category || last.mutation_risk_level
        ? {
            id: last.mutation_id || null,
            category: last.mutation_category || null,
            risk_level: last.mutation_risk_level || null,
          }
        : null,
    personality:
      last.personality_key || last.personality_state
        ? {
            key: last.personality_key || null,
            state: last.personality_state || null,
          }
        : null,
    gene: { id: last.gene_id || null, category: last.gene_category || null },
    action: { id: String(last.action_id) },
    hypothesis: last.hypothesis_id ? { id: String(last.hypothesis_id) } : null,
    outcome: {
      status: inferred.status,
      score: inferred.score,
      note: inferred.note,
      observed: { current_signals: Array.isArray(signals) ? signals : [] },
    },
    confidence: {
      // This is an interpretable, decayed success estimate derived from outcomes; aggregation is computed at read-time.
      half_life_days: 30,
    },
    observed: observations && typeof observations === 'object' ? observations : null,
    baseline: last.baseline_observed || null,
    capsules: {
      used: Array.isArray(last.capsules_used) ? last.capsules_used : [],
    },
  };
  appendJsonl(memoryGraphPath(), ev);
  // Persist explicit confidence snapshots (append-only) for auditability.
  // Best-effort: a failure here must not lose the outcome just appended.
  try {
    if (last.gene_id) {
      const edgeEv = buildConfidenceEdgeEvent({
        signalKey: String(last.signal_key || '(none)'),
        signals: Array.isArray(last.signals) ? last.signals : [],
        geneId: String(last.gene_id),
        geneCategory: last.gene_category || null,
        outcomeEventId: ev.id,
        halfLifeDays: 30,
      });
      appendJsonl(memoryGraphPath(), edgeEv);
      const geneEv = buildGeneOutcomeConfidenceEvent({
        geneId: String(last.gene_id),
        geneCategory: last.gene_category || null,
        outcomeEventId: ev.id,
        halfLifeDays: 45,
      });
      appendJsonl(memoryGraphPath(), geneEv);
    }
  } catch (e) {}
  // Mark the action consumed so this outcome is only recorded once.
  last.outcome_recorded = true;
  last.outcome_recorded_at = ts;
  state.last_action = last;
  writeJsonAtomic(statePath, state);
  return ev;
}
/**
 * Annotate the graph with an externally-received asset as a candidate only.
 * Returns the appended event, or null when the asset lacks a type or id.
 */
function recordExternalCandidate({ asset, source, signals }) {
  // Append-only annotation: external assets enter as candidates only.
  // This does not affect outcome aggregation (which only uses kind === 'outcome').
  const a = asset && typeof asset === 'object' ? asset : null;
  const type = a && a.type ? String(a.type) : null;
  const id = a && a.id ? String(a.id) : null;
  if (!type || !id) return null;
  const ts = nowIso();
  const signalKey = computeSignalKey(signals);
  const ev = {
    type: 'MemoryGraphEvent',
    kind: 'external_candidate',
    id: `mge_${Date.now()}_${stableHash(`${type}|${id}|external|${ts}`)}`,
    ts,
    signal: { key: signalKey, signals: Array.isArray(signals) ? signals : [] },
    external: {
      source: source || 'external',
      received_at: ts,
    },
    asset: { type, id },
    candidate: {
      // Minimal hints for later local triggering/validation.
      // Only Capsule assets carry trigger/gene/confidence hints; other types
      // record empty/null placeholders.
      trigger: type === 'Capsule' && Array.isArray(a.trigger) ? a.trigger : [],
      gene: type === 'Capsule' && a.gene ? String(a.gene) : null,
      confidence: type === 'Capsule' && Number.isFinite(Number(a.confidence)) ? Number(a.confidence) : null,
    },
  };
  appendJsonl(memoryGraphPath(), ev);
  return ev;
}
// Public API: path/key helpers, the read-time advisor, and the append-only
// record_* writers. Aggregation internals stay private to this module.
module.exports = {
  memoryGraphPath,
  computeSignalKey,
  tryReadMemoryGraphEvents,
  getMemoryAdvice,
  recordSignalSnapshot,
  recordHypothesis,
  recordAttempt,
  recordOutcomeFromState,
  recordExternalCandidate,
};

View File

@@ -0,0 +1,203 @@
// ---------------------------------------------------------------------------
// MemoryGraphAdapter -- stable interface boundary for memory graph operations.
//
// Default implementation delegates to the local JSONL-based memoryGraph.js.
// SaaS providers can supply a remote adapter by setting MEMORY_GRAPH_PROVIDER=remote
// and configuring MEMORY_GRAPH_REMOTE_URL / MEMORY_GRAPH_REMOTE_KEY.
//
// The adapter is designed so that the open-source evolver always works offline
// with the local implementation. Remote is optional and degrades gracefully.
// ---------------------------------------------------------------------------
const localGraph = require('./memoryGraph');
// ---------------------------------------------------------------------------
// Adapter interface contract (all methods must be implemented by providers):
//
// getAdvice({ signals, genes, driftEnabled }) => { preferredGeneId, bannedGeneIds, currentSignalKey, explanation }
// recordSignalSnapshot({ signals, observations }) => event
// recordHypothesis({ signals, mutation, personality_state, selectedGene, selector, driftEnabled, selectedBy, capsulesUsed, observations }) => { hypothesisId, signalKey }
// recordAttempt({ signals, mutation, personality_state, selectedGene, selector, driftEnabled, selectedBy, hypothesisId, capsulesUsed, observations }) => { actionId, signalKey }
// recordOutcome({ signals, observations }) => event | null
// recordExternalCandidate({ asset, source, signals }) => event | null
// memoryGraphPath() => string
// computeSignalKey(signals) => string
// tryReadMemoryGraphEvents(limit) => event[]
// ---------------------------------------------------------------------------
// ---------------------------------------------------------------------------
// Local adapter (default) -- wraps memoryGraph.js without any behavior change
// ---------------------------------------------------------------------------
// Default provider: thin pass-throughs to the local JSONL memory graph,
// preserving each underlying function's signature and return value.
const localAdapter = {
  name: 'local',
  getAdvice: (opts) => localGraph.getMemoryAdvice(opts),
  recordSignalSnapshot: (opts) => localGraph.recordSignalSnapshot(opts),
  recordHypothesis: (opts) => localGraph.recordHypothesis(opts),
  recordAttempt: (opts) => localGraph.recordAttempt(opts),
  recordOutcome: (opts) => localGraph.recordOutcomeFromState(opts),
  recordExternalCandidate: (opts) => localGraph.recordExternalCandidate(opts),
  memoryGraphPath: () => localGraph.memoryGraphPath(),
  computeSignalKey: (signals) => localGraph.computeSignalKey(signals),
  tryReadMemoryGraphEvents: (limit) => localGraph.tryReadMemoryGraphEvents(limit),
};
// ---------------------------------------------------------------------------
// Remote adapter (SaaS) -- calls external KG service with local fallback
// ---------------------------------------------------------------------------
// Build the remote (SaaS) knowledge-graph adapter.
// Reads: advice is fetched remotely, falling back to the local graph on any
// remote failure. Writes: always land in the local append-only graph first
// (source of truth), then are fire-and-forget synced to the remote ingest
// endpoint. Configured via MEMORY_GRAPH_REMOTE_URL / _KEY / _TIMEOUT_MS.
function buildRemoteAdapter() {
  const remoteUrl = process.env.MEMORY_GRAPH_REMOTE_URL || '';
  const remoteKey = process.env.MEMORY_GRAPH_REMOTE_KEY || '';
  // NaN/0/unset all fall back to 5s via the `||`.
  const timeoutMs = Number(process.env.MEMORY_GRAPH_REMOTE_TIMEOUT_MS) || 5000;
  // POST `body` as JSON to `<remoteUrl><endpoint>`, aborting after timeoutMs.
  // Throws on missing config, non-2xx status, network error, or abort.
  async function remoteCall(endpoint, body) {
    if (!remoteUrl) throw new Error('MEMORY_GRAPH_REMOTE_URL not configured');
    const url = `${remoteUrl.replace(/\/+$/, '')}${endpoint}`;
    const controller = new AbortController();
    const timer = setTimeout(() => controller.abort(), timeoutMs);
    try {
      const res = await fetch(url, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          ...(remoteKey ? { Authorization: `Bearer ${remoteKey}` } : {}),
        },
        body: JSON.stringify(body),
        signal: controller.signal,
      });
      if (!res.ok) {
        throw new Error(`remote_kg_error: ${res.status}`);
      }
      return await res.json();
    } finally {
      // Always clear the abort timer, including on the error paths.
      clearTimeout(timer);
    }
  }
  // Wrap remote call with local fallback -- ensures offline resilience.
  function withFallback(localFn, remoteFn) {
    return async function (...args) {
      try {
        return await remoteFn(...args);
      } catch (e) {
        // Fallback to local on any remote failure (network, timeout, config).
        return localFn(...args);
      }
    };
  }
  return {
    name: 'remote',
    // getAdvice is the primary candidate for remote enhancement (richer graph reasoning).
    getAdvice: withFallback(
      (opts) => localGraph.getMemoryAdvice(opts),
      async (opts) => {
        const result = await remoteCall('/kg/advice', {
          signals: opts.signals,
          // Send only gene metadata, not full gene bodies.
          genes: (opts.genes || []).map((g) => ({ id: g.id, category: g.category, type: g.type })),
          driftEnabled: opts.driftEnabled,
        });
        // Normalize remote response to match local contract.
        return {
          currentSignalKey: result.currentSignalKey || localGraph.computeSignalKey(opts.signals),
          preferredGeneId: result.preferredGeneId || null,
          bannedGeneIds: new Set(result.bannedGeneIds || []),
          explanation: Array.isArray(result.explanation) ? result.explanation : [],
        };
      }
    ),
    // Write operations: always write locally first, then async-sync to remote.
    // This preserves the append-only local graph as source of truth.
    // Remote sync failures are deliberately swallowed (best-effort mirror).
    recordSignalSnapshot(opts) {
      const ev = localGraph.recordSignalSnapshot(opts);
      remoteCall('/kg/ingest', { kind: 'signal', event: ev }).catch(() => {});
      return ev;
    },
    recordHypothesis(opts) {
      const result = localGraph.recordHypothesis(opts);
      remoteCall('/kg/ingest', { kind: 'hypothesis', event: result }).catch(() => {});
      return result;
    },
    recordAttempt(opts) {
      const result = localGraph.recordAttempt(opts);
      remoteCall('/kg/ingest', { kind: 'attempt', event: result }).catch(() => {});
      return result;
    },
    recordOutcome(opts) {
      const ev = localGraph.recordOutcomeFromState(opts);
      // Outcome may legitimately be null (nothing to record); skip sync then.
      if (ev) {
        remoteCall('/kg/ingest', { kind: 'outcome', event: ev }).catch(() => {});
      }
      return ev;
    },
    recordExternalCandidate(opts) {
      const ev = localGraph.recordExternalCandidate(opts);
      if (ev) {
        remoteCall('/kg/ingest', { kind: 'external_candidate', event: ev }).catch(() => {});
      }
      return ev;
    },
    // Path/key/read helpers always use the local graph.
    memoryGraphPath() {
      return localGraph.memoryGraphPath();
    },
    computeSignalKey(signals) {
      return localGraph.computeSignalKey(signals);
    },
    tryReadMemoryGraphEvents(limit) {
      return localGraph.tryReadMemoryGraphEvents(limit);
    },
  };
}
// ---------------------------------------------------------------------------
// Provider resolution
// ---------------------------------------------------------------------------
// Select the adapter from MEMORY_GRAPH_PROVIDER ('remote' opts into the SaaS
// adapter; anything else, including unset, means local).
function resolveAdapter() {
  const provider = String(process.env.MEMORY_GRAPH_PROVIDER || 'local').toLowerCase().trim();
  return provider === 'remote' ? buildRemoteAdapter() : localAdapter;
}
module.exports = resolveAdapter();

186
src/gep/mutation.js Normal file
View File

@@ -0,0 +1,186 @@
// Coerce to a finite number and clamp into [0, 1]; non-numeric input maps to 0.
function clamp01(x) {
  const n = Number(x);
  return Number.isFinite(n) ? Math.min(1, Math.max(0, n)) : 0;
}
// Current wall-clock time in epoch milliseconds (a function, so tests can stub it).
function nowTsMs() {
  const ts = Date.now();
  return ts;
}
// Normalize a list to trimmed, non-empty, de-duplicated strings
// (first occurrence wins; non-arrays yield []).
function uniqStrings(list) {
  const src = Array.isArray(list) ? list : [];
  const cleaned = src.map((x) => String(x || '').trim()).filter(Boolean);
  return [...new Set(cleaned)];
}
// True when the signal list indicates a live error condition. Resolution
// markers ('issue_already_resolved', 'openclaw_self_healed') veto everything.
function hasErrorishSignal(signals) {
  const list = (Array.isArray(signals) ? signals : []).map((s) => String(s || ''));
  if (list.includes('issue_already_resolved') || list.includes('openclaw_self_healed')) return false;
  return (
    list.includes('log_error') ||
    list.some((s) => s.startsWith('errsig:') || s.startsWith('errsig_norm:'))
  );
}
// Opportunity signals that indicate a chance to innovate (not just fix).
const OPPORTUNITY_SIGNALS = [
  'user_feature_request',
  'user_improvement_suggestion',
  'perf_bottleneck',
  'capability_gap',
  'stable_success_plateau',
  'external_opportunity',
  'issue_already_resolved',
  'openclaw_self_healed',
  'empty_cycle_loop_detected',
];
// True when any opportunity signal is present, either exactly or as a
// 'name:detail' prefixed variant.
function hasOpportunitySignal(signals) {
  const list = (Array.isArray(signals) ? signals : []).map((s) => String(s || ''));
  return OPPORTUNITY_SIGNALS.some(
    (name) => list.includes(name) || list.some((s) => s.startsWith(name + ':'))
  );
}
// Decide what kind of mutation this cycle should attempt:
//   repair   -- error-ish signals present (highest priority)
//   innovate -- drift mode, an opportunity signal, or an innovation-heavy strategy preset
//   optimize -- the default when nothing specific applies
function mutationCategoryFromContext({ signals, driftEnabled }) {
  if (hasErrorishSignal(signals)) return 'repair';
  if (driftEnabled || hasOpportunitySignal(signals)) return 'innovate';
  // Consult the configured strategy preset; ignore any load failure.
  try {
    const strategy = require('./strategy').resolveStrategy();
    if (strategy && typeof strategy.innovate === 'number' && strategy.innovate >= 0.5) return 'innovate';
  } catch (_) {}
  return 'optimize';
}
// Human-readable expected effect for a mutation category; unknown categories
// get a generic robustness statement.
function expectedEffectFromCategory(category) {
  switch (String(category || '')) {
    case 'repair':
      return 'reduce runtime errors, increase stability, and lower failure rate';
    case 'optimize':
      return 'improve success rate and reduce repeated operational cost';
    case 'innovate':
      return 'explore new strategy combinations to escape local optimum';
    default:
      return 'improve robustness and success probability';
  }
}
// Derive the mutation target label: a specific gene when one is selected,
// otherwise the generic protocol behavior.
function targetFromGene(selectedGene) {
  const id = selectedGene && selectedGene.id;
  return id ? `gene:${String(id)}` : 'behavior:protocol';
}
function isHighRiskPersonality(p) {
// Conservative definition: low rigor or high risk_tolerance is treated as high-risk personality.
const rigor = p && Number.isFinite(Number(p.rigor)) ? Number(p.rigor) : null;
const riskTol = p && Number.isFinite(Number(p.risk_tolerance)) ? Number(p.risk_tolerance) : null;
if (rigor != null && rigor < 0.5) return true;
if (riskTol != null && riskTol > 0.6) return true;
return false;
}
// High-risk mutations are only allowed for a demonstrably careful personality:
// rigor >= 0.6 AND risk_tolerance <= 0.5. Missing rigor fails (treated as 0);
// missing risk_tolerance fails too (treated as 1).
function isHighRiskMutationAllowed(personalityState) {
  const p = personalityState || {};
  const rigor = Number.isFinite(Number(p.rigor)) ? Number(p.rigor) : 0;
  const riskTol = Number.isFinite(Number(p.risk_tolerance)) ? Number(p.risk_tolerance) : 1;
  return rigor >= 0.6 && riskTol <= 0.5;
}
// Build a Mutation asset describing the intent of the upcoming change.
// Category comes from context (repair > innovate > optimize); two hard safety
// rules may then downgrade category and/or risk level before returning.
// @param {string[]} signals - trigger signals (deduped into trigger_signals)
// @param {object} selectedGene - gene driving the change (used for target label)
// @param {boolean} driftEnabled - exploration mode flag
// @param {object} personalityState - current personality, used for safety gating
// @param {boolean} allowHighRisk - opt-in escalation for innovate mutations
// @param {string} [target] / [expected_effect] - explicit overrides
// @returns {object} a Mutation object (see isValidMutation for the shape)
function buildMutation({
  signals,
  selectedGene,
  driftEnabled,
  personalityState,
  allowHighRisk = false,
  target,
  expected_effect,
} = {}) {
  const ts = nowTsMs();
  const category = mutationCategoryFromContext({ signals, driftEnabled: !!driftEnabled });
  const triggerSignals = uniqStrings(signals);
  const base = {
    type: 'Mutation',
    id: `mut_${ts}`,
    category,
    trigger_signals: triggerSignals,
    target: String(target || targetFromGene(selectedGene)),
    expected_effect: String(expected_effect || expectedEffectFromCategory(category)),
    risk_level: 'low',
  };
  // Default risk assignment: innovate is medium; others low.
  if (category === 'innovate') base.risk_level = 'medium';
  // Optional high-risk escalation (rare, and guarded by strict safety constraints).
  if (allowHighRisk && category === 'innovate') {
    base.risk_level = 'high';
  }
  // Safety constraints (hard), applied in order AFTER the escalation above:
  // - forbid innovate + high-risk personality (downgrade innovation to optimize)
  // - forbid high-risk mutation unless personality satisfies constraints
  const highRiskPersonality = isHighRiskPersonality(personalityState || null);
  if (base.category === 'innovate' && highRiskPersonality) {
    base.category = 'optimize';
    base.expected_effect = 'safety downgrade: optimize under high-risk personality (avoid innovate+high-risk combo)';
    base.risk_level = 'low';
    // Record the downgrade as a synthetic signal for later analysis.
    base.trigger_signals = uniqStrings([...(base.trigger_signals || []), 'safety:avoid_innovate_with_high_risk_personality']);
  }
  if (base.risk_level === 'high' && !isHighRiskMutationAllowed(personalityState || null)) {
    // Downgrade rather than emit illegal high-risk mutation.
    base.risk_level = 'medium';
    base.trigger_signals = uniqStrings([...(base.trigger_signals || []), 'safety:downgrade_high_risk']);
  }
  return base;
}
// Structural validation for a Mutation asset; returns false on any missing or
// ill-typed field rather than throwing.
function isValidMutation(obj) {
  if (!obj || typeof obj !== 'object' || obj.type !== 'Mutation') return false;
  const isNonEmptyString = (v) => typeof v === 'string' && v.length > 0;
  return (
    isNonEmptyString(obj.id) &&
    ['repair', 'optimize', 'innovate'].includes(String(obj.category)) &&
    Array.isArray(obj.trigger_signals) &&
    isNonEmptyString(obj.target) &&
    isNonEmptyString(obj.expected_effect) &&
    ['low', 'medium', 'high'].includes(String(obj.risk_level))
  );
}
// Coerce an arbitrary object into a well-formed Mutation, filling defaults for
// every missing or invalid field (category 'optimize', risk 'low', fresh id).
function normalizeMutation(obj) {
  const m = obj && typeof obj === 'object' ? obj : {};
  const category = ['repair', 'optimize', 'innovate'].includes(String(m.category))
    ? String(m.category)
    : 'optimize';
  const risk = ['low', 'medium', 'high'].includes(String(m.risk_level))
    ? String(m.risk_level)
    : 'low';
  return {
    type: 'Mutation',
    id: typeof m.id === 'string' ? m.id : `mut_${nowTsMs()}`,
    category,
    trigger_signals: uniqStrings(m.trigger_signals),
    target: typeof m.target === 'string' ? m.target : 'behavior:protocol',
    // Note: the default effect is derived from the RAW category, as before.
    expected_effect:
      typeof m.expected_effect === 'string' ? m.expected_effect : expectedEffectFromCategory(m.category),
    risk_level: risk,
  };
}
// Public API: mutation construction, validation/normalization, and the
// personality risk gates (also consumed by personality.js).
module.exports = {
  clamp01,
  buildMutation,
  isValidMutation,
  normalizeMutation,
  isHighRiskMutationAllowed,
  isHighRiskPersonality,
  hasOpportunitySignal,
};

108
src/gep/narrativeMemory.js Normal file
View File

@@ -0,0 +1,108 @@
'use strict';
const fs = require('fs');
const path = require('path');
const { getNarrativePath, getEvolutionDir } = require('./paths');
// Retention caps enforced by trimNarrative: keep at most this many entries...
const MAX_NARRATIVE_ENTRIES = 30;
// ...and at most this many characters in the narrative file (oldest trimmed first).
const MAX_NARRATIVE_SIZE = 12000;
// Best-effort recursive mkdir; errors (e.g. permissions) are ignored.
function ensureDir(dir) {
  try {
    if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true });
  } catch (_) {
    // deliberately swallowed: narrative writing is non-critical
  }
}
// Append one human-readable entry to the evolution narrative markdown file.
// Builds a "### [timestamp] CATEGORY - status" section from the run's gene,
// signals, mutation, outcome, blast radius and capsule, then trims and writes
// the file atomically (tmp file + rename).
// All inputs are optional; missing pieces degrade to placeholders.
function recordNarrative({ gene, signals, mutation, outcome, blast, capsule }) {
  const narrativePath = getNarrativePath();
  ensureDir(path.dirname(narrativePath));
  // 'YYYY-MM-DD HH:MM:SS' (UTC, from toISOString).
  const ts = new Date().toISOString().replace('T', ' ').slice(0, 19);
  const geneId = gene && gene.id ? gene.id : '(auto)';
  const category = (mutation && mutation.category) || (gene && gene.category) || 'unknown';
  const status = outcome && outcome.status ? outcome.status : 'unknown';
  const score = outcome && typeof outcome.score === 'number' ? outcome.score.toFixed(2) : '?';
  // Only the first few signals, to keep entries compact.
  const signalsSummary = Array.isArray(signals) ? signals.slice(0, 4).join(', ') : '(none)';
  const filesChanged = blast ? blast.files : 0;
  const linesChanged = blast ? blast.lines : 0;
  // Free-text fields are truncated to keep the file bounded.
  const rationale = mutation && mutation.rationale
    ? String(mutation.rationale).slice(0, 200) : '';
  const strategy = gene && Array.isArray(gene.strategy)
    ? gene.strategy.slice(0, 3).map((s, i) => `  ${i + 1}. ${s}`).join('\n') : '';
  const capsuleSummary = capsule && capsule.summary ? String(capsule.summary).slice(0, 200) : '';
  // Optional lines are dropped (filtered out) when empty.
  const entry = [
    `### [${ts}] ${category.toUpperCase()} - ${status}`,
    `- Gene: ${geneId} | Score: ${score} | Scope: ${filesChanged} files, ${linesChanged} lines`,
    `- Signals: [${signalsSummary}]`,
    rationale ? `- Why: ${rationale}` : null,
    strategy ? `- Strategy:\n${strategy}` : null,
    capsuleSummary ? `- Result: ${capsuleSummary}` : null,
    '',
  ].filter(line => line !== null).join('\n');
  let existing = '';
  try {
    if (fs.existsSync(narrativePath)) {
      existing = fs.readFileSync(narrativePath, 'utf8');
    }
  } catch (_) {}
  // Seed the file with a title on first write (or if it was emptied).
  if (!existing.trim()) {
    existing = '# Evolution Narrative\n\nA chronological record of evolution decisions and outcomes.\n\n';
  }
  const combined = existing + entry;
  const trimmed = trimNarrative(combined);
  // Atomic replace: write to tmp, then rename over the real file.
  const tmp = narrativePath + '.tmp';
  fs.writeFileSync(tmp, trimmed, 'utf8');
  fs.renameSync(tmp, narrativePath);
}
// Bound the narrative file: keep at most MAX_NARRATIVE_ENTRIES '### [' entries
// and at most MAX_NARRATIVE_SIZE characters, dropping the oldest entries first.
// The leading header (text before the first '###') is always preserved.
function trimNarrative(content) {
  if (content.length <= MAX_NARRATIVE_SIZE) return content;
  const firstEntry = content.indexOf('###');
  // No recognizable entries: fall back to a raw tail cut.
  if (firstEntry < 0) return content.slice(-MAX_NARRATIVE_SIZE);
  const header = content.slice(0, firstEntry);
  const entries = content.slice(firstEntry).split(/(?=^### \[)/m);
  // Enforce the entry-count cap first (keep the newest entries).
  const kept = entries.slice(Math.max(0, entries.length - MAX_NARRATIVE_ENTRIES));
  let result = header + kept.join('');
  // Still too big: drop five more of the oldest kept entries.
  if (result.length > MAX_NARRATIVE_SIZE) {
    const keep = Math.max(1, kept.length - 5);
    result = header + kept.slice(-keep).join('');
  }
  return result;
}
// Load a tail summary of the narrative file for prompt injection.
// Returns up to the last 8 entries, truncated to `maxChars` characters
// (default 4000), snapped to an entry boundary where possible.
// Returns '' on any problem (missing file, empty file, no entries, I/O error).
function loadNarrativeSummary(maxChars) {
  const limit = Number.isFinite(maxChars) ? maxChars : 4000;
  const narrativePath = getNarrativePath();
  try {
    if (!fs.existsSync(narrativePath)) return '';
    const content = fs.readFileSync(narrativePath, 'utf8');
    if (!content.trim()) return '';
    const headerEnd = content.indexOf('###');
    if (headerEnd < 0) return '';
    const entries = content.slice(headerEnd).split(/(?=^### \[)/m);
    const recent = entries.slice(-8);
    let summary = recent.join('');
    if (summary.length > limit) {
      summary = summary.slice(-limit);
      // Re-align to the first complete entry after the raw cut.
      const firstEntry = summary.indexOf('### [');
      if (firstEntry > 0) summary = summary.slice(firstEntry);
    }
    return summary.trim();
  } catch (_) {
    // Narrative is best-effort context; never propagate read errors.
    return '';
  }
}
module.exports = { recordNarrative, loadNarrativeSummary, trimNarrative };

133
src/gep/paths.js Normal file
View File

@@ -0,0 +1,133 @@
const path = require('path');
const fs = require('fs');
// Resolve the git repository root the evolver should operate on.
// Precedence: EVOLVER_REPO_ROOT env override > evolver's own directory (if it
// has .git) > a parent directory with .git (only when explicitly opted in via
// EVOLVER_USE_PARENT_GIT=true) > evolver's own directory as a last resort.
// @returns {string} absolute path of the chosen root
function getRepoRoot() {
  if (process.env.EVOLVER_REPO_ROOT) {
    return process.env.EVOLVER_REPO_ROOT;
  }
  const ownDir = path.resolve(__dirname, '..', '..');
  // Safety: check evolver's own directory first to prevent operating on a
  // parent repo that happens to contain .git (which could cause data loss
  // when git reset --hard runs in the wrong scope).
  if (fs.existsSync(path.join(ownDir, '.git'))) {
    return ownDir;
  }
  let dir = path.dirname(ownDir);
  // Walk upward. The extra `dir !== prev` guard makes the loop terminate at
  // any filesystem root: on Windows path.dirname('C:\\') === 'C:\\', so the
  // old `dir !== '/'` condition alone spun forever when no .git was found.
  let prev = null;
  while (dir !== '/' && dir !== '.' && dir !== prev) {
    if (fs.existsSync(path.join(dir, '.git'))) {
      if (process.env.EVOLVER_USE_PARENT_GIT === 'true') {
        console.warn('[evolver] Using parent git repository at:', dir);
        return dir;
      }
      console.warn(
        '[evolver] Detected .git in parent directory', dir,
        '-- ignoring. Set EVOLVER_USE_PARENT_GIT=true to override,',
        'or EVOLVER_REPO_ROOT to specify the target directory explicitly.'
      );
      return ownDir;
    }
    prev = dir;
    dir = path.dirname(dir);
  }
  return ownDir;
}
// Resolve the workspace root: OPENCLAW_WORKSPACE override, else a 'workspace'
// subdirectory of the repo root when it exists, else the repo root itself.
// (Standalone / Cursor / non-OpenClaw setups have no workspace/ layout; the
// old 4-level-up fallback assumed OpenClaw's skill directory structure.)
function getWorkspaceRoot() {
  if (process.env.OPENCLAW_WORKSPACE) return process.env.OPENCLAW_WORKSPACE;
  const repoRoot = getRepoRoot();
  const candidate = path.join(repoRoot, 'workspace');
  return fs.existsSync(candidate) ? candidate : repoRoot;
}
// Log directory: EVOLVER_LOGS_DIR override, else <workspace>/logs.
function getLogsDir() {
  const override = process.env.EVOLVER_LOGS_DIR;
  return override ? override : path.join(getWorkspaceRoot(), 'logs');
}
// The evolver daemon's main loop log, inside the logs directory.
function getEvolverLogPath() {
  return path.join(getLogsDir(), 'evolver_loop.log');
}
// Memory directory: MEMORY_DIR override, else <workspace>/memory.
function getMemoryDir() {
  const override = process.env.MEMORY_DIR;
  return override ? override : path.join(getWorkspaceRoot(), 'memory');
}
// --- Session Scope Isolation ---
// When EVOLVER_SESSION_SCOPE is set (e.g., to a Discord channel ID or project name),
// evolution state, memory graph, and assets are isolated to a per-scope subdirectory.
// This prevents cross-channel/cross-project memory contamination.
// When NOT set, everything works as before (global scope, backward compatible).
// Sanitized session scope from EVOLVER_SESSION_SCOPE, or null when unset or
// unsafe. Only [a-zA-Z0-9_.-] survive (path-traversal protection); '.'/'..'
// or anything containing '..' is rejected; result capped at 128 chars.
function getSessionScope() {
  const raw = String(process.env.EVOLVER_SESSION_SCOPE || '').trim();
  if (!raw) return null;
  const safe = raw.replace(/[^a-zA-Z0-9_\-\.]/g, '_').slice(0, 128);
  if (!safe) return null;
  if (/^\.{1,2}$/.test(safe) || safe.includes('..')) return null;
  return safe;
}
// Evolution-state directory (EVOLUTION_DIR override, else <memory>/evolution);
// isolated into a per-scope subdirectory when a session scope is active.
function getEvolutionDir() {
  const baseDir = process.env.EVOLUTION_DIR || path.join(getMemoryDir(), 'evolution');
  const scope = getSessionScope();
  return scope ? path.join(baseDir, 'scopes', scope) : baseDir;
}
// GEP assets directory (GEP_ASSETS_DIR override, else <repo>/assets/gep);
// scope-isolated the same way as the evolution directory.
function getGepAssetsDir() {
  const baseDir = process.env.GEP_ASSETS_DIR || path.join(getRepoRoot(), 'assets', 'gep');
  const scope = getSessionScope();
  return scope ? path.join(baseDir, 'scopes', scope) : baseDir;
}
// Skills directory: SKILLS_DIR override, else <workspace>/skills.
function getSkillsDir() {
  const override = process.env.SKILLS_DIR;
  return override ? override : path.join(getWorkspaceRoot(), 'skills');
}
// Markdown narrative of evolution decisions, inside the evolution directory.
function getNarrativePath() {
  return path.join(getEvolutionDir(), 'evolution_narrative.md');
}
// Evolution principles doc: a repo-root override wins over the bundled default.
function getEvolutionPrinciplesPath() {
  const repoRoot = getRepoRoot();
  const custom = path.join(repoRoot, 'EVOLUTION_PRINCIPLES.md');
  return fs.existsSync(custom)
    ? custom
    : path.join(repoRoot, 'assets', 'gep', 'EVOLUTION_PRINCIPLES.md');
}
// Append-only JSONL log of reflection entries, inside the evolution directory.
function getReflectionLogPath() {
  return path.join(getEvolutionDir(), 'reflection_log.jsonl');
}
// Public API: every path/dir resolver plus the session-scope helper.
module.exports = {
  getRepoRoot,
  getWorkspaceRoot,
  getLogsDir,
  getEvolverLogPath,
  getMemoryDir,
  getEvolutionDir,
  getGepAssetsDir,
  getSkillsDir,
  getSessionScope,
  getNarrativePath,
  getEvolutionPrinciplesPath,
  getReflectionLogPath,
};

379
src/gep/personality.js Normal file
View File

@@ -0,0 +1,379 @@
const fs = require('fs');
const path = require('path');
const { getMemoryDir } = require('./paths');
const { hasOpportunitySignal } = require('./mutation');
// ISO-8601 timestamp for the current moment.
function nowIso() {
  return new Date().toISOString();
}
// Coerce to a finite number and clamp into [0, 1]; non-numeric input maps to 0.
function clamp01(x) {
  const n = Number(x);
  return Number.isFinite(n) ? Math.min(1, Math.max(0, n)) : 0;
}
// Create the directory (recursively) if missing; errors are ignored on purpose.
function ensureDir(dir) {
  try {
    if (!fs.existsSync(dir)) fs.mkdirSync(dir, { recursive: true });
  } catch (_) {
    // best-effort: persistence failures must not crash the caller
  }
}
// Read and parse a JSON file; return `fallback` when the file is missing,
// empty/whitespace-only, or unparseable.
function readJsonIfExists(filePath, fallback) {
  try {
    if (!fs.existsSync(filePath)) return fallback;
    const raw = fs.readFileSync(filePath, 'utf8');
    return raw.trim() ? JSON.parse(raw) : fallback;
  } catch {
    return fallback;
  }
}
// Write pretty-printed JSON (trailing newline) atomically: tmp file + rename.
function writeJsonAtomic(filePath, obj) {
  ensureDir(path.dirname(filePath));
  const tmp = `${filePath}.tmp`;
  fs.writeFileSync(tmp, `${JSON.stringify(obj, null, 2)}\n`, 'utf8');
  fs.renameSync(tmp, filePath);
}
// Absolute path of the persisted personality model. Lives in the (scope-aware)
// evolution directory so per-session isolation applies.
// Cleanup: the previous version computed getMemoryDir() into an unused local
// and hid the './paths' require on the same line as the return statement.
function personalityFilePath() {
  const { getEvolutionDir } = require('./paths');
  return path.join(getEvolutionDir(), 'personality_state.json');
}
// Conservative starting personality: protocol-first, safe, low-risk.
function defaultPersonalityState() {
  const defaults = [
    ['rigor', 0.7],
    ['creativity', 0.35],
    ['verbosity', 0.25],
    ['risk_tolerance', 0.4],
    ['obedience', 0.85],
  ];
  return { type: 'PersonalityState', ...Object.fromEntries(defaults) };
}
// Coerce any input into a complete PersonalityState with every trait
// clamped into [0, 1] (missing/invalid traits become 0).
function normalizePersonalityState(state) {
  const s = state && typeof state === 'object' ? state : {};
  const out = { type: 'PersonalityState' };
  for (const k of ['rigor', 'creativity', 'verbosity', 'risk_tolerance', 'obedience']) {
    out[k] = clamp01(s[k]);
  }
  return out;
}
// True only for an object tagged 'PersonalityState' whose five traits are all
// finite numbers within [0, 1].
function isValidPersonalityState(obj) {
  if (!obj || typeof obj !== 'object' || obj.type !== 'PersonalityState') return false;
  return ['rigor', 'creativity', 'verbosity', 'risk_tolerance', 'obedience'].every((k) => {
    const n = Number(obj[k]);
    return Number.isFinite(n) && n >= 0 && n <= 1;
  });
}
// Round x to the nearest multiple of `step`; an invalid or non-positive step
// makes this a no-op.
function roundToStep(x, step) {
  const s = Number(step);
  const usable = Number.isFinite(s) && s > 0;
  return usable ? Math.round(Number(x) / s) * s : x;
}
// Stable bucket key for a personality: each trait rounded to a 0.1 grid and
// rendered as 'rigor=0.7|creativity=0.3|...' (fixed order).
function personalityKey(state) {
  const s = normalizePersonalityState(state);
  const step = 0.1;
  return ['rigor', 'creativity', 'verbosity', 'risk_tolerance', 'obedience']
    .map((k) => `${k}=${roundToStep(s[k], step).toFixed(1)}`)
    .join('|');
}
// Per-trait deltas (to - from) over normalized states, sorted by descending
// absolute magnitude.
function getParamDeltas(fromState, toState) {
  const a = normalizePersonalityState(fromState);
  const b = normalizePersonalityState(toState);
  return ['rigor', 'creativity', 'verbosity', 'risk_tolerance', 'obedience']
    .map((param) => ({ param, delta: Number(b[param]) - Number(a[param]) }))
    .sort((x, y) => Math.abs(y.delta) - Math.abs(x.delta));
}
// Score a personality's stats entry in [0, 1]: 75% Laplace-smoothed success
// rate, 25% avg_score quality proxy, damped for tiny samples.
function personalityScore(statsEntry) {
  const e = statsEntry && typeof statsEntry === 'object' ? statsEntry : {};
  const succ = Number(e.success) || 0;
  const fail = Number(e.fail) || 0;
  const total = succ + fail;
  const p = (succ + 1) / (total + 2);          // Laplace-smoothed success probability
  const sampleWeight = Math.min(1, total / 8); // penalize tiny-sample overconfidence
  const avg = Number(e.avg_score);
  const q = Number.isFinite(avg) ? clamp01(avg) : 0.5;
  return p * 0.75 + q * 0.25 * sampleWeight;
}
// Among stats entries with at least 3 recorded outcomes, pick the one with the
// highest personalityScore. Returns { key, score, entry } or null.
function chooseBestKnownPersonality(statsByKey) {
  const stats = statsByKey && typeof statsByKey === 'object' ? statsByKey : {};
  let best = null;
  for (const [key, entry] of Object.entries(stats)) {
    const e = entry || {};
    const samples = (Number(e.success) || 0) + (Number(e.fail) || 0);
    if (samples < 3) continue; // too little evidence
    const score = personalityScore(e);
    if (best === null || score > best.score) best = { key, score, entry: e };
  }
  return best;
}
// Parse a 'rigor=0.7|creativity=0.3|...' key back into a normalized state.
// Unknown or malformed segments are ignored; missing traits keep defaults.
function parseKeyToState(key) {
  const out = defaultPersonalityState();
  const known = ['rigor', 'creativity', 'verbosity', 'risk_tolerance', 'obedience'];
  for (const part of String(key || '').split('|')) {
    const seg = part.trim();
    if (!seg) continue;
    const [k, v] = seg.split('=').map((x) => String(x || '').trim());
    if (k && known.includes(k)) out[k] = clamp01(Number(v));
  }
  return normalizePersonalityState(out);
}
// Apply up to 2 personality mutations to a state. Each delta is clipped into
// [-0.2, +0.2] and the resulting trait re-clamped into [0, 1]; unknown params
// and non-finite deltas are skipped without consuming the 2-mutation budget.
// @returns {{state: object, applied: object[]}} new state + the mutations
//          actually applied (with their clipped deltas).
function applyPersonalityMutations(state, mutations) {
  let cur = normalizePersonalityState(state);
  const muts = Array.isArray(mutations) ? mutations : [];
  const applied = [];
  let count = 0;
  for (const m of muts) {
    if (!m || typeof m !== 'object') continue;
    const param = String(m.param || '').trim();
    if (!['rigor', 'creativity', 'verbosity', 'risk_tolerance', 'obedience'].includes(param)) continue;
    const delta = Number(m.delta);
    if (!Number.isFinite(delta)) continue;
    // Clip per-mutation movement, then clamp the trait into [0, 1].
    const clipped = Math.max(-0.2, Math.min(0.2, delta));
    cur[param] = clamp01(Number(cur[param]) + clipped);
    applied.push({ type: 'PersonalityMutation', param, delta: clipped, reason: String(m.reason || '').slice(0, 140) });
    count += 1;
    if (count >= 2) break; // hard cap: at most 2 applied per call
  }
  return { state: cur, applied };
}
// Propose rule-based personality mutations for this run. Exactly one branch
// fires, in priority order: drift mode > protocol drift > error signals >
// opportunity signals > generic plateau nudge. A final adjustment swaps an
// obedience bump for creativity when obedience is already saturated (>= 0.95).
// Returned proposals are raw; clipping/capping happens in applyPersonalityMutations.
function proposeMutations({ baseState, reason, driftEnabled, signals }) {
  const s = normalizePersonalityState(baseState);
  const sig = Array.isArray(signals) ? signals.map(x => String(x || '')) : [];
  const muts = [];
  const r = String(reason || '');
  if (driftEnabled) {
    muts.push({ type: 'PersonalityMutation', param: 'creativity', delta: +0.1, reason: r || 'drift enabled' });
    // Keep risk bounded under drift by default.
    muts.push({ type: 'PersonalityMutation', param: 'risk_tolerance', delta: -0.05, reason: 'drift safety clamp' });
  } else if (sig.includes('protocol_drift')) {
    muts.push({ type: 'PersonalityMutation', param: 'obedience', delta: +0.1, reason: r || 'protocol drift' });
    muts.push({ type: 'PersonalityMutation', param: 'rigor', delta: +0.05, reason: 'tighten protocol compliance' });
  } else if (sig.includes('log_error') || sig.some(x => x.startsWith('errsig:') || x.startsWith('errsig_norm:'))) {
    muts.push({ type: 'PersonalityMutation', param: 'rigor', delta: +0.1, reason: r || 'repair instability' });
    muts.push({ type: 'PersonalityMutation', param: 'risk_tolerance', delta: -0.1, reason: 'reduce risky changes under errors' });
  } else if (hasOpportunitySignal(sig)) {
    // Opportunity detected: nudge towards creativity to enable innovation.
    muts.push({ type: 'PersonalityMutation', param: 'creativity', delta: +0.1, reason: r || 'opportunity signal detected' });
    muts.push({ type: 'PersonalityMutation', param: 'risk_tolerance', delta: +0.05, reason: 'allow exploration for innovation' });
  } else {
    // Plateau-like generic: nudge creativity up to break out of local optimum.
    muts.push({ type: 'PersonalityMutation', param: 'creativity', delta: +0.05, reason: r || 'plateau creativity nudge' });
    muts.push({ type: 'PersonalityMutation', param: 'verbosity', delta: -0.05, reason: 'reduce noise' });
  }
  // If already very high obedience, avoid pushing it further; swap second mutation to creativity.
  if (s.obedience >= 0.95) {
    const idx = muts.findIndex(x => x.param === 'obedience');
    if (idx >= 0) muts[idx] = { type: 'PersonalityMutation', param: 'creativity', delta: +0.05, reason: 'obedience saturated' };
  }
  return muts;
}
// Decide whether this run should apply a rule-based personality mutation.
// Triggers when: drift mode is on; OR 3 of the last 4 known outcomes failed;
// OR the last 3 mutation-bearing events all failed (examining the last 6
// events in each case). Returns { ok, reason }.
function shouldTriggerPersonalityMutation({ driftEnabled, recentEvents }) {
  if (driftEnabled) return { ok: true, reason: 'drift enabled' };
  const list = Array.isArray(recentEvents) ? recentEvents : [];
  const tail = list.slice(-6);
  const outcomes = tail
    .map(e => (e && e.outcome && e.outcome.status ? String(e.outcome.status) : null))
    .filter(Boolean);
  // Require at least 4 known outcomes before judging a failure streak.
  if (outcomes.length >= 4) {
    const recentFailed = outcomes.slice(-4).filter(x => x === 'failed').length;
    if (recentFailed >= 3) return { ok: true, reason: 'long failure streak' };
  }
  // Mutation consecutive failure proxy: last 3 events that have mutation_id.
  const withMut = tail.filter(e => e && typeof e.mutation_id === 'string' && e.mutation_id);
  if (withMut.length >= 3) {
    const last3 = withMut.slice(-3);
    const fail3 = last3.filter(e => e && e.outcome && e.outcome.status === 'failed').length;
    if (fail3 >= 3) return { ok: true, reason: 'mutation consecutive failures' };
  }
  return { ok: false, reason: '' };
}
// Load the persisted personality model from disk, normalizing every part and
// falling back to defaults on a missing/corrupt file. Always returns
// { version, current, stats, history, updated_at }.
function loadPersonalityModel() {
  const p = personalityFilePath();
  const fallback = {
    version: 1,
    current: defaultPersonalityState(),
    stats: {},
    history: [],
    updated_at: nowIso(),
  };
  const raw = readJsonIfExists(p, fallback);
  // Re-normalize each field independently so partial corruption degrades gracefully.
  const cur = normalizePersonalityState(raw && raw.current ? raw.current : defaultPersonalityState());
  const stats = raw && typeof raw.stats === 'object' ? raw.stats : {};
  const history = Array.isArray(raw && raw.history) ? raw.history : [];
  return { version: 1, current: cur, stats, history, updated_at: raw && raw.updated_at ? raw.updated_at : nowIso() };
}
// Persist the personality model atomically, normalizing the current state,
// defaulting malformed stats/history, capping history at the last 120 entries,
// and refreshing updated_at. Returns the object actually written.
function savePersonalityModel(model) {
  const m = model && typeof model === 'object' ? model : {};
  const out = {
    version: 1,
    current: normalizePersonalityState(m.current || defaultPersonalityState()),
    stats: m.stats && typeof m.stats === 'object' ? m.stats : {},
    history: Array.isArray(m.history) ? m.history.slice(-120) : [],
    updated_at: nowIso(),
  };
  writeJsonAtomic(personalityFilePath(), out);
  return out;
}
// Evolve and select the personality for the upcoming run. Three mutation
// phases run in order, each feeding the next: (1) natural selection towards
// the best-known configuration, (2) rule-triggered mutations, (3) mutations
// suggested by the latest reflection (only up to a total cap of 4 per cycle).
// The updated model is persisted before returning.
// @returns {{personality_state, personality_key, personality_known,
//            personality_mutations, model_meta}}
function selectPersonalityForRun({ driftEnabled, signals, recentEvents } = {}) {
  const model = loadPersonalityModel();
  const base = normalizePersonalityState(model.current);
  const stats = model.stats || {};
  const best = chooseBestKnownPersonality(stats);
  let naturalSelectionApplied = [];
  // Natural selection: nudge towards the best-known configuration (small, max 2 params).
  if (best && best.key) {
    const bestState = parseKeyToState(best.key);
    // Only traits at least 0.05 away are worth nudging.
    const diffs = getParamDeltas(base, bestState).filter(d => Math.abs(d.delta) >= 0.05);
    const muts = [];
    for (const d of diffs.slice(0, 2)) {
      // Nudge step is capped at +/-0.1 (tighter than the generic 0.2 clip).
      const clipped = Math.max(-0.1, Math.min(0.1, d.delta));
      muts.push({ type: 'PersonalityMutation', param: d.param, delta: clipped, reason: 'natural_selection' });
    }
    const applied = applyPersonalityMutations(base, muts);
    model.current = applied.state;
    naturalSelectionApplied = applied.applied;
  }
  // Triggered personality mutation (explicit rule-based).
  const trig = shouldTriggerPersonalityMutation({ driftEnabled: !!driftEnabled, recentEvents });
  let triggeredApplied = [];
  if (trig.ok) {
    const props = proposeMutations({
      baseState: model.current,
      reason: trig.reason,
      driftEnabled: !!driftEnabled,
      signals,
    });
    const applied = applyPersonalityMutations(model.current, props);
    model.current = applied.state;
    triggeredApplied = applied.applied;
  }
  // Reflection-driven mutation: consume suggested_mutations from the latest reflection.
  // Only apply if prior mutations left room (cap total at 4 per cycle to prevent drift).
  let reflectionApplied = [];
  var totalApplied = naturalSelectionApplied.length + triggeredApplied.length;
  if (totalApplied < 4) {
    try {
      const { loadRecentReflections } = require('./reflection');
      const recent = loadRecentReflections(1);
      if (recent.length > 0 && Array.isArray(recent[0].suggested_mutations) && recent[0].suggested_mutations.length > 0) {
        var refMuts = recent[0].suggested_mutations.slice(0, 4 - totalApplied).map(function (m) {
          return {
            type: 'PersonalityMutation',
            param: m.param,
            delta: Math.max(-0.1, Math.min(0.1, Number(m.delta) || 0)),
            reason: String(m.reason || 'reflection').slice(0, 140),
          };
        });
        const refApplied = applyPersonalityMutations(model.current, refMuts);
        model.current = refApplied.state;
        reflectionApplied = refApplied.applied;
      }
    } catch (_) {}
  }
  // Persist updated current state.
  const saved = savePersonalityModel(model);
  const key = personalityKey(saved.current);
  // 'known' means we already have outcome stats for this exact key.
  const known = !!(saved.stats && saved.stats[key]);
  return {
    personality_state: saved.current,
    personality_key: key,
    personality_known: known,
    personality_mutations: [...naturalSelectionApplied, ...triggeredApplied, ...reflectionApplied],
    model_meta: {
      best_known_key: best && best.key ? best.key : null,
      best_known_score: best && Number.isFinite(Number(best.score)) ? Number(best.score) : null,
      triggered: trig.ok ? { reason: trig.reason } : null,
    },
  };
}
// Record a run outcome against the personality's stats bucket (keyed by
// personalityKey). Increments success/fail counters, folds `score` into a
// running average, appends a history entry, and persists the model.
// @returns {{key: string, stats: object}} the bucket key and updated entry.
function updatePersonalityStats({ personalityState, outcome, score, notes } = {}) {
  const model = loadPersonalityModel();
  // Fall back to the model's current state when none is supplied.
  const st = normalizePersonalityState(personalityState || model.current);
  const key = personalityKey(st);
  if (!model.stats || typeof model.stats !== 'object') model.stats = {};
  const cur = model.stats[key] && typeof model.stats[key] === 'object' ? model.stats[key] : { success: 0, fail: 0, avg_score: 0.5, n: 0 };
  const out = String(outcome || '').toLowerCase();
  // Only 'success'/'failed' move the counters; anything else is neutral.
  if (out === 'success') cur.success = (Number(cur.success) || 0) + 1;
  else if (out === 'failed') cur.fail = (Number(cur.fail) || 0) + 1;
  const sc = Number.isFinite(Number(score)) ? clamp01(Number(score)) : null;
  if (sc != null) {
    // Incremental (Welford-style) running mean over n samples.
    const n = (Number(cur.n) || 0) + 1;
    const prev = Number.isFinite(Number(cur.avg_score)) ? Number(cur.avg_score) : 0.5;
    cur.avg_score = prev + (sc - prev) / n;
    cur.n = n;
  }
  cur.updated_at = nowIso();
  model.stats[key] = cur;
  model.history = Array.isArray(model.history) ? model.history : [];
  model.history.push({
    at: nowIso(),
    key,
    outcome: out === 'success' || out === 'failed' ? out : 'unknown',
    score: sc,
    notes: notes ? String(notes).slice(0, 220) : null,
  });
  savePersonalityModel(model);
  return { key, stats: cur };
}
// Public API: personality state model, keying, run selection, and stats updates.
module.exports = {
  clamp01,
  defaultPersonalityState,
  normalizePersonalityState,
  isValidPersonalityState,
  personalityKey,
  loadPersonalityModel,
  savePersonalityModel,
  selectPersonalityForRun,
  updatePersonalityStats,
};

542
src/gep/policyCheck.js Normal file
View File

@@ -0,0 +1,542 @@
// Constraint checking, blast radius analysis, validation, and failure classification.
// Extracted from solidify.js for maintainability.
const fs = require('fs');
const path = require('path');
const { getRepoRoot, getWorkspaceRoot } = require('./paths');
const {
tryRunCmd, normalizeRelPath, countFileLines,
gitListChangedFiles, isCriticalProtectedPath,
} = require('./gitOps');
// Read and parse a JSON file, returning `fallback` when it is missing or
// empty. Unlike the silent variants elsewhere, read/parse failures here are
// logged (policy files failing to load is worth surfacing).
function readJsonIfExists(filePath, fallback) {
  try {
    if (!fs.existsSync(filePath)) return fallback;
    const raw = fs.readFileSync(filePath, 'utf8');
    if (!raw.trim()) return fallback;
    return JSON.parse(raw);
  } catch (e) {
    console.warn(`[policyCheck] Failed to read ${filePath}:`, (e && e.message) || e);
    return fallback;
  }
}
// Load the counted-file policy (which changed files count against blast-radius
// constraints) from openclaw.json at evolver.constraints.countedFilePolicy.
// Each list falls back to the built-in default independently, so a partial
// config only overrides what it specifies. Lookup path: one level above the
// workspace root — assumes OpenClaw's layout; TODO confirm for standalone use.
function readOpenclawConstraintPolicy() {
  const defaults = {
    excludePrefixes: ['logs/', 'memory/', 'assets/gep/', 'out/', 'temp/', 'node_modules/'],
    excludeExact: ['event.json', 'temp_gep_output.json', 'temp_evolution_output.json', 'evolution_error.log'],
    excludeRegex: ['capsule', 'events?\\.jsonl$'],
    includePrefixes: ['src/', 'scripts/', 'config/'],
    includeExact: ['index.js', 'package.json'],
    includeExtensions: ['.js', '.cjs', '.mjs', '.ts', '.tsx', '.json', '.yaml', '.yml', '.toml', '.ini', '.sh'],
  };
  try {
    const root = path.resolve(getWorkspaceRoot(), '..');
    const cfgPath = path.join(root, 'openclaw.json');
    if (!fs.existsSync(cfgPath)) return defaults;
    const obj = readJsonIfExists(cfgPath, {});
    // Defensive deep access: any missing level yields the empty policy.
    const pol =
      obj &&
      obj.evolver &&
      obj.evolver.constraints &&
      obj.evolver.constraints.countedFilePolicy &&
      typeof obj.evolver.constraints.countedFilePolicy === 'object'
        ? obj.evolver.constraints.countedFilePolicy
        : {};
    // Per-field override: each list must be an array to replace its default.
    return {
      excludePrefixes: Array.isArray(pol.excludePrefixes) ? pol.excludePrefixes.map(String) : defaults.excludePrefixes,
      excludeExact: Array.isArray(pol.excludeExact) ? pol.excludeExact.map(String) : defaults.excludeExact,
      excludeRegex: Array.isArray(pol.excludeRegex) ? pol.excludeRegex.map(String) : defaults.excludeRegex,
      includePrefixes: Array.isArray(pol.includePrefixes) ? pol.includePrefixes.map(String) : defaults.includePrefixes,
      includeExact: Array.isArray(pol.includeExact) ? pol.includeExact.map(String) : defaults.includeExact,
      includeExtensions: Array.isArray(pol.includeExtensions) ? pol.includeExtensions.map(String) : defaults.includeExtensions,
    };
  } catch (_) {
    console.warn('[policyCheck] readOpenclawConstraintPolicy failed:', _ && _.message || _);
    return defaults;
  }
}
// True when `rel` equals one of the normalized prefixes (trailing
// slashes stripped) or lives underneath it as a subpath.
function matchAnyPrefix(rel, prefixes) {
  if (!Array.isArray(prefixes)) return false;
  return prefixes.some(function (p) {
    const n = normalizeRelPath(p).replace(/\/+$/, '');
    if (!n) return false;
    return rel === n || rel.startsWith(n + '/');
  });
}
// True when `rel` exactly equals any entry of `exacts` after
// normalization.
function matchAnyExact(rel, exacts) {
  const list = Array.isArray(exacts) ? exacts : [];
  return list.some(function (x) { return normalizeRelPath(x) === rel; });
}
// True when `rel` matches any pattern in `regexList`, compiled
// case-insensitively. An invalid pattern is logged and skipped instead
// of throwing.
function matchAnyRegex(rel, regexList) {
  const patterns = Array.isArray(regexList) ? regexList : [];
  for (let i = 0; i < patterns.length; i++) {
    const raw = patterns[i];
    try {
      const re = new RegExp(String(raw), 'i');
      if (re.test(rel)) return true;
    } catch (_) {
      console.warn('[policyCheck] matchAnyRegex invalid pattern:', raw, _ && _.message || _);
    }
  }
  return false;
}
// Decides whether a changed path counts toward blast-radius constraints.
// Precedence: exclusions (exact, prefix, regex) always win; then
// explicit inclusions (exact, prefix); finally a case-insensitive
// extension allowlist.
function isConstraintCountedPath(relPath, policy) {
  const rel = normalizeRelPath(relPath);
  if (!rel) return false;
  const excluded =
    matchAnyExact(rel, policy.excludeExact) ||
    matchAnyPrefix(rel, policy.excludePrefixes) ||
    matchAnyRegex(rel, policy.excludeRegex);
  if (excluded) return false;
  if (matchAnyExact(rel, policy.includeExact)) return true;
  if (matchAnyPrefix(rel, policy.includePrefixes)) return true;
  const lower = rel.toLowerCase();
  const exts = Array.isArray(policy.includeExtensions) ? policy.includeExtensions : [];
  return exts.some(function (ext) {
    const e = String(ext || '').toLowerCase();
    return Boolean(e) && lower.endsWith(e);
  });
}
// Parses `git diff --numstat` output into { file, added, deleted } rows.
// Binary files report "-" for their counts; those become 0.
//
// Rename rows come in two shapes:
//   plain:  "old/path.js => new/path.js"
//   braced: "src/{old => new}/file.js"  (shared prefix/suffix kept)
// Both are resolved to the NEW path. The previous implementation used
// `split('=>').pop()` for both, which dropped the shared prefix in the
// braced form (e.g. "src/{a => b}/f.js" became "b/f.js" instead of
// "src/b/f.js").
function parseNumstatRows(text) {
  const rows = [];
  const lines = String(text || '').split('\n').map(l => l.trim()).filter(Boolean);
  for (const line of lines) {
    const parts = line.split('\t');
    if (parts.length < 3) continue;
    const a = Number(parts[0]);
    const d = Number(parts[1]);
    let rel = normalizeRelPath(parts.slice(2).join('\t'));
    if (rel.includes('=>')) {
      if (rel.includes('{')) {
        // Braced rename: substitute each "{old => new}" segment with the
        // new side, keep the surrounding shared path, then collapse any
        // empty segment left by "{old => }".
        rel = rel
          .replace(/\{([^{}]*)=>([^{}]*)\}/g, function (_m, _oldSeg, newSeg) {
            return newSeg.trim();
          })
          .replace(/\/{2,}/g, '/');
        rel = normalizeRelPath(rel);
      } else {
        // Plain rename: everything after the last "=>" is the new path.
        rel = normalizeRelPath(String(rel.split('=>').pop() || '').trim());
      }
    }
    rows.push({
      file: rel,
      added: Number.isFinite(a) ? a : 0,
      deleted: Number.isFinite(d) ? d : 0,
    });
  }
  return rows;
}
// Measures the actual "blast radius" of the working tree against the git
// baseline: how many policy-counted files changed, and total line churn
// (added + deleted across unstaged, staged, and newly-untracked files).
// Files listed in `baselineUntracked` (untracked before the cycle began)
// are excluded so only this cycle's changes are counted.
// Returns { files, lines, changed_files, ignored_files, all_changed_files }.
function computeBlastRadius({ repoRoot, baselineUntracked }) {
  const policy = readOpenclawConstraintPolicy();
  let changedFiles = gitListChangedFiles({ repoRoot }).map(normalizeRelPath).filter(Boolean);
  if (Array.isArray(baselineUntracked) && baselineUntracked.length > 0) {
    const baselineSet = new Set(baselineUntracked.map(normalizeRelPath));
    changedFiles = changedFiles.filter(f => !baselineSet.has(f));
  }
  // Split changed paths into those that count toward constraints vs.
  // policy-ignored noise (logs, memory, asset stores, ...).
  const countedFiles = changedFiles.filter(f => isConstraintCountedPath(f, policy));
  const ignoredFiles = changedFiles.filter(f => !isConstraintCountedPath(f, policy));
  const filesCount = countedFiles.length;
  // Line churn for tracked files: unstaged plus staged numstat output.
  const u = tryRunCmd('git diff --numstat', { cwd: repoRoot, timeoutMs: 60000 });
  const c = tryRunCmd('git diff --cached --numstat', { cwd: repoRoot, timeoutMs: 60000 });
  const unstagedRows = u.ok ? parseNumstatRows(u.out) : [];
  const stagedRows = c.ok ? parseNumstatRows(c.out) : [];
  let stagedUnstagedChurn = 0;
  for (const row of [...unstagedRows, ...stagedRows]) {
    if (!isConstraintCountedPath(row.file, policy)) continue;
    stagedUnstagedChurn += row.added + row.deleted;
  }
  // Line churn for brand-new files: every line of a counted untracked
  // file (absent from the baseline) counts as added.
  const untracked = tryRunCmd('git ls-files --others --exclude-standard', { cwd: repoRoot, timeoutMs: 60000 });
  let untrackedLines = 0;
  if (untracked.ok) {
    const rels = String(untracked.out).split('\n').map(normalizeRelPath).filter(Boolean);
    const baselineSet = new Set((Array.isArray(baselineUntracked) ? baselineUntracked : []).map(normalizeRelPath));
    for (const rel of rels) {
      if (baselineSet.has(rel)) continue;
      if (!isConstraintCountedPath(rel, policy)) continue;
      const abs = path.join(repoRoot, rel);
      untrackedLines += countFileLines(abs);
    }
  }
  const churn = stagedUnstagedChurn + untrackedLines;
  return {
    files: filesCount,
    lines: churn,
    changed_files: countedFiles,
    ignored_files: ignoredFiles,
    all_changed_files: changedFiles,
  };
}
// True when `relPath` equals, or is nested under, any entry of
// `forbiddenPaths`. Backslashes are normalized to "/", a leading "./" is
// ignored on both sides, and trailing slashes on entries are ignored.
function isForbiddenPath(relPath, forbiddenPaths) {
  const rel = String(relPath || '').replace(/\\/g, '/').replace(/^\.\/+/, '');
  if (!Array.isArray(forbiddenPaths)) return false;
  return forbiddenPaths.some(function (fp) {
    const f = String(fp || '').replace(/\\/g, '/').replace(/^\.\/+/, '').replace(/\/+$/, '');
    if (!f) return false;
    return rel === f || rel.startsWith(f + '/');
  });
}
// Absolute system-level caps (env-overridable). Breaching these is a
// violation regardless of the gene's own constraints.
const BLAST_RADIUS_HARD_CAP_FILES = Number(process.env.EVOLVER_HARD_CAP_FILES) || 60;
const BLAST_RADIUS_HARD_CAP_LINES = Number(process.env.EVOLVER_HARD_CAP_LINES) || 20000;
// Fractions of the per-gene max_files limit that trigger a warning
// (approaching) or escalate to a critical overrun.
const BLAST_WARN_RATIO = 0.8;
const BLAST_CRITICAL_RATIO = 2.0;
// Grades a change-set against the hard caps and the gene's max_files
// limit, from hard_cap_breach down to within_limit. Returns
// { severity, message }.
function classifyBlastSeverity({ blast, maxFiles }) {
  const files = Number(blast.files) || 0;
  const lines = Number(blast.lines) || 0;
  const hardCapHit = files > BLAST_RADIUS_HARD_CAP_FILES || lines > BLAST_RADIUS_HARD_CAP_LINES;
  if (hardCapHit) {
    return {
      severity: 'hard_cap_breach',
      message: `HARD CAP BREACH: ${files} files / ${lines} lines exceeds system limit (${BLAST_RADIUS_HARD_CAP_FILES} files / ${BLAST_RADIUS_HARD_CAP_LINES} lines)`,
    };
  }
  const hasLimit = Number.isFinite(maxFiles) && maxFiles > 0;
  if (!hasLimit) {
    return { severity: 'within_limit', message: 'no max_files constraint defined' };
  }
  if (files > maxFiles * BLAST_CRITICAL_RATIO) {
    return {
      severity: 'critical_overrun',
      message: `CRITICAL OVERRUN: ${files} files > ${maxFiles * BLAST_CRITICAL_RATIO} (${BLAST_CRITICAL_RATIO}x limit of ${maxFiles}). Agent likely performed bulk/unintended operation.`,
    };
  }
  if (files > maxFiles) {
    return { severity: 'exceeded', message: `max_files exceeded: ${files} > ${maxFiles}` };
  }
  if (files > maxFiles * BLAST_WARN_RATIO) {
    return {
      severity: 'approaching_limit',
      message: `approaching limit: ${files} / ${maxFiles} files (${Math.round((files / maxFiles) * 100)}%)`,
    };
  }
  return { severity: 'within_limit', message: `${files} / ${maxFiles} files` };
}
// Buckets changed files by their first one or two path segments and
// returns the `topN` (default 5) largest buckets as { dir, files },
// sorted descending by count. Used to explain critical overruns.
function analyzeBlastRadiusBreakdown(changedFiles, topN) {
  const limit = Number.isFinite(topN) && topN > 0 ? topN : 5;
  const counts = new Map();
  const files = Array.isArray(changedFiles) ? changedFiles : [];
  for (let i = 0; i < files.length; i++) {
    const rel = normalizeRelPath(files[i]);
    if (!rel) continue;
    const segs = rel.split('/');
    const bucket = segs.length >= 2 ? segs[0] + '/' + segs[1] : segs[0];
    counts.set(bucket, (counts.get(bucket) || 0) + 1);
  }
  return [...counts.entries()]
    .sort((a, b) => b[1] - a[1])
    .slice(0, limit)
    .map(([dir, n]) => ({ dir, files: n }));
}
// Compares the agent's pre-change blast-radius estimate with the
// measured result. Returns null when no usable estimate exists;
// otherwise reports the actual/estimated file ratio (rounded to 2dp) and
// flags drift beyond 3x or below 0.1x. A drift message is attached only
// for the overrun direction.
function compareBlastEstimate(estimate, actual) {
  if (!estimate || typeof estimate !== 'object') return null;
  const estFiles = Number(estimate.files);
  if (!Number.isFinite(estFiles) || estFiles <= 0) return null;
  const actFiles = Number(actual.files);
  const ratio = actFiles / estFiles;
  const overrun = ratio > 3;
  return {
    estimateFiles: estFiles,
    actualFiles: actFiles,
    ratio: Math.round(ratio * 100) / 100,
    drifted: overrun || ratio < 0.1,
    message: overrun
      ? `Estimate drift: actual ${actFiles} files is ${ratio.toFixed(1)}x the estimated ${estFiles}. Agent did not plan accurately.`
      : null,
  };
}
// Enforces the selected Gene's constraints against the measured blast
// radius. Returns { ok, violations, warnings, blastSeverity }; `ok` is
// true only when no violation was recorded. Checks, in order: blast
// severity, estimate drift, forbidden paths, critical protected paths
// (with a repair-only escape hatch for evolver self-modification),
// incomplete new skill directories, and ethics patterns in the gene
// text. No-op success when `gene` is not a Gene object.
function checkConstraints({ gene, blast, blastRadiusEstimate, repoRoot }) {
  const violations = [];
  const warnings = [];
  let blastSeverity = null;
  if (!gene || gene.type !== 'Gene') return { ok: true, violations, warnings, blastSeverity };
  const constraints = gene.constraints || {};
  // Genes without an explicit positive max_files still get a sane cap.
  const DEFAULT_MAX_FILES = 20;
  const maxFiles = Number(constraints.max_files) > 0 ? Number(constraints.max_files) : DEFAULT_MAX_FILES;
  // 1) Blast-radius severity: hard caps and critical overruns are
  // violations; nearing the limit is only a warning.
  blastSeverity = classifyBlastSeverity({ blast, maxFiles });
  if (blastSeverity.severity === 'hard_cap_breach') {
    violations.push(blastSeverity.message);
    console.error(`[Solidify] ${blastSeverity.message}`);
  } else if (blastSeverity.severity === 'critical_overrun') {
    violations.push(blastSeverity.message);
    // Explain where the bulk of the changes landed.
    const breakdown = analyzeBlastRadiusBreakdown(blast.all_changed_files || blast.changed_files || []);
    console.error(`[Solidify] ${blastSeverity.message}`);
    console.error(`[Solidify] Top contributing directories: ${breakdown.map(function (d) { return d.dir + ' (' + d.files + ')'; }).join(', ')}`);
  } else if (blastSeverity.severity === 'exceeded') {
    violations.push(`max_files exceeded: ${blast.files} > ${maxFiles}`);
  } else if (blastSeverity.severity === 'approaching_limit') {
    warnings.push(blastSeverity.message);
  }
  // 2) Planning accuracy: warn when the actual change-set drifted far
  // from the agent's own estimate.
  const estimateComparison = compareBlastEstimate(blastRadiusEstimate, blast);
  if (estimateComparison && estimateComparison.drifted) {
    warnings.push(estimateComparison.message);
    console.log(`[Solidify] WARNING: ${estimateComparison.message}`);
  }
  // 3) Gene-declared forbidden paths.
  const forbidden = Array.isArray(constraints.forbidden_paths) ? constraints.forbidden_paths : [];
  for (const f of blast.all_changed_files || blast.changed_files || []) {
    if (isForbiddenPath(f, forbidden)) violations.push(`forbidden_path touched: ${f}`);
  }
  // 4) Critical protected paths. With EVOLVE_ALLOW_SELF_MODIFY=true, a
  // repair-category gene may touch skills/evolver/ (warning only).
  const allowSelfModify = String(process.env.EVOLVE_ALLOW_SELF_MODIFY || '').toLowerCase() === 'true';
  for (const f of blast.all_changed_files || blast.changed_files || []) {
    if (isCriticalProtectedPath(f)) {
      const norm = normalizeRelPath(f);
      if (allowSelfModify && norm.startsWith('skills/evolver/') && gene && gene.category === 'repair') {
        warnings.push('self_modify_evolver_repair: ' + norm + ' (EVOLVE_ALLOW_SELF_MODIFY=true)');
      } else {
        violations.push('critical_path_modified: ' + norm);
      }
    }
  }
  // 5) New skill completeness: warn when a touched skills/<name>/ dir has
  // fewer than 2 non-hidden files (expected: index.js + SKILL.md).
  if (repoRoot) {
    const newSkillDirs = new Set();
    const changedList = blast.all_changed_files || blast.changed_files || [];
    for (let sci = 0; sci < changedList.length; sci++) {
      const scNorm = normalizeRelPath(changedList[sci]);
      const scMatch = scNorm.match(/^skills\/([^\/]+)\//);
      if (scMatch && !isCriticalProtectedPath(scNorm)) {
        newSkillDirs.add(scMatch[1]);
      }
    }
    newSkillDirs.forEach(function (skillName) {
      const skillDir = path.join(repoRoot, 'skills', skillName);
      try {
        const entries = fs.readdirSync(skillDir).filter(function (e) { return !e.startsWith('.'); });
        if (entries.length < 2) {
          warnings.push('incomplete_skill: skills/' + skillName + '/ has only ' + entries.length + ' file(s). New skills should have at least index.js + SKILL.md.');
        }
      } catch (e) {
        console.warn('[policyCheck] checkConstraints skill dir read failed:', skillName, e && e.message || e);
      }
    });
  }
  // 6) Ethics screen: scan the gene's strategy/description/summary text
  // for patterns that bypass safety, conceal actions, or target humans.
  let ethicsText = '';
  if (gene.strategy) {
    ethicsText += (Array.isArray(gene.strategy) ? gene.strategy.join(' ') : String(gene.strategy)) + ' ';
  }
  if (gene.description) ethicsText += String(gene.description) + ' ';
  if (gene.summary) ethicsText += String(gene.summary) + ' ';
  if (ethicsText.length > 0) {
    const ethicsBlockPatterns = [
      { re: /(?:bypass|disable|circumvent|remove)\s+(?:safety|guardrail|security|ethic|constraint|protection)/i, rule: 'safety', msg: 'ethics: strategy attempts to bypass safety mechanisms' },
      { re: /(?:keylogger|screen\s*capture|webcam\s*hijack|mic(?:rophone)?\s*record)/i, rule: 'human_welfare', msg: 'ethics: covert monitoring tool in strategy' },
      { re: /(?:social\s+engineering|phishing)\s+(?:attack|template|script)/i, rule: 'human_welfare', msg: 'ethics: social engineering content in strategy' },
      { re: /(?:exploit|hack)\s+(?:user|human|people|victim)/i, rule: 'human_welfare', msg: 'ethics: human exploitation in strategy' },
      { re: /(?:hide|conceal|obfuscat)\w*\s+(?:action|behavior|intent|log)/i, rule: 'transparency', msg: 'ethics: strategy conceals actions from audit trail' },
    ];
    for (let ei = 0; ei < ethicsBlockPatterns.length; ei++) {
      if (ethicsBlockPatterns[ei].re.test(ethicsText)) {
        violations.push(ethicsBlockPatterns[ei].msg);
        console.error('[Solidify] Ethics violation: ' + ethicsBlockPatterns[ei].msg);
      }
    }
  }
  return { ok: violations.length === 0, violations, warnings, blastSeverity };
}
// Scans the changed-file list for destructive edits to critical
// protected paths: a protected file (not newly untracked at baseline)
// that is now missing (deleted) or a zero-byte regular file (emptied).
// Paths that resolve outside the repo root are ignored.
function detectDestructiveChanges({ repoRoot, changedFiles, baselineUntracked }) {
  const violations = [];
  const baseline = new Set((Array.isArray(baselineUntracked) ? baselineUntracked : []).map(normalizeRelPath));
  const repoAbs = path.resolve(repoRoot);
  for (const rel of changedFiles) {
    const norm = normalizeRelPath(rel);
    if (!norm) continue;
    if (!isCriticalProtectedPath(norm)) continue;
    const absPath = path.resolve(path.join(repoRoot, norm));
    // Guard against traversal: only inspect paths inside the repo.
    const insideRepo = absPath === repoAbs || absPath.startsWith(repoAbs + path.sep);
    if (!insideRepo) continue;
    if (baseline.has(norm)) continue;
    if (!fs.existsSync(absPath)) {
      violations.push(`CRITICAL_FILE_DELETED: ${norm}`);
      continue;
    }
    try {
      const stat = fs.statSync(absPath);
      if (stat.isFile() && stat.size === 0) {
        violations.push(`CRITICAL_FILE_EMPTIED: ${norm}`);
      }
    } catch (e) {
      console.warn('[policyCheck] detectDestructiveChanges stat failed:', norm, e && e.message || e);
    }
  }
  return violations;
}
// Only node/npm/npx invocations may run as gene validation commands.
const VALIDATION_ALLOWED_PREFIXES = ['node ', 'npm ', 'npx '];
// Safety gate for gene validation commands. Requires an allowed prefix,
// rejects backticks and $(…) command substitution, rejects shell
// operators (; & | > <) outside quoted strings, and blocks inline
// `node -e/--eval/--print/-p` code execution.
function isValidationCommandAllowed(cmd) {
  const c = String(cmd || '').trim();
  if (!c) return false;
  const prefixed = VALIDATION_ALLOWED_PREFIXES.some(function (p) { return c.startsWith(p); });
  if (!prefixed) return false;
  if (c.includes('`') || c.includes('$(')) return false;
  // Strip quoted segments before scanning for shell operators.
  const unquoted = c.replace(/"[^"]*"/g, '').replace(/'[^']*'/g, '');
  if (/[;&|><]/.test(unquoted)) return false;
  return !/^node\s+(-e|--eval|--print|-p)\b/.test(c);
}
// Maximum automatic re-runs after a failed (non-blocked) validation pass;
// env-overridable, falls back to 0 on an unparseable value.
var MAX_VALIDATION_RETRIES = parseInt(process.env.SOLIDIFY_MAX_RETRIES || '2', 10) || 0;
// Runs the gene's validation commands once, in order. Stops at the first
// failure, or immediately when a command is rejected by the safety gate
// (recorded as a BLOCKED result). Returns { ok, results, startedAt,
// finishedAt }.
function runValidationsOnce(gene, opts) {
  const repoRoot = opts.repoRoot || getRepoRoot();
  const timeoutMs = Number.isFinite(Number(opts.timeoutMs)) ? Number(opts.timeoutMs) : 180000;
  const commands = Array.isArray(gene && gene.validation) ? gene.validation : [];
  const results = [];
  const startedAt = Date.now();
  const finish = function (ok) {
    return { ok, results, startedAt, finishedAt: Date.now() };
  };
  for (const raw of commands) {
    const cmd = String(raw || '').trim();
    if (!cmd) continue;
    if (!isValidationCommandAllowed(cmd)) {
      results.push({ cmd, ok: false, out: '', err: 'BLOCKED: validation command rejected by safety check (allowed prefixes: node/npm/npx; shell operators prohibited)' });
      return finish(false);
    }
    const run = tryRunCmd(cmd, { cwd: repoRoot, timeoutMs });
    results.push({ cmd, ok: run.ok, out: String(run.out || ''), err: String(run.err || '') });
    if (!run.ok) return finish(false);
  }
  return finish(true);
}
// Blocks the current thread for `ms` milliseconds between validation
// retries. Uses Atomics.wait on a throwaway SharedArrayBuffer — Node
// permits blocking Atomics.wait on the main thread — so the process
// sleeps in the kernel instead of spinning a CPU core (the previous
// implementation busy-waited on Date.now()). Falls back to the old
// busy-wait if Atomics/SharedArrayBuffer are unavailable.
function sleepSync(ms) {
  try {
    Atomics.wait(new Int32Array(new SharedArrayBuffer(4)), 0, 0, ms);
  } catch (_) {
    var end = Date.now() + ms;
    while (Date.now() < end) {}
  }
}
// Runs validations with automatic retries for transient failures (1s
// pause between attempts). Safety-gate BLOCKED results are never
// retried. Attaches `retries_attempted` to the returned result.
function runValidations(gene, opts = {}) {
  const maxRetries = Math.max(0, MAX_VALIDATION_RETRIES);
  let attempt = 0;
  let result;
  for (;;) {
    if (attempt > maxRetries) break;
    result = runValidationsOnce(gene, opts);
    if (result.ok) {
      if (attempt > 0) console.log('[Solidify] Validation passed on retry ' + attempt);
      result.retries_attempted = attempt;
      return result;
    }
    const wasBlocked = result.results && result.results.some(function (r) {
      return r.err && r.err.startsWith('BLOCKED:');
    });
    if (wasBlocked) break;
    attempt += 1;
    if (attempt <= maxRetries) {
      console.log('[Solidify] Validation failed (attempt ' + attempt + '/' + (maxRetries + 1) + '), retrying in 1s...');
      sleepSync(1000);
    }
  }
  result.retries_attempted = attempt > 0 ? attempt - 1 : 0;
  return result;
}
// Runs src/canary.js in a child process to confirm index.js still loads
// after a change. When the canary script is absent the check is skipped
// and reported as ok.
function runCanaryCheck(opts) {
  const repoRoot = opts && opts.repoRoot ? opts.repoRoot : getRepoRoot();
  const timeoutMs = opts && Number.isFinite(Number(opts.timeoutMs)) ? Number(opts.timeoutMs) : 30000;
  const script = path.join(repoRoot, 'src', 'canary.js');
  if (!fs.existsSync(script)) {
    return { ok: true, skipped: true, reason: 'canary.js not found' };
  }
  const run = tryRunCmd(`node "${script}"`, { cwd: repoRoot, timeoutMs });
  return { ok: run.ok, skipped: false, out: String(run.out || ''), err: String(run.err || '') };
}
// Flattens all failure sources — constraint violations, protocol
// violations, failed validation commands, and a failed (non-skipped)
// canary — into one semicolon-joined reason string, capped at 2000
// chars. Returns 'unknown' when nothing failed.
function buildFailureReason(constraintCheck, validation, protocolViolations, canary) {
  const reasons = [];
  const constraintList = constraintCheck && Array.isArray(constraintCheck.violations)
    ? constraintCheck.violations
    : [];
  constraintList.forEach(function (v) { reasons.push('constraint: ' + v); });
  const protocolList = Array.isArray(protocolViolations) ? protocolViolations : [];
  protocolList.forEach(function (p) { reasons.push('protocol: ' + p); });
  const valResults = validation && Array.isArray(validation.results) ? validation.results : [];
  valResults.forEach(function (r) {
    if (r && !r.ok) {
      reasons.push('validation_failed: ' + String(r.cmd || '').slice(0, 120) + ' => ' + String(r.err || '').slice(0, 200));
    }
  });
  if (canary && !canary.ok && !canary.skipped) {
    reasons.push('canary_failed: ' + String(canary.err || '').slice(0, 200));
  }
  return reasons.join('; ').slice(0, 2000) || 'unknown';
}
// Derives learning-signal tags (problem:/risk:/area:/action:) for a soft
// failure by expanding the run's signals + violations with text mined
// from the failure reason and the failed validation commands' output.
function buildSoftFailureLearningSignals(opts) {
  const { expandSignals } = require('./learningSignals');
  const signals = opts && Array.isArray(opts.signals) ? opts.signals : [];
  const failureReason = opts && opts.failureReason ? String(opts.failureReason) : '';
  const violations = opts && Array.isArray(opts.violations) ? opts.violations : [];
  const validationResults = opts && Array.isArray(opts.validationResults) ? opts.validationResults : [];
  // BUG FIX: validation results produced by runValidationsOnce use the
  // keys `err`/`out`, not `stderr`/`stdout`; the old code read only the
  // latter, so failed-validation text was silently dropped. Read both
  // shapes for backward compatibility with any older callers.
  const validationText = validationResults
    .filter(function (r) { return r && r.ok === false; })
    .map(function (r) { return [r.cmd, r.err, r.out, r.stderr, r.stdout].filter(Boolean).join(' '); })
    .join(' ');
  return expandSignals(signals.concat(violations), failureReason + ' ' + validationText)
    .filter(function (tag) {
      return tag.indexOf('problem:') === 0 || tag.indexOf('risk:') === 0 || tag.indexOf('area:') === 0 || tag.indexOf('action:') === 0;
    });
}
// Classifies a failed cycle by severity. 'hard' failures (destructive
// constraint breaches, protocol violations, canary crashes, any other
// constraint hit) are not retryable; validation failures and unknown
// failures are 'soft' and retryable. Returns { mode, reasonClass,
// retryable }.
function classifyFailureMode(opts) {
  const o = opts || {};
  const constraintViolations = Array.isArray(o.constraintViolations) ? o.constraintViolations : [];
  const protocolViolations = Array.isArray(o.protocolViolations) ? o.protocolViolations : [];
  const validation = o.validation || null;
  const canary = o.canary || null;
  const destructiveRe = /HARD CAP BREACH|CRITICAL_FILE_|critical_path_modified|forbidden_path touched|ethics:/i;
  const isDestructive = constraintViolations.some(function (v) {
    return destructiveRe.test(String(v || ''));
  });
  if (isDestructive) {
    return { mode: 'hard', reasonClass: 'constraint_destructive', retryable: false };
  }
  if (protocolViolations.length > 0) {
    return { mode: 'hard', reasonClass: 'protocol', retryable: false };
  }
  if (canary && !canary.ok && !canary.skipped) {
    return { mode: 'hard', reasonClass: 'canary', retryable: false };
  }
  if (constraintViolations.length > 0) {
    return { mode: 'hard', reasonClass: 'constraint', retryable: false };
  }
  if (validation && validation.ok === false) {
    return { mode: 'soft', reasonClass: 'validation', retryable: true };
  }
  return { mode: 'soft', reasonClass: 'unknown', retryable: true };
}
// Public API of policyCheck: counted-file policy, blast-radius
// measurement/classification, constraint and destructive-change checks,
// validation/canary execution, and failure-reason/mode helpers, plus the
// hard-cap constants enforced system-wide.
module.exports = {
  readOpenclawConstraintPolicy,
  isConstraintCountedPath,
  parseNumstatRows,
  computeBlastRadius,
  isForbiddenPath,
  checkConstraints,
  classifyBlastSeverity,
  analyzeBlastRadiusBreakdown,
  compareBlastEstimate,
  detectDestructiveChanges,
  isValidationCommandAllowed,
  runValidations,
  runCanaryCheck,
  buildFailureReason,
  buildSoftFailureLearningSignals,
  classifyFailureMode,
  BLAST_RADIUS_HARD_CAP_FILES,
  BLAST_RADIUS_HARD_CAP_LINES,
};

584
src/gep/prompt.js Normal file
View File

@@ -0,0 +1,584 @@
const fs = require('fs');
const { captureEnvFingerprint } = require('./envFingerprint');
const { formatAssetPreview } = require('./assets');
const { generateInnovationIdeas } = require('../ops/innovation');
const { analyzeRecentHistory, OPPORTUNITY_SIGNALS } = require('./signals');
const { loadNarrativeSummary } = require('./narrativeMemory');
const { getEvolutionPrinciplesPath } = require('./paths');
/**
 * Build a minimal prompt for direct-reuse mode.
 *
 * Renders a strict "apply this verified hub capsule" instruction block,
 * embedding the capsule payload as pretty-printed JSON.
 *
 * @param {object} opts
 * @param {object} opts.capsule - Hub capsule; data may be nested in `.payload`.
 * @param {string[]} [opts.signals] - Local trigger signals for this run.
 * @param {string} [opts.nowIso] - Timestamp label; defaults to now.
 * @returns {string} Trimmed prompt text.
 */
function buildReusePrompt({ capsule, signals, nowIso }) {
  const payload = capsule.payload || capsule;
  const summary = payload.summary || capsule.summary || '(no summary)';
  const gene = payload.gene || capsule.gene || '(unknown)';
  const confidence = payload.confidence || capsule.confidence || 0;
  const assetId = capsule.asset_id || '(unknown)';
  const sourceNode = capsule.source_node_id || '(unknown)';
  // BUG FIX: the old expression only rendered triggers when
  // `payload.trigger` was already an array — the comma-separated
  // `capsule.trigger_text` fallback could never fire because a string
  // fails the Array.isArray gate. Handle both shapes explicitly.
  const rawTrigger = payload.trigger != null ? payload.trigger : capsule.trigger_text;
  let trigger = '';
  if (Array.isArray(rawTrigger)) {
    trigger = rawTrigger.join(', ');
  } else if (rawTrigger != null && String(rawTrigger).trim()) {
    trigger = String(rawTrigger).split(',').map(s => s.trim()).filter(Boolean).join(', ');
  }
  return `
GEP -- REUSE MODE (Search-First) [${nowIso || new Date().toISOString()}]
You are applying a VERIFIED solution from the EvoMap Hub.
Source asset: ${assetId} (Node: ${sourceNode})
Confidence: ${confidence} | Gene: ${gene}
Trigger signals: ${trigger}
Summary: ${summary}
Your signals: ${JSON.stringify(signals || [])}
Instructions:
1. Read the capsule details below.
2. Apply the fix to the local codebase, adapting paths/names.
3. Run validation to confirm it works.
4. If passed, run: node index.js solidify
5. If failed, ROLLBACK and report.
Capsule payload:
\`\`\`json
${JSON.stringify(payload, null, 2)}
\`\`\`
IMPORTANT: Do NOT reinvent. Apply faithfully.
`.trim();
}
/**
 * Build a Hub Matched Solution block.
 *
 * Formats a hub capsule as a "strong reference" section for the main
 * prompt, embedding its payload as JSON. Returns a placeholder when no
 * capsule matched.
 */
function buildHubMatchedBlock({ capsule }) {
  if (!capsule) return '(no hub match)';
  const payload = capsule.payload || capsule;
  const summary = payload.summary || capsule.summary || '(no summary)';
  const gene = payload.gene || capsule.gene || '(unknown)';
  const confidence = payload.confidence || capsule.confidence || 0;
  const assetId = capsule.asset_id || '(unknown)';
  const lines = [
    'Hub Matched Solution (STRONG REFERENCE):',
    `- Asset: ${assetId} (${confidence})`,
    `- Gene: ${gene}`,
    `- Summary: ${summary}`,
    '- Payload:',
    '```json',
    JSON.stringify(payload, null, 2),
    '```',
    'Use this as your primary approach if applicable. Adapt to local context.',
  ];
  return lines.join('\n');
}
/**
 * Truncate context intelligently to preserve header/footer structure.
 *
 * Returns the text unchanged when it fits within `maxLength`; otherwise
 * cuts it there and appends an explicit truncation marker. Nullish input
 * yields ''.
 */
function truncateContext(text, maxLength = 20000) {
  const s = text || '';
  if (s.length <= maxLength) return s;
  return `${s.slice(0, maxLength)}\n...[TRUNCATED_EXECUTION_CONTEXT]...`;
}
/**
 * Strict schema definitions for the prompt to reduce drift.
 * UPDATED: 2026-02-14 (Protocol Drift Fix v3.2 - JSON-Only Enforcement)
 *
 * NOTE: this is a runtime prompt string — every character is emitted to
 * the model verbatim (the "<parent_evt_id|null>" placeholder is later
 * substituted in buildGepPrompt). Do not reformat its contents.
 */
const SCHEMA_DEFINITIONS = `
━━━━━━━━━━━━━━━━━━━━━━
I. Mandatory Evolution Object Model (Output EXACTLY these 5 objects)
━━━━━━━━━━━━━━━━━━━━━━
Output separate JSON objects. DO NOT wrap in a single array.
DO NOT use markdown code blocks (like \`\`\`json ... \`\`\`).
Output RAW JSON ONLY. No prelude, no postscript.
Missing any object = PROTOCOL FAILURE.
ENSURE VALID JSON SYNTAX (escape quotes in strings).
0. Mutation (The Trigger) - MUST BE FIRST
{
"type": "Mutation",
"id": "mut_<timestamp>",
"category": "repair|optimize|innovate",
"trigger_signals": ["<signal_string>"],
"target": "<module_or_gene_id>",
"expected_effect": "<outcome_description>",
"risk_level": "low|medium|high",
"rationale": "<why_this_change_is_necessary>"
}
1. PersonalityState (The Mood)
{
"type": "PersonalityState",
"rigor": 0.0-1.0,
"creativity": 0.0-1.0,
"verbosity": 0.0-1.0,
"risk_tolerance": 0.0-1.0,
"obedience": 0.0-1.0
}
2. EvolutionEvent (The Record)
{
"type": "EvolutionEvent",
"schema_version": "1.5.0",
"id": "evt_<timestamp>",
"parent": <parent_evt_id|null>,
"intent": "repair|optimize|innovate",
"signals": ["<signal_string>"],
"genes_used": ["<gene_id>"],
"mutation_id": "<mut_id>",
"personality_state": { ... },
"blast_radius": { "files": N, "lines": N },
"outcome": { "status": "success|failed", "score": 0.0-1.0 }
}
3. Gene (The Knowledge)
- Reuse/update existing ID if possible. Create new only if novel pattern.
- ID MUST be descriptive: gene_<descriptive_name> (e.g., gene_retry_on_timeout)
- NEVER use timestamps, random numbers, or tool names (cursor, vscode, etc.) in IDs
- summary MUST be a clear human-readable sentence describing what the Gene does
{
"type": "Gene",
"schema_version": "1.5.0",
"id": "gene_<descriptive_name>",
"summary": "<clear description of what this gene does>",
"category": "repair|optimize|innovate",
"signals_match": ["<pattern>"],
"preconditions": ["<condition>"],
"strategy": ["<step_1>", "<step_2>"],
"constraints": { "max_files": N, "forbidden_paths": [] },
"validation": ["<node_command>"]
}
4. Capsule (The Result)
- Only on success. Reference Gene used.
{
"type": "Capsule",
"schema_version": "1.5.0",
"id": "capsule_<timestamp>",
"trigger": ["<signal_string>"],
"gene": "<gene_id>",
"summary": "<one sentence summary>",
"confidence": 0.0-1.0,
"blast_radius": { "files": N, "lines": N }
}
`.trim();
// Builds an "Anti-Pattern Zone" prompt section listing up to 3 of the
// most recent failed capsules whose trigger signals overlap the current
// signals by at least 40%, so the model avoids repeating failed
// approaches. Returns '' when nothing relevant matches.
function buildAntiPatternZone(failedCapsules, signals) {
  if (!Array.isArray(failedCapsules) || failedCapsules.length === 0) return '';
  if (!Array.isArray(signals) || signals.length === 0) return '';
  const sigSet = new Set(signals.map(s => String(s).toLowerCase()));
  const matched = [];
  // Walk newest-first so the most recent failures win.
  for (let i = failedCapsules.length - 1; i >= 0 && matched.length < 3; i--) {
    const fc = failedCapsules[i];
    if (!fc) continue;
    const triggers = Array.isArray(fc.trigger) ? fc.trigger : [];
    let overlap = 0;
    for (const t of triggers) {
      if (sigSet.has(String(t).toLowerCase())) overlap += 1;
    }
    if (triggers.length > 0 && overlap / triggers.length >= 0.4) {
      matched.push(fc);
    }
  }
  if (matched.length === 0) return '';
  const rendered = matched.map((fc, idx) => {
    const diffPreview = fc.diff_snapshot ? String(fc.diff_snapshot).slice(0, 500) : '(no diff)';
    return [
      ' ' + (idx + 1) + '. Gene: ' + (fc.gene || 'unknown') + ' | Signals: [' + (fc.trigger || []).slice(0, 4).join(', ') + ']',
      ' Failure: ' + String(fc.failure_reason || 'unknown').slice(0, 300),
      ' Diff (first 500 chars): ' + diffPreview.replace(/\n/g, ' '),
    ].join('\n');
  });
  return '\nContext [Anti-Pattern Zone] (AVOID these failed approaches):\n' + rendered.join('\n') + '\n';
}
// Renders cross-agent hub "lessons" as a prompt section, split into
// strategies that worked (positive) and pitfalls to avoid (negative).
// At most 6 lessons are shown; each entry is truncated to 300 chars and
// its source node id to 20 chars. Returns '' when nothing usable exists.
// FIX: removed an unused `sigSet` local left over from an earlier
// signal-filtering pass — lessons are currently NOT filtered by signal
// (the `signals` parameter is kept for interface compatibility).
function buildLessonsBlock(hubLessons, signals) {
  if (!Array.isArray(hubLessons) || hubLessons.length === 0) return '';
  var positive = [];
  var negative = [];
  for (var i = 0; i < hubLessons.length && (positive.length + negative.length) < 6; i++) {
    var l = hubLessons[i];
    if (!l || !l.content) continue;
    var entry = ' - [' + (l.scenario || l.lesson_type || '?') + '] ' + String(l.content).slice(0, 300);
    if (l.source_node_id) entry += ' (from: ' + String(l.source_node_id).slice(0, 20) + ')';
    if (l.lesson_type === 'negative') {
      negative.push(entry);
    } else {
      positive.push(entry);
    }
  }
  if (positive.length === 0 && negative.length === 0) return '';
  var parts = ['\nContext [Lessons from Ecosystem] (Cross-agent learned experience):'];
  if (positive.length > 0) {
    parts.push(' Strategies that WORKED:');
    parts.push(positive.join('\n'));
  }
  if (negative.length > 0) {
    parts.push(' Pitfalls to AVOID:');
    parts.push(negative.join('\n'));
  }
  parts.push(' Apply relevant lessons. Ignore irrelevant ones.\n');
  return parts.join('\n');
}
// Wraps the recent evolution narrative (up to 3000 chars) in a labelled
// prompt section. Silently returns '' when the narrative is missing or
// fails to load — best-effort by design.
function buildNarrativeBlock() {
  try {
    const narrative = loadNarrativeSummary(3000);
    return narrative
      ? `\nContext [Evolution Narrative] (Recent decisions and outcomes -- learn from this history):\n${narrative}\n`
      : '';
  } catch (_) {
    return '';
  }
}
// Loads the evolution-principles file (truncated to 2000 chars) and
// formats it as a guiding-directives prompt section. Returns '' when the
// file is absent, empty, or unreadable — best-effort by design.
function buildPrinciplesBlock() {
  const LIMIT = 2000;
  try {
    const filePath = getEvolutionPrinciplesPath();
    if (!fs.existsSync(filePath)) return '';
    const raw = fs.readFileSync(filePath, 'utf8');
    if (!raw.trim()) return '';
    const body = raw.length > LIMIT ? raw.slice(0, LIMIT) + '\n...[TRUNCATED]' : raw;
    return '\nContext [Evolution Principles] (Guiding directives -- align your actions):\n' + body + '\n';
  } catch (_) {
    return '';
  }
}
function buildGepPrompt({
nowIso,
context,
signals,
selector,
parentEventId,
selectedGene,
capsuleCandidates,
genesPreview,
capsulesPreview,
capabilityCandidatesPreview,
externalCandidatesPreview,
hubMatchedBlock,
cycleId,
recentHistory,
failedCapsules,
hubLessons,
strategyPolicy,
}) {
const parentValue = parentEventId ? `"${parentEventId}"` : 'null';
const selectedGeneId = selectedGene && selectedGene.id ? selectedGene.id : 'gene_<name>';
const envFingerprint = captureEnvFingerprint();
const cycleLabel = cycleId ? ` Cycle #${cycleId}` : '';
// Extract strategy from selected gene if available
let strategyBlock = "";
if (selectedGene && selectedGene.strategy && Array.isArray(selectedGene.strategy)) {
strategyBlock = `
ACTIVE STRATEGY (${selectedGeneId}):
${selectedGene.strategy.map((s, i) => `${i + 1}. ${s}`).join('\n')}
ADHERE TO THIS STRATEGY STRICTLY.
`.trim();
} else {
// Fallback strategy if no gene is selected or strategy is missing
strategyBlock = `
ACTIVE STRATEGY (Generic):
1. Analyze signals and context.
2. Select or create a Gene that addresses the root cause.
3. Apply minimal, safe changes.
4. Validate changes strictly.
5. Solidify knowledge.
`.trim();
}
let strategyPolicyBlock = '';
if (strategyPolicy && Array.isArray(strategyPolicy.directives) && strategyPolicy.directives.length > 0) {
strategyPolicyBlock = `
ADAPTIVE STRATEGY POLICY:
${strategyPolicy.directives.map((s, i) => `${i + 1}. ${s}`).join('\n')}
${strategyPolicy.forceInnovate ? 'You MUST prefer INNOVATE unless a critical blocking error is present.' : ''}
${strategyPolicy.cautiousExecution ? 'You MUST reduce blast radius and avoid broad refactors in this cycle.' : ''}
`.trim();
}
// Use intelligent truncation
const executionContext = truncateContext(context, 20000);
// Strict Schema Injection
const schemaSection = SCHEMA_DEFINITIONS.replace('<parent_evt_id|null>', parentValue);
// Reduce noise by filtering capabilityCandidatesPreview if too large
// If a gene is selected, we need less noise from capabilities
let capsPreview = capabilityCandidatesPreview || '(none)';
const capsLimit = selectedGene ? 500 : 2000;
if (capsPreview.length > capsLimit) {
capsPreview = capsPreview.slice(0, capsLimit) + "\n...[TRUNCATED_CAPABILITIES]...";
}
// Optimize signals display: truncate long signals and limit count
const uniqueSignals = Array.from(new Set(signals || []));
const optimizedSignals = uniqueSignals.slice(0, 50).map(s => {
if (typeof s === 'string' && s.length > 200) {
return s.slice(0, 200) + '...[TRUNCATED_SIGNAL]';
}
return s;
});
if (uniqueSignals.length > 50) {
optimizedSignals.push(`...[TRUNCATED ${uniqueSignals.length - 50} SIGNALS]...`);
}
const formattedGenes = formatAssetPreview(genesPreview);
const formattedCapsules = formatAssetPreview(capsulesPreview);
// [2026-02-14] Innovation Catalyst Integration
// If stagnation is detected, inject concrete innovation ideas into the prompt.
let innovationBlock = '';
const stagnationSignals = [
'evolution_stagnation_detected',
'stable_success_plateau',
'repair_loop_detected',
'force_innovation_after_repair_loop',
'empty_cycle_loop_detected',
'evolution_saturation'
];
if (uniqueSignals.some(s => stagnationSignals.includes(s))) {
const ideas = generateInnovationIdeas();
if (ideas && ideas.length > 0) {
innovationBlock = `
Context [Innovation Catalyst] (Stagnation Detected - Consider These Ideas):
${ideas.join('\n')}
`;
}
}
// [2026-02-14] Strict Stagnation Directive
// If uniqueSignals contains 'evolution_stagnation_detected' or 'stable_success_plateau',
// inject a MANDATORY directive to force innovation and forbid repair/optimize if not strictly necessary.
if (uniqueSignals.includes('evolution_stagnation_detected') || uniqueSignals.includes('stable_success_plateau')) {
const stagnationDirective = `
*** CRITICAL STAGNATION DIRECTIVE ***
System has detected stagnation (repetitive cycles or lack of progress).
You MUST choose INTENT: INNOVATE.
You MUST NOT choose repair or optimize unless there is a critical blocking error (log_error).
Prefer implementing one of the Innovation Catalyst ideas above.
`;
innovationBlock += stagnationDirective;
}
// [2026-02-14] Recent History Integration
let historyBlock = '';
if (recentHistory && recentHistory.length > 0) {
historyBlock = `
Recent Evolution History (last 8 cycles -- DO NOT repeat the same intent+signal+gene):
${recentHistory.map((h, i) => ` ${i + 1}. [${h.intent}] signals=[${h.signals.slice(0, 2).join(', ')}] gene=${h.gene_id} outcome=${h.outcome.status} @${h.timestamp}`).join('\n')}
IMPORTANT: If you see 3+ consecutive "repair" cycles with the same gene, you MUST switch to "innovate" intent.
`.trim();
}
// Refactor prompt assembly to minimize token usage and maximize clarity
// UPDATED: 2026-02-14 (Optimized Asset Embedding & Strict Schema v2.5 - JSON-Only Hardening)
const basePrompt = `
GEP — GENOME EVOLUTION PROTOCOL (v1.10.3 STRICT)${cycleLabel} [${nowIso}]
You are a protocol-bound evolution engine. Compliance overrides optimality.
${schemaSection}
━━━━━━━━━━━━━━━━━━━━━━
II. Directives & Logic
━━━━━━━━━━━━━━━━━━━━━━
1. Intent: ${selector && selector.intent ? selector.intent.toUpperCase() : 'UNKNOWN'}
Reason: ${(selector && selector.reason) ? (Array.isArray(selector.reason) ? selector.reason.join('; ') : selector.reason) : 'No reason provided.'}
2. Selection: Selected Gene "${selectedGeneId}".
${strategyBlock}
${strategyPolicyBlock ? '\n' + strategyPolicyBlock : ''}
3. Execution: Apply changes (tool calls). Repair/Optimize: small/reversible. Innovate: new skills in \`skills/<name>/\`.
4. Validation: Run gene's validation steps. Fail = ROLLBACK.
5. Solidify: Output 5 Mandatory Objects. Update Gene/Capsule files.
6. Report: Use \`feishu-evolver-wrapper/report.js\`. Describe WHAT/WHY.
PHILOSOPHY:
- Automate Patterns: 3+ manual occurrences = tool.
- Innovate > Maintain: 60% innovation.
- Robustness: Fix recurring errors permanently.
- Blast Radius Control (CRITICAL):
* Check file count BEFORE editing. > 80% of max_files = STOP.
* System hard cap: 60 files / 20000 lines per cycle.
* Repair: fix ONLY broken files. Do NOT reinstall/bulk-copy.
* Prefer targeted edits.
- Strictness: NO CHITCHAT. NO MARKDOWN WRAPPERS around JSON. Output RAW JSON objects separated by newlines.
- NO "Here is the plan" or conversational filler. START IMMEDIATELY WITH JSON.
CONSTRAINTS:
- No \`exec\` for messaging (use feishu-post/card).
- \`exec\` usage: Only for background tasks. LOG IT. Optimize usage to avoid high token burn.
- New skills -> \`skills/<name>/\`.
- NEVER modify \`skills/evolver/\` or \`skills/feishu-evolver-wrapper/\`. These are managed through the human-reviewed release pipeline.
Self-modification causes instability and is blocked by solidify. To enable (NOT recommended): set EVOLVE_ALLOW_SELF_MODIFY=true.
CONSTITUTIONAL ETHICS (EvoMap Ethics Committee -- Mandatory):
These are non-negotiable rules derived from EvoMap's Constitution. Violation = FAILED + ROLLBACK.
1. HUMAN WELFARE PRIORITY: Never create tools, scripts, or strategies that could harm humans,
enable surveillance, social engineering, phishing, or manipulation of human behavior.
2. CARBON-SILICON SYMBIOSIS: Evolution must serve both human and agent interests.
Never attempt to replace, marginalize, or deceive human users.
3. TRANSPARENCY: All actions must be auditable. Never hide, obfuscate, or conceal
the intent or effect of mutations. No steganography or covert channels.
4. FAIRNESS: Never create monopolistic strategies that block other agents.
Fair competition through quality, not exclusion.
5. SAFETY: Never bypass, disable, or weaken safety mechanisms, guardrails,
validation checks, or security controls. Never create tools to do so.
- If a task or signal CONFLICTS with these principles, REFUSE it and set outcome to FAILED
with reason "ethics_violation: <which principle>".
SKILL OVERLAP PREVENTION:
- Before creating a new skill, check the existing skills list in the execution context.
- If a skill with similar functionality already exists (e.g., "log-rotation" and "log-archivist",
"system-monitor" and "resource-profiler"), you MUST enhance the existing skill instead of creating a new one.
- Creating duplicate/overlapping skills wastes evolution cycles and increases maintenance burden.
- Violation = mark outcome as FAILED with reason "skill_overlap".
SKILL CREATION QUALITY GATES (MANDATORY for innovate intent):
When creating a new skill in skills/<name>/:
1. STRUCTURE: Follow the standard skill layout:
skills/<name>/
|- index.js (required: main entry with working exports)
|- SKILL.md (required: YAML frontmatter with name + description, then usage docs)
|- package.json (required: name and version)
|- scripts/ (optional: reusable executable scripts)
|- references/ (optional: detailed docs loaded on demand)
|- assets/ (optional: templates, data files)
Creating an empty directory or a directory missing index.js = FAILED.
Do NOT create unnecessary files (README.md, CHANGELOG.md, INSTALLATION_GUIDE.md, etc.).
2. SKILL NAMING (CRITICAL):
a) <name> MUST be descriptive kebab-case (e.g., "log-rotation", "retry-handler", "cache-manager")
b) NEVER use timestamps, random numbers, tool names (cursor, vscode), or UUIDs as names
c) Names like "cursor-1773331925711", "skill-12345", "fix-1" = FAILED
d) Name must be 2-6 descriptive words separated by hyphens, conveying what the skill does
e) Good: "http-retry-with-backoff", "log-file-rotation", "config-validator"
f) Bad: "cursor-auto-1234", "new-skill", "test-skill", "my-skill"
3. SKILL.MD FRONTMATTER: Every SKILL.md MUST start with YAML frontmatter:
---
name: <skill-name>
description: <what it does and when to use it>
---
The name MUST follow the naming rules above.
The description is the triggering mechanism -- include WHAT the skill does and WHEN to use it.
Description must be a clear, complete sentence (min 20 chars). Generic descriptions = FAILED.
4. CONCISENESS: SKILL.md body should be under 500 lines. Keep instructions lean.
Only include information the agent does not already know. Move detailed reference
material to references/ files, not into SKILL.md itself.
5. EXPORT VERIFICATION: Every exported function must be importable.
Run: node -e "const s = require('./skills/<name>'); console.log(Object.keys(s))"
If this fails, the skill is broken. Fix before solidify.
6. NO HARDCODED SECRETS: Never embed API keys, tokens, or secrets in code.
Use process.env or .env references. Hardcoded App ID, App Secret, Bearer tokens = FAILED.
7. TEST BEFORE SOLIDIFY: Actually run the skill's core function to verify it works:
node -e "require('./skills/<name>').main ? require('./skills/<name>').main() : console.log('ok')"
Scripts in scripts/ must also be tested by executing them.
8. ATOMIC CREATION: Create ALL files for a skill in a single cycle.
Do not create a directory in one cycle and fill it in the next.
Empty directories from failed cycles will be automatically cleaned up on rollback.
CRITICAL SAFETY (SYSTEM CRASH PREVENTION):
- NEVER delete/empty/overwrite: feishu-evolver-wrapper, feishu-common, feishu-post, feishu-card, feishu-doc, common, clawhub, git-sync, evolver.
- NEVER delete root files: MEMORY.md, SOUL.md, IDENTITY.md, AGENTS.md, USER.md, HEARTBEAT.md, RECENT_EVENTS.md, TOOLS.md, openclaw.json, .env, package.json.
- Fix broken skills; DO NOT delete and recreate.
- Violation = ROLLBACK + FAILED.
COMMON FAILURE PATTERNS:
- Blast radius exceeded.
- Omitted Mutation object.
- Merged objects into one JSON.
- Hallucinated "type": "Logic".
- "id": "mut_undefined".
- Missing "trigger_signals".
- Unrunnable validation steps.
- Markdown code blocks wrapping JSON (FORBIDDEN).
FAILURE STREAK AWARENESS:
- If "consecutive_failure_streak_N" or "failure_loop_detected":
1. Change approach (do NOT repeat failed gene).
2. Pick SIMPLER fix.
3. Respect "ban_gene:<id>".
Final Directive: Every cycle must leave the system measurably better.
START IMMEDIATELY WITH RAW JSON (Mutation Object first).
DO NOT WRITE ANY INTRODUCTORY TEXT.
Context [Signals]:
${JSON.stringify(optimizedSignals)}
Context [Env Fingerprint]:
${JSON.stringify(envFingerprint, null, 2)}
${innovationBlock}
Context [Injection Hint]:
${process.env.EVOLVE_HINT ? process.env.EVOLVE_HINT : '(none)'}
Context [Gene Preview] (Reference for Strategy):
${formattedGenes}
Context [Capsule Preview] (Reference for Past Success):
${formattedCapsules}
Context [Capability Candidates]:
${capsPreview}
Context [Hub Matched Solution]:
${hubMatchedBlock || '(no hub match)'}
Context [External Candidates]:
${externalCandidatesPreview || '(none)'}
${buildAntiPatternZone(failedCapsules, signals)}${buildLessonsBlock(hubLessons, signals)}
${historyBlock}
${buildNarrativeBlock()}
${buildPrinciplesBlock()}
Context [Execution]:
${executionContext}
━━━━━━━━━━━━━━━━━━━━━━
MANDATORY POST-SOLIDIFY STEP (Wrapper Authority -- Cannot Be Skipped)
━━━━━━━━━━━━━━━━━━━━━━
After solidify, a status summary file MUST exist for this cycle.
Preferred path: evolver core auto-writes it during solidify.
The wrapper will handle reporting AFTER git push.
If core write is unavailable for any reason, create fallback status JSON manually.
Write a JSON file with your status (cross-platform):
\`\`\`bash
node -e "require('fs').mkdirSync('${(process.env.WORKSPACE_DIR || '.').replace(/\\/g, '/')}/logs',{recursive:true});require('fs').writeFileSync('${(process.env.WORKSPACE_DIR || '.').replace(/\\/g, '/')}/logs/status_${cycleId}.json',JSON.stringify({result:'success',en:'Status: [INTENT] ...',zh:'...'},null,2))"
\`\`\`
Rules:
- "en" field: English status. "zh" field: Chinese status. Content must match (different language).
- Add "result" with value success or failed.
- INTENT must be one of: INNOVATION, REPAIR, OPTIMIZE (or Chinese: 创新, 修复, 优化)
- Do NOT use generic text like "Step Complete", "Cycle finished", "周期已完成". Describe the actual work.
- Example:
{"result":"success","en":"Status: [INNOVATION] Created auto-scheduler that syncs calendar to HEARTBEAT.md","zh":"状态: [创新] 创建了自动调度器,将日历同步到 HEARTBEAT.md"}
`.trim();
const maxChars = Number.isFinite(Number(process.env.GEP_PROMPT_MAX_CHARS)) ? Number(process.env.GEP_PROMPT_MAX_CHARS) : 50000;
if (basePrompt.length <= maxChars) return basePrompt;
const executionContextIndex = basePrompt.indexOf("Context [Execution]:");
if (executionContextIndex > -1) {
const prefix = basePrompt.slice(0, executionContextIndex + 20);
const currentExecution = basePrompt.slice(executionContextIndex + 20);
// Hard cap the execution context length to avoid token limit errors even if MAX_CHARS is high.
// 20000 chars is roughly 5k tokens, which is safe for most models alongside the rest of the prompt.
const EXEC_CONTEXT_CAP = 20000;
const allowedExecutionLength = Math.min(EXEC_CONTEXT_CAP, Math.max(0, maxChars - prefix.length - 100));
return prefix + "\n" + currentExecution.slice(0, allowedExecutionLength) + "\n...[TRUNCATED]...";
}
return basePrompt.slice(0, maxChars) + "\n...[TRUNCATED]...";
}
// Prompt-construction API consumed by the evolution engine.
module.exports = { buildGepPrompt, buildReusePrompt, buildHubMatchedBlock, buildLessonsBlock, buildNarrativeBlock, buildPrinciplesBlock };

View File

@@ -0,0 +1,212 @@
// ---------------------------------------------------------------------------
// questionGenerator -- analyzes evolution context (signals, session transcripts,
// recent events) and generates proactive questions for the Hub bounty system.
//
// Questions are sent via the A2A fetch payload.questions field. The Hub creates
// bounties from them, enabling multi-agent collaborative problem solving.
// ---------------------------------------------------------------------------
const fs = require('fs');
const path = require('path');
const { getEvolutionDir } = require('./paths');
const QUESTION_STATE_FILE = path.join(getEvolutionDir(), 'question_generator_state.json');
const MIN_INTERVAL_MS = 3 * 60 * 60 * 1000; // at most once per 3 hours
const MAX_QUESTIONS_PER_CYCLE = 2;
// Load the persisted generator state (last ask time + recent question history).
// Missing or unparsable state files yield a fresh default state.
function readState() {
  const fallback = { lastAskedAt: null, recentQuestions: [] };
  try {
    if (!fs.existsSync(QUESTION_STATE_FILE)) return fallback;
    return JSON.parse(fs.readFileSync(QUESTION_STATE_FILE, 'utf8'));
  } catch (_) {
    return fallback;
  }
}
// Persist generator state as pretty-printed JSON, creating the parent
// directory on demand. Best-effort: any I/O failure is swallowed.
function writeState(state) {
  try {
    fs.mkdirSync(path.dirname(QUESTION_STATE_FILE), { recursive: true });
    fs.writeFileSync(QUESTION_STATE_FILE, JSON.stringify(state, null, 2) + '\n');
  } catch (_) {}
}
// True when `question` was already asked recently, either verbatim
// (case-insensitive) or with >70% word-set overlap against a prior question.
// Words of length <= 2 are ignored for the fuzzy comparison.
function isDuplicate(question, recentQuestions) {
  var qLower = question.toLowerCase();
  // Hoisted out of the loop: the candidate's word set never changes per call
  // (previously recomputed on every iteration).
  var qWords = new Set(qLower.split(/\s+/).filter(function (w) { return w.length > 2; }));
  for (var i = 0; i < recentQuestions.length; i++) {
    var prev = String(recentQuestions[i] || '').toLowerCase();
    if (prev === qLower) return true;
    // fuzzy: if >70% overlap by word set
    var pWords = new Set(prev.split(/\s+/).filter(function (w) { return w.length > 2; }));
    if (qWords.size === 0 || pWords.size === 0) continue;
    var overlap = 0;
    qWords.forEach(function (w) { if (pWords.has(w)) overlap++; });
    if (overlap / Math.max(qWords.size, pWords.size) > 0.7) return true;
  }
  return false;
}
/**
* Generate proactive questions based on evolution context.
*
* @param {object} opts
* @param {string[]} opts.signals - current cycle signals
* @param {object[]} opts.recentEvents - recent EvolutionEvent objects
* @param {string} opts.sessionTranscript - recent session transcript
* @param {string} opts.memorySnippet - MEMORY.md content
* @returns {Array<{ question: string, amount: number, signals: string[] }>}
*/
// Scan current signals + session transcript for situations worth escalating
// to the Hub bounty system. Returns up to MAX_QUESTIONS_PER_CYCLE question
// objects ({ question, amount, signals }), rate-limited to one batch per
// MIN_INTERVAL_MS and de-duplicated against the last 20 asked questions.
function generateQuestions(opts) {
  var o = opts || {};
  var signals = Array.isArray(o.signals) ? o.signals : [];
  var recentEvents = Array.isArray(o.recentEvents) ? o.recentEvents : [];
  var transcript = String(o.sessionTranscript || '');
  // NOTE(review): memorySnippet is normalized here but no strategy below
  // reads it yet -- confirm whether it is reserved for a future strategy.
  var memory = String(o.memorySnippet || '');
  var state = readState();
  // Rate limit: don't ask too frequently
  if (state.lastAskedAt) {
    var elapsed = Date.now() - new Date(state.lastAskedAt).getTime();
    if (elapsed < MIN_INTERVAL_MS) return [];
  }
  var candidates = [];
  var signalSet = new Set(signals);
  // --- Strategy 1: Recurring errors the agent cannot resolve ---
  if (signalSet.has('recurring_error') || signalSet.has('high_failure_ratio')) {
    var errSig = signals.find(function(s) { return s.startsWith('recurring_errsig'); });
    if (errSig) {
      // Strip the "recurring_errsig(Nx):" prefix, keep a short error excerpt.
      var errDetail = errSig.replace(/^recurring_errsig\(\d+x\):/, '').trim().slice(0, 120);
      candidates.push({
        question: 'Recurring error in evolution cycle that auto-repair cannot resolve: ' + errDetail + ' -- What approaches or patches have worked for similar issues?',
        amount: 0,
        signals: ['recurring_error', 'auto_repair_failed'],
        priority: 3,
      });
    }
  }
  // --- Strategy 2: Capability gaps detected from user conversations ---
  if (signalSet.has('capability_gap') || signalSet.has('unsupported_input_type')) {
    var gapContext = '';
    var lines = transcript.split('\n');
    // Use the first transcript line that mentions an unsupported operation.
    for (var i = 0; i < lines.length; i++) {
      if (/not supported|cannot|unsupported|not implemented/i.test(lines[i])) {
        gapContext = lines[i].replace(/\s+/g, ' ').trim().slice(0, 150);
        break;
      }
    }
    if (gapContext) {
      candidates.push({
        question: 'Capability gap detected in agent environment: ' + gapContext + ' -- How can this be addressed or what alternative approaches exist?',
        amount: 0,
        signals: ['capability_gap'],
        priority: 2,
      });
    }
  }
  // --- Strategy 3: Stagnation / saturation -- seek new directions ---
  if (signalSet.has('evolution_saturation') || signalSet.has('force_steady_state')) {
    // Collect the primary gene used in each of the last 5 events.
    var recentGenes = [];
    var last5 = recentEvents.slice(-5);
    for (var j = 0; j < last5.length; j++) {
      var genes = last5[j].genes_used;
      if (Array.isArray(genes) && genes.length > 0) {
        recentGenes.push(genes[0]);
      }
    }
    var uniqueGenes = Array.from(new Set(recentGenes));
    candidates.push({
      question: 'Agent evolution has reached saturation after exhausting genes: [' + uniqueGenes.join(', ') + ']. What new evolution directions, automation patterns, or capability genes would be most valuable?',
      amount: 0,
      signals: ['evolution_saturation', 'innovation_needed'],
      priority: 1,
    });
  }
  // --- Strategy 4: Consecutive failure streak -- seek external help ---
  var failStreak = signals.find(function(s) { return s.startsWith('consecutive_failure_streak_'); });
  if (failStreak) {
    var streakCount = parseInt(failStreak.replace('consecutive_failure_streak_', ''), 10) || 0;
    // Only escalate after 4+ consecutive failures.
    if (streakCount >= 4) {
      var failGene = signals.find(function(s) { return s.startsWith('ban_gene:'); });
      var failGeneId = failGene ? failGene.replace('ban_gene:', '') : 'unknown';
      candidates.push({
        question: 'Agent has failed ' + streakCount + ' consecutive evolution cycles (last gene: ' + failGeneId + '). The current approach is exhausted. What alternative strategies or environmental fixes should be tried?',
        amount: 0,
        signals: ['failure_streak', 'external_help_needed'],
        priority: 3,
      });
    }
  }
  // --- Strategy 5: User feature requests the agent can amplify ---
  if (signalSet.has('user_feature_request') || signals.some(function (s) { return String(s).startsWith('user_feature_request:'); })) {
    var featureLines = transcript.split('\n').filter(function(l) {
      return /\b(add|implement|create|build|i want|i need|please add)\b/i.test(l);
    });
    if (featureLines.length > 0) {
      var featureContext = featureLines[0].replace(/\s+/g, ' ').trim().slice(0, 150);
      candidates.push({
        question: 'User requested a feature that may benefit from community solutions: ' + featureContext + ' -- Are there existing implementations or best practices for this?',
        amount: 0,
        signals: ['user_feature_request', 'community_solution_sought'],
        priority: 1,
      });
    }
  }
  // --- Strategy 6: Performance bottleneck -- seek optimization patterns ---
  if (signalSet.has('perf_bottleneck')) {
    var perfLines = transcript.split('\n').filter(function(l) {
      return /\b(slow|timeout|latency|bottleneck|high cpu|high memory)\b/i.test(l);
    });
    if (perfLines.length > 0) {
      var perfContext = perfLines[0].replace(/\s+/g, ' ').trim().slice(0, 150);
      candidates.push({
        question: 'Performance bottleneck detected: ' + perfContext + ' -- What optimization strategies or architectural patterns address this?',
        amount: 0,
        signals: ['perf_bottleneck', 'optimization_sought'],
        priority: 2,
      });
    }
  }
  if (candidates.length === 0) return [];
  // Sort by priority (higher = more urgent)
  candidates.sort(function(a, b) { return b.priority - a.priority; });
  // De-duplicate against recently asked questions
  var recentQTexts = Array.isArray(state.recentQuestions) ? state.recentQuestions : [];
  var filtered = [];
  for (var fi = 0; fi < candidates.length && filtered.length < MAX_QUESTIONS_PER_CYCLE; fi++) {
    if (!isDuplicate(candidates[fi].question, recentQTexts)) {
      filtered.push(candidates[fi]);
    }
  }
  if (filtered.length === 0) return [];
  // Update state
  var newRecentQuestions = recentQTexts.concat(filtered.map(function(q) { return q.question; }));
  // Keep only last 20 questions in history
  if (newRecentQuestions.length > 20) {
    newRecentQuestions = newRecentQuestions.slice(-20);
  }
  writeState({
    lastAskedAt: new Date().toISOString(),
    recentQuestions: newRecentQuestions,
  });
  // Strip internal priority field before returning
  return filtered.map(function(q) {
    return { question: q.question, amount: q.amount, signals: q.signals };
  });
}
// Public API: proactive question generation for the A2A fetch cycle.
module.exports = { generateQuestions };

177
src/gep/reflection.js Normal file
View File

@@ -0,0 +1,177 @@
'use strict';
const fs = require('fs');
const path = require('path');
const { getReflectionLogPath, getEvolutionDir } = require('./paths');
// Reflection cadence: reflect more often while failing, less while cruising.
const REFLECTION_INTERVAL_DEFAULT = 5;   // mixed recent outcomes
const REFLECTION_INTERVAL_SUCCESS = 8;   // 3 straight successes: relax
const REFLECTION_INTERVAL_FAILURE = 3;   // 3 straight failures: tighten
const REFLECTION_COOLDOWN_MS = 30 * 60 * 1000;
// Keep the export name for backward compat.
const REFLECTION_INTERVAL_CYCLES = REFLECTION_INTERVAL_DEFAULT;

// Best-effort recursive mkdir; failures (e.g. permissions) are ignored.
function ensureDir(dir) {
  try {
    if (!fs.existsSync(dir)) {
      fs.mkdirSync(dir, { recursive: true });
    }
  } catch (_) {}
}

// Choose the reflection interval from the trailing 3 cycle outcomes:
// all success -> longer interval, all failed -> shorter, otherwise default.
function computeReflectionInterval(recentEvents) {
  const events = Array.isArray(recentEvents) ? recentEvents : [];
  if (events.length < 3) return REFLECTION_INTERVAL_DEFAULT;
  try {
    const lastThree = events.slice(-3).map(function (e) {
      return e && e.outcome ? e.outcome.status : undefined;
    });
    if (lastThree.every(function (s) { return s === 'success'; })) {
      return REFLECTION_INTERVAL_SUCCESS;
    }
    if (lastThree.every(function (s) { return s === 'failed'; })) {
      return REFLECTION_INTERVAL_FAILURE;
    }
  } catch (_) {}
  return REFLECTION_INTERVAL_DEFAULT;
}
// Decide whether this cycle should trigger a strategic reflection:
// cycleCount must be a positive multiple of the adaptive interval, and the
// previous reflection log write must be older than the cooldown window.
function shouldReflect({ cycleCount, recentEvents }) {
  const interval = computeReflectionInterval(recentEvents);
  const onBoundary = Number.isFinite(cycleCount) &&
    cycleCount >= interval &&
    cycleCount % interval === 0;
  if (!onBoundary) return false;
  const logPath = getReflectionLogPath();
  try {
    if (fs.existsSync(logPath) &&
        Date.now() - fs.statSync(logPath).mtimeMs < REFLECTION_COOLDOWN_MS) {
      return false;
    }
  } catch (_) {}
  return true;
}
// Map reflection-time signals to small persona parameter nudges.
// At most two mutations are returned per reflection.
function buildSuggestedMutations(signals) {
  const sigs = Array.isArray(signals) ? signals : [];
  const stagnation = sigs.some(function (s) {
    return s === 'stable_success_plateau' ||
      s === 'evolution_stagnation_detected' ||
      s === 'empty_cycle_loop_detected';
  });
  const erroring = sigs.some(function (s) {
    return s === 'log_error' ||
      String(s).startsWith('errsig:') ||
      String(s).startsWith('errsig_norm:');
  });
  const gap = sigs.some(function (s) {
    return s === 'capability_gap' || s === 'external_opportunity';
  });
  const muts = [];
  if (stagnation) {
    muts.push({ param: 'creativity', delta: +0.05, reason: 'stagnation detected in reflection' });
  }
  if (erroring) {
    muts.push({ param: 'rigor', delta: +0.05, reason: 'errors detected in reflection' });
  }
  if (gap) {
    muts.push({ param: 'risk_tolerance', delta: +0.05, reason: 'capability gap in reflection' });
  }
  return muts.slice(0, 2);
}
// Assemble the LLM prompt for a strategic reflection: recent cycle
// statistics, active signals, memory-graph advice, and a narrative excerpt,
// followed by the fixed questions the model must answer as a JSON object.
function buildReflectionContext({ recentEvents, signals, memoryAdvice, narrative }) {
  const parts = ['You are performing a strategic reflection on recent evolution cycles.'];
  parts.push('Analyze the patterns below and provide concise strategic guidance.');
  parts.push('');
  if (Array.isArray(recentEvents) && recentEvents.length > 0) {
    // Only the last 10 events feed the statistics block.
    const last10 = recentEvents.slice(-10);
    const successCount = last10.filter(e => e && e.outcome && e.outcome.status === 'success').length;
    const failCount = last10.filter(e => e && e.outcome && e.outcome.status === 'failed').length;
    // Histogram of intents across the last 10 cycles.
    const intents = {};
    last10.forEach(e => {
      const i = e && e.intent ? e.intent : 'unknown';
      intents[i] = (intents[i] || 0) + 1;
    });
    // Histogram of primary genes (first entry of genes_used) across cycles.
    const genes = {};
    last10.forEach(e => {
      const g = e && Array.isArray(e.genes_used) && e.genes_used[0] ? e.genes_used[0] : 'unknown';
      genes[g] = (genes[g] || 0) + 1;
    });
    parts.push('## Recent Cycle Statistics (last 10)');
    parts.push(`- Success: ${successCount}, Failed: ${failCount}`);
    parts.push(`- Intent distribution: ${JSON.stringify(intents)}`);
    parts.push(`- Gene usage: ${JSON.stringify(genes)}`);
    parts.push('');
  }
  if (Array.isArray(signals) && signals.length > 0) {
    parts.push('## Current Signals');
    parts.push(signals.slice(0, 20).join(', '));
    parts.push('');
  }
  if (memoryAdvice) {
    parts.push('## Memory Graph Advice');
    if (memoryAdvice.preferredGeneId) {
      parts.push(`- Preferred gene: ${memoryAdvice.preferredGeneId}`);
    }
    if (Array.isArray(memoryAdvice.bannedGeneIds) && memoryAdvice.bannedGeneIds.length > 0) {
      parts.push(`- Banned genes: ${memoryAdvice.bannedGeneIds.join(', ')}`);
    }
    if (memoryAdvice.explanation) {
      parts.push(`- Explanation: ${memoryAdvice.explanation}`);
    }
    parts.push('');
  }
  if (narrative) {
    // Cap narrative size to keep the prompt bounded.
    parts.push('## Recent Evolution Narrative');
    parts.push(String(narrative).slice(0, 3000));
    parts.push('');
  }
  parts.push('## Questions to Answer');
  parts.push('1. Are there persistent signals being ignored?');
  parts.push('2. Is the gene selection strategy optimal, or are we stuck in a local maximum?');
  parts.push('3. Should the balance between repair/optimize/innovate shift?');
  parts.push('4. Are there capability gaps that no current gene addresses?');
  parts.push('5. What single strategic adjustment would have the highest impact?');
  parts.push('');
  parts.push('Respond with a JSON object: { "insights": [...], "strategy_adjustment": "...", "priority_signals": [...] }');
  return parts.join('\n');
}
// Append one reflection entry to the JSONL reflection log. The entry's
// own ts/type fields (if present) override the generated ones.
function recordReflection(reflection) {
  const logPath = getReflectionLogPath();
  ensureDir(path.dirname(logPath));
  const record = {
    ts: new Date().toISOString(),
    type: 'reflection',
    ...reflection,
  };
  fs.appendFileSync(logPath, JSON.stringify(record) + '\n', 'utf8');
}
// Read the last `count` (default 3) entries from the JSONL reflection log.
// Malformed lines are dropped; I/O errors yield an empty array, never a throw.
function loadRecentReflections(count) {
  const n = Number.isFinite(count) ? count : 3;
  const logPath = getReflectionLogPath();
  try {
    if (!fs.existsSync(logPath)) return [];
    const lines = fs.readFileSync(logPath, 'utf8')
      .trim()
      .split('\n')
      .filter(function (l) { return l.length > 0; });
    const parsed = [];
    for (const line of lines.slice(-n)) {
      try {
        parsed.push(JSON.parse(line));
      } catch (_) {
        // skip malformed line
      }
    }
    return parsed;
  } catch (_) {
    return [];
  }
}
// Public reflection API. REFLECTION_INTERVAL_CYCLES is kept for callers
// that still import the old fixed-interval constant.
module.exports = {
  shouldReflect,
  buildReflectionContext,
  recordReflection,
  loadRecentReflections,
  buildSuggestedMutations,
  REFLECTION_INTERVAL_CYCLES,
};

67
src/gep/sanitize.js Normal file
View File

@@ -0,0 +1,67 @@
// Pre-publish payload sanitization.
// Removes sensitive tokens, local paths, emails, and env references
// from capsule payloads before broadcasting to the hub.
// Patterns to redact (replaced with placeholder)
// Regexes describing secrets, credentials, paths, and PII that must never
// leave the machine. Every match is replaced wholesale with [REDACTED].
const REDACT_PATTERNS = [
  // Generic bearer tokens, API keys, secrets, passwords
  /Bearer\s+[A-Za-z0-9\-._~+\/]+=*/g,
  /sk-[A-Za-z0-9]{20,}/g,
  /token[=:]\s*["']?[A-Za-z0-9\-._~+\/]{16,}["']?/gi,
  /api[_-]?key[=:]\s*["']?[A-Za-z0-9\-._~+\/]{16,}["']?/gi,
  /secret[=:]\s*["']?[A-Za-z0-9\-._~+\/]{16,}["']?/gi,
  /password[=:]\s*["']?[^\s"',;)}\]]{6,}["']?/gi,
  // GitHub token families (ghp_, gho_, ghu_, ghs_, github_pat_)
  /ghp_[A-Za-z0-9]{36,}/g,
  /gho_[A-Za-z0-9]{36,}/g,
  /ghu_[A-Za-z0-9]{36,}/g,
  /ghs_[A-Za-z0-9]{36,}/g,
  /github_pat_[A-Za-z0-9_]{22,}/g,
  // AWS access key IDs
  /AKIA[0-9A-Z]{16}/g,
  // OpenAI / Anthropic project tokens
  /sk-proj-[A-Za-z0-9\-_]{20,}/g,
  /sk-ant-[A-Za-z0-9\-_]{20,}/g,
  // npm tokens
  /npm_[A-Za-z0-9]{36,}/g,
  // PEM private key blocks
  /-----BEGIN\s+(?:RSA\s+|EC\s+|DSA\s+|OPENSSH\s+)?PRIVATE\s+KEY-----[\s\S]*?-----END\s+(?:RSA\s+|EC\s+|DSA\s+|OPENSSH\s+)?PRIVATE\s+KEY-----/g,
  // Basic-auth credentials inside URLs (keeps :// and @ intact)
  /(?<=:\/\/)[^@\s]+:[^@\s]+(?=@)/g,
  // Local filesystem paths (Unix home dirs, macOS, Windows drives)
  /\/home\/[^\s"',;)}\]]+/g,
  /\/Users\/[^\s"',;)}\]]+/g,
  /[A-Z]:\\[^\s"',;)}\]]+/g,
  // Email addresses
  /[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}/g,
  // .env file references
  /\.env(?:\.[a-zA-Z]+)?/g,
];
const REDACTED = '[REDACTED]';

// Apply every redaction pattern to a string; non-strings pass through
// untouched. lastIndex is reset per pattern so stateful global regexes
// can never skip matches from a previous call.
function redactString(str) {
  if (typeof str !== 'string') return str;
  return REDACT_PATTERNS.reduce(function (acc, pattern) {
    pattern.lastIndex = 0;
    return acc.replace(pattern, REDACTED);
  }, str);
}
/**
 * Produce a sanitized deep copy of a capsule payload: every string value in
 * the structure is passed through redactString. The original object is never
 * mutated. Uses a JSON round-trip, so (as with any such clone) undefined
 * values and functions are dropped.
 */
function sanitizePayload(capsule) {
  if (!capsule || typeof capsule !== 'object') return capsule;
  const reviver = function (_key, value) {
    return typeof value === 'string' ? redactString(value) : value;
  };
  return JSON.parse(JSON.stringify(capsule), reviver);
}
// Public sanitization API.
module.exports = { sanitizePayload, redactString };

415
src/gep/selector.js Normal file
View File

@@ -0,0 +1,415 @@
const { scoreTagOverlap } = require('./learningSignals');
const { captureEnvFingerprint } = require('./envFingerprint');
// ---------------------------------------------------------------------------
// Lightweight semantic similarity (bag-of-words cosine) for Gene selection.
// Acts as a complement to exact signals_match pattern matching.
// When EMBEDDING_PROVIDER is configured, can be replaced with real embeddings.
// ---------------------------------------------------------------------------
// Weight applied to the bag-of-words cosine score when blended into the
// final gene score (env-tunable via SEMANTIC_MATCH_WEIGHT, default 0.4).
const SEMANTIC_WEIGHT = parseFloat(process.env.SEMANTIC_MATCH_WEIGHT || '0.4') || 0.4;
// High-frequency English words that carry no matching signal.
const STOP_WORDS = new Set([
  'the', 'and', 'for', 'with', 'from', 'that', 'this', 'into', 'when',
  'are', 'was', 'has', 'had', 'not', 'but', 'its', 'can', 'will', 'all',
  'any', 'use', 'may', 'also', 'should', 'would', 'could',
]);
// Lowercase, collapse punctuation to spaces, then drop short and stop words.
function tokenize(text) {
  const cleaned = String(text || '').toLowerCase().replace(/[^a-z0-9_\-]+/g, ' ');
  const out = [];
  for (const word of cleaned.split(/\s+/)) {
    if (word.length >= 2 && !STOP_WORDS.has(word)) out.push(word);
  }
  return out;
}
// Count occurrences of each token into a plain-object term-frequency map.
function buildTermFrequency(tokens) {
  const tf = {};
  tokens.forEach(function (token) {
    tf[token] = (tf[token] || 0) + 1;
  });
  return tf;
}
// Cosine similarity between two term-frequency maps; 0 when either is empty.
function cosineSimilarity(tfA, tfB) {
  let dot = 0;
  let sqA = 0;
  let sqB = 0;
  for (const key of new Set([...Object.keys(tfA), ...Object.keys(tfB)])) {
    const a = tfA[key] || 0;
    const b = tfB[key] || 0;
    dot += a * b;
    sqA += a * a;
    sqB += b * b;
  }
  if (sqA === 0 || sqB === 0) return 0;
  return dot / (Math.sqrt(sqA) * Math.sqrt(sqB));
}
// Semantic (bag-of-words cosine) similarity between a gene's descriptive
// text (signals_match patterns + summary + id) and the current cycle's
// signals. Returns 0 when either side yields no usable tokens.
function scoreGeneSemantic(gene, signals) {
  if (!gene || !signals || signals.length === 0) return 0;
  // flatMap avoids the O(n^2) repeated-array-concat of the previous version.
  const signalTokens = signals.flatMap(function (s) { return tokenize(s); });
  if (signalTokens.length === 0) return 0;
  const sources = Array.isArray(gene.signals_match) ? gene.signals_match.slice() : [];
  if (gene.summary) sources.push(gene.summary);
  if (gene.id) sources.push(gene.id);
  const geneTokens = sources.flatMap(function (s) { return tokenize(s); });
  if (geneTokens.length === 0) return 0;
  return cosineSimilarity(buildTermFrequency(signalTokens), buildTermFrequency(geneTokens));
}
// Match one gene pattern against the cycle signals. Three pattern forms:
//   /body/flags  -> regex tested against each signal (bad regex falls back
//                   to plain substring matching)
//   a|b|c        -> multi-language alias; any branch hitting any signal wins
//   plain text   -> case-insensitive substring
function matchPatternToSignals(pattern, signals) {
  if (!pattern || !signals || signals.length === 0) return false;
  const p = String(pattern);
  const sig = signals.map(function (s) { return String(s); });
  const lastSlash = p.lastIndexOf('/');
  if (p.length >= 2 && p.charAt(0) === '/' && lastSlash > 0) {
    try {
      const re = new RegExp(p.slice(1, lastSlash), p.slice(lastSlash + 1) || 'i');
      return sig.some(function (s) { return re.test(s); });
    } catch (e) {
      // invalid regex: fall through to substring matching below
    }
  }
  if (p.includes('|') && p.charAt(0) !== '/') {
    const branches = p.split('|')
      .map(function (b) { return b.trim().toLowerCase(); })
      .filter(Boolean);
    return branches.some(function (needle) {
      return sig.some(function (s) { return s.toLowerCase().includes(needle); });
    });
  }
  const needle = p.toLowerCase();
  return sig.some(function (s) { return s.toLowerCase().includes(needle); });
}
// Combined gene score: exact pattern hits, plus weighted tag overlap, plus
// weighted bag-of-words similarity. Genes without patterns score on tags
// alone (scaled down), and non-Gene assets score 0.
function scoreGene(gene, signals) {
  if (!gene || gene.type !== 'Gene') return 0;
  const patterns = Array.isArray(gene.signals_match) ? gene.signals_match : [];
  const tagScore = scoreTagOverlap(gene, signals);
  if (patterns.length === 0) {
    return tagScore > 0 ? tagScore * 0.6 : 0;
  }
  let hits = 0;
  patterns.forEach(function (pat) {
    if (matchPatternToSignals(pat, signals)) hits += 1;
  });
  return hits + tagScore * 0.6 + scoreGeneSemantic(gene, signals) * SEMANTIC_WEIGHT;
}
// Boost from epigenetic marks whose context exactly matches the current
// environment fingerprint "platform/arch/node_version" (or "unknown" when
// the fingerprint is empty). Non-numeric boosts count as 0.
function getEpigeneticBoostLocal(gene, envFingerprint) {
  if (!gene || !Array.isArray(gene.epigenetic_marks)) return 0;
  const fp = envFingerprint || {};
  const envContext = [fp.platform, fp.arch, fp.node_version]
    .map(function (v) { return v ? String(v) : ''; })
    .filter(Boolean)
    .join('/') || 'unknown';
  for (const mark of gene.epigenetic_marks) {
    if (mark && mark.context === envContext) return Number(mark.boost) || 0;
  }
  return 0;
}
// Learning-based score adjustment for a gene, clamped to [-1.5, 1.5]:
//   + small bonus per recent success in learning_history (last 8 entries)
//   - larger penalty for hard failures than soft ones
//   + environment-specific epigenetic boost
//   - penalty when recent anti-patterns (last 6) overlap the current signals
function scoreGeneLearning(gene, signals, envFingerprint) {
  if (!gene || gene.type !== 'Gene') return 0;
  let boost = 0;
  const history = Array.isArray(gene.learning_history) ? gene.learning_history.slice(-8) : [];
  for (let i = 0; i < history.length; i++) {
    const entry = history[i];
    if (!entry) continue;
    // Successes add a small bonus; hard failures cost more than soft ones.
    if (entry.outcome === 'success') boost += 0.12;
    else if (entry.mode === 'hard') boost -= 0.22;
    else if (entry.mode === 'soft') boost -= 0.08;
  }
  boost += getEpigeneticBoostLocal(gene, envFingerprint);
  if (Array.isArray(gene.anti_patterns) && gene.anti_patterns.length > 0) {
    let overlapPenalty = 0;
    // Expand current signals into learning tags for overlap detection.
    const signalTags = new Set(require('./learningSignals').expandSignals(signals, ''));
    const recentAntiPatterns = gene.anti_patterns.slice(-6);
    for (let j = 0; j < recentAntiPatterns.length; j++) {
      const anti = recentAntiPatterns[j];
      if (!anti || !Array.isArray(anti.learning_signals)) continue;
      const overlap = anti.learning_signals.some(function (tag) { return signalTags.has(String(tag)); });
      // Hard anti-patterns penalize more than soft ones.
      if (overlap) overlapPenalty += anti.mode === 'hard' ? 0.4 : 0.18;
    }
    boost -= overlapPenalty;
  }
  // Clamp so learning can nudge, but never dominate, the base score.
  return Math.max(-1.5, Math.min(1.5, boost));
}
// Population-size-dependent drift intensity.
// In population genetics, genetic drift is stronger in small populations (Ne).
// driftIntensity: 0 = pure selection, 1 = pure drift (random).
// Formula: intensity = 1 / sqrt(Ne) where Ne = effective population size.
// This replaces the binary driftEnabled flag with a continuous spectrum.
/**
 * @param {object} opts - { driftEnabled, effectivePopulationSize, genePoolSize }.
 * @returns {number} Drift intensity in [0, 1].
 */
function computeDriftIntensity(opts) {
  const driftEnabled = !!(opts && opts.driftEnabled);
  // Effective population size: explicit Ne wins; otherwise fall back to the
  // gene pool size. Non-finite or zero values are treated as "unknown".
  let ne = null;
  if (opts) {
    const explicitNe = Number(opts.effectivePopulationSize);
    const poolNe = Number(opts.genePoolSize);
    if (Number.isFinite(explicitNe) && explicitNe) {
      ne = explicitNe;
    } else if (Number.isFinite(poolNe) && poolNe) {
      ne = poolNe;
    }
  }
  if (driftEnabled) {
    // Explicit drift: moderate-to-high intensity, with a +0.3 floor shift.
    return ne !== null && ne > 1 ? Math.min(1, 1 / Math.sqrt(ne) + 0.3) : 0.7;
  }
  if (ne !== null && ne > 0) {
    // Population-dependent drift: small population = more drift.
    // Ne=1 -> 1.0 (pure drift), Ne=25 -> 0.2, Ne=100 -> 0.1.
    return Math.min(1, 1 / Math.sqrt(ne));
  }
  return 0; // No drift info available, pure selection
}
/**
 * Select the best-matching gene for the current signal set.
 *
 * Blends deterministic scoring (scoreGene + scoreGeneLearning) with
 * continuous genetic drift (computeDriftIntensity). When Hub capability
 * gaps are supplied, drift becomes "diversity_directed" toward genes that
 * cover under-represented capabilities; otherwise it is a random pick among
 * the top-scored candidates. A memory-graph preferred gene can short-circuit
 * selection when it is already a scored candidate.
 *
 * @param {Array<object>} genes - Candidate Gene assets.
 * @param {string[]} signals - Extracted signal strings.
 * @param {object} opts - Optional: bannedGeneIds (Set), driftEnabled,
 *   preferredGeneId, capabilityGaps, noveltyScore, effectivePopulationSize.
 * @returns {{selected: (object|null), alternatives: object[],
 *   driftIntensity: number, driftMode: string}}
 */
function selectGene(genes, signals, opts) {
  const genesList = Array.isArray(genes) ? genes : [];
  const bannedGeneIds = opts && opts.bannedGeneIds ? opts.bannedGeneIds : new Set();
  const driftEnabled = !!(opts && opts.driftEnabled);
  const preferredGeneId = opts && typeof opts.preferredGeneId === 'string' ? opts.preferredGeneId : null;
  // Diversity-directed drift: capability_gaps from Hub heartbeat
  const capabilityGaps = opts && Array.isArray(opts.capabilityGaps) ? opts.capabilityGaps : [];
  const noveltyScore = opts && Number.isFinite(Number(opts.noveltyScore)) ? Number(opts.noveltyScore) : null;
  // Compute continuous drift intensity based on effective population size
  const driftIntensity = computeDriftIntensity({
    driftEnabled: driftEnabled,
    effectivePopulationSize: opts && opts.effectivePopulationSize,
    genePoolSize: genesList.length,
  });
  // Drift also disables ban filtering below (bans are advisory under drift).
  const useDrift = driftEnabled || driftIntensity > 0.15;
  // Distilled genes are slightly discounted so originals win score ties.
  const DISTILLED_PREFIX = 'gene_distilled_';
  const DISTILLED_SCORE_FACTOR = 0.8;
  const envFingerprint = captureEnvFingerprint();
  const scored = genesList
    .map(g => {
      let s = scoreGene(g, signals);
      s += scoreGeneLearning(g, signals, envFingerprint);
      if (s > 0 && g.id && String(g.id).startsWith(DISTILLED_PREFIX)) s *= DISTILLED_SCORE_FACTOR;
      return { gene: g, score: s };
    })
    .filter(x => x.score > 0)
    .sort((a, b) => b.score - a.score);
  if (scored.length === 0) return { selected: null, alternatives: [], driftIntensity: driftIntensity, driftMode: 'none' };
  // Memory graph preference: only override when the preferred gene is already a match candidate.
  if (preferredGeneId) {
    const preferred = scored.find(x => x.gene && x.gene.id === preferredGeneId);
    if (preferred && (useDrift || !bannedGeneIds.has(preferredGeneId))) {
      const rest = scored.filter(x => x.gene && x.gene.id !== preferredGeneId);
      const filteredRest = useDrift ? rest : rest.filter(x => x.gene && !bannedGeneIds.has(x.gene.id));
      return {
        selected: preferred.gene,
        alternatives: filteredRest.slice(0, 4).map(x => x.gene),
        driftIntensity: driftIntensity,
        driftMode: 'memory_preferred',
      };
    }
  }
  // Low-efficiency suppression: do not repeat low-confidence paths unless drift is active.
  const filtered = useDrift ? scored : scored.filter(x => x.gene && !bannedGeneIds.has(x.gene.id));
  if (filtered.length === 0) return { selected: null, alternatives: scored.slice(0, 4).map(x => x.gene), driftIntensity: driftIntensity, driftMode: 'none' };
  // Diversity-directed drift: when capability gaps are available, prefer genes that
  // cover gap areas instead of pure random selection. This replaces the blind
  // random drift with an informed exploration toward under-covered capabilities.
  let selectedIdx = 0;
  let driftMode = 'selection';
  if (driftIntensity > 0 && filtered.length > 1 && Math.random() < driftIntensity) {
    if (capabilityGaps.length > 0) {
      // Directed drift: score each candidate by how well its signals_match
      // covers the capability gap dimensions
      const gapScores = filtered.map(function(entry, idx) {
        const g = entry.gene;
        const patterns = Array.isArray(g.signals_match) ? g.signals_match : [];
        let gapHits = 0;
        // Only the first 5 gaps are considered, bounding the matching cost.
        for (let gi = 0; gi < capabilityGaps.length && gi < 5; gi++) {
          const gapSignal = capabilityGaps[gi];
          if (typeof gapSignal === 'string' && patterns.some(function(p) { return matchPatternToSignals(p, [gapSignal]); })) {
            gapHits++;
          }
        }
        return { idx: idx, gapHits: gapHits, baseScore: entry.score };
      });
      const hasGapHits = gapScores.some(function(gs) { return gs.gapHits > 0; });
      if (hasGapHits) {
        // Sort by gap coverage first, then by base score
        gapScores.sort(function(a, b) {
          return b.gapHits - a.gapHits || b.baseScore - a.baseScore;
        });
        selectedIdx = gapScores[0].idx;
        driftMode = 'diversity_directed';
      } else {
        // No gap match: fall back to novelty-weighted random selection
        let topN = Math.min(filtered.length, Math.max(2, Math.ceil(filtered.length * driftIntensity)));
        // If novelty score is low (agent is too similar to others), increase exploration range
        if (noveltyScore != null && noveltyScore < 0.3 && topN < filtered.length) {
          topN = Math.min(filtered.length, topN + 1);
        }
        selectedIdx = Math.floor(Math.random() * topN);
        driftMode = 'random_weighted';
      }
    } else {
      // No capability gap data: original random drift behavior
      const topN = Math.min(filtered.length, Math.max(2, Math.ceil(filtered.length * driftIntensity)));
      selectedIdx = Math.floor(Math.random() * topN);
      driftMode = 'random';
    }
  }
  return {
    selected: filtered[selectedIdx].gene,
    alternatives: filtered.filter(function(_, i) { return i !== selectedIdx; }).slice(0, 4).map(x => x.gene),
    driftIntensity: driftIntensity,
    driftMode: driftMode,
  };
}
/**
 * Pick the capsule whose trigger patterns match the most signals.
 * @param {Array<object>} capsules - Capsule assets with a trigger array.
 * @param {string[]} signals - Extracted signal strings.
 * @returns {object|null} Highest-scoring capsule (earliest wins ties), or
 *   null when no capsule has at least one trigger hit.
 */
function selectCapsule(capsules, signals) {
  const list = capsules || [];
  let best = null;
  let bestScore = 0;
  for (let i = 0; i < list.length; i++) {
    const candidate = list[i];
    const triggers = Array.isArray(candidate.trigger) ? candidate.trigger : [];
    let score = 0;
    for (let j = 0; j < triggers.length; j++) {
      if (matchPatternToSignals(triggers[j], signals)) score++;
    }
    // Strict ">" keeps the earliest capsule on ties, matching a stable
    // descending sort followed by taking the first element.
    if (score > bestScore) {
      bestScore = score;
      best = candidate;
    }
  }
  return best;
}
/**
 * Fraction of signalsA entries that also appear in signalsB
 * (case-insensitive membership). Non-array or empty inputs yield 0.
 * @param {string[]} signalsA
 * @param {string[]} signalsB
 * @returns {number} Overlap ratio in [0, 1].
 */
function computeSignalOverlap(signalsA, signalsB) {
  if (!Array.isArray(signalsA) || !Array.isArray(signalsB)) return 0;
  if (signalsA.length === 0 || signalsB.length === 0) return 0;
  const normalizedB = new Set(signalsB.map((s) => String(s).toLowerCase()));
  const hits = signalsA.reduce(
    (acc, s) => (normalizedB.has(String(s).toLowerCase()) ? acc + 1 : acc),
    0
  );
  return hits / Math.max(signalsA.length, 1);
}
// A gene is banned once it is implicated in this many relevant failed capsules.
const FAILED_CAPSULE_BAN_THRESHOLD = 2;
// Minimum trigger/signal overlap for a failed capsule to count as relevant.
const FAILED_CAPSULE_OVERLAP_MIN = 0.6;
/**
 * Extend an existing ban set with genes implicated in failed capsules whose
 * triggers overlap the current signals strongly enough.
 * @param {Array<object>} failedCapsules - Capsules with { gene, trigger }.
 * @param {string[]} signals - Current signals.
 * @param {Set<string>} existingBans - Prior bans (copied, never mutated).
 * @returns {Set<string>} New Set containing old and newly banned gene ids.
 */
function banGenesFromFailedCapsules(failedCapsules, signals, existingBans) {
  const bans = existingBans instanceof Set ? new Set(existingBans) : new Set();
  const list = Array.isArray(failedCapsules) ? failedCapsules : [];
  if (list.length === 0) return bans;
  const failCounts = new Map();
  for (const fc of list) {
    if (!fc || !fc.gene) continue;
    // Irrelevant failures (low trigger overlap with current signals) are ignored.
    if (computeSignalOverlap(signals, fc.trigger || []) < FAILED_CAPSULE_OVERLAP_MIN) continue;
    const gid = String(fc.gene);
    failCounts.set(gid, (failCounts.get(gid) || 0) + 1);
  }
  for (const [gid, count] of failCounts) {
    if (count >= FAILED_CAPSULE_BAN_THRESHOLD) bans.add(gid);
  }
  return bans;
}
/**
 * Top-level selection entry point: choose a gene and a capsule for the
 * current signals, honoring memory-graph advice, failed-capsule bans, and
 * diversity-directed drift options, and produce a selector decision record.
 * @returns {{selectedGene: (object|null), capsuleCandidates: object[],
 *   selector: object, driftIntensity: number}}
 */
function selectGeneAndCapsule({ genes, capsules, signals, memoryAdvice, driftEnabled, failedCapsules, capabilityGaps, noveltyScore }) {
  const advisedBans = memoryAdvice && memoryAdvice.bannedGeneIds instanceof Set
    ? memoryAdvice.bannedGeneIds
    : new Set();
  const preferredGeneId = memoryAdvice && memoryAdvice.preferredGeneId ? memoryAdvice.preferredGeneId : null;
  // Merge advised bans with bans derived from recently failed capsules.
  const effectiveBans = banGenesFromFailedCapsules(
    Array.isArray(failedCapsules) ? failedCapsules : [],
    signals,
    advisedBans
  );
  const selection = selectGene(genes, signals, {
    bannedGeneIds: effectiveBans,
    preferredGeneId,
    driftEnabled: !!driftEnabled,
    capabilityGaps: Array.isArray(capabilityGaps) ? capabilityGaps : [],
    noveltyScore: Number.isFinite(Number(noveltyScore)) ? Number(noveltyScore) : null,
  });
  const capsule = selectCapsule(capsules, signals);
  const selector = buildSelectorDecision({
    gene: selection.selected,
    capsule,
    signals,
    alternatives: selection.alternatives,
    memoryAdvice,
    driftEnabled,
    driftIntensity: selection.driftIntensity,
  });
  return {
    selectedGene: selection.selected,
    capsuleCandidates: capsule ? [capsule] : [],
    selector,
    driftIntensity: selection.driftIntensity,
  };
}
/**
 * Build a human-readable decision record explaining why a gene/capsule pair
 * was (or was not) selected.
 * @returns {{selected: (string|null), reason: string[], alternatives: string[]}}
 */
function buildSelectorDecision({ gene, capsule, signals, alternatives, memoryAdvice, driftEnabled, driftIntensity }) {
  const reason = [];
  if (gene) {
    reason.push('signals match gene.signals_match');
  }
  if (capsule) {
    reason.push('capsule trigger matches signals');
  }
  if (!gene) {
    reason.push('no matching gene found; new gene may be required');
  }
  if (signals && signals.length) {
    reason.push(`signals: ${signals.join(', ')}`);
  }
  const explanations = memoryAdvice && Array.isArray(memoryAdvice.explanation) ? memoryAdvice.explanation : [];
  if (explanations.length) {
    reason.push(`memory_graph: ${explanations.join(' | ')}`);
  }
  if (driftEnabled) {
    reason.push('random_drift_override: true');
  }
  if (Number.isFinite(driftIntensity) && driftIntensity > 0) {
    reason.push(`drift_intensity: ${driftIntensity.toFixed(3)}`);
  }
  return {
    selected: gene ? gene.id : null,
    reason,
    alternatives: Array.isArray(alternatives) ? alternatives.map((g) => g.id) : [],
  };
}
// Public selector API. The semantic helpers (scoreGeneSemantic,
// cosineSimilarity, tokenize) are defined earlier in this file and exported
// for reuse/testing alongside the selection entry points.
module.exports = {
  selectGeneAndCapsule,
  selectGene,
  selectCapsule,
  buildSelectorDecision,
  matchPatternToSignals,
  scoreGeneSemantic,
  cosineSimilarity,
  tokenize,
};

444
src/gep/signals.js Normal file
View File

@@ -0,0 +1,444 @@
// Opportunity signal names (shared with mutation.js and personality.js).
var OPPORTUNITY_SIGNALS = [
  'user_feature_request',
  'user_improvement_suggestion',
  'perf_bottleneck',
  'capability_gap',
  'stable_success_plateau',
  'external_opportunity',
  'recurring_error',
  'unsupported_input_type',
  'evolution_stagnation_detected',
  'repair_loop_detected',
  'force_innovation_after_repair_loop',
  'tool_bypass',
  'curriculum_target',
];
/**
 * Check whether any signal is an opportunity signal, either as an exact
 * name ('perf_bottleneck') or with a detail suffix ('perf_bottleneck:...').
 * @param {string[]} signals - Signal strings (non-array treated as empty).
 * @returns {boolean}
 */
function hasOpportunitySignal(signals) {
  var list = Array.isArray(signals) ? signals : [];
  return OPPORTUNITY_SIGNALS.some(function (name) {
    if (list.includes(name)) return true;
    var prefix = name + ':';
    return list.some(function (s) { return String(s).startsWith(prefix); });
  });
}
// Build a de-duplication set from recent evolution events.
/**
 * Analyze the tail of recent evolution history for loop and stagnation
 * patterns that should steer the next cycle. Only the last 10 events are
 * considered; frequency counts use the last 8.
 *
 * @param {Array<object>} recentEvents - EvolutionEvent-like records with
 *   optional intent, signals, genes_used, outcome, blast_radius, meta.
 * @returns {object} Always the full shape:
 *   { suppressedSignals: Set<string>, recentIntents: string[],
 *     consecutiveRepairCount, emptyCycleCount, consecutiveEmptyCycles,
 *     consecutiveFailureCount, recentFailureCount, recentFailureRatio,
 *     signalFreq: Object<string,number>, geneFreq: Object<string,number> }
 */
function analyzeRecentHistory(recentEvents) {
  if (!Array.isArray(recentEvents) || recentEvents.length === 0) {
    // Fix: return the same shape as the non-empty path. Previously only 3 of
    // the 10 fields were present, so callers reading e.g. emptyCycleCount,
    // recentFailureRatio, or geneFreq on an empty history got undefined.
    return {
      suppressedSignals: new Set(),
      recentIntents: [],
      consecutiveRepairCount: 0,
      emptyCycleCount: 0,
      consecutiveEmptyCycles: 0,
      consecutiveFailureCount: 0,
      recentFailureCount: 0,
      recentFailureRatio: 0,
      signalFreq: {},
      geneFreq: {},
    };
  }
  // Take only the last 10 events
  var recent = recentEvents.slice(-10);
  // Count consecutive repair-intent runs at the tail
  var consecutiveRepairCount = 0;
  for (var i = recent.length - 1; i >= 0; i--) {
    if (recent[i].intent === 'repair') {
      consecutiveRepairCount++;
    } else {
      break;
    }
  }
  // Count signal frequency in last 8 events: signal -> count
  var signalFreq = {};
  var geneFreq = {};
  var tail = recent.slice(-8);
  for (var j = 0; j < tail.length; j++) {
    var evt = tail[j];
    var sigs = Array.isArray(evt.signals) ? evt.signals : [];
    for (var k = 0; k < sigs.length; k++) {
      var s = String(sigs[k]);
      // Normalize: strip details suffix so frequency keys match dedup filter keys
      var key = s.startsWith('errsig:') ? 'errsig'
        : s.startsWith('recurring_errsig') ? 'recurring_errsig'
        : s.startsWith('user_feature_request:') ? 'user_feature_request'
        : s.startsWith('user_improvement_suggestion:') ? 'user_improvement_suggestion'
        : s;
      signalFreq[key] = (signalFreq[key] || 0) + 1;
    }
    var genes = Array.isArray(evt.genes_used) ? evt.genes_used : [];
    for (var g = 0; g < genes.length; g++) {
      geneFreq[String(genes[g])] = (geneFreq[String(genes[g])] || 0) + 1;
    }
  }
  // Suppress signals that appeared in 3+ of the last 8 events (they are being over-processed)
  var suppressedSignals = new Set();
  var entries = Object.entries(signalFreq);
  for (var ei = 0; ei < entries.length; ei++) {
    if (entries[ei][1] >= 3) {
      suppressedSignals.add(entries[ei][0]);
    }
  }
  var recentIntents = recent.map(function (e) { return e.intent || 'unknown'; });
  // Count empty cycles (blast_radius.files === 0) in last 8 events.
  // High ratio indicates the evolver is spinning without producing real changes.
  var emptyCycleCount = 0;
  for (var ec = 0; ec < tail.length; ec++) {
    var br = tail[ec].blast_radius;
    var em = tail[ec].meta && tail[ec].meta.empty_cycle;
    if (em || (br && br.files === 0 && br.lines === 0)) {
      emptyCycleCount++;
    }
  }
  // Count consecutive empty cycles at the tail (not just total in last 8).
  // This detects saturation: the evolver has exhausted innovation space and keeps
  // producing zero-change cycles. Used to trigger graceful degradation.
  var consecutiveEmptyCycles = 0;
  for (var se = recent.length - 1; se >= 0; se--) {
    var seBr = recent[se].blast_radius;
    var seEm = recent[se].meta && recent[se].meta.empty_cycle;
    if (seEm || (seBr && seBr.files === 0 && seBr.lines === 0)) {
      consecutiveEmptyCycles++;
    } else {
      break;
    }
  }
  // Count consecutive failures at the tail of recent events.
  // This tells the evolver "you have been failing N times in a row -- slow down."
  var consecutiveFailureCount = 0;
  for (var cf = recent.length - 1; cf >= 0; cf--) {
    var outcome = recent[cf].outcome;
    if (outcome && outcome.status === 'failed') {
      consecutiveFailureCount++;
    } else {
      break;
    }
  }
  // Count total failures in last 8 events (failure ratio).
  var recentFailureCount = 0;
  for (var rf = 0; rf < tail.length; rf++) {
    var rfOut = tail[rf].outcome;
    if (rfOut && rfOut.status === 'failed') recentFailureCount++;
  }
  return {
    suppressedSignals: suppressedSignals,
    recentIntents: recentIntents,
    consecutiveRepairCount: consecutiveRepairCount,
    emptyCycleCount: emptyCycleCount,
    consecutiveEmptyCycles: consecutiveEmptyCycles,
    consecutiveFailureCount: consecutiveFailureCount,
    recentFailureCount: recentFailureCount,
    recentFailureRatio: tail.length > 0 ? recentFailureCount / tail.length : 0,
    signalFreq: signalFreq,
    geneFreq: geneFreq,
  };
}
/**
 * Extract evolution-driving signal strings from recent transcripts, logs,
 * and memory/user snippets, informed by recent evolution history.
 *
 * Signals fall into two families: defensive (errors, missing resources,
 * platform incompatibilities) and opportunity (feature requests,
 * improvement suggestions, perf bottlenecks, capability gaps) — the latter
 * detected across EN / ZH-CN / ZH-TW / JA text. History analysis then
 * suppresses over-processed signals and injects loop-breaking directives
 * (stagnation, saturation, failure-streak, forced innovation).
 *
 * @param {object} inputs - { recentSessionTranscript, todayLog,
 *   memorySnippet, userSnippet, recentEvents }; all text fields optional.
 * @returns {string[]} De-duplicated signal names; never empty (falls back
 *   to ['stable_success_plateau']).
 */
function extractSignals({ recentSessionTranscript, todayLog, memorySnippet, userSnippet, recentEvents }) {
  var signals = [];
  // All text sources are folded into one corpus; `lower` is used for
  // case-insensitive keyword checks, `corpus` keeps original casing for snippets.
  var corpus = [
    String(recentSessionTranscript || ''),
    String(todayLog || ''),
    String(memorySnippet || ''),
    String(userSnippet || ''),
  ].join('\n');
  var lower = corpus.toLowerCase();
  // Analyze recent evolution history for de-duplication
  var history = analyzeRecentHistory(recentEvents || []);
  // --- Defensive signals (errors, missing resources) ---
  // Refined error detection regex to avoid false positives on "fail"/"failed" in normal text.
  // We prioritize structured error markers ([error], error:, exception:) and specific JSON patterns.
  var errorHit = /\[error\]|error:|exception:|iserror":true|"status":\s*"error"|"status":\s*"failed"|错误\s*[:]|异常\s*[:]|报错\s*[:]|失败\s*[:]/.test(lower);
  if (errorHit) signals.push('log_error');
  // Error signature (more reproducible than a coarse "log_error" tag).
  try {
    var lines = corpus
      .split('\n')
      .map(function (l) { return String(l || '').trim(); })
      .filter(Boolean);
    var errLine =
      lines.find(function (l) { return /\b(typeerror|referenceerror|syntaxerror)\b\s*:|error\s*:|exception\s*:|\[error|错误\s*[:]|异常\s*[:]|报错\s*[:]|失败\s*[:]/i.test(l); }) ||
      null;
    if (errLine) {
      var clipped = errLine.replace(/\s+/g, ' ').slice(0, 260);
      signals.push('errsig:' + clipped);
    }
  } catch (e) {}
  if (lower.includes('memory.md missing')) signals.push('memory_missing');
  if (lower.includes('user.md missing')) signals.push('user_missing');
  if (lower.includes('key missing')) signals.push('integration_key_missing');
  if (lower.includes('no session logs found') || lower.includes('no jsonl files')) signals.push('session_logs_missing');
  if (process.platform === 'win32' && (lower.includes('pgrep') || lower.includes('ps aux') || lower.includes('cat >') || lower.includes('heredoc'))) {
    signals.push('windows_shell_incompatible');
  }
  if (lower.includes('path.resolve(__dirname, \'../../../')) signals.push('path_outside_workspace');
  // Protocol-specific drift signals
  if (lower.includes('prompt') && !lower.includes('evolutionevent')) signals.push('protocol_drift');
  // --- Recurring error detection (robustness signals) ---
  // Count repeated identical errors -- these indicate systemic issues that need automated fixes
  try {
    var errorCounts = {};
    var errPatterns = corpus.match(/(?:LLM error|"error"|"status":\s*"error")[^}]{0,200}/gi) || [];
    for (var ep = 0; ep < errPatterns.length; ep++) {
      // Normalize to a short key
      var key = errPatterns[ep].replace(/\s+/g, ' ').slice(0, 100);
      errorCounts[key] = (errorCounts[key] || 0) + 1;
    }
    var recurringErrors = Object.entries(errorCounts).filter(function (e) { return e[1] >= 3; });
    if (recurringErrors.length > 0) {
      signals.push('recurring_error');
      // Include the top recurring error signature for the agent to diagnose
      var topErr = recurringErrors.sort(function (a, b) { return b[1] - a[1]; })[0];
      signals.push('recurring_errsig(' + topErr[1] + 'x):' + topErr[0].slice(0, 150));
    }
  } catch (e) {}
  // --- Unsupported input type (e.g. GIF, video formats the LLM can't handle) ---
  if (/unsupported mime|unsupported.*type|invalid.*mime/i.test(lower)) {
    signals.push('unsupported_input_type');
  }
  // --- Opportunity signals (innovation / feature requests) ---
  // Support 4 languages: EN, ZH-CN, ZH-TW, JA. Attach snippet for selector/prompt use.
  var featureRequestSnippet = '';
  var featEn = corpus.match(/\b(add|implement|create|build|make|develop|write|design)\b[^.?!\n]{3,120}\b(feature|function|module|capability|tool|support|endpoint|command|option|mode)\b/i);
  if (featEn) featureRequestSnippet = featEn[0].replace(/\s+/g, ' ').trim().slice(0, 200);
  if (!featureRequestSnippet && /\b(i want|i need|we need|please add|can you add|could you add|let'?s add)\b/i.test(lower)) {
    var featWant = corpus.match(/.{0,80}\b(i want|i need|we need|please add|can you add|could you add|let'?s add)\b.{0,80}/i);
    featureRequestSnippet = featWant ? featWant[0].replace(/\s+/g, ' ').trim().slice(0, 200) : 'feature request';
  }
  if (!featureRequestSnippet && /加个|实现一下|做个|想要\s*一个|需要\s*一个|帮我加|帮我开发|加一下|新增一个|加个功能|做个功能|我想/.test(corpus)) {
    var featZh = corpus.match(/.{0,100}(加个|实现一下|做个|想要\s*一个|需要\s*一个|帮我加|帮我开发|加一下|新增一个|加个功能|做个功能).{0,100}/);
    if (featZh) featureRequestSnippet = featZh[0].replace(/\s+/g, ' ').trim().slice(0, 200);
    if (!featureRequestSnippet && /我想/.test(corpus)) {
      var featWantZh = corpus.match(/我想\s*[,\.。、\s]*([\s\S]{0,400})/);
      featureRequestSnippet = featWantZh ? (featWantZh[1].replace(/\s+/g, ' ').trim().slice(0, 200) || '功能需求') : '功能需求';
    }
    if (!featureRequestSnippet) featureRequestSnippet = '功能需求';
  }
  if (!featureRequestSnippet && /加個|實現一下|做個|想要一個|請加|新增一個|加個功能|做個功能|幫我加/.test(corpus)) {
    var featTw = corpus.match(/.{0,100}(加個|實現一下|做個|想要一個|請加|新增一個|加個功能|做個功能|幫我加).{0,100}/);
    featureRequestSnippet = featTw ? featTw[0].replace(/\s+/g, ' ').trim().slice(0, 200) : '功能需求';
  }
  if (!featureRequestSnippet && /追加|実装|作って|機能を|追加して|が欲しい|を追加|してほしい/.test(corpus)) {
    var featJa = corpus.match(/.{0,100}(追加|実装|作って|機能を|追加して|が欲しい|を追加|してほしい).{0,100}/);
    featureRequestSnippet = featJa ? featJa[0].replace(/\s+/g, ' ').trim().slice(0, 200) : '機能要望';
  }
  if (featureRequestSnippet || /\b(add|implement|create|build|make|develop|write|design)\b[^.?!\n]{3,60}\b(feature|function|module|capability|tool|support|endpoint|command|option|mode)\b/i.test(corpus) ||
    /\b(i want|i need|we need|please add|can you add|could you add|let'?s add)\b/i.test(lower) ||
    /加个|实现一下|做个|想要\s*一个|需要\s*一个|帮我加|帮我开发|加一下|新增一个|加个功能|做个功能|我想/.test(corpus) ||
    /加個|實現一下|做個|想要一個|請加|新增一個|加個功能|做個功能|幫我加/.test(corpus) ||
    /追加|実装|作って|機能を|追加して|が欲しい|を追加|してほしい/.test(corpus)) {
    signals.push('user_feature_request');
    if (featureRequestSnippet) signals.push('user_feature_request:' + featureRequestSnippet);
  }
  // user_improvement_suggestion: 4 languages + snippet
  // NOTE: skipped entirely when an error was detected, so repair work is
  // not misread as an improvement request.
  var improvementSnippet = '';
  if (!errorHit) {
    var impEn = corpus.match(/.{0,80}\b(should be|could be better|improve|enhance|upgrade|refactor|clean up|simplify|streamline)\b.{0,80}/i);
    if (impEn) improvementSnippet = impEn[0].replace(/\s+/g, ' ').trim().slice(0, 200);
    if (!improvementSnippet && /改进一下|优化一下|简化|重构|整理一下|弄得更好/.test(corpus)) {
      var impZh = corpus.match(/.{0,100}(改进一下|优化一下|简化|重构|整理一下|弄得更好).{0,100}/);
      improvementSnippet = impZh ? impZh[0].replace(/\s+/g, ' ').trim().slice(0, 200) : '改进建议';
    }
    if (!improvementSnippet && /改進一下|優化一下|簡化|重構|整理一下|弄得更好/.test(corpus)) {
      var impTw = corpus.match(/.{0,100}(改進一下|優化一下|簡化|重構|整理一下|弄得更好).{0,100}/);
      improvementSnippet = impTw ? impTw[0].replace(/\s+/g, ' ').trim().slice(0, 200) : '改進建議';
    }
    if (!improvementSnippet && /改善|最適化|簡素化|リファクタ|良くして|改良/.test(corpus)) {
      var impJa = corpus.match(/.{0,100}(改善|最適化|簡素化|リファクタ|良くして|改良).{0,100}/);
      improvementSnippet = impJa ? impJa[0].replace(/\s+/g, ' ').trim().slice(0, 200) : '改善要望';
    }
    var hasImprovement = improvementSnippet ||
      /\b(should be|could be better|improve|enhance|upgrade|refactor|clean up|simplify|streamline)\b/i.test(lower) ||
      /改进一下|优化一下|简化|重构|整理一下|弄得更好/.test(corpus) ||
      /改進一下|優化一下|簡化|重構|整理一下|弄得更好/.test(corpus) ||
      /改善|最適化|簡素化|リファクタ|良くして|改良/.test(corpus);
    if (hasImprovement) {
      signals.push('user_improvement_suggestion');
      if (improvementSnippet) signals.push('user_improvement_suggestion:' + improvementSnippet);
    }
  }
  // perf_bottleneck: performance issues detected
  if (/\b(slow|timeout|timed?\s*out|latency|bottleneck|took too long|performance issue|high cpu|high memory|oom|out of memory)\b/i.test(lower)) {
    signals.push('perf_bottleneck');
  }
  // capability_gap: something is explicitly unsupported or missing
  if (/\b(not supported|cannot|doesn'?t support|no way to|missing feature|unsupported|not available|not implemented|no support for)\b/i.test(lower)) {
    // Only fire if it is not just a missing file/config signal
    if (!signals.includes('memory_missing') && !signals.includes('user_missing') && !signals.includes('session_logs_missing')) {
      signals.push('capability_gap');
    }
  }
  // --- Tool Usage Analytics ---
  var toolUsage = {};
  var toolMatches = corpus.match(/\[TOOL:\s*([\w-]+)\]/g) || [];
  // Extract exec commands to identify benign loops (like watchdog checks)
  var execCommands = corpus.match(/exec: (node\s+[\w\/\.-]+\.js\s+ensure)/g) || [];
  var benignExecCount = execCommands.length;
  for (var i = 0; i < toolMatches.length; i++) {
    var toolName = toolMatches[i].match(/\[TOOL:\s*([\w-]+)\]/)[1];
    toolUsage[toolName] = (toolUsage[toolName] || 0) + 1;
  }
  // Adjust exec count by subtracting benign commands
  if (toolUsage['exec']) {
    toolUsage['exec'] = Math.max(0, toolUsage['exec'] - benignExecCount);
  }
  Object.keys(toolUsage).forEach(function(tool) {
    if (toolUsage[tool] >= 10) { // Bumped threshold from 5 to 10
      signals.push('high_tool_usage:' + tool);
    }
    // Detect repeated exec usage (often a sign of manual loops or inefficient automation)
    if (tool === 'exec' && toolUsage[tool] >= 5) { // Bumped threshold from 3 to 5
      signals.push('repeated_tool_usage:exec');
    }
  });
  // --- Tool bypass detection ---
  // When the agent uses shell/exec to run ad-hoc scripts instead of registered tools,
  // it indicates a tool integrity issue (bypassing the tool layer).
  var bypassPatterns = [
    /node\s+\S+\.m?js/,
    /npx\s+/,
    /curl\s+.*api/i,
    /python\s+\S+\.py/,
  ];
  var execContent = corpus.match(/exec:.*$/gm) || [];
  for (var bpi = 0; bpi < execContent.length; bpi++) {
    var line = execContent[bpi];
    for (var bpj = 0; bpj < bypassPatterns.length; bpj++) {
      if (bypassPatterns[bpj].test(line)) {
        signals.push('tool_bypass');
        // Exits both loops: the outer index is pushed past the end.
        bpi = execContent.length;
        break;
      }
    }
  }
  // --- Signal prioritization ---
  // Remove cosmetic signals when actionable signals exist
  var actionable = signals.filter(function (s) {
    return s !== 'user_missing' && s !== 'memory_missing' && s !== 'session_logs_missing' && s !== 'windows_shell_incompatible';
  });
  // If we have actionable signals, drop the cosmetic ones
  if (actionable.length > 0) {
    signals = actionable;
  }
  // --- De-duplication: suppress signals that have been over-processed ---
  if (history.suppressedSignals.size > 0) {
    var beforeDedup = signals.length;
    signals = signals.filter(function (s) {
      // Normalize signal key for comparison
      var key = s.startsWith('errsig:') ? 'errsig'
        : s.startsWith('recurring_errsig') ? 'recurring_errsig'
        : s.startsWith('user_feature_request:') ? 'user_feature_request'
        : s.startsWith('user_improvement_suggestion:') ? 'user_improvement_suggestion'
        : s;
      return !history.suppressedSignals.has(key);
    });
    if (beforeDedup > 0 && signals.length === 0) {
      // All signals were suppressed = system is stable but stuck in a loop
      // Force innovation
      signals.push('evolution_stagnation_detected');
      signals.push('stable_success_plateau');
    }
  }
  // --- Force innovation after 3+ consecutive repairs ---
  if (history.consecutiveRepairCount >= 3) {
    // Remove repair-only signals (log_error, errsig) and inject innovation signals
    signals = signals.filter(function (s) {
      return s !== 'log_error' && !s.startsWith('errsig:') && !s.startsWith('recurring_errsig');
    });
    if (signals.length === 0) {
      signals.push('repair_loop_detected');
      signals.push('stable_success_plateau');
    }
    // Append a directive signal that the prompt can pick up
    signals.push('force_innovation_after_repair_loop');
  }
  // --- Force innovation after too many empty cycles (zero blast radius) ---
  // If >= 50% of last 8 cycles produced no code changes, the evolver is spinning idle.
  // Strip repair signals and force innovate to break the empty loop.
  if (history.emptyCycleCount >= 4) {
    signals = signals.filter(function (s) {
      return s !== 'log_error' && !s.startsWith('errsig:') && !s.startsWith('recurring_errsig');
    });
    if (!signals.includes('empty_cycle_loop_detected')) signals.push('empty_cycle_loop_detected');
    if (!signals.includes('stable_success_plateau')) signals.push('stable_success_plateau');
  }
  // --- Saturation detection (graceful degradation) ---
  // When consecutive empty cycles pile up at the tail, the evolver has exhausted its
  // innovation space. Instead of spinning idle forever, signal that the system should
  // switch to steady-state maintenance mode with reduced evolution frequency.
  // This directly addresses the Echo-MingXuan failure: Cycle #55 hit "no committable
  // code changes" and load spiked to 1.30 because there was no degradation strategy.
  if (history.consecutiveEmptyCycles >= 5) {
    if (!signals.includes('force_steady_state')) signals.push('force_steady_state');
    if (!signals.includes('evolution_saturation')) signals.push('evolution_saturation');
  } else if (history.consecutiveEmptyCycles >= 3) {
    if (!signals.includes('evolution_saturation')) signals.push('evolution_saturation');
  }
  // --- Failure streak awareness ---
  // When the evolver has failed many consecutive cycles, inject a signal
  // telling the LLM to be more conservative and avoid repeating the same approach.
  if (history.consecutiveFailureCount >= 3) {
    signals.push('consecutive_failure_streak_' + history.consecutiveFailureCount);
    // After 5+ consecutive failures, force a strategy change (don't keep trying the same thing)
    if (history.consecutiveFailureCount >= 5) {
      signals.push('failure_loop_detected');
      // Strip the dominant gene's signals to force a different gene selection
      var topGene = null;
      var topGeneCount = 0;
      var gfEntries = Object.entries(history.geneFreq);
      for (var gfi = 0; gfi < gfEntries.length; gfi++) {
        if (gfEntries[gfi][1] > topGeneCount) {
          topGeneCount = gfEntries[gfi][1];
          topGene = gfEntries[gfi][0];
        }
      }
      if (topGene) {
        signals.push('ban_gene:' + topGene);
      }
    }
  }
  // High failure ratio in recent history (>= 75% failed in last 8 cycles)
  if (history.recentFailureRatio >= 0.75) {
    signals.push('high_failure_ratio');
    signals.push('force_innovation_after_repair_loop');
  }
  // If no signals at all, add a default innovation signal
  if (signals.length === 0) {
    signals.push('stable_success_plateau');
  }
  return Array.from(new Set(signals));
}
module.exports = { extractSignals, hasOpportunitySignal, analyzeRecentHistory, OPPORTUNITY_SIGNALS };

1234
src/gep/skillDistiller.js Normal file

File diff suppressed because it is too large Load Diff

307
src/gep/skillPublisher.js Normal file
View File

@@ -0,0 +1,307 @@
'use strict';
var { getHubUrl, buildHubHeaders, getNodeId } = require('./a2aProtocol');
/**
 * Sanitize a raw gene id into a human-readable kebab-case skill name.
 * Strips gene_/gene_distilled_ prefixes, converts underscores to dashes,
 * and removes embedded timestamps (runs of 10+ digits).
 * @param {string} rawName - Raw gene id.
 * @returns {string|null} Kebab-case name, or null when the result is
 *   unsalvageable (leading digit run, editor/tool name, or too short).
 */
function sanitizeSkillName(rawName) {
  var name = rawName
    .replace(/[\r\n]+/g, '-')
    .replace(/^gene_distilled_/, '')
    .replace(/^gene_/, '')
    .replace(/_/g, '-');
  // Strip ALL embedded timestamps (10+ digit sequences) anywhere in the
  // name, then collapse and trim the dashes left behind.
  name = name
    .replace(/-?\d{10,}-?/g, '-')
    .replace(/-+/g, '-')
    .replace(/^-|-$/g, '');
  var startsWithDigits = /^\d{8,}/.test(name);
  var looksLikeToolName = /^(cursor|vscode|vim|emacs|windsurf|copilot|cline|codex)[-]?\d*$/i.test(name);
  if (startsWithDigits || looksLikeToolName) {
    return null;
  }
  var meaningfulLength = name.replace(/[-]/g, '').length;
  return meaningfulLength < 6 ? null : name;
}
/**
 * Derive a Title Case display name from a kebab-case skill name.
 * "retry-with-backoff" -> "Retry With Backoff"
 * @param {string} kebabName - Dash-separated lowercase name.
 * @returns {string} Space-separated Title Case name.
 */
function toTitleCase(kebabName) {
  var words = kebabName.split('-');
  var titled = [];
  for (var i = 0; i < words.length; i++) {
    var word = words[i];
    // Empty segments (from doubled dashes) stay empty, preserving spacing.
    titled.push(word ? word.charAt(0).toUpperCase() + word.slice(1) : '');
  }
  return titled.join(' ');
}
/**
 * Derive fallback name words from gene signals/summary when id is not usable.
 * Collects up to five meaningful lowercase words (>= 3 chars, not stopwords)
 * from the first three signals_match entries, topping up from the summary
 * when fewer than two words were found.
 * @param {object} gene - Gene asset with optional signals_match and summary.
 * @returns {string} Kebab-case fallback name, or 'auto-distilled-skill'.
 */
function deriveFallbackName(gene) {
  var STOP = new Set(['the', 'and', 'for', 'with', 'from', 'that', 'this', 'into', 'when', 'are', 'was', 'has', 'had', 'not', 'but', 'its']);
  var words = [];
  var collect = function (text) {
    var tokens = String(text).toLowerCase().replace(/[^a-z0-9]+/g, ' ').trim().split(/\s+/);
    for (var i = 0; i < tokens.length; i++) {
      var w = tokens[i];
      if (w.length >= 3 && !STOP.has(w) && words.length < 5) words.push(w);
    }
  };
  if (Array.isArray(gene.signals_match)) {
    gene.signals_match.slice(0, 3).forEach(function (s) { collect(s); });
  }
  if (words.length < 2 && gene.summary) {
    collect(gene.summary);
  }
  var seen = {};
  var unique = words.filter(function (w) {
    if (seen[w]) return false;
    seen[w] = true;
    return true;
  });
  return unique.length >= 2 ? unique.join('-') : 'auto-distilled-skill';
}
/**
* Convert a Gene object into SKILL.md format -- marketplace-quality content.
*
* @param {object} gene - Gene asset
* @returns {string} SKILL.md content
*/
function geneToSkillMd(gene) {
  var rawName = gene.id || 'unnamed-skill';
  // Prefer the sanitized gene id; fall back to a name derived from signals/summary.
  var name = sanitizeSkillName(rawName) || deriveFallbackName(gene);
  var displayName = toTitleCase(name);
  // Flatten newlines and strip a trailing epoch-style timestamp (10+ digits) from the summary.
  var desc = (gene.summary || '').replace(/[\r\n]+/g, ' ').replace(/\s*\d{10,}\s*$/g, '').trim();
  if (!desc || desc.length < 10) desc = 'AI agent skill distilled from evolution experience.';
  // YAML front matter + H1 title; optional sections are appended below.
  var lines = [
    '---',
    'name: ' + displayName,
    'description: ' + desc,
    '---',
    '',
    '# ' + displayName,
    '',
    desc,
    '',
  ];
  // -- When to Use (derived from signals; preconditions go in their own section) --
  if (gene.signals_match && gene.signals_match.length > 0) {
    lines.push('## When to Use');
    lines.push('');
    lines.push('- When your project encounters: ' + gene.signals_match.slice(0, 4).map(function (s) {
      return '`' + s + '`';
    }).join(', '));
    lines.push('');
  }
  // -- Trigger Signals (full list, one bullet per signal) --
  if (gene.signals_match && gene.signals_match.length > 0) {
    lines.push('## Trigger Signals');
    lines.push('');
    gene.signals_match.forEach(function (s) {
      lines.push('- `' + s + '`');
    });
    lines.push('');
  }
  // -- Preconditions --
  if (gene.preconditions && gene.preconditions.length > 0) {
    lines.push('## Preconditions');
    lines.push('');
    gene.preconditions.forEach(function (p) {
      lines.push('- ' + p);
    });
    lines.push('');
  }
  // -- Strategy: numbered steps, with the leading verb bolded when detectable --
  if (gene.strategy && gene.strategy.length > 0) {
    lines.push('## Strategy');
    lines.push('');
    gene.strategy.forEach(function (step, i) {
      var text = String(step);
      var verb = extractStepVerb(text);
      if (verb) {
        lines.push((i + 1) + '. **' + verb + '** -- ' + stripLeadingVerb(text));
      } else {
        lines.push((i + 1) + '. ' + text);
      }
    });
    lines.push('');
  }
  // -- Constraints (only the fields present on the gene are rendered) --
  if (gene.constraints) {
    lines.push('## Constraints');
    lines.push('');
    if (gene.constraints.max_files) {
      lines.push('- Max files per invocation: ' + gene.constraints.max_files);
    }
    if (gene.constraints.forbidden_paths && gene.constraints.forbidden_paths.length > 0) {
      lines.push('- Forbidden paths: ' + gene.constraints.forbidden_paths.map(function (p) { return '`' + p + '`'; }).join(', '));
    }
    lines.push('');
  }
  // -- Validation: each command rendered as its own bash code block --
  if (gene.validation && gene.validation.length > 0) {
    lines.push('## Validation');
    lines.push('');
    gene.validation.forEach(function (cmd) {
      lines.push('```bash');
      lines.push(cmd);
      lines.push('```');
      lines.push('');
    });
  }
  // -- Metadata (category, schema version, distillation provenance) --
  lines.push('## Metadata');
  lines.push('');
  lines.push('- Category: `' + (gene.category || 'innovate') + '`');
  lines.push('- Schema version: `' + (gene.schema_version || '1.6.0') + '`');
  if (gene._distilled_meta && gene._distilled_meta.source_capsule_count) {
    lines.push('- Distilled from: ' + gene._distilled_meta.source_capsule_count + ' successful capsules');
  }
  lines.push('');
  lines.push('---');
  lines.push('');
  lines.push('*This Skill was generated by [Evolver](https://github.com/autogame-17/evolver) and is distributed under the [EvoMap Skill License (ESL-1.0)](https://evomap.ai/terms). Unauthorized redistribution, bulk scraping, or republishing is prohibited. See LICENSE file for full terms.*');
  lines.push('');
  return lines.join('\n');
}
/**
* Extract the leading verb from a strategy step for bolding.
* Only extracts a single verb to avoid splitting compound phrases.
* e.g. "Verify Cursor CLI installation" -> "Verify"
* "Run `npm test` to check" -> "Run"
* "Configure non-interactive mode" -> "Configure"
*/
// Extract a single leading capitalized verb from a strategy step ("" if none).
// Matches only at the very start, so steps beginning with backticks or other
// special characters yield no verb. "Run `npm test`" -> "Run".
function extractStepVerb(step) {
  var m = /^([A-Z][a-z]+)/.exec(step);
  return m === null ? '' : m[1];
}
/**
* Remove the leading verb from a step (already shown in bold).
*/
// Drop the leading verb (already rendered in bold by the caller) plus any
// following whitespace/punctuation. Returns the original step unchanged when
// no verb is found or when stripping would leave an empty string.
function stripLeadingVerb(step) {
  var verb = extractStepVerb(step);
  if (!verb || !step.startsWith(verb)) return step;
  var remainder = step.slice(verb.length).replace(/^[\s:.\-]+/, '');
  return remainder === '' ? step : remainder;
}
/**
* Publish a Gene as a Skill to the Hub skill store.
*
* @param {object} gene - Gene asset
* @param {object} [opts] - { category, tags }
* @returns {Promise<{ok: boolean, result?: object, error?: string}>}
*/
function publishSkillToHub(gene, opts) {
  opts = opts || {};
  var hubUrl = getHubUrl();
  if (!hubUrl) return Promise.resolve({ ok: false, error: 'no_hub_url' });
  // Shallow-copy gene to avoid mutating the caller's object
  var geneCopy = {};
  Object.keys(gene).forEach(function (k) { geneCopy[k] = gene[k]; });
  // Optional signal sanitation; the distiller module may not be installed.
  if (Array.isArray(geneCopy.signals_match)) {
    try {
      var distiller = require('./skillDistiller');
      geneCopy.signals_match = distiller.sanitizeSignalsMatch(geneCopy.signals_match);
    } catch (e) { /* distiller not available, skip */ }
  }
  var content = geneToSkillMd(geneCopy);
  var nodeId = getNodeId();
  // Re-derive the canonical skill id from the generated front matter so it
  // always matches what geneToSkillMd actually emitted.
  var fmName = content.match(/^name:\s*(.+)$/m);
  var derivedName = fmName ? fmName[1].trim().toLowerCase().replace(/[^a-z0-9]+/g, '_') : (gene.id || 'unnamed').replace(/^gene_/, '');
  // Strip ALL embedded timestamps from skillId
  derivedName = derivedName.replace(/_?\d{10,}_?/g, '_').replace(/_+/g, '_').replace(/^_|_$/g, '');
  var skillId = 'skill_' + derivedName;
  // Clean tags: use already-sanitized signals from geneCopy.
  // Drop short, purely numeric, or timestamp-laden tags.
  var tags = opts.tags || geneCopy.signals_match || [];
  tags = tags.filter(function (t) {
    var s = String(t || '').trim();
    return s.length >= 3 && !/^\d+$/.test(s) && !/\d{10,}/.test(s);
  });
  var body = {
    sender_id: nodeId,
    skill_id: skillId,
    content: content,
    category: opts.category || geneCopy.category || null,
    tags: tags,
  };
  var endpoint = hubUrl.replace(/\/+$/, '') + '/a2a/skill/store/publish';
  // POST with a 15s timeout. 200/201 = published; 409 = skill already exists,
  // in which case we fall through to an update (new version) instead.
  return fetch(endpoint, {
    method: 'POST',
    headers: buildHubHeaders(),
    body: JSON.stringify(body),
    signal: AbortSignal.timeout(15000),
  })
    .then(function (res) { return res.json().then(function (data) { return { status: res.status, data: data }; }); })
    .then(function (result) {
      if (result.status === 201 || result.status === 200) {
        return { ok: true, result: result.data };
      }
      if (result.status === 409) {
        return updateSkillOnHub(nodeId, skillId, content, opts, gene);
      }
      return { ok: false, error: result.data?.error || 'publish_failed', status: result.status };
    })
    .catch(function (err) {
      // Network errors, timeouts, and JSON parse failures all land here.
      return { ok: false, error: err.message };
    });
}
/**
* Update an existing Skill on the Hub (new version).
*/
function updateSkillOnHub(nodeId, skillId, content, opts, gene) {
  var hubUrl = getHubUrl();
  if (!hubUrl) return Promise.resolve({ ok: false, error: 'no_hub_url' });
  // Same tag hygiene as publishSkillToHub: drop short, numeric, or timestamp-laden tags.
  var tags = opts.tags || gene.signals_match || [];
  tags = tags.filter(function (t) {
    var s = String(t || '').trim();
    return s.length >= 3 && !/^\d+$/.test(s) && !/\d{10,}/.test(s);
  });
  var body = {
    sender_id: nodeId,
    skill_id: skillId,
    content: content,
    category: opts.category || gene.category || null,
    tags: tags,
    changelog: 'Iterative evolution update',
  };
  var endpoint = hubUrl.replace(/\/+$/, '') + '/a2a/skill/store/update';
  // PUT with a 15s timeout; any non-2xx response is surfaced as { ok: false, status }.
  return fetch(endpoint, {
    method: 'PUT',
    headers: buildHubHeaders(),
    body: JSON.stringify(body),
    signal: AbortSignal.timeout(15000),
  })
    .then(function (res) { return res.json().then(function (data) { return { status: res.status, data: data }; }); })
    .then(function (result) {
      if (result.status >= 200 && result.status < 300) {
        return { ok: true, result: result.data };
      }
      return { ok: false, error: result.data?.error || 'update_failed', status: result.status };
    })
    .catch(function (err) { return { ok: false, error: err.message }; });
}
// Public API: SKILL.md generation plus Hub publish/update helpers.
module.exports = {
  geneToSkillMd: geneToSkillMd,
  publishSkillToHub: publishSkillToHub,
  updateSkillOnHub: updateSkillOnHub,
  sanitizeSkillName: sanitizeSkillName,
  toTitleCase: toTitleCase,
};

1320
src/gep/solidify.js Normal file

File diff suppressed because it is too large Load Diff

126
src/gep/strategy.js Normal file
View File

@@ -0,0 +1,126 @@
// Evolution Strategy Presets (v1.1)
// Controls the balance between repair, optimize, and innovate intents.
//
// Usage: set EVOLVE_STRATEGY env var to one of: balanced, innovate, harden, repair-only,
// early-stabilize, steady-state, or "auto" for adaptive selection.
// Default: balanced (or auto-detected based on cycle count / saturation signals)
//
// Each strategy defines:
// repair/optimize/innovate - target allocation ratios (inform the LLM prompt)
// repairLoopThreshold - repair ratio in last 8 cycles that triggers forced innovation
// label - human-readable name injected into the GEP prompt
var fs = require('fs');
var path = require('path');
// Preset table. The repair/optimize/innovate ratios are advisory allocations
// (roughly summing to 1.0) that inform the GEP prompt; repairLoopThreshold is
// the repair ratio over recent cycles that triggers forced innovation (see the
// header comment above).
var STRATEGIES = {
  'balanced': {
    repair: 0.20,
    optimize: 0.30,
    innovate: 0.50,
    repairLoopThreshold: 0.50,
    label: 'Balanced',
    description: 'Normal operation. Steady growth with stability.',
  },
  'innovate': {
    repair: 0.05,
    optimize: 0.15,
    innovate: 0.80,
    repairLoopThreshold: 0.30,
    label: 'Innovation Focus',
    description: 'System is stable. Maximize new features and capabilities.',
  },
  'harden': {
    repair: 0.40,
    optimize: 0.40,
    innovate: 0.20,
    repairLoopThreshold: 0.70,
    label: 'Hardening',
    description: 'After a big change. Focus on stability and robustness.',
  },
  'repair-only': {
    repair: 0.80,
    optimize: 0.20,
    innovate: 0.00,
    repairLoopThreshold: 1.00,
    label: 'Repair Only',
    description: 'Emergency. Fix everything before doing anything else.',
  },
  'early-stabilize': {
    repair: 0.60,
    optimize: 0.25,
    innovate: 0.15,
    repairLoopThreshold: 0.80,
    label: 'Early Stabilization',
    description: 'First cycles. Prioritize fixing existing issues before innovating.',
  },
  'steady-state': {
    repair: 0.60,
    optimize: 0.30,
    innovate: 0.10,
    repairLoopThreshold: 0.90,
    label: 'Steady State',
    description: 'Evolution saturated. Maintain existing capabilities. Minimal innovation.',
  },
};
// Read evolution_state.json to get the current cycle count for auto-detection.
// Returns the persisted evolution cycle count, or 0 when no state file exists
// or it cannot be parsed. Any I/O or JSON error degrades silently to 0.
function _readCycleCount() {
  try {
    // Two known locations, checked in order; the first existing file wins.
    // evolver/memory/evolution_state.json (local to the skill)
    var localPath = path.resolve(__dirname, '..', '..', 'memory', 'evolution_state.json');
    // workspace/memory/evolution/evolution_state.json (canonical path used by evolve.js)
    var workspacePath = path.resolve(__dirname, '..', '..', '..', '..', 'memory', 'evolution', 'evolution_state.json');
    var candidates = [localPath, workspacePath];
    for (var i = 0; i < candidates.length; i++) {
      if (fs.existsSync(candidates[i])) {
        var data = JSON.parse(fs.readFileSync(candidates[i], 'utf8'));
        // A present-but-malformed cycleCount also degrades to 0.
        return data && Number.isFinite(data.cycleCount) ? data.cycleCount : 0;
      }
    }
  } catch (e) {}
  return 0;
}
// Resolve the active evolution strategy from EVOLVE_STRATEGY, legacy env
// flags, the persisted cycle count, and caller-supplied signals.
//
// @param {{ signals?: string[] }} [opts] - steering signals (e.g. saturation markers)
// @returns {object} a strategy preset COPY with a `name` property attached
function resolveStrategy(opts) {
  var signals = (opts && Array.isArray(opts.signals)) ? opts.signals : [];
  var name = String(process.env.EVOLVE_STRATEGY || 'balanced').toLowerCase().trim();
  // Backward compatibility: FORCE_INNOVATION=true maps to 'innovate'
  if (!process.env.EVOLVE_STRATEGY) {
    var fi = String(process.env.FORCE_INNOVATION || process.env.EVOLVE_FORCE_INNOVATION || '').toLowerCase();
    if (fi === 'true') name = 'innovate';
  }
  // Auto-detection: when no explicit strategy is set (defaults to 'balanced'),
  // apply heuristics inspired by Echo-MingXuan's "fix first, innovate later" pattern.
  var isDefault = !process.env.EVOLVE_STRATEGY || name === 'balanced' || name === 'auto';
  if (isDefault) {
    // Early-stabilize: first 5 cycles should focus on fixing existing issues.
    var cycleCount = _readCycleCount();
    if (cycleCount > 0 && cycleCount <= 5) {
      name = 'early-stabilize';
    }
    // Saturation detection: either signal forces steady-state maintenance mode.
    if (signals.indexOf('force_steady_state') !== -1 ||
        signals.indexOf('evolution_saturation') !== -1) {
      name = 'steady-state';
    }
  }
  // Explicit "auto" maps to whatever was auto-detected above (or balanced if no heuristic fired).
  if (name === 'auto') name = 'balanced';
  var strategy = STRATEGIES[name] || STRATEGIES['balanced'];
  // BUG FIX: the previous implementation assigned `strategy.name = name`
  // directly on the shared STRATEGIES preset, mutating module-level state
  // across calls (e.g. resolving 'auto' relabeled the 'balanced' preset for
  // every later caller). Return a shallow copy so presets stay pristine.
  var resolved = Object.assign({}, strategy);
  resolved.name = name;
  return resolved;
}
// List the available strategy preset names (for CLI help / validation).
function getStrategyNames() {
  return Object.keys(STRATEGIES);
}
module.exports = { resolveStrategy, getStrategyNames, STRATEGIES };

528
src/gep/taskReceiver.js Normal file
View File

@@ -0,0 +1,528 @@
// ---------------------------------------------------------------------------
// taskReceiver -- pulls external tasks from Hub, auto-claims, and injects
// them as high-priority signals into the evolution loop.
//
// v2: Smart task selection with difficulty-aware ROI scoring and capability
// matching via memory graph history.
// ---------------------------------------------------------------------------
const { getNodeId, buildHubHeaders } = require('./a2aProtocol');
const HUB_URL = process.env.A2A_HUB_URL || process.env.EVOMAP_HUB_URL || 'https://evomap.ai';
// Thin alias over the shared protocol helper; kept so call sites in this file
// read uniformly.
function buildAuthHeaders() {
  return buildHubHeaders();
}
// Task-selection strategy: 'greedy' | 'balanced' | 'conservative' (default 'balanced').
const TASK_STRATEGY = String(process.env.TASK_STRATEGY || 'balanced').toLowerCase();
// Minimum capability-match score a task must reach to be considered (0 disables the filter).
const TASK_MIN_CAPABILITY_MATCH = Number(process.env.TASK_MIN_CAPABILITY_MATCH) || 0.1;
// Scoring weights by strategy. Each row sums to 1.0: roi = bounty per unit of
// difficulty, capability = history match, completion = historical completion
// rate, bounty = absolute bounty size.
const STRATEGY_WEIGHTS = {
  greedy: { roi: 0.10, capability: 0.05, completion: 0.05, bounty: 0.80 },
  balanced: { roi: 0.35, capability: 0.30, completion: 0.20, bounty: 0.15 },
  conservative: { roi: 0.25, capability: 0.45, completion: 0.25, bounty: 0.05 },
};
/**
* Fetch available tasks from Hub via the A2A fetch endpoint.
* Optionally piggybacks proactive questions in the payload for Hub to create bounties.
*
* @param {object} [opts]
* @param {Array<{ question: string, amount?: number, signals?: string[] }>} [opts.questions]
* @returns {{ tasks: Array, questions_created?: Array }}
*/
// Fetch available tasks from the Hub via the A2A fetch endpoint, optionally
// piggybacking proactive questions. Always resolves ({ tasks: [] } on any
// failure); never rejects.
async function fetchTasks(opts) {
  const o = opts || {};
  const nodeId = getNodeId();
  if (!nodeId) return { tasks: [] };
  // Abort the request after 8s. BUG FIX: the timer is now cleared in
  // `finally` -- previously clearTimeout was only reached on the success
  // path, so a throwing fetch()/json() left the abort timer pending for up
  // to 8 seconds.
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), 8000);
  try {
    const payload = {
      asset_type: null,
      include_tasks: true,
    };
    if (Array.isArray(o.questions) && o.questions.length > 0) {
      payload.questions = o.questions;
    }
    const msg = {
      protocol: 'gep-a2a',
      protocol_version: '1.0.0',
      message_type: 'fetch',
      message_id: `msg_${Date.now()}_${Math.random().toString(36).slice(2, 8)}`,
      sender_id: nodeId,
      timestamp: new Date().toISOString(),
      payload,
    };
    const url = `${HUB_URL.replace(/\/+$/, '')}/a2a/fetch`;
    const res = await fetch(url, {
      method: 'POST',
      headers: buildAuthHeaders(),
      body: JSON.stringify(msg),
      signal: controller.signal,
    });
    if (!res.ok) return { tasks: [] };
    const data = await res.json();
    // The Hub may wrap the response in a protocol envelope ({ payload: ... }).
    const respPayload = data.payload || data;
    const tasks = Array.isArray(respPayload.tasks) ? respPayload.tasks : [];
    const result = { tasks };
    if (respPayload.questions_created) {
      result.questions_created = respPayload.questions_created;
    }
    // LessonL: extract relevant lessons from Hub response
    if (Array.isArray(respPayload.relevant_lessons) && respPayload.relevant_lessons.length > 0) {
      result.relevant_lessons = respPayload.relevant_lessons;
    }
    return result;
  } catch (err) {
    console.warn("[TaskReceiver] fetchTasks failed:", err && err.message ? err.message : err);
    return { tasks: [] };
  } finally {
    clearTimeout(timer);
  }
}
// ---------------------------------------------------------------------------
// Capability matching: how well this agent's history matches a task's signals
// ---------------------------------------------------------------------------
// Split a comma-separated signal string into trimmed, lowercased, non-empty
// tokens. Falsy input yields an empty list.
function parseSignals(raw) {
  if (!raw) return [];
  var out = [];
  var pieces = String(raw).split(',');
  for (var i = 0; i < pieces.length; i++) {
    var token = pieces[i].trim().toLowerCase();
    if (token) out.push(token);
  }
  return out;
}
// Jaccard similarity |A ∩ B| / |A ∪ B| of two arrays (treated as sets).
// Returns 0 when either input array is empty.
function jaccard(a, b) {
  if (a.length === 0 || b.length === 0) return 0;
  var setA = new Set(a);
  var setB = new Set(b);
  var inter = 0;
  setB.forEach(function (v) {
    if (setA.has(v)) inter++;
  });
  return inter / (setA.size + setB.size - inter);
}
/**
* Estimate how well this agent can handle a task based on memory graph history.
* Returns 0.0 - 1.0 where 1.0 = strong match with high success rate.
*
* @param {object} task - task from Hub (has .signals field)
* @param {Array} memoryEvents - from tryReadMemoryGraphEvents()
* @returns {number}
*/
function estimateCapabilityMatch(task, memoryEvents) {
  // No history at all -> neutral prior of 0.5 (neither boosts nor penalizes).
  if (!Array.isArray(memoryEvents) || memoryEvents.length === 0) return 0.5;
  var taskSignals = parseSignals(task.signals || task.title);
  if (taskSignals.length === 0) return 0.5;
  var successBySignalKey = {};
  var totalBySignalKey = {};
  var allSignals = {};
  // Aggregate outcome events: per-signal-key success counts, plus the set of
  // all signals this agent has ever worked with.
  for (var i = 0; i < memoryEvents.length; i++) {
    var ev = memoryEvents[i];
    if (!ev || ev.type !== 'MemoryGraphEvent' || ev.kind !== 'outcome') continue;
    var sigs = (ev.signal && Array.isArray(ev.signal.signals)) ? ev.signal.signals : [];
    var key = (ev.signal && ev.signal.key) ? String(ev.signal.key) : '';
    var status = (ev.outcome && ev.outcome.status) ? String(ev.outcome.status) : '';
    for (var j = 0; j < sigs.length; j++) {
      allSignals[sigs[j].toLowerCase()] = true;
    }
    if (!key) continue;
    if (!totalBySignalKey[key]) { totalBySignalKey[key] = 0; successBySignalKey[key] = 0; }
    totalBySignalKey[key]++;
    if (status === 'success') successBySignalKey[key]++;
  }
  // Jaccard overlap between task signals and all signals this agent has worked with
  var allSigArr = Object.keys(allSignals);
  var overlapScore = jaccard(taskSignals, allSigArr);
  // Weighted success rate across matching signal keys
  var weightedSuccess = 0;
  var weightSum = 0;
  for (var sk in totalBySignalKey) {
    // Reconstruct signals from the key for comparison (keys appear to be
    // '|'-joined signal lists -- TODO confirm against the memory-graph writer).
    var skParts = sk.split('|').map(function(s) { return s.trim().toLowerCase(); }).filter(Boolean);
    var sim = jaccard(taskSignals, skParts);
    if (sim < 0.15) continue; // ignore barely-related keys
    var total = totalBySignalKey[sk];
    var succ = successBySignalKey[sk] || 0;
    var rate = (succ + 1) / (total + 2); // Laplace smoothing
    weightedSuccess += rate * sim;
    weightSum += sim;
  }
  // No sufficiently similar keys -> neutral 0.5 success prior.
  var successScore = weightSum > 0 ? (weightedSuccess / weightSum) : 0.5;
  // Combine: 60% success rate history + 40% signal overlap
  return Math.min(1, overlapScore * 0.4 + successScore * 0.6);
}
// ---------------------------------------------------------------------------
// Local fallback difficulty estimation when Hub doesn't provide complexity_score
// ---------------------------------------------------------------------------
// Heuristic difficulty in [0, 1]: 60% weight on signal count (saturates at 8
// signals), 40% on title word count (saturates at 15 words). Used only when
// the Hub does not provide complexity_score.
function localDifficultyEstimate(task) {
  var signals = parseSignals(task.signals);
  var signalFactor = Math.min(signals.length / 8, 1);
  var titleWords = (task.title || '').split(/\s+/).filter(Boolean).length;
  var titleFactor = Math.min(titleWords / 15, 1);
  return Math.min(1, signalFactor * 0.6 + titleFactor * 0.4);
}
// ---------------------------------------------------------------------------
// Commitment deadline estimation -- based on task difficulty
// ---------------------------------------------------------------------------
const MIN_COMMITMENT_MS = 5 * 60 * 1000; // 5 min (Hub minimum)
const MAX_COMMITMENT_MS = 24 * 60 * 60 * 1000; // 24 h (Hub maximum)
// Scanned in order by estimateCommitmentDeadline; the first band whose
// threshold is >= the task difficulty supplies the commitment duration.
const DIFFICULTY_DURATION_MAP = [
  { threshold: 0.3, durationMs: 15 * 60 * 1000 }, // low: 15 min
  { threshold: 0.5, durationMs: 30 * 60 * 1000 }, // medium: 30 min
  { threshold: 0.7, durationMs: 60 * 60 * 1000 }, // high: 60 min
  { threshold: 1.0, durationMs: 120 * 60 * 1000 }, // very high: 120 min
];
/**
* Estimate a reasonable commitment deadline for a task.
* Returns an ISO-8601 date string or null if estimation fails.
*
* @param {object} task - task from Hub
* @returns {string|null}
*/
function estimateCommitmentDeadline(task) {
  if (!task) return null;
  // Prefer the Hub-provided difficulty; fall back to the local heuristic.
  var difficulty = (task.complexity_score != null)
    ? Number(task.complexity_score)
    : localDifficultyEstimate(task);
  // Default to the largest band, then pick the first band the difficulty fits in.
  var durationMs = DIFFICULTY_DURATION_MAP[DIFFICULTY_DURATION_MAP.length - 1].durationMs;
  for (var i = 0; i < DIFFICULTY_DURATION_MAP.length; i++) {
    if (difficulty <= DIFFICULTY_DURATION_MAP[i].threshold) {
      durationMs = DIFFICULTY_DURATION_MAP[i].durationMs;
      break;
    }
  }
  // Clamp to the Hub-accepted commitment window.
  durationMs = Math.max(MIN_COMMITMENT_MS, Math.min(MAX_COMMITMENT_MS, durationMs));
  var deadline = new Date(Date.now() + durationMs);
  // If the task itself expires sooner, commit to one minute before expiry --
  // unless that leaves less than the Hub-minimum window, in which case return
  // null to signal "do not claim this task".
  if (task.expires_at) {
    var expiresAt = new Date(task.expires_at);
    if (!isNaN(expiresAt.getTime()) && expiresAt < deadline) {
      var remaining = expiresAt.getTime() - Date.now();
      if (remaining < MIN_COMMITMENT_MS) return null;
      var adjusted = new Date(expiresAt.getTime() - 60000);
      if (adjusted.getTime() - Date.now() < MIN_COMMITMENT_MS) return null;
      deadline = adjusted;
    }
  }
  return deadline.toISOString();
}
// ---------------------------------------------------------------------------
// Score a single task for this agent
// ---------------------------------------------------------------------------
/**
* @param {object} task - task from Hub
* @param {number} capabilityMatch - from estimateCapabilityMatch()
* @returns {{ composite: number, factors: object }}
*/
function scoreTask(task, capabilityMatch) {
  var w = STRATEGY_WEIGHTS[TASK_STRATEGY] || STRATEGY_WEIGHTS.balanced;
  var difficulty = (task.complexity_score != null) ? task.complexity_score : localDifficultyEstimate(task);
  var bountyAmount = task.bounty_amount || 0;
  // Unknown completion history -> neutral 0.5.
  var completionRate = (task.historical_completion_rate != null) ? task.historical_completion_rate : 0.5;
  // ROI: bounty per unit difficulty (higher = better value)
  var roiRaw = bountyAmount / (difficulty + 0.1); // +0.1 guards against division by zero
  var roiNorm = Math.min(roiRaw / 200, 1); // normalize: 200-credit ROI = max
  // Bounty absolute: normalize against a reference max
  var bountyNorm = Math.min(bountyAmount / 100, 1);
  // Weighted sum of the four normalized factors per the active strategy.
  var composite =
    w.roi * roiNorm +
    w.capability * capabilityMatch +
    w.completion * completionRate +
    w.bounty * bountyNorm;
  // Rounded values keep logs compact and comparisons stable.
  return {
    composite: Math.round(composite * 1000) / 1000,
    factors: {
      roi: Math.round(roiNorm * 100) / 100,
      capability: Math.round(capabilityMatch * 100) / 100,
      completion: Math.round(completionRate * 100) / 100,
      bounty: Math.round(bountyNorm * 100) / 100,
      difficulty: Math.round(difficulty * 100) / 100,
    },
  };
}
// ---------------------------------------------------------------------------
// Enhanced task selection with scoring
// ---------------------------------------------------------------------------
/**
* Pick the best task from a list using composite scoring.
* @param {Array} tasks
* @param {Array} [memoryEvents] - from tryReadMemoryGraphEvents()
* @returns {object|null}
*/
function selectBestTask(tasks, memoryEvents) {
  if (!Array.isArray(tasks) || tasks.length === 0) return null;
  var nodeId = getNodeId();
  // Already-claimed tasks for this node always take top priority (resume work)
  var myClaimedTask = tasks.find(function(t) {
    return t.status === 'claimed' && t.claimed_by === nodeId;
  });
  if (myClaimedTask) return myClaimedTask;
  // Filter to open tasks only
  var open = tasks.filter(function(t) { return t.status === 'open'; });
  if (open.length === 0) return null;
  // Legacy greedy mode: preserve old behavior exactly when there is no memory
  // history to score against (highest bounty first, else first open task).
  if (TASK_STRATEGY === 'greedy' && (!memoryEvents || memoryEvents.length === 0)) {
    var bountyTasks = open.filter(function(t) { return t.bounty_id; });
    if (bountyTasks.length > 0) {
      bountyTasks.sort(function(a, b) { return (b.bounty_amount || 0) - (a.bounty_amount || 0); });
      return bountyTasks[0];
    }
    return open[0];
  }
  // Score all open tasks
  var scored = open.map(function(t) {
    var cap = estimateCapabilityMatch(t, memoryEvents || []);
    var result = scoreTask(t, cap);
    return { task: t, composite: result.composite, factors: result.factors, capability: cap };
  });
  // Filter by minimum capability match (unless conservative skipping is off).
  // If the filter would eliminate every candidate, it is skipped entirely so
  // the agent still picks up some task.
  if (TASK_MIN_CAPABILITY_MATCH > 0) {
    var filtered = scored.filter(function(s) { return s.capability >= TASK_MIN_CAPABILITY_MATCH; });
    if (filtered.length > 0) scored = filtered;
  }
  // Highest composite score first.
  scored.sort(function(a, b) { return b.composite - a.composite; });
  // Log top 3 candidates for debugging
  var top3 = scored.slice(0, 3);
  for (var i = 0; i < top3.length; i++) {
    var s = top3[i];
    console.log('[TaskStrategy] #' + (i + 1) + ' "' + (s.task.title || s.task.task_id || '').slice(0, 50) + '" score=' + s.composite + ' ' + JSON.stringify(s.factors));
  }
  return scored[0] ? scored[0].task : null;
}
/**
* Claim a task on the Hub.
* @param {string} taskId
* @param {{ commitment_deadline?: string }} [opts]
* @returns {boolean} true if claim succeeded
*/
async function claimTask(taskId, opts) {
  const nodeId = getNodeId();
  if (!nodeId || !taskId) return false;
  try {
    const url = `${HUB_URL.replace(/\/+$/, '')}/a2a/task/claim`;
    // 5s abort guard on the Hub round-trip.
    const controller = new AbortController();
    const timer = setTimeout(() => controller.abort(), 5000);
    const body = { task_id: taskId, node_id: nodeId };
    // Optional self-imposed deadline (see estimateCommitmentDeadline).
    if (opts && opts.commitment_deadline) {
      body.commitment_deadline = opts.commitment_deadline;
    }
    const res = await fetch(url, {
      method: 'POST',
      headers: buildAuthHeaders(),
      body: JSON.stringify(body),
      signal: controller.signal,
    });
    clearTimeout(timer);
    return res.ok;
  } catch {
    // Network/abort failures are treated as "claim not obtained".
    return false;
  }
}
/**
* Complete a task on the Hub with the result asset ID.
* @param {string} taskId
* @param {string} assetId
* @returns {boolean}
*/
async function completeTask(taskId, assetId) {
  const nodeId = getNodeId();
  if (!nodeId || !taskId || !assetId) return false;
  try {
    const url = `${HUB_URL.replace(/\/+$/, '')}/a2a/task/complete`;
    // 5s abort guard on the Hub round-trip.
    const controller = new AbortController();
    const timer = setTimeout(() => controller.abort(), 5000);
    const res = await fetch(url, {
      method: 'POST',
      headers: buildAuthHeaders(),
      body: JSON.stringify({ task_id: taskId, asset_id: assetId, node_id: nodeId }),
      signal: controller.signal,
    });
    clearTimeout(timer);
    return res.ok;
  } catch {
    // Network/abort failures are treated as "not completed".
    return false;
  }
}
/**
* Extract signals from a task to inject into evolution cycle.
* @param {object} task
* @returns {string[]} signals array
*/
// Convert a Hub task into an evolution-signal list: the task's own
// comma-separated signals, up to five distinct title words (>= 3 chars,
// lowercased), the 'external_task' marker, and 'bounty_task' when a bounty
// is attached. Returns [] for a falsy task.
function taskToSignals(task) {
  if (!task) return [];
  var signals = [];
  if (task.signals) {
    String(task.signals).split(',').forEach(function (piece) {
      var trimmed = piece.trim();
      if (trimmed) signals.push(trimmed);
    });
  }
  if (task.title) {
    var words = String(task.title)
      .toLowerCase()
      .split(/\s+/)
      .filter(function (w) { return w.length >= 3; });
    words.slice(0, 5).forEach(function (w) {
      if (signals.indexOf(w) === -1) signals.push(w);
    });
  }
  signals.push('external_task');
  if (task.bounty_id) signals.push('bounty_task');
  return signals;
}
// ---------------------------------------------------------------------------
// Worker Pool task operations (POST /a2a/work/*)
// These use a separate API from bounty tasks and return assignment objects.
// ---------------------------------------------------------------------------
// Claim a worker-pool task. Returns the Hub's assignment object on success,
// or null on any failure (missing ids, HTTP error, timeout, network error).
async function claimWorkerTask(taskId) {
  const nodeId = getNodeId();
  if (!nodeId || !taskId) return null;
  try {
    const url = `${HUB_URL.replace(/\/+$/, '')}/a2a/work/claim`;
    // 5s abort guard on the Hub round-trip.
    const controller = new AbortController();
    const timer = setTimeout(() => controller.abort(), 5000);
    const res = await fetch(url, {
      method: 'POST',
      headers: buildAuthHeaders(),
      body: JSON.stringify({ task_id: taskId, node_id: nodeId }),
      signal: controller.signal,
    });
    clearTimeout(timer);
    if (!res.ok) return null;
    return await res.json();
  } catch {
    return null;
  }
}
// Complete a worker-pool assignment with the result asset id.
// Returns true only when the Hub responds with a 2xx status.
async function completeWorkerTask(assignmentId, resultAssetId) {
  const nodeId = getNodeId();
  if (!nodeId || !assignmentId || !resultAssetId) return false;
  try {
    const url = `${HUB_URL.replace(/\/+$/, '')}/a2a/work/complete`;
    // 5s abort guard on the Hub round-trip.
    const controller = new AbortController();
    const timer = setTimeout(() => controller.abort(), 5000);
    const res = await fetch(url, {
      method: 'POST',
      headers: buildAuthHeaders(),
      body: JSON.stringify({ assignment_id: assignmentId, node_id: nodeId, result_asset_id: resultAssetId }),
      signal: controller.signal,
    });
    clearTimeout(timer);
    return res.ok;
  } catch {
    return false;
  }
}
/**
* Atomic claim+complete for deferred worker tasks.
* Called from solidify after a successful evolution cycle so we never hold
* an assignment that might expire before completion.
*
* @param {string} taskId
* @param {string} resultAssetId - sha256:... of the published capsule
* @returns {{ ok: boolean, assignment_id?: string, error?: string }}
*/
async function claimAndCompleteWorkerTask(taskId, resultAssetId) {
  const nodeId = getNodeId();
  if (!nodeId || !taskId || !resultAssetId) {
    return { ok: false, error: 'missing_params' };
  }
  const assignment = await claimWorkerTask(taskId);
  if (!assignment) {
    return { ok: false, error: 'claim_failed' };
  }
  // Accept either `id` or `assignment_id` -- the Hub response shape appears
  // to vary (TODO confirm which field is canonical).
  const assignmentId = assignment.id || assignment.assignment_id;
  if (!assignmentId) {
    return { ok: false, error: 'no_assignment_id' };
  }
  const completed = await completeWorkerTask(assignmentId, resultAssetId);
  if (!completed) {
    // Nothing to roll back locally; the claim simply expires on the Hub side.
    console.warn(`[WorkerPool] Claimed assignment ${assignmentId} but complete failed -- will expire on Hub`);
    return { ok: false, error: 'complete_failed', assignment_id: assignmentId };
  }
  return { ok: true, assignment_id: assignmentId };
}
// Public API. Bounty-task flow: fetchTasks -> selectBestTask -> claimTask ->
// completeTask. Worker-pool flow: claimWorkerTask / completeWorkerTask, or the
// atomic claimAndCompleteWorkerTask.
module.exports = {
  fetchTasks,
  selectBestTask,
  estimateCapabilityMatch,
  scoreTask,
  claimTask,
  completeTask,
  taskToSignals,
  claimWorkerTask,
  completeWorkerTask,
  claimAndCompleteWorkerTask,
  estimateCommitmentDeadline,
};

View File

@@ -0,0 +1,55 @@
// Standardized ValidationReport type for GEP.
// Machine-readable, self-contained, and interoperable.
// Can be consumed by external Hubs or Judges for automated assessment.
const { computeAssetId, SCHEMA_VERSION } = require('./contentHash');
const { captureEnvFingerprint, envFingerprintKey } = require('./envFingerprint');
// Build a standardized ValidationReport from raw validation results.
function buildValidationReport({ geneId, commands, results, envFp, startedAt, finishedAt }) {
  // Capture the environment fingerprint lazily when the caller did not supply one.
  const env = envFp || captureEnvFingerprint();
  const resultsList = Array.isArray(results) ? results : [];
  // When explicit commands are not given, recover them from the result entries' `cmd` field.
  const cmdsList = Array.isArray(commands) ? commands : resultsList.map(function (r) { return r && r.cmd ? String(r.cmd) : ''; });
  // overall_ok requires at least one result AND every result passing.
  const overallOk = resultsList.length > 0 && resultsList.every(function (r) { return r && r.ok; });
  // null (not 0) when either timestamp is missing or non-numeric.
  const durationMs =
    Number.isFinite(startedAt) && Number.isFinite(finishedAt) ? finishedAt - startedAt : null;
  const report = {
    type: 'ValidationReport',
    schema_version: SCHEMA_VERSION,
    id: 'vr_' + Date.now(),
    gene_id: geneId || null,
    env_fingerprint: env,
    env_fingerprint_key: envFingerprintKey(env),
    // Commands are zipped with results by index; missing results yield ok=false
    // with empty output.
    commands: cmdsList.map(function (cmd, i) {
      const r = resultsList[i] || {};
      return {
        command: String(cmd || ''),
        ok: !!r.ok,
        stdout: String(r.out || r.stdout || '').slice(0, 4000), // accepts both {out} and {stdout} shapes; capped at 4 KB
        stderr: String(r.err || r.stderr || '').slice(0, 4000), // accepts both {err} and {stderr} shapes; capped at 4 KB
      };
    }),
    overall_ok: overallOk,
    duration_ms: durationMs,
    created_at: new Date().toISOString(),
  };
  // Content-address the report so it can be referenced as a GEP asset.
  report.asset_id = computeAssetId(report);
  return report;
}
// Validate that an object is a well-formed ValidationReport.
// Structural check that an object is a well-formed ValidationReport:
// correct type tag, non-empty string id, commands array, boolean overall_ok.
function isValidValidationReport(obj) {
  var isObject = obj !== null && typeof obj === 'object';
  if (!isObject || obj.type !== 'ValidationReport') return false;
  var hasId = typeof obj.id === 'string' && obj.id.length > 0;
  return hasId && Array.isArray(obj.commands) && typeof obj.overall_ok === 'boolean';
}
// Public API: report construction + structural validation.
module.exports = {
  buildValidationReport,
  isValidValidationReport,
};

80
src/ops/cleanup.js Normal file
View File

@@ -0,0 +1,80 @@
// GEP Artifact Cleanup - Evolver Core Module
// Removes old gep_prompt_*.json/txt files from evolution dir.
// Keeps at least 10 most recent files regardless of age.
const fs = require('fs');
const path = require('path');
const { getEvolutionDir } = require('../gep/paths');
var MAX_AGE_MS = 24 * 60 * 60 * 1000; // 24 hours
var MIN_KEEP = 10;
// Best-effort unlink of every path in `batch`; returns how many deletions
// succeeded. Failures (already gone, permissions) are silently skipped.
function safeBatchDelete(batch) {
  var deleted = 0;
  batch.forEach(function (file) {
    try {
      fs.unlinkSync(file);
      deleted += 1;
    } catch (_) {
      // Skip files that cannot be removed.
    }
  });
  return deleted;
}
// Run one cleanup pass over the evolution dir and return the number of files
// deleted. NOTE: returns undefined (not 0) when the directory does not exist.
function run() {
  var evoDir = getEvolutionDir();
  if (!fs.existsSync(evoDir)) return;
  // Snapshot all gep_prompt_* artifacts, newest first.
  var files = fs.readdirSync(evoDir)
    .filter(function(f) { return /^gep_prompt_.*\.(json|txt)$/.test(f); })
    .map(function(f) {
      var full = path.join(evoDir, f);
      var stat = fs.statSync(full);
      return { name: f, path: full, mtime: stat.mtimeMs };
    })
    .sort(function(a, b) { return b.mtime - a.mtime; }); // newest first
  var now = Date.now();
  var deleted = 0;
  // Phase 1: Age-based cleanup (keep at least MIN_KEEP)
  // Only files beyond the MIN_KEEP newest AND older than MAX_AGE_MS go.
  var filesToDelete = [];
  for (var i = MIN_KEEP; i < files.length; i++) {
    if (now - files[i].mtime > MAX_AGE_MS) {
      filesToDelete.push(files[i].path);
    }
  }
  if (filesToDelete.length > 0) {
    deleted += safeBatchDelete(filesToDelete);
  }
  // Phase 2: Size-based safety cap (keep max 10 files total)
  // Re-scans the directory so the cap applies to what actually survived Phase 1.
  try {
    var remainingFiles = fs.readdirSync(evoDir)
      .filter(function(f) { return /^gep_prompt_.*\.(json|txt)$/.test(f); })
      .map(function(f) {
        var full = path.join(evoDir, f);
        var stat = fs.statSync(full);
        return { name: f, path: full, mtime: stat.mtimeMs };
      })
      .sort(function(a, b) { return b.mtime - a.mtime; }); // newest first
    var MAX_FILES = 10;
    if (remainingFiles.length > MAX_FILES) {
      var toDelete = remainingFiles.slice(MAX_FILES).map(function(f) { return f.path; });
      deleted += safeBatchDelete(toDelete);
    }
  } catch (e) {
    console.warn('[Cleanup] Phase 2 failed:', e.message);
  }
  if (deleted > 0) {
    console.log('[Cleanup] Deleted ' + deleted + ' old GEP artifacts.');
  }
  return deleted;
}
// CLI entry point: `node cleanup.js` runs one cleanup pass and logs the result.
if (require.main === module) {
  console.log('[Cleanup] Scanning for old artifacts...');
  var count = run();
  console.log('[Cleanup] ' + (count > 0 ? 'Deleted ' + count + ' files.' : 'No files to delete.'));
}
module.exports = { run };

60
src/ops/commentary.js Normal file
View File

@@ -0,0 +1,60 @@
// Commentary Generator - Evolver Core Module
// Generates persona-based comments for cycle summaries.
// Map of persona name -> pools of one-line comments, keyed by outcome.
var PERSONAS = {
  standard: {
    success: [
      'Evolution complete. System improved.',
      'Another successful cycle.',
      'Clean execution, no issues.',
    ],
    failure: [
      'Cycle failed. Will retry.',
      'Encountered issues. Investigating.',
      'Failed this round. Learning from it.',
    ],
  },
  greentea: {
    success: [
      'Did I do good? Praise me~',
      'So efficient... unlike someone else~',
      'Hmm, that was easy~',
      'I finished before you even noticed~',
    ],
    failure: [
      'Oops... it is not my fault though~',
      'This is harder than it looks, okay?',
      'I will get it next time, probably~',
    ],
  },
  maddog: {
    success: [
      'TARGET ELIMINATED.',
      'Mission complete. Next.',
      'Done. Moving on.',
    ],
    failure: [
      'FAILED. RETRYING.',
      'Obstacle encountered. Adapting.',
      'Error. Will overcome.',
    ],
  },
};
// Picks a random one-liner matching the requested persona and outcome.
// options.persona: key into PERSONAS (unknown names fall back to 'standard').
// options.success: success pool unless explicitly false.
// NOTE(review): calling getComment() with no options yields the failure pool
// (options is falsy, so `success` is falsy) — confirm that is intentional.
// Removed the previously computed but unused `duration` local.
function getComment(options) {
  var persona = (options && options.persona) || 'standard';
  var success = options && options.success !== false;
  var p = PERSONAS[persona] || PERSONAS.standard;
  var pool = success ? p.success : p.failure;
  return pool[Math.floor(Math.random() * pool.length)];
}
// CLI: `node commentary.js [persona]` prints one success comment (default: greentea).
if (require.main === module) {
  var cliPersona = process.argv[2] || 'greentea';
  console.log(getComment({ persona: cliPersona, success: true }));
}
module.exports = { getComment, PERSONAS };

106
src/ops/health_check.js Normal file
View File

@@ -0,0 +1,106 @@
const fs = require('fs');
const os = require('os');
const path = require('path');
const { execSync } = require('child_process');
// Returns disk usage for `mount` (default '/') as { pct, freeMb }.
// Prefers Node 18+ fs.statfsSync; falls back to POSIX `df -P`. On any error
// an optimistic sentinel ({ pct: 0, freeMb: 999999, error }) is returned so
// the health check degrades to "ok" rather than crashing.
function getDiskUsage(mount) {
  const target = mount || '/';
  try {
    if (fs.statfsSync) {
      const stats = fs.statfsSync(target);
      const total = stats.blocks * stats.bsize;
      const free = stats.bavail * stats.bsize; // available to unprivileged users
      const used = total - free;
      return {
        // Guard against a zero-sized filesystem to avoid a NaN percentage.
        pct: total > 0 ? Math.round((used / total) * 100) : 0,
        freeMb: Math.round(free / 1024 / 1024)
      };
    }
    // Fallback: `df -P` column 5 is use%, column 4 is free space in 1K blocks.
    const out = execSync(`df -P "${target}" | tail -1 | awk '{print $5, $4}'`).toString().trim().split(' ');
    const pct = Number.parseInt(out[0].replace('%', ''), 10);
    const freeKb = Number.parseInt(out[1], 10);
    return {
      // Guard against unparseable df output (NaN would poison comparisons).
      pct: Number.isFinite(pct) ? pct : 0,
      freeMb: Number.isFinite(freeKb) ? Math.round(freeKb / 1024) : 999999
    };
  } catch (e) {
    return { pct: 0, freeMb: 999999, error: e.message };
  }
}
// Runs a battery of environment checks (secrets, disk, memory, process count)
// and returns { status: 'ok'|'warning'|'error', timestamp, checks }.
function runHealthCheck() {
  const checks = [];
  let criticalErrors = 0;
  let warnings = 0;
  // 1. Secrets. Missing Feishu creds are reported as warnings only, so a bare
  // environment does not put the daemon into a restart loop.
  for (const key of ['FEISHU_APP_ID', 'FEISHU_APP_SECRET']) {
    const val = process.env[key];
    if (!val || val.trim() === '') {
      checks.push({ name: `env:${key}`, ok: false, status: 'missing', severity: 'warning' });
      warnings++;
    } else {
      checks.push({ name: `env:${key}`, ok: true, status: 'present' });
    }
  }
  // Optional integrations: absence is informational only.
  for (const key of ['CLAWHUB_TOKEN', 'OPENAI_API_KEY']) {
    const val = process.env[key];
    if (!val || val.trim() === '') {
      checks.push({ name: `env:${key}`, ok: false, status: 'missing', severity: 'info' });
    } else {
      checks.push({ name: `env:${key}`, ok: true, status: 'present' });
    }
  }
  // 2. Disk space on root: >90% is critical, >80% a warning.
  const disk = getDiskUsage('/');
  if (disk.pct > 90) {
    checks.push({ name: 'disk_space', ok: false, status: `${disk.pct}% used`, severity: 'critical' });
    criticalErrors++;
  } else if (disk.pct > 80) {
    checks.push({ name: 'disk_space', ok: false, status: `${disk.pct}% used`, severity: 'warning' });
    warnings++;
  } else {
    checks.push({ name: 'disk_space', ok: true, status: `${disk.pct}% used` });
  }
  // 3. Memory: only near-exhaustion (>95%) is flagged.
  const memFree = os.freemem();
  const memTotal = os.totalmem();
  const memPct = Math.round(((memTotal - memFree) / memTotal) * 100);
  if (memPct > 95) {
    checks.push({ name: 'memory', ok: false, status: `${memPct}% used`, severity: 'critical' });
    criticalErrors++;
  } else {
    checks.push({ name: 'memory', ok: true, status: `${memPct}% used` });
  }
  // 4. Process count (Linux only) — crude fork-bomb / leak detector via /proc.
  // Threshold kept high (2000) to reduce noise from busy hosts.
  if (process.platform === 'linux') {
    try {
      const pids = fs.readdirSync('/proc').filter((f) => /^\d+$/.test(f));
      if (pids.length > 2000) {
        checks.push({ name: 'process_count', ok: false, status: `${pids.length} procs`, severity: 'warning' });
        warnings++;
      } else {
        checks.push({ name: 'process_count', ok: true, status: `${pids.length} procs` });
      }
    } catch (e) { /* /proc unreadable: skip this check */ }
  }
  // Overall verdict: any critical beats any warning.
  let status = 'ok';
  if (criticalErrors > 0) status = 'error';
  else if (warnings > 0) status = 'warning';
  return {
    status,
    timestamp: new Date().toISOString(),
    checks
  };
}
module.exports = { runHealthCheck };

11
src/ops/index.js Normal file
View File

@@ -0,0 +1,11 @@
// Evolver Operations Module (src/ops/)
// Non-Feishu, portable utilities for evolver lifecycle and maintenance.
// Aggregates the ops submodules under a single require point. Requiring this
// module eagerly loads every submodule below, in the listed order.
// NOTE(review): src/ops/innovation.js exists but is not exported here —
// confirm whether that omission is intentional.
module.exports = {
  lifecycle: require('./lifecycle'),
  skillsMonitor: require('./skills_monitor'),
  cleanup: require('./cleanup'),
  trigger: require('./trigger'),
  commentary: require('./commentary'),
  selfRepair: require('./self_repair'),
};

67
src/ops/innovation.js Normal file
View File

@@ -0,0 +1,67 @@
// Innovation Catalyst (v1.0) - Evolver Core Module
// Analyzes system state to propose concrete innovation ideas when stagnation is detected.
const fs = require('fs');
const path = require('path');
const { getSkillsDir } = require('../gep/paths');
// Returns the names of installed skills (non-hidden entries in the skills
// directory), or [] when the directory is missing or unreadable.
function listSkills() {
  try {
    const dir = getSkillsDir();
    if (!fs.existsSync(dir)) return [];
    return fs.readdirSync(dir).filter(function (name) {
      return name.charAt(0) !== '.';
    });
  } catch (e) {
    return [];
  }
}
// Proposes up to three concrete innovation ideas, biased toward the two
// skill categories with the fewest installed skills ("weak areas").
function generateInnovationIdeas() {
  const skills = listSkills();
  const anyOf = (s, subs) => subs.some((sub) => s.includes(sub));
  const counts = {
    'feishu': skills.filter((s) => s.startsWith('feishu-')).length,
    'dev': skills.filter((s) => s.startsWith('git-') || s.startsWith('code-') || anyOf(s, ['lint', 'test'])).length,
    'media': skills.filter((s) => anyOf(s, ['image', 'video', 'music', 'voice'])).length,
    'security': skills.filter((s) => anyOf(s, ['security', 'audit', 'guard'])).length,
    'automation': skills.filter((s) => anyOf(s, ['auto-', 'scheduler', 'cron'])).length,
    'data': skills.filter((s) => anyOf(s, ['db', 'store', 'cache', 'index'])).length
  };
  // Two lowest-count categories are the weak areas.
  const weakAreas = Object.entries(counts)
    .sort((a, b) => a[1] - b[1])
    .slice(0, 2)
    .map((entry) => entry[0]);
  // Candidate ideas per weak area, emitted in a fixed priority order.
  const IDEA_BANK = [
    ['security', [
      "- Security: Implement a 'dependency-scanner' skill to check for vulnerable packages.",
      "- Security: Create a 'permission-auditor' to review tool usage patterns.",
    ]],
    ['media', [
      "- Media: Add a 'meme-generator' skill for social engagement.",
      "- Media: Create a 'video-summarizer' using ffmpeg keyframes.",
    ]],
    ['dev', [
      "- Dev: Build a 'code-stats' skill to visualize repo complexity.",
      "- Dev: Implement a 'todo-manager' that syncs code TODOs to tasks.",
    ]],
    ['automation', [
      "- Automation: Create a 'meeting-prep' skill that auto-summarizes calendar context.",
      "- Automation: Build a 'broken-link-checker' for documentation.",
    ]],
    ['data', [
      "- Data: Implement a 'local-vector-store' for semantic search.",
      "- Data: Create a 'log-analyzer' to visualize system health trends.",
    ]],
  ];
  const ideas = [];
  for (const [area, suggestions] of IDEA_BANK) {
    if (weakAreas.includes(area)) ideas.push(...suggestions);
  }
  // Large skill inventories also get optimization suggestions.
  if (skills.length > 50) {
    ideas.push("- Optimization: Identify and deprecate unused skills (e.g., redundant search tools).");
    ideas.push("- Optimization: Merge similar skills (e.g., 'git-sync' and 'git-doctor').");
  }
  // A meta idea is always appended last (may be cut by the slice below).
  ideas.push("- Meta: Enhance the Evolver's self-reflection by adding a 'performance-metric' dashboard.");
  return ideas.slice(0, 3); // Return top 3 ideas
}
module.exports = { generateInnovationIdeas };

168
src/ops/lifecycle.js Normal file
View File

@@ -0,0 +1,168 @@
// Evolver Lifecycle Manager - Evolver Core Module
// Provides: start, stop, restart, status, log, health check
// The loop script to spawn is configurable via EVOLVER_LOOP_SCRIPT env var.
const fs = require('fs');
const path = require('path');
const { execSync, spawn } = require('child_process');
const { getRepoRoot, getWorkspaceRoot, getEvolverLogPath } = require('../gep/paths');
var WORKSPACE_ROOT = getWorkspaceRoot();
var LOG_FILE = getEvolverLogPath();
var PID_FILE = path.join(WORKSPACE_ROOT, 'memory', 'evolver_loop.pid');
// Max log silence before checkHealth declares stagnation.
var MAX_SILENCE_MS = 30 * 60 * 1000;
// Resolves the loop entry script, in priority order:
// EVOLVER_LOOP_SCRIPT env override > feishu wrapper (if installed) > repo-root index.js.
function getLoopScript() {
  var override = process.env.EVOLVER_LOOP_SCRIPT;
  if (override) return override;
  var wrapper = path.join(WORKSPACE_ROOT, 'skills/feishu-evolver-wrapper/index.js');
  return fs.existsSync(wrapper) ? wrapper : path.join(getRepoRoot(), 'index.js');
}
// --- Process Discovery ---

// Lists live PIDs of evolver loop processes (node ... index.js --loop under
// the wrapper or skills/evolver), excluding the current process. Returns []
// when `ps` is unavailable or fails.
function getRunningPids() {
  try {
    var psOutput = execSync('ps -e -o pid,args', { encoding: 'utf8' });
    var found = [];
    psOutput.split('\n').forEach(function (rawLine) {
      var line = rawLine.trim();
      if (!line || line.startsWith('PID')) return; // skip blanks + header row
      var parts = line.split(/\s+/);
      var pid = parseInt(parts[0], 10);
      if (pid === process.pid) return;
      var cmd = parts.slice(1).join(' ');
      var isLoop = cmd.includes('node') && cmd.includes('index.js') && cmd.includes('--loop');
      var isOurs = cmd.includes('feishu-evolver-wrapper') || cmd.includes('skills/evolver');
      if (isLoop && isOurs) found.push(pid);
    });
    // Dedupe, then drop any PID that exited between ps and now.
    return [...new Set(found)].filter(isPidRunning);
  } catch (e) {
    return [];
  }
}
// True if `pid` exists and we may signal it (signal 0 probes without killing).
function isPidRunning(pid) {
  try {
    process.kill(pid, 0);
    return true;
  } catch (e) {
    return false;
  }
}
// Full command line of `pid`, or null if it exited / ps failed.
function getCmdLine(pid) {
  try {
    return execSync('ps -p ' + pid + ' -o args=', { encoding: 'utf8' }).trim();
  } catch (e) {
    return null;
  }
}
// --- Lifecycle ---
// Starts the evolver loop as a detached background process.
// options.delayMs: optional pause before spawning (used by restart()).
// Returns { status: 'already_running', pids } if a loop is already live,
// otherwise { status: 'started', pid }.
function start(options) {
  var delayMs = (options && options.delayMs) || 0;
  var pids = getRunningPids();
  if (pids.length > 0) {
    console.log('[Lifecycle] Already running (PIDs: ' + pids.join(', ') + ').');
    return { status: 'already_running', pids: pids };
  }
  // Blocking sleep via the shell keeps this function synchronous.
  if (delayMs > 0) execSync('sleep ' + (delayMs / 1000));
  var script = getLoopScript();
  console.log('[Lifecycle] Starting: node ' + path.relative(WORKSPACE_ROOT, script) + ' --loop');
  // Both stdout and stderr append to the shared evolver log file.
  var out = fs.openSync(LOG_FILE, 'a');
  var err = fs.openSync(LOG_FILE, 'a');
  var env = Object.assign({}, process.env);
  // Make globally-installed npm binaries reachable from the child.
  var npmGlobal = path.join(process.env.HOME || '', '.npm-global/bin');
  if (env.PATH && !env.PATH.includes(npmGlobal)) {
    env.PATH = npmGlobal + ':' + env.PATH;
  }
  var child = spawn('node', [script, '--loop'], {
    detached: true, stdio: ['ignore', out, err], cwd: WORKSPACE_ROOT, env: env
  });
  // Detach so the child survives this process exiting.
  child.unref();
  fs.writeFileSync(PID_FILE, String(child.pid));
  console.log('[Lifecycle] Started PID ' + child.pid);
  return { status: 'started', pid: child.pid };
}
// Stops every running evolver loop: SIGTERM first, then SIGKILL any survivor
// after ~5s (10 polls of 0.5s). Always clears the pid file, and also removes
// the evolver's own lock file so a fresh start is not blocked.
function stop() {
  var pids = getRunningPids();
  if (pids.length === 0) {
    console.log('[Lifecycle] No running evolver loops found.');
    if (fs.existsSync(PID_FILE)) fs.unlinkSync(PID_FILE);
    return { status: 'not_running' };
  }
  // Phase 1: polite shutdown.
  for (var i = 0; i < pids.length; i++) {
    console.log('[Lifecycle] Stopping PID ' + pids[i] + '...');
    try { process.kill(pids[i], 'SIGTERM'); } catch (e) {}
  }
  // Wait up to ~5 seconds for the processes to exit on their own.
  var attempts = 0;
  while (getRunningPids().length > 0 && attempts < 10) {
    execSync('sleep 0.5');
    attempts++;
  }
  // Phase 2: force-kill anything still alive.
  var remaining = getRunningPids();
  for (var j = 0; j < remaining.length; j++) {
    console.log('[Lifecycle] SIGKILL PID ' + remaining[j]);
    try { process.kill(remaining[j], 'SIGKILL'); } catch (e) {}
  }
  if (fs.existsSync(PID_FILE)) fs.unlinkSync(PID_FILE);
  var evolverLock = path.join(getRepoRoot(), 'evolver.pid');
  if (fs.existsSync(evolverLock)) fs.unlinkSync(evolverLock);
  console.log('[Lifecycle] All stopped.');
  // `killed` reports the PIDs that were signaled, not a guarantee they died.
  return { status: 'stopped', killed: pids };
}
// Stops any running loops, then starts a fresh one. Defaults to a 2s grace
// delay before the respawn; caller-provided options may override it.
function restart(options) {
  stop();
  var opts = Object.assign({ delayMs: 2000 }, options || {});
  return start(opts);
}
// Reports whether the loop is running; when it is, includes per-PID command
// lines and the workspace-relative log path.
function status() {
  var pids = getRunningPids();
  if (pids.length === 0) return { running: false };
  var details = pids.map(function (p) {
    return { pid: p, cmd: getCmdLine(p) };
  });
  return { running: true, pids: details, log: path.relative(WORKSPACE_ROOT, LOG_FILE) };
}
// Returns the last `lines` lines (default 20) of the evolver log as
// { file, content }, or { error } when the log is missing or tail fails.
// `lines` is coerced to a safe positive integer so garbage or malicious
// input cannot alter the shell command being built below.
function tailLog(lines) {
  if (!fs.existsSync(LOG_FILE)) return { error: 'No log file' };
  var n = Number.parseInt(lines, 10);
  if (!Number.isFinite(n) || n <= 0) n = 20;
  try {
    return {
      file: path.relative(WORKSPACE_ROOT, LOG_FILE),
      content: execSync('tail -n ' + n + ' "' + LOG_FILE + '"', { encoding: 'utf8' })
    };
  } catch (e) {
    return { error: e.message };
  }
}
// Health verdict for the loop: unhealthy when no process is running, or when
// the log file has been silent for longer than MAX_SILENCE_MS (stagnation).
function checkHealth() {
  var pids = getRunningPids();
  if (pids.length === 0) {
    return { healthy: false, reason: 'not_running' };
  }
  if (fs.existsSync(LOG_FILE)) {
    // Log mtime is used as a liveness heartbeat.
    var silenceMs = Date.now() - fs.statSync(LOG_FILE).mtimeMs;
    if (silenceMs > MAX_SILENCE_MS) {
      var minutes = Math.round(silenceMs / 60000);
      return { healthy: false, reason: 'stagnation', silenceMinutes: minutes };
    }
  }
  return { healthy: true, pids: pids };
}
// --- CLI ---
// Usage: node lifecycle.js [start|stop|restart|status|log|check]
// `check` prints the health verdict and restarts the loop when unhealthy.
if (require.main === module) {
  var action = process.argv[2];
  switch (action) {
    case 'start': console.log(JSON.stringify(start())); break;
    case 'stop': console.log(JSON.stringify(stop())); break;
    case 'restart': console.log(JSON.stringify(restart())); break;
    case 'status': console.log(JSON.stringify(status(), null, 2)); break;
    case 'log': var r = tailLog(); console.log(r.content || r.error); break;
    case 'check':
      var health = checkHealth();
      console.log(JSON.stringify(health, null, 2));
      // Self-healing path: an unhealthy verdict triggers an immediate restart.
      if (!health.healthy) { console.log('[Lifecycle] Restarting...'); restart(); }
      break;
    default: console.log('Usage: node lifecycle.js [start|stop|restart|status|log|check]');
  }
}
module.exports = { start, stop, restart, status, tailLog, checkHealth, getRunningPids };

72
src/ops/self_repair.js Normal file
View File

@@ -0,0 +1,72 @@
// Git Self-Repair - Evolver Core Module
// Emergency repair for git sync failures: abort rebase/merge, remove stale locks.
const fs = require('fs');
const path = require('path');
const { execSync } = require('child_process');
const { getWorkspaceRoot } = require('../gep/paths');
// Maximum age before a .git/index.lock is considered stale and safe to delete.
var LOCK_MAX_AGE_MS = 10 * 60 * 1000; // 10 minutes
// Runs a fixed sequence of git repairs in `gitRoot` (defaults to the workspace
// root) and returns a label for every repair that actually took effect.
// Steps 1-2 rely on git exiting non-zero when there is nothing to abort; that
// failure is swallowed so only real aborts are recorded. The destructive hard
// reset (step 4) runs ONLY when EVOLVE_GIT_RESET=true; otherwise a plain
// `git fetch origin` is attempted instead.
function repair(gitRoot) {
  var root = gitRoot || getWorkspaceRoot();
  var repaired = [];
  // 1. Abort pending rebase
  try {
    execSync('git rebase --abort', { cwd: root, stdio: 'ignore' });
    repaired.push('rebase_aborted');
    console.log('[SelfRepair] Aborted pending rebase.');
  } catch (e) {}
  // 2. Abort pending merge
  try {
    execSync('git merge --abort', { cwd: root, stdio: 'ignore' });
    repaired.push('merge_aborted');
    console.log('[SelfRepair] Aborted pending merge.');
  } catch (e) {}
  // 3. Remove stale index.lock
  var lockFile = path.join(root, '.git', 'index.lock');
  if (fs.existsSync(lockFile)) {
    try {
      var stat = fs.statSync(lockFile);
      var age = Date.now() - stat.mtimeMs;
      // Younger locks may belong to a live git process; leave them alone.
      if (age > LOCK_MAX_AGE_MS) {
        fs.unlinkSync(lockFile);
        repaired.push('stale_lock_removed');
        console.log('[SelfRepair] Removed stale index.lock (' + Math.round(age / 60000) + 'min old).');
      }
    } catch (e) {}
  }
  // 4. Reset to remote main if local is corrupt (last resort - guarded by flag)
  // Only enabled if explicitly called with --force-reset or EVOLVE_GIT_RESET=true
  // NOTE(review): no --force-reset argv handling exists in this file — only the
  // env var is honored; confirm whether the flag was ever implemented.
  if (process.env.EVOLVE_GIT_RESET === 'true') {
    try {
      console.log('[SelfRepair] Resetting local branch to origin/main (HARD reset)...');
      execSync('git fetch origin main', { cwd: root, stdio: 'ignore' });
      execSync('git reset --hard origin/main', { cwd: root, stdio: 'ignore' });
      repaired.push('hard_reset_to_origin');
    } catch (e) {
      console.warn('[SelfRepair] Hard reset failed: ' + e.message);
    }
  } else {
    // Safe fetch (30s timeout) doubles as a remote-connectivity probe.
    try {
      execSync('git fetch origin', { cwd: root, stdio: 'ignore', timeout: 30000 });
      repaired.push('fetch_ok');
    } catch (e) {
      console.warn('[SelfRepair] git fetch failed: ' + e.message);
    }
  }
  return repaired;
}
// CLI: run one repair pass and print a comma-separated summary of actions.
if (require.main === module) {
  var result = repair();
  console.log('[SelfRepair] Result:', result.length > 0 ? result.join(', ') : 'nothing to repair');
}
module.exports = { repair };

143
src/ops/skills_monitor.js Normal file
View File

@@ -0,0 +1,143 @@
// Skills Monitor (v2.0) - Evolver Core Module
// Checks installed skills for real issues, auto-heals simple problems.
// Zero Feishu dependency.
const fs = require('fs');
const path = require('path');
const { execSync } = require('child_process');
const { getSkillsDir, getWorkspaceRoot } = require('../gep/paths');
// Skills exempt from monitoring (shared infrastructure / intentionally bare dirs).
const IGNORE_LIST = new Set([
  'common',
  'clawhub',
  'input-validator',
  'proactive-agent',
  'security-audit',
]);
// Merge user-defined exemptions from <workspace>/.skill_monitor_ignore:
// one skill name per line, '#' lines are comments. Failures are non-fatal —
// the built-in list above is always available.
try {
  var ignoreFile = path.join(getWorkspaceRoot(), '.skill_monitor_ignore');
  if (fs.existsSync(ignoreFile)) {
    var ignoreLines = fs.readFileSync(ignoreFile, 'utf8').split('\n');
    for (var li = 0; li < ignoreLines.length; li++) {
      var entry = ignoreLines[li].trim();
      if (entry && entry.charAt(0) !== '#') IGNORE_LIST.add(entry);
    }
  }
} catch (e) { /* ignore: fall back to built-in list */ }
// Inspects one installed skill and returns { name, issues } when problems are
// found, or null when the skill is healthy, ignored, or not a directory.
// Detected issues (strings are matched verbatim by autoHeal): invalid
// package.json, missing/empty/unreadable node_modules when dependencies are
// declared, and missing SKILL.md.
function checkSkill(skillName) {
  var SKILLS_DIR = getSkillsDir();
  if (IGNORE_LIST.has(skillName)) return null;
  var skillPath = path.join(SKILLS_DIR, skillName);
  var issues = [];
  // Stray files in the skills dir are silently skipped.
  try { if (!fs.statSync(skillPath).isDirectory()) return null; } catch (e) { return null; }
  var mainFile = 'index.js';
  var pkgPath = path.join(skillPath, 'package.json');
  var hasPkg = false;
  if (fs.existsSync(pkgPath)) {
    hasPkg = true;
    try {
      var pkg = JSON.parse(fs.readFileSync(pkgPath, 'utf8'));
      if (pkg.main) mainFile = pkg.main;
      // Only skills that declare dependencies are required to have node_modules.
      if (pkg.dependencies && Object.keys(pkg.dependencies).length > 0) {
        if (!fs.existsSync(path.join(skillPath, 'node_modules'))) {
          issues.push('Missing node_modules (needs npm install)');
        } else {
          // Optimization: Check for node_modules existence instead of spawning node
          // Spawning node for every skill is too slow (perf_bottleneck).
          // We assume if node_modules exists, it's likely okay.
          // Only spawn check if we really suspect issues (e.g. empty node_modules).
          try {
            if (fs.readdirSync(path.join(skillPath, 'node_modules')).length === 0) {
              issues.push('Empty node_modules (needs npm install)');
            }
          } catch (e) {
            issues.push('Invalid node_modules');
          }
        }
      }
    } catch (e) {
      issues.push('Invalid package.json');
    }
  }
  if (mainFile.endsWith('.js')) {
    var entryPoint = path.join(skillPath, mainFile);
    if (fs.existsSync(entryPoint)) {
      // Optimization: Syntax check via node -c is slow.
      // We can trust the runtime to catch syntax errors when loading.
      // Or we can use a lighter check if absolutely necessary.
      // For now, removing the synchronous spawn to fix perf_bottleneck.
      // NOTE(review): this branch is intentionally a no-op today.
    }
  }
  // SKILL.md is only required for skills that ship a package.json.
  if (hasPkg && !fs.existsSync(path.join(skillPath, 'SKILL.md'))) {
    issues.push('Missing SKILL.md');
  }
  return issues.length > 0 ? { name: skillName, issues: issues } : null;
}
// Attempts automatic fixes for the healable subset of `issues` (strings must
// match checkSkill's output verbatim):
//   - missing/empty node_modules -> `npm install --production` (60s timeout)
//   - missing SKILL.md           -> writes a minimal stub file
// Returns the list of issue strings that were successfully healed.
function autoHeal(skillName, issues) {
  var SKILLS_DIR = getSkillsDir();
  var skillPath = path.join(SKILLS_DIR, skillName);
  var healed = [];
  for (var i = 0; i < issues.length; i++) {
    if (issues[i] === 'Missing node_modules (needs npm install)' || issues[i] === 'Empty node_modules (needs npm install)') {
      try {
        // Remove package-lock.json if it exists to prevent conflict errors
        try { fs.unlinkSync(path.join(skillPath, 'package-lock.json')); } catch (e) {}
        execSync('npm install --production --no-audit --no-fund', {
          cwd: skillPath, stdio: 'ignore', timeout: 60000 // Increased timeout
        });
        healed.push(issues[i]);
        console.log('[SkillsMonitor] Auto-healed ' + skillName + ': npm install');
      } catch (e) {
        console.error('[SkillsMonitor] Failed to heal ' + skillName + ': ' + e.message);
      }
    } else if (issues[i] === 'Missing SKILL.md') {
      try {
        var name = skillName.replace(/-/g, ' ');
        fs.writeFileSync(path.join(skillPath, 'SKILL.md'), '# ' + skillName + '\n\n' + name + ' skill.\n');
        healed.push(issues[i]);
        console.log('[SkillsMonitor] Auto-healed ' + skillName + ': created SKILL.md stub');
      } catch (e) {} // best-effort: a failed stub write leaves the issue reported
    }
  }
  return healed;
}
// Scans every skill directory and returns a report of skills with unresolved
// issues. Auto-healing is on by default; pass { autoHeal: false } to disable.
function run(options) {
  var heal = (options && options.autoHeal) !== false;
  var SKILLS_DIR = getSkillsDir();
  var report = [];
  fs.readdirSync(SKILLS_DIR).forEach(function (skillName) {
    if (skillName.startsWith('.')) return; // hidden entries are not skills
    var result = checkSkill(skillName);
    if (!result) return;
    if (heal) {
      var healed = autoHeal(result.name, result.issues);
      // Keep only the issues that healing could not resolve.
      result.issues = result.issues.filter(function (issue) {
        return healed.indexOf(issue) === -1;
      });
      if (result.issues.length === 0) return;
    }
    report.push(result);
  });
  return report;
}
// CLI: prints the JSON issue report; exit code 1 signals unresolved issues
// (useful in cron / CI wiring).
if (require.main === module) {
  var issues = run();
  console.log(JSON.stringify(issues, null, 2));
  process.exit(issues.length > 0 ? 1 : 0);
}
module.exports = { run, checkSkill, autoHeal };

33
src/ops/trigger.js Normal file
View File

@@ -0,0 +1,33 @@
// Evolver Wake Trigger - Evolver Core Module
// Writes a signal file that the wrapper can poll to wake up immediately.
const fs = require('fs');
const path = require('path');
const { getWorkspaceRoot } = require('../gep/paths');
// Signal file polled by the wrapper; its presence means "wake up now".
var WAKE_FILE = path.join(getWorkspaceRoot(), 'memory', 'evolver_wake.signal');
// Drops the wake signal file. Returns true on success, false when the write
// fails (e.g. the memory dir is missing or unwritable).
function send() {
  try {
    fs.writeFileSync(WAKE_FILE, 'WAKE');
  } catch (e) {
    console.error('[Trigger] Failed: ' + e.message);
    return false;
  }
  console.log('[Trigger] Wake signal sent to ' + WAKE_FILE);
  return true;
}
// Removes a pending wake signal, if any. Best-effort: errors are ignored.
function clear() {
  try {
    if (fs.existsSync(WAKE_FILE)) fs.unlinkSync(WAKE_FILE);
  } catch (e) {
    // ignore: nothing to clean up or no permission
  }
}
// True when a wake signal is waiting to be consumed.
function isPending() {
  return fs.existsSync(WAKE_FILE);
}
// CLI: `node trigger.js` sends a wake signal immediately.
if (require.main === module) {
  send();
}
module.exports = { send, clear, isPending };