Initial commit with translated description
This commit is contained in:
48
scripts/checks/README.md
Normal file
48
scripts/checks/README.md
Normal file
@@ -0,0 +1,48 @@
|
||||
# Pre-commit Checks
|
||||
|
||||
Automated enforcement of rules from `AGENTS.md` and `CONTRIBUTING.md`.
|
||||
|
||||
## Checks
|
||||
|
||||
| Check | Rule Source | Description |
|
||||
| ----------------- | --------------------- | ---------------------------------------------------- |
|
||||
| `version-sync.sh` | CONTRIBUTING.md | Ensures `package.json` and `SKILL.md` versions match |
|
||||
| `no-user-data.sh` | public/data/AGENTS.md | Blocks commits of user-specific data files |
|
||||
| `no-secrets.sh` | AGENTS.md | Scans for accidentally committed secrets |
|
||||
|
||||
## Adding New Checks
|
||||
|
||||
1. Create a new script in `scripts/checks/` named `<check-name>.sh`
|
||||
2. Script must:
|
||||
- Accept repo root as first argument (`$1`)
|
||||
- Exit `0` on success
|
||||
- Exit `1` on failure
|
||||
- Print clear error messages when failing
|
||||
3. Make it executable: `chmod +x scripts/checks/<check-name>.sh`
|
||||
|
||||
## Running Manually
|
||||
|
||||
```bash
|
||||
# Run all checks
|
||||
./scripts/pre-commit
|
||||
|
||||
# Run individual check
|
||||
./scripts/checks/version-sync.sh .
|
||||
```
|
||||
|
||||
## Installing the Hook
|
||||
|
||||
```bash
|
||||
make install-hooks
|
||||
# or manually:
|
||||
cp scripts/pre-commit .git/hooks/pre-commit
|
||||
chmod +x .git/hooks/pre-commit
|
||||
```
|
||||
|
||||
## Bypassing (Not Recommended)
|
||||
|
||||
```bash
|
||||
git commit --no-verify
|
||||
```
|
||||
|
||||
Only use this if you understand why the check is failing and have a valid reason to bypass.
|
||||
39
scripts/checks/no-secrets.sh
Normal file
39
scripts/checks/no-secrets.sh
Normal file
@@ -0,0 +1,39 @@
|
||||
#!/usr/bin/env bash
#
# Check: No Secrets
# Basic check for accidentally committed secrets
#
# Rule: AGENTS.md - never commit secrets, API keys, or credentials
#
# Usage: no-secrets.sh [repo-root]
#   Exits 0 when no secrets are found in staged additions, 1 otherwise.
#

# Repo root is passed by the pre-commit driver; default to cwd so the
# script also works standalone.
REPO_ROOT="${1:-.}"

# Patterns that might indicate secrets (extended regular expressions).
SECRET_PATTERNS=(
    'sk-[a-zA-Z0-9]{20,}'                     # OpenAI API keys
    'xoxb-[0-9]+-[0-9]+-[a-zA-Z0-9]+'         # Slack bot tokens
    'xoxp-[0-9]+-[0-9]+-[a-zA-Z0-9]+'         # Slack user tokens
    'ghp_[a-zA-Z0-9]{36}'                     # GitHub personal access tokens
    'gho_[a-zA-Z0-9]{36}'                     # GitHub OAuth tokens
    'AKIA[0-9A-Z]{16}'                        # AWS access key IDs
    'password\s*[=:]\s*["\047][^"\047]{8,}'   # Hardcoded passwords
)

# Get staged file contents, restricted to ADDED lines only.
# The raw `git diff --cached` output also contains removed lines, context
# lines, and diff headers, so a secret being *deleted* would previously
# trigger a false positive. We keep "+" lines and drop the "+++ b/file"
# header. `git -C` honors the repo-root argument (it was previously unused).
STAGED_DIFF=$(git -C "$REPO_ROOT" diff --cached --diff-filter=AM 2>/dev/null | grep '^+' | grep -v '^+++ ' || echo "")

FOUND_SECRETS=0

for pattern in "${SECRET_PATTERNS[@]}"; do
    if echo "$STAGED_DIFF" | grep -qE "$pattern"; then
        echo "  ⚠️  Potential secret detected matching pattern: $pattern"
        FOUND_SECRETS=1
    fi
done

if [[ $FOUND_SECRETS -eq 1 ]]; then
    echo "     Review staged changes and remove any secrets before committing."
    exit 1
fi

exit 0
|
||||
34
scripts/checks/no-user-data.sh
Normal file
34
scripts/checks/no-user-data.sh
Normal file
@@ -0,0 +1,34 @@
|
||||
#!/usr/bin/env bash
#
# Check: No User Data
# Ensures user-specific data files are not staged for commit
#
# Rule: public/data/AGENTS.md - never commit operators.json or privacy-settings.json
#
# Usage: no-user-data.sh [repo-root]
#   Exits 0 when no user data files are staged, 1 otherwise.
#

# Repo root is passed by the pre-commit driver; default to cwd for standalone use.
REPO_ROOT="${1:-.}"

# List staged paths. `git -C` honors the repo-root argument (it was
# previously accepted but ignored, so the check only worked when run
# from inside the repo).
STAGED_FILES=$(git -C "$REPO_ROOT" diff --cached --name-only 2>/dev/null || echo "")

USER_DATA_FILES=(
    "public/data/operators.json"
    "public/data/privacy-settings.json"
)

FOUND_USER_DATA=0

for file in "${USER_DATA_FILES[@]}"; do
    # -x: match the whole line; -F: fixed string, so "." in the filename is
    # literal (the original regex ^file$ treated "." as a wildcard and could
    # match e.g. "operatorsXjson").
    if echo "$STAGED_FILES" | grep -qxF -- "$file"; then
        echo "  ⚠️  User data file staged: $file"
        echo "     This file contains user-specific data and should not be committed."
        echo "     Use 'git reset HEAD $file' to unstage."
        FOUND_USER_DATA=1
    fi
done

if [[ $FOUND_USER_DATA -eq 1 ]]; then
    exit 1
fi

exit 0
|
||||
35
scripts/checks/version-sync.sh
Normal file
35
scripts/checks/version-sync.sh
Normal file
@@ -0,0 +1,35 @@
|
||||
#!/usr/bin/env bash
#
# Check: Version Sync
# Ensures package.json and SKILL.md versions are in sync
#
# Rule: AGENTS.md / CONTRIBUTING.md - versions must match
#
# Usage: version-sync.sh [repo-root]
#   Exits 0 when the versions match, 1 on mismatch or unreadable version.
#

REPO_ROOT="${1:-.}"

# Extract version from package.json (first "version" field found).
PKG_VERSION=$(grep -o '"version": *"[^"]*"' "$REPO_ROOT/package.json" | head -1 | sed 's/.*"version": *"\([^"]*\)".*/\1/')

# Extract version from SKILL.md frontmatter. Strip trailing whitespace and
# carriage returns as well: a CRLF-saved SKILL.md previously left a "\r" on
# the value and caused a false "version mismatch" even when versions agreed.
SKILL_VERSION=$(grep -E '^version:' "$REPO_ROOT/SKILL.md" | head -1 | sed -e 's/version: *//' -e 's/[[:space:]]*$//')

if [[ -z "$PKG_VERSION" ]]; then
    echo "  ⚠️  Could not read version from package.json"
    exit 1
fi

if [[ -z "$SKILL_VERSION" ]]; then
    echo "  ⚠️  Could not read version from SKILL.md"
    exit 1
fi

if [[ "$PKG_VERSION" != "$SKILL_VERSION" ]]; then
    echo "  ⚠️  Version mismatch:"
    echo "     package.json: $PKG_VERSION"
    echo "     SKILL.md: $SKILL_VERSION"
    echo "     → Both files must have the same version"
    exit 1
fi

exit 0
|
||||
48
scripts/dashboard-loop.sh
Normal file
48
scripts/dashboard-loop.sh
Normal file
@@ -0,0 +1,48 @@
|
||||
#!/bin/bash
# Auto-restart loop for OpenClaw Command Center
# Keeps the dashboard running with exponential backoff on crashes

set -e
# pipefail is essential for crash detection below: without it the exit
# status of `node ... | tee ...` is tee's status (always 0), so the crash
# branch could never run and the backoff logic was dead code.
set -o pipefail

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
DASHBOARD_DIR="$(dirname "$SCRIPT_DIR")"
LOG_DIR="${HOME}/.openclaw-command-center/logs"
LOG_FILE="${LOG_DIR}/dashboard.log"

mkdir -p "$LOG_DIR"

# Backoff settings: start at 1s, double on each crash, cap at 30s.
INITIAL_DELAY=1
MAX_DELAY=30
DELAY=$INITIAL_DELAY

cd "$DASHBOARD_DIR"

# Ensure node is available (nvm support)
if [ -f "$HOME/.nvm/nvm.sh" ]; then
    source "$HOME/.nvm/nvm.sh"
fi

echo "🦞 OpenClaw Command Center - Auto-restart loop"
echo "   Logs: $LOG_FILE"
echo "   Press Ctrl+C to stop"
echo ""

while true; do
    echo "[$(date)] Starting dashboard..." | tee -a "$LOG_FILE"

    # Run the server. Being an if-condition exempts this pipeline from
    # set -e; pipefail makes the condition reflect node's exit status.
    if node lib/server.js 2>&1 | tee -a "$LOG_FILE"; then
        # Clean exit: reset the backoff window.
        echo "[$(date)] Dashboard exited cleanly" | tee -a "$LOG_FILE"
        DELAY=$INITIAL_DELAY
    else
        # Crash: wait, then double the delay up to MAX_DELAY.
        echo "[$(date)] Dashboard crashed! Restarting in ${DELAY}s..." | tee -a "$LOG_FILE"
        sleep "$DELAY"
        DELAY=$((DELAY * 2))
        if [ "$DELAY" -gt "$MAX_DELAY" ]; then
            DELAY=$MAX_DELAY
        fi
    fi
done
|
||||
117
scripts/install-system-deps.sh
Normal file
117
scripts/install-system-deps.sh
Normal file
@@ -0,0 +1,117 @@
|
||||
#!/bin/bash
#
# install-system-deps.sh - Install optional system dependencies
#
# Reads config/system-deps.json and installs missing packages
# using the detected package manager (apt, brew, dnf, etc.)
#
# Usage: ./scripts/install-system-deps.sh [--dry-run]
#

set -euo pipefail

# Resolve paths relative to this script so the tool can be run from anywhere.
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
DEPS_FILE="$PROJECT_DIR/config/system-deps.json"
# First positional arg doubles as the mode flag (--dry-run / -h / --help).
DRY_RUN="${1:-}"

if [[ "$DRY_RUN" == "-h" || "$DRY_RUN" == "--help" ]]; then
echo "Usage: install-system-deps.sh [--dry-run]"
echo "  --dry-run   Show what would be installed without installing"
exit 0
fi

if [[ ! -f "$DEPS_FILE" ]]; then
echo "Error: $DEPS_FILE not found"
exit 1
fi

echo "🦞 OpenClaw Command Center - System Dependencies"
echo "================================================="
echo ""

# Let node do all the heavy lifting — it parses the JSON, detects the
# platform/package-manager/chip, checks which binaries exist, and
# prints shell commands to stdout for us to eval.
#
# NOTE(review): $DEPS_FILE and $DRY_RUN are interpolated directly into the
# JavaScript source below — paths containing quotes or spaces would break
# the embedded program. Confirm whether that is an acceptable constraint.
node -e "
const { execSync } = require('child_process');
const os = require('os');
const deps = require('$DEPS_FILE');

const platform = process.platform === 'linux' ? 'linux' : process.platform === 'darwin' ? 'darwin' : null;
if (!platform) { console.log('echo \"Unsupported platform\"'); process.exit(0); }

// Detect package manager
const pmCandidates = platform === 'linux'
? ['apt', 'dnf', 'yum', 'pacman', 'apk']
: ['brew'];
let pkgManager = null;
for (const pm of pmCandidates) {
try { execSync('which ' + pm, { stdio: 'ignore' }); pkgManager = pm; break; } catch {}
}

console.log('Platform: ' + platform);
console.log('Package manager: ' + (pkgManager || 'none'));
console.log('');

if (!pkgManager) {
console.log('No supported package manager found.');
console.log('Supported: apt, dnf, yum, pacman, apk, brew');
process.exit(1);
}

// Detect chip
let isAppleSilicon = false;
if (platform === 'darwin') {
try {
const chip = execSync('sysctl -n machdep.cpu.brand_string', { encoding: 'utf8' });
isAppleSilicon = /apple/i.test(chip);
} catch {}
}

const entries = deps[platform] || [];
let installed = 0, toInstall = 0;

for (const dep of entries) {
if (dep.condition === 'intel' && isAppleSilicon) continue;
const cmd = dep.install[pkgManager];
if (!cmd) continue;

let hasBinary = false;
try { execSync('which ' + dep.binary, { stdio: 'ignore' }); hasBinary = true; } catch {}
if (!hasBinary && dep.binary === 'osx-cpu-temp') {
try { execSync('test -x ' + os.homedir() + '/bin/osx-cpu-temp', { stdio: 'ignore' }); hasBinary = true; } catch {}
}

if (hasBinary) {
console.log('✅ ' + dep.name + ' — already installed (' + dep.purpose + ')');
installed++;
} else {
toInstall++;
if ('$DRY_RUN' === '--dry-run') {
console.log('📦 ' + dep.name + ' — would install (' + dep.purpose + ')');
console.log('   Command: ' + cmd);
} else {
console.log('📦 Installing ' + dep.name + ' — ' + dep.purpose + '...');
console.log('   Running: ' + cmd);
try {
execSync(cmd, { stdio: 'inherit' });
console.log('   ✅ Installed successfully');
} catch (e) {
console.log('   ⚠️  Install failed: ' + e.message);
}
}
}
}

console.log('');
if ('$DRY_RUN' === '--dry-run') {
console.log('Dry run complete. ' + installed + ' already installed, ' + toInstall + ' would be installed.');
} else {
console.log('Done! ' + installed + ' already installed, ' + toInstall + ' newly installed.');
if (toInstall > 0) {
console.log('');
console.log('Restart the Command Center to see enhanced vitals.');
}
}
"
|
||||
564
scripts/linear-sync.js
Normal file
564
scripts/linear-sync.js
Normal file
@@ -0,0 +1,564 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Linear Integration Module for OpenClaw Dashboard
|
||||
*
|
||||
* Syncs session state to Linear issues:
|
||||
* - Extracts JON-XXX issue IDs from session transcripts
|
||||
* - Updates Linear issue status when session state changes
|
||||
* - Adds comments on state transitions
|
||||
*/
|
||||
|
||||
const https = require("https");
|
||||
const fs = require("fs");
|
||||
const path = require("path");
|
||||
const { getOpenClawDir } = require("../src/config");
|
||||
|
||||
// Linear API configuration
// NOTE(review): LINEAR_API_URL is not referenced by linearRequest below,
// which hardcodes hostname/path in its request options — kept here for
// documentation; confirm whether it can be removed or should be used.
const LINEAR_API_URL = "https://api.linear.app/graphql";
// Personal API key; all API calls fail fast when this is unset.
const LINEAR_API_KEY = process.env.LINEAR_API_KEY;

// Workflow State IDs for team JON (from TOOLS.md).
// These UUIDs are workspace-specific; they will not work for other teams.
const LINEAR_STATES = {
  TODO: "2ee58f08-499b-47ee-bbe3-a254957517c5",
  IN_PROGRESS: "c2c429d8-11d0-4fa5-bbe7-7bc7febbd42e",
  DONE: "b82d1646-6044-48ad-b2e9-04f87739e16f",
};

// Session state to Linear state mapping.
// "active" and "live" both map to IN_PROGRESS.
const STATE_MAP = {
  active: LINEAR_STATES.IN_PROGRESS,
  live: LINEAR_STATES.IN_PROGRESS,
  idle: LINEAR_STATES.TODO,
  completed: LINEAR_STATES.DONE,
};

// Track synced issues to avoid duplicate updates.
// Key: "<identifier>:<sessionKey>", Value: { lastState, lastUpdated }
const syncState = new Map();

// Path to persist sync state between process restarts.
const SYNC_STATE_FILE = path.join(__dirname, "..", "state", "linear-sync-state.json");
|
||||
|
||||
/**
 * Hydrate the in-memory sync map from SYNC_STATE_FILE.
 * Missing file is a no-op; parse/IO failures are logged and swallowed
 * so a corrupt state file never prevents startup.
 */
function loadSyncState() {
  try {
    if (!fs.existsSync(SYNC_STATE_FILE)) return;
    const persisted = JSON.parse(fs.readFileSync(SYNC_STATE_FILE, "utf8"));
    for (const [issueKey, record] of Object.entries(persisted)) {
      syncState.set(issueKey, record);
    }
    console.log(`[Linear] Loaded sync state: ${syncState.size} issues tracked`);
  } catch (e) {
    console.error("[Linear] Failed to load sync state:", e.message);
  }
}
|
||||
|
||||
/**
 * Persist the in-memory sync map to SYNC_STATE_FILE as pretty-printed JSON.
 * Creates the parent directory on first use; failures are logged, never thrown.
 */
function saveSyncState() {
  try {
    const stateDir = path.dirname(SYNC_STATE_FILE);
    if (!fs.existsSync(stateDir)) {
      fs.mkdirSync(stateDir, { recursive: true });
    }
    const serialized = JSON.stringify(Object.fromEntries(syncState), null, 2);
    fs.writeFileSync(SYNC_STATE_FILE, serialized);
  } catch (e) {
    console.error("[Linear] Failed to save sync state:", e.message);
  }
}
|
||||
|
||||
/**
 * Make a GraphQL request to Linear API.
 * Builds a raw https POST to api.linear.app/graphql; rejects when
 * LINEAR_API_KEY is unset, on transport errors, on unparsable responses,
 * or when the GraphQL response carries an `errors` array.
 * @param {string} query - GraphQL query/mutation
 * @param {object} variables - Query variables
 * @returns {Promise<object>} Response `data` payload (errors already unwrapped)
 */
function linearRequest(query, variables = {}) {
  return new Promise((resolve, reject) => {
    // Fail fast before opening a socket when no credentials are configured.
    if (!LINEAR_API_KEY) {
      reject(new Error("LINEAR_API_KEY not set"));
      return;
    }

    const payload = JSON.stringify({ query, variables });

    const options = {
      hostname: "api.linear.app",
      port: 443,
      path: "/graphql",
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        // Linear personal API keys are sent bare (no "Bearer " prefix).
        Authorization: LINEAR_API_KEY,
        // Byte length, not string length — payload may contain multibyte chars.
        "Content-Length": Buffer.byteLength(payload),
      },
    };

    const req = https.request(options, (res) => {
      // Accumulate the body; Linear responses are small enough to buffer.
      let data = "";
      res.on("data", (chunk) => (data += chunk));
      res.on("end", () => {
        try {
          const parsed = JSON.parse(data);
          if (parsed.errors) {
            // Surface only the first GraphQL error message.
            reject(new Error(parsed.errors[0]?.message || "GraphQL error"));
          } else {
            resolve(parsed.data);
          }
        } catch (e) {
          // Non-JSON body (e.g. HTML error page from a proxy).
          reject(new Error(`Failed to parse response: ${e.message}`));
        }
      });
    });

    req.on("error", reject);
    req.write(payload);
    req.end();
  });
}
|
||||
|
||||
/**
 * Pull Linear issue references (JON-<number>) out of free-form text.
 *
 * @param {string} text - Text to scan; null/empty yields an empty array.
 * @returns {string[]} Unique identifiers, uppercased, in first-seen order.
 */
function extractLinearIds(text) {
  if (!text) return [];

  // Case-insensitive JON-### references with 1-5 digit numbers; the word
  // boundaries keep e.g. "JONATHAN-12" or "JON-123456" from matching.
  const found = text.match(/\bJON-(\d{1,5})\b/gi) || [];

  // Uppercase and dedupe while preserving first-occurrence order.
  const seen = new Set();
  for (const raw of found) {
    seen.add(raw.toUpperCase());
  }
  return Array.from(seen);
}
|
||||
|
||||
/**
 * Collect every Linear issue ID referenced in a session transcript.
 * Only entries of type "message" are scanned; message content may be a
 * plain string or an array of typed chunks (text chunks are joined).
 *
 * @param {Array} transcript - Parsed transcript entries (JSONL rows).
 * @returns {string[]} Unique issue identifiers across all messages.
 */
function extractLinearIdsFromTranscript(transcript) {
  const collected = new Set();

  for (const entry of transcript) {
    if (entry.type !== "message" || !entry.message) continue;

    const content = entry.message.content;
    let text = "";
    if (typeof content === "string") {
      text = content;
    } else if (Array.isArray(content)) {
      const pieces = [];
      for (const chunk of content) {
        if (chunk.type === "text") pieces.push(chunk.text || "");
      }
      text = pieces.join(" ");
    }

    for (const id of extractLinearIds(text)) {
      collected.add(id);
    }
  }

  return [...collected];
}
||||
|
||||
/**
 * Fetch a Linear issue by its human-readable identifier (e.g. "JON-29").
 * API failures are logged and mapped to null rather than thrown, so
 * callers can treat "not found" and "request failed" uniformly.
 *
 * @param {string} identifier - Issue identifier
 * @returns {Promise<object|null>} Issue data or null
 */
async function getIssue(identifier) {
  const query = `
    query GetIssue($id: String!) {
      issue(id: $id) {
        id
        identifier
        title
        description
        url
        state {
          id
          name
          type
        }
        priority
      }
    }
  `;

  try {
    const response = await linearRequest(query, { id: identifier });
    return response.issue;
  } catch (e) {
    console.error(`[Linear] Failed to get issue ${identifier}:`, e.message);
    return null;
  }
}
|
||||
|
||||
/**
 * Move an issue to a new workflow state via the issueUpdate mutation.
 * Note: takes the issue's UUID (`issue.id`), not the "JON-XX" identifier.
 *
 * @param {string} issueId - Issue UUID (not identifier)
 * @param {string} stateId - New state UUID
 * @returns {Promise<boolean>} true when the API reports success
 */
async function updateIssueState(issueId, stateId) {
  const mutation = `
    mutation UpdateIssueState($id: String!, $stateId: String!) {
      issueUpdate(id: $id, input: { stateId: $stateId }) {
        success
        issue {
          id
          identifier
          state {
            name
          }
        }
      }
    }
  `;

  try {
    const response = await linearRequest(mutation, { id: issueId, stateId });
    return response.issueUpdate?.success || false;
  } catch (e) {
    console.error(`[Linear] Failed to update issue state:`, e.message);
    return false;
  }
}
||||
|
||||
/**
 * Post a comment on an issue via the commentCreate mutation.
 * Failures are logged and reported as `false`, never thrown.
 *
 * @param {string} issueId - Issue UUID (not identifier)
 * @param {string} body - Comment body (markdown supported)
 * @returns {Promise<boolean>} true when the API reports success
 */
async function addComment(issueId, body) {
  const mutation = `
    mutation AddComment($issueId: String!, $body: String!) {
      commentCreate(input: { issueId: $issueId, body: $body }) {
        success
        comment {
          id
        }
      }
    }
  `;

  try {
    const response = await linearRequest(mutation, { issueId, body });
    return response.commentCreate?.success || false;
  } catch (e) {
    console.error(`[Linear] Failed to add comment:`, e.message);
    return false;
  }
}
|
||||
|
||||
/**
 * Classify a session as 'completed', 'active', or 'idle'.
 * Completion flags win; otherwise sessions with activity in the last
 * 30 minutes are "active" and everything else is "idle".
 *
 * @param {object} session - Session object with ageMs / status / completed.
 * @returns {string} One of 'active', 'idle', 'completed'.
 */
function determineSessionState(session) {
  const THIRTY_MINUTES_MS = 30 * 60 * 1000;

  // Explicit completion markers (from session metadata) take priority.
  if (session.status === "completed" || session.completed) {
    return "completed";
  }

  // Missing ageMs is treated as "just now" (0 ms), hence "active".
  const elapsed = session.ageMs || 0;
  return elapsed < THIRTY_MINUTES_MS ? "active" : "idle";
}
|
||||
|
||||
/**
 * Sync a session's Linear issues based on session state.
 * Flow: extract JON-XXX ids from the transcript, map the session's state
 * to a Linear workflow state, then for each issue: skip if already synced
 * or already in the target state, otherwise update the issue and leave an
 * explanatory comment. Per-issue failures are collected, not thrown.
 *
 * NOTE(review): saveSyncState() is called only on the successful-update
 * path; the "already in target state" path updates the in-memory map but
 * does not persist it until a later successful update — confirm intended.
 *
 * @param {object} session - Session data including transcript
 * @param {Array} transcript - Session transcript entries
 * @returns {Promise<object>} Sync results ({issuesFound, updated, skipped, errors})
 */
async function syncSessionToLinear(session, transcript) {
  const results = {
    issuesFound: [],
    updated: [],
    skipped: [],
    errors: [],
  };

  // Without credentials every API call would fail — bail out up front.
  if (!LINEAR_API_KEY) {
    results.errors.push("LINEAR_API_KEY not configured");
    return results;
  }

  // Extract Linear issue IDs from transcript
  const issueIds = extractLinearIdsFromTranscript(transcript);
  results.issuesFound = issueIds;

  if (issueIds.length === 0) {
    return results;
  }

  // Determine current session state
  const sessionState = determineSessionState(session);
  const targetStateId = STATE_MAP[sessionState];

  if (!targetStateId) {
    results.errors.push(`Unknown session state: ${sessionState}`);
    return results;
  }

  // Process each issue sequentially (one API round-trip at a time).
  for (const identifier of issueIds) {
    try {
      // Check sync state to avoid duplicate updates; keyed per
      // issue+session so different sessions sync independently.
      const syncKey = `${identifier}:${session.key || session.sessionId}`;
      const lastSync = syncState.get(syncKey);

      if (lastSync && lastSync.lastState === sessionState) {
        results.skipped.push({
          identifier,
          reason: "Already synced to this state",
        });
        continue;
      }

      // Get issue details (also resolves identifier -> UUID).
      const issue = await getIssue(identifier);
      if (!issue) {
        results.errors.push(`Issue ${identifier} not found`);
        continue;
      }

      // Check if state change is needed
      if (issue.state.id === targetStateId) {
        // Update sync state even if no change needed, so the next pass
        // short-circuits before hitting the API.
        syncState.set(syncKey, {
          lastState: sessionState,
          lastUpdated: new Date().toISOString(),
        });
        results.skipped.push({
          identifier,
          reason: `Already in ${issue.state.name}`,
        });
        continue;
      }

      // Update issue state
      const updateSuccess = await updateIssueState(issue.id, targetStateId);

      if (updateSuccess) {
        // Add comment explaining the state change (best-effort; its
        // success is not checked).
        const comment = generateStateChangeComment(sessionState, session);
        await addComment(issue.id, comment);

        // Update sync state and persist it to disk.
        syncState.set(syncKey, {
          lastState: sessionState,
          lastUpdated: new Date().toISOString(),
        });
        saveSyncState();

        results.updated.push({
          identifier,
          fromState: issue.state.name,
          toState: sessionState,
          url: issue.url,
        });
      } else {
        results.errors.push(`Failed to update ${identifier}`);
      }
    } catch (e) {
      // Keep going on per-issue failure; report it in the summary.
      results.errors.push(`Error processing ${identifier}: ${e.message}`);
    }
  }

  return results;
}
|
||||
|
||||
/**
 * Build the markdown comment posted to Linear when a session changes state.
 * Known states get an emoji headline plus a shared session/time footer;
 * unknown states get a minimal fallback without the signature line.
 *
 * @param {string} newState - New session state
 * @param {object} session - Session data (label/key used for attribution)
 * @returns {string} Comment body (markdown)
 */
function generateStateChangeComment(newState, session) {
  const when = new Date().toISOString();
  const who = session.label || session.key || "Unknown session";

  // Shared attribution block appended to every known-state comment.
  const meta = `Session: \`${who}\`\n` + `Time: ${when}\n\n`;
  const signature = `_Updated automatically by OpenClaw Dashboard_`;

  if (newState === "active" || newState === "live") {
    return `🟢 **Work resumed** on this issue.\n\n` + meta + signature;
  }
  if (newState === "idle") {
    return `⏸️ **Work paused** on this issue (session idle >30 min).\n\n` + meta + signature;
  }
  if (newState === "completed") {
    return `✅ **Work completed** on this issue.\n\n` + meta + signature;
  }
  // Unknown state: no signature, and no trailing blank line after Time.
  return `📝 Session state changed to: ${newState}\n\n` + `Session: \`${who}\`\n` + `Time: ${when}`;
}
|
||||
|
||||
/**
 * Read a session transcript from its JSONL file under the OpenClaw dir.
 * (Mirrors the function in server.js.) Malformed or falsy lines are
 * silently dropped; a missing file or read error yields an empty array.
 *
 * @param {string} sessionId - Session ID
 * @returns {Array} Transcript entries
 */
function readTranscript(sessionId) {
  const transcriptPath = path.join(
    getOpenClawDir(),
    "agents",
    "main",
    "sessions",
    `${sessionId}.jsonl`,
  );

  try {
    if (!fs.existsSync(transcriptPath)) return [];

    const entries = [];
    for (const line of fs.readFileSync(transcriptPath, "utf8").trim().split("\n")) {
      let parsed = null;
      try {
        parsed = JSON.parse(line);
      } catch {
        // Ignore unparsable lines (partial writes, blank lines).
      }
      // Matches the original filter(Boolean): falsy JSON values are dropped too.
      if (parsed) entries.push(parsed);
    }
    return entries;
  } catch (e) {
    console.error("[Linear] Failed to read transcript:", e.message);
    return [];
  }
}
|
||||
|
||||
/**
 * Entry point for server.js: sync one session's Linear issues.
 * Loads the transcript for the session and delegates to syncSessionToLinear,
 * logging a one-line summary when anything was updated.
 *
 * @param {object} session - Session data from OpenClaw
 * @returns {Promise<object>} Sync results (or { error } when sessionId missing)
 */
async function onSessionUpdate(session) {
  if (!session.sessionId) {
    console.error("[Linear] Session missing sessionId");
    return { error: "Missing sessionId" };
  }

  const entries = readTranscript(session.sessionId);
  const outcome = await syncSessionToLinear(session, entries);

  if (outcome.updated.length > 0) {
    const ids = outcome.updated.map((item) => item.identifier).join(", ");
    console.log(`[Linear] Updated ${outcome.updated.length} issues:`, ids);
  }

  return outcome;
}
|
||||
|
||||
/**
 * Batch sync all active sessions.
 * Useful for periodic sync via cron or manual trigger.
 * Shells out to the `openclaw` CLI for the session list, then runs
 * onSessionUpdate sequentially for each session and aggregates counts.
 *
 * @returns {Promise<object>} Aggregate results, or { error } when the
 *   CLI is unavailable or its output cannot be parsed.
 */
async function syncAllSessions() {
  const { execSync } = require("child_process");

  try {
    // NO_COLOR keeps ANSI escapes out of the JSON; stderr is discarded.
    const output = execSync("openclaw sessions --json 2>/dev/null", {
      encoding: "utf8",
      env: { ...process.env, NO_COLOR: "1" },
    });

    const data = JSON.parse(output);
    const sessions = data.sessions || [];

    const allResults = {
      sessionsProcessed: 0,
      totalIssuesFound: 0,
      totalUpdated: 0,
      errors: [],
    };

    // Sequential on purpose: one Linear API conversation at a time.
    for (const session of sessions) {
      const results = await onSessionUpdate(session);
      allResults.sessionsProcessed++;
      allResults.totalIssuesFound += results.issuesFound?.length || 0;
      allResults.totalUpdated += results.updated?.length || 0;
      if (results.errors?.length) {
        allResults.errors.push(...results.errors);
      }
    }

    return allResults;
  } catch (e) {
    console.error("[Linear] Failed to sync all sessions:", e.message);
    return { error: e.message };
  }
}
|
||||
|
||||
// Initialize: load persisted sync state at require time, so both the
// server integration and CLI mode start with prior history.
loadSyncState();

// Exports for server.js integration
module.exports = {
  // Core functions
  extractLinearIds,
  extractLinearIdsFromTranscript,
  getIssue,
  updateIssueState,
  addComment,

  // Session sync
  syncSessionToLinear,
  onSessionUpdate,
  syncAllSessions,

  // State helpers
  determineSessionState,

  // Constants
  LINEAR_STATES,
  STATE_MAP,
};

// CLI mode: run a one-shot batch sync when executed directly
// (`node scripts/linear-sync.js`). Exit code 1 signals failure to cron.
if (require.main === module) {
  console.log("[Linear] Running batch sync...");
  syncAllSessions()
    .then((results) => {
      console.log("[Linear] Sync complete:", JSON.stringify(results, null, 2));
      process.exit(results.error ? 1 : 0);
    })
    .catch((e) => {
      console.error("[Linear] Sync failed:", e.message);
      process.exit(1);
    });
}
|
||||
157
scripts/release.sh
Normal file
157
scripts/release.sh
Normal file
@@ -0,0 +1,157 @@
|
||||
#!/usr/bin/env bash
#
# release.sh - Create a versioned release with git tag and ClawHub publish
#
# Usage:
#   ./scripts/release.sh <version>              # Create tag + publish
#   ./scripts/release.sh <version> --tag-only   # Create tag only
#   ./scripts/release.sh --current              # Show current version
#
# Examples:
#   ./scripts/release.sh 0.4.0
#   ./scripts/release.sh 1.0.0-beta.1
#

set -euo pipefail

# Always operate from the repo root, regardless of invocation directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_DIR="$(cd "$SCRIPT_DIR/.." && pwd)"

cd "$REPO_DIR"

# Get current version from latest tag ("0.0.0" when no tags exist yet).
get_current_version() {
  git describe --tags --abbrev=0 2>/dev/null | sed 's/^v//' || echo "0.0.0"
}

# Show help
show_help() {
  echo "Usage: release.sh <version> [--tag-only]"
  echo "       release.sh --current"
  echo ""
  echo "Options:"
  echo "  <version>    Semver version (e.g., 0.4.0, 1.0.0-beta.1)"
  echo "  --tag-only   Create git tag without ClawHub publish"
  echo "  --current    Show current version from git tags"
  echo "  -h, --help   Show this help"
}

# Parse args
if [[ $# -eq 0 ]]; then
  show_help
  exit 1
fi

TAG_ONLY=false
VERSION=""

while [[ $# -gt 0 ]]; do
  case "$1" in
    --current)
      echo "Current version: $(get_current_version)"
      exit 0
      ;;
    --tag-only)
      TAG_ONLY=true
      shift
      ;;
    -h|--help)
      show_help
      exit 0
      ;;
    *)
      # Any non-flag argument is taken as the version (last one wins).
      VERSION="$1"
      shift
      ;;
  esac
done

if [[ -z "$VERSION" ]]; then
  echo "❌ Version required"
  show_help
  exit 1
fi

# Validate semver (basic check: X.Y.Z with optional -prerelease suffix).
if ! [[ "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.]+)?$ ]]; then
  echo "❌ Invalid semver: $VERSION"
  echo "   Expected format: X.Y.Z or X.Y.Z-prerelease"
  exit 1
fi

TAG="v$VERSION"
CURRENT=$(get_current_version)

echo "📦 Release: $CURRENT → $VERSION"
echo ""

# Check for uncommitted changes (both worktree and index must be clean).
if ! git diff --quiet || ! git diff --cached --quiet; then
  echo "❌ Uncommitted changes detected. Commit or stash first."
  exit 1
fi

# Check if tag already exists
if git rev-parse "$TAG" >/dev/null 2>&1; then
  echo "❌ Tag $TAG already exists"
  exit 1
fi

# Update package.json version
if [[ -f package.json ]]; then
  # Use npm version without git tag (we do our own tagging)
  npm version "$VERSION" --no-git-tag-version
fi

# Update SKILL.md version if it exists.
# BSD sed needs `-i ''`; the fallback covers GNU sed.
# NOTE(review): the character class [^\"]* excludes double quotes, not
# backticks — a greedy match could span past the closing backtick if the
# line contains more than one; confirm [^\`]* was intended.
if [[ -f SKILL.md ]]; then
  sed -i '' "s/\*\*Version\*\* | \`[^\"]*\`/\*\*Version\*\* | \`$VERSION\`/" SKILL.md 2>/dev/null || \
    sed -i "s/\*\*Version\*\* | \`[^\"]*\`/\*\*Version\*\* | \`$VERSION\`/" SKILL.md
fi

# Commit version bump (--allow-empty so the release commit exists even
# when nothing actually changed, e.g. SKILL.md absent).
git add package.json package-lock.json SKILL.md 2>/dev/null || true
git commit -m "chore: release v$VERSION" --allow-empty

# Create annotated tag
echo "🏷️  Creating tag $TAG..."
git tag -a "$TAG" -m "Release $VERSION"

# Push commit and tag
# NOTE(review): pushes the 'main' branch specifically; cutting a release
# from any other branch would push main instead — confirm intended.
echo "⬆️  Pushing to origin..."
git push origin main
git push origin "$TAG"

echo ""
echo "✅ Tagged $TAG"

# Publish to ClawHub unless --tag-only
if [[ "$TAG_ONLY" == "false" ]]; then
  echo ""
  echo "📤 Publishing to ClawHub..."

  # Get changelog from CHANGELOG.md if available: the body of the
  # "## [<version>]" section, capped at 20 lines.
  CHANGELOG=""
  if [[ -f CHANGELOG.md ]]; then
    CHANGELOG=$(awk '/^## \['"$VERSION"'\]/{found=1; next} /^## \[/{if(found) exit} found{print}' CHANGELOG.md | head -20)
  fi

  if command -v clawhub &>/dev/null; then
    if [[ -n "$CHANGELOG" ]]; then
      clawhub publish . --version "$VERSION" --changelog "$CHANGELOG"
    else
      clawhub publish . --version "$VERSION" --changelog "Release v$VERSION"
    fi
    echo ""
    echo "✅ Published to ClawHub: $VERSION"
  else
    echo "⚠️  clawhub CLI not found. Skipping ClawHub publish."
    echo "   Install: npm install -g clawhub"
  fi
fi

echo ""
echo "🎉 Release $VERSION complete!"
echo ""
echo "   Git tag: $TAG"
echo "   GitHub: https://github.com/jontsai/openclaw-command-center/releases/tag/$TAG"
||||
11
scripts/run-server.sh
Normal file
11
scripts/run-server.sh
Normal file
@@ -0,0 +1,11 @@
|
||||
#!/bin/bash
# Wrapper script to launch the dashboard server with a sane PATH.
# Ensures system directories are present even when invoked from a
# minimal environment (launchd, cron, etc.).
export PATH="/opt/homebrew/bin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:$PATH"

# Find node - prefer nvm-managed versions when nvm is installed.
# shellcheck disable=SC1091
if [ -f "$HOME/.nvm/nvm.sh" ]; then
    source "$HOME/.nvm/nvm.sh"
fi

# Run from the project root (parent of this script's directory).
# Abort explicitly if the cd fails so we never exec from the wrong cwd.
cd "$(dirname "$0")/.." || exit 1
exec node lib/server.js
|
||||
143
scripts/setup.sh
Normal file
143
scripts/setup.sh
Normal file
@@ -0,0 +1,143 @@
|
||||
#!/bin/bash
# OpenClaw Command Center - First-time setup
# Creates necessary directories and config file, installs npm
# dependencies, detects an existing OpenClaw workspace, and reports
# the status of optional system dependencies.

set -e

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
CONFIG_DIR="$PROJECT_DIR/config"

echo "🦞 OpenClaw Command Center Setup"
echo "================================="
echo ""

# Check for Node.js
if ! command -v node &> /dev/null; then
    echo "❌ Node.js not found. Please install Node.js 20+ first."
    exit 1
fi

# Extract the major version ("v20.11.1" -> "20"). Validate before the
# numeric comparison: a non-numeric value would make [ -lt ] error out
# and abort the whole script under `set -e`.
NODE_VERSION=$(node -v | cut -d'v' -f2 | cut -d'.' -f1)
if ! [[ "$NODE_VERSION" =~ ^[0-9]+$ ]]; then
    echo "⚠️  Could not parse Node.js version from 'node -v'. Continuing anyway."
elif [ "$NODE_VERSION" -lt 20 ]; then
    echo "⚠️  Node.js version $NODE_VERSION detected. Version 20+ recommended."
fi

# Install dependencies
echo "📦 Installing dependencies..."
cd "$PROJECT_DIR"
npm install --silent

# Create config if not exists
if [ ! -f "$CONFIG_DIR/dashboard.json" ]; then
    # Guard the template explicitly: a plain `cp` of a missing file would
    # abort under `set -e` with a cryptic error.
    if [ ! -f "$CONFIG_DIR/dashboard.example.json" ]; then
        echo "❌ Missing template: config/dashboard.example.json"
        exit 1
    fi
    echo ""
    echo "📝 Creating configuration file..."
    cp "$CONFIG_DIR/dashboard.example.json" "$CONFIG_DIR/dashboard.json"
    echo "   Created: config/dashboard.json"
    echo ""
    echo "   Edit this file to customize your dashboard."
else
    echo "   Config file already exists: config/dashboard.json"
fi

# Create log directory
LOG_DIR="$HOME/.openclaw-command-center/logs"
mkdir -p "$LOG_DIR"
echo "   Log directory: $LOG_DIR"

# Detect workspace
echo ""
echo "🔍 Detecting OpenClaw workspace..."

DETECTED_WORKSPACE=""
# ${VAR:-} keeps the first candidate safe if OPENCLAW_WORKSPACE is unset
# (and future-proofs the script against `set -u`).
for candidate in \
    "${OPENCLAW_WORKSPACE:-}" \
    "$HOME/openclaw-workspace" \
    "$HOME/.openclaw-workspace" \
    "$HOME/molty" \
    "$HOME/clawd" \
    "$HOME/moltbot"; do
    if [ -n "$candidate" ] && [ -d "$candidate" ]; then
        # A real workspace has a memory/ or state/ subdirectory.
        if [ -d "$candidate/memory" ] || [ -d "$candidate/state" ]; then
            DETECTED_WORKSPACE="$candidate"
            break
        fi
    fi
done

if [ -n "$DETECTED_WORKSPACE" ]; then
    echo "   ✅ Found workspace: $DETECTED_WORKSPACE"
else
    echo "   ⚠️  No existing workspace found."
    echo "   The dashboard will create ~/.openclaw-workspace on first run,"
    echo "   or you can set OPENCLAW_WORKSPACE environment variable."
fi

# Create Makefile.local if not exists
if [ ! -f "$PROJECT_DIR/Makefile.local" ]; then
    echo ""
    echo "📝 Creating Makefile.local with 'lfg' command..."
    cat > "$PROJECT_DIR/Makefile.local" << 'EOF'
# Private Makefile overrides (not tracked in git)

.PHONY: lfg

lfg: ## Start dashboard and drop into cockpit
	@$(MAKE) ensure
	@$(MAKE) attach
EOF
    echo "   Created: Makefile.local"
fi

# Check optional system dependencies
echo ""
echo "🔍 Checking optional system dependencies..."

OS_TYPE="$(uname -s)"
OPT_MISSING=0

if [ "$OS_TYPE" = "Linux" ]; then
    if command -v iostat &> /dev/null; then
        echo "   ✅ sysstat (iostat) — disk I/O vitals"
    else
        echo "   💡 sysstat — install for disk I/O vitals: sudo apt install sysstat"
        OPT_MISSING=$((OPT_MISSING + 1))
    fi
    if command -v sensors &> /dev/null; then
        echo "   ✅ lm-sensors — temperature sensors"
    else
        echo "   💡 lm-sensors — install for temperature sensors: sudo apt install lm-sensors"
        OPT_MISSING=$((OPT_MISSING + 1))
    fi
elif [ "$OS_TYPE" = "Darwin" ]; then
    # Check for Apple Silicon vs Intel
    CHIP="$(sysctl -n machdep.cpu.brand_string 2>/dev/null || echo "")"
    if echo "$CHIP" | grep -qi "apple"; then
        # powermetrics needs root; passwordless sudo makes it usable
        # non-interactively from the dashboard.
        if sudo -n true 2>/dev/null; then
            echo "   ✅ passwordless sudo — Apple Silicon CPU temperature"
        else
            echo "   💡 passwordless sudo — configure for CPU temperature via powermetrics"
        fi
    else
        if command -v osx-cpu-temp &> /dev/null || [ -x "$HOME/bin/osx-cpu-temp" ]; then
            echo "   ✅ osx-cpu-temp — Intel Mac CPU temperature"
        else
            echo "   💡 osx-cpu-temp — install for CPU temperature: https://github.com/lavoiesl/osx-cpu-temp"
            OPT_MISSING=$((OPT_MISSING + 1))
        fi
    fi
fi

if [ "$OPT_MISSING" -eq 0 ]; then
    echo "   All optional dependencies available!"
fi

echo ""
echo "✅ Setup complete!"
echo ""
echo "Quick start:"
echo "  cd $PROJECT_DIR"
echo "  make start    # Start dashboard"
echo "  make lfg      # Start and attach to tmux"
echo ""
echo "Dashboard will be available at: http://localhost:3333"
||||
70
scripts/start.sh
Normal file
70
scripts/start.sh
Normal file
@@ -0,0 +1,70 @@
|
||||
#!/bin/bash
# Start OpenClaw Command Center
# Usage: ./start.sh [--tunnel] [--port PORT]

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PORT=3333
TUNNEL=false
PID_FILE="/tmp/openclaw-dashboard.pid"
TUNNEL_PID_FILE="/tmp/openclaw-tunnel.pid"

# Parse args
while [[ $# -gt 0 ]]; do
    case $1 in
        --tunnel|-t)
            TUNNEL=true
            shift
            ;;
        --port|-p)
            PORT="$2"
            shift 2
            ;;
        *)
            shift
            ;;
    esac
done

# Check if already running. A PID file whose process is gone (e.g. after
# a crash or reboot) is stale — remove it instead of blocking startup.
if [ -f "$PID_FILE" ]; then
    if kill -0 "$(cat "$PID_FILE")" 2>/dev/null; then
        echo "⚠️  Dashboard already running (PID: $(cat "$PID_FILE"))"
        echo "   Stop it first: ./stop.sh"
        exit 1
    fi
    rm -f "$PID_FILE"
fi

echo "🚀 Starting OpenClaw Command Center..."
echo ""

# Start the Node.js server from the project root
cd "$SCRIPT_DIR/.." || exit 1
PORT=$PORT node lib/server.js &
SERVER_PID=$!
echo $SERVER_PID > "$PID_FILE"

sleep 1

# Check if server started; don't leave a PID file pointing at a dead process
if ! kill -0 $SERVER_PID 2>/dev/null; then
    echo "❌ Failed to start server"
    rm -f "$PID_FILE"
    exit 1
fi

echo "✅ Dashboard running at http://localhost:$PORT"

# Start tunnel if requested
if [ "$TUNNEL" = true ]; then
    echo ""
    if ! command -v cloudflared &> /dev/null; then
        # Fail loudly here rather than with a confusing "command not found"
        # from the background job.
        echo "⚠️  cloudflared not found. Skipping tunnel."
        echo "   Install: brew install cloudflared"
    else
        echo "🌐 Starting Cloudflare tunnel..."
        cloudflared tunnel --url "http://localhost:$PORT" &
        TUNNEL_PID=$!
        echo $TUNNEL_PID > "$TUNNEL_PID_FILE"

        # Wait a moment for the tunnel URL to appear
        sleep 3
        echo ""
        echo "📋 Tunnel should be active. Look for the trycloudflare.com URL above."
    fi
fi

echo ""
echo "📊 Dashboard: http://localhost:$PORT"
echo "🛑 To stop: $SCRIPT_DIR/stop.sh"
|
||||
33
scripts/stop.sh
Normal file
33
scripts/stop.sh
Normal file
@@ -0,0 +1,33 @@
|
||||
#!/bin/bash
# Stop OpenClaw Command Center

PID_FILE="/tmp/openclaw-dashboard.pid"
TUNNEL_PID_FILE="/tmp/openclaw-tunnel.pid"

echo "🛑 Stopping OpenClaw Command Center..."

# Terminate the process recorded in a PID file (if it is still alive),
# then remove the file. $1 = pidfile path, $2 = label for the message.
stop_from_pidfile() {
    local pidfile="$1"
    local label="$2"
    local pid

    if [ -f "$pidfile" ]; then
        pid=$(cat "$pidfile")
        if kill -0 "$pid" 2>/dev/null; then
            kill "$pid"
            echo " $label stopped"
        fi
        rm -f "$pidfile"
    fi
}

# Tunnel first, then the server.
stop_from_pidfile "$TUNNEL_PID_FILE" "Tunnel"
stop_from_pidfile "$PID_FILE" "Server"

# Also kill any orphaned processes
pkill -f "node.*lib/server.js" 2>/dev/null
pkill -f "cloudflared.*localhost:3333" 2>/dev/null

echo "✅ Done"
|
||||
66
scripts/tmux-dashboard.sh
Normal file
66
scripts/tmux-dashboard.sh
Normal file
@@ -0,0 +1,66 @@
|
||||
#!/bin/bash
# Clawd Status Dashboard - tmux layout
# Creates a tmux session with live status panes
# (sessions, cron jobs, gateway status, recent activity), plus extra
# windows for logs and an interactive shell.
# Workspace root comes from $OPENCLAW_WORKSPACE, defaulting to
# ~/.openclaw-workspace.

SESSION="openclaw-status"
OPENCLAW_DIR="${OPENCLAW_WORKSPACE:-$HOME/.openclaw-workspace}"

# Kill existing session if it exists (re-running rebuilds a fresh layout)
tmux kill-session -t "$SESSION" 2>/dev/null

# Create new session (detached); -c makes every pane start in the workspace
tmux new-session -d -s "$SESSION" -c "$OPENCLAW_DIR"

# Rename first window
tmux rename-window -t "$SESSION:0" "dashboard"

# Layout:
# +------------------+------------------+
# | Sessions | Cron Jobs |
# +------------------+------------------+
# | Gateway | Activity |
# +------------------+------------------+

# Pane 0: Sessions (watch openclaw sessions); refreshes every 10s
tmux send-keys -t "$SESSION:0" "watch -n 10 -c 'echo \"📡 ACTIVE SESSIONS\"; echo; openclaw sessions 2>/dev/null || echo \"No sessions\"'" Enter

# Split horizontally for pane 1: Cron Jobs (30s refresh)
tmux split-window -h -t "$SESSION:0" -c "$OPENCLAW_DIR"
tmux send-keys -t "$SESSION:0.1" "watch -n 30 -c 'echo \"⏰ CRON JOBS\"; echo; openclaw cron list 2>/dev/null || echo \"No cron jobs\"'" Enter

# Split pane 0 vertically for pane 2: Gateway Status (15s refresh)
tmux split-window -v -t "$SESSION:0.0" -c "$OPENCLAW_DIR"
tmux send-keys -t "$SESSION:0.2" "watch -n 15 -c 'echo \"🤖 GATEWAY STATUS\"; echo; openclaw gateway status 2>/dev/null; echo; echo \"---\"; openclaw status 2>/dev/null'" Enter

# Split pane 1 vertically for pane 3: Activity Log
# NOTE: \$today is escaped so the date is re-evaluated inside the pane on
# each watch refresh (not frozen at session-creation time).
tmux split-window -v -t "$SESSION:0.1" -c "$OPENCLAW_DIR"
tmux send-keys -t "$SESSION:0.3" "watch -n 30 -c 'echo \"📝 RECENT ACTIVITY\"; echo; today=\$(date +%Y-%m-%d); if [ -f \"memory/\$today.md\" ]; then tail -20 \"memory/\$today.md\"; else echo \"No activity today\"; fi'" Enter

# Make panes more even
tmux select-layout -t "$SESSION:0" tiled

# Add a second window for logs
tmux new-window -t "$SESSION" -n "logs" -c "$OPENCLAW_DIR"
tmux send-keys -t "$SESSION:1" "echo '📜 Gateway Logs'; echo 'Run: openclaw gateway logs -f'; echo" Enter

# Add a third window for interactive shell
tmux new-window -t "$SESSION" -n "shell" -c "$OPENCLAW_DIR"
tmux send-keys -t "$SESSION:2" "echo '🐚 Interactive Shell'; echo 'Ready for commands...'; echo" Enter

# Go back to first window
tmux select-window -t "$SESSION:0"

echo "✅ OpenClaw dashboard created!"
echo ""
echo "To attach: tmux attach -t $SESSION"
echo "To detach: Ctrl+B, then D"
echo ""

# If not already in tmux, offer to attach
# (attaching from inside tmux would nest sessions)
if [ -z "$TMUX" ]; then
    read -p "Attach now? [Y/n] " -n 1 -r
    echo
    if [[ ! $REPLY =~ ^[Nn]$ ]]; then
        tmux attach -t "$SESSION"
    fi
fi
|
||||
700
scripts/topic-classifier.js
Normal file
700
scripts/topic-classifier.js
Normal file
@@ -0,0 +1,700 @@
|
||||
/**
|
||||
* Topic Classifier for OpenClaw Sessions
|
||||
*
|
||||
* Analyzes session transcript content to:
|
||||
* - Match against existing topics
|
||||
* - Detect when existing topics don't fit well
|
||||
* - Suggest new topic names based on content patterns
|
||||
* - Maintain a discovered-topics.json file for learned topics
|
||||
*
|
||||
* @module topic-classifier
|
||||
*/
|
||||
|
||||
const fs = require("fs");
|
||||
const path = require("path");
|
||||
const { CONFIG: APP_CONFIG } = require("../src/config");
|
||||
|
||||
// Default classifier tuning knobs (also exported via module.exports so
// tests and callers can inspect or adjust them).
const CONFIG = {
  // Minimum TF-IDF score to consider a term significant
  minTermScore: 0.1,
  // Minimum topic match confidence to consider a match "good"
  matchThreshold: 0.3,
  // Minimum occurrences for a term to be considered
  minTermFrequency: 2,
  // Path to discovered topics state file
  discoveredTopicsPath: path.join(APP_CONFIG.paths.state, "discovered-topics.json"),
  // Maximum suggested topics per classification
  maxSuggestions: 3,
};
|
||||
|
||||
// Stop words to filter out (common English words)
|
||||
const STOP_WORDS = new Set([
|
||||
"a",
|
||||
"an",
|
||||
"the",
|
||||
"and",
|
||||
"or",
|
||||
"but",
|
||||
"in",
|
||||
"on",
|
||||
"at",
|
||||
"to",
|
||||
"for",
|
||||
"of",
|
||||
"with",
|
||||
"by",
|
||||
"from",
|
||||
"up",
|
||||
"about",
|
||||
"into",
|
||||
"through",
|
||||
"during",
|
||||
"before",
|
||||
"after",
|
||||
"above",
|
||||
"below",
|
||||
"between",
|
||||
"under",
|
||||
"again",
|
||||
"further",
|
||||
"then",
|
||||
"once",
|
||||
"here",
|
||||
"there",
|
||||
"when",
|
||||
"where",
|
||||
"why",
|
||||
"how",
|
||||
"all",
|
||||
"each",
|
||||
"few",
|
||||
"more",
|
||||
"most",
|
||||
"other",
|
||||
"some",
|
||||
"such",
|
||||
"no",
|
||||
"nor",
|
||||
"not",
|
||||
"only",
|
||||
"own",
|
||||
"same",
|
||||
"so",
|
||||
"than",
|
||||
"too",
|
||||
"very",
|
||||
"s",
|
||||
"t",
|
||||
"can",
|
||||
"will",
|
||||
"just",
|
||||
"don",
|
||||
"should",
|
||||
"now",
|
||||
"i",
|
||||
"me",
|
||||
"my",
|
||||
"myself",
|
||||
"we",
|
||||
"our",
|
||||
"ours",
|
||||
"you",
|
||||
"your",
|
||||
"yours",
|
||||
"he",
|
||||
"him",
|
||||
"his",
|
||||
"she",
|
||||
"her",
|
||||
"hers",
|
||||
"it",
|
||||
"its",
|
||||
"they",
|
||||
"them",
|
||||
"their",
|
||||
"theirs",
|
||||
"what",
|
||||
"which",
|
||||
"who",
|
||||
"whom",
|
||||
"this",
|
||||
"that",
|
||||
"these",
|
||||
"those",
|
||||
"am",
|
||||
"is",
|
||||
"are",
|
||||
"was",
|
||||
"were",
|
||||
"be",
|
||||
"been",
|
||||
"being",
|
||||
"have",
|
||||
"has",
|
||||
"had",
|
||||
"having",
|
||||
"do",
|
||||
"does",
|
||||
"did",
|
||||
"doing",
|
||||
"would",
|
||||
"could",
|
||||
"ought",
|
||||
"let",
|
||||
"like",
|
||||
"need",
|
||||
"want",
|
||||
"got",
|
||||
"get",
|
||||
"make",
|
||||
"made",
|
||||
"see",
|
||||
"look",
|
||||
"think",
|
||||
"know",
|
||||
"take",
|
||||
"come",
|
||||
"go",
|
||||
"say",
|
||||
"said",
|
||||
"tell",
|
||||
"told",
|
||||
"ask",
|
||||
"use",
|
||||
"used",
|
||||
"find",
|
||||
"give",
|
||||
"gave",
|
||||
"yes",
|
||||
"no",
|
||||
"ok",
|
||||
"okay",
|
||||
"yeah",
|
||||
"sure",
|
||||
"right",
|
||||
"well",
|
||||
"also",
|
||||
"just",
|
||||
"really",
|
||||
"actually",
|
||||
"basically",
|
||||
"probably",
|
||||
"maybe",
|
||||
// Tech-common words that are too generic
|
||||
"file",
|
||||
"code",
|
||||
"run",
|
||||
"check",
|
||||
"help",
|
||||
"please",
|
||||
"thanks",
|
||||
"hello",
|
||||
"hi",
|
||||
"hey",
|
||||
"good",
|
||||
"great",
|
||||
"nice",
|
||||
"cool",
|
||||
"awesome",
|
||||
"perfect",
|
||||
]);
|
||||
|
||||
// Known topic patterns for seeding - maps keywords to topic names
|
||||
const TOPIC_PATTERNS = {
|
||||
// Development
|
||||
git: "version-control",
|
||||
github: "version-control",
|
||||
commit: "version-control",
|
||||
branch: "version-control",
|
||||
merge: "version-control",
|
||||
pull: "version-control",
|
||||
push: "version-control",
|
||||
|
||||
debug: "debugging",
|
||||
error: "debugging",
|
||||
bug: "debugging",
|
||||
fix: "debugging",
|
||||
stack: "debugging",
|
||||
trace: "debugging",
|
||||
exception: "debugging",
|
||||
|
||||
test: "testing",
|
||||
unittest: "testing",
|
||||
jest: "testing",
|
||||
pytest: "testing",
|
||||
coverage: "testing",
|
||||
|
||||
deploy: "deployment",
|
||||
production: "deployment",
|
||||
staging: "deployment",
|
||||
ci: "deployment",
|
||||
cd: "deployment",
|
||||
pipeline: "deployment",
|
||||
|
||||
api: "api-integration",
|
||||
endpoint: "api-integration",
|
||||
rest: "api-integration",
|
||||
graphql: "api-integration",
|
||||
webhook: "api-integration",
|
||||
|
||||
database: "database",
|
||||
sql: "database",
|
||||
postgres: "database",
|
||||
mysql: "database",
|
||||
mongodb: "database",
|
||||
query: "database",
|
||||
|
||||
docker: "containers",
|
||||
kubernetes: "containers",
|
||||
k8s: "containers",
|
||||
container: "containers",
|
||||
pod: "containers",
|
||||
|
||||
aws: "cloud-infra",
|
||||
gcp: "cloud-infra",
|
||||
azure: "cloud-infra",
|
||||
terraform: "cloud-infra",
|
||||
cloudformation: "cloud-infra",
|
||||
|
||||
// Communication
|
||||
slack: "slack-integration",
|
||||
channel: "slack-integration",
|
||||
message: "messaging",
|
||||
email: "email",
|
||||
notification: "notifications",
|
||||
|
||||
// Automation
|
||||
cron: "scheduling",
|
||||
schedule: "scheduling",
|
||||
timer: "scheduling",
|
||||
job: "scheduling",
|
||||
|
||||
script: "automation",
|
||||
automate: "automation",
|
||||
workflow: "automation",
|
||||
|
||||
// Research
|
||||
research: "research",
|
||||
search: "research",
|
||||
wikipedia: "research",
|
||||
lookup: "research",
|
||||
|
||||
// Finance
|
||||
finance: "finance",
|
||||
investment: "finance",
|
||||
stock: "finance",
|
||||
portfolio: "finance",
|
||||
budget: "finance",
|
||||
|
||||
// System
|
||||
config: "configuration",
|
||||
settings: "configuration",
|
||||
setup: "configuration",
|
||||
install: "setup",
|
||||
|
||||
// Writing
|
||||
document: "documentation",
|
||||
readme: "documentation",
|
||||
docs: "documentation",
|
||||
write: "writing",
|
||||
draft: "writing",
|
||||
|
||||
// AI/ML
|
||||
model: "ai-ml",
|
||||
claude: "ai-ml",
|
||||
openai: "ai-ml",
|
||||
gpt: "ai-ml",
|
||||
llm: "ai-ml",
|
||||
prompt: "prompt-engineering",
|
||||
|
||||
// UI
|
||||
dashboard: "dashboard",
|
||||
ui: "ui-development",
|
||||
frontend: "ui-development",
|
||||
css: "ui-development",
|
||||
html: "ui-development",
|
||||
react: "ui-development",
|
||||
};
|
||||
|
||||
/**
 * Break raw text into significant lowercase word tokens.
 *
 * Strips fenced code blocks, inline code, and URLs before splitting,
 * then drops stop words, pure numbers, and tokens of unhelpful length.
 *
 * @param {string} text - Raw text to tokenize
 * @returns {string[]} Array of lowercase tokens
 */
function tokenize(text) {
  if (!text || typeof text !== "string") return [];

  // Scrub markup that would otherwise pollute the term statistics.
  const cleaned = text
    .toLowerCase()
    .replace(/```[\s\S]*?```/g, " ") // fenced code blocks
    .replace(/`[^`]+`/g, " ") // inline code
    .replace(/https?:\/\/\S+/g, " ") // URLs
    .replace(/[^a-z0-9\s-]/g, " "); // punctuation (keep in-word hyphens)

  const isSignificant = (token) =>
    token.length > 2 && token.length < 30 && !STOP_WORDS.has(token) && !/^\d+$/.test(token);

  return cleaned.split(/\s+/).filter(isSignificant);
}
|
||||
|
||||
/**
 * Compute normalized term frequency for one document.
 *
 * @param {string[]} tokens - Array of tokens
 * @returns {Map<string, number>} Term -> count divided by total tokens
 */
function calculateTF(tokens) {
  const total = tokens.length || 1;

  // First pass: raw counts per term.
  const counts = new Map();
  for (const token of tokens) {
    counts.set(token, (counts.get(token) || 0) + 1);
  }

  // Second pass: normalize by document length.
  const tf = new Map();
  for (const [term, count] of counts) {
    tf.set(term, count / total);
  }
  return tf;
}
|
||||
|
||||
/**
 * Approximate inverse document frequency from a single document.
 *
 * Rarer terms score higher; terms listed in TOPIC_PATTERNS receive a
 * 2x boost so known topic keywords surface as key terms. Scores are
 * clamped at zero.
 *
 * @param {Map<string, number>} tf - Term frequency map
 * @param {number} vocabSize - Size of vocabulary
 * @returns {Map<string, number>} IDF scores
 */
function calculateIDF(tf, vocabSize) {
  const idf = new Map();

  for (const [term, freq] of tf) {
    // Known topic keywords are weighted up.
    const boost = TOPIC_PATTERNS[term] ? 2.0 : 1.0;
    const raw = Math.log(vocabSize / (1 + freq * vocabSize)) * boost;
    idf.set(term, Math.max(0, raw));
  }

  return idf;
}
|
||||
|
||||
/**
 * Extract key terms using TF-IDF.
 *
 * @param {string} text - Text to analyze
 * @returns {Array<{term: string, score: number, count: number}>} Terms
 *   ranked by TF-IDF score, descending
 */
function extractKeyTerms(text) {
  const tokens = tokenize(text);
  if (tokens.length === 0) return [];

  const tf = calculateTF(tokens);
  const idf = calculateIDF(tf, tf.size);

  // Precompute raw counts in a single pass. (The previous version
  // re-filtered the whole token array once per term, which was
  // accidentally O(tokens * vocabulary).)
  const rawCounts = new Map();
  tokens.forEach((token) => {
    rawCounts.set(token, (rawCounts.get(token) || 0) + 1);
  });

  const tfidf = [];
  tf.forEach((tfScore, term) => {
    const idfScore = idf.get(term) || 0;
    const score = tfScore * idfScore;

    // Only include terms that meet both minimum thresholds.
    const rawCount = rawCounts.get(term) || 0;
    if (rawCount >= CONFIG.minTermFrequency && score >= CONFIG.minTermScore) {
      tfidf.push({ term, score, count: rawCount });
    }
  });

  // Sort by score descending
  return tfidf.sort((a, b) => b.score - a.score);
}
|
||||
|
||||
/**
 * Score how well the text matches each existing topic.
 *
 * Combines direct token overlap (substring matches included) with
 * TOPIC_PATTERNS keyword hits, normalized by text length on a log
 * scale so long transcripts are not over-penalized.
 *
 * @param {string} text - Text to match
 * @param {string[]} existingTopics - List of existing topic names
 * @returns {Array<{topic: string, confidence: number}>} Matched topics,
 *   sorted by confidence descending
 */
function matchTopics(text, existingTopics) {
  const tokens = tokenize(text);
  const matches = new Map();

  for (const topic of existingTopics) {
    let score = 0;

    // Direct overlap between topic words and text tokens; substring
    // matches count too (e.g. "test" vs "testing").
    for (const topicToken of tokenize(topic)) {
      const hits = tokens.filter(
        (t) => t === topicToken || t.includes(topicToken) || topicToken.includes(t),
      ).length;
      score += hits * 0.3;
    }

    // Keyword-pattern hits that map directly onto this topic.
    for (const token of tokens) {
      if (TOPIC_PATTERNS[token] === topic) {
        score += 0.5;
      }
    }

    if (score > 0) {
      // Normalize by text length (log scale), cap confidence at 1.
      matches.set(topic, Math.min(1, score / Math.log2(tokens.length + 2)));
    }
  }

  return Array.from(matches, ([topic, confidence]) => ({ topic, confidence })).sort(
    (a, b) => b.confidence - a.confidence,
  );
}
|
||||
|
||||
/**
 * Propose new topic names for content that existing topics don't cover.
 *
 * Three strategies run in priority order: known keyword patterns,
 * compound topics built from co-occurring related term pairs, and
 * strong standalone terms used verbatim.
 *
 * @param {Array<{term: string, score: number}>} keyTerms - Key terms from text
 * @param {string[]} existingTopics - Topics to avoid suggesting
 * @returns {string[]} Up to CONFIG.maxSuggestions new topic names
 */
function generateSuggestions(keyTerms, existingTopics) {
  const taken = new Set(existingTopics.map((t) => t.toLowerCase()));
  const suggestions = new Set();

  // Strategy 1: map the top terms through the known keyword patterns.
  for (const { term } of keyTerms.slice(0, 15)) {
    const mapped = TOPIC_PATTERNS[term];
    if (mapped && !taken.has(mapped)) {
      suggestions.add(mapped);
    }
  }

  // Strategy 2: build compound topics when both halves of a related
  // pair appear among the strongest terms.
  if (keyTerms.length >= 2 && suggestions.size < CONFIG.maxSuggestions) {
    const topTerms = keyTerms.slice(0, 5).map((t) => t.term);

    const relatedPairs = [
      ["api", "integration"],
      ["code", "review"],
      ["data", "analysis"],
      ["error", "handling"],
      ["file", "management"],
      ["memory", "optimization"],
      ["performance", "tuning"],
      ["security", "audit"],
      ["system", "design"],
      ["user", "interface"],
    ];

    for (const [first, second] of relatedPairs) {
      const hasFirst = topTerms.some((t) => t.includes(first));
      const hasSecond = topTerms.some((t) => t.includes(second));
      if (hasFirst && hasSecond) {
        const compound = `${first}-${second}`;
        if (!taken.has(compound)) {
          suggestions.add(compound);
        }
      }
    }
  }

  // Strategy 3: promote a sufficiently strong standalone term verbatim.
  if (suggestions.size < CONFIG.maxSuggestions) {
    for (const { term, score } of keyTerms.slice(0, 5)) {
      if (score > 0.15 && term.length > 4 && !taken.has(term)) {
        suggestions.add(term);
      }
    }
  }

  return Array.from(suggestions).slice(0, CONFIG.maxSuggestions);
}
|
||||
|
||||
/**
 * Load discovered topics from the state file.
 *
 * Falls back to an empty structure when the file is missing or cannot
 * be read/parsed (the error is logged, not thrown).
 *
 * @returns {Object} Discovered topics data ({version, topics, lastUpdated})
 */
function loadDiscoveredTopics() {
  const empty = { version: 1, topics: {}, lastUpdated: null };

  try {
    if (!fs.existsSync(CONFIG.discoveredTopicsPath)) {
      return empty;
    }
    return JSON.parse(fs.readFileSync(CONFIG.discoveredTopicsPath, "utf8"));
  } catch (e) {
    console.error("Failed to load discovered topics:", e.message);
    return empty;
  }
}
|
||||
|
||||
/**
 * Save discovered topics to the state file, stamping lastUpdated.
 *
 * Persistence is best-effort: errors are logged rather than thrown.
 *
 * @param {Object} data - Topics data to save (mutated: lastUpdated is set)
 */
function saveDiscoveredTopics(data) {
  try {
    data.lastUpdated = new Date().toISOString();
    // Ensure the state directory exists — writeFileSync does not create
    // missing parent directories and would otherwise throw ENOENT on a
    // fresh install.
    fs.mkdirSync(path.dirname(CONFIG.discoveredTopicsPath), { recursive: true });
    fs.writeFileSync(CONFIG.discoveredTopicsPath, JSON.stringify(data, null, 2));
  } catch (e) {
    console.error("Failed to save discovered topics:", e.message);
  }
}
|
||||
|
||||
/**
 * Record fresh topic suggestions in the discovered-topics state file.
 *
 * Tracks first/last sighting timestamps, an occurrence count, and up
 * to the 10 most recent source sessions per topic.
 *
 * @param {string[]} suggestions - New topic suggestions
 * @param {string} sessionKey - Source session identifier
 */
function updateDiscoveredTopics(suggestions, sessionKey) {
  const data = loadDiscoveredTopics();

  for (const topic of suggestions) {
    if (!data.topics[topic]) {
      data.topics[topic] = {
        firstSeen: new Date().toISOString(),
        occurrences: 0,
        sessions: [],
      };
    }
    const entry = data.topics[topic];

    entry.occurrences += 1;
    entry.lastSeen = new Date().toISOString();

    if (!entry.sessions.includes(sessionKey)) {
      entry.sessions.push(sessionKey);
      // Cap the session history at the 10 most recent.
      if (entry.sessions.length > 10) {
        entry.sessions.shift();
      }
    }
  }

  saveDiscoveredTopics(data);
}
|
||||
|
||||
/**
 * Main classification function
 * Analyzes transcript content to match existing topics and suggest new ones
 *
 * @param {string|Array} transcript - Session transcript (string or array of messages)
 * @param {string[]} existingTopics - List of existing topic names
 * @param {Object} options - Optional configuration
 * @param {string} options.sessionKey - Session identifier for tracking
 * @param {boolean} options.persist - Whether to persist discovered topics (default: true)
 * @returns {{matched: Array<{topic: string, confidence: number}>, suggested: string[], keyTerms: Array, confidence: number}}
 *   matched is capped at 5, keyTerms at 10; confidence is the best match's score (0 if none)
 */
function classifyAndSuggestTopics(transcript, existingTopics = [], options = {}) {
  // Normalize transcript to text. Array entries may be plain strings,
  // objects with a .text field, or message objects whose .message.content
  // is either a string or an array of typed parts (only "text" parts are kept).
  let text = "";
  if (Array.isArray(transcript)) {
    text = transcript
      .map((entry) => {
        if (typeof entry === "string") return entry;
        if (entry.text) return entry.text;
        if (entry.message?.content) {
          const content = entry.message.content;
          if (typeof content === "string") return content;
          if (Array.isArray(content)) {
            return content
              .filter((c) => c.type === "text")
              .map((c) => c.text || "")
              .join(" ");
          }
        }
        // Unrecognized entry shapes contribute nothing.
        return "";
      })
      .join("\n");
  } else if (typeof transcript === "string") {
    text = transcript;
  }

  // Too little text to classify meaningfully — return an empty result.
  if (!text || text.length < 20) {
    return { matched: [], suggested: [], keyTerms: [] };
  }

  // Extract key terms via TF-IDF
  const keyTerms = extractKeyTerms(text);

  // Match against existing topics
  const matched = matchTopics(text, existingTopics);

  // Suggest new topics only when no existing topic matches well enough.
  const bestMatch = matched[0];
  const needsSuggestions = !bestMatch || bestMatch.confidence < CONFIG.matchThreshold;

  let suggested = [];
  if (needsSuggestions) {
    suggested = generateSuggestions(keyTerms, existingTopics);

    // Persist discovered topics if enabled (requires a sessionKey so the
    // suggestion can be attributed to its source session).
    if (options.persist !== false && suggested.length > 0 && options.sessionKey) {
      updateDiscoveredTopics(suggested, options.sessionKey);
    }
  }

  return {
    matched: matched.slice(0, 5),
    suggested,
    keyTerms: keyTerms.slice(0, 10),
    confidence: bestMatch?.confidence || 0,
  };
}
|
||||
|
||||
/**
 * List every discovered topic with its usage stats, most frequent first.
 *
 * @returns {Array<{name: string, occurrences: number, sessions: number, firstSeen: string, lastSeen: string}>}
 */
function getDiscoveredTopics() {
  const { topics } = loadDiscoveredTopics();

  const summaries = Object.entries(topics).map(([name, info]) => ({
    name,
    occurrences: info.occurrences,
    sessions: info.sessions?.length || 0,
    firstSeen: info.firstSeen,
    lastSeen: info.lastSeen,
  }));

  return summaries.sort((a, b) => b.occurrences - a.occurrences);
}
|
||||
|
||||
/**
 * Promote a discovered topic to the official topic list.
 *
 * Removes the topic from the discovered-topics state and returns its
 * data (with the name attached) so the caller can register it elsewhere.
 *
 * @param {string} topicName - Topic to promote
 * @returns {Object|null} Topic data or null if not found
 */
function promoteDiscoveredTopic(topicName) {
  const data = loadDiscoveredTopics();
  const found = data.topics[topicName];

  if (!found) return null;

  const promoted = { ...found, name: topicName };
  delete data.topics[topicName];
  saveDiscoveredTopics(data);
  return promoted;
}
|
||||
|
||||
// Export public API
|
||||
module.exports = {
|
||||
classifyAndSuggestTopics,
|
||||
getDiscoveredTopics,
|
||||
promoteDiscoveredTopic,
|
||||
extractKeyTerms,
|
||||
matchTopics,
|
||||
// Export config for testing/tuning
|
||||
CONFIG,
|
||||
TOPIC_PATTERNS,
|
||||
};
|
||||
119
scripts/verify.sh
Normal file
119
scripts/verify.sh
Normal file
@@ -0,0 +1,119 @@
|
||||
#!/usr/bin/env bash
#
# verify.sh - Quick dashboard verification script
#
# Checks that all APIs return data and the dashboard is responsive.
#
# Usage: ./scripts/verify.sh [--url URL]
#

set -euo pipefail

# Default target; overridable via the DASHBOARD_URL env var or --url.
DASHBOARD_URL="${DASHBOARD_URL:-http://localhost:3333}"

# Parse args
while [[ $# -gt 0 ]]; do
  case "$1" in
    --url) DASHBOARD_URL="$2"; shift 2 ;;
    -h|--help)
      echo "Usage: verify.sh [--url URL]"
      echo "  --url URL  Dashboard URL (default: http://localhost:3333)"
      exit 0
      ;;
    # Diagnostics belong on stderr so they don't pollute captured output.
    *) echo "Unknown option: $1" >&2; exit 1 ;;
  esac
done
|
||||
|
||||
echo "🔍 Verifying dashboard at $DASHBOARD_URL..."
|
||||
echo ""
|
||||
|
||||
# Track failures
|
||||
FAILURES=0
|
||||
|
||||
# Check server responds
|
||||
echo -n "📡 Server response... "
|
||||
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" --max-time 5 "$DASHBOARD_URL" 2>/dev/null || echo "000")
|
||||
if [[ "$HTTP_CODE" == "200" ]]; then
|
||||
echo "✅ OK (HTTP $HTTP_CODE)"
|
||||
else
|
||||
echo "❌ FAILED (HTTP $HTTP_CODE)"
|
||||
((FAILURES++))
|
||||
fi
|
||||
|
||||
# Check each API endpoint.  Entry format: "<endpoint>:<expected-json-key>".
ENDPOINTS=(
  "vitals:vitals"
  "operators:operators"
  "llm-usage:claude"
  "memory:memory"
  "cerebro:topics"
  "cron:cron"
)

echo ""
echo "📊 API Endpoints:"

for entry in "${ENDPOINTS[@]}"; do
  endpoint="${entry%%:*}"   # part before the first ':'
  key="${entry##*:}"        # part after the last ':'

  echo -n "  /api/$endpoint... "
  response=$(curl -s --max-time 5 "$DASHBOARD_URL/api/$endpoint" 2>/dev/null || echo "")

  if [[ -z "$response" ]]; then
    echo "❌ No response"
    # FAILURES=$((...)) is safe under `set -e`; ((FAILURES++)) returns
    # status 1 when the pre-increment value is 0 and would kill the script.
    FAILURES=$((FAILURES + 1))
  elif echo "$response" | grep -q "\"$key\""; then
    echo "✅ OK"
  elif echo "$response" | grep -q "error"; then
    echo "⚠️ Error in response"
    FAILURES=$((FAILURES + 1))
  else
    echo "⚠️ Unexpected format"
  fi
done
|
||||
|
||||
echo ""
|
||||
|
||||
# Optional dependency status — report tools that improve vitals collection.
echo "🔧 Optional System Dependencies:"

OS_TYPE="$(uname -s)"
case "$OS_TYPE" in
  Linux)
    # Disk I/O vitals come from sysstat's iostat.
    if command -v iostat >/dev/null 2>&1; then
      echo " ✅ sysstat (iostat) — disk I/O vitals"
    else
      echo " ⚠️ sysstat — not installed (disk I/O stats will show zeros)"
    fi
    # Temperature readings prefer lm-sensors over the sysfs fallback.
    if command -v sensors >/dev/null 2>&1; then
      echo " ✅ lm-sensors — temperature sensors"
    else
      echo " ⚠️ lm-sensors — not installed (using thermal_zone fallback)"
    fi
    ;;
  Darwin)
    CHIP="$(sysctl -n machdep.cpu.brand_string 2>/dev/null || echo "")"
    if echo "$CHIP" | grep -qi "apple"; then
      # Apple Silicon temperature readout requires passwordless sudo.
      if sudo -n true 2>/dev/null; then
        echo " ✅ passwordless sudo — Apple Silicon CPU temperature"
      else
        echo " ⚠️ passwordless sudo — not configured (CPU temperature unavailable)"
      fi
    else
      # Intel Macs use osx-cpu-temp, either on PATH or in ~/bin.
      if command -v osx-cpu-temp >/dev/null 2>&1 || [[ -x "$HOME/bin/osx-cpu-temp" ]]; then
        echo " ✅ osx-cpu-temp — Intel Mac CPU temperature"
      else
        echo " ⚠️ osx-cpu-temp — not installed (using battery temp fallback)"
      fi
    fi
    ;;
esac
|
||||
|
||||
echo ""
|
||||
|
||||
# Summary — exit non-zero when any check failed so CI/hooks can gate on it.
if [[ "$FAILURES" -ne 0 ]]; then
  echo "❌ $FAILURES check(s) failed"
  exit 1
fi

echo "✅ All checks passed!"
exit 0
|
||||
Reference in New Issue
Block a user