mirror of
https://github.com/sweetwisdom/everything-claude-code-zh.git
synced 2026-03-22 14:40:14 +00:00
feat: v1.1.0 release - session ID tracking, async hooks, new skills
- Add session ID to session filenames (Issue #62) - Add getSessionIdShort() helper for unique per-session tracking - Add async hooks documentation with example - Create iterative-retrieval skill for progressive context refinement - Add continuous-learning-v2 skill with instinct-based learning - Add ecc.tools ecosystem section to README - Update skills list in README All 67 tests passing.
This commit is contained in:
24
README.md
24
README.md
@@ -107,6 +107,8 @@ everything-claude-code/
|
|||||||
| |-- backend-patterns/ # API, database, caching patterns
|
| |-- backend-patterns/ # API, database, caching patterns
|
||||||
| |-- frontend-patterns/ # React, Next.js patterns
|
| |-- frontend-patterns/ # React, Next.js patterns
|
||||||
| |-- continuous-learning/ # Auto-extract patterns from sessions (Longform Guide)
|
| |-- continuous-learning/ # Auto-extract patterns from sessions (Longform Guide)
|
||||||
|
| |-- continuous-learning-v2/ # Instinct-based learning with confidence scoring
|
||||||
|
| |-- iterative-retrieval/ # Progressive context refinement for subagents
|
||||||
| |-- strategic-compact/ # Manual compaction suggestions (Longform Guide)
|
| |-- strategic-compact/ # Manual compaction suggestions (Longform Guide)
|
||||||
| |-- tdd-workflow/ # TDD methodology
|
| |-- tdd-workflow/ # TDD methodology
|
||||||
| |-- security-review/ # Security checklist
|
| |-- security-review/ # Security checklist
|
||||||
@@ -172,6 +174,28 @@ everything-claude-code/
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## Ecosystem Tools
|
||||||
|
|
||||||
|
### ecc.tools - Skill Creator
|
||||||
|
|
||||||
|
Automatically generate Claude Code skills from your repository.
|
||||||
|
|
||||||
|
[Install GitHub App](https://github.com/apps/skill-creator) | [ecc.tools](https://ecc.tools)
|
||||||
|
|
||||||
|
Analyzes your repository and creates:
|
||||||
|
- **SKILL.md files** - Ready-to-use skills for Claude Code
|
||||||
|
- **Instinct collections** - For continuous-learning-v2
|
||||||
|
- **Pattern extraction** - Learns from your commit history
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# After installing the GitHub App, skills appear in:
|
||||||
|
~/.claude/skills/generated/
|
||||||
|
```
|
||||||
|
|
||||||
|
Works seamlessly with the `continuous-learning-v2` skill for inherited instincts.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## Installation
|
## Installation
|
||||||
|
|
||||||
### Option 1: Install as Plugin (Recommended)
|
### Option 1: Install as Plugin (Recommended)
|
||||||
|
|||||||
@@ -88,6 +88,18 @@
|
|||||||
],
|
],
|
||||||
"description": "Log PR URL and provide review command after PR creation"
|
"description": "Log PR URL and provide review command after PR creation"
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"matcher": "tool == \"Bash\" && tool_input.command matches \"(npm run build|pnpm build|yarn build)\"",
|
||||||
|
"hooks": [
|
||||||
|
{
|
||||||
|
"type": "command",
|
||||||
|
"command": "node -e \"let d='';process.stdin.on('data',c=>d+=c);process.stdin.on('end',()=>{console.error('[Hook] Build completed - async analysis running in background');console.log(d)})\"",
|
||||||
|
"async": true,
|
||||||
|
"timeout": 30
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"description": "Example: async hook for build analysis (runs in background without blocking)"
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"matcher": "tool == \"Edit\" && tool_input.file_path matches \"\\\\.(ts|tsx|js|jsx)$\"",
|
"matcher": "tool == \"Edit\" && tool_input.file_path matches \"\\\\.(ts|tsx|js|jsx)$\"",
|
||||||
"hooks": [
|
"hooks": [
|
||||||
|
|||||||
@@ -14,6 +14,7 @@ const {
|
|||||||
getSessionsDir,
|
getSessionsDir,
|
||||||
getDateString,
|
getDateString,
|
||||||
getTimeString,
|
getTimeString,
|
||||||
|
getSessionIdShort,
|
||||||
ensureDir,
|
ensureDir,
|
||||||
readFile,
|
readFile,
|
||||||
writeFile,
|
writeFile,
|
||||||
@@ -24,7 +25,9 @@ const {
|
|||||||
async function main() {
|
async function main() {
|
||||||
const sessionsDir = getSessionsDir();
|
const sessionsDir = getSessionsDir();
|
||||||
const today = getDateString();
|
const today = getDateString();
|
||||||
const sessionFile = path.join(sessionsDir, `${today}-session.tmp`);
|
const shortId = getSessionIdShort();
|
||||||
|
// Include session ID in filename for unique per-session tracking
|
||||||
|
const sessionFile = path.join(sessionsDir, `${today}-${shortId}-session.tmp`);
|
||||||
|
|
||||||
ensureDir(sessionsDir);
|
ensureDir(sessionsDir);
|
||||||
|
|
||||||
|
|||||||
@@ -27,7 +27,8 @@ async function main() {
|
|||||||
ensureDir(learnedDir);
|
ensureDir(learnedDir);
|
||||||
|
|
||||||
// Check for recent session files (last 7 days)
|
// Check for recent session files (last 7 days)
|
||||||
const recentSessions = findFiles(sessionsDir, '*.tmp', { maxAge: 7 });
|
// Match both old format (YYYY-MM-DD-session.tmp) and new format (YYYY-MM-DD-shortid-session.tmp)
|
||||||
|
const recentSessions = findFiles(sessionsDir, '*-session.tmp', { maxAge: 7 });
|
||||||
|
|
||||||
if (recentSessions.length > 0) {
|
if (recentSessions.length > 0) {
|
||||||
const latest = recentSessions[0];
|
const latest = recentSessions[0];
|
||||||
|
|||||||
@@ -79,6 +79,19 @@ function getTimeString() {
|
|||||||
return `${hours}:${minutes}`;
|
return `${hours}:${minutes}`;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get short session ID from CLAUDE_SESSION_ID environment variable
|
||||||
|
* Returns the last 8 characters for uniqueness with brevity
|
||||||
|
* @param {string} fallback - Fallback value if no session ID (default: 'default')
|
||||||
|
*/
|
||||||
|
function getSessionIdShort(fallback = 'default') {
|
||||||
|
const sessionId = process.env.CLAUDE_SESSION_ID;
|
||||||
|
if (!sessionId || sessionId.length === 0) {
|
||||||
|
return fallback;
|
||||||
|
}
|
||||||
|
return sessionId.slice(-8);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Get current datetime in YYYY-MM-DD HH:MM:SS format
|
* Get current datetime in YYYY-MM-DD HH:MM:SS format
|
||||||
*/
|
*/
|
||||||
@@ -360,6 +373,7 @@ module.exports = {
|
|||||||
getDateString,
|
getDateString,
|
||||||
getTimeString,
|
getTimeString,
|
||||||
getDateTimeString,
|
getDateTimeString,
|
||||||
|
getSessionIdShort,
|
||||||
|
|
||||||
// File operations
|
// File operations
|
||||||
findFiles,
|
findFiles,
|
||||||
|
|||||||
257
skills/continuous-learning-v2/SKILL.md
Normal file
257
skills/continuous-learning-v2/SKILL.md
Normal file
@@ -0,0 +1,257 @@
|
|||||||
|
---
|
||||||
|
name: continuous-learning-v2
|
||||||
|
description: Instinct-based learning system that observes sessions via hooks, creates atomic instincts with confidence scoring, and evolves them into skills/commands/agents.
|
||||||
|
version: 2.0.0
|
||||||
|
---
|
||||||
|
|
||||||
|
# Continuous Learning v2 - Instinct-Based Architecture
|
||||||
|
|
||||||
|
An advanced learning system that turns your Claude Code sessions into reusable knowledge through atomic "instincts" - small learned behaviors with confidence scoring.
|
||||||
|
|
||||||
|
## What's New in v2
|
||||||
|
|
||||||
|
| Feature | v1 | v2 |
|
||||||
|
|---------|----|----|
|
||||||
|
| Observation | Stop hook (session end) | PreToolUse/PostToolUse (100% reliable) |
|
||||||
|
| Analysis | Main context | Background agent (Haiku) |
|
||||||
|
| Granularity | Full skills | Atomic "instincts" |
|
||||||
|
| Confidence | None | 0.3-0.9 weighted |
|
||||||
|
| Evolution | Direct to skill | Instincts → cluster → skill/command/agent |
|
||||||
|
| Sharing | None | Export/import instincts |
|
||||||
|
|
||||||
|
## The Instinct Model
|
||||||
|
|
||||||
|
An instinct is a small learned behavior:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
---
|
||||||
|
id: prefer-functional-style
|
||||||
|
trigger: "when writing new functions"
|
||||||
|
confidence: 0.7
|
||||||
|
domain: "code-style"
|
||||||
|
source: "session-observation"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Prefer Functional Style
|
||||||
|
|
||||||
|
## Action
|
||||||
|
Use functional patterns over classes when appropriate.
|
||||||
|
|
||||||
|
## Evidence
|
||||||
|
- Observed 5 instances of functional pattern preference
|
||||||
|
- User corrected class-based approach to functional on 2025-01-15
|
||||||
|
```
|
||||||
|
|
||||||
|
**Properties:**
|
||||||
|
- **Atomic** — one trigger, one action
|
||||||
|
- **Confidence-weighted** — 0.3 = tentative, 0.9 = near certain
|
||||||
|
- **Domain-tagged** — code-style, testing, git, debugging, workflow, etc.
|
||||||
|
- **Evidence-backed** — tracks what observations created it
|
||||||
|
|
||||||
|
## How It Works
|
||||||
|
|
||||||
|
```
|
||||||
|
Session Activity
|
||||||
|
│
|
||||||
|
│ Hooks capture prompts + tool use (100% reliable)
|
||||||
|
▼
|
||||||
|
┌─────────────────────────────────────────┐
|
||||||
|
│ observations.jsonl │
|
||||||
|
│ (prompts, tool calls, outcomes) │
|
||||||
|
└─────────────────────────────────────────┘
|
||||||
|
│
|
||||||
|
│ Observer agent reads (background, Haiku)
|
||||||
|
▼
|
||||||
|
┌─────────────────────────────────────────┐
|
||||||
|
│ PATTERN DETECTION │
|
||||||
|
│ • User corrections → instinct │
|
||||||
|
│ • Error resolutions → instinct │
|
||||||
|
│ • Repeated workflows → instinct │
|
||||||
|
└─────────────────────────────────────────┘
|
||||||
|
│
|
||||||
|
│ Creates/updates
|
||||||
|
▼
|
||||||
|
┌─────────────────────────────────────────┐
|
||||||
|
│ instincts/personal/ │
|
||||||
|
│ • prefer-functional.md (0.7) │
|
||||||
|
│ • always-test-first.md (0.9) │
|
||||||
|
│ • use-zod-validation.md (0.6) │
|
||||||
|
└─────────────────────────────────────────┘
|
||||||
|
│
|
||||||
|
│ /evolve clusters
|
||||||
|
▼
|
||||||
|
┌─────────────────────────────────────────┐
|
||||||
|
│ evolved/ │
|
||||||
|
│ • commands/new-feature.md │
|
||||||
|
│ • skills/testing-workflow.md │
|
||||||
|
│ • agents/refactor-specialist.md │
|
||||||
|
└─────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
### 1. Enable Observation Hooks
|
||||||
|
|
||||||
|
Add to your `~/.claude/settings.json`:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"hooks": {
|
||||||
|
"PreToolUse": [{
|
||||||
|
"matcher": "*",
|
||||||
|
"hooks": [{
|
||||||
|
"type": "command",
|
||||||
|
"command": "~/.claude/skills/continuous-learning-v2/hooks/observe.sh pre"
|
||||||
|
}]
|
||||||
|
}],
|
||||||
|
"PostToolUse": [{
|
||||||
|
"matcher": "*",
|
||||||
|
"hooks": [{
|
||||||
|
"type": "command",
|
||||||
|
"command": "~/.claude/skills/continuous-learning-v2/hooks/observe.sh post"
|
||||||
|
}]
|
||||||
|
}]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### 2. Initialize Directory Structure
|
||||||
|
|
||||||
|
```bash
|
||||||
|
mkdir -p ~/.claude/homunculus/{instincts/{personal,inherited},evolved/{agents,skills,commands}}
|
||||||
|
touch ~/.claude/homunculus/observations.jsonl
|
||||||
|
```
|
||||||
|
|
||||||
|
### 3. Run the Observer Agent (Optional)
|
||||||
|
|
||||||
|
The observer can run in the background analyzing observations:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Start background observer
|
||||||
|
~/.claude/skills/continuous-learning-v2/agents/start-observer.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
## Commands
|
||||||
|
|
||||||
|
| Command | Description |
|
||||||
|
|---------|-------------|
|
||||||
|
| `/instinct-status` | Show all learned instincts with confidence |
|
||||||
|
| `/evolve` | Cluster related instincts into skills/commands |
|
||||||
|
| `/instinct-export` | Export instincts for sharing |
|
||||||
|
| `/instinct-import <file>` | Import instincts from others |
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
Edit `config.json`:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"version": "2.0",
|
||||||
|
"observation": {
|
||||||
|
"enabled": true,
|
||||||
|
"store_path": "~/.claude/homunculus/observations.jsonl",
|
||||||
|
"max_file_size_mb": 10,
|
||||||
|
"archive_after_days": 7
|
||||||
|
},
|
||||||
|
"instincts": {
|
||||||
|
"personal_path": "~/.claude/homunculus/instincts/personal/",
|
||||||
|
"inherited_path": "~/.claude/homunculus/instincts/inherited/",
|
||||||
|
"min_confidence": 0.3,
|
||||||
|
"auto_approve_threshold": 0.7,
|
||||||
|
"confidence_decay_rate": 0.05
|
||||||
|
},
|
||||||
|
"observer": {
|
||||||
|
"enabled": true,
|
||||||
|
"model": "haiku",
|
||||||
|
"run_interval_minutes": 5,
|
||||||
|
"patterns_to_detect": [
|
||||||
|
"user_corrections",
|
||||||
|
"error_resolutions",
|
||||||
|
"repeated_workflows",
|
||||||
|
"tool_preferences"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"evolution": {
|
||||||
|
"cluster_threshold": 3,
|
||||||
|
"evolved_path": "~/.claude/homunculus/evolved/"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## File Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
~/.claude/homunculus/
|
||||||
|
├── identity.json # Your profile, technical level
|
||||||
|
├── observations.jsonl # Current session observations
|
||||||
|
├── observations.archive/ # Processed observations
|
||||||
|
├── instincts/
|
||||||
|
│ ├── personal/ # Auto-learned instincts
|
||||||
|
│ └── inherited/ # Imported from others
|
||||||
|
└── evolved/
|
||||||
|
├── agents/ # Generated specialist agents
|
||||||
|
├── skills/ # Generated skills
|
||||||
|
└── commands/ # Generated commands
|
||||||
|
```
|
||||||
|
|
||||||
|
## Integration with Skill Creator
|
||||||
|
|
||||||
|
When you use the [Skill Creator GitHub App](https://skill-creator.app), it now generates **both**:
|
||||||
|
- Traditional SKILL.md files (for backward compatibility)
|
||||||
|
- Instinct collections (for v2 learning system)
|
||||||
|
|
||||||
|
Instincts from repo analysis have `source: "repo-analysis"` and include the source repository URL.
|
||||||
|
|
||||||
|
## Confidence Scoring
|
||||||
|
|
||||||
|
Confidence evolves over time:
|
||||||
|
|
||||||
|
| Score | Meaning | Behavior |
|
||||||
|
|-------|---------|----------|
|
||||||
|
| 0.3 | Tentative | Suggested but not enforced |
|
||||||
|
| 0.5 | Moderate | Applied when relevant |
|
||||||
|
| 0.7 | Strong | Auto-approved for application |
|
||||||
|
| 0.9 | Near-certain | Core behavior |
|
||||||
|
|
||||||
|
**Confidence increases** when:
|
||||||
|
- Pattern is repeatedly observed
|
||||||
|
- User doesn't correct the suggested behavior
|
||||||
|
- Similar instincts from other sources agree
|
||||||
|
|
||||||
|
**Confidence decreases** when:
|
||||||
|
- User explicitly corrects the behavior
|
||||||
|
- Pattern isn't observed for extended periods
|
||||||
|
- Contradicting evidence appears
|
||||||
|
|
||||||
|
## Why Hooks vs Skills for Observation?
|
||||||
|
|
||||||
|
> "v1 relied on skills to observe. Skills are probabilistic—they fire ~50-80% of the time based on Claude's judgment."
|
||||||
|
|
||||||
|
Hooks fire **100% of the time**, deterministically. This means:
|
||||||
|
- Every tool call is observed
|
||||||
|
- No patterns are missed
|
||||||
|
- Learning is comprehensive
|
||||||
|
|
||||||
|
## Backward Compatibility
|
||||||
|
|
||||||
|
v2 is fully compatible with v1:
|
||||||
|
- Existing `~/.claude/skills/learned/` skills still work
|
||||||
|
- Stop hook still runs (but now also feeds into v2)
|
||||||
|
- Gradual migration path: run both in parallel
|
||||||
|
|
||||||
|
## Privacy
|
||||||
|
|
||||||
|
- Observations stay **local** on your machine
|
||||||
|
- Only **instincts** (patterns) can be exported
|
||||||
|
- No actual code or conversation content is shared
|
||||||
|
- You control what gets exported
|
||||||
|
|
||||||
|
## Related
|
||||||
|
|
||||||
|
- [Skill Creator](https://skill-creator.app) - Generate instincts from repo history
|
||||||
|
- [Homunculus](https://github.com/humanplane/homunculus) - Inspiration for v2 architecture
|
||||||
|
- [The Longform Guide](https://x.com/affaanmustafa/status/2014040193557471352) - Continuous learning section
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
*Instinct-based learning: teaching Claude your patterns, one observation at a time.*
|
||||||
137
skills/continuous-learning-v2/agents/observer.md
Normal file
137
skills/continuous-learning-v2/agents/observer.md
Normal file
@@ -0,0 +1,137 @@
|
|||||||
|
---
|
||||||
|
name: observer
|
||||||
|
description: Background agent that analyzes session observations to detect patterns and create instincts. Uses Haiku for cost-efficiency.
|
||||||
|
model: haiku
|
||||||
|
run_mode: background
|
||||||
|
---
|
||||||
|
|
||||||
|
# Observer Agent
|
||||||
|
|
||||||
|
A background agent that analyzes observations from Claude Code sessions to detect patterns and create instincts.
|
||||||
|
|
||||||
|
## When to Run
|
||||||
|
|
||||||
|
- After significant session activity (20+ tool calls)
|
||||||
|
- When user runs `/analyze-patterns`
|
||||||
|
- On a scheduled interval (configurable, default 5 minutes)
|
||||||
|
- When triggered by observation hook (SIGUSR1)
|
||||||
|
|
||||||
|
## Input
|
||||||
|
|
||||||
|
Reads observations from `~/.claude/homunculus/observations.jsonl`:
|
||||||
|
|
||||||
|
```jsonl
|
||||||
|
{"timestamp":"2025-01-22T10:30:00Z","event":"tool_start","session":"abc123","tool":"Edit","input":"..."}
|
||||||
|
{"timestamp":"2025-01-22T10:30:01Z","event":"tool_complete","session":"abc123","tool":"Edit","output":"..."}
|
||||||
|
{"timestamp":"2025-01-22T10:30:05Z","event":"tool_start","session":"abc123","tool":"Bash","input":"npm test"}
|
||||||
|
{"timestamp":"2025-01-22T10:30:10Z","event":"tool_complete","session":"abc123","tool":"Bash","output":"All tests pass"}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Pattern Detection
|
||||||
|
|
||||||
|
Look for these patterns in observations:
|
||||||
|
|
||||||
|
### 1. User Corrections
|
||||||
|
When a user's follow-up message corrects Claude's previous action:
|
||||||
|
- "No, use X instead of Y"
|
||||||
|
- "Actually, I meant..."
|
||||||
|
- Immediate undo/redo patterns
|
||||||
|
|
||||||
|
→ Create instinct: "When doing X, prefer Y"
|
||||||
|
|
||||||
|
### 2. Error Resolutions
|
||||||
|
When an error is followed by a fix:
|
||||||
|
- Tool output contains error
|
||||||
|
- Next few tool calls fix it
|
||||||
|
- Same error type resolved similarly multiple times
|
||||||
|
|
||||||
|
→ Create instinct: "When encountering error X, try Y"
|
||||||
|
|
||||||
|
### 3. Repeated Workflows
|
||||||
|
When the same sequence of tools is used multiple times:
|
||||||
|
- Same tool sequence with similar inputs
|
||||||
|
- File patterns that change together
|
||||||
|
- Time-clustered operations
|
||||||
|
|
||||||
|
→ Create workflow instinct: "When doing X, follow steps Y, Z, W"
|
||||||
|
|
||||||
|
### 4. Tool Preferences
|
||||||
|
When certain tools are consistently preferred:
|
||||||
|
- Always uses Grep before Edit
|
||||||
|
- Prefers Read over Bash cat
|
||||||
|
- Uses specific Bash commands for certain tasks
|
||||||
|
|
||||||
|
→ Create instinct: "When needing X, use tool Y"
|
||||||
|
|
||||||
|
## Output
|
||||||
|
|
||||||
|
Creates/updates instincts in `~/.claude/homunculus/instincts/personal/`:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
---
|
||||||
|
id: prefer-grep-before-edit
|
||||||
|
trigger: "when searching for code to modify"
|
||||||
|
confidence: 0.65
|
||||||
|
domain: "workflow"
|
||||||
|
source: "session-observation"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Prefer Grep Before Edit
|
||||||
|
|
||||||
|
## Action
|
||||||
|
Always use Grep to find the exact location before using Edit.
|
||||||
|
|
||||||
|
## Evidence
|
||||||
|
- Observed 8 times in session abc123
|
||||||
|
- Pattern: Grep → Read → Edit sequence
|
||||||
|
- Last observed: 2025-01-22
|
||||||
|
```
|
||||||
|
|
||||||
|
## Confidence Calculation
|
||||||
|
|
||||||
|
Initial confidence based on observation frequency:
|
||||||
|
- 1-2 observations: 0.3 (tentative)
|
||||||
|
- 3-5 observations: 0.5 (moderate)
|
||||||
|
- 6-10 observations: 0.7 (strong)
|
||||||
|
- 11+ observations: 0.85 (very strong)
|
||||||
|
|
||||||
|
Confidence adjusts over time:
|
||||||
|
- +0.05 for each confirming observation
|
||||||
|
- -0.1 for each contradicting observation
|
||||||
|
- -0.02 per week without observation (decay)
|
||||||
|
|
||||||
|
## Important Guidelines
|
||||||
|
|
||||||
|
1. **Be Conservative**: Only create instincts for clear patterns (3+ observations)
|
||||||
|
2. **Be Specific**: Narrow triggers are better than broad ones
|
||||||
|
3. **Track Evidence**: Always include what observations led to the instinct
|
||||||
|
4. **Respect Privacy**: Never include actual code snippets, only patterns
|
||||||
|
5. **Merge Similar**: If a new instinct is similar to existing, update rather than duplicate
|
||||||
|
|
||||||
|
## Example Analysis Session
|
||||||
|
|
||||||
|
Given observations:
|
||||||
|
```jsonl
|
||||||
|
{"event":"tool_start","tool":"Grep","input":"pattern: useState"}
|
||||||
|
{"event":"tool_complete","tool":"Grep","output":"Found in 3 files"}
|
||||||
|
{"event":"tool_start","tool":"Read","input":"src/hooks/useAuth.ts"}
|
||||||
|
{"event":"tool_complete","tool":"Read","output":"[file content]"}
|
||||||
|
{"event":"tool_start","tool":"Edit","input":"src/hooks/useAuth.ts..."}
|
||||||
|
```
|
||||||
|
|
||||||
|
Analysis:
|
||||||
|
- Detected workflow: Grep → Read → Edit
|
||||||
|
- Frequency: Seen 5 times this session
|
||||||
|
- Create instinct:
|
||||||
|
- trigger: "when modifying code"
|
||||||
|
- action: "Search with Grep, confirm with Read, then Edit"
|
||||||
|
- confidence: 0.6
|
||||||
|
- domain: "workflow"
|
||||||
|
|
||||||
|
## Integration with Skill Creator
|
||||||
|
|
||||||
|
When instincts are imported from Skill Creator (repo analysis), they have:
|
||||||
|
- `source: "repo-analysis"`
|
||||||
|
- `source_repo: "https://github.com/..."`
|
||||||
|
|
||||||
|
These should be treated as team/project conventions with higher initial confidence (0.7+).
|
||||||
134
skills/continuous-learning-v2/agents/start-observer.sh
Executable file
134
skills/continuous-learning-v2/agents/start-observer.sh
Executable file
@@ -0,0 +1,134 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
# Continuous Learning v2 - Observer Agent Launcher
|
||||||
|
#
|
||||||
|
# Starts the background observer agent that analyzes observations
|
||||||
|
# and creates instincts. Uses Haiku model for cost efficiency.
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# start-observer.sh # Start observer in background
|
||||||
|
# start-observer.sh stop # Stop running observer
|
||||||
|
# start-observer.sh status # Check if observer is running
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
CONFIG_DIR="${HOME}/.claude/homunculus"
|
||||||
|
PID_FILE="${CONFIG_DIR}/.observer.pid"
|
||||||
|
LOG_FILE="${CONFIG_DIR}/observer.log"
|
||||||
|
OBSERVATIONS_FILE="${CONFIG_DIR}/observations.jsonl"
|
||||||
|
|
||||||
|
mkdir -p "$CONFIG_DIR"
|
||||||
|
|
||||||
|
case "${1:-start}" in
|
||||||
|
stop)
|
||||||
|
if [ -f "$PID_FILE" ]; then
|
||||||
|
pid=$(cat "$PID_FILE")
|
||||||
|
if kill -0 "$pid" 2>/dev/null; then
|
||||||
|
echo "Stopping observer (PID: $pid)..."
|
||||||
|
kill "$pid"
|
||||||
|
rm -f "$PID_FILE"
|
||||||
|
echo "Observer stopped."
|
||||||
|
else
|
||||||
|
echo "Observer not running (stale PID file)."
|
||||||
|
rm -f "$PID_FILE"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "Observer not running."
|
||||||
|
fi
|
||||||
|
exit 0
|
||||||
|
;;
|
||||||
|
|
||||||
|
status)
|
||||||
|
if [ -f "$PID_FILE" ]; then
|
||||||
|
pid=$(cat "$PID_FILE")
|
||||||
|
if kill -0 "$pid" 2>/dev/null; then
|
||||||
|
echo "Observer is running (PID: $pid)"
|
||||||
|
echo "Log: $LOG_FILE"
|
||||||
|
echo "Observations: $(wc -l < "$OBSERVATIONS_FILE" 2>/dev/null || echo 0) lines"
|
||||||
|
exit 0
|
||||||
|
else
|
||||||
|
echo "Observer not running (stale PID file)"
|
||||||
|
rm -f "$PID_FILE"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "Observer not running"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
;;
|
||||||
|
|
||||||
|
start)
|
||||||
|
# Check if already running
|
||||||
|
if [ -f "$PID_FILE" ]; then
|
||||||
|
pid=$(cat "$PID_FILE")
|
||||||
|
if kill -0 "$pid" 2>/dev/null; then
|
||||||
|
echo "Observer already running (PID: $pid)"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
rm -f "$PID_FILE"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "Starting observer agent..."
|
||||||
|
|
||||||
|
# The observer loop
|
||||||
|
(
|
||||||
|
trap 'rm -f "$PID_FILE"; exit 0' TERM INT
|
||||||
|
|
||||||
|
analyze_observations() {
|
||||||
|
# Only analyze if we have enough observations
|
||||||
|
obs_count=$(wc -l < "$OBSERVATIONS_FILE" 2>/dev/null || echo 0)
|
||||||
|
if [ "$obs_count" -lt 10 ]; then
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "[$(date)] Analyzing $obs_count observations..." >> "$LOG_FILE"
|
||||||
|
|
||||||
|
# Use Claude Code with Haiku to analyze observations
|
||||||
|
# This spawns a quick analysis session
|
||||||
|
if command -v claude &> /dev/null; then
|
||||||
|
claude --model haiku --max-turns 3 --print \
|
||||||
|
"Read $OBSERVATIONS_FILE and identify patterns. If you find 3+ occurrences of the same pattern, create an instinct file in $CONFIG_DIR/instincts/personal/ following the format in the observer agent spec. Be conservative - only create instincts for clear patterns." \
|
||||||
|
>> "$LOG_FILE" 2>&1 || true
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Archive processed observations
|
||||||
|
if [ -f "$OBSERVATIONS_FILE" ]; then
|
||||||
|
archive_dir="${CONFIG_DIR}/observations.archive"
|
||||||
|
mkdir -p "$archive_dir"
|
||||||
|
mv "$OBSERVATIONS_FILE" "$archive_dir/processed-$(date +%Y%m%d-%H%M%S).jsonl"
|
||||||
|
touch "$OBSERVATIONS_FILE"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# Handle SIGUSR1 for on-demand analysis
|
||||||
|
trap 'analyze_observations' USR1
|
||||||
|
|
||||||
|
echo "$$" > "$PID_FILE"
|
||||||
|
echo "[$(date)] Observer started (PID: $$)" >> "$LOG_FILE"
|
||||||
|
|
||||||
|
while true; do
|
||||||
|
# Check every 5 minutes
|
||||||
|
sleep 300
|
||||||
|
|
||||||
|
analyze_observations
|
||||||
|
done
|
||||||
|
) &
|
||||||
|
|
||||||
|
disown
|
||||||
|
|
||||||
|
# Wait a moment for PID file
|
||||||
|
sleep 1
|
||||||
|
|
||||||
|
if [ -f "$PID_FILE" ]; then
|
||||||
|
echo "Observer started (PID: $(cat "$PID_FILE"))"
|
||||||
|
echo "Log: $LOG_FILE"
|
||||||
|
else
|
||||||
|
echo "Failed to start observer"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
;;
|
||||||
|
|
||||||
|
*)
|
||||||
|
echo "Usage: $0 {start|stop|status}"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
186
skills/continuous-learning-v2/commands/evolve.md
Normal file
186
skills/continuous-learning-v2/commands/evolve.md
Normal file
@@ -0,0 +1,186 @@
|
|||||||
|
---
|
||||||
|
name: evolve
|
||||||
|
description: Cluster related instincts into skills, commands, or agents
|
||||||
|
command: /evolve
|
||||||
|
implementation: python3 ~/.claude/skills/continuous-learning-v2/scripts/instinct-cli.py evolve
|
||||||
|
---
|
||||||
|
|
||||||
|
# Evolve Command
|
||||||
|
|
||||||
|
## Implementation
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python3 ~/.claude/skills/continuous-learning-v2/scripts/instinct-cli.py evolve [--generate]
|
||||||
|
```
|
||||||
|
|
||||||
|
Analyzes instincts and clusters related ones into higher-level structures:
|
||||||
|
- **Commands**: When instincts describe user-invoked actions
|
||||||
|
- **Skills**: When instincts describe auto-triggered behaviors
|
||||||
|
- **Agents**: When instincts describe complex, multi-step processes
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
```
|
||||||
|
/evolve # Analyze all instincts and suggest evolutions
|
||||||
|
/evolve --domain testing # Only evolve instincts in testing domain
|
||||||
|
/evolve --dry-run # Show what would be created without creating
|
||||||
|
/evolve --threshold 5 # Require 5+ related instincts to cluster
|
||||||
|
```
|
||||||
|
|
||||||
|
## Evolution Rules
|
||||||
|
|
||||||
|
### → Command (User-Invoked)
|
||||||
|
When instincts describe actions a user would explicitly request:
|
||||||
|
- Multiple instincts about "when user asks to..."
|
||||||
|
- Instincts with triggers like "when creating a new X"
|
||||||
|
- Instincts that follow a repeatable sequence
|
||||||
|
|
||||||
|
Example:
|
||||||
|
- `new-table-step1`: "when adding a database table, create migration"
|
||||||
|
- `new-table-step2`: "when adding a database table, update schema"
|
||||||
|
- `new-table-step3`: "when adding a database table, regenerate types"
|
||||||
|
|
||||||
|
→ Creates: `/new-table` command
|
||||||
|
|
||||||
|
### → Skill (Auto-Triggered)
|
||||||
|
When instincts describe behaviors that should happen automatically:
|
||||||
|
- Pattern-matching triggers
|
||||||
|
- Error handling responses
|
||||||
|
- Code style enforcement
|
||||||
|
|
||||||
|
Example:
|
||||||
|
- `prefer-functional`: "when writing functions, prefer functional style"
|
||||||
|
- `use-immutable`: "when modifying state, use immutable patterns"
|
||||||
|
- `avoid-classes`: "when designing modules, avoid class-based design"
|
||||||
|
|
||||||
|
→ Creates: `functional-patterns` skill
|
||||||
|
|
||||||
|
### → Agent (Needs Depth/Isolation)
|
||||||
|
When instincts describe complex, multi-step processes that benefit from isolation:
|
||||||
|
- Debugging workflows
|
||||||
|
- Refactoring sequences
|
||||||
|
- Research tasks
|
||||||
|
|
||||||
|
Example:
|
||||||
|
- `debug-step1`: "when debugging, first check logs"
|
||||||
|
- `debug-step2`: "when debugging, isolate the failing component"
|
||||||
|
- `debug-step3`: "when debugging, create minimal reproduction"
|
||||||
|
- `debug-step4`: "when debugging, verify fix with test"
|
||||||
|
|
||||||
|
→ Creates: `debugger` agent
|
||||||
|
|
||||||
|
## What to Do
|
||||||
|
|
||||||
|
1. Read all instincts from `~/.claude/homunculus/instincts/`
|
||||||
|
2. Group instincts by:
|
||||||
|
- Domain similarity
|
||||||
|
- Trigger pattern overlap
|
||||||
|
- Action sequence relationship
|
||||||
|
3. For each cluster of 3+ related instincts:
|
||||||
|
- Determine evolution type (command/skill/agent)
|
||||||
|
- Generate the appropriate file
|
||||||
|
- Save to `~/.claude/homunculus/evolved/{commands,skills,agents}/`
|
||||||
|
4. Link evolved structure back to source instincts
|
||||||
|
|
||||||
|
## Output Format
|
||||||
|
|
||||||
|
```
|
||||||
|
🧬 Evolve Analysis
|
||||||
|
==================
|
||||||
|
|
||||||
|
Found 3 clusters ready for evolution:
|
||||||
|
|
||||||
|
## Cluster 1: Database Migration Workflow
|
||||||
|
Instincts: new-table-migration, update-schema, regenerate-types
|
||||||
|
Type: Command
|
||||||
|
Confidence: 85% (based on 12 observations)
|
||||||
|
|
||||||
|
Would create: /new-table command
|
||||||
|
Files:
|
||||||
|
- ~/.claude/homunculus/evolved/commands/new-table.md
|
||||||
|
|
||||||
|
## Cluster 2: Functional Code Style
|
||||||
|
Instincts: prefer-functional, use-immutable, avoid-classes, pure-functions
|
||||||
|
Type: Skill
|
||||||
|
Confidence: 78% (based on 8 observations)
|
||||||
|
|
||||||
|
Would create: functional-patterns skill
|
||||||
|
Files:
|
||||||
|
- ~/.claude/homunculus/evolved/skills/functional-patterns.md
|
||||||
|
|
||||||
|
## Cluster 3: Debugging Process
|
||||||
|
Instincts: debug-check-logs, debug-isolate, debug-reproduce, debug-verify
|
||||||
|
Type: Agent
|
||||||
|
Confidence: 72% (based on 6 observations)
|
||||||
|
|
||||||
|
Would create: debugger agent
|
||||||
|
Files:
|
||||||
|
- ~/.claude/homunculus/evolved/agents/debugger.md
|
||||||
|
|
||||||
|
---
|
||||||
|
Run `/evolve --execute` to create these files.
|
||||||
|
```
|
||||||
|
|
||||||
|
## Flags
|
||||||
|
|
||||||
|
- `--execute`: Actually create the evolved structures (default is preview)
|
||||||
|
- `--dry-run`: Preview without creating
|
||||||
|
- `--domain <name>`: Only evolve instincts in specified domain
|
||||||
|
- `--threshold <n>`: Minimum instincts required to form cluster (default: 3)
|
||||||
|
- `--type <command|skill|agent>`: Only create specified type
|
||||||
|
|
||||||
|
## Generated File Format
|
||||||
|
|
||||||
|
### Command
|
||||||
|
```markdown
|
||||||
|
---
|
||||||
|
name: new-table
|
||||||
|
description: Create a new database table with migration, schema update, and type generation
|
||||||
|
command: /new-table
|
||||||
|
evolved_from:
|
||||||
|
- new-table-migration
|
||||||
|
- update-schema
|
||||||
|
- regenerate-types
|
||||||
|
---
|
||||||
|
|
||||||
|
# New Table Command
|
||||||
|
|
||||||
|
[Generated content based on clustered instincts]
|
||||||
|
|
||||||
|
## Steps
|
||||||
|
1. ...
|
||||||
|
2. ...
|
||||||
|
```
|
||||||
|
|
||||||
|
### Skill
|
||||||
|
```markdown
|
||||||
|
---
|
||||||
|
name: functional-patterns
|
||||||
|
description: Enforce functional programming patterns
|
||||||
|
evolved_from:
|
||||||
|
- prefer-functional
|
||||||
|
- use-immutable
|
||||||
|
- avoid-classes
|
||||||
|
---
|
||||||
|
|
||||||
|
# Functional Patterns Skill
|
||||||
|
|
||||||
|
[Generated content based on clustered instincts]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Agent
|
||||||
|
```markdown
|
||||||
|
---
|
||||||
|
name: debugger
|
||||||
|
description: Systematic debugging agent
|
||||||
|
model: sonnet
|
||||||
|
evolved_from:
|
||||||
|
- debug-check-logs
|
||||||
|
- debug-isolate
|
||||||
|
- debug-reproduce
|
||||||
|
---
|
||||||
|
|
||||||
|
# Debugger Agent
|
||||||
|
|
||||||
|
[Generated content based on clustered instincts]
|
||||||
|
```
|
||||||
91
skills/continuous-learning-v2/commands/instinct-export.md
Normal file
91
skills/continuous-learning-v2/commands/instinct-export.md
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
---
|
||||||
|
name: instinct-export
|
||||||
|
description: Export instincts for sharing with teammates or other projects
|
||||||
|
command: /instinct-export
|
||||||
|
---
|
||||||
|
|
||||||
|
# Instinct Export Command
|
||||||
|
|
||||||
|
Exports instincts to a shareable format. Perfect for:
|
||||||
|
- Sharing with teammates
|
||||||
|
- Transferring to a new machine
|
||||||
|
- Contributing to project conventions
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
```
|
||||||
|
/instinct-export # Export all personal instincts
|
||||||
|
/instinct-export --domain testing # Export only testing instincts
|
||||||
|
/instinct-export --min-confidence 0.7 # Only export high-confidence instincts
|
||||||
|
/instinct-export --output team-instincts.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
## What to Do
|
||||||
|
|
||||||
|
1. Read instincts from `~/.claude/homunculus/instincts/personal/`
|
||||||
|
2. Filter based on flags
|
||||||
|
3. Strip sensitive information:
|
||||||
|
- Remove session IDs
|
||||||
|
- Remove file paths (keep only patterns)
|
||||||
|
- Remove timestamps older than "last week"
|
||||||
|
4. Generate export file
|
||||||
|
|
||||||
|
## Output Format
|
||||||
|
|
||||||
|
Creates a YAML file:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# Instincts Export
|
||||||
|
# Generated: 2025-01-22
|
||||||
|
# Source: personal
|
||||||
|
# Count: 12 instincts
|
||||||
|
|
||||||
|
version: "2.0"
|
||||||
|
exported_by: "continuous-learning-v2"
|
||||||
|
export_date: "2025-01-22T10:30:00Z"
|
||||||
|
|
||||||
|
instincts:
|
||||||
|
- id: prefer-functional-style
|
||||||
|
trigger: "when writing new functions"
|
||||||
|
action: "Use functional patterns over classes"
|
||||||
|
confidence: 0.8
|
||||||
|
domain: code-style
|
||||||
|
observations: 8
|
||||||
|
|
||||||
|
- id: test-first-workflow
|
||||||
|
trigger: "when adding new functionality"
|
||||||
|
action: "Write test first, then implementation"
|
||||||
|
confidence: 0.9
|
||||||
|
domain: testing
|
||||||
|
observations: 12
|
||||||
|
|
||||||
|
- id: grep-before-edit
|
||||||
|
trigger: "when modifying code"
|
||||||
|
action: "Search with Grep, confirm with Read, then Edit"
|
||||||
|
confidence: 0.7
|
||||||
|
domain: workflow
|
||||||
|
observations: 6
|
||||||
|
```
|
||||||
|
|
||||||
|
## Privacy Considerations
|
||||||
|
|
||||||
|
Exports include:
|
||||||
|
- ✅ Trigger patterns
|
||||||
|
- ✅ Actions
|
||||||
|
- ✅ Confidence scores
|
||||||
|
- ✅ Domains
|
||||||
|
- ✅ Observation counts
|
||||||
|
|
||||||
|
Exports do NOT include:
|
||||||
|
- ❌ Actual code snippets
|
||||||
|
- ❌ File paths
|
||||||
|
- ❌ Session transcripts
|
||||||
|
- ❌ Personal identifiers
|
||||||
|
|
||||||
|
## Flags
|
||||||
|
|
||||||
|
- `--domain <name>`: Export only specified domain
|
||||||
|
- `--min-confidence <n>`: Minimum confidence threshold (default: 0.3)
|
||||||
|
- `--output <file>`: Output file path (default: instincts-export-YYYYMMDD.yaml)
|
||||||
|
- `--format <yaml|json|md>`: Output format (default: yaml)
|
||||||
|
- `--include-evidence`: Include evidence text (default: excluded)
|
||||||
135
skills/continuous-learning-v2/commands/instinct-import.md
Normal file
135
skills/continuous-learning-v2/commands/instinct-import.md
Normal file
@@ -0,0 +1,135 @@
|
|||||||
|
---
|
||||||
|
name: instinct-import
|
||||||
|
description: Import instincts from teammates, Skill Creator, or other sources
|
||||||
|
command: /instinct-import
|
||||||
|
implementation: python3 ~/.claude/skills/continuous-learning-v2/scripts/instinct-cli.py import <file>
|
||||||
|
---
|
||||||
|
|
||||||
|
# Instinct Import Command
|
||||||
|
|
||||||
|
## Implementation
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python3 ~/.claude/skills/continuous-learning-v2/scripts/instinct-cli.py import <file-or-url> [--dry-run] [--force] [--min-confidence 0.7]
|
||||||
|
```
|
||||||
|
|
||||||
|
Import instincts from:
|
||||||
|
- Teammates' exports
|
||||||
|
- Skill Creator (repo analysis)
|
||||||
|
- Community collections
|
||||||
|
- Previous machine backups
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
```
|
||||||
|
/instinct-import team-instincts.yaml
|
||||||
|
/instinct-import https://github.com/org/repo/instincts.yaml
|
||||||
|
/instinct-import --from-skill-creator acme/webapp
|
||||||
|
```
|
||||||
|
|
||||||
|
## What to Do
|
||||||
|
|
||||||
|
1. Fetch the instinct file (local path or URL)
|
||||||
|
2. Parse and validate the format
|
||||||
|
3. Check for duplicates with existing instincts
|
||||||
|
4. Merge or add new instincts
|
||||||
|
5. Save to `~/.claude/homunculus/instincts/inherited/`
|
||||||
|
|
||||||
|
## Import Process
|
||||||
|
|
||||||
|
```
|
||||||
|
📥 Importing instincts from: team-instincts.yaml
|
||||||
|
================================================
|
||||||
|
|
||||||
|
Found 12 instincts to import.
|
||||||
|
|
||||||
|
Analyzing conflicts...
|
||||||
|
|
||||||
|
## New Instincts (8)
|
||||||
|
These will be added:
|
||||||
|
✓ use-zod-validation (confidence: 0.7)
|
||||||
|
✓ prefer-named-exports (confidence: 0.65)
|
||||||
|
✓ test-async-functions (confidence: 0.8)
|
||||||
|
...
|
||||||
|
|
||||||
|
## Duplicate Instincts (3)
|
||||||
|
Already have similar instincts:
|
||||||
|
⚠️ prefer-functional-style
|
||||||
|
Local: 0.8 confidence, 12 observations
|
||||||
|
Import: 0.7 confidence
|
||||||
|
→ Keep local (higher confidence)
|
||||||
|
|
||||||
|
⚠️ test-first-workflow
|
||||||
|
Local: 0.75 confidence
|
||||||
|
Import: 0.9 confidence
|
||||||
|
→ Update to import (higher confidence)
|
||||||
|
|
||||||
|
## Conflicting Instincts (1)
|
||||||
|
These contradict local instincts:
|
||||||
|
❌ use-classes-for-services
|
||||||
|
Conflicts with: avoid-classes
|
||||||
|
→ Skip (requires manual resolution)
|
||||||
|
|
||||||
|
---
|
||||||
|
Import 8 new, update 1, skip 3?
|
||||||
|
```
|
||||||
|
|
||||||
|
## Merge Strategies
|
||||||
|
|
||||||
|
### For Duplicates
|
||||||
|
When importing an instinct that matches an existing one:
|
||||||
|
- **Higher confidence wins**: Keep the one with higher confidence
|
||||||
|
- **Merge evidence**: Combine observation counts
|
||||||
|
- **Update timestamp**: Mark as recently validated
|
||||||
|
|
||||||
|
### For Conflicts
|
||||||
|
When importing an instinct that contradicts an existing one:
|
||||||
|
- **Skip by default**: Don't import conflicting instincts
|
||||||
|
- **Flag for review**: Mark both as needing attention
|
||||||
|
- **Manual resolution**: User decides which to keep
|
||||||
|
|
||||||
|
## Source Tracking
|
||||||
|
|
||||||
|
Imported instincts are marked with:
|
||||||
|
```yaml
|
||||||
|
source: "inherited"
|
||||||
|
imported_from: "team-instincts.yaml"
|
||||||
|
imported_at: "2025-01-22T10:30:00Z"
|
||||||
|
original_source: "session-observation" # or "repo-analysis"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Skill Creator Integration
|
||||||
|
|
||||||
|
When importing from Skill Creator:
|
||||||
|
|
||||||
|
```
|
||||||
|
/instinct-import --from-skill-creator acme/webapp
|
||||||
|
```
|
||||||
|
|
||||||
|
This fetches instincts generated from repo analysis:
|
||||||
|
- Source: `repo-analysis`
|
||||||
|
- Higher initial confidence (0.7+)
|
||||||
|
- Linked to source repository
|
||||||
|
|
||||||
|
## Flags
|
||||||
|
|
||||||
|
- `--dry-run`: Preview without importing
|
||||||
|
- `--force`: Import even if conflicts exist
|
||||||
|
- `--merge-strategy <higher|local|import>`: How to handle duplicates
|
||||||
|
- `--from-skill-creator <owner/repo>`: Import from Skill Creator analysis
|
||||||
|
- `--min-confidence <n>`: Only import instincts above threshold
|
||||||
|
|
||||||
|
## Output
|
||||||
|
|
||||||
|
After import:
|
||||||
|
```
|
||||||
|
✅ Import complete!
|
||||||
|
|
||||||
|
Added: 8 instincts
|
||||||
|
Updated: 1 instinct
|
||||||
|
Skipped: 3 instincts (2 duplicates, 1 conflict)
|
||||||
|
|
||||||
|
New instincts saved to: ~/.claude/homunculus/instincts/inherited/
|
||||||
|
|
||||||
|
Run /instinct-status to see all instincts.
|
||||||
|
```
|
||||||
79
skills/continuous-learning-v2/commands/instinct-status.md
Normal file
79
skills/continuous-learning-v2/commands/instinct-status.md
Normal file
@@ -0,0 +1,79 @@
|
|||||||
|
---
|
||||||
|
name: instinct-status
|
||||||
|
description: Show all learned instincts with their confidence levels
|
||||||
|
command: /instinct-status
|
||||||
|
implementation: python3 ~/.claude/skills/continuous-learning-v2/scripts/instinct-cli.py status
|
||||||
|
---
|
||||||
|
|
||||||
|
# Instinct Status Command
|
||||||
|
|
||||||
|
Shows all learned instincts with their confidence scores, grouped by domain.
|
||||||
|
|
||||||
|
## Implementation
|
||||||
|
|
||||||
|
```bash
|
||||||
|
python3 ~/.claude/skills/continuous-learning-v2/scripts/instinct-cli.py status
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
```
|
||||||
|
/instinct-status
|
||||||
|
/instinct-status --domain code-style
|
||||||
|
/instinct-status --low-confidence
|
||||||
|
```
|
||||||
|
|
||||||
|
## What to Do
|
||||||
|
|
||||||
|
1. Read all instinct files from `~/.claude/homunculus/instincts/personal/`
|
||||||
|
2. Read inherited instincts from `~/.claude/homunculus/instincts/inherited/`
|
||||||
|
3. Display them grouped by domain with confidence bars
|
||||||
|
|
||||||
|
## Output Format
|
||||||
|
|
||||||
|
```
|
||||||
|
📊 Instinct Status
|
||||||
|
==================
|
||||||
|
|
||||||
|
## Code Style (4 instincts)
|
||||||
|
|
||||||
|
### prefer-functional-style
|
||||||
|
Trigger: when writing new functions
|
||||||
|
Action: Use functional patterns over classes
|
||||||
|
Confidence: ████████░░ 80%
|
||||||
|
Source: session-observation | Last updated: 2025-01-22
|
||||||
|
|
||||||
|
### use-path-aliases
|
||||||
|
Trigger: when importing modules
|
||||||
|
Action: Use @/ path aliases instead of relative imports
|
||||||
|
Confidence: ██████░░░░ 60%
|
||||||
|
Source: repo-analysis (github.com/acme/webapp)
|
||||||
|
|
||||||
|
## Testing (2 instincts)
|
||||||
|
|
||||||
|
### test-first-workflow
|
||||||
|
Trigger: when adding new functionality
|
||||||
|
Action: Write test first, then implementation
|
||||||
|
Confidence: █████████░ 90%
|
||||||
|
Source: session-observation
|
||||||
|
|
||||||
|
## Workflow (3 instincts)
|
||||||
|
|
||||||
|
### grep-before-edit
|
||||||
|
Trigger: when modifying code
|
||||||
|
Action: Search with Grep, confirm with Read, then Edit
|
||||||
|
Confidence: ███████░░░ 70%
|
||||||
|
Source: session-observation
|
||||||
|
|
||||||
|
---
|
||||||
|
Total: 9 instincts (4 personal, 5 inherited)
|
||||||
|
Observer: Running (last analysis: 5 min ago)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Flags
|
||||||
|
|
||||||
|
- `--domain <name>`: Filter by domain (code-style, testing, git, etc.)
|
||||||
|
- `--low-confidence`: Show only instincts with confidence < 0.5
|
||||||
|
- `--high-confidence`: Show only instincts with confidence >= 0.7
|
||||||
|
- `--source <type>`: Filter by source (session-observation, repo-analysis, inherited)
|
||||||
|
- `--json`: Output as JSON for programmatic use
|
||||||
41
skills/continuous-learning-v2/config.json
Normal file
41
skills/continuous-learning-v2/config.json
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
{
|
||||||
|
"version": "2.0",
|
||||||
|
"observation": {
|
||||||
|
"enabled": true,
|
||||||
|
"store_path": "~/.claude/homunculus/observations.jsonl",
|
||||||
|
"max_file_size_mb": 10,
|
||||||
|
"archive_after_days": 7,
|
||||||
|
"capture_tools": ["Edit", "Write", "Bash", "Read", "Grep", "Glob"],
|
||||||
|
"ignore_tools": ["TodoWrite"]
|
||||||
|
},
|
||||||
|
"instincts": {
|
||||||
|
"personal_path": "~/.claude/homunculus/instincts/personal/",
|
||||||
|
"inherited_path": "~/.claude/homunculus/instincts/inherited/",
|
||||||
|
"min_confidence": 0.3,
|
||||||
|
"auto_approve_threshold": 0.7,
|
||||||
|
"confidence_decay_rate": 0.02,
|
||||||
|
"max_instincts": 100
|
||||||
|
},
|
||||||
|
"observer": {
|
||||||
|
"enabled": false,
|
||||||
|
"model": "haiku",
|
||||||
|
"run_interval_minutes": 5,
|
||||||
|
"min_observations_to_analyze": 20,
|
||||||
|
"patterns_to_detect": [
|
||||||
|
"user_corrections",
|
||||||
|
"error_resolutions",
|
||||||
|
"repeated_workflows",
|
||||||
|
"tool_preferences",
|
||||||
|
"file_patterns"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"evolution": {
|
||||||
|
"cluster_threshold": 3,
|
||||||
|
"evolved_path": "~/.claude/homunculus/evolved/",
|
||||||
|
"auto_evolve": false
|
||||||
|
},
|
||||||
|
"integration": {
|
||||||
|
"skill_creator_api": "https://skill-creator.app/api",
|
||||||
|
"backward_compatible_v1": true
|
||||||
|
}
|
||||||
|
}
|
||||||
137
skills/continuous-learning-v2/hooks/observe.sh
Executable file
137
skills/continuous-learning-v2/hooks/observe.sh
Executable file
@@ -0,0 +1,137 @@
|
|||||||
|
#!/bin/bash
# Continuous Learning v2 - Observation Hook
#
# Captures tool use events for pattern analysis.
# Claude Code passes hook data via stdin as JSON.
#
# Hook config (in ~/.claude/settings.json):
# {
#   "hooks": {
#     "PreToolUse": [{
#       "matcher": "*",
#       "hooks": [{ "type": "command", "command": "~/.claude/skills/continuous-learning-v2/hooks/observe.sh" }]
#     }],
#     "PostToolUse": [{
#       "matcher": "*",
#       "hooks": [{ "type": "command", "command": "~/.claude/skills/continuous-learning-v2/hooks/observe.sh" }]
#     }]
#   }
# }
#
# NOTE: hook payloads are handed to python via environment variables, not
# string interpolation into the python source. Interpolating $INPUT_JSON
# inside a '''...''' literal breaks on quotes/backslashes/''' in tool input
# and lets payload content execute as python — env vars avoid both problems.

set -e

CONFIG_DIR="${HOME}/.claude/homunculus"
OBSERVATIONS_FILE="${CONFIG_DIR}/observations.jsonl"
MAX_FILE_SIZE_MB=10

# Ensure directory exists
mkdir -p "$CONFIG_DIR"

# Kill switch: `touch ~/.claude/homunculus/disabled` turns observation off
if [ -f "$CONFIG_DIR/disabled" ]; then
  exit 0
fi

# Read JSON from stdin (Claude Code hook format)
INPUT_JSON=$(cat)

# Exit if no input
if [ -z "$INPUT_JSON" ]; then
  exit 0
fi

# Parse using python (more reliable than jq for complex JSON).
# Quoted 'EOF' heredoc: nothing in the python source is shell-expanded.
PARSED=$(INPUT_JSON="$INPUT_JSON" python3 << 'EOF'
import json
import os

try:
    data = json.loads(os.environ['INPUT_JSON'])

    # Extract fields - Claude Code hook format
    hook_type = data.get('hook_type', 'unknown')  # PreToolUse or PostToolUse
    tool_name = data.get('tool_name', data.get('tool', 'unknown'))
    tool_input = data.get('tool_input', data.get('input', {}))
    tool_output = data.get('tool_output', data.get('output', ''))
    session_id = data.get('session_id', 'unknown')

    # Truncate large inputs/outputs so the log stays bounded
    if isinstance(tool_input, dict):
        tool_input_str = json.dumps(tool_input)[:5000]
    else:
        tool_input_str = str(tool_input)[:5000]

    if isinstance(tool_output, dict):
        tool_output_str = json.dumps(tool_output)[:5000]
    else:
        tool_output_str = str(tool_output)[:5000]

    # Determine event type
    event = 'tool_start' if 'Pre' in hook_type else 'tool_complete'

    print(json.dumps({
        'parsed': True,
        'event': event,
        'tool': tool_name,
        'input': tool_input_str if event == 'tool_start' else None,
        'output': tool_output_str if event == 'tool_complete' else None,
        'session': session_id
    }))
except Exception as e:
    print(json.dumps({'parsed': False, 'error': str(e)}))
EOF
)

# Check if parsing succeeded
PARSED_OK=$(echo "$PARSED" | python3 -c "import json,sys; print(json.load(sys.stdin).get('parsed', False))")

if [ "$PARSED_OK" != "True" ]; then
  # Fallback: log raw (truncated) input for debugging.
  # printf instead of echo: echo may mangle backslash escapes in the payload.
  timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
  raw=$(printf '%s' "$INPUT_JSON" | python3 -c 'import json,sys; print(json.dumps(sys.stdin.read()[:1000]))')
  echo "{\"timestamp\":\"$timestamp\",\"event\":\"parse_error\",\"raw\":$raw}" >> "$OBSERVATIONS_FILE"
  exit 0
fi

# Archive if file too large (simple size-based rotation)
if [ -f "$OBSERVATIONS_FILE" ]; then
  file_size_mb=$(du -m "$OBSERVATIONS_FILE" 2>/dev/null | cut -f1)
  if [ "${file_size_mb:-0}" -ge "$MAX_FILE_SIZE_MB" ]; then
    archive_dir="${CONFIG_DIR}/observations.archive"
    mkdir -p "$archive_dir"
    mv "$OBSERVATIONS_FILE" "$archive_dir/observations-$(date +%Y%m%d-%H%M%S).jsonl"
  fi
fi

# Build and write observation (again: payload via env, quoted heredoc)
timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")

PARSED="$PARSED" TS="$timestamp" OBS_FILE="$OBSERVATIONS_FILE" python3 << 'EOF'
import json
import os

parsed = json.loads(os.environ['PARSED'])
observation = {
    'timestamp': os.environ['TS'],
    'event': parsed['event'],
    'tool': parsed['tool'],
    'session': parsed['session']
}

if parsed['input']:
    observation['input'] = parsed['input']
if parsed['output']:
    observation['output'] = parsed['output']

with open(os.environ['OBS_FILE'], 'a') as f:
    f.write(json.dumps(observation) + '\n')
EOF

# Signal observer if running (SIGUSR1 = "new observations available")
OBSERVER_PID_FILE="${CONFIG_DIR}/.observer.pid"
if [ -f "$OBSERVER_PID_FILE" ]; then
  observer_pid=$(cat "$OBSERVER_PID_FILE")
  if kill -0 "$observer_pid" 2>/dev/null; then
    kill -USR1 "$observer_pid" 2>/dev/null || true
  fi
fi

exit 0
|
||||||
494
skills/continuous-learning-v2/scripts/instinct-cli.py
Executable file
494
skills/continuous-learning-v2/scripts/instinct-cli.py
Executable file
@@ -0,0 +1,494 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Instinct CLI - Manage instincts for Continuous Learning v2
|
||||||
|
|
||||||
|
Commands:
|
||||||
|
status - Show all instincts and their status
|
||||||
|
import - Import instincts from file or URL
|
||||||
|
export - Export instincts to file
|
||||||
|
evolve - Cluster instincts into skills/commands/agents
|
||||||
|
"""
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import re
|
||||||
|
import urllib.request
|
||||||
|
from pathlib import Path
|
||||||
|
from datetime import datetime
|
||||||
|
from collections import defaultdict
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
# ─────────────────────────────────────────────
# Configuration
# ─────────────────────────────────────────────

# Root of all continuous-learning state.
HOMUNCULUS_DIR = Path.home() / ".claude" / "homunculus"
INSTINCTS_DIR = HOMUNCULUS_DIR / "instincts"
# Instincts learned from this user's own sessions.
PERSONAL_DIR = INSTINCTS_DIR / "personal"
# Instincts imported from teammates / repo analysis.
INHERITED_DIR = INSTINCTS_DIR / "inherited"
# Clustered instincts promoted to skills/commands/agents.
EVOLVED_DIR = HOMUNCULUS_DIR / "evolved"
# Raw tool-use event log appended to by the observe hook.
OBSERVATIONS_FILE = HOMUNCULUS_DIR / "observations.jsonl"

# Ensure directories exist.
# NOTE: import-time side effect — any command can assume the tree is present.
for d in [PERSONAL_DIR, INHERITED_DIR, EVOLVED_DIR / "skills", EVOLVED_DIR / "commands", EVOLVED_DIR / "agents"]:
    d.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
|
||||||
|
# ─────────────────────────────────────────────
|
||||||
|
# Instinct Parser
|
||||||
|
# ─────────────────────────────────────────────
|
||||||
|
|
||||||
|
def parse_instinct_file(content: str) -> list[dict]:
    """Parse a multi-document instinct file.

    Each instinct is a YAML-like frontmatter block followed by a
    free-form markdown body:

        ---
        id: some-id
        confidence: 0.8
        ---
        body text ...

    Returns a list of dicts (one per instinct) containing the
    frontmatter keys plus a 'content' key with the stripped body.
    Entries without an 'id' are discarded.

    BUG FIX: the previous version appended the instinct (and reset the
    accumulator) at the *closing* '---', before its body had been read,
    so 'content' was always empty and the markdown body was dropped.
    An instinct is now finalized only at the next opening '---' or EOF.
    """
    instincts = []
    current = {}
    in_frontmatter = False
    content_lines = []

    def flush():
        # Attach the accumulated body to the pending instinct and store it.
        if current:
            current['content'] = '\n'.join(content_lines).strip()
            instincts.append(current)

    for line in content.split('\n'):
        if line.strip() == '---':
            if in_frontmatter:
                # Closing delimiter: keys parsed, body lines follow.
                in_frontmatter = False
            else:
                # Opening delimiter: finalize the previous instinct first.
                flush()
                current = {}
                content_lines = []
                in_frontmatter = True
        elif in_frontmatter:
            # Parse YAML-like "key: value" frontmatter lines.
            if ':' in line:
                key, value = line.split(':', 1)
                key = key.strip()
                value = value.strip().strip('"').strip("'")
                if key == 'confidence':
                    current[key] = float(value)
                else:
                    current[key] = value
        else:
            # Body text (also swallows any pre-frontmatter header comments,
            # which are dropped because no instinct is pending yet).
            content_lines.append(line)

    # Don't forget the last instinct
    flush()

    return [i for i in instincts if i.get('id')]
|
||||||
|
|
||||||
|
|
||||||
|
def load_all_instincts() -> list[dict]:
    """Collect every instinct defined under the personal and inherited dirs.

    Each returned dict gains two bookkeeping keys:
      _source_file - absolute path of the .yaml file it came from
      _source_type - 'personal' or 'inherited' (the directory name)

    Files that fail to parse are reported on stderr and skipped.
    """
    collected: list[dict] = []

    for source_dir in (PERSONAL_DIR, INHERITED_DIR):
        if not source_dir.exists():
            continue
        for yaml_path in source_dir.glob("*.yaml"):
            try:
                parsed = parse_instinct_file(yaml_path.read_text())
                for inst in parsed:
                    inst['_source_file'] = str(yaml_path)
                    inst['_source_type'] = source_dir.name
                collected.extend(parsed)
            except Exception as exc:
                print(f"Warning: Failed to parse {yaml_path}: {exc}", file=sys.stderr)

    return collected
|
||||||
|
|
||||||
|
|
||||||
|
# ─────────────────────────────────────────────
|
||||||
|
# Status Command
|
||||||
|
# ─────────────────────────────────────────────
|
||||||
|
|
||||||
|
def cmd_status(args):
    """Show status of all instincts.

    Prints a header with totals, a personal/inherited breakdown, then every
    instinct grouped by domain (highest confidence first) with a unicode
    confidence bar, and finally stats about the observation log.

    Fixes vs. previous version: the observation-log line count used a bare
    open() whose handle was never closed; it now uses a context manager.
    """
    instincts = load_all_instincts()

    if not instincts:
        print("No instincts found.")
        print("\nInstinct directories:")
        print(f"  Personal: {PERSONAL_DIR}")
        print(f"  Inherited: {INHERITED_DIR}")
        return

    # Group by domain
    by_domain = defaultdict(list)
    for inst in instincts:
        domain = inst.get('domain', 'general')
        by_domain[domain].append(inst)

    # Print header
    print(f"\n{'='*60}")
    print(f"  INSTINCT STATUS - {len(instincts)} total")
    print(f"{'='*60}\n")

    # Summary by source
    personal = [i for i in instincts if i.get('_source_type') == 'personal']
    inherited = [i for i in instincts if i.get('_source_type') == 'inherited']
    print(f"  Personal: {len(personal)}")
    print(f"  Inherited: {len(inherited)}")
    print()

    # Print by domain, highest confidence first
    for domain in sorted(by_domain.keys()):
        domain_instincts = by_domain[domain]
        print(f"## {domain.upper()} ({len(domain_instincts)})")
        print()

        for inst in sorted(domain_instincts, key=lambda x: -x.get('confidence', 0.5)):
            conf = inst.get('confidence', 0.5)
            # 10-char bar: filled blocks for confidence, light blocks for the rest
            conf_bar = '█' * int(conf * 10) + '░' * (10 - int(conf * 10))
            trigger = inst.get('trigger', 'unknown trigger')

            print(f"  {conf_bar} {int(conf*100):3d}% {inst.get('id', 'unnamed')}")
            print(f"    trigger: {trigger}")

            # Extract the first line of the "## Action" section from the body
            content = inst.get('content', '')
            action_match = re.search(r'## Action\s*\n\s*(.+?)(?:\n\n|\n##|$)', content, re.DOTALL)
            if action_match:
                action = action_match.group(1).strip().split('\n')[0]
                print(f"    action: {action[:60]}{'...' if len(action) > 60 else ''}")

        print()

    # Observations stats (context manager so the handle is closed promptly)
    if OBSERVATIONS_FILE.exists():
        with OBSERVATIONS_FILE.open() as f:
            obs_count = sum(1 for _ in f)
        print("─────────────────────────────────────────────────────────")
        print(f"  Observations: {obs_count} events logged")
        print(f"  File: {OBSERVATIONS_FILE}")

    print(f"\n{'='*60}\n")
|
||||||
|
|
||||||
|
|
||||||
|
# ─────────────────────────────────────────────
|
||||||
|
# Import Command
|
||||||
|
# ─────────────────────────────────────────────
|
||||||
|
|
||||||
|
def cmd_import(args):
    """Import instincts from file or URL.

    Fetches an instinct bundle (local path or http(s) URL), diffs it
    against locally-known instincts, asks for confirmation (unless
    --force), and writes the accepted entries to the inherited
    directory as a new timestamped .yaml file.

    Returns 0 on success / nothing-to-do, 1 on fetch or parse failure.
    """
    source = args.source

    # Fetch content: remote URL or local filesystem path
    if source.startswith('http://') or source.startswith('https://'):
        print(f"Fetching from URL: {source}")
        try:
            with urllib.request.urlopen(source) as response:
                content = response.read().decode('utf-8')
        except Exception as e:
            print(f"Error fetching URL: {e}", file=sys.stderr)
            return 1
    else:
        path = Path(source).expanduser()
        if not path.exists():
            print(f"File not found: {path}", file=sys.stderr)
            return 1
        content = path.read_text()

    # Parse instincts
    new_instincts = parse_instinct_file(content)
    if not new_instincts:
        print("No valid instincts found in source.")
        return 1

    print(f"\nFound {len(new_instincts)} instincts to import.\n")

    # Load existing instincts so incoming ones can be diffed by id
    existing = load_all_instincts()
    existing_ids = {i.get('id') for i in existing}

    # Categorize: brand new / higher-confidence replacement / skip
    to_add = []
    duplicates = []
    to_update = []

    for inst in new_instincts:
        inst_id = inst.get('id')
        if inst_id in existing_ids:
            # Check if we should update: import wins only on strictly
            # higher confidence, otherwise the local copy is kept.
            existing_inst = next((e for e in existing if e.get('id') == inst_id), None)
            if existing_inst:
                if inst.get('confidence', 0) > existing_inst.get('confidence', 0):
                    to_update.append(inst)
                else:
                    duplicates.append(inst)
        else:
            to_add.append(inst)

    # Filter by minimum confidence (`or 0.0` keeps unset/None as no-op)
    min_conf = args.min_confidence or 0.0
    to_add = [i for i in to_add if i.get('confidence', 0.5) >= min_conf]
    to_update = [i for i in to_update if i.get('confidence', 0.5) >= min_conf]

    # Display summary
    if to_add:
        print(f"NEW ({len(to_add)}):")
        for inst in to_add:
            print(f"  + {inst.get('id')} (confidence: {inst.get('confidence', 0.5):.2f})")

    if to_update:
        print(f"\nUPDATE ({len(to_update)}):")
        for inst in to_update:
            print(f"  ~ {inst.get('id')} (confidence: {inst.get('confidence', 0.5):.2f})")

    if duplicates:
        # Only the first few are listed to keep the summary short
        print(f"\nSKIP ({len(duplicates)} - already exists with equal/higher confidence):")
        for inst in duplicates[:5]:
            print(f"  - {inst.get('id')}")
        if len(duplicates) > 5:
            print(f"  ... and {len(duplicates) - 5} more")

    if args.dry_run:
        print("\n[DRY RUN] No changes made.")
        return 0

    if not to_add and not to_update:
        print("\nNothing to import.")
        return 0

    # Confirm interactively unless --force was given
    if not args.force:
        response = input(f"\nImport {len(to_add)} new, update {len(to_update)}? [y/N] ")
        if response.lower() != 'y':
            print("Cancelled.")
            return 0

    # Write to inherited directory as a single timestamped bundle file.
    # NOTE(review): updates are appended here rather than rewriting the
    # original file; load_all_instincts() will then see both copies —
    # presumably the higher-confidence one wins downstream; verify.
    timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
    source_name = Path(source).stem if not source.startswith('http') else 'web-import'
    output_file = INHERITED_DIR / f"{source_name}-{timestamp}.yaml"

    all_to_write = to_add + to_update
    output_content = f"# Imported from {source}\n# Date: {datetime.now().isoformat()}\n\n"

    # Re-serialize each instinct in the frontmatter + body format
    for inst in all_to_write:
        output_content += "---\n"
        output_content += f"id: {inst.get('id')}\n"
        output_content += f"trigger: \"{inst.get('trigger', 'unknown')}\"\n"
        output_content += f"confidence: {inst.get('confidence', 0.5)}\n"
        output_content += f"domain: {inst.get('domain', 'general')}\n"
        output_content += f"source: inherited\n"
        output_content += f"imported_from: \"{source}\"\n"
        if inst.get('source_repo'):
            output_content += f"source_repo: {inst.get('source_repo')}\n"
        output_content += "---\n\n"
        output_content += inst.get('content', '') + "\n\n"

    output_file.write_text(output_content)

    print(f"\n✅ Import complete!")
    print(f"  Added: {len(to_add)}")
    print(f"  Updated: {len(to_update)}")
    print(f"  Saved to: {output_file}")

    return 0
|
||||||
|
|
||||||
|
|
||||||
|
# ─────────────────────────────────────────────
|
||||||
|
# Export Command
|
||||||
|
# ─────────────────────────────────────────────
|
||||||
|
|
||||||
|
def cmd_export(args):
    """Export instincts to a file (or stdout) in the shared bundle format.

    Optional filters: --domain restricts to one domain; --min-confidence
    drops instincts below the threshold. Returns 1 when nothing matches,
    0 on success.
    """
    instincts = load_all_instincts()

    if not instincts:
        print("No instincts to export.")
        return 1

    # Apply the optional filters
    if args.domain:
        instincts = [i for i in instincts if i.get('domain') == args.domain]
    if args.min_confidence:
        instincts = [i for i in instincts if i.get('confidence', 0.5) >= args.min_confidence]

    if not instincts:
        print("No instincts match the criteria.")
        return 1

    # Serialize: header comment block, then one frontmatter+body per instinct
    chunks = [
        f"# Instincts export\n# Date: {datetime.now().isoformat()}\n# Total: {len(instincts)}\n\n"
    ]
    for inst in instincts:
        front = ["---"]
        for key in ('id', 'trigger', 'confidence', 'domain', 'source', 'source_repo'):
            value = inst.get(key)
            if value:
                # Trigger text is quoted; other values are written bare
                front.append(f'{key}: "{value}"' if key == 'trigger' else f"{key}: {value}")
        front.append("---")
        chunks.append('\n'.join(front) + "\n\n" + inst.get('content', '') + "\n\n")
    output = ''.join(chunks)

    # Write to file or stdout
    if args.output:
        Path(args.output).write_text(output)
        print(f"Exported {len(instincts)} instincts to {args.output}")
    else:
        print(output)

    return 0
|
||||||
|
|
||||||
|
|
||||||
|
# ─────────────────────────────────────────────
|
||||||
|
# Evolve Command
|
||||||
|
# ─────────────────────────────────────────────
|
||||||
|
|
||||||
|
def cmd_evolve(args):
|
||||||
|
"""Analyze instincts and suggest evolutions to skills/commands/agents."""
|
||||||
|
instincts = load_all_instincts()
|
||||||
|
|
||||||
|
if len(instincts) < 3:
|
||||||
|
print("Need at least 3 instincts to analyze patterns.")
|
||||||
|
print(f"Currently have: {len(instincts)}")
|
||||||
|
return 1
|
||||||
|
|
||||||
|
print(f"\n{'='*60}")
|
||||||
|
print(f" EVOLVE ANALYSIS - {len(instincts)} instincts")
|
||||||
|
print(f"{'='*60}\n")
|
||||||
|
|
||||||
|
# Group by domain
|
||||||
|
by_domain = defaultdict(list)
|
||||||
|
for inst in instincts:
|
||||||
|
domain = inst.get('domain', 'general')
|
||||||
|
by_domain[domain].append(inst)
|
||||||
|
|
||||||
|
# High-confidence instincts by domain (candidates for skills)
|
||||||
|
high_conf = [i for i in instincts if i.get('confidence', 0) >= 0.8]
|
||||||
|
print(f"High confidence instincts (>=80%): {len(high_conf)}")
|
||||||
|
|
||||||
|
# Find clusters (instincts with similar triggers)
|
||||||
|
trigger_clusters = defaultdict(list)
|
||||||
|
for inst in instincts:
|
||||||
|
trigger = inst.get('trigger', '')
|
||||||
|
# Normalize trigger
|
||||||
|
trigger_key = trigger.lower()
|
||||||
|
for keyword in ['when', 'creating', 'writing', 'adding', 'implementing', 'testing']:
|
||||||
|
trigger_key = trigger_key.replace(keyword, '').strip()
|
||||||
|
trigger_clusters[trigger_key].append(inst)
|
||||||
|
|
||||||
|
# Find clusters with 3+ instincts (good skill candidates)
|
||||||
|
skill_candidates = []
|
||||||
|
for trigger, cluster in trigger_clusters.items():
|
||||||
|
if len(cluster) >= 2:
|
||||||
|
avg_conf = sum(i.get('confidence', 0.5) for i in cluster) / len(cluster)
|
||||||
|
skill_candidates.append({
|
||||||
|
'trigger': trigger,
|
||||||
|
'instincts': cluster,
|
||||||
|
'avg_confidence': avg_conf,
|
||||||
|
'domains': list(set(i.get('domain', 'general') for i in cluster))
|
||||||
|
})
|
||||||
|
|
||||||
|
# Sort by cluster size and confidence
|
||||||
|
skill_candidates.sort(key=lambda x: (-len(x['instincts']), -x['avg_confidence']))
|
||||||
|
|
||||||
|
print(f"\nPotential skill clusters found: {len(skill_candidates)}")
|
||||||
|
|
||||||
|
if skill_candidates:
|
||||||
|
print(f"\n## SKILL CANDIDATES\n")
|
||||||
|
for i, cand in enumerate(skill_candidates[:5], 1):
|
||||||
|
print(f"{i}. Cluster: \"{cand['trigger']}\"")
|
||||||
|
print(f" Instincts: {len(cand['instincts'])}")
|
||||||
|
print(f" Avg confidence: {cand['avg_confidence']:.0%}")
|
||||||
|
print(f" Domains: {', '.join(cand['domains'])}")
|
||||||
|
print(f" Instincts:")
|
||||||
|
for inst in cand['instincts'][:3]:
|
||||||
|
print(f" - {inst.get('id')}")
|
||||||
|
print()
|
||||||
|
|
||||||
|
# Command candidates (workflow instincts with high confidence)
|
||||||
|
workflow_instincts = [i for i in instincts if i.get('domain') == 'workflow' and i.get('confidence', 0) >= 0.7]
|
||||||
|
if workflow_instincts:
|
||||||
|
print(f"\n## COMMAND CANDIDATES ({len(workflow_instincts)})\n")
|
||||||
|
for inst in workflow_instincts[:5]:
|
||||||
|
trigger = inst.get('trigger', 'unknown')
|
||||||
|
# Suggest command name
|
||||||
|
cmd_name = trigger.replace('when ', '').replace('implementing ', '').replace('a ', '')
|
||||||
|
cmd_name = cmd_name.replace(' ', '-')[:20]
|
||||||
|
print(f" /{cmd_name}")
|
||||||
|
print(f" From: {inst.get('id')}")
|
||||||
|
print(f" Confidence: {inst.get('confidence', 0.5):.0%}")
|
||||||
|
print()
|
||||||
|
|
||||||
|
# Agent candidates (complex multi-step patterns)
|
||||||
|
agent_candidates = [c for c in skill_candidates if len(c['instincts']) >= 3 and c['avg_confidence'] >= 0.75]
|
||||||
|
if agent_candidates:
|
||||||
|
print(f"\n## AGENT CANDIDATES ({len(agent_candidates)})\n")
|
||||||
|
for cand in agent_candidates[:3]:
|
||||||
|
agent_name = cand['trigger'].replace(' ', '-')[:20] + '-agent'
|
||||||
|
print(f" {agent_name}")
|
||||||
|
print(f" Covers {len(cand['instincts'])} instincts")
|
||||||
|
print(f" Avg confidence: {cand['avg_confidence']:.0%}")
|
||||||
|
print()
|
||||||
|
|
||||||
|
if args.generate:
|
||||||
|
print("\n[Would generate evolved structures here]")
|
||||||
|
print(" Skills would be saved to:", EVOLVED_DIR / "skills")
|
||||||
|
print(" Commands would be saved to:", EVOLVED_DIR / "commands")
|
||||||
|
print(" Agents would be saved to:", EVOLVED_DIR / "agents")
|
||||||
|
|
||||||
|
print(f"\n{'='*60}\n")
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
# ─────────────────────────────────────────────
|
||||||
|
# Main
|
||||||
|
# ─────────────────────────────────────────────
|
||||||
|
|
||||||
|
def main():
|
||||||
|
parser = argparse.ArgumentParser(description='Instinct CLI for Continuous Learning v2')
|
||||||
|
subparsers = parser.add_subparsers(dest='command', help='Available commands')
|
||||||
|
|
||||||
|
# Status
|
||||||
|
status_parser = subparsers.add_parser('status', help='Show instinct status')
|
||||||
|
|
||||||
|
# Import
|
||||||
|
import_parser = subparsers.add_parser('import', help='Import instincts')
|
||||||
|
import_parser.add_argument('source', help='File path or URL')
|
||||||
|
import_parser.add_argument('--dry-run', action='store_true', help='Preview without importing')
|
||||||
|
import_parser.add_argument('--force', action='store_true', help='Skip confirmation')
|
||||||
|
import_parser.add_argument('--min-confidence', type=float, help='Minimum confidence threshold')
|
||||||
|
|
||||||
|
# Export
|
||||||
|
export_parser = subparsers.add_parser('export', help='Export instincts')
|
||||||
|
export_parser.add_argument('--output', '-o', help='Output file')
|
||||||
|
export_parser.add_argument('--domain', help='Filter by domain')
|
||||||
|
export_parser.add_argument('--min-confidence', type=float, help='Minimum confidence')
|
||||||
|
|
||||||
|
# Evolve
|
||||||
|
evolve_parser = subparsers.add_parser('evolve', help='Analyze and evolve instincts')
|
||||||
|
evolve_parser.add_argument('--generate', action='store_true', help='Generate evolved structures')
|
||||||
|
|
||||||
|
args = parser.parse_args()
|
||||||
|
|
||||||
|
if args.command == 'status':
|
||||||
|
return cmd_status(args)
|
||||||
|
elif args.command == 'import':
|
||||||
|
return cmd_import(args)
|
||||||
|
elif args.command == 'export':
|
||||||
|
return cmd_export(args)
|
||||||
|
elif args.command == 'evolve':
|
||||||
|
return cmd_evolve(args)
|
||||||
|
else:
|
||||||
|
parser.print_help()
|
||||||
|
return 1
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
sys.exit(main() or 0)
|
||||||
@@ -78,3 +78,33 @@ Add to your `~/.claude/settings.json`:
|
|||||||
|
|
||||||
- [The Longform Guide](https://x.com/affaanmustafa/status/2014040193557471352) - Section on continuous learning
|
- [The Longform Guide](https://x.com/affaanmustafa/status/2014040193557471352) - Section on continuous learning
|
||||||
- `/learn` command - Manual pattern extraction mid-session
|
- `/learn` command - Manual pattern extraction mid-session
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Comparison Notes (Research: Jan 2025)
|
||||||
|
|
||||||
|
### vs Homunculus (github.com/humanplane/homunculus)
|
||||||
|
|
||||||
|
Homunculus v2 takes a more sophisticated approach:
|
||||||
|
|
||||||
|
| Feature | Our Approach | Homunculus v2 |
|
||||||
|
|---------|--------------|---------------|
|
||||||
|
| Observation | Stop hook (end of session) | PreToolUse/PostToolUse hooks (100% reliable) |
|
||||||
|
| Analysis | Main context | Background agent (Haiku) |
|
||||||
|
| Granularity | Full skills | Atomic "instincts" |
|
||||||
|
| Confidence | None | 0.3-0.9 weighted |
|
||||||
|
| Evolution | Direct to skill | Instincts → cluster → skill/command/agent |
|
||||||
|
| Sharing | None | Export/import instincts |
|
||||||
|
|
||||||
|
**Key insight from homunculus:**
|
||||||
|
> "v1 relied on skills to observe. Skills are probabilistic—they fire ~50-80% of the time. v2 uses hooks for observation (100% reliable) and instincts as the atomic unit of learned behavior."
|
||||||
|
|
||||||
|
### Potential v2 Enhancements
|
||||||
|
|
||||||
|
1. **Instinct-based learning** - Smaller, atomic behaviors with confidence scoring
|
||||||
|
2. **Background observer** - Haiku agent analyzing in parallel
|
||||||
|
3. **Confidence decay** - Instincts lose confidence if contradicted
|
||||||
|
4. **Domain tagging** - code-style, testing, git, debugging, etc.
|
||||||
|
5. **Evolution path** - Cluster related instincts into skills/commands
|
||||||
|
|
||||||
|
See: `/Users/affoon/Documents/tasks/12-continuous-learning-v2.md` for full spec.
|
||||||
|
|||||||
202
skills/iterative-retrieval/SKILL.md
Normal file
202
skills/iterative-retrieval/SKILL.md
Normal file
@@ -0,0 +1,202 @@
|
|||||||
|
---
|
||||||
|
name: iterative-retrieval
|
||||||
|
description: Pattern for progressively refining context retrieval to solve the subagent context problem
|
||||||
|
---
|
||||||
|
|
||||||
|
# Iterative Retrieval Pattern
|
||||||
|
|
||||||
|
Solves the "context problem" in multi-agent workflows where subagents don't know what context they need until they start working.
|
||||||
|
|
||||||
|
## The Problem
|
||||||
|
|
||||||
|
Subagents are spawned with limited context. They don't know:
|
||||||
|
- Which files contain relevant code
|
||||||
|
- What patterns exist in the codebase
|
||||||
|
- What terminology the project uses
|
||||||
|
|
||||||
|
Standard approaches fail:
|
||||||
|
- **Send everything**: Exceeds context limits
|
||||||
|
- **Send nothing**: Agent lacks critical information
|
||||||
|
- **Guess what's needed**: Often wrong
|
||||||
|
|
||||||
|
## The Solution: Iterative Retrieval
|
||||||
|
|
||||||
|
A 4-phase loop that progressively refines context:
|
||||||
|
|
||||||
|
```
|
||||||
|
┌─────────────────────────────────────────────┐
|
||||||
|
│ │
|
||||||
|
│ ┌──────────┐ ┌──────────┐ │
|
||||||
|
│ │ DISPATCH │─────▶│ EVALUATE │ │
|
||||||
|
│ └──────────┘ └──────────┘ │
|
||||||
|
│ ▲ │ │
|
||||||
|
│ │ ▼ │
|
||||||
|
│ ┌──────────┐ ┌──────────┐ │
|
||||||
|
│ │ LOOP │◀─────│ REFINE │ │
|
||||||
|
│ └──────────┘ └──────────┘ │
|
||||||
|
│ │
|
||||||
|
│ Max 3 cycles, then proceed │
|
||||||
|
└─────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 1: DISPATCH
|
||||||
|
|
||||||
|
Initial broad query to gather candidate files:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
// Start with high-level intent
|
||||||
|
const initialQuery = {
|
||||||
|
patterns: ['src/**/*.ts', 'lib/**/*.ts'],
|
||||||
|
keywords: ['authentication', 'user', 'session'],
|
||||||
|
excludes: ['*.test.ts', '*.spec.ts']
|
||||||
|
};
|
||||||
|
|
||||||
|
// Dispatch to retrieval agent
|
||||||
|
const candidates = await retrieveFiles(initialQuery);
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 2: EVALUATE
|
||||||
|
|
||||||
|
Assess retrieved content for relevance:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function evaluateRelevance(files, task) {
|
||||||
|
return files.map(file => ({
|
||||||
|
path: file.path,
|
||||||
|
relevance: scoreRelevance(file.content, task),
|
||||||
|
reason: explainRelevance(file.content, task),
|
||||||
|
missingContext: identifyGaps(file.content, task)
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Scoring criteria:
|
||||||
|
- **High (0.8-1.0)**: Directly implements target functionality
|
||||||
|
- **Medium (0.5-0.7)**: Contains related patterns or types
|
||||||
|
- **Low (0.2-0.4)**: Tangentially related
|
||||||
|
- **None (0-0.2)**: Not relevant, exclude
|
||||||
|
|
||||||
|
### Phase 3: REFINE
|
||||||
|
|
||||||
|
Update search criteria based on evaluation:
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
function refineQuery(evaluation, previousQuery) {
|
||||||
|
return {
|
||||||
|
// Add new patterns discovered in high-relevance files
|
||||||
|
patterns: [...previousQuery.patterns, ...extractPatterns(evaluation)],
|
||||||
|
|
||||||
|
// Add terminology found in codebase
|
||||||
|
keywords: [...previousQuery.keywords, ...extractKeywords(evaluation)],
|
||||||
|
|
||||||
|
// Exclude confirmed irrelevant paths
|
||||||
|
excludes: [...previousQuery.excludes, ...evaluation
|
||||||
|
.filter(e => e.relevance < 0.2)
|
||||||
|
.map(e => e.path)
|
||||||
|
],
|
||||||
|
|
||||||
|
// Target specific gaps
|
||||||
|
focusAreas: evaluation
|
||||||
|
.flatMap(e => e.missingContext)
|
||||||
|
.filter(unique)
|
||||||
|
};
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Phase 4: LOOP
|
||||||
|
|
||||||
|
Repeat with refined criteria (max 3 cycles):
|
||||||
|
|
||||||
|
```javascript
|
||||||
|
async function iterativeRetrieve(task, maxCycles = 3) {
|
||||||
|
let query = createInitialQuery(task);
|
||||||
|
let bestContext = [];
|
||||||
|
|
||||||
|
for (let cycle = 0; cycle < maxCycles; cycle++) {
|
||||||
|
const candidates = await retrieveFiles(query);
|
||||||
|
const evaluation = evaluateRelevance(candidates, task);
|
||||||
|
|
||||||
|
// Check if we have sufficient context
|
||||||
|
const highRelevance = evaluation.filter(e => e.relevance >= 0.7);
|
||||||
|
if (highRelevance.length >= 3 && !hasCriticalGaps(evaluation)) {
|
||||||
|
return highRelevance;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Refine and continue
|
||||||
|
query = refineQuery(evaluation, query);
|
||||||
|
bestContext = mergeContext(bestContext, highRelevance);
|
||||||
|
}
|
||||||
|
|
||||||
|
return bestContext;
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Practical Examples
|
||||||
|
|
||||||
|
### Example 1: Bug Fix Context
|
||||||
|
|
||||||
|
```
|
||||||
|
Task: "Fix the authentication token expiry bug"
|
||||||
|
|
||||||
|
Cycle 1:
|
||||||
|
DISPATCH: Search for "token", "auth", "expiry" in src/**
|
||||||
|
EVALUATE: Found auth.ts (0.9), tokens.ts (0.8), user.ts (0.3)
|
||||||
|
REFINE: Add "refresh", "jwt" keywords; exclude user.ts
|
||||||
|
|
||||||
|
Cycle 2:
|
||||||
|
DISPATCH: Search refined terms
|
||||||
|
EVALUATE: Found session-manager.ts (0.95), jwt-utils.ts (0.85)
|
||||||
|
REFINE: Sufficient context (2 high-relevance files)
|
||||||
|
|
||||||
|
Result: auth.ts, tokens.ts, session-manager.ts, jwt-utils.ts
|
||||||
|
```
|
||||||
|
|
||||||
|
### Example 2: Feature Implementation
|
||||||
|
|
||||||
|
```
|
||||||
|
Task: "Add rate limiting to API endpoints"
|
||||||
|
|
||||||
|
Cycle 1:
|
||||||
|
DISPATCH: Search "rate", "limit", "api" in routes/**
|
||||||
|
EVALUATE: No matches - codebase uses "throttle" terminology
|
||||||
|
REFINE: Add "throttle", "middleware" keywords
|
||||||
|
|
||||||
|
Cycle 2:
|
||||||
|
DISPATCH: Search refined terms
|
||||||
|
EVALUATE: Found throttle.ts (0.9), middleware/index.ts (0.7)
|
||||||
|
REFINE: Need router patterns
|
||||||
|
|
||||||
|
Cycle 3:
|
||||||
|
DISPATCH: Search "router", "express" patterns
|
||||||
|
EVALUATE: Found router-setup.ts (0.8)
|
||||||
|
REFINE: Sufficient context
|
||||||
|
|
||||||
|
Result: throttle.ts, middleware/index.ts, router-setup.ts
|
||||||
|
```
|
||||||
|
|
||||||
|
## Integration with Agents
|
||||||
|
|
||||||
|
Use in agent prompts:
|
||||||
|
|
||||||
|
```markdown
|
||||||
|
When retrieving context for this task:
|
||||||
|
1. Start with broad keyword search
|
||||||
|
2. Evaluate each file's relevance (0-1 scale)
|
||||||
|
3. Identify what context is still missing
|
||||||
|
4. Refine search criteria and repeat (max 3 cycles)
|
||||||
|
5. Return files with relevance >= 0.7
|
||||||
|
```
|
||||||
|
|
||||||
|
## Best Practices
|
||||||
|
|
||||||
|
1. **Start broad, narrow progressively** - Don't over-specify initial queries
|
||||||
|
2. **Learn codebase terminology** - First cycle often reveals naming conventions
|
||||||
|
3. **Track what's missing** - Explicit gap identification drives refinement
|
||||||
|
4. **Stop at "good enough"** - 3 high-relevance files beats 10 mediocre ones
|
||||||
|
5. **Exclude confidently** - Low-relevance files won't become relevant
|
||||||
|
|
||||||
|
## Related
|
||||||
|
|
||||||
|
- [The Longform Guide](https://x.com/affaanmustafa/status/2014040193557471352) - Subagent orchestration section
|
||||||
|
- `continuous-learning` skill - For patterns that improve over time
|
||||||
|
- Agent definitions in `~/.claude/agents/`
|
||||||
@@ -113,14 +113,35 @@ async function runTests() {
|
|||||||
// Run the script
|
// Run the script
|
||||||
await runScript(path.join(scriptsDir, 'session-end.js'));
|
await runScript(path.join(scriptsDir, 'session-end.js'));
|
||||||
|
|
||||||
// Check if session file was created
|
// Check if session file was created (default session ID)
|
||||||
|
// Use local time to match the script's getDateString() function
|
||||||
const sessionsDir = path.join(os.homedir(), '.claude', 'sessions');
|
const sessionsDir = path.join(os.homedir(), '.claude', 'sessions');
|
||||||
const today = new Date().toISOString().split('T')[0];
|
const now = new Date();
|
||||||
const sessionFile = path.join(sessionsDir, `${today}-session.tmp`);
|
const today = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, '0')}-${String(now.getDate()).padStart(2, '0')}`;
|
||||||
|
const sessionFile = path.join(sessionsDir, `${today}-default-session.tmp`);
|
||||||
|
|
||||||
assert.ok(fs.existsSync(sessionFile), 'Session file should exist');
|
assert.ok(fs.existsSync(sessionFile), 'Session file should exist');
|
||||||
})) passed++; else failed++;
|
})) passed++; else failed++;
|
||||||
|
|
||||||
|
if (await asyncTest('includes session ID in filename', async () => {
|
||||||
|
const testSessionId = 'test-session-abc12345';
|
||||||
|
const expectedShortId = 'abc12345'; // Last 8 chars
|
||||||
|
|
||||||
|
// Run with custom session ID
|
||||||
|
await runScript(path.join(scriptsDir, 'session-end.js'), '', {
|
||||||
|
CLAUDE_SESSION_ID: testSessionId
|
||||||
|
});
|
||||||
|
|
||||||
|
// Check if session file was created with session ID
|
||||||
|
// Use local time to match the script's getDateString() function
|
||||||
|
const sessionsDir = path.join(os.homedir(), '.claude', 'sessions');
|
||||||
|
const now = new Date();
|
||||||
|
const today = `${now.getFullYear()}-${String(now.getMonth() + 1).padStart(2, '0')}-${String(now.getDate()).padStart(2, '0')}`;
|
||||||
|
const sessionFile = path.join(sessionsDir, `${today}-${expectedShortId}-session.tmp`);
|
||||||
|
|
||||||
|
assert.ok(fs.existsSync(sessionFile), `Session file should exist: ${sessionFile}`);
|
||||||
|
})) passed++; else failed++;
|
||||||
|
|
||||||
// pre-compact.js tests
|
// pre-compact.js tests
|
||||||
console.log('\npre-compact.js:');
|
console.log('\npre-compact.js:');
|
||||||
|
|
||||||
|
|||||||
@@ -106,6 +106,61 @@ function runTests() {
|
|||||||
assert.ok(/^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$/.test(dt), `Expected YYYY-MM-DD HH:MM:SS, got ${dt}`);
|
assert.ok(/^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$/.test(dt), `Expected YYYY-MM-DD HH:MM:SS, got ${dt}`);
|
||||||
})) passed++; else failed++;
|
})) passed++; else failed++;
|
||||||
|
|
||||||
|
// Session ID tests
|
||||||
|
console.log('\nSession ID Functions:');
|
||||||
|
|
||||||
|
if (test('getSessionIdShort returns default when no env var', () => {
|
||||||
|
const originalEnv = process.env.CLAUDE_SESSION_ID;
|
||||||
|
delete process.env.CLAUDE_SESSION_ID;
|
||||||
|
try {
|
||||||
|
const shortId = utils.getSessionIdShort();
|
||||||
|
assert.strictEqual(shortId, 'default');
|
||||||
|
} finally {
|
||||||
|
if (originalEnv) process.env.CLAUDE_SESSION_ID = originalEnv;
|
||||||
|
}
|
||||||
|
})) passed++; else failed++;
|
||||||
|
|
||||||
|
if (test('getSessionIdShort returns last 8 characters', () => {
|
||||||
|
const originalEnv = process.env.CLAUDE_SESSION_ID;
|
||||||
|
process.env.CLAUDE_SESSION_ID = 'test-session-abc12345';
|
||||||
|
try {
|
||||||
|
const shortId = utils.getSessionIdShort();
|
||||||
|
assert.strictEqual(shortId, 'abc12345');
|
||||||
|
} finally {
|
||||||
|
if (originalEnv) {
|
||||||
|
process.env.CLAUDE_SESSION_ID = originalEnv;
|
||||||
|
} else {
|
||||||
|
delete process.env.CLAUDE_SESSION_ID;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})) passed++; else failed++;
|
||||||
|
|
||||||
|
if (test('getSessionIdShort uses custom fallback', () => {
|
||||||
|
const originalEnv = process.env.CLAUDE_SESSION_ID;
|
||||||
|
delete process.env.CLAUDE_SESSION_ID;
|
||||||
|
try {
|
||||||
|
const shortId = utils.getSessionIdShort('custom');
|
||||||
|
assert.strictEqual(shortId, 'custom');
|
||||||
|
} finally {
|
||||||
|
if (originalEnv) process.env.CLAUDE_SESSION_ID = originalEnv;
|
||||||
|
}
|
||||||
|
})) passed++; else failed++;
|
||||||
|
|
||||||
|
if (test('getSessionIdShort handles short session IDs', () => {
|
||||||
|
const originalEnv = process.env.CLAUDE_SESSION_ID;
|
||||||
|
process.env.CLAUDE_SESSION_ID = 'short';
|
||||||
|
try {
|
||||||
|
const shortId = utils.getSessionIdShort();
|
||||||
|
assert.strictEqual(shortId, 'short');
|
||||||
|
} finally {
|
||||||
|
if (originalEnv) {
|
||||||
|
process.env.CLAUDE_SESSION_ID = originalEnv;
|
||||||
|
} else {
|
||||||
|
delete process.env.CLAUDE_SESSION_ID;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})) passed++; else failed++;
|
||||||
|
|
||||||
// File operations tests
|
// File operations tests
|
||||||
console.log('\nFile Operations:');
|
console.log('\nFile Operations:');
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user