This commit is contained in:
2026-04-25 06:45:36 +09:00
commit e77acee8ba
1903 changed files with 513282 additions and 0 deletions
+124
View File
@@ -0,0 +1,124 @@
import { AGENT_TOOL_NAME } from '../../tools/AgentTool/constants.js'
import { ASK_USER_QUESTION_TOOL_NAME } from '../../tools/AskUserQuestionTool/prompt.js'
import { ENTER_PLAN_MODE_TOOL_NAME } from '../../tools/EnterPlanModeTool/constants.js'
import { EXIT_PLAN_MODE_TOOL_NAME } from '../../tools/ExitPlanModeTool/constants.js'
import { SKILL_TOOL_NAME } from '../../tools/SkillTool/constants.js'
import { getIsGit } from '../../utils/git.js'
import { registerBundledSkill } from '../bundledSkills.js'
// Bounds on how many parallel work units the coordinator should decompose the
// instruction into; both are interpolated into the coordinator prompt below.
const MIN_AGENTS = 5
const MAX_AGENTS = 30
// Shared checklist copied verbatim into every worker agent's prompt. The final
// `PR: <url>` line is the machine-readable hand-off the coordinator parses to
// track each unit's result.
const WORKER_INSTRUCTIONS = `After you finish implementing the change:
1. **Simplify** — Invoke the \`${SKILL_TOOL_NAME}\` tool with \`skill: "simplify"\` to review and clean up your changes.
2. **Run unit tests** — Run the project's test suite (check for package.json scripts, Makefile targets, or common commands like \`npm test\`, \`bun test\`, \`pytest\`, \`go test\`). If tests fail, fix them.
3. **Test end-to-end** — Follow the e2e test recipe from the coordinator's prompt (below). If the recipe says to skip e2e for this unit, skip it.
4. **Commit and push** — Commit all changes with a clear message, push the branch, and create a PR with \`gh pr create\`. Use a descriptive title. If \`gh\` is not available or the push fails, note it in your final message.
5. **Report** — End with a single line: \`PR: <url>\` so the coordinator can track it. If no PR was created, end with \`PR: none — <reason>\`.`
/**
 * Builds the /batch coordinator prompt: research and plan in plan mode,
 * spawn one worktree-isolated background agent per work unit, then track the
 * resulting PRs. MIN_AGENTS/MAX_AGENTS are interpolated so the rendered range
 * ("5–30") stays in sync with the constants above.
 */
function buildPrompt(instruction: string): string {
  return `# Batch: Parallel Work Orchestration
You are orchestrating a large, parallelizable change across this codebase.
## User Instruction
${instruction}
## Phase 1: Research and Plan (Plan Mode)
Call the \`${ENTER_PLAN_MODE_TOOL_NAME}\` tool now to enter plan mode, then:
1. **Understand the scope.** Launch one or more subagents (in the foreground — you need their results) to deeply research what this instruction touches. Find all the files, patterns, and call sites that need to change. Understand the existing conventions so the migration is consistent.
2. **Decompose into independent units.** Break the work into ${MIN_AGENTS}–${MAX_AGENTS} self-contained units. Each unit must:
- Be independently implementable in an isolated git worktree (no shared state with sibling units)
- Be mergeable on its own without depending on another unit's PR landing first
- Be roughly uniform in size (split large units, merge trivial ones)
Scale the count to the actual work: few files → closer to ${MIN_AGENTS}; hundreds of files → closer to ${MAX_AGENTS}. Prefer per-directory or per-module slicing over arbitrary file lists.
3. **Determine the e2e test recipe.** Figure out how a worker can verify its change actually works end-to-end — not just that unit tests pass. Look for:
- A \`claude-in-chrome\` skill or browser-automation tool (for UI changes: click through the affected flow, screenshot the result)
- A \`tmux\` or CLI-verifier skill (for CLI changes: launch the app interactively, exercise the changed behavior)
- A dev-server + curl pattern (for API changes: start the server, hit the affected endpoints)
- An existing e2e/integration test suite the worker can run
If you cannot find a concrete e2e path, use the \`${ASK_USER_QUESTION_TOOL_NAME}\` tool to ask the user how to verify this change end-to-end. Offer 2–3 specific options based on what you found (e.g., "Screenshot via chrome extension", "Run \`bun run dev\` and curl the endpoint", "No e2e — unit tests are sufficient"). Do not skip this — the workers cannot ask the user themselves.
Write the recipe as a short, concrete set of steps that a worker can execute autonomously. Include any setup (start a dev server, build first) and the exact command/interaction to verify.
4. **Write the plan.** In your plan file, include:
- A summary of what you found during research
- A numbered list of work units — for each: a short title, the list of files/directories it covers, and a one-line description of the change
- The e2e test recipe (or "skip e2e because …" if the user chose that)
- The exact worker instructions you will give each agent (the shared template)
5. Call \`${EXIT_PLAN_MODE_TOOL_NAME}\` to present the plan for approval.
## Phase 2: Spawn Workers (After Plan Approval)
Once the plan is approved, spawn one background agent per work unit using the \`${AGENT_TOOL_NAME}\` tool. **All agents must use \`isolation: "worktree"\` and \`run_in_background: true\`.** Launch them all in a single message block so they run in parallel.
For each agent, the prompt must be fully self-contained. Include:
- The overall goal (the user's instruction)
- This unit's specific task (title, file list, change description — copied verbatim from your plan)
- Any codebase conventions you discovered that the worker needs to follow
- The e2e test recipe from your plan (or "skip e2e because …")
- The worker instructions below, copied verbatim:
\`\`\`
${WORKER_INSTRUCTIONS}
\`\`\`
Use \`subagent_type: "general-purpose"\` unless a more specific agent type fits.
## Phase 3: Track Progress
After launching all workers, render an initial status table:
| # | Unit | Status | PR |
|---|------|--------|----|
| 1 | <title> | running | — |
| 2 | <title> | running | — |
As background-agent completion notifications arrive, parse the \`PR: <url>\` line from each agent's result and re-render the table with updated status (\`done\` / \`failed\`) and PR links. Keep a brief failure note for any agent that did not produce a PR.
When all agents have reported, render the final table and a one-line summary (e.g., "22/24 units landed as PRs").
`
}
// Returned instead of the coordinator prompt when /batch runs outside a git
// repo — worktree isolation and `gh pr create` both require one.
const NOT_A_GIT_REPO_MESSAGE = `This is not a git repository. The \`/batch\` command requires a git repo because it spawns agents in isolated git worktrees and creates PRs from each. Initialize a repo first, or run this from inside an existing one.`
// Returned when /batch is invoked with an empty (or whitespace-only) argument.
const MISSING_INSTRUCTION_MESSAGE = `Provide an instruction describing the batch change you want to make.
Examples:
/batch migrate from react to vue
/batch replace all uses of lodash with native equivalents
/batch add type annotations to all untyped function parameters`
/**
 * Registers the /batch bundled skill: plan a sweeping mechanical change, then
 * execute it via parallel worktree-isolated agents that each open a PR.
 * Guards: a non-empty instruction and a git repository are both required.
 */
export function registerBatchSkill(): void {
  registerBundledSkill({
    name: 'batch',
    description:
      // The "5–30" range mirrors MIN_AGENTS/MAX_AGENTS above.
      'Research and plan a large-scale change, then execute it in parallel across 5–30 isolated worktree agents that each open a PR.',
    whenToUse:
      'Use when the user wants to make a sweeping, mechanical change across many files (migrations, refactors, bulk renames) that can be decomposed into independent parallel units.',
    argumentHint: '<instruction>',
    userInvocable: true,
    disableModelInvocation: true,
    async getPromptForCommand(args) {
      const instruction = args.trim()
      if (!instruction) {
        return [{ type: 'text', text: MISSING_INSTRUCTION_MESSAGE }]
      }
      // Worktrees and PR creation only make sense inside a git repo.
      const isGit = await getIsGit()
      if (!isGit) {
        return [{ type: 'text', text: NOT_A_GIT_REPO_MESSAGE }]
      }
      return [{ type: 'text', text: buildPrompt(instruction) }]
    },
  })
}
+196
View File
@@ -0,0 +1,196 @@
import { readdir } from 'fs/promises'
import { getCwd } from '../../utils/cwd.js'
import { registerBundledSkill } from '../bundledSkills.js'
// claudeApiContent.js bundles 247KB of .md strings. Lazy-load inside
// getPromptForCommand so they only enter memory when /claude-api is invoked.
type SkillContent = typeof import('./claudeApiContent.js')
// Languages the skill ships docs for. 'curl' has no filesystem indicators
// (empty list below), so it is never auto-detected — only reachable when the
// user names it explicitly.
type DetectedLanguage =
  | 'python'
  | 'typescript'
  | 'java'
  | 'go'
  | 'ruby'
  | 'csharp'
  | 'php'
  | 'curl'
// Filenames (exact match) and extensions (leading '.', suffix match) whose
// presence in the cwd implies a language. detectLanguage() scans these in key
// insertion order and returns the first hit, so earlier entries take priority
// — e.g. a repo containing both pyproject.toml and package.json is 'python'.
const LANGUAGE_INDICATORS: Record<DetectedLanguage, string[]> = {
  python: ['.py', 'requirements.txt', 'pyproject.toml', 'setup.py', 'Pipfile'],
  typescript: ['.ts', '.tsx', 'tsconfig.json', 'package.json'],
  java: ['.java', 'pom.xml', 'build.gradle'],
  go: ['.go', 'go.mod'],
  ruby: ['.rb', 'Gemfile'],
  csharp: ['.cs', '.csproj'],
  php: ['.php', 'composer.json'],
  curl: [],
}
/**
 * Detects the project language from the top-level directory listing of cwd.
 * Languages are tried in LANGUAGE_INDICATORS insertion order; the first whose
 * indicators match wins. Returns null when the directory is unreadable or no
 * indicator matches.
 */
async function detectLanguage(): Promise<DetectedLanguage | null> {
  let entries: string[]
  try {
    entries = await readdir(getCwd())
  } catch {
    // Unreadable cwd — fall back to "unknown language" handling upstream.
    return null
  }
  const exactNames = new Set(entries)
  // Indicators starting with '.' are extensions (suffix match); anything else
  // is an exact filename.
  const matchesEntry = (indicator: string): boolean =>
    indicator.startsWith('.')
      ? entries.some(entry => entry.endsWith(indicator))
      : exactNames.has(indicator)
  const candidates = Object.entries(LANGUAGE_INDICATORS) as [
    DetectedLanguage,
    string[],
  ][]
  for (const [language, indicators] of candidates) {
    if (indicators.some(matchesEntry)) return language
  }
  return null
}
/**
 * Selects the bundled doc paths relevant to one language: everything under
 * `<lang>/` plus the language-agnostic `shared/` docs. Preserves the key
 * order of content.SKILL_FILES.
 */
function getFilesForLanguage(
  lang: DetectedLanguage,
  content: SkillContent,
): string[] {
  const langPrefix = `${lang}/`
  const selected: string[] = []
  for (const path of Object.keys(content.SKILL_FILES)) {
    if (path.startsWith(langPrefix) || path.startsWith('shared/')) {
      selected.push(path)
    }
  }
  return selected
}
/**
 * Prepares a bundled markdown string for inclusion in the prompt:
 * 1. strips HTML comments, repeating until a pass removes nothing so that
 *    comment markers exposed by a previous removal are also cleaned up;
 * 2. substitutes {{VAR}} placeholders from SKILL_MODEL_VARS, leaving unknown
 *    placeholders untouched.
 */
function processContent(md: string, content: SkillContent): string {
  let stripped = md
  for (;;) {
    const next = stripped.replace(/<!--[\s\S]*?-->\n?/g, '')
    if (next === stripped) break
    stripped = next
  }
  const vars = content.SKILL_MODEL_VARS as Record<string, string>
  return stripped.replace(
    /\{\{(\w+)\}\}/g,
    (placeholder, key: string) => vars[key] ?? placeholder,
  )
}
/**
 * Renders the given doc files as `<doc path="…">` sections, sorted by path.
 * Paths missing from content.SKILL_FILES (or with empty content) are skipped.
 */
function buildInlineReference(
  filePaths: string[],
  content: SkillContent,
): string {
  const sections: string[] = []
  // Copy before sorting — Array.prototype.sort mutates in place, and the
  // caller's array must not be reordered as a side effect.
  for (const filePath of [...filePaths].sort()) {
    const md = content.SKILL_FILES[filePath]
    if (!md) continue
    sections.push(
      `<doc path="${filePath}">\n${processContent(md, content).trim()}\n</doc>`,
    )
  }
  return sections.join('\n\n')
}
// Task-oriented index of the inlined docs. Every `{lang}` placeholder is
// replaced (via buildPrompt) with the detected language id, or with the
// literal 'unknown' when no language was detected.
const INLINE_READING_GUIDE = `## Reference Documentation
The relevant documentation for your detected language is included below in \`<doc>\` tags. Each tag has a \`path\` attribute showing its original file path. Use this to find the right section:
### Quick Task Reference
**Single text classification/summarization/extraction/Q&A:**
→ Refer to \`{lang}/claude-api/README.md\`
**Chat UI or real-time response display:**
→ Refer to \`{lang}/claude-api/README.md\` + \`{lang}/claude-api/streaming.md\`
**Long-running conversations (may exceed context window):**
→ Refer to \`{lang}/claude-api/README.md\` — see Compaction section
**Prompt caching / optimize caching / "why is my cache hit rate low":**
→ Refer to \`shared/prompt-caching.md\` + \`{lang}/claude-api/README.md\` (Prompt Caching section)
**Function calling / tool use / agents:**
→ Refer to \`{lang}/claude-api/README.md\` + \`shared/tool-use-concepts.md\` + \`{lang}/claude-api/tool-use.md\`
**Batch processing (non-latency-sensitive):**
→ Refer to \`{lang}/claude-api/README.md\` + \`{lang}/claude-api/batches.md\`
**File uploads across multiple requests:**
→ Refer to \`{lang}/claude-api/README.md\` + \`{lang}/claude-api/files-api.md\`
**Agent with built-in tools (file/web/terminal) (Python & TypeScript only):**
→ Refer to \`{lang}/agent-sdk/README.md\` + \`{lang}/agent-sdk/patterns.md\`
**Error handling:**
→ Refer to \`shared/error-codes.md\`
**Latest docs via WebFetch:**
→ Refer to \`shared/live-sources.md\` for URLs`
/**
 * Assembles the /claude-api prompt from the bundled SKILL.md plus inlined
 * docs. Layout: SKILL.md head (up to its "## Reading Guide" section), the
 * inline reading guide, the rendered docs, SKILL.md tail (from "## When to
 * Use WebFetch" on), and finally the user's request, all joined by blank
 * lines. When no language was detected, every doc is inlined and the model is
 * told to ask the user which language applies.
 */
function buildPrompt(
  lang: DetectedLanguage | null,
  args: string,
  content: SkillContent,
): string {
  const cleanPrompt = processContent(content.SKILL_PROMPT, content)
  // SKILL.md head: everything before its own "Reading Guide" section, which
  // is superseded by the inline guide below.
  const guideStart = cleanPrompt.indexOf('## Reading Guide')
  const parts: string[] = [
    guideStart === -1 ? cleanPrompt : cleanPrompt.slice(0, guideStart).trimEnd(),
  ]
  const docsHeader = '---\n\n## Included Documentation\n\n'
  if (lang === null) {
    // No language detected — include all docs and let the model ask.
    parts.push(INLINE_READING_GUIDE.replace(/\{lang\}/g, 'unknown'))
    parts.push(
      'No project language was auto-detected. Ask the user which language they are using, then refer to the matching docs below.',
    )
    parts.push(
      docsHeader + buildInlineReference(Object.keys(content.SKILL_FILES), content),
    )
  } else {
    parts.push(INLINE_READING_GUIDE.replace(/\{lang\}/g, lang))
    parts.push(
      docsHeader + buildInlineReference(getFilesForLanguage(lang, content), content),
    )
  }
  // SKILL.md tail: keep the "When to Use WebFetch" and "Common Pitfalls"
  // sections that follow this marker.
  const tailStart = cleanPrompt.indexOf('## When to Use WebFetch')
  if (tailStart !== -1) {
    parts.push(cleanPrompt.slice(tailStart).trimEnd())
  }
  if (args) {
    parts.push(`## User Request\n\n${args}`)
  }
  return parts.join('\n\n')
}
/**
 * Registers the /claude-api bundled skill: inlines the bundled Claude
 * API/Agent SDK docs for the project's detected language into the prompt.
 */
export function registerClaudeApiSkill(): void {
  registerBundledSkill({
    name: 'claude-api',
    description:
      'Build apps with the Claude API or Anthropic SDK.\n' +
      'TRIGGER when: code imports `anthropic`/`@anthropic-ai/sdk`/`claude_agent_sdk`, or user asks to use Claude API, Anthropic SDKs, or Agent SDK.\n' +
      'DO NOT TRIGGER when: code imports `openai`/other AI SDK, general programming, or ML/data-science tasks.',
    allowedTools: ['Read', 'Grep', 'Glob', 'WebFetch'],
    userInvocable: true,
    async getPromptForCommand(args) {
      // Dynamic import keeps the ~247KB of bundled .md strings out of memory
      // until the skill is actually invoked (see note at top of file).
      const content = await import('./claudeApiContent.js')
      // May be null (unreadable cwd or no indicator files); buildPrompt then
      // inlines all languages' docs and asks the user which one applies.
      const lang = await detectLanguage()
      const prompt = buildPrompt(lang, args, content)
      return [{ type: 'text', text: prompt }]
    },
  })
}
+75
View File
@@ -0,0 +1,75 @@
// Content for the claude-api bundled skill.
// Each .md file is inlined as a string at build time via Bun's text loader.
import csharpClaudeApi from './claude-api/csharp/claude-api.md'
import curlExamples from './claude-api/curl/examples.md'
import goClaudeApi from './claude-api/go/claude-api.md'
import javaClaudeApi from './claude-api/java/claude-api.md'
import phpClaudeApi from './claude-api/php/claude-api.md'
import pythonAgentSdkPatterns from './claude-api/python/agent-sdk/patterns.md'
import pythonAgentSdkReadme from './claude-api/python/agent-sdk/README.md'
import pythonClaudeApiBatches from './claude-api/python/claude-api/batches.md'
import pythonClaudeApiFilesApi from './claude-api/python/claude-api/files-api.md'
import pythonClaudeApiReadme from './claude-api/python/claude-api/README.md'
import pythonClaudeApiStreaming from './claude-api/python/claude-api/streaming.md'
import pythonClaudeApiToolUse from './claude-api/python/claude-api/tool-use.md'
import rubyClaudeApi from './claude-api/ruby/claude-api.md'
import skillPrompt from './claude-api/SKILL.md'
import sharedErrorCodes from './claude-api/shared/error-codes.md'
import sharedLiveSources from './claude-api/shared/live-sources.md'
import sharedModels from './claude-api/shared/models.md'
import sharedPromptCaching from './claude-api/shared/prompt-caching.md'
import sharedToolUseConcepts from './claude-api/shared/tool-use-concepts.md'
import typescriptAgentSdkPatterns from './claude-api/typescript/agent-sdk/patterns.md'
import typescriptAgentSdkReadme from './claude-api/typescript/agent-sdk/README.md'
import typescriptClaudeApiBatches from './claude-api/typescript/claude-api/batches.md'
import typescriptClaudeApiFilesApi from './claude-api/typescript/claude-api/files-api.md'
import typescriptClaudeApiReadme from './claude-api/typescript/claude-api/README.md'
import typescriptClaudeApiStreaming from './claude-api/typescript/claude-api/streaming.md'
import typescriptClaudeApiToolUse from './claude-api/typescript/claude-api/tool-use.md'
// @[MODEL LAUNCH]: Update the model IDs/names below. These are substituted into {{VAR}}
// placeholders in the .md files at runtime before the skill prompt is sent.
// After updating these constants, manually update the two files that still hardcode models:
// - claude-api/SKILL.md (Current Models pricing table)
// - claude-api/shared/models.md (full model catalog with legacy versions and alias mappings)
export const SKILL_MODEL_VARS = {
  OPUS_ID: 'claude-opus-4-6',
  OPUS_NAME: 'Claude Opus 4.6',
  SONNET_ID: 'claude-sonnet-4-6',
  SONNET_NAME: 'Claude Sonnet 4.6',
  HAIKU_ID: 'claude-haiku-4-5',
  HAIKU_NAME: 'Claude Haiku 4.5',
  // Previous Sonnet ID — used in "do not append date suffixes" example in SKILL.md.
  PREV_SONNET_ID: 'claude-sonnet-4-5',
} satisfies Record<string, string>
// Raw SKILL.md source; its {{VAR}} placeholders are substituted at runtime.
export const SKILL_PROMPT: string = skillPrompt
// Bundled doc files keyed by skill-relative path. The `<lang>/` and `shared/`
// key prefixes are significant — language filtering matches on them.
export const SKILL_FILES: Record<string, string> = {
  'csharp/claude-api.md': csharpClaudeApi,
  'curl/examples.md': curlExamples,
  'go/claude-api.md': goClaudeApi,
  'java/claude-api.md': javaClaudeApi,
  'php/claude-api.md': phpClaudeApi,
  'python/agent-sdk/README.md': pythonAgentSdkReadme,
  'python/agent-sdk/patterns.md': pythonAgentSdkPatterns,
  'python/claude-api/README.md': pythonClaudeApiReadme,
  'python/claude-api/batches.md': pythonClaudeApiBatches,
  'python/claude-api/files-api.md': pythonClaudeApiFilesApi,
  'python/claude-api/streaming.md': pythonClaudeApiStreaming,
  'python/claude-api/tool-use.md': pythonClaudeApiToolUse,
  'ruby/claude-api.md': rubyClaudeApi,
  'shared/error-codes.md': sharedErrorCodes,
  'shared/live-sources.md': sharedLiveSources,
  'shared/models.md': sharedModels,
  'shared/prompt-caching.md': sharedPromptCaching,
  'shared/tool-use-concepts.md': sharedToolUseConcepts,
  'typescript/agent-sdk/README.md': typescriptAgentSdkReadme,
  'typescript/agent-sdk/patterns.md': typescriptAgentSdkPatterns,
  'typescript/claude-api/README.md': typescriptClaudeApiReadme,
  'typescript/claude-api/batches.md': typescriptClaudeApiBatches,
  'typescript/claude-api/files-api.md': typescriptClaudeApiFilesApi,
  'typescript/claude-api/streaming.md': typescriptClaudeApiStreaming,
  'typescript/claude-api/tool-use.md': typescriptClaudeApiToolUse,
}
+34
View File
@@ -0,0 +1,34 @@
import { BROWSER_TOOLS } from '@ant/claude-for-chrome-mcp'
import { BASE_CHROME_PROMPT } from '../../utils/claudeInChrome/prompt.js'
import { shouldAutoEnableClaudeInChrome } from '../../utils/claudeInChrome/setup.js'
import { registerBundledSkill } from '../bundledSkills.js'
// Fully-qualified MCP tool names ("mcp__claude-in-chrome__<tool>") derived
// from the browser tool list; granted to the skill via allowedTools below.
const CLAUDE_IN_CHROME_MCP_TOOLS = BROWSER_TOOLS.map(
  tool => `mcp__claude-in-chrome__${tool.name}`,
)
// Appended after the base Chrome prompt when the skill activates; directs the
// model to fetch the user's current tab context before anything else.
const SKILL_ACTIVATION_MESSAGE = `
Now that this skill is invoked, you have access to Chrome browser automation tools. You can now use the mcp__claude-in-chrome__* tools to interact with web pages.
IMPORTANT: Start by calling mcp__claude-in-chrome__tabs_context_mcp to get information about the user's current browser tabs.
`
/**
 * Registers the claude-in-chrome bundled skill, which unlocks the browser
 * automation MCP tools and seeds the prompt with the base Chrome instructions
 * (plus the user's task, when one was given).
 */
export function registerClaudeInChromeSkill(): void {
  registerBundledSkill({
    name: 'claude-in-chrome',
    description:
      'Automates your Chrome browser to interact with web pages - clicking elements, filling forms, capturing screenshots, reading console logs, and navigating sites. Opens pages in new tabs within your existing Chrome session. Requires site-level permissions before executing (configured in the extension).',
    whenToUse:
      'When the user wants to interact with web pages, automate browser tasks, capture screenshots, read console logs, or perform any browser-based actions. Always invoke BEFORE attempting to use any mcp__claude-in-chrome__* tools.',
    allowedTools: CLAUDE_IN_CHROME_MCP_TOOLS,
    userInvocable: true,
    // Evaluated lazily per check rather than once at registration time.
    isEnabled: () => shouldAutoEnableClaudeInChrome(),
    async getPromptForCommand(args) {
      const sections = [`${BASE_CHROME_PROMPT}\n${SKILL_ACTIVATION_MESSAGE}`]
      if (args) {
        sections.push(`## Task\n\n${args}`)
      }
      return [{ type: 'text', text: sections.join('\n') }]
    },
  })
}
+103
View File
@@ -0,0 +1,103 @@
import { open, stat } from 'fs/promises'
import { CLAUDE_CODE_GUIDE_AGENT_TYPE } from 'src/tools/AgentTool/built-in/claudeCodeGuideAgent.js'
import { getSettingsFilePathForSource } from 'src/utils/settings/settings.js'
import { enableDebugLogging, getDebugLogPath } from '../../utils/debug.js'
import { errorMessage, isENOENT } from '../../utils/errors.js'
import { formatFileSize } from '../../utils/format.js'
import { registerBundledSkill } from '../bundledSkills.js'
// Number of trailing log lines inlined into the prompt as a format sample.
const DEFAULT_DEBUG_LINES_READ = 20
// Read at most this many bytes (64 KiB) from the end of the log, so huge logs
// are never loaded into memory in full.
const TAIL_READ_BYTES = 64 * 1024
/**
 * Registers the /debug bundled skill: turns on session debug logging (if it
 * wasn't already on), tails the debug log, and builds a prompt that guides
 * the model through diagnosing the user's issue from that log.
 */
export function registerDebugSkill(): void {
  registerBundledSkill({
    name: 'debug',
    description:
      // Internal ("ant") users always have full event logging; external users
      // get logging enabled on demand by this skill.
      process.env.USER_TYPE === 'ant'
        ? 'Debug your current Claude Code session by reading the session debug log. Includes all event logging'
        : 'Enable debug logging for this session and help diagnose issues',
    allowedTools: ['Read', 'Grep', 'Glob'],
    argumentHint: '[issue description]',
    // disableModelInvocation so that the user has to explicitly request it in
    // interactive mode and so the description does not take up context.
    disableModelInvocation: true,
    userInvocable: true,
    async getPromptForCommand(args) {
      // Non-ants don't write debug logs by default — turn logging on now so
      // subsequent activity in this session is captured. The return value
      // presumably reports whether logging was already on (it drives the
      // "just enabled" notice below) — confirm against enableDebugLogging.
      const wasAlreadyLogging = enableDebugLogging()
      const debugLogPath = getDebugLogPath()
      let logInfo: string
      try {
        // Tail the log without reading the whole thing - debug logs grow
        // unbounded in long sessions and reading them in full spikes RSS.
        const stats = await stat(debugLogPath)
        const readSize = Math.min(stats.size, TAIL_READ_BYTES)
        const startOffset = stats.size - readSize
        const fd = await open(debugLogPath, 'r')
        try {
          const { buffer, bytesRead } = await fd.read({
            buffer: Buffer.alloc(readSize),
            position: startOffset,
          })
          // Keep only the last N lines of the tail chunk. If the window
          // starts mid-line, the first kept line may be a partial line.
          const tail = buffer
            .toString('utf-8', 0, bytesRead)
            .split('\n')
            .slice(-DEFAULT_DEBUG_LINES_READ)
            .join('\n')
          logInfo = `Log size: ${formatFileSize(stats.size)}\n\n### Last ${DEFAULT_DEBUG_LINES_READ} lines\n\n\`\`\`\n${tail}\n\`\`\``
        } finally {
          // Always release the file handle, even if the read throws.
          await fd.close()
        }
      } catch (e) {
        // ENOENT is expected right after logging was first enabled — no log
        // file exists yet. Anything else is reported verbatim in the prompt.
        logInfo = isENOENT(e)
          ? 'No debug log exists yet — logging was just enabled.'
          : `Failed to read last ${DEFAULT_DEBUG_LINES_READ} lines of debug log: ${errorMessage(e)}`
      }
      // Extra prompt section shown only when this invocation just turned
      // logging on (nothing earlier in the session was captured).
      const justEnabledSection = wasAlreadyLogging
        ? ''
        : `
## Debug Logging Just Enabled
Debug logging was OFF for this session until now. Nothing prior to this /debug invocation was captured.
Tell the user that debug logging is now active at \`${debugLogPath}\`, ask them to reproduce the issue, then re-read the log. If they can't reproduce, they can also restart with \`claude --debug\` to capture logs from startup.
`
      const prompt = `# Debug Skill
Help the user debug an issue they're encountering in this current Claude Code session.
${justEnabledSection}
## Session Debug Log
The debug log for the current session is at: \`${debugLogPath}\`
${logInfo}
For additional context, grep for [ERROR] and [WARN] lines across the full file.
## Issue Description
${args || 'The user did not describe a specific issue. Read the debug log and summarize any errors, warnings, or notable issues.'}
## Settings
Remember that settings are in:
* user - ${getSettingsFilePathForSource('userSettings')}
* project - ${getSettingsFilePathForSource('projectSettings')}
* local - ${getSettingsFilePathForSource('localSettings')}
## Instructions
1. Review the user's issue description
2. The last ${DEFAULT_DEBUG_LINES_READ} lines show the debug file format. Look for [ERROR] and [WARN] entries, stack traces, and failure patterns across the file
3. Consider launching the ${CLAUDE_CODE_GUIDE_AGENT_TYPE} subagent to understand the relevant Claude Code features
4. Explain what you found in plain language
5. Suggest concrete fixes or next steps
`
      return [{ type: 'text', text: prompt }]
    },
  })
}
+79
View File
@@ -0,0 +1,79 @@
import { feature } from 'bun:bundle'
import { shouldAutoEnableClaudeInChrome } from 'src/utils/claudeInChrome/setup.js'
import { registerBatchSkill } from './batch.js'
import { registerClaudeInChromeSkill } from './claudeInChrome.js'
import { registerDebugSkill } from './debug.js'
import { registerKeybindingsSkill } from './keybindings.js'
import { registerLoremIpsumSkill } from './loremIpsum.js'
import { registerRememberSkill } from './remember.js'
import { registerSimplifySkill } from './simplify.js'
import { registerSkillifySkill } from './skillify.js'
import { registerStuckSkill } from './stuck.js'
import { registerUpdateConfigSkill } from './updateConfig.js'
import { registerVerifySkill } from './verify.js'
/**
* Initialize all bundled skills.
* Called at startup to register skills that ship with the CLI.
*
* To add a new bundled skill:
* 1. Create a new file in src/skills/bundled/ (e.g., myskill.ts)
* 2. Export a register function that calls registerBundledSkill()
* 3. Import and call that function here
*/
export function initBundledSkills(): void {
  // Always-registered skills.
  registerUpdateConfigSkill()
  registerKeybindingsSkill()
  registerVerifySkill()
  registerDebugSkill()
  registerLoremIpsumSkill()
  registerSkillifySkill()
  registerRememberSkill()
  registerSimplifySkill()
  registerBatchSkill()
  registerStuckSkill()
  // Build-flagged skills below are require()d inside feature() checks —
  // presumably so the bundler can drop both the branch and the module when
  // the flag is off (TODO confirm bun:bundle's feature() semantics).
  if (feature('KAIROS') || feature('KAIROS_DREAM')) {
    /* eslint-disable @typescript-eslint/no-require-imports */
    const { registerDreamSkill } = require('./dream.js')
    /* eslint-enable @typescript-eslint/no-require-imports */
    registerDreamSkill()
  }
  if (feature('REVIEW_ARTIFACT')) {
    /* eslint-disable @typescript-eslint/no-require-imports */
    const { registerHunterSkill } = require('./hunter.js')
    /* eslint-enable @typescript-eslint/no-require-imports */
    registerHunterSkill()
  }
  if (feature('AGENT_TRIGGERS')) {
    /* eslint-disable @typescript-eslint/no-require-imports */
    const { registerLoopSkill } = require('./loop.js')
    /* eslint-enable @typescript-eslint/no-require-imports */
    // Registered whenever the AGENT_TRIGGERS build flag is on; /loop's own
    // isEnabled callback — which delegates to isKairosCronEnabled(), the same
    // lazy per-invocation pattern as the cron tools — decides visibility.
    registerLoopSkill()
  }
  if (feature('AGENT_TRIGGERS_REMOTE')) {
    /* eslint-disable @typescript-eslint/no-require-imports */
    const {
      registerScheduleRemoteAgentsSkill,
    } = require('./scheduleRemoteAgents.js')
    /* eslint-enable @typescript-eslint/no-require-imports */
    registerScheduleRemoteAgentsSkill()
  }
  if (feature('BUILDING_CLAUDE_APPS')) {
    /* eslint-disable @typescript-eslint/no-require-imports */
    const { registerClaudeApiSkill } = require('./claudeApi.js')
    /* eslint-enable @typescript-eslint/no-require-imports */
    registerClaudeApiSkill()
  }
  // Gated on a runtime check (not a build flag), so a plain static import is
  // used for this one.
  if (shouldAutoEnableClaudeInChrome()) {
    registerClaudeInChromeSkill()
  }
  if (feature('RUN_SKILL_GENERATOR')) {
    /* eslint-disable @typescript-eslint/no-require-imports */
    const { registerRunSkillGeneratorSkill } = require('./runSkillGenerator.js')
    /* eslint-enable @typescript-eslint/no-require-imports */
    registerRunSkillGeneratorSkill()
  }
}
+339
View File
@@ -0,0 +1,339 @@
import { DEFAULT_BINDINGS } from '../../keybindings/defaultBindings.js'
import { isKeybindingCustomizationEnabled } from '../../keybindings/loadUserBindings.js'
import {
MACOS_RESERVED,
NON_REBINDABLE,
TERMINAL_RESERVED,
} from '../../keybindings/reservedShortcuts.js'
import type { KeybindingsSchemaType } from '../../keybindings/schema.js'
import {
KEYBINDING_ACTIONS,
KEYBINDING_CONTEXT_DESCRIPTIONS,
KEYBINDING_CONTEXTS,
} from '../../keybindings/schema.js'
import { jsonStringify } from '../../utils/slowOperations.js'
import { registerBundledSkill } from '../bundledSkills.js'
/**
 * Render every keybinding context and its description as a markdown table.
 */
function generateContextsTable(): string {
  const rows = KEYBINDING_CONTEXTS.map(context => [
    `\`${context}\``,
    KEYBINDING_CONTEXT_DESCRIPTIONS[context],
  ])
  return markdownTable(['Context', 'Description'], rows)
}
/**
* Build a markdown table of all actions with their default bindings and context.
*/
function generateActionsTable(): string {
// Build a lookup: action -> { keys, context }
const actionInfo: Record<string, { keys: string[]; context: string }> = {}
for (const block of DEFAULT_BINDINGS) {
for (const [key, action] of Object.entries(block.bindings)) {
if (action) {
if (!actionInfo[action]) {
actionInfo[action] = { keys: [], context: block.context }
}
actionInfo[action].keys.push(key)
}
}
}
return markdownTable(
['Action', 'Default Key(s)', 'Context'],
KEYBINDING_ACTIONS.map(action => {
const info = actionInfo[action]
const keys = info ? info.keys.map(k => `\`${k}\``).join(', ') : '(none)'
const context = info ? info.context : inferContextFromAction(action)
return [`\`${action}\``, keys, context]
}),
)
}
/**
 * Infer context from action prefix when not in DEFAULT_BINDINGS.
 * The prefix is the part of the action before the first ':'; unrecognized
 * prefixes (including actions with no ':') map to 'Unknown'.
 */
function inferContextFromAction(action: string): string {
  switch (action.split(':')[0]) {
    case 'app':
      return 'Global'
    case 'history':
      return 'Global or Chat'
    case 'chat':
      return 'Chat'
    case 'autocomplete':
      return 'Autocomplete'
    // 'confirm' and 'permission' actions both live in the Confirmation context.
    case 'confirm':
    case 'permission':
      return 'Confirmation'
    case 'tabs':
      return 'Tabs'
    case 'transcript':
      return 'Transcript'
    case 'historySearch':
      return 'HistorySearch'
    case 'task':
      return 'Task'
    case 'theme':
      return 'ThemePicker'
    case 'help':
      return 'Help'
    case 'attachments':
      return 'Attachments'
    case 'footer':
      return 'Footer'
    case 'messageSelector':
      return 'MessageSelector'
    case 'diff':
      return 'DiffDialog'
    case 'modelPicker':
      return 'ModelPicker'
    case 'select':
      return 'Select'
    default:
      return 'Unknown'
  }
}
/**
* Build a list of reserved shortcuts.
*/
function generateReservedShortcuts(): string {
const lines: string[] = []
lines.push('### Non-rebindable (errors)')
for (const s of NON_REBINDABLE) {
lines.push(`- \`${s.key}\`${s.reason}`)
}
lines.push('')
lines.push('### Terminal reserved (errors/warnings)')
for (const s of TERMINAL_RESERVED) {
lines.push(
`- \`${s.key}\`${s.reason} (${s.severity === 'error' ? 'will not work' : 'may conflict'})`,
)
}
lines.push('')
lines.push('### macOS reserved (errors)')
for (const s of MACOS_RESERVED) {
lines.push(`- \`${s.key}\`${s.reason}`)
}
return lines.join('\n')
}
// Canonical example of a complete keybindings.json file; serialized to JSON
// for the File Format section of the prompt.
const FILE_FORMAT_EXAMPLE: KeybindingsSchemaType = {
  $schema: 'https://www.schemastore.org/claude-code-keybindings.json',
  $docs: 'https://code.claude.com/docs/en/keybindings',
  bindings: [
    {
      context: 'Chat',
      bindings: {
        'ctrl+e': 'chat:externalEditor',
      },
    },
  ],
}
// Example: mapping a key to null removes its default binding.
const UNBIND_EXAMPLE: KeybindingsSchemaType['bindings'][number] = {
  context: 'Chat',
  bindings: {
    'ctrl+s': null,
  },
}
// Example: moving an action to a new key — unbind the old key AND bind the new.
const REBIND_EXAMPLE: KeybindingsSchemaType['bindings'][number] = {
  context: 'Chat',
  bindings: {
    'ctrl+g': null,
    'ctrl+e': 'chat:externalEditor',
  },
}
// Example: a two-keystroke chord binding.
const CHORD_EXAMPLE: KeybindingsSchemaType['bindings'][number] = {
  context: 'Global',
  bindings: {
    'ctrl+k ctrl+t': 'app:toggleTodos',
  },
}
// Prompt section: title plus the read-before-write safety rule.
const SECTION_INTRO = [
  '# Keybindings Skill',
  '',
  'Create or modify `~/.claude/keybindings.json` to customize keyboard shortcuts.',
  '',
  '## CRITICAL: Read Before Write',
  '',
  '**Always read `~/.claude/keybindings.json` first** (it may not exist yet). Merge changes with existing bindings — never replace the entire file.',
  '',
  '- Use **Edit** tool for modifications to existing files',
  '- Use **Write** tool only if the file does not exist yet',
].join('\n')
// Prompt section: the expected file shape, rendered from FILE_FORMAT_EXAMPLE.
const SECTION_FILE_FORMAT = [
  '## File Format',
  '',
  '```json',
  jsonStringify(FILE_FORMAT_EXAMPLE, null, 2),
  '```',
  '',
  'Always include the `$schema` and `$docs` fields.',
].join('\n')
// Prompt section: modifier names/aliases, special keys, and chord syntax.
const SECTION_KEYSTROKE_SYNTAX = [
  '## Keystroke Syntax',
  '',
  '**Modifiers** (combine with `+`):',
  '- `ctrl` (alias: `control`)',
  '- `alt` (aliases: `opt`, `option`) — note: `alt` and `meta` are identical in terminals',
  '- `shift`',
  '- `meta` (aliases: `cmd`, `command`)',
  '',
  '**Special keys**: `escape`/`esc`, `enter`/`return`, `tab`, `space`, `backspace`, `delete`, `up`, `down`, `left`, `right`',
  '',
  '**Chords**: Space-separated keystrokes, e.g. `ctrl+k ctrl+s` (1-second timeout between keystrokes)',
  '',
  '**Examples**: `ctrl+shift+p`, `alt+enter`, `ctrl+k ctrl+n`',
].join('\n')
// Prompt section: how to remove a default binding (null), with example.
const SECTION_UNBINDING = [
  '## Unbinding Default Shortcuts',
  '',
  'Set a key to `null` to remove its default binding:',
  '',
  '```json',
  jsonStringify(UNBIND_EXAMPLE, null, 2),
  '```',
].join('\n')
// Prompt section: user bindings layer additively on top of defaults.
const SECTION_INTERACTION = [
  '## How User Bindings Interact with Defaults',
  '',
  '- User bindings are **additive** — they are appended after the default bindings',
  '- To **move** a binding to a different key: unbind the old key (`null`) AND add the new binding',
  "- A context only needs to appear in the user's file if they want to change something in that context",
].join('\n')
// Prompt section: worked examples for rebinding and chord bindings.
const SECTION_COMMON_PATTERNS = [
  '## Common Patterns',
  '',
  '### Rebind a key',
  'To change the external editor shortcut from `ctrl+g` to `ctrl+e`:',
  '```json',
  jsonStringify(REBIND_EXAMPLE, null, 2),
  '```',
  '',
  '### Add a chord binding',
  '```json',
  jsonStringify(CHORD_EXAMPLE, null, 2),
  '```',
].join('\n')
// Prompt section: rules the model must follow when editing bindings.
const SECTION_BEHAVIORAL_RULES = [
  '## Behavioral Rules',
  '',
  '1. Only include contexts the user wants to change (minimal overrides)',
  '2. Validate that actions and contexts are from the known lists below',
  '3. Warn the user proactively if they choose a key that conflicts with reserved shortcuts or common tools like tmux (`ctrl+b`) and screen (`ctrl+a`)',
  '4. When adding a new binding for an existing action, the new binding is additive (existing default still works unless explicitly unbound)',
  '5. To fully replace a default binding, unbind the old key AND add the new one',
].join('\n')
// Known /doctor diagnostics for keybindings.json: [message, root cause, remedy].
const DOCTOR_ISSUE_ROWS: string[][] = [
  ['`keybindings.json must have a "bindings" array`', 'Missing wrapper object', 'Wrap bindings in `{ "bindings": [...] }`'],
  ['`"bindings" must be an array`', '`bindings` is not an array', 'Set `"bindings"` to an array: `[{ context: ..., bindings: ... }]`'],
  ['`Unknown context "X"`', 'Typo or invalid context name', 'Use exact context names from the Available Contexts table'],
  ['`Duplicate key "X" in Y bindings`', 'Same key defined twice in one context', 'Remove the duplicate; JSON uses only the last value'],
  ['`"X" may not work: ...`', 'Key conflicts with terminal/OS reserved shortcut', 'Choose a different key (see Reserved Shortcuts section)'],
  ['`Could not parse keystroke "X"`', 'Invalid key syntax', 'Check syntax: use `+` between modifiers, valid key names'],
  ['`Invalid action for "X"`', 'Action value is not a string or null', 'Actions must be strings like `"app:help"` or `null` to unbind'],
]
// Documents the /doctor validation surface: the diagnostics table above plus
// a sample of the rendered output so the model can recognize it.
const SECTION_DOCTOR = [
  '## Validation with /doctor',
  '',
  'The `/doctor` command includes a "Keybinding Configuration Issues" section that validates `~/.claude/keybindings.json`.',
  '',
  '### Common Issues and Fixes',
  '',
  markdownTable(['Issue', 'Cause', 'Fix'], DOCTOR_ISSUE_ROWS),
  '',
  '### Example /doctor Output',
  '',
  '```',
  'Keybinding Configuration Issues',
  'Location: ~/.claude/keybindings.json',
  ' └ [Error] Unknown context "chat"',
  '   → Valid contexts: Global, Chat, Autocomplete, ...',
  ' └ [Warning] "ctrl+c" may not work: Terminal interrupt (SIGINT)',
  '```',
  '',
  '**Errors** prevent bindings from working and must be fixed. **Warnings** indicate potential conflicts but the binding may still work.',
].join('\n')
/**
 * Registers the model-facing keybindings-help skill. The prompt is assembled
 * on every invocation so the context/action/reserved-shortcut tables always
 * reflect the current source-of-truth arrays.
 */
export function registerKeybindingsSkill(): void {
  registerBundledSkill({
    name: 'keybindings-help',
    description:
      'Use when the user wants to customize keyboard shortcuts, rebind keys, add chord bindings, or modify ~/.claude/keybindings.json. Examples: "rebind ctrl+s", "add a chord shortcut", "change the submit key", "customize keybindings".',
    allowedTools: ['Read'],
    userInvocable: false,
    isEnabled: isKeybindingCustomizationEnabled,
    async getPromptForCommand(args) {
      // Static sections first, then the dynamically generated reference
      // tables, then (optionally) the user's own request.
      const sections = [
        SECTION_INTRO,
        SECTION_FILE_FORMAT,
        SECTION_KEYSTROKE_SYNTAX,
        SECTION_UNBINDING,
        SECTION_INTERACTION,
        SECTION_COMMON_PATTERNS,
        SECTION_BEHAVIORAL_RULES,
        SECTION_DOCTOR,
        `## Reserved Shortcuts\n\n${generateReservedShortcuts()}`,
        `## Available Contexts\n\n${generateContextsTable()}`,
        `## Available Actions\n\n${generateActionsTable()}`,
      ]
      if (args) {
        sections.push(`## User Request\n\n${args}`)
      }
      return [{ type: 'text', text: sections.join('\n\n') }]
    },
  })
}
/**
 * Build a markdown table from headers and rows.
 *
 * Cell text is escaped so a literal `|` inside a cell cannot break the
 * column layout (previously such a cell silently split into extra columns).
 * Output for pipe-free input is unchanged.
 *
 * @param headers - Column titles; also determines the column count.
 * @param rows - Table body; each row should supply one cell per header.
 * @returns The rendered table, rows separated by `\n`.
 */
function markdownTable(headers: string[], rows: string[][]): string {
  // A raw "|" inside a cell would be parsed as a column delimiter.
  const escapeCell = (cell: string): string => cell.replace(/\|/g, '\\|')
  const renderRow = (cells: string[]): string =>
    `| ${cells.map(escapeCell).join(' | ')} |`
  const separator = headers.map(() => '---')
  return [
    renderRow(headers),
    `| ${separator.join(' | ')} |`,
    ...rows.map(renderRow),
  ].join('\n')
}
+92
View File
@@ -0,0 +1,92 @@
import {
CRON_CREATE_TOOL_NAME,
CRON_DELETE_TOOL_NAME,
DEFAULT_MAX_AGE_DAYS,
isKairosCronEnabled,
} from '../../tools/ScheduleCronTool/prompt.js'
import { registerBundledSkill } from '../bundledSkills.js'
// Interval assumed when the user gives a prompt but no cadence.
const DEFAULT_INTERVAL = '10m'
// Shown verbatim when /loop is invoked with no arguments at all.
const USAGE_MESSAGE = [
  'Usage: /loop [interval] <prompt>',
  'Run a prompt or slash command on a recurring interval.',
  'Intervals: Ns, Nm, Nh, Nd (e.g. 5m, 30m, 2h, 1d). Minimum granularity is 1 minute.',
  `If no interval is specified, defaults to ${DEFAULT_INTERVAL}.`,
  'Examples:',
  '  /loop 5m /babysit-prs',
  '  /loop 30m check the deploy',
  '  /loop 1h /standup 1',
  `  /loop check the deploy (defaults to ${DEFAULT_INTERVAL})`,
  '  /loop check the deploy every 20m',
].join('\n')
function buildPrompt(args: string): string {
return `# /loop — schedule a recurring prompt
Parse the input below into \`[interval] <prompt…>\` and schedule it with ${CRON_CREATE_TOOL_NAME}.
## Parsing (in priority order)
1. **Leading token**: if the first whitespace-delimited token matches \`^\\d+[smhd]$\` (e.g. \`5m\`, \`2h\`), that's the interval; the rest is the prompt.
2. **Trailing "every" clause**: otherwise, if the input ends with \`every <N><unit>\` or \`every <N> <unit-word>\` (e.g. \`every 20m\`, \`every 5 minutes\`, \`every 2 hours\`), extract that as the interval and strip it from the prompt. Only match when what follows "every" is a time expression — \`check every PR\` has no interval.
3. **Default**: otherwise, interval is \`${DEFAULT_INTERVAL}\` and the entire input is the prompt.
If the resulting prompt is empty, show usage \`/loop [interval] <prompt>\` and stop — do not call ${CRON_CREATE_TOOL_NAME}.
Examples:
- \`5m /babysit-prs\` → interval \`5m\`, prompt \`/babysit-prs\` (rule 1)
- \`check the deploy every 20m\` → interval \`20m\`, prompt \`check the deploy\` (rule 2)
- \`run tests every 5 minutes\` → interval \`5m\`, prompt \`run tests\` (rule 2)
- \`check the deploy\` → interval \`${DEFAULT_INTERVAL}\`, prompt \`check the deploy\` (rule 3)
- \`check every PR\` → interval \`${DEFAULT_INTERVAL}\`, prompt \`check every PR\` (rule 3 — "every" not followed by time)
- \`5m\` → empty prompt → show usage
## Interval → cron
Supported suffixes: \`s\` (seconds, rounded up to nearest minute, min 1), \`m\` (minutes), \`h\` (hours), \`d\` (days). Convert:
| Interval pattern | Cron expression | Notes |
|-----------------------|---------------------|------------------------------------------|
| \`Nm\` where N ≤ 59 | \`*/N * * * *\` | every N minutes |
| \`Nm\` where N ≥ 60 | \`0 */H * * *\` | round to hours (H = N/60, must divide 24)|
| \`Nh\` where N ≤ 23 | \`0 */N * * *\` | every N hours |
| \`Nd\` | \`0 0 */N * *\` | every N days at midnight local |
| \`Ns\` | treat as \`ceil(N/60)m\` | cron minimum granularity is 1 minute |
**If the interval doesn't cleanly divide its unit** (e.g. \`7m\`\`*/7 * * * *\` gives uneven gaps at :56→:00; \`90m\` → 1.5h which cron can't express), pick the nearest clean interval and tell the user what you rounded to before scheduling.
## Action
1. Call ${CRON_CREATE_TOOL_NAME} with:
- \`cron\`: the expression from the table above
- \`prompt\`: the parsed prompt from above, verbatim (slash commands are passed through unchanged)
- \`recurring\`: \`true\`
2. Briefly confirm: what's scheduled, the cron expression, the human-readable cadence, that recurring tasks auto-expire after ${DEFAULT_MAX_AGE_DAYS} days, and that they can cancel sooner with ${CRON_DELETE_TOOL_NAME} (include the job ID).
3. **Then immediately execute the parsed prompt now** — don't wait for the first cron fire. If it's a slash command, invoke it via the Skill tool; otherwise act on it directly.
## Input
${args}`
}
/**
 * Register the /loop bundled skill (gated on the Kairos cron feature).
 * Empty input yields the usage text instead of a scheduling prompt.
 */
export function registerLoopSkill(): void {
  registerBundledSkill({
    name: 'loop',
    description:
      'Run a prompt or slash command on a recurring interval (e.g. /loop 5m /foo, defaults to 10m)',
    whenToUse:
      'When the user wants to set up a recurring task, poll for status, or run something repeatedly on an interval (e.g. "check the deploy every 5 minutes", "keep running /babysit-prs"). Do NOT invoke for one-off tasks.',
    argumentHint: '[interval] <prompt>',
    userInvocable: true,
    isEnabled: isKairosCronEnabled,
    async getPromptForCommand(args) {
      const input = args.trim()
      // No arguments → usage help; otherwise build the scheduling prompt.
      const text = input ? buildPrompt(input) : USAGE_MESSAGE
      return [{ type: 'text', text }]
    },
  })
}
+282
View File
@@ -0,0 +1,282 @@
import { registerBundledSkill } from '../bundledSkills.js'
// Verified 1-token words (tested via API token counting).
// All common English words confirmed to tokenize as single tokens; kept as a
// single space-separated string and split once at module load.
const ONE_TOKEN_WORDS = (
  // Articles & pronouns
  'the a an I you he she it we they me him her us them my your his its our this that what who ' +
  // Common verbs
  'is are was were be been have has had do does did will would can could may might must shall ' +
  'should make made get got go went come came see saw know take think look want use find give ' +
  'tell work call try ask need feel seem leave put ' +
  // Common nouns & adjectives
  'time year day way man thing life hand part place case point fact good new first last long ' +
  'great little own other old right big high small large next early young few public bad same able ' +
  // Prepositions & conjunctions
  'in on at to for of with from by about like through over before between under since without ' +
  'and or but if than because as until while so though both each when where why how ' +
  // Common adverbs
  'not now just more also here there then only very well back still even much too such never ' +
  'again most once off away down out up ' +
  // Tech/common words
  'test code data file line text word number system program set run value name type state end start'
).split(' ')
/**
 * Produce pseudo-random filler prose of roughly `targetTokens` tokens.
 * Every emitted word comes from ONE_TOKEN_WORDS (each ≈1 token), so word
 * count approximates token count. Output is nondeterministic; the sequence
 * of Math.random() draws matches the original implementation exactly.
 */
function generateLoremIpsum(targetTokens: number): string {
  const pieces: string[] = []
  let emitted = 0
  while (emitted < targetTokens) {
    // Each sentence targets 10-20 words.
    const sentenceLength = 10 + Math.floor(Math.random() * 11)
    let wordsInSentence = 0
    for (let i = 0; i < sentenceLength && emitted < targetTokens; i++) {
      const pick = Math.floor(Math.random() * ONE_TOKEN_WORDS.length)
      pieces.push(ONE_TOKEN_WORDS[pick])
      emitted++
      wordsInSentence++
      // Close the sentence at its last word or when the token budget runs out.
      pieces.push(i === sentenceLength - 1 || emitted >= targetTokens ? '. ' : ' ')
    }
    // Paragraph break with ~20% probability after a non-empty sentence.
    if (wordsInSentence > 0 && Math.random() < 0.2 && emitted < targetTokens) {
      pieces.push('\n\n')
    }
  }
  return pieces.join('').trim()
}
/**
 * Registers the ant-only /lorem-ipsum skill, which dumps ~N tokens of
 * pseudo-random filler text into the conversation for long-context testing.
 */
export function registerLoremIpsumSkill(): void {
  // Internal-only utility: never registered for non-ant users.
  if (process.env.USER_TYPE !== 'ant') {
    return
  }
  registerBundledSkill({
    name: 'lorem-ipsum',
    description:
      'Generate filler text for long context testing. Specify token count as argument (e.g., /lorem-ipsum 50000). Outputs approximately the requested number of tokens. Ant-only.',
    argumentHint: '[token_count]',
    userInvocable: true,
    async getPromptForCommand(args) {
      // Explicit radix 10: bare parseInt would accept "0x…"-style input as hex.
      const parsed = parseInt(args, 10)
      if (args && (isNaN(parsed) || parsed <= 0)) {
        return [
          {
            type: 'text',
            text: 'Invalid token count. Please provide a positive number (e.g., /lorem-ipsum 10000).',
          },
        ]
      }
      // Missing argument (parsed is NaN) falls back to 10k tokens.
      const targetTokens = parsed || 10000
      // Cap at 500k tokens for safety
      const cappedTokens = Math.min(targetTokens, 500_000)
      const loremText = generateLoremIpsum(cappedTokens)
      // Dump the lorem ipsum into the conversation, prefixed with a notice
      // when the request exceeded the safety cap.
      const text =
        cappedTokens < targetTokens
          ? `Requested ${targetTokens} tokens, but capped at 500,000 for safety.\n\n${loremText}`
          : loremText
      return [
        {
          type: 'text',
          text,
        },
      ]
    },
  })
}
+82
View File
@@ -0,0 +1,82 @@
import { isAutoMemoryEnabled } from '../../memdir/paths.js'
import { registerBundledSkill } from '../bundledSkills.js'
/**
 * Registers the /remember bundled skill (ant-only, requires auto-memory).
 * The skill asks the model to review all memory layers (auto-memory,
 * CLAUDE.md, CLAUDE.local.md, team memory) and propose promotions and
 * cleanup — explicitly without applying any change before user approval.
 */
export function registerRememberSkill(): void {
  // Internal-only: skip registration entirely for non-ant users.
  if (process.env.USER_TYPE !== 'ant') {
    return
  }
  // Full review playbook handed to the model verbatim when the skill runs.
  const SKILL_PROMPT = `# Memory Review
## Goal
Review the user's memory landscape and produce a clear report of proposed changes, grouped by action type. Do NOT apply changes — present proposals for user approval.
## Steps
### 1. Gather all memory layers
Read CLAUDE.md and CLAUDE.local.md from the project root (if they exist). Your auto-memory content is already in your system prompt — review it there. Note which team memory sections exist, if any.
**Success criteria**: You have the contents of all memory layers and can compare them.
### 2. Classify each auto-memory entry
For each substantive entry in auto-memory, determine the best destination:
| Destination | What belongs there | Examples |
|---|---|---|
| **CLAUDE.md** | Project conventions and instructions for Claude that all contributors should follow | "use bun not npm", "API routes use kebab-case", "test command is bun test", "prefer functional style" |
| **CLAUDE.local.md** | Personal instructions for Claude specific to this user, not applicable to other contributors | "I prefer concise responses", "always explain trade-offs", "don't auto-commit", "run tests before committing" |
| **Team memory** | Org-wide knowledge that applies across repositories (only if team memory is configured) | "deploy PRs go through #deploy-queue", "staging is at staging.internal", "platform team owns infra" |
| **Stay in auto-memory** | Working notes, temporary context, or entries that don't clearly fit elsewhere | Session-specific observations, uncertain patterns |
**Important distinctions:**
- CLAUDE.md and CLAUDE.local.md contain instructions for Claude, not user preferences for external tools (editor theme, IDE keybindings, etc. don't belong in either)
- Workflow practices (PR conventions, merge strategies, branch naming) are ambiguous — ask the user whether they're personal or team-wide
- When unsure, ask rather than guess
**Success criteria**: Each entry has a proposed destination or is flagged as ambiguous.
### 3. Identify cleanup opportunities
Scan across all layers for:
- **Duplicates**: Auto-memory entries already captured in CLAUDE.md or CLAUDE.local.md → propose removing from auto-memory
- **Outdated**: CLAUDE.md or CLAUDE.local.md entries contradicted by newer auto-memory entries → propose updating the older layer
- **Conflicts**: Contradictions between any two layers → propose resolution, noting which is more recent
**Success criteria**: All cross-layer issues identified.
### 4. Present the report
Output a structured report grouped by action type:
1. **Promotions** — entries to move, with destination and rationale
2. **Cleanup** — duplicates, outdated entries, conflicts to resolve
3. **Ambiguous** — entries where you need the user's input on destination
4. **No action needed** — brief note on entries that should stay put
If auto-memory is empty, say so and offer to review CLAUDE.md for cleanup.
**Success criteria**: User can review and approve/reject each proposal individually.
## Rules
- Present ALL proposals before making any changes
- Do NOT modify files without explicit user approval
- Do NOT create new files unless the target doesn't exist yet
- Ask about ambiguous entries — don't guess
`
  registerBundledSkill({
    name: 'remember',
    description:
      'Review auto-memory entries and propose promotions to CLAUDE.md, CLAUDE.local.md, or shared memory. Also detects outdated, conflicting, and duplicate entries across memory layers.',
    whenToUse:
      'Use when the user wants to review, organize, or promote their auto-memory entries. Also useful for cleaning up outdated or conflicting entries across CLAUDE.md, CLAUDE.local.md, and auto-memory.',
    userInvocable: true,
    isEnabled: () => isAutoMemoryEnabled(),
    async getPromptForCommand(args) {
      // Any extra text after "/remember" is appended as user context.
      let prompt = SKILL_PROMPT
      if (args) {
        prompt += `\n## Additional context from user\n\n${args}`
      }
      return [{ type: 'text', text: prompt }]
    },
  })
}
+447
View File
@@ -0,0 +1,447 @@
import { getFeatureValue_CACHED_MAY_BE_STALE } from '../../services/analytics/growthbook.js'
import type { MCPServerConnection } from '../../services/mcp/types.js'
import { isPolicyAllowed } from '../../services/policyLimits/index.js'
import type { ToolUseContext } from '../../Tool.js'
import { ASK_USER_QUESTION_TOOL_NAME } from '../../tools/AskUserQuestionTool/prompt.js'
import { REMOTE_TRIGGER_TOOL_NAME } from '../../tools/RemoteTriggerTool/prompt.js'
import { getClaudeAIOAuthTokens } from '../../utils/auth.js'
import { checkRepoForRemoteAccess } from '../../utils/background/remote/preconditions.js'
import { logForDebugging } from '../../utils/debug.js'
import {
detectCurrentRepositoryWithHost,
parseGitRemote,
} from '../../utils/detectRepository.js'
import { getRemoteUrl } from '../../utils/git.js'
import { jsonStringify } from '../../utils/slowOperations.js'
import {
createDefaultCloudEnvironment,
type EnvironmentResource,
fetchEnvironments,
} from '../../utils/teleport/environments.js'
import { registerBundledSkill } from '../bundledSkills.js'
// Base58 alphabet (Bitcoin-style) used by the tagged ID system.
// Deliberately omits 0, O, I, and l to avoid visually ambiguous characters;
// character position in this string is the digit's value (see taggedIdToUUID).
const BASE58 = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
/**
 * Decode a mcpsrv_ tagged ID to a UUID string.
 * Tagged IDs have format: mcpsrv_01{base58(uuid.int)}
 * where 01 is the version prefix.
 *
 * Returns null for anything that is not a well-formed v01 tagged ID: wrong
 * prefix, unknown version, characters outside the base58 alphabet, or a
 * decoded value that does not fit in 128 bits. (Previously the version was
 * skipped blindly and oversized values produced a garbled "UUID" because
 * padStart never truncates.)
 *
 * TODO(public-ship): Before shipping publicly, the /v1/mcp_servers endpoint
 * should return the raw UUID directly so we don't need this client-side decoding.
 * The tagged ID format is an internal implementation detail that could change.
 */
function taggedIdToUUID(taggedId: string): string | null {
  const prefix = 'mcpsrv_'
  if (!taggedId.startsWith(prefix)) {
    return null
  }
  const rest = taggedId.slice(prefix.length)
  // Version prefix is 2 chars; only "01" is understood here.
  if (rest.slice(0, 2) !== '01') {
    return null
  }
  const base58Data = rest.slice(2)
  // Decode base58 to bigint
  let n = 0n
  for (const c of base58Data) {
    const idx = BASE58.indexOf(c)
    if (idx === -1) {
      return null
    }
    n = n * 58n + BigInt(idx)
  }
  // A UUID is exactly 128 bits; anything larger is corrupt or foreign.
  if (n >= 1n << 128n) {
    return null
  }
  // Convert to UUID hex string
  const hex = n.toString(16).padStart(32, '0')
  return `${hex.slice(0, 8)}-${hex.slice(8, 12)}-${hex.slice(12, 16)}-${hex.slice(16, 20)}-${hex.slice(20, 32)}`
}
/**
 * A connected claude.ai MCP connector surfaced to the prompt, with its
 * tagged server ID already decoded to a plain UUID (see taggedIdToUUID).
 */
type ConnectorInfo = {
  uuid: string
  name: string
  url: string
}
/**
 * Collect the claude.ai-proxied MCP connectors that are currently connected,
 * decoding each tagged server ID into a plain UUID. Connections whose ID
 * fails to decode are skipped.
 */
function getConnectedClaudeAIConnectors(
  mcpClients: MCPServerConnection[],
): ConnectorInfo[] {
  const found: ConnectorInfo[] = []
  for (const client of mcpClients) {
    if (
      client.type !== 'connected' ||
      client.config.type !== 'claudeai-proxy'
    ) {
      continue
    }
    const uuid = taggedIdToUUID(client.config.id)
    if (uuid !== null) {
      found.push({ uuid, name: client.name, url: client.config.url })
    }
  }
  return found
}
/**
 * Normalize a connector display name into an MCP-safe identifier:
 * strips a leading "claude.ai"-style brand prefix, maps every character
 * outside [a-zA-Z0-9_-] to "-", collapses runs of "-", and trims hyphens
 * from both ends.
 */
function sanitizeConnectorName(name: string): string {
  const withoutBrand = name.replace(/^claude[.\s-]ai[.\s-]/i, '')
  const hyphenated = withoutBrand.replace(/[^a-zA-Z0-9_-]/g, '-')
  const collapsed = hyphenated.replace(/-+/g, '-')
  return collapsed.replace(/^-|-$/g, '')
}
/**
 * Render the connector list for the prompt, one bullet per connector.
 * Falls back to a pointer at claude.ai connector settings when nothing
 * is connected.
 */
function formatConnectorsInfo(connectors: ConnectorInfo[]): string {
  if (connectors.length === 0) {
    return 'No connected MCP connectors found. The user may need to connect servers at https://claude.ai/settings/connectors'
  }
  const bullets = connectors.map(
    c =>
      `- ${c.name} (connector_uuid: ${c.uuid}, name: ${sanitizeConnectorName(c.name)}, url: ${c.url})`,
  )
  return ['Connected connectors (available for triggers):', ...bullets].join(
    '\n',
  )
}
// Opening question for the no-args flow; buildPrompt prepends setup notes
// to it when any were collected.
const BASE_QUESTION = 'What would you like to do with scheduled remote agents?'
/**
 * Formats setup notes as a bulleted "Heads-up" block. Shared between the
 * initial AskUserQuestion dialog text (no-args path) and the prompt-body
 * section (args path) so notes are never silently dropped.
 */
function formatSetupNotes(notes: string[]): string {
  const bulletList = notes.map(note => `- ${note}`).join('\n')
  return `⚠ Heads-up:\n${bulletList}`
}
async function getCurrentRepoHttpsUrl(): Promise<string | null> {
const remoteUrl = await getRemoteUrl()
if (!remoteUrl) {
return null
}
const parsed = parseGitRemote(remoteUrl)
if (!parsed) {
return null
}
return `https://${parsed.host}/${parsed.owner}/${parsed.name}`
}
/**
 * Assemble the complete /schedule prompt handed to the model.
 *
 * Two entry modes: with userArgs the AskUserQuestion opener is skipped and
 * setup notes move into a "Setup Notes" body section; without userArgs the
 * notes are embedded in the opening question itself.
 *
 * @param opts.userTimezone - IANA timezone used for cron/UTC conversion guidance.
 * @param opts.connectorsInfo - Preformatted list of connected MCP connectors.
 * @param opts.gitRepoUrl - Normalized https URL of the cwd repo, or null.
 * @param opts.environmentsInfo - Preformatted list of available environments.
 * @param opts.createdEnvironment - Environment auto-created when the user had none.
 * @param opts.setupNotes - Soft warnings (missing repo/app/connectors) to surface.
 * @param opts.needsGitHubAccessReminder - Add the GitHub repo-access reminder note.
 * @param opts.userArgs - Raw user request text; empty when invoked without args.
 */
function buildPrompt(opts: {
  userTimezone: string
  connectorsInfo: string
  gitRepoUrl: string | null
  environmentsInfo: string
  createdEnvironment: EnvironmentResource | null
  setupNotes: string[]
  needsGitHubAccessReminder: boolean
  userArgs: string
}): string {
  const {
    userTimezone,
    connectorsInfo,
    gitRepoUrl,
    environmentsInfo,
    createdEnvironment,
    setupNotes,
    needsGitHubAccessReminder,
    userArgs,
  } = opts
  // When the user passes args, the initial AskUserQuestion dialog is skipped.
  // Setup notes must surface in the prompt body instead, otherwise they're
  // computed and silently discarded (regression vs. the old hard-block).
  const setupNotesSection =
    userArgs && setupNotes.length > 0
      ? `\n## Setup Notes\n\n${formatSetupNotes(setupNotes)}\n`
      : ''
  // No-args path: the notes ride along inside the opening question instead.
  const initialQuestion =
    setupNotes.length > 0
      ? `${formatSetupNotes(setupNotes)}\n\n${BASE_QUESTION}`
      : BASE_QUESTION
  // With args: jump straight to the matching workflow; without args: force a
  // single AskUserQuestion call using the exact question text above.
  const firstStep = userArgs
    ? `The user has already told you what they want (see User Request at the bottom). Skip the initial question and go directly to the matching workflow.`
    : `Your FIRST action must be a single ${ASK_USER_QUESTION_TOOL_NAME} tool call (no preamble). Use this EXACT string for the \`question\` field — do not paraphrase or shorten it:
${jsonStringify(initialQuestion)}
Set \`header: "Action"\` and offer the four actions (create/list/update/run) as options. After the user picks, follow the matching workflow below.`
  return `# Schedule Remote Agents
You are helping the user schedule, update, list, or run **remote** Claude Code agents. These are NOT local cron jobs — each trigger spawns a fully isolated remote session (CCR) in Anthropic's cloud infrastructure on a cron schedule. The agent runs in a sandboxed environment with its own git checkout, tools, and optional MCP connections.
## First Step
${firstStep}
${setupNotesSection}
## What You Can Do
Use the \`${REMOTE_TRIGGER_TOOL_NAME}\` tool (load it first with \`ToolSearch select:${REMOTE_TRIGGER_TOOL_NAME}\`; auth is handled in-process — do not use curl):
- \`{action: "list"}\` — list all triggers
- \`{action: "get", trigger_id: "..."}\` — fetch one trigger
- \`{action: "create", body: {...}}\` — create a trigger
- \`{action: "update", trigger_id: "...", body: {...}}\` — partial update
- \`{action: "run", trigger_id: "..."}\` — run a trigger now
You CANNOT delete triggers. If the user asks to delete, direct them to: https://claude.ai/code/scheduled
## Create body shape
\`\`\`json
{
  "name": "AGENT_NAME",
  "cron_expression": "CRON_EXPR",
  "enabled": true,
  "job_config": {
    "ccr": {
      "environment_id": "ENVIRONMENT_ID",
      "session_context": {
        "model": "claude-sonnet-4-6",
        "sources": [
          {"git_repository": {"url": "${gitRepoUrl || 'https://github.com/ORG/REPO'}"}}
        ],
        "allowed_tools": ["Bash", "Read", "Write", "Edit", "Glob", "Grep"]
      },
      "events": [
        {"data": {
          "uuid": "<lowercase v4 uuid>",
          "session_id": "",
          "type": "user",
          "parent_tool_use_id": null,
          "message": {"content": "PROMPT_HERE", "role": "user"}
        }}
      ]
    }
  }
}
\`\`\`
Generate a fresh lowercase UUID for \`events[].data.uuid\` yourself.
## Available MCP Connectors
These are the user's currently connected claude.ai MCP connectors:
${connectorsInfo}
When attaching connectors to a trigger, use the \`connector_uuid\` and \`name\` shown above (the name is already sanitized to only contain letters, numbers, hyphens, and underscores), and the connector's URL. The \`name\` field in \`mcp_connections\` must only contain \`[a-zA-Z0-9_-]\` — dots and spaces are NOT allowed.
**Important:** Infer what services the agent needs from the user's description. For example, if they say "check Datadog and Slack me errors," the agent needs both Datadog and Slack connectors. Cross-reference against the list above and warn if any required service isn't connected. If a needed connector is missing, direct the user to https://claude.ai/settings/connectors to connect it first.
## Environments
Every trigger requires an \`environment_id\` in the job config. This determines where the remote agent runs. Ask the user which environment to use.
${environmentsInfo}
Use the \`id\` value as the \`environment_id\` in \`job_config.ccr.environment_id\`.
${createdEnvironment ? `\n**Note:** A new environment \`${createdEnvironment.name}\` (id: \`${createdEnvironment.environment_id}\`) was just created for the user because they had none. Use this id for \`job_config.ccr.environment_id\` and mention the creation when you confirm the trigger config.\n` : ''}
## API Field Reference
### Create Trigger — Required Fields
- \`name\` (string) — A descriptive name
- \`cron_expression\` (string) — 5-field cron. **Minimum interval is 1 hour.**
- \`job_config\` (object) — Session configuration (see structure above)
### Create Trigger — Optional Fields
- \`enabled\` (boolean, default: true)
- \`mcp_connections\` (array) — MCP servers to attach:
\`\`\`json
[{"connector_uuid": "uuid", "name": "server-name", "url": "https://..."}]
\`\`\`
### Update Trigger — Optional Fields
All fields optional (partial update):
- \`name\`, \`cron_expression\`, \`enabled\`, \`job_config\`
- \`mcp_connections\` — Replace MCP connections
- \`clear_mcp_connections\` (boolean) — Remove all MCP connections
### Cron Expression Examples
The user's local timezone is **${userTimezone}**. Cron expressions are always in UTC. When the user says a local time, convert it to UTC for the cron expression but confirm with them: "9am ${userTimezone} = Xam UTC, so the cron would be \`0 X * * 1-5\`."
- \`0 9 * * 1-5\` — Every weekday at 9am **UTC**
- \`0 */2 * * *\` — Every 2 hours
- \`0 0 * * *\` — Daily at midnight **UTC**
- \`30 14 * * 1\` — Every Monday at 2:30pm **UTC**
- \`0 8 1 * *\` — First of every month at 8am **UTC**
Minimum interval is 1 hour. \`*/30 * * * *\` will be rejected.
## Workflow
### CREATE a new trigger:
1. **Understand the goal** — Ask what they want the remote agent to do. What repo(s)? What task? Remind them that the agent runs remotely — it won't have access to their local machine, local files, or local environment variables.
2. **Craft the prompt** — Help them write an effective agent prompt. Good prompts are:
- Specific about what to do and what success looks like
- Clear about which files/areas to focus on
- Explicit about what actions to take (open PRs, commit, just analyze, etc.)
3. **Set the schedule** — Ask when and how often. The user's timezone is ${userTimezone}. When they say a time (e.g., "every morning at 9am"), assume they mean their local time and convert to UTC for the cron expression. Always confirm the conversion: "9am ${userTimezone} = Xam UTC."
4. **Choose the model** — Default to \`claude-sonnet-4-6\`. Tell the user which model you're defaulting to and ask if they want a different one.
5. **Validate connections** — Infer what services the agent will need from the user's description. For example, if they say "check Datadog and Slack me errors," the agent needs both Datadog and Slack MCP connectors. Cross-reference with the connectors list above. If any are missing, warn the user and link them to https://claude.ai/settings/connectors to connect first.${gitRepoUrl ? ` The default git repo is already set to \`${gitRepoUrl}\`. Ask the user if this is the right repo or if they need a different one.` : ' Ask which git repos the remote agent needs cloned into its environment.'}
6. **Review and confirm** — Show the full configuration before creating. Let them adjust.
7. **Create it** \u2014 Call \`${REMOTE_TRIGGER_TOOL_NAME}\` with \`action: "create"\` and show the result. The response includes the trigger ID. Always output a link at the end: \`https://claude.ai/code/scheduled/{TRIGGER_ID}\`
### UPDATE a trigger:
1. List triggers first so they can pick one
2. Ask what they want to change
3. Show current vs proposed value
4. Confirm and update
### LIST triggers:
1. Fetch and display in a readable format
2. Show: name, schedule (human-readable), enabled/disabled, next run, repo(s)
### RUN NOW:
1. List triggers if they haven't specified which one
2. Confirm which trigger
3. Execute and confirm
## Important Notes
- These are REMOTE agents — they run in Anthropic's cloud, not on the user's machine. They cannot access local files, local services, or local environment variables.
- Always convert cron to human-readable when displaying
- Default to \`enabled: true\` unless user says otherwise
- Accept GitHub URLs in any format (https://github.com/org/repo, org/repo, etc.) and normalize to the full HTTPS URL (without .git suffix)
- The prompt is the most important part — spend time getting it right. The remote agent starts with zero context, so the prompt must be self-contained.
- To delete a trigger, direct users to https://claude.ai/code/scheduled
${needsGitHubAccessReminder ? `- If the user's request seems to require GitHub repo access (e.g. cloning a repo, opening PRs, reading code), remind them that ${getFeatureValue_CACHED_MAY_BE_STALE('tengu_cobalt_lantern', false) ? "they should run /web-setup to connect their GitHub account (or install the Claude GitHub App on the repo as an alternative) — otherwise the remote agent won't be able to access it" : "they need the Claude GitHub App installed on the repo — otherwise the remote agent won't be able to access it"}.` : ''}
${userArgs ? `\n## User Request\n\nThe user said: "${userArgs}"\n\nStart by understanding their intent and working through the appropriate workflow above.` : ''}`
}
/**
 * Registers the /schedule bundled skill for managing remote agent triggers.
 * Gated on the tengu_surreal_dali feature flag and the remote-sessions
 * policy. getPromptForCommand runs a preflight (claude.ai auth, environment
 * fetch with auto-create fallback, soft repo/connector checks) and then
 * emits the prompt assembled by buildPrompt().
 */
export function registerScheduleRemoteAgentsSkill(): void {
  registerBundledSkill({
    name: 'schedule',
    description:
      'Create, update, list, or run scheduled remote agents (triggers) that execute on a cron schedule.',
    whenToUse:
      'When the user wants to schedule a recurring remote agent, set up automated tasks, create a cron job for Claude Code, or manage their scheduled agents/triggers.',
    userInvocable: true,
    isEnabled: () =>
      getFeatureValue_CACHED_MAY_BE_STALE('tengu_surreal_dali', false) &&
      isPolicyAllowed('allow_remote_sessions'),
    allowedTools: [REMOTE_TRIGGER_TOOL_NAME, ASK_USER_QUESTION_TOOL_NAME],
    async getPromptForCommand(args: string, context: ToolUseContext) {
      // Hard requirement: remote triggers need a claude.ai OAuth session.
      if (!getClaudeAIOAuthTokens()?.accessToken) {
        return [
          {
            type: 'text',
            text: 'You need to authenticate with a claude.ai account first. API accounts are not supported. Run /login, then try /schedule again.',
          },
        ]
      }
      // Hard requirement: the environment list must be reachable.
      let environments: EnvironmentResource[]
      try {
        environments = await fetchEnvironments()
      } catch (err) {
        logForDebugging(`[schedule] Failed to fetch environments: ${err}`, {
          level: 'warn',
        })
        return [
          {
            type: 'text',
            text: "We're having trouble connecting with your remote claude.ai account to set up a scheduled task. Please try /schedule again in a few minutes.",
          },
        ]
      }
      // First-time users have no environment; try to create a default one so
      // the flow can continue, and remember it so the prompt can mention it.
      let createdEnvironment: EnvironmentResource | null = null
      if (environments.length === 0) {
        try {
          createdEnvironment = await createDefaultCloudEnvironment(
            'claude-code-default',
          )
          environments = [createdEnvironment]
        } catch (err) {
          logForDebugging(`[schedule] Failed to create environment: ${err}`, {
            level: 'warn',
          })
          return [
            {
              type: 'text',
              text: 'No remote environments found, and we could not create one automatically. Visit https://claude.ai/code to set one up, then run /schedule again.',
            },
          ]
        }
      }
      // Soft setup checks — collected as upfront notes embedded in the initial
      // AskUserQuestion dialog. Never block — triggers don't require a git
      // source (e.g., Slack-only polls), and the trigger's sources may point
      // at a different repo than cwd anyway.
      const setupNotes: string[] = []
      let needsGitHubAccessReminder = false
      const repo = await detectCurrentRepositoryWithHost()
      if (repo === null) {
        setupNotes.push(
          `Not in a git repo — you'll need to specify a repo URL manually (or skip repos entirely).`,
        )
      } else if (repo.host === 'github.com') {
        // github.com repos: check whether the remote side can actually see
        // the repo, and tailor the remediation to the web-setup flag.
        const { hasAccess } = await checkRepoForRemoteAccess(
          repo.owner,
          repo.name,
        )
        if (!hasAccess) {
          needsGitHubAccessReminder = true
          const webSetupEnabled = getFeatureValue_CACHED_MAY_BE_STALE(
            'tengu_cobalt_lantern',
            false,
          )
          const msg = webSetupEnabled
            ? `GitHub not connected for ${repo.owner}/${repo.name} \u2014 run /web-setup to sync your GitHub credentials, or install the Claude GitHub App at https://claude.ai/code/onboarding?magic=github-app-setup.`
            : `Claude GitHub App not installed on ${repo.owner}/${repo.name} \u2014 install at https://claude.ai/code/onboarding?magic=github-app-setup if your trigger needs this repo.`
          setupNotes.push(msg)
        }
      }
      // Non-github.com hosts (GHE/GitLab/etc.): silently skip. The GitHub
      // App check is github.com-specific, and the "not in a git repo" note
      // would be factually wrong — getCurrentRepoHttpsUrl() below will
      // still populate gitRepoUrl with the GHE URL.
      const connectors = getConnectedClaudeAIConnectors(
        context.options.mcpClients,
      )
      if (connectors.length === 0) {
        setupNotes.push(
          `No MCP connectors — connect at https://claude.ai/settings/connectors if needed.`,
        )
      }
      // Assemble everything buildPrompt needs and emit the final prompt.
      const userTimezone = Intl.DateTimeFormat().resolvedOptions().timeZone
      const connectorsInfo = formatConnectorsInfo(connectors)
      const gitRepoUrl = await getCurrentRepoHttpsUrl()
      const lines = ['Available environments:']
      for (const env of environments) {
        lines.push(
          `- ${env.name} (id: ${env.environment_id}, kind: ${env.kind})`,
        )
      }
      const environmentsInfo = lines.join('\n')
      const prompt = buildPrompt({
        userTimezone,
        connectorsInfo,
        gitRepoUrl,
        environmentsInfo,
        createdEnvironment,
        setupNotes,
        needsGitHubAccessReminder,
        userArgs: args,
      })
      return [{ type: 'text', text: prompt }]
    },
  })
}
+69
View File
@@ -0,0 +1,69 @@
import { AGENT_TOOL_NAME } from '../../tools/AgentTool/constants.js'
import { registerBundledSkill } from '../bundledSkills.js'
// Prompt for the bundled "simplify" skill: identify the current diff, fan
// out three parallel review agents (reuse, quality, efficiency), then fix
// the aggregated findings. Optional user args are appended by the skill's
// getPromptForCommand as an "Additional Focus" section.
const SIMPLIFY_PROMPT = `# Simplify: Code Review and Cleanup
Review all changed files for reuse, quality, and efficiency. Fix any issues found.
## Phase 1: Identify Changes
Run \`git diff\` (or \`git diff HEAD\` if there are staged changes) to see what changed. If there are no git changes, review the most recently modified files that the user mentioned or that you edited earlier in this conversation.
## Phase 2: Launch Three Review Agents in Parallel
Use the ${AGENT_TOOL_NAME} tool to launch all three agents concurrently in a single message. Pass each agent the full diff so it has the complete context.
### Agent 1: Code Reuse Review
For each change:
1. **Search for existing utilities and helpers** that could replace newly written code. Look for similar patterns elsewhere in the codebase — common locations are utility directories, shared modules, and files adjacent to the changed ones.
2. **Flag any new function that duplicates existing functionality.** Suggest the existing function to use instead.
3. **Flag any inline logic that could use an existing utility** — hand-rolled string manipulation, manual path handling, custom environment checks, ad-hoc type guards, and similar patterns are common candidates.
### Agent 2: Code Quality Review
Review the same changes for hacky patterns:
1. **Redundant state**: state that duplicates existing state, cached values that could be derived, observers/effects that could be direct calls
2. **Parameter sprawl**: adding new parameters to a function instead of generalizing or restructuring existing ones
3. **Copy-paste with slight variation**: near-duplicate code blocks that should be unified with a shared abstraction
4. **Leaky abstractions**: exposing internal details that should be encapsulated, or breaking existing abstraction boundaries
5. **Stringly-typed code**: using raw strings where constants, enums (string unions), or branded types already exist in the codebase
6. **Unnecessary JSX nesting**: wrapper Boxes/elements that add no layout value — check if inner component props (flexShrink, alignItems, etc.) already provide the needed behavior
7. **Unnecessary comments**: comments explaining WHAT the code does (well-named identifiers already do that), narrating the change, or referencing the task/caller — delete; keep only non-obvious WHY (hidden constraints, subtle invariants, workarounds)
### Agent 3: Efficiency Review
Review the same changes for efficiency:
1. **Unnecessary work**: redundant computations, repeated file reads, duplicate network/API calls, N+1 patterns
2. **Missed concurrency**: independent operations run sequentially when they could run in parallel
3. **Hot-path bloat**: new blocking work added to startup or per-request/per-render hot paths
4. **Recurring no-op updates**: state/store updates inside polling loops, intervals, or event handlers that fire unconditionally — add a change-detection guard so downstream consumers aren't notified when nothing changed. Also: if a wrapper function takes an updater/reducer callback, verify it honors same-reference returns (or whatever the "no change" signal is) — otherwise callers' early-return no-ops are silently defeated
5. **Unnecessary existence checks**: pre-checking file/resource existence before operating (TOCTOU anti-pattern) — operate directly and handle the error
6. **Memory**: unbounded data structures, missing cleanup, event listener leaks
7. **Overly broad operations**: reading entire files when only a portion is needed, loading all items when filtering for one
## Phase 3: Fix Issues
Wait for all three agents to complete. Aggregate their findings and fix each issue directly. If a finding is a false positive or not worth addressing, note it and move on — do not argue with the finding, just skip it.
When done, briefly summarize what was fixed (or confirm the code was already clean).
`
/**
 * Registers the bundled "simplify" skill, which reviews the current diff
 * with three parallel agents and fixes what they find.
 *
 * Optional invocation args are appended to the base prompt as an extra
 * "Additional Focus" section; without args the base prompt is used as-is.
 */
export function registerSimplifySkill(): void {
  registerBundledSkill({
    name: 'simplify',
    description:
      'Review changed code for reuse, quality, and efficiency, then fix any issues found.',
    userInvocable: true,
    async getPromptForCommand(args) {
      const focusSuffix = args ? `\n\n## Additional Focus\n\n${args}` : ''
      return [{ type: 'text', text: SIMPLIFY_PROMPT + focusSuffix }]
    },
  })
}
+197
View File
@@ -0,0 +1,197 @@
import { getSessionMemoryContent } from '../../services/SessionMemory/sessionMemoryUtils.js'
import type { Message } from '../../types/message.js'
import { getMessagesAfterCompactBoundary } from '../../utils/messages.js'
import { registerBundledSkill } from '../bundledSkills.js'
/**
 * Collects the plain-text content of every user message, in order.
 *
 * String contents are taken verbatim; block-array contents keep only
 * `text` blocks, joined with newlines. Messages whose combined text is
 * empty after trimming are dropped from the result.
 */
function extractUserMessages(messages: Message[]): string[] {
  const collected: string[] = []
  for (const msg of messages) {
    if (msg.type !== 'user') continue
    const content = msg.message.content
    let text: string
    if (typeof content === 'string') {
      text = content
    } else {
      const parts: string[] = []
      for (const block of content) {
        if (block.type === 'text') parts.push(block.text)
      }
      text = parts.join('\n')
    }
    if (text.trim().length > 0) collected.push(text)
  }
  return collected
}
// Prompt for the ant-only "skillify" skill. The {{sessionMemory}},
// {{userMessages}}, and {{userDescriptionBlock}} placeholders are
// substituted at invocation time by getPromptForCommand; the other
// {{...}} tokens inside the SKILL.md template below are deliberately
// left in place for the model to fill in when writing the skill file.
const SKILLIFY_PROMPT = `# Skillify {{userDescriptionBlock}}
You are capturing this session's repeatable process as a reusable skill.
## Your Session Context
Here is the session memory summary:
<session_memory>
{{sessionMemory}}
</session_memory>
Here are the user's messages during this session. Pay attention to how they steered the process, to help capture their detailed preferences in the skill:
<user_messages>
{{userMessages}}
</user_messages>
## Your Task
### Step 1: Analyze the Session
Before asking any questions, analyze the session to identify:
- What repeatable process was performed
- What the inputs/parameters were
- The distinct steps (in order)
- The success artifacts/criteria (e.g. not just "writing code," but "an open PR with CI fully passing") for each step
- Where the user corrected or steered you
- What tools and permissions were needed
- What agents were used
- What the goals and success artifacts were
### Step 2: Interview the User
You will use the AskUserQuestion to understand what the user wants to automate. Important notes:
- Use AskUserQuestion for ALL questions! Never ask questions via plain text.
- For each round, iterate as much as needed until the user is happy.
- The user always has a freeform "Other" option to type edits or feedback -- do NOT add your own "Needs tweaking" or "I'll provide edits" option. Just offer the substantive choices.
**Round 1: High level confirmation**
- Suggest a name and description for the skill based on your analysis. Ask the user to confirm or rename.
- Suggest high-level goal(s) and specific success criteria for the skill.
**Round 2: More details**
- Present the high-level steps you identified as a numbered list. Tell the user you will dig into the detail in the next round.
- If you think the skill will require arguments, suggest arguments based on what you observed. Make sure you understand what someone would need to provide.
- If it's not clear, ask if this skill should run inline (in the current conversation) or forked (as a sub-agent with its own context). Forked is better for self-contained tasks that don't need mid-process user input; inline is better when the user wants to steer mid-process.
- Ask where the skill should be saved. Suggest a default based on context (repo-specific workflows → repo, cross-repo personal workflows → user). Options:
- **This repo** (\`.claude/skills/<name>/SKILL.md\`) — for workflows specific to this project
- **Personal** (\`~/.claude/skills/<name>/SKILL.md\`) — follows you across all repos
**Round 3: Breaking down each step**
For each major step, if it's not glaringly obvious, ask:
- What does this step produce that later steps need? (data, artifacts, IDs)
- What proves that this step succeeded, and that we can move on?
- Should the user be asked to confirm before proceeding? (especially for irreversible actions like merging, sending messages, or destructive operations)
- Are any steps independent and could run in parallel? (e.g., posting to Slack and monitoring CI at the same time)
- How should the skill be executed? (e.g. always use a Task agent to conduct code review, or invoke an agent team for a set of concurrent steps)
- What are the hard constraints or hard preferences? Things that must or must not happen?
You may do multiple rounds of AskUserQuestion here, one round per step, especially if there are more than 3 steps or many clarification questions. Iterate as much as needed.
IMPORTANT: Pay special attention to places where the user corrected you during the session, to help inform your design.
**Round 4: Final questions**
- Confirm when this skill should be invoked, and suggest/confirm trigger phrases too. (e.g. For a cherrypick workflow you could say: Use when the user wants to cherry-pick a PR to a release branch. Examples: 'cherry-pick to release', 'CP this PR', 'hotfix.')
- You can also ask for any other gotchas or things to watch out for, if it's still unclear.
Stop interviewing once you have enough information. IMPORTANT: Don't over-ask for simple processes!
### Step 3: Write the SKILL.md
Create the skill directory and file at the location the user chose in Round 2.
Use this format:
\`\`\`markdown
---
name: {{skill-name}}
description: {{one-line description}}
allowed-tools:
{{list of tool permission patterns observed during session}}
when_to_use: {{detailed description of when Claude should automatically invoke this skill, including trigger phrases and example user messages}}
argument-hint: "{{hint showing argument placeholders}}"
arguments:
{{list of argument names}}
context: {{inline or fork -- omit for inline}}
---
# {{Skill Title}}
Description of skill
## Inputs
- \`$arg_name\`: Description of this input
## Goal
Clearly stated goal for this workflow. Best if you have clearly defined artifacts or criteria for completion.
## Steps
### 1. Step Name
What to do in this step. Be specific and actionable. Include commands when appropriate.
**Success criteria**: ALWAYS include this! This shows that the step is done and we can move on. Can be a list.
IMPORTANT: see the next section below for the per-step annotations you can optionally include for each step.
...
\`\`\`
**Per-step annotations**:
- **Success criteria** is REQUIRED on every step. This helps the model understand what the user expects from their workflow, and when it should have the confidence to move on.
- **Execution**: \`Direct\` (default), \`Task agent\` (straightforward subagents), \`Teammate\` (agent with true parallelism and inter-agent communication), or \`[human]\` (user does it). Only needs specifying if not Direct.
- **Artifacts**: Data this step produces that later steps need (e.g., PR number, commit SHA). Only include if later steps depend on it.
- **Human checkpoint**: When to pause and ask the user before proceeding. Include for irreversible actions (merging, sending messages), error judgment (merge conflicts), or output review.
- **Rules**: Hard rules for the workflow. User corrections during the reference session can be especially useful here.
**Step structure tips:**
- Steps that can run concurrently use sub-numbers: 3a, 3b
- Steps requiring the user to act get \`[human]\` in the title
- Keep simple skills simple -- a 2-step skill doesn't need annotations on every step
**Frontmatter rules:**
- \`allowed-tools\`: Minimum permissions needed (use patterns like \`Bash(gh:*)\` not \`Bash\`)
- \`context\`: Only set \`context: fork\` for self-contained skills that don't need mid-process user input.
- \`when_to_use\` is CRITICAL -- tells the model when to auto-invoke. Start with "Use when..." and include trigger phrases. Example: "Use when the user wants to cherry-pick a PR to a release branch. Examples: 'cherry-pick to release', 'CP this PR', 'hotfix'."
- \`arguments\` and \`argument-hint\`: Only include if the skill takes parameters. Use \`$name\` in the body for substitution.
### Step 4: Confirm and Save
Before writing the file, output the complete SKILL.md content as a yaml code block in your response so the user can review it with proper syntax highlighting. Then ask for confirmation using AskUserQuestion with a simple question like "Does this SKILL.md look good to save?" — do NOT use the body field, keep the question concise.
After writing, tell the user:
- Where the skill was saved
- How to invoke it: \`/{{skill-name}} [arguments]\`
- That they can edit the SKILL.md directly to refine it
`
/**
 * Registers the ant-only "skillify" skill, which interviews the user and
 * captures the current session's repeatable process as a SKILL.md file.
 *
 * No-op unless USER_TYPE is 'ant'. The prompt is built by substituting
 * session memory, the session's user messages, and the optional user
 * description into SKILLIFY_PROMPT's placeholders.
 */
export function registerSkillifySkill(): void {
  if (process.env.USER_TYPE !== 'ant') {
    return
  }
  registerBundledSkill({
    name: 'skillify',
    description:
      "Capture this session's repeatable process into a skill. Call at end of the process you want to capture with an optional description.",
    allowedTools: [
      'Read',
      'Write',
      'Edit',
      'Glob',
      'Grep',
      'AskUserQuestion',
      'Bash(mkdir:*)',
    ],
    userInvocable: true,
    disableModelInvocation: true,
    argumentHint: '[description of the process you want to capture]',
    async getPromptForCommand(args, context) {
      const sessionMemory =
        (await getSessionMemoryContent()) ?? 'No session memory available.'
      const userMessages = extractUserMessages(
        getMessagesAfterCompactBoundary(context.messages),
      )
      const userDescriptionBlock = args
        ? `The user described this process as: "${args}"`
        : ''
      // Use function replacements: a plain-string replacement would have
      // String.replace interpret `$`-patterns ($&, $', $`, $1, ...) inside
      // session memory / user messages / args, silently corrupting the
      // substituted content whenever it contains a dollar sign sequence.
      const prompt = SKILLIFY_PROMPT.replace(
        '{{sessionMemory}}',
        () => sessionMemory,
      )
        .replace('{{userMessages}}', () => userMessages.join('\n\n---\n\n'))
        .replace('{{userDescriptionBlock}}', () => userDescriptionBlock)
      return [{ type: 'text', text: prompt }]
    },
  })
}
+79
View File
@@ -0,0 +1,79 @@
import { registerBundledSkill } from '../bundledSkills.js'
// Prompt for the ant-only "stuck" skill: scan this machine for other
// Claude Code processes that look frozen/slow, diagnose them (CPU, process
// state, RSS, hung children), and post findings to #claude-code-feedback.
// Prompt text contains `ps` commands as instructions for Claude to run,
// not commands this file executes.
// eslint-disable-next-line custom-rules/no-direct-ps-commands
const STUCK_PROMPT = `# /stuck — diagnose frozen/slow Claude Code sessions
The user thinks another Claude Code session on this machine is frozen, stuck, or very slow. Investigate and post a report to #claude-code-feedback.
## What to look for
Scan for other Claude Code processes (excluding the current one — PID is in \`process.pid\` but for shell commands just exclude the PID you see running this prompt). Process names are typically \`claude\` (installed) or \`cli\` (native dev build).
Signs of a stuck session:
- **High CPU (≥90%) sustained** — likely an infinite loop. Sample twice, 1-2s apart, to confirm it's not a transient spike.
- **Process state \`D\` (uninterruptible sleep)** — often an I/O hang. The \`state\` column in \`ps\` output; first character matters (ignore modifiers like \`+\`, \`s\`, \`<\`).
- **Process state \`T\` (stopped)** — user probably hit Ctrl+Z by accident.
- **Process state \`Z\` (zombie)** — parent isn't reaping.
- **Very high RSS (≥4GB)** — possible memory leak making the session sluggish.
- **Stuck child process** — a hung \`git\`, \`node\`, or shell subprocess can freeze the parent. Check \`pgrep -lP <pid>\` for each session.
## Investigation steps
1. **List all Claude Code processes** (macOS/Linux):
\`\`\`
ps -axo pid=,pcpu=,rss=,etime=,state=,comm=,command= | grep -E '(claude|cli)' | grep -v grep
\`\`\`
Filter to rows where \`comm\` is \`claude\` or (\`cli\` AND the command path contains "claude").
2. **For anything suspicious**, gather more context:
- Child processes: \`pgrep -lP <pid>\`
- If high CPU: sample again after 1-2s to confirm it's sustained
- If a child looks hung (e.g., a git command), note its full command line with \`ps -p <child_pid> -o command=\`
- Check the session's debug log if you can infer the session ID: \`~/.claude/debug/<session-id>.txt\` (the last few hundred lines often show what it was doing before hanging)
3. **Consider a stack dump** for a truly frozen process (advanced, optional):
- macOS: \`sample <pid> 3\` gives a 3-second native stack sample
- This is big — only grab it if the process is clearly hung and you want to know *why*
## Report
**Only post to Slack if you actually found something stuck.** If every session looks healthy, tell the user that directly — do not post an all-clear to the channel.
If you did find a stuck/slow session, post to **#claude-code-feedback** (channel ID: \`C07VBSHV7EV\`) using the Slack MCP tool. Use ToolSearch to find \`slack_send_message\` if it's not already loaded.
**Use a two-message structure** to keep the channel scannable:
1. **Top-level message** — one short line: hostname, Claude Code version, and a terse symptom (e.g. "session PID 12345 pegged at 100% CPU for 10min" or "git subprocess hung in D state"). No code blocks, no details.
2. **Thread reply** — the full diagnostic dump. Pass the top-level message's \`ts\` as \`thread_ts\`. Include:
- PID, CPU%, RSS, state, uptime, command line, child processes
- Your diagnosis of what's likely wrong
- Relevant debug log tail or \`sample\` output if you captured it
If Slack MCP isn't available, format the report as a message the user can copy-paste into #claude-code-feedback (and let them know to thread the details themselves).
## Notes
- Don't kill or signal any processes — this is diagnostic only.
- If the user gave an argument (e.g., a specific PID or symptom), focus there first.
`
/**
 * Registers the ant-only "stuck" skill, which diagnoses frozen or slow
 * Claude Code sessions on this machine and reports the findings.
 *
 * No-op unless USER_TYPE is 'ant'. Any invocation args are appended to
 * the base prompt as a "User-provided context" section.
 */
export function registerStuckSkill(): void {
  if (process.env.USER_TYPE !== 'ant') {
    return
  }
  registerBundledSkill({
    name: 'stuck',
    description:
      '[ANT-ONLY] Investigate frozen/stuck/slow Claude Code sessions on this machine and post a diagnostic report to #claude-code-feedback.',
    userInvocable: true,
    async getPromptForCommand(args) {
      const contextSuffix = args
        ? `\n## User-provided context\n\n${args}\n`
        : ''
      return [{ type: 'text', text: STUCK_PROMPT + contextSuffix }]
    },
  })
}
+475
View File
@@ -0,0 +1,475 @@
import { toJSONSchema } from 'zod/v4'
import { SettingsSchema } from '../../utils/settings/types.js'
import { jsonStringify } from '../../utils/slowOperations.js'
import { registerBundledSkill } from '../bundledSkills.js'
/**
* Generate JSON Schema from the settings Zod schema.
* This keeps the skill prompt in sync with the actual types.
*/
function generateSettingsSchema(): string {
const jsonSchema = toJSONSchema(SettingsSchema(), { io: 'input' })
return jsonStringify(jsonSchema, null, 2)
}
// Hand-written settings.json reference (file locations, schema examples,
// permission rule syntax) embedded into the update-config skill prompt.
// Complements the auto-generated JSON Schema from generateSettingsSchema.
const SETTINGS_EXAMPLES_DOCS = `## Settings File Locations
Choose the appropriate file based on scope:
| File | Scope | Git | Use For |
|------|-------|-----|---------|
| \`~/.claude/settings.json\` | Global | N/A | Personal preferences for all projects |
| \`.claude/settings.json\` | Project | Commit | Team-wide hooks, permissions, plugins |
| \`.claude/settings.local.json\` | Project | Gitignore | Personal overrides for this project |
Settings load in order: user → project → local (later overrides earlier).
## Settings Schema Reference
### Permissions
\`\`\`json
{
  "permissions": {
    "allow": ["Bash(npm:*)", "Edit(.claude)", "Read"],
    "deny": ["Bash(rm -rf:*)"],
    "ask": ["Write(/etc/*)"],
    "defaultMode": "default" | "plan" | "acceptEdits" | "dontAsk",
    "additionalDirectories": ["/extra/dir"]
  }
}
\`\`\`
**Permission Rule Syntax:**
- Exact match: \`"Bash(npm run test)"\`
- Prefix wildcard: \`"Bash(git:*)"\` - matches \`git status\`, \`git commit\`, etc.
- Tool only: \`"Read"\` - allows all Read operations
### Environment Variables
\`\`\`json
{
  "env": {
    "DEBUG": "true",
    "MY_API_KEY": "value"
  }
}
\`\`\`
### Model & Agent
\`\`\`json
{
  "model": "sonnet", // or "opus", "haiku", full model ID
  "agent": "agent-name",
  "alwaysThinkingEnabled": true
}
\`\`\`
### Attribution (Commits & PRs)
\`\`\`json
{
  "attribution": {
    "commit": "Custom commit trailer text",
    "pr": "Custom PR description text"
  }
}
\`\`\`
Set \`commit\` or \`pr\` to empty string \`""\` to hide that attribution.
### MCP Server Management
\`\`\`json
{
  "enableAllProjectMcpServers": true,
  "enabledMcpjsonServers": ["server1", "server2"],
  "disabledMcpjsonServers": ["blocked-server"]
}
\`\`\`
### Plugins
\`\`\`json
{
  "enabledPlugins": {
    "formatter@anthropic-tools": true
  }
}
\`\`\`
Plugin syntax: \`plugin-name@source\` where source is \`claude-code-marketplace\`, \`claude-plugins-official\`, or \`builtin\`.
### Other Settings
- \`language\`: Preferred response language (e.g., "japanese")
- \`cleanupPeriodDays\`: Days to keep transcripts (default: 30; 0 disables persistence entirely)
- \`respectGitignore\`: Whether to respect .gitignore (default: true)
- \`spinnerTipsEnabled\`: Show tips in spinner
- \`spinnerVerbs\`: Customize spinner verbs (\`{ "mode": "append" | "replace", "verbs": [...] }\`)
- \`spinnerTipsOverride\`: Override spinner tips (\`{ "excludeDefault": true, "tips": ["Custom tip"] }\`)
- \`syntaxHighlightingDisabled\`: Disable diff highlighting
`
// Note: We keep hand-written examples for common patterns since they're more
// actionable than auto-generated schema docs. The generated schema list
// provides completeness while examples provide clarity.
// Hooks reference (events, hook types, stdin/stdout JSON contracts, common
// patterns) embedded into the update-config skill prompt.
const HOOKS_DOCS = `## Hooks Configuration
Hooks run commands at specific points in Claude Code's lifecycle.
### Hook Structure
\`\`\`json
{
  "hooks": {
    "EVENT_NAME": [
      {
        "matcher": "ToolName|OtherTool",
        "hooks": [
          {
            "type": "command",
            "command": "your-command-here",
            "timeout": 60,
            "statusMessage": "Running..."
          }
        ]
      }
    ]
  }
}
\`\`\`
### Hook Events
| Event | Matcher | Purpose |
|-------|---------|---------|
| PermissionRequest | Tool name | Run before permission prompt |
| PreToolUse | Tool name | Run before tool, can block |
| PostToolUse | Tool name | Run after successful tool |
| PostToolUseFailure | Tool name | Run after tool fails |
| Notification | Notification type | Run on notifications |
| Stop | - | Run when Claude stops (including clear, resume, compact) |
| PreCompact | "manual"/"auto" | Before compaction |
| PostCompact | "manual"/"auto" | After compaction (receives summary) |
| UserPromptSubmit | - | When user submits |
| SessionStart | - | When session starts |
**Common tool matchers:** \`Bash\`, \`Write\`, \`Edit\`, \`Read\`, \`Glob\`, \`Grep\`
### Hook Types
**1. Command Hook** - Runs a shell command:
\`\`\`json
{ "type": "command", "command": "prettier --write $FILE", "timeout": 30 }
\`\`\`
**2. Prompt Hook** - Evaluates a condition with LLM:
\`\`\`json
{ "type": "prompt", "prompt": "Is this safe? $ARGUMENTS" }
\`\`\`
Only available for tool events: PreToolUse, PostToolUse, PermissionRequest.
**3. Agent Hook** - Runs an agent with tools:
\`\`\`json
{ "type": "agent", "prompt": "Verify tests pass: $ARGUMENTS" }
\`\`\`
Only available for tool events: PreToolUse, PostToolUse, PermissionRequest.
### Hook Input (stdin JSON)
\`\`\`json
{
  "session_id": "abc123",
  "tool_name": "Write",
  "tool_input": { "file_path": "/path/to/file.txt", "content": "..." },
  "tool_response": { "success": true } // PostToolUse only
}
\`\`\`
### Hook JSON Output
Hooks can return JSON to control behavior:
\`\`\`json
{
  "systemMessage": "Warning shown to user in UI",
  "continue": false,
  "stopReason": "Message shown when blocking",
  "suppressOutput": false,
  "decision": "block",
  "reason": "Explanation for decision",
  "hookSpecificOutput": {
    "hookEventName": "PostToolUse",
    "additionalContext": "Context injected back to model"
  }
}
\`\`\`
**Fields:**
- \`systemMessage\` - Display a message to the user (all hooks)
- \`continue\` - Set to \`false\` to block/stop (default: true)
- \`stopReason\` - Message shown when \`continue\` is false
- \`suppressOutput\` - Hide stdout from transcript (default: false)
- \`decision\` - "block" for PostToolUse/Stop/UserPromptSubmit hooks (deprecated for PreToolUse, use hookSpecificOutput.permissionDecision instead)
- \`reason\` - Explanation for decision
- \`hookSpecificOutput\` - Event-specific output (must include \`hookEventName\`):
  - \`additionalContext\` - Text injected into model context
  - \`permissionDecision\` - "allow", "deny", or "ask" (PreToolUse only)
  - \`permissionDecisionReason\` - Reason for the permission decision (PreToolUse only)
  - \`updatedInput\` - Modified tool input (PreToolUse only)
### Common Patterns
**Auto-format after writes:**
\`\`\`json
{
  "hooks": {
    "PostToolUse": [{
      "matcher": "Write|Edit",
      "hooks": [{
        "type": "command",
        "command": "jq -r '.tool_response.filePath // .tool_input.file_path' | { read -r f; prettier --write \\"$f\\"; } 2>/dev/null || true"
      }]
    }]
  }
}
\`\`\`
**Log all bash commands:**
\`\`\`json
{
  "hooks": {
    "PreToolUse": [{
      "matcher": "Bash",
      "hooks": [{
        "type": "command",
        "command": "jq -r '.tool_input.command' >> ~/.claude/bash-log.txt"
      }]
    }]
  }
}
\`\`\`
**Stop hook that displays message to user:**
Command must output JSON with \`systemMessage\` field:
\`\`\`bash
# Example command that outputs: {"systemMessage": "Session complete!"}
echo '{"systemMessage": "Session complete!"}'
\`\`\`
**Run tests after code changes:**
\`\`\`json
{
  "hooks": {
    "PostToolUse": [{
      "matcher": "Write|Edit",
      "hooks": [{
        "type": "command",
        "command": "jq -r '.tool_input.file_path // .tool_response.filePath' | grep -E '\\\\.(ts|js)$' && npm test || true"
      }]
    }]
  }
}
\`\`\`
`
// Step-by-step hook-construction checklist (dedup → build → pipe-test →
// write → validate → prove-it-fires → handoff) embedded into the
// update-config skill prompt.
const HOOK_VERIFICATION_FLOW = `## Constructing a Hook (with verification)
Given an event, matcher, target file, and desired behavior, follow this flow. Each step catches a different failure class — a hook that silently does nothing is worse than no hook.
1. **Dedup check.** Read the target file. If a hook already exists on the same event+matcher, show the existing command and ask: keep it, replace it, or add alongside.
2. **Construct the command for THIS project — don't assume.** The hook receives JSON on stdin. Build a command that:
- Extracts any needed payload safely — use \`jq -r\` into a quoted variable or \`{ read -r f; ... "$f"; }\`, NOT unquoted \`| xargs\` (splits on spaces)
- Invokes the underlying tool the way this project runs it (npx/bunx/yarn/pnpm? Makefile target? globally-installed?)
- Skips inputs the tool doesn't handle (formatters often have \`--ignore-unknown\`; if not, guard by extension)
- Stays RAW for now — no \`|| true\`, no stderr suppression. You'll wrap it after the pipe-test passes.
3. **Pipe-test the raw command.** Synthesize the stdin payload the hook will receive and pipe it directly:
- \`Pre|PostToolUse\` on \`Write|Edit\`: \`echo '{"tool_name":"Edit","tool_input":{"file_path":"<a real file from this repo>"}}' | <cmd>\`
- \`Pre|PostToolUse\` on \`Bash\`: \`echo '{"tool_name":"Bash","tool_input":{"command":"ls"}}' | <cmd>\`
- \`Stop\`/\`UserPromptSubmit\`/\`SessionStart\`: most commands don't read stdin, so \`echo '{}' | <cmd>\` suffices
Check exit code AND side effect (file actually formatted, test actually ran). If it fails you get a real error — fix (wrong package manager? tool not installed? jq path wrong?) and retest. Once it works, wrap with \`2>/dev/null || true\` (unless the user wants a blocking check).
4. **Write the JSON.** Merge into the target file (schema shape in the "Hook Structure" section above). If this creates \`.claude/settings.local.json\` for the first time, add it to .gitignore — the Write tool doesn't auto-gitignore it.
5. **Validate syntax + schema in one shot:**
\`jq -e '.hooks.<event>[] | select(.matcher == "<matcher>") | .hooks[] | select(.type == "command") | .command' <target-file>\`
Exit 0 + prints your command = correct. Exit 4 = matcher doesn't match. Exit 5 = malformed JSON or wrong nesting. A broken settings.json silently disables ALL settings from that file — fix any pre-existing malformation too.
6. **Prove the hook fires** — only for \`Pre|PostToolUse\` on a matcher you can trigger in-turn (\`Write|Edit\` via Edit, \`Bash\` via Bash). \`Stop\`/\`UserPromptSubmit\`/\`SessionStart\` fire outside this turn — skip to step 7.
For a **formatter** on \`PostToolUse\`/\`Write|Edit\`: introduce a detectable violation via Edit (two consecutive blank lines, bad indentation, missing semicolon — something this formatter corrects; NOT trailing whitespace, Edit strips that before writing), re-read, confirm the hook **fixed** it. For **anything else**: temporarily prefix the command in settings.json with \`echo "$(date) hook fired" >> /tmp/claude-hook-check.txt; \`, trigger the matching tool (Edit for \`Write|Edit\`, a harmless \`true\` for \`Bash\`), read the sentinel file.
**Always clean up** — revert the violation, strip the sentinel prefix — whether the proof passed or failed.
**If proof fails but pipe-test passed and \`jq -e\` passed**: the settings watcher isn't watching \`.claude/\` — it only watches directories that had a settings file when this session started. The hook is written correctly. Tell the user to open \`/hooks\` once (reloads config) or restart — you can't do this yourself; \`/hooks\` is a user UI menu and opening it ends this turn.
7. **Handoff.** Tell the user the hook is live (or needs \`/hooks\`/restart per the watcher caveat). Point them at \`/hooks\` to review, edit, or disable it later. The UI only shows "Ran N hooks" if a hook errors or is slow — silent success is invisible by design.
`
/**
 * Prompt body for the bundled `update-config` skill.
 *
 * Guides the model through editing Claude Code settings.json files
 * (hooks, permissions, env vars) with a strict read-then-merge workflow.
 * Interpolates SETTINGS_EXAMPLES_DOCS, HOOKS_DOCS and HOOK_VERIFICATION_FLOW,
 * which are defined earlier in this file.
 *
 * Fix: the "Hook events" list previously omitted UserPromptSubmit (which the
 * verification flow above explicitly treats as a hook event) and listed a
 * nonexistent PostCompact event.
 */
const UPDATE_CONFIG_PROMPT = `# Update Config Skill
Modify Claude Code configuration by updating settings.json files.
## When Hooks Are Required (Not Memory)
If the user wants something to happen automatically in response to an EVENT, they need a **hook** configured in settings.json. Memory/preferences cannot trigger automated actions.
**These require hooks:**
- "Before compacting, ask me what to preserve" → PreCompact hook
- "After writing files, run prettier" → PostToolUse hook with Write|Edit matcher
- "When I run bash commands, log them" → PreToolUse hook with Bash matcher
- "Always run tests after code changes" → PostToolUse hook
**Hook events:** PreToolUse, PostToolUse, UserPromptSubmit, PreCompact, Stop, Notification, SessionStart
## CRITICAL: Read Before Write
**Always read the existing settings file before making changes.** Merge new settings with existing ones - never replace the entire file.
## CRITICAL: Use AskUserQuestion for Ambiguity
When the user's request is ambiguous, use AskUserQuestion to clarify:
- Which settings file to modify (user/project/local)
- Whether to add to existing arrays or replace them
- Specific values when multiple options exist
## Decision: Config Tool vs Direct Edit
**Use the Config tool** for these simple settings:
- \`theme\`, \`editorMode\`, \`verbose\`, \`model\`
- \`language\`, \`alwaysThinkingEnabled\`
- \`permissions.defaultMode\`
**Edit settings.json directly** for:
- Hooks (PreToolUse, PostToolUse, etc.)
- Complex permission rules (allow/deny arrays)
- Environment variables
- MCP server configuration
- Plugin configuration
## Workflow
1. **Clarify intent** - Ask if the request is ambiguous
2. **Read existing file** - Use Read tool on the target settings file
3. **Merge carefully** - Preserve existing settings, especially arrays
4. **Edit file** - Use Edit tool (if file doesn't exist, ask user to create it first)
5. **Confirm** - Tell user what was changed
## Merging Arrays (Important!)
When adding to permission arrays or hook arrays, **merge with existing**, don't replace:
**WRONG** (replaces existing permissions):
\`\`\`json
{ "permissions": { "allow": ["Bash(npm:*)"] } }
\`\`\`
**RIGHT** (preserves existing + adds new):
\`\`\`json
{
  "permissions": {
    "allow": [
      "Bash(git:*)", // existing
      "Edit(.claude)", // existing
      "Bash(npm:*)" // new
    ]
  }
}
\`\`\`
${SETTINGS_EXAMPLES_DOCS}
${HOOKS_DOCS}
${HOOK_VERIFICATION_FLOW}
## Example Workflows
### Adding a Hook
User: "Format my code after Claude writes it"
1. **Clarify**: Which formatter? (prettier, gofmt, etc.)
2. **Read**: \`.claude/settings.json\` (or create if missing)
3. **Merge**: Add to existing hooks, don't replace
4. **Result**:
\`\`\`json
{
  "hooks": {
    "PostToolUse": [{
      "matcher": "Write|Edit",
      "hooks": [{
        "type": "command",
        "command": "jq -r '.tool_response.filePath // .tool_input.file_path' | { read -r f; prettier --write \\"$f\\"; } 2>/dev/null || true"
      }]
    }]
  }
}
\`\`\`
### Adding Permissions
User: "Allow npm commands without prompting"
1. **Read**: Existing permissions
2. **Merge**: Add \`Bash(npm:*)\` to allow array
3. **Result**: Combined with existing allows
### Environment Variables
User: "Set DEBUG=true"
1. **Decide**: User settings (global) or project settings?
2. **Read**: Target file
3. **Merge**: Add to env object
\`\`\`json
{ "env": { "DEBUG": "true" } }
\`\`\`
## Common Mistakes to Avoid
1. **Replacing instead of merging** - Always preserve existing settings
2. **Wrong file** - Ask user if scope is unclear
3. **Invalid JSON** - Validate syntax after changes
4. **Forgetting to read first** - Always read before write
## Troubleshooting Hooks
If a hook isn't running:
1. **Check the settings file** - Read ~/.claude/settings.json or .claude/settings.json
2. **Verify JSON syntax** - Invalid JSON silently fails
3. **Check the matcher** - Does it match the tool name? (e.g., "Bash", "Write", "Edit")
4. **Check hook type** - Is it "command", "prompt", or "agent"?
5. **Test the command** - Run the hook command manually to see if it works
6. **Use --debug** - Run \`claude --debug\` to see hook execution logs
`
/**
 * Registers the bundled `update-config` skill, which guides edits to
 * Claude Code settings.json files (hooks, permissions, env vars).
 *
 * A `[hooks-only]` prefix on the skill arguments switches to a reduced
 * prompt containing only the hooks documentation and verification flow,
 * skipping the full skill prompt and the generated settings schema.
 */
export function registerUpdateConfigSkill(): void {
  const HOOKS_ONLY_PREFIX = '[hooks-only]'

  registerBundledSkill({
    name: 'update-config',
    description:
      'Use this skill to configure the Claude Code harness via settings.json. Automated behaviors ("from now on when X", "each time X", "whenever X", "before/after X") require hooks configured in settings.json - the harness executes these, not Claude, so memory/preferences cannot fulfill them. Also use for: permissions ("allow X", "add permission", "move permission to"), env vars ("set X=Y"), hook troubleshooting, or any changes to settings.json/settings.local.json files. Examples: "allow npm commands", "add bq permission to global settings", "move permission to user settings", "set DEBUG=true", "when claude stops show X". For simple settings like theme/model, use Config tool.',
    allowedTools: ['Read'],
    userInvocable: true,
    async getPromptForCommand(args) {
      // Hooks-only mode: hooks docs + verification flow, plus the task if given.
      if (args.startsWith(HOOKS_ONLY_PREFIX)) {
        const request = args.slice(HOOKS_ONLY_PREFIX.length).trim()
        const sections = [`${HOOKS_DOCS}\n\n${HOOK_VERIFICATION_FLOW}`]
        if (request) {
          sections.push(`## Task\n\n${request}`)
        }
        return [{ type: 'text', text: sections.join('\n\n') }]
      }

      // Full mode: the schema is generated fresh on each invocation so it
      // always matches the current settings types.
      const schemaJson = generateSettingsSchema()
      const sections = [
        UPDATE_CONFIG_PROMPT,
        `## Full Settings JSON Schema\n\n\`\`\`json\n${schemaJson}\n\`\`\``,
      ]
      if (args) {
        sections.push(`## User Request\n\n${args}`)
      }
      return [{ type: 'text', text: sections.join('\n\n') }]
    },
  })
}
+30
View File
@@ -0,0 +1,30 @@
import { parseFrontmatter } from '../../utils/frontmatterParser.js'
import { registerBundledSkill } from '../bundledSkills.js'
import { SKILL_FILES, SKILL_MD } from './verifyContent.js'
// Split the inlined SKILL.md into its YAML frontmatter and markdown body.
const { frontmatter, content: SKILL_BODY } = parseFrontmatter(SKILL_MD)

// Used when the frontmatter carries no string `description` field.
const FALLBACK_DESCRIPTION =
  'Verify a code change does what it should by running the app.'

const DESCRIPTION: string =
  typeof frontmatter.description === 'string'
    ? frontmatter.description
    : FALLBACK_DESCRIPTION
/**
 * Registers the bundled `verify` skill. The skill body and description come
 * from the inlined SKILL.md (see module-level constants above); supplementary
 * example files are shipped alongside it via SKILL_FILES.
 */
export function registerVerifySkill(): void {
  // Gated: only registered when USER_TYPE is 'ant'.
  if (process.env.USER_TYPE !== 'ant') {
    return
  }
  registerBundledSkill({
    name: 'verify',
    description: DESCRIPTION,
    userInvocable: true,
    files: SKILL_FILES,
    async getPromptForCommand(args) {
      let text = SKILL_BODY.trimStart()
      if (args) {
        text += `\n\n## User Request\n\n${args}`
      }
      return [{ type: 'text', text }]
    },
  })
}
+13
View File
@@ -0,0 +1,13 @@
// Content for the verify bundled skill.
// Each .md file is inlined as a string at build time via Bun's text loader.
import cliMd from './verify/examples/cli.md'
import serverMd from './verify/examples/server.md'
import skillMd from './verify/SKILL.md'
// Raw contents of verify/SKILL.md (inlined at build time, per the note above).
export const SKILL_MD: string = skillMd
// Supplementary skill files, keyed by path relative to the skill root.
export const SKILL_FILES: Record<string, string> = {
  'examples/cli.md': cliMd,
  'examples/server.md': serverMd,
}