mirror of
https://github.com/codeany-ai/open-agent-sdk-typescript.git
synced 2026-04-25 07:00:49 +09:00
85dff47d74
- Add skill system with types, registry, SkillTool, and 5 bundled skills
(simplify, commit, review, debug, test)
- Integrate hooks into QueryEngine at 9 lifecycle points (SessionStart,
UserPromptSubmit, PreToolUse, PostToolUse, PostToolUseFailure,
PreCompact, PostCompact, Stop, SessionEnd)
- Add LLM provider abstraction supporting both Anthropic Messages API
and OpenAI Chat Completions API (works with GPT, DeepSeek, Qwen, etc.)
- Add CODEANY_API_TYPE env var ('anthropic-messages' | 'openai-completions')
with auto-detection from model name
- Remove all ANTHROPIC_* env var references, only support CODEANY_* prefix
- Add model pricing and context windows for OpenAI/DeepSeek models
- Remove direct @anthropic-ai/sdk dependency from all files except the
Anthropic provider (types.ts, engine.ts, etc. are now provider-agnostic)
- Add PermissionBehavior type export
- Bump version to 0.2.0
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
61 lines
1.8 KiB
TypeScript
/**
 * Anthropic Messages API Provider
 *
 * Wraps the @anthropic-ai/sdk client. Since our internal format is
 * Anthropic-like, this is mostly a thin pass-through.
 */
import Anthropic from '@anthropic-ai/sdk'
import type {
  LLMProvider,
  CreateMessageParams,
  CreateMessageResponse,
} from './types.js'
export class AnthropicProvider implements LLMProvider {
|
|
readonly apiType = 'anthropic-messages' as const
|
|
private client: Anthropic
|
|
|
|
constructor(opts: { apiKey?: string; baseURL?: string }) {
|
|
this.client = new Anthropic({
|
|
apiKey: opts.apiKey,
|
|
baseURL: opts.baseURL,
|
|
})
|
|
}
|
|
|
|
async createMessage(params: CreateMessageParams): Promise<CreateMessageResponse> {
|
|
const requestParams: Anthropic.MessageCreateParamsNonStreaming = {
|
|
model: params.model,
|
|
max_tokens: params.maxTokens,
|
|
system: params.system,
|
|
messages: params.messages as Anthropic.MessageParam[],
|
|
tools: params.tools
|
|
? (params.tools as Anthropic.Tool[])
|
|
: undefined,
|
|
}
|
|
|
|
// Add extended thinking if configured
|
|
if (params.thinking?.type === 'enabled' && params.thinking.budget_tokens) {
|
|
(requestParams as any).thinking = {
|
|
type: 'enabled',
|
|
budget_tokens: params.thinking.budget_tokens,
|
|
}
|
|
}
|
|
|
|
const response = await this.client.messages.create(requestParams)
|
|
|
|
return {
|
|
content: response.content as CreateMessageResponse['content'],
|
|
stopReason: response.stop_reason || 'end_turn',
|
|
usage: {
|
|
input_tokens: response.usage.input_tokens,
|
|
output_tokens: response.usage.output_tokens,
|
|
cache_creation_input_tokens:
|
|
(response.usage as any).cache_creation_input_tokens,
|
|
cache_read_input_tokens:
|
|
(response.usage as any).cache_read_input_tokens,
|
|
},
|
|
}
|
|
}
|
|
}
|