Files
open-agent-sdk-typescript/src/providers/types.ts
T
idoubi 85dff47d74 feat: add skills system, hooks integration, and OpenAI-compatible provider
- Add skill system with types, registry, SkillTool, and 5 bundled skills
  (simplify, commit, review, debug, test)
- Integrate hooks into QueryEngine at 9 lifecycle points (SessionStart,
  UserPromptSubmit, PreToolUse, PostToolUse, PostToolUseFailure,
  PreCompact, PostCompact, Stop, SessionEnd)
- Add LLM provider abstraction supporting both Anthropic Messages API
  and OpenAI Chat Completions API (works with GPT, DeepSeek, Qwen, etc.)
- Add CODEANY_API_TYPE env var ('anthropic-messages' | 'openai-completions')
  with auto-detection from model name
- Remove all ANTHROPIC_* env var references, only support CODEANY_* prefix
- Add model pricing and context windows for OpenAI/DeepSeek models
- Remove direct @anthropic-ai/sdk dependency from all files except the
  Anthropic provider (types.ts, engine.ts, etc. are now provider-agnostic)
- Add PermissionBehavior type export
- Bump version to 0.2.0

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-03 23:29:29 +08:00

86 lines
2.6 KiB
TypeScript

/**
 * LLM Provider Abstraction Types
 *
 * Defines a provider interface that normalizes API differences between
 * the Anthropic Messages API and the OpenAI Chat Completions API.
 *
 * Internally the SDK uses an Anthropic-like message format as the canonical
 * representation. Providers convert to/from their native API format.
 */
// --------------------------------------------------------------------------
// API Type
// --------------------------------------------------------------------------

/**
 * Wire protocol a provider speaks.
 *
 * - `'anthropic-messages'` — Anthropic Messages API.
 * - `'openai-completions'` — OpenAI Chat Completions API (also used by
 *   OpenAI-compatible backends such as DeepSeek and Qwen).
 */
export type ApiType = 'anthropic-messages' | 'openai-completions'
// --------------------------------------------------------------------------
// Normalized Request
// --------------------------------------------------------------------------

/**
 * Provider-agnostic request for a single model call.
 *
 * Fields use the Anthropic-like canonical format; each provider translates
 * them into its native API request shape.
 */
export interface CreateMessageParams {
  /** Model identifier, passed through to the provider. */
  model: string
  /** Maximum number of tokens the model may generate. */
  maxTokens: number
  /** System prompt as a single string (not part of `messages`). */
  system: string
  /** Conversation turns in the normalized (Anthropic-like) format. */
  messages: NormalizedMessageParam[]
  /** Tool definitions available to the model, if any. */
  tools?: NormalizedTool[]
  /**
   * Extended-thinking configuration, mirroring Anthropic's `thinking`
   * request field. NOTE(review): `type` is a free-form string here —
   * presumably 'enabled' / 'disabled'; confirm against the provider
   * implementations before tightening.
   */
  thinking?: { type: string; budget_tokens?: number }
}
/**
 * Normalized message format (Anthropic-like).
 * This is the internal representation used throughout the SDK.
 */
export interface NormalizedMessageParam {
  /** Only user/assistant turns; the system prompt travels separately in
   *  `CreateMessageParams.system`. */
  role: 'user' | 'assistant'
  /** Plain-text shorthand, or an ordered list of content blocks. */
  content: string | NormalizedContentBlock[]
}
/**
 * One block inside a normalized message's `content` array.
 * Discriminated union on `type`.
 */
export type NormalizedContentBlock =
  // Plain generated or authored text.
  | { type: 'text'; text: string }
  // Assistant request to invoke a tool. NOTE(review): `input` is the tool's
  // argument payload typed `any`; `unknown` would be safer but would force
  // narrowing at every consumer — confirm before changing.
  | { type: 'tool_use'; id: string; name: string; input: any }
  // Result fed back for a prior tool_use, correlated via `tool_use_id`.
  | { type: 'tool_result'; tool_use_id: string; content: string; is_error?: boolean }
  // Image attachment; `source` shape is untyped here — presumably the
  // provider's native image-source object. TODO confirm.
  | { type: 'image'; source: any }
  // Model reasoning text (extended thinking).
  | { type: 'thinking'; thinking: string }
/**
 * Provider-agnostic tool definition.
 * `input_schema` is a JSON-Schema-style description of the tool's arguments.
 */
export interface NormalizedTool {
  /** Tool name the model uses to invoke it. */
  name: string
  /** Human/model-readable description of what the tool does. */
  description: string
  input_schema: {
    /** Always the literal 'object' — tool inputs are named parameters. */
    type: 'object'
    /** Per-parameter schema fragments, keyed by parameter name. */
    properties: Record<string, any>
    /** Names of parameters that must be supplied. */
    required?: string[]
  }
}
// --------------------------------------------------------------------------
// Normalized Response
// --------------------------------------------------------------------------

/**
 * Provider-agnostic model response in the normalized format.
 */
export interface CreateMessageResponse {
  /** Ordered response blocks (text and/or tool invocations). */
  content: NormalizedResponseBlock[]
  /**
   * Why generation stopped. The known values are spelled out as literals so
   * editors can suggest and narrow on them; `(string & {})` keeps arbitrary
   * provider-specific reasons assignable WITHOUT letting the union collapse
   * to plain `string` (which `... | string` does, erasing the literals).
   */
  stopReason: 'end_turn' | 'max_tokens' | 'tool_use' | (string & {})
  /** Token accounting, using Anthropic-style field names. */
  usage: {
    input_tokens: number
    output_tokens: number
    /** Tokens written to the prompt cache, when the provider reports it. */
    cache_creation_input_tokens?: number
    /** Tokens served from the prompt cache, when the provider reports it. */
    cache_read_input_tokens?: number
  }
}
/**
 * Block in a model response: generated text or a tool invocation.
 * Mirrors the `text` and `tool_use` variants of `NormalizedContentBlock`,
 * so response blocks can be echoed back into the conversation history.
 */
export type NormalizedResponseBlock =
  | { type: 'text'; text: string }
  | { type: 'tool_use'; id: string; name: string; input: any }
// --------------------------------------------------------------------------
// Provider Interface
// --------------------------------------------------------------------------

/**
 * Minimal contract every LLM backend must satisfy.
 * Implementations convert the normalized request/response to and from
 * their native API format (see `ApiType`).
 */
export interface LLMProvider {
  /** The API type this provider implements. */
  readonly apiType: ApiType
  /** Send a message and get a response. */
  createMessage(params: CreateMessageParams): Promise<CreateMessageResponse>
}