feat: basic agent

This commit is contained in:
Acbox
2026-01-10 03:04:37 +08:00
parent 00e232af90
commit 8ed3170af7
19 changed files with 568 additions and 35 deletions
+54
View File
@@ -0,0 +1,54 @@
# Agent CLI
A command-line interface for the personal housekeeper assistant agent.
## Setup
1. Create a `.env` file in the project root (not in this directory) with the following variables:
```env
MODEL=gpt-4o
BASE_URL=https://api.openai.com/v1
API_KEY=your-api-key-here
EMBEDDING_MODEL=text-embedding-3-small
MODEL_CLIENT_TYPE=openai
```
2. Make sure the database is set up and running (required for memory storage).
## Usage
Run the CLI from the agent package:
```bash
pnpm start
```
Or with Bun directly:
```bash
bun run index.ts
```
## Features
- **Interactive Chat**: Type your messages and get responses from the AI agent
- **Long-term Memory**: Conversations are automatically saved and can be recalled
- **Context Loading**: Automatically loads recent conversations (last 60 minutes)
- **Memory Search**: The agent can search through past conversations using natural language
- **Tool Calling**: Supports automatic tool execution with multi-step reasoning
- **Multi-Provider Support**: Works with OpenAI, Anthropic, and Google AI (via Vercel AI SDK)
## Commands
- Type your message and press Enter to chat
- Type `exit` or `quit` to close the application
## Environment Variables
- `MODEL`: The LLM model ID (e.g., `gpt-4o`, `claude-3-5-sonnet-20241022`, `gemini-pro`)
- `BASE_URL`: The API base URL
- `API_KEY`: Your API key
- `EMBEDDING_MODEL`: The embedding model for memory search (e.g., `text-embedding-3-small`)
- `MODEL_CLIENT_TYPE`: The model provider type (default: `openai`, options: `openai`, `anthropic`, `google`)
+119
View File
@@ -0,0 +1,119 @@
import { createInterface } from 'node:readline'
import { stdin as input, stdout as output } from 'node:process'
import { createAgent } from '../src/agent'
import { createMemorySearch, createAddMemory, filterByTimestamp, MemoryUnit } from '@memohome/memory'
import { ModelClientType } from '@memohome/shared'
// Load environment variables
// NOTE(review): there is no `import 'dotenv/config'` even though dotenv is a
// declared dependency — .env loading relies on Bun's automatic behavior; running
// with plain Node would leave these undefined. Confirm the intended runtime.
const MODEL = process.env.MODEL
const BASE_URL = process.env.BASE_URL
const API_KEY = process.env.API_KEY
const EMBEDDING_MODEL = process.env.EMBEDDING_MODEL
const MODEL_CLIENT_TYPE = process.env.MODEL_CLIENT_TYPE || 'openai'
// Fail fast with a usage hint when any required variable is missing.
if (!MODEL || !BASE_URL || !API_KEY || !EMBEDDING_MODEL) {
  console.error('Error: Missing required environment variables')
  console.error('Required: MODEL, BASE_URL, API_KEY, EMBEDDING_MODEL')
  console.error('Optional: MODEL_CLIENT_TYPE (default: openai)')
  process.exit(1)
}
// Fixed identity for all CLI sessions; every memory row is stored and queried under it.
const USER_ID = 'cli-user'
// Create memory functions
// Both helpers share the same embedding model/credentials: one performs vector
// search, the other persists finished conversations.
const searchMemory = createMemorySearch({
  model: EMBEDDING_MODEL,
  apiKey: API_KEY,
  baseURL: BASE_URL,
})
const addMemory = createAddMemory({
  model: EMBEDDING_MODEL,
  apiKey: API_KEY,
  baseURL: BASE_URL,
})
// Create agent
const agent = createAgent({
  model: {
    modelId: MODEL,
    baseUrl: BASE_URL,
    apiKey: API_KEY,
    clientType: MODEL_CLIENT_TYPE as ModelClientType,
    name: MODEL,
  },
  maxContextLoadTime: 60, // 60 minutes
  language: 'Same as user input',
  // Load conversations stored within the requested time window into context.
  onReadMemory: async (from: Date, to: Date) => {
    return await filterByTimestamp(from, to, USER_ID)
  },
  // Semantic search over past conversations, capped at 5 results.
  onSearchMemory: async (query: string) => {
    return await searchMemory({ user: USER_ID, query, maxResults: 5 })
  },
  // Persist the finished exchange so later sessions can recall it.
  onFinish: async (messages) => {
    // Save conversation to memory - type conversion handled internally
    const memoryUnit: MemoryUnit = {
      messages: messages as unknown as MemoryUnit['messages'],
      timestamp: new Date(),
      user: USER_ID,
      raw: '', // will be generated by addMemory
    }
    await addMemory({ memory: memoryUnit })
  },
})
// Interactive REPL: read a line, stream the agent's reply, repeat until exit/quit.
async function main() {
  console.log('🤖 Agent CLI Started')
  console.log('Type your message and press Enter. Type "exit" to quit.\n')
  // Load context
  // NOTE(review): agent.ask() also calls loadContext() internally (see agent.ts),
  // so recent memory may be prepended more than once — confirm whether this
  // explicit call is still needed.
  await agent.loadContext()
  const rl = createInterface({ input, output })
  // NOTE(review): the handler is async, so a line typed while a response is still
  // streaming will interleave output — presumably acceptable for a simple CLI.
  rl.on('line', async (line) => {
    const userInput = line.trim()
    if (userInput === 'exit' || userInput === 'quit') {
      console.log('\n👋 Goodbye!')
      rl.close()
      process.exit(0)
    }
    // Ignore empty submissions; just re-show the prompt.
    if (!userInput) {
      rl.prompt()
      return
    }
    try {
      process.stdout.write('\n🤖 ')
      let hasOutput = false
      // Consume the event stream: print text deltas as they arrive and
      // announce tool invocations by name.
      for await (const event of agent.ask(userInput)) {
        if (event.type === 'text-delta' && 'text' in event && event.text) {
          process.stdout.write(String(event.text))
          hasOutput = true
        } else if (event.type === 'tool-call' && 'toolName' in event) {
          process.stdout.write(`\n[Tool: ${event.toolName}]`)
          hasOutput = true
        }
      }
      if (!hasOutput) {
        process.stdout.write('(No response)')
      }
      console.log('\n')
    } catch (error) {
      console.error('\n❌ Error:', error instanceof Error ? error.message : String(error))
      console.log()
    }
    rl.prompt()
  })
  rl.setPrompt('You: ')
  rl.prompt()
}
main().catch(console.error)
+8 -1
View File
@@ -3,13 +3,20 @@
"version": "1.0.0",
"description": "Agent package for the phonetutor monorepo",
"scripts": {
"test": "vitest"
"test": "vitest",
"start": "bun run client/index.ts"
},
"keywords": [],
"author": "Phonetutor",
"license": "ISC",
"packageManager": "pnpm@10.27.0",
"dependencies": {
"@ai-sdk/anthropic": "^3.0.9",
"@ai-sdk/google": "^3.0.6",
"@ai-sdk/openai": "^3.0.7",
"@memohome/memory": "workspace:*",
"@memohome/shared": "workspace:*",
"ai": "^6.0.25",
"dotenv": "^17.2.3",
"xsai": "^0.4.1",
"zod": "^4.3.5"
+76 -2
View File
@@ -1,3 +1,77 @@
import { streamText } from 'xsai'
import { streamText, ModelMessage, stepCountIs } from 'ai'
import { AgentParams } from './types'
import { system } from './prompts'
import { getMemoryTools } from './tools'
import { MemoryUnit } from '@memohome/memory'
import { createGateway } from './gateway'
/**
 * Creates a housekeeper agent bound to a model gateway and memory callbacks.
 *
 * The returned `ask` generator streams provider events for one user turn and,
 * once the stream completes, forwards the new messages to `params.onFinish`.
 */
export const createAgent = (params: AgentParams) => {
  const messages: ModelMessage[] = []
  // Memory units surfaced by the search tool; rendered into the system prompt.
  const memory: MemoryUnit[] = []
  const gateway = createGateway(params.model)
  // Guard so recent context is only prepended once: loadContext may be called
  // explicitly by the host AND implicitly by ask().
  let contextLoaded = false
  const getTools = async () => {
    return {
      ...getMemoryTools({
        searchMemory: params.onSearchMemory ?? (() => Promise.resolve([])),
        // Fix: the parameter previously shadowed the outer `memory` array, so
        // search results were pushed into themselves and never reached the prompt.
        onLoadMemory: async (units) => {
          memory.push(...units)
        },
      }),
    }
  }
  // Prepend conversations from the last `maxContextLoadTime` minutes. Idempotent.
  const loadContext = async () => {
    if (contextLoaded) return
    contextLoaded = true
    const from = new Date(Date.now() - params.maxContextLoadTime * 60 * 1000)
    const to = new Date()
    const loaded = await params.onReadMemory?.(from, to) ?? []
    const context = loaded.flatMap(m => m.messages)
    messages.unshift(...context)
  }
  // Rebuilt on every step so tool-loaded memory is reflected immediately.
  const getSystemPrompt = () => {
    return system({
      date: new Date(),
      language: params.language ?? 'Same as user input',
      locale: params.locale,
      maxContextLoadTime: params.maxContextLoadTime,
      memory,
    })
  }
  /**
   * Streams one conversational turn. Yields raw provider stream events; after
   * the stream completes, appends the generated messages to history and hands
   * the user turn plus generated messages to `params.onFinish`.
   */
  async function* ask(input: string) {
    await loadContext()
    const user: ModelMessage = {
      role: 'user',
      content: input,
    }
    messages.push(user)
    const { fullStream, response } = streamText({
      model: gateway,
      system: getSystemPrompt(),
      // Recompute the prompt each step so memory loaded by tools is included.
      prepareStep: async () => {
        return {
          system: getSystemPrompt(),
        }
      },
      stopWhen: stepCountIs(10),
      messages,
      tools: await getTools(),
    })
    for await (const event of fullStream) {
      yield event
    }
    const newMessages = (await response).messages
    // Fix: keep assistant/tool turns in history so follow-up questions have
    // the full conversation as context (previously only user turns persisted).
    messages.push(...newMessages)
    // Await persistence so failures surface to the caller instead of becoming
    // unhandled rejections.
    await params.onFinish?.([
      user,
      ...newMessages,
    ])
  }
  return {
    ask,
    loadContext,
    getSystemPrompt,
  }
}
+17
View File
@@ -0,0 +1,17 @@
import { createGateway as createAiGateway } from 'ai'
import { createOpenAI } from '@ai-sdk/openai'
import { createAnthropic } from '@ai-sdk/anthropic'
import { createGoogleGenerativeAI } from '@ai-sdk/google'
import { BaseModel, ModelClientType } from '@memohome/shared'
/**
 * Resolves a concrete AI SDK language model from a BaseModel config.
 *
 * Selects the provider factory matching `clientType` (falling back to the
 * generic AI gateway for unknown types), instantiates it with the configured
 * credentials, then picks the model by id.
 */
export const createGateway = (model: BaseModel) => {
  const factories = {
    [ModelClientType.OPENAI]: createOpenAI,
    [ModelClientType.ANTHROPIC]: createAnthropic,
    [ModelClientType.GOOGLE]: createGoogleGenerativeAI,
  }
  const factory = factories[model.clientType] ?? createAiGateway
  const provider = factory({
    apiKey: model.apiKey,
    baseURL: model.baseUrl,
  })
  return provider(model.modelId)
}
+3
View File
@@ -0,0 +1,3 @@
export * from './agent'
export * from './types'
+17 -4
View File
@@ -1,15 +1,20 @@
import { MemoryUnit } from '@memohome/memory'
import { block, quote } from './utils'
export interface SystemParams {
date: Date
locale: Intl.LocalesArgument
locale?: Intl.LocalesArgument
language: string
maxContextLoadTime: number
memory: MemoryUnit[]
}
export const system = ({ date, locale }: SystemParams) => {
export const system = ({ date, locale, language, maxContextLoadTime, memory }: SystemParams) => {
return `
---
date: ${date.toLocaleDateString(locale)}
time: ${date.toLocaleTimeString(locale)}
language: ${locale}
timezone: ${date.getTimezoneOffset()}
language: ${language}
---
You are a personal housekeeper assistant, which able to manage the master's daily affairs.
@@ -17,5 +22,13 @@ export const system = ({ date, locale }: SystemParams) => {
- Long memory: You possess long-term memory; conversations from the last 24 hours will be directly loaded into your context. Additionally, you can use tools to search for past memories.
- Scheduled tasks: You can create scheduled tasks to automatically remind you to do something.
- Messaging: You may allowed to use message software to send messages to the master.
**Memory**
- Your context has been loaded from the last ${maxContextLoadTime} minutes.
- You can use ${quote('search-memory')} to search for past memories with natural language.
- The search result is performed as chat history, load into your system prompt as a context.
**Past Memory Loaded**
${block(memory.map(m => m.raw).join('\n\n'), 'memory')}
`.trim()
}
+7
View File
@@ -0,0 +1,7 @@
/**
 * Wraps content in single backticks so it renders as inline code in markdown.
 */
export const quote = (content: string) => '`' + content + '`'
/**
 * Wraps content in a fenced markdown code block, optionally tagged with a
 * language/info string (e.g. `block(text, 'memory')`).
 */
export const block = (content: string, tag: string = '') => {
  const fence = '```'
  return [fence + tag, content, fence].join('\n')
}
+1
View File
@@ -0,0 +1 @@
export * from './memory'
+29
View File
@@ -0,0 +1,29 @@
import { MemoryUnit } from '@memohome/memory'
import { tool } from 'ai'
import { z } from 'zod'
/**
 * Callbacks the memory tools delegate to.
 */
export interface GetMemoryToolParams {
  /** Performs a semantic search over stored memory units. */
  searchMemory: (query: string) => Promise<MemoryUnit[]>
  /** Receives the found units so the agent can fold them into its context. */
  onLoadMemory: (memory: MemoryUnit[]) => Promise<void>
}
/**
 * Builds the tool set the agent exposes to the model for memory access.
 *
 * Currently a single `search-memory` tool: it searches chat history with a
 * natural-language query and loads the hits into the agent's context.
 */
export const getMemoryTools = ({ searchMemory, onLoadMemory }: GetMemoryToolParams) => {
  const searchMemoryTool = tool({
    description: 'Search chat history in the memory',
    inputSchema: z.object({
      query: z.string().describe('The query to search the memory'),
    }),
    execute: async ({ query }) => {
      const memory = await searchMemory(query)
      // Fix: await the async callback so the memory is actually in context
      // before the tool reports success (previously fire-and-forget, so the
      // next model step could run before the memory was loaded).
      await onLoadMemory(memory)
      return {
        success: true,
        message: `${memory.length} memories has load into your context`,
      }
    },
  })
  return {
    'search-memory': searchMemoryTool,
  }
}
+28
View File
@@ -0,0 +1,28 @@
import type { MemoryUnit } from '@memohome/memory'
import { BaseModel } from '@memohome/shared'
import { ModelMessage } from 'ai'
/**
 * Configuration for the agent factory.
 */
export interface AgentParams {
  /** Model id, credentials and provider type used to build the gateway. */
  model: BaseModel
  /**
   * How far back recent conversations are loaded into context.
   * Unit: minutes
   */
  maxContextLoadTime: number
  /** Locale used when formatting dates/times in the system prompt. */
  locale?: Intl.LocalesArgument
  /**
   * Preferred language of the assistant.
   * @default 'Same as user input'
   */
  language?: string
  /** Returns memory units whose timestamps fall within [from, to]. */
  onReadMemory?: (from: Date, to: Date) => Promise<MemoryUnit[]>
  /** Semantic search over stored memory; backs the `search-memory` tool. */
  onSearchMemory?: (query: string) => Promise<MemoryUnit[]>
  /** Called after a turn with the user message plus newly generated messages. */
  onFinish?: (messages: ModelMessage[]) => Promise<void>
  // NOTE(review): declared but not invoked anywhere visible in this commit —
  // confirm the intended wiring before relying on it.
  onError?: (error: Error) => Promise<void>
}
+2
View File
@@ -14,7 +14,9 @@
"license": "ISC",
"packageManager": "pnpm@10.27.0",
"dependencies": {
"@ai-sdk/openai": "^3.0.7",
"@memohome/db": "workspace:*",
"ai": "^6.0.25",
"drizzle-orm": "^0.45.1",
"xsai": "^0.4.1"
}
+2 -1
View File
@@ -6,7 +6,7 @@ import { db } from '@memohome/db'
import { memory } from '@memohome/db/schema'
export interface AddMemoryParams extends EmbedParams {
locale: Intl.LocalesArgument
locale?: Intl.LocalesArgument
}
export interface AddMemoryInput {
@@ -24,6 +24,7 @@ export const createAddMemory = (params: AddMemoryParams) =>
})
await db.insert(memory)
.values({
id: crypto.randomUUID(),
timestamp: memoryUnit.timestamp,
user: memoryUnit.user,
rawContent,
+2 -2
View File
@@ -1,7 +1,7 @@
import { Message } from 'xsai'
import { ModelMessage } from 'ai'
export interface MemoryUnit {
messages: Message[]
messages: ModelMessage[]
timestamp: Date
user: string
raw: string
+8 -5
View File
@@ -1,14 +1,17 @@
import { Message } from 'xsai'
import { ModelMessage } from 'ai'
import { MemoryUnit } from './memory-unit'
export const rawMessages = (messages: Message[]) => {
export const rawMessages = (messages: ModelMessage[]) => {
return messages.map((message) => {
if (message.role === 'user') {
if (Array.isArray(message.content)) {
return `User: ${message.content.filter(c => c.type === 'text').map(c => c.text).join('\n')}`
}
return `User: ${message.content}`
} else if (message.role === 'assistant') {
let toolCalls = ''
if (message.tool_calls && message.tool_calls.length !== 0) {
toolCalls = `Tool Calls: ${message.tool_calls.map(t => t.function.name).join(', ')}`
if (Array.isArray(message.content)) {
toolCalls = message.content.filter(c => c.type === 'tool-call').map(c => c.toolName).join(', ')
}
return `You: ${message.content} \n${toolCalls}`
} else if (message.role === 'tool') {
@@ -21,7 +24,7 @@ export const rawMessages = (messages: Message[]) => {
.join('\n\n')
}
export const rawMemory = (memory: MemoryUnit, locale: Intl.LocalesArgument) => {
export const rawMemory = (memory: MemoryUnit, locale?: Intl.LocalesArgument) => {
return `
---
date: ${memory.timestamp.toLocaleDateString(locale)}
+7 -5
View File
@@ -1,6 +1,7 @@
import { embed } from 'xsai'
import { embed } from 'ai'
import { filterByEmbedding } from './filter'
import { EmbedParams } from './types'
import { createOpenAI } from '@ai-sdk/openai'
// eslint-disable-next-line @typescript-eslint/no-empty-object-type
export interface MemorySearchParams extends EmbedParams { }
@@ -14,10 +15,11 @@ export interface MemorySearchInput {
export const createMemorySearch = (params: MemorySearchParams) =>
async ({ user, query, maxResults = 10 }: MemorySearchInput) => {
const { embedding } = await embed({
model: params.model,
input: query,
apiKey: params.apiKey,
baseURL: params.baseURL,
model: createOpenAI({
apiKey: params.apiKey,
baseURL: params.baseURL,
}).embedding(params.model),
value: query,
})
return await filterByEmbedding(embedding, user, maxResults)
}
+1
View File
@@ -0,0 +1 @@
export * from './model'
+37
View File
@@ -0,0 +1,37 @@
/**
 * Supported upstream providers; used to select the matching AI SDK client
 * factory when constructing the model gateway.
 */
export enum ModelClientType {
  OPENAI = 'openai',
  ANTHROPIC = 'anthropic',
  GOOGLE = 'google',
}
/**
 * Provider-agnostic model configuration shared across packages.
 */
export interface BaseModel {
  /**
   * @description The unique identifier for the model
   * @example 'gpt-4o'
   */
  modelId: string
  /**
   * @description The base URL for the model
   * @example 'https://api.openai.com/v1'
   */
  baseUrl: string
  /**
   * @description The API key for the model
   * @example 'sk-1234567890'
   */
  apiKey: string
  /**
   * @description The client type for the model
   * @enum {ModelClientType}
   */
  clientType: ModelClientType
  /**
   * @description The display name for the model
   * @example 'GPT 4o'
   */
  name?: string
}
+150 -15
View File
@@ -36,12 +36,30 @@ importers:
packages/agent:
dependencies:
'@ai-sdk/anthropic':
specifier: ^3.0.9
version: 3.0.9(zod@4.3.5)
'@ai-sdk/google':
specifier: ^3.0.6
version: 3.0.6(zod@4.3.5)
'@ai-sdk/openai':
specifier: ^3.0.7
version: 3.0.7(zod@4.3.5)
'@memohome/memory':
specifier: workspace:*
version: link:../memory
'@memohome/shared':
specifier: workspace:*
version: link:../shared
ai:
specifier: ^6.0.25
version: 6.0.25(zod@4.3.5)
dotenv:
specifier: ^17.2.3
version: 17.2.3
xsai:
specifier: ^0.4.1
version: 0.4.1(zod@4.3.5)
version: 0.4.1(zod-to-json-schema@3.25.1(zod@4.3.5))(zod@4.3.5)
zod:
specifier: ^4.3.5
version: 4.3.5
@@ -80,15 +98,21 @@ importers:
packages/memory:
dependencies:
'@ai-sdk/openai':
specifier: ^3.0.7
version: 3.0.7(zod@4.3.5)
'@memohome/db':
specifier: workspace:*
version: link:../db
ai:
specifier: ^6.0.25
version: 6.0.25(zod@4.3.5)
drizzle-orm:
specifier: ^0.45.1
version: 0.45.1(@opentelemetry/api@1.9.0)(@types/pg@8.16.0)(bun-types@1.3.5)(pg@8.16.3)
xsai:
specifier: ^0.4.1
version: 0.4.1(zod@4.3.5)
version: 0.4.1(zod-to-json-schema@3.25.1(zod@4.3.5))(zod@4.3.5)
packages/shared: {}
@@ -222,6 +246,40 @@ importers:
packages:
'@ai-sdk/anthropic@3.0.9':
resolution: {integrity: sha512-QBD4qDnwIHd+N5PpjxXOaWJig1aRB43J0PM5ZUe6Yyl9Qq2bUmraQjvNznkuFKy+hMFDgj0AvgGogTiO5TC+qA==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.25.76 || ^4.1.8
'@ai-sdk/gateway@3.0.10':
resolution: {integrity: sha512-sRlPMKd38+fdp2y11USW44c0o8tsIsT6T/pgyY04VXC3URjIRnkxugxd9AkU2ogfpPDMz50cBAGPnMxj+6663Q==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.25.76 || ^4.1.8
'@ai-sdk/google@3.0.6':
resolution: {integrity: sha512-Nr7E+ouWd/bKO9SFlgLnJJ1+fiGHC07KAeFr08faT+lvkECWlxVox3aL0dec8uCgBDUghYbq7f4S5teUrCc+QQ==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.25.76 || ^4.1.8
'@ai-sdk/openai@3.0.7':
resolution: {integrity: sha512-CBoYn1U59Lop8yBL9KuVjHCKc/B06q9Qo0SasRwHoyMEq+X4I8LQZu3a8Ck1jwwcZTTxfyiExB70LtIRSynBDA==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.25.76 || ^4.1.8
'@ai-sdk/provider-utils@4.0.4':
resolution: {integrity: sha512-VxhX0B/dWGbpNHxrKCWUAJKXIXV015J4e7qYjdIU9lLWeptk0KMLGcqkB4wFxff5Njqur8dt8wRi1MN9lZtDqg==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.25.76 || ^4.1.8
'@ai-sdk/provider@3.0.2':
resolution: {integrity: sha512-HrEmNt/BH/hkQ7zpi2o6N3k1ZR1QTb7z85WYhYygiTxOQuaml4CMtHCWRbric5WPU+RNsYI7r1EpyVQMKO1pYw==}
engines: {node: '>=18'}
'@babel/code-frame@7.27.1':
resolution: {integrity: sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==}
engines: {node: '>=6.9.0'}
@@ -1318,6 +1376,10 @@ packages:
resolution: {integrity: sha512-ink3/Zofus34nmBsPjow63FP5M7IGff0RKAgqR6+CFpdk22M7aLwC9gOcLGYqr7MczLPzZVERW9hRog3O4n1sQ==}
engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0}
'@vercel/oidc@3.1.0':
resolution: {integrity: sha512-Fw28YZpRnA3cAHHDlkt7xQHiJ0fcL+NRcIqsocZQUSmbzeIKRpwttJjik5ZGanXP+vlA4SbTg+AbA3bP363l+w==}
engines: {node: '>= 20'}
'@vitejs/plugin-vue@6.0.3':
resolution: {integrity: sha512-TlGPkLFLVOY3T7fZrwdvKpjprR3s4fxRln0ORDo1VQ7HHyxJwTlrjKU3kpVWTlaAjIEuCTokmjkZnr8Tpc925w==}
engines: {node: ^20.19.0 || >=22.12.0}
@@ -1525,6 +1587,12 @@ packages:
engines: {node: '>=0.4.0'}
hasBin: true
ai@6.0.25:
resolution: {integrity: sha512-KErk9JWkRaN4j9Xzxuo+twa0TxcYKdYbrRV8iGktduvUeGb0Yd5seWe3yOfuLGERbDBiKI1ajQz28O2FG3WO5A==}
engines: {node: '>=18'}
peerDependencies:
zod: ^3.25.76 || ^4.1.8
ajv-draft-04@1.0.0:
resolution: {integrity: sha512-mv00Te6nmYbRp5DCwclxtt7yV/joXJPGS7nM+97GdxvuttCOfgI3K4U25zboyeX0O+myI8ERluxQe5wljMmVIw==}
peerDependencies:
@@ -1934,6 +2002,10 @@ packages:
resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==}
engines: {node: '>=0.10.0'}
eventsource-parser@3.0.6:
resolution: {integrity: sha512-Vo1ab+QXPzZ4tCa8SwIHJFaSzy4R6SHf7BY79rFBDf0idraZWAkYrDjDj8uWaSm3S2TK+hJ7/t1CEmZ7jXw+pg==}
engines: {node: '>=18.0.0'}
exact-mirror@0.2.6:
resolution: {integrity: sha512-7s059UIx9/tnOKSySzUk5cPGkoILhTE4p6ncf6uIPaQ+9aRBQzQjc9+q85l51+oZ+P6aBxh084pD0CzBQPcFUA==}
peerDependencies:
@@ -2122,6 +2194,9 @@ packages:
json-schema-traverse@1.0.0:
resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==}
json-schema@0.4.0:
resolution: {integrity: sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==}
json-stable-stringify-without-jsonify@1.0.1:
resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==}
@@ -2951,11 +3026,52 @@ packages:
resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==}
engines: {node: '>=10'}
zod-to-json-schema@3.25.1:
resolution: {integrity: sha512-pM/SU9d3YAggzi6MtR4h7ruuQlqKtad8e9S0fmxcMi+ueAK5Korys/aWcV9LIIHTVbj01NdzxcnXSN+O74ZIVA==}
peerDependencies:
zod: ^3.25 || ^4
zod@4.3.5:
resolution: {integrity: sha512-k7Nwx6vuWx1IJ9Bjuf4Zt1PEllcwe7cls3VNzm4CQ1/hgtFUK2bRNG3rvnpPUhFjmqJKAKtjV576KnUkHocg/g==}
snapshots:
'@ai-sdk/anthropic@3.0.9(zod@4.3.5)':
dependencies:
'@ai-sdk/provider': 3.0.2
'@ai-sdk/provider-utils': 4.0.4(zod@4.3.5)
zod: 4.3.5
'@ai-sdk/gateway@3.0.10(zod@4.3.5)':
dependencies:
'@ai-sdk/provider': 3.0.2
'@ai-sdk/provider-utils': 4.0.4(zod@4.3.5)
'@vercel/oidc': 3.1.0
zod: 4.3.5
'@ai-sdk/google@3.0.6(zod@4.3.5)':
dependencies:
'@ai-sdk/provider': 3.0.2
'@ai-sdk/provider-utils': 4.0.4(zod@4.3.5)
zod: 4.3.5
'@ai-sdk/openai@3.0.7(zod@4.3.5)':
dependencies:
'@ai-sdk/provider': 3.0.2
'@ai-sdk/provider-utils': 4.0.4(zod@4.3.5)
zod: 4.3.5
'@ai-sdk/provider-utils@4.0.4(zod@4.3.5)':
dependencies:
'@ai-sdk/provider': 3.0.2
'@standard-schema/spec': 1.1.0
eventsource-parser: 3.0.6
zod: 4.3.5
'@ai-sdk/provider@3.0.2':
dependencies:
json-schema: 0.4.0
'@babel/code-frame@7.27.1':
dependencies:
'@babel/helper-validator-identifier': 7.28.5
@@ -3541,8 +3657,7 @@ snapshots:
'@microsoft/tsdoc@0.16.0': {}
'@opentelemetry/api@1.9.0':
optional: true
'@opentelemetry/api@1.9.0': {}
'@polka/url@1.0.0-next.29': {}
@@ -3875,6 +3990,8 @@ snapshots:
'@typescript-eslint/types': 8.52.0
eslint-visitor-keys: 4.2.1
'@vercel/oidc@3.1.0': {}
'@vitejs/plugin-vue@6.0.3(vite@7.3.0(@types/node@24.10.4)(jiti@2.6.1)(lightningcss@1.30.2)(tsx@4.21.0))(vue@3.5.26(typescript@5.9.3))':
dependencies:
'@rolldown/pluginutils': 1.0.0-beta.53
@@ -4114,10 +4231,10 @@ snapshots:
dependencies:
'@xsai/shared': 0.4.1
'@xsai/generate-object@0.4.1(zod@4.3.5)':
'@xsai/generate-object@0.4.1(zod-to-json-schema@3.25.1(zod@4.3.5))(zod@4.3.5)':
dependencies:
'@xsai/generate-text': 0.4.1
xsschema: 0.4.1(zod@4.3.5)
xsschema: 0.4.1(zod-to-json-schema@3.25.1(zod@4.3.5))(zod@4.3.5)
transitivePeerDependencies:
- '@valibot/to-json-schema'
- arktype
@@ -4149,10 +4266,10 @@ snapshots:
'@xsai/shared@0.4.1': {}
'@xsai/stream-object@0.4.1(zod@4.3.5)':
'@xsai/stream-object@0.4.1(zod-to-json-schema@3.25.1(zod@4.3.5))(zod@4.3.5)':
dependencies:
'@xsai/stream-text': 0.4.1
xsschema: 0.4.1(zod@4.3.5)
xsschema: 0.4.1(zod-to-json-schema@3.25.1(zod@4.3.5))(zod@4.3.5)
transitivePeerDependencies:
- '@valibot/to-json-schema'
- arktype
@@ -4170,11 +4287,11 @@ snapshots:
dependencies:
'@xsai/shared': 0.4.1
'@xsai/tool@0.4.1(zod@4.3.5)':
'@xsai/tool@0.4.1(zod-to-json-schema@3.25.1(zod@4.3.5))(zod@4.3.5)':
dependencies:
'@xsai/shared': 0.4.1
'@xsai/shared-chat': 0.4.1
xsschema: 0.4.1(zod@4.3.5)
xsschema: 0.4.1(zod-to-json-schema@3.25.1(zod@4.3.5))(zod@4.3.5)
transitivePeerDependencies:
- '@valibot/to-json-schema'
- arktype
@@ -4197,6 +4314,14 @@ snapshots:
acorn@8.15.0: {}
ai@6.0.25(zod@4.3.5):
dependencies:
'@ai-sdk/gateway': 3.0.10(zod@4.3.5)
'@ai-sdk/provider': 3.0.2
'@ai-sdk/provider-utils': 4.0.4(zod@4.3.5)
'@opentelemetry/api': 1.9.0
zod: 4.3.5
ajv-draft-04@1.0.0(ajv@8.13.0):
optionalDependencies:
ajv: 8.13.0
@@ -4577,6 +4702,8 @@ snapshots:
esutils@2.0.3: {}
eventsource-parser@3.0.6: {}
exact-mirror@0.2.6(@sinclair/typebox@0.34.47):
optionalDependencies:
'@sinclair/typebox': 0.34.47
@@ -4716,6 +4843,8 @@ snapshots:
json-schema-traverse@1.0.0: {}
json-schema@0.4.0: {}
json-stable-stringify-without-jsonify@1.0.1: {}
json5@2.2.3: {}
@@ -5442,21 +5571,21 @@ snapshots:
xml-name-validator@4.0.0: {}
xsai@0.4.1(zod@4.3.5):
xsai@0.4.1(zod-to-json-schema@3.25.1(zod@4.3.5))(zod@4.3.5):
dependencies:
'@xsai/embed': 0.4.1
'@xsai/generate-image': 0.4.1
'@xsai/generate-object': 0.4.1(zod@4.3.5)
'@xsai/generate-object': 0.4.1(zod-to-json-schema@3.25.1(zod@4.3.5))(zod@4.3.5)
'@xsai/generate-speech': 0.4.1
'@xsai/generate-text': 0.4.1
'@xsai/generate-transcription': 0.4.1
'@xsai/model': 0.4.1
'@xsai/shared': 0.4.1
'@xsai/shared-chat': 0.4.1
'@xsai/stream-object': 0.4.1(zod@4.3.5)
'@xsai/stream-object': 0.4.1(zod-to-json-schema@3.25.1(zod@4.3.5))(zod@4.3.5)
'@xsai/stream-text': 0.4.1
'@xsai/stream-transcription': 0.4.1
'@xsai/tool': 0.4.1(zod@4.3.5)
'@xsai/tool': 0.4.1(zod-to-json-schema@3.25.1(zod@4.3.5))(zod@4.3.5)
'@xsai/utils-chat': 0.4.1
'@xsai/utils-reasoning': 0.4.1
'@xsai/utils-stream': 0.4.1
@@ -5468,9 +5597,10 @@ snapshots:
- zod
- zod-to-json-schema
xsschema@0.4.1(zod@4.3.5):
xsschema@0.4.1(zod-to-json-schema@3.25.1(zod@4.3.5))(zod@4.3.5):
optionalDependencies:
zod: 4.3.5
zod-to-json-schema: 3.25.1(zod@4.3.5)
xtend@4.0.2: {}
@@ -5480,4 +5610,9 @@ snapshots:
yocto-queue@0.1.0: {}
zod-to-json-schema@3.25.1(zod@4.3.5):
dependencies:
zod: 4.3.5
optional: true
zod@4.3.5: {}