feat: basic agent
@@ -0,0 +1,54 @@
# Agent CLI

A command-line interface for the personal housekeeper assistant agent.

## Setup

1. Create a `.env` file in the project root (not in this directory) with the following variables:

```env
MODEL=gpt-4o
BASE_URL=https://api.openai.com/v1
API_KEY=your-api-key-here
EMBEDDING_MODEL=text-embedding-3-small
MODEL_CLIENT_TYPE=openai
```

2. Make sure the database is set up and running (required for memory storage).

## Usage

Run the CLI from the agent package:

```bash
pnpm start
```

Or with Bun directly:

```bash
bun run index.ts
```

## Features

- **Interactive Chat**: Type your messages and get responses from the AI agent
- **Long-term Memory**: Conversations are automatically saved and can be recalled
- **Context Loading**: Automatically loads recent conversations (last 60 minutes)
- **Memory Search**: The agent can search through past conversations using natural language
- **Tool Calling**: Supports automatic tool execution with multi-step reasoning
- **Multi-Provider Support**: Works with OpenAI, Anthropic, and Google AI (via Vercel AI SDK)

## Commands

- Type your message and press Enter to chat
- Type `exit` or `quit` to close the application

## Environment Variables

- `MODEL`: The LLM model ID (e.g., `gpt-4o`, `claude-3-5-sonnet-20241022`, `gemini-pro`)
- `BASE_URL`: The API base URL
- `API_KEY`: Your API key
- `EMBEDDING_MODEL`: The embedding model for memory search (e.g., `text-embedding-3-small`)
- `MODEL_CLIENT_TYPE`: The model provider type (default: `openai`, options: `openai`, `anthropic`, `google`); see the example below
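For a non-OpenAI provider, point `BASE_URL` at that provider's endpoint and set `MODEL_CLIENT_TYPE` accordingly. A sketch for Anthropic (the values here are illustrative, not verified defaults):

```env
MODEL=claude-3-5-sonnet-20241022
BASE_URL=https://api.anthropic.com/v1
API_KEY=your-anthropic-key
EMBEDDING_MODEL=text-embedding-3-small
MODEL_CLIENT_TYPE=anthropic
```

Note that, as of this commit, memory search builds an OpenAI-compatible embeddings client (see `createMemorySearch` below), so `EMBEDDING_MODEL` and its credentials need to resolve against an OpenAI-compatible endpoint.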
@@ -0,0 +1,119 @@
import { createInterface } from 'node:readline'
import { stdin as input, stdout as output } from 'node:process'
import { createAgent } from '../src/agent'
import { createMemorySearch, createAddMemory, filterByTimestamp, MemoryUnit } from '@memohome/memory'
import { ModelClientType } from '@memohome/shared'

// Read configuration from the environment (Bun loads `.env` automatically)
const MODEL = process.env.MODEL
const BASE_URL = process.env.BASE_URL
const API_KEY = process.env.API_KEY
const EMBEDDING_MODEL = process.env.EMBEDDING_MODEL
const MODEL_CLIENT_TYPE = process.env.MODEL_CLIENT_TYPE || 'openai'

if (!MODEL || !BASE_URL || !API_KEY || !EMBEDDING_MODEL) {
  console.error('Error: Missing required environment variables')
  console.error('Required: MODEL, BASE_URL, API_KEY, EMBEDDING_MODEL')
  console.error('Optional: MODEL_CLIENT_TYPE (default: openai)')
  process.exit(1)
}

const USER_ID = 'cli-user'

// Create memory functions
const searchMemory = createMemorySearch({
  model: EMBEDDING_MODEL,
  apiKey: API_KEY,
  baseURL: BASE_URL,
})

const addMemory = createAddMemory({
  model: EMBEDDING_MODEL,
  apiKey: API_KEY,
  baseURL: BASE_URL,
})

// Create agent
const agent = createAgent({
  model: {
    modelId: MODEL,
    baseUrl: BASE_URL,
    apiKey: API_KEY,
    clientType: MODEL_CLIENT_TYPE as ModelClientType,
    name: MODEL,
  },
  maxContextLoadTime: 60, // 60 minutes
  language: 'Same as user input',
  onReadMemory: async (from: Date, to: Date) => {
    return await filterByTimestamp(from, to, USER_ID)
  },
  onSearchMemory: async (query: string) => {
    return await searchMemory({ user: USER_ID, query, maxResults: 5 })
  },
  onFinish: async (messages) => {
    // Save conversation to memory - type conversion handled internally
    const memoryUnit: MemoryUnit = {
      messages: messages as unknown as MemoryUnit['messages'],
      timestamp: new Date(),
      user: USER_ID,
      raw: '', // will be generated by addMemory
    }
    await addMemory({ memory: memoryUnit })
  },
})

async function main() {
  console.log('🤖 Agent CLI Started')
  console.log('Type your message and press Enter. Type "exit" to quit.\n')

  // Load context
  await agent.loadContext()

  const rl = createInterface({ input, output })

  rl.on('line', async (line) => {
    const userInput = line.trim()

    if (userInput === 'exit' || userInput === 'quit') {
      console.log('\n👋 Goodbye!')
      rl.close()
      process.exit(0)
    }

    if (!userInput) {
      rl.prompt()
      return
    }

    try {
      process.stdout.write('\n🤖 ')

      let hasOutput = false
      for await (const event of agent.ask(userInput)) {
        if (event.type === 'text-delta' && 'text' in event && event.text) {
          process.stdout.write(String(event.text))
          hasOutput = true
        } else if (event.type === 'tool-call' && 'toolName' in event) {
          process.stdout.write(`\n[Tool: ${event.toolName}]`)
          hasOutput = true
        }
      }

      if (!hasOutput) {
        process.stdout.write('(No response)')
      }
      console.log('\n')
    } catch (error) {
      console.error('\n❌ Error:', error instanceof Error ? error.message : String(error))
      console.log()
    }

    rl.prompt()
  })

  rl.setPrompt('You: ')
  rl.prompt()
}

main().catch(console.error)
@@ -3,13 +3,20 @@
  "version": "1.0.0",
  "description": "Agent package for the phonetutor monorepo",
  "scripts": {
-    "test": "vitest"
+    "test": "vitest",
+    "start": "bun run client/index.ts"
  },
  "keywords": [],
  "author": "Phonetutor",
  "license": "ISC",
  "packageManager": "pnpm@10.27.0",
  "dependencies": {
    "@ai-sdk/anthropic": "^3.0.9",
    "@ai-sdk/google": "^3.0.6",
    "@ai-sdk/openai": "^3.0.7",
    "@memohome/memory": "workspace:*",
    "@memohome/shared": "workspace:*",
    "ai": "^6.0.25",
    "dotenv": "^17.2.3",
    "xsai": "^0.4.1",
    "zod": "^4.3.5"
@@ -1,3 +1,77 @@
-import { streamText } from 'xsai'
+import { streamText, ModelMessage, stepCountIs } from 'ai'
import { AgentParams } from './types'
import { system } from './prompts'
import { getMemoryTools } from './tools'
import { MemoryUnit } from '@memohome/memory'
import { createGateway } from './gateway'

-streamText({})
export const createAgent = (params: AgentParams) => {
  const messages: ModelMessage[] = []
  const memory: MemoryUnit[] = []

  const gateway = createGateway(params.model)

  const getTools = async () => {
    return {
      ...getMemoryTools({
        searchMemory: params.onSearchMemory ?? (() => Promise.resolve([])),
        // Name the parameter `loaded` so it does not shadow the outer `memory`
        // array (with the shadow, searched memories never reached the prompt)
        onLoadMemory: async (loaded) => {
          memory.push(...loaded)
        },
      }),
    }
  }

  // Guard against repeated loads: `ask` calls this on every turn and the CLI
  // also calls it once at startup, so without the flag the same context
  // messages would be unshifted again and again
  let contextLoaded = false
  const loadContext = async () => {
    if (contextLoaded) return
    contextLoaded = true
    const from = new Date(Date.now() - params.maxContextLoadTime * 60 * 1000)
    const to = new Date()
    const loaded = await params.onReadMemory?.(from, to) ?? []
    const context = loaded.flatMap(m => m.messages)
    messages.unshift(...context)
  }

  const getSystemPrompt = () => {
    return system({
      date: new Date(),
      language: params.language ?? 'Same as user input',
      locale: params.locale,
      maxContextLoadTime: params.maxContextLoadTime,
      memory,
    })
  }

  async function* ask(input: string) {
    await loadContext()
    const user: ModelMessage = {
      role: 'user',
      content: input,
    }
    messages.push(user)
    const { fullStream, response } = streamText({
      model: gateway,
      system: getSystemPrompt(),
      // Recompute the system prompt before each step so memories loaded by
      // tool calls are visible to subsequent steps
      prepareStep: async () => {
        return {
          system: getSystemPrompt(),
        }
      },
      stopWhen: stepCountIs(10),
      messages,
      tools: await getTools(),
    })
    for await (const event of fullStream) {
      yield event
    }
    const newMessages = (await response).messages
    // Keep the assistant turn in the running history for subsequent asks
    messages.push(...newMessages)
    await params.onFinish?.([
      user,
      ...newMessages,
    ])
  }

  return {
    ask,
    loadContext,
    getSystemPrompt,
  }
}
@@ -0,0 +1,17 @@
import { createGateway as createAiGateway } from 'ai'
import { createOpenAI } from '@ai-sdk/openai'
import { createAnthropic } from '@ai-sdk/anthropic'
import { createGoogleGenerativeAI } from '@ai-sdk/google'
import { BaseModel, ModelClientType } from '@memohome/shared'

export const createGateway = (model: BaseModel) => {
  const clients = {
    [ModelClientType.OPENAI]: createOpenAI,
    [ModelClientType.ANTHROPIC]: createAnthropic,
    [ModelClientType.GOOGLE]: createGoogleGenerativeAI,
  }
  return (clients[model.clientType] ?? createAiGateway)({
    apiKey: model.apiKey,
    baseURL: model.baseUrl,
  })(model.modelId)
}
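A minimal usage sketch, assuming the `BaseModel` shape from `@memohome/shared` below; the Anthropic values and the import path are illustrative:

```ts
import { streamText } from 'ai'
import { ModelClientType } from '@memohome/shared'
import { createGateway } from './gateway'

// Illustrative config: clientType selects the matching provider factory
// above, which is then called with the model id to get a language model
const model = createGateway({
  modelId: 'claude-3-5-sonnet-20241022',
  baseUrl: 'https://api.anthropic.com/v1',
  apiKey: process.env.API_KEY!,
  clientType: ModelClientType.ANTHROPIC,
})

const { textStream } = streamText({ model, prompt: 'Hello!' })
```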
@@ -0,0 +1,3 @@
export * from './agent'
export * from './types'
@@ -1,15 +1,20 @@
import { MemoryUnit } from '@memohome/memory'
import { block, quote } from './utils'

export interface SystemParams {
  date: Date
-  locale: Intl.LocalesArgument
+  locale?: Intl.LocalesArgument
  language: string
  maxContextLoadTime: number
  memory: MemoryUnit[]
}

-export const system = ({ date, locale }: SystemParams) => {
+export const system = ({ date, locale, language, maxContextLoadTime, memory }: SystemParams) => {
  return `
---
date: ${date.toLocaleDateString(locale)}
time: ${date.toLocaleTimeString(locale)}
-language: ${locale}
+timezone: ${date.getTimezoneOffset()}
+language: ${language}
---
You are a personal housekeeper assistant, able to manage the master's daily affairs.
@@ -17,5 +22,13 @@ export const system = ({ date, locale }: SystemParams) => {
- Long memory: You possess long-term memory; conversations from the last 24 hours will be directly loaded into your context. Additionally, you can use tools to search for past memories.
- Scheduled tasks: You can create scheduled tasks to automatically remind you to do something.
- Messaging: You may be allowed to use messaging software to send messages to the master.

**Memory**
- Your context has been loaded from the last ${maxContextLoadTime} minutes.
- You can use ${quote('search-memory')} to search past memories with natural language.
- Search results are chat history, loaded into your system prompt as context.

**Past Memory Loaded**
${block(memory.map(m => m.raw).join('\n\n'), 'memory')}
`.trim()
}
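For reference, a hypothetical render of this template (input values assumed, no memory loaded); the exact date/time strings depend on the runtime locale:

```ts
// Sketch only: system() called with assumed values
const prompt = system({
  date: new Date('2026-01-01T09:00:00'),
  language: 'Same as user input',
  maxContextLoadTime: 60,
  memory: [],
})
// prompt then starts with a front-matter block like:
// ---
// date: 1/1/2026
// time: 9:00:00 AM
// timezone: -540   (for UTC+9; getTimezoneOffset() returns minutes, sign-inverted)
// language: Same as user input
// ---
```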
@@ -0,0 +1,7 @@
export const quote = (content: string) => {
  return `\`${content}\``
}

export const block = (content: string, tag: string = '') => {
  return `\`\`\`${tag}\n${content}\n\`\`\``
}
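These helpers just wrap text in Markdown syntax, for example:

```ts
quote('search-memory')      // => "`search-memory`"
block('User: hi', 'memory') // => "```memory\nUser: hi\n```"
```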
@@ -0,0 +1 @@
export * from './memory'
@@ -0,0 +1,29 @@
import { MemoryUnit } from '@memohome/memory'
import { tool } from 'ai'
import { z } from 'zod'

export interface GetMemoryToolParams {
  searchMemory: (query: string) => Promise<MemoryUnit[]>
  onLoadMemory: (memory: MemoryUnit[]) => Promise<void>
}

export const getMemoryTools = ({ searchMemory, onLoadMemory }: GetMemoryToolParams) => {
  const searchMemoryTool = tool({
    description: 'Search chat history in the memory',
    inputSchema: z.object({
      query: z.string().describe('The query to search the memory'),
    }),
    execute: async ({ query }) => {
      const memory = await searchMemory(query)
      // Await the callback so the memories are in context before reporting success
      await onLoadMemory(memory)
      return {
        success: true,
        message: `${memory.length} memories have been loaded into your context`,
      }
    },
  })

  return {
    'search-memory': searchMemoryTool,
  }
}
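A minimal wiring sketch with stubbed callbacks (everything here is illustrative, not the real implementation):

```ts
import { getMemoryTools } from './tools'
import type { MemoryUnit } from '@memohome/memory'

const loaded: MemoryUnit[] = []
const tools = getMemoryTools({
  // Stub: a real implementation would embed `query` and search the DB
  searchMemory: async (query) => [],
  onLoadMemory: async (units) => { loaded.push(...units) },
})
// `tools` can be passed straight to streamText({ tools }), as agent.ts does
```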
@@ -0,0 +1,28 @@
import type { MemoryUnit } from '@memohome/memory'
import { BaseModel } from '@memohome/shared'
import { ModelMessage } from 'ai'

export interface AgentParams {
  model: BaseModel

  /**
   * Unit: minutes
   */
  maxContextLoadTime: number

  locale?: Intl.LocalesArgument

  /**
   * Preferred language of the assistant.
   * @default 'Same as user input'
   */
  language?: string

  onReadMemory?: (from: Date, to: Date) => Promise<MemoryUnit[]>

  onSearchMemory?: (query: string) => Promise<MemoryUnit[]>

  onFinish?: (messages: ModelMessage[]) => Promise<void>

  onError?: (error: Error) => Promise<void>
}
@@ -14,7 +14,9 @@
  "license": "ISC",
  "packageManager": "pnpm@10.27.0",
  "dependencies": {
    "@ai-sdk/openai": "^3.0.7",
    "@memohome/db": "workspace:*",
    "ai": "^6.0.25",
    "drizzle-orm": "^0.45.1",
    "xsai": "^0.4.1"
  }
@@ -6,7 +6,7 @@ import { db } from '@memohome/db'
import { memory } from '@memohome/db/schema'

export interface AddMemoryParams extends EmbedParams {
-  locale: Intl.LocalesArgument
+  locale?: Intl.LocalesArgument
}

export interface AddMemoryInput {
@@ -24,6 +24,7 @@ export const createAddMemory = (params: AddMemoryParams) =>
  })
  await db.insert(memory)
    .values({
      id: crypto.randomUUID(),
      timestamp: memoryUnit.timestamp,
      user: memoryUnit.user,
      rawContent,
@@ -1,7 +1,7 @@
-import { Message } from 'xsai'
+import { ModelMessage } from 'ai'

export interface MemoryUnit {
-  messages: Message[]
+  messages: ModelMessage[]
  timestamp: Date
  user: string
  raw: string
@@ -1,14 +1,17 @@
-import { Message } from 'xsai'
+import { ModelMessage } from 'ai'
import { MemoryUnit } from './memory-unit'

-export const rawMessages = (messages: Message[]) => {
+export const rawMessages = (messages: ModelMessage[]) => {
  return messages.map((message) => {
    if (message.role === 'user') {
+      if (Array.isArray(message.content)) {
+        return `User: ${message.content.filter(c => c.type === 'text').map(c => c.text).join('\n')}`
+      }
      return `User: ${message.content}`
    } else if (message.role === 'assistant') {
      let toolCalls = ''
-      if (message.tool_calls && message.tool_calls.length !== 0) {
-        toolCalls = `Tool Calls: ${message.tool_calls.map(t => t.function.name).join(', ')}`
+      if (Array.isArray(message.content)) {
+        toolCalls = message.content.filter(c => c.type === 'tool-call').map(c => c.toolName).join(', ')
      }
      return `You: ${message.content} \n${toolCalls}`
    } else if (message.role === 'tool') {
@@ -21,7 +24,7 @@ export const rawMessages = (messages: Message[]) => {
    .join('\n\n')
}

-export const rawMemory = (memory: MemoryUnit, locale: Intl.LocalesArgument) => {
+export const rawMemory = (memory: MemoryUnit, locale?: Intl.LocalesArgument) => {
  return `
---
date: ${memory.timestamp.toLocaleDateString(locale)}
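For intuition, a rough worked example of `rawMessages` (output inferred from the code above; the trailing space and newline after the assistant text come from the tool-call suffix, empty here):

```ts
rawMessages([
  { role: 'user', content: 'Buy milk tomorrow' },
  { role: 'assistant', content: 'Noted! I will remind you.' },
])
// => "User: Buy milk tomorrow\n\nYou: Noted! I will remind you. \n"
```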
@@ -1,6 +1,7 @@
-import { embed } from 'xsai'
+import { embed } from 'ai'
import { filterByEmbedding } from './filter'
import { EmbedParams } from './types'
+import { createOpenAI } from '@ai-sdk/openai'

// eslint-disable-next-line @typescript-eslint/no-empty-object-type
export interface MemorySearchParams extends EmbedParams { }
@@ -14,10 +15,11 @@ export interface MemorySearchInput {
export const createMemorySearch = (params: MemorySearchParams) =>
  async ({ user, query, maxResults = 10 }: MemorySearchInput) => {
    const { embedding } = await embed({
-      model: params.model,
-      input: query,
-      apiKey: params.apiKey,
-      baseURL: params.baseURL,
+      model: createOpenAI({
+        apiKey: params.apiKey,
+        baseURL: params.baseURL,
+      }).embedding(params.model),
+      value: query,
    })
    return await filterByEmbedding(embedding, user, maxResults)
  }
@@ -0,0 +1 @@
export * from './model'
@@ -0,0 +1,37 @@
export enum ModelClientType {
  OPENAI = 'openai',
  ANTHROPIC = 'anthropic',
  GOOGLE = 'google',
}

export interface BaseModel {
  /**
   * @description The unique identifier for the model
   * @example 'gpt-4o'
   */
  modelId: string

  /**
   * @description The base URL for the model
   * @example 'https://api.openai.com/v1'
   */
  baseUrl: string

  /**
   * @description The API key for the model
   * @example 'sk-1234567890'
   */
  apiKey: string

  /**
   * @description The client type for the model
   * @enum {ModelClientType}
   */
  clientType: ModelClientType

  /**
   * @description The display name for the model
   * @example 'GPT 4o'
   */
  name?: string
}