refactor: split agent logic and gateway into separate packages (#90)

* feat: add @memoh/agent

* chore: use @memoh/agent in @memoh/gateway
This commit is contained in:
Acbox Liu
2026-02-22 02:06:47 +08:00
committed by GitHub
parent c591af14b0
commit e6d70b523e
38 changed files with 198 additions and 884 deletions
+1 -4
View File
@@ -8,10 +8,7 @@
},
"dependencies": {
"@memoh/config": "workspace:*",
"@ai-sdk/anthropic": "^3.0.9",
"@ai-sdk/google": "^3.0.6",
"@ai-sdk/mcp": "^1.0.6",
"@ai-sdk/openai": "^3.0.7",
"@memoh/agent": "workspace:*",
"@elysiajs/bearer": "^1.4.2",
"@elysiajs/cors": "^1.4.1",
"@modelcontextprotocol/sdk": "^1.25.2",
+1 -4
View File
@@ -3,6 +3,7 @@ import { chatModule } from './modules/chat'
import { corsMiddleware } from './middlewares/cors'
import { errorMiddleware } from './middlewares/error'
import { loadConfig, getBaseUrl as getBaseUrlByConfig } from '@memoh/config'
import { AuthFetcher } from '@memoh/agent'
const config = loadConfig('../config.toml')
@@ -10,10 +11,6 @@ export const getBaseUrl = () => {
return getBaseUrlByConfig(config)
}
export type AuthFetcher = (
url: string,
options?: RequestInit,
) => Promise<Response>;
export const createAuthFetcher = (bearer: string | undefined): AuthFetcher => {
return async (url: string, options?: RequestInit) => {
const requestOptions = options ?? {}
+1 -1
View File
@@ -1,5 +1,5 @@
import z from 'zod'
import { allActions } from './types'
import { allActions } from '@memoh/agent'
export const AgentSkillModel = z.object({
name: z.string().min(1, 'Skill name is required'),
+1 -3
View File
@@ -1,11 +1,9 @@
import { Elysia } from 'elysia'
import z from 'zod'
import { createAgent } from '../agent'
import { createAgent, ModelConfig, allActions } from '@memoh/agent'
import { createAuthFetcher, getBaseUrl } from '../index'
import { ModelConfig } from '../types'
import { bearerMiddleware } from '../middlewares/bearer'
import { AgentSkillModel, AllowedActionModel, AttachmentModel, IdentityContextModel, InboxItemModel, MCPConnectionModel, ModelConfigModel, ScheduleModel } from '../models'
import { allActions } from '../types'
import { sseChunked } from '../utils/sse'
const AgentModel = z.object({
-358
View File
@@ -1,358 +0,0 @@
import { describe, test, expect } from 'bun:test'
import type { ModelMessage } from 'ai'
import {
parseAttachmentPaths,
extractAttachmentsFromText,
stripAttachmentsFromMessages,
dedupeAttachments,
AttachmentsStreamExtractor,
} from '../utils/attachments'
import { buildNativeImageParts } from '../agent'
import type { ContainerFileAttachment, GatewayInputAttachment } from '../types/attachment'
// ---------------------------------------------------------------------------
// parseAttachmentPaths
// ---------------------------------------------------------------------------
describe('parseAttachmentPaths', () => {
test('parses standard list', () => {
const input = `
- /path/to/file.pdf
- /path/to/video.mp4
`
expect(parseAttachmentPaths(input)).toEqual([
'/path/to/file.pdf',
'/path/to/video.mp4',
])
})
test('ignores lines without leading dash', () => {
const input = `
some random text
- /valid/path.txt
not a path
- /another/path.png
`
expect(parseAttachmentPaths(input)).toEqual([
'/valid/path.txt',
'/another/path.png',
])
})
test('returns empty array for empty input', () => {
expect(parseAttachmentPaths('')).toEqual([])
})
test('handles extra whitespace around paths', () => {
const input = ' - /spaced/path.txt '
expect(parseAttachmentPaths(input)).toEqual(['/spaced/path.txt'])
})
})
// ---------------------------------------------------------------------------
// extractAttachmentsFromText
// ---------------------------------------------------------------------------
describe('extractAttachmentsFromText', () => {
test('extracts a single block', () => {
const text = 'Hello world\n<attachments>\n- /file.pdf\n</attachments>\nGoodbye'
const { cleanedText, attachments } = extractAttachmentsFromText(text)
expect(attachments).toEqual([{ type: 'file', path: '/file.pdf' }])
expect(cleanedText).toBe('Hello world\n\nGoodbye')
})
test('extracts multiple blocks', () => {
const text = [
'Start',
'<attachments>',
'- /a.txt',
'</attachments>',
'Middle',
'<attachments>',
'- /b.txt',
'</attachments>',
'End',
].join('\n')
const { cleanedText, attachments } = extractAttachmentsFromText(text)
expect(attachments).toHaveLength(2)
expect(attachments.map(a => a.path)).toEqual(['/a.txt', '/b.txt'])
expect(cleanedText).toContain('Start')
expect(cleanedText).toContain('Middle')
expect(cleanedText).toContain('End')
expect(cleanedText).not.toContain('<attachments>')
})
test('deduplicates paths across blocks', () => {
const text = [
'<attachments>',
'- /dup.txt',
'</attachments>',
'<attachments>',
'- /dup.txt',
'</attachments>',
].join('\n')
const { attachments } = extractAttachmentsFromText(text)
expect(attachments).toHaveLength(1)
expect(attachments[0].path).toBe('/dup.txt')
})
test('returns original text when no blocks present', () => {
const text = 'No attachments here'
const { cleanedText, attachments } = extractAttachmentsFromText(text)
expect(cleanedText).toBe('No attachments here')
expect(attachments).toEqual([])
})
test('collapses excessive newlines left by removal', () => {
const text = 'Line1\n\n\n<attachments>\n- /f.txt\n</attachments>\n\n\nLine2'
const { cleanedText } = extractAttachmentsFromText(text)
// Should not have more than two consecutive newlines
expect(cleanedText).not.toMatch(/\n{3,}/)
})
})
// ---------------------------------------------------------------------------
// stripAttachmentsFromMessages
// ---------------------------------------------------------------------------
describe('stripAttachmentsFromMessages', () => {
test('strips from assistant message with string content', () => {
const messages: ModelMessage[] = [
{ role: 'user', content: 'hi' },
{
role: 'assistant',
content: 'Here you go\n<attachments>\n- /result.pdf\n</attachments>',
},
]
const { messages: stripped, attachments } = stripAttachmentsFromMessages(messages)
expect(attachments).toEqual([{ type: 'file', path: '/result.pdf' }])
const assistantMsg = stripped.find(m => m.role === 'assistant')!
expect((assistantMsg as { content: string }).content).not.toContain('<attachments>')
})
test('strips from assistant message with array content containing TextPart', () => {
const messages: ModelMessage[] = [
{
role: 'assistant',
content: [
{ type: 'text', text: 'Check this\n<attachments>\n- /img.png\n</attachments>' },
],
},
]
const { messages: stripped, attachments } = stripAttachmentsFromMessages(messages)
expect(attachments).toEqual([{ type: 'file', path: '/img.png' }])
const content = (stripped[0] as { content: Array<{ type: string; text?: string }> }).content
expect(content[0].text).not.toContain('<attachments>')
})
test('does not modify user or tool messages', () => {
const messages: ModelMessage[] = [
{ role: 'user', content: '<attachments>\n- /should-stay.txt\n</attachments>' },
]
const { messages: stripped, attachments } = stripAttachmentsFromMessages(messages)
expect(attachments).toEqual([])
expect((stripped[0] as { content: string }).content).toContain('<attachments>')
})
test('deduplicates attachments across messages', () => {
const messages: ModelMessage[] = [
{ role: 'assistant', content: '<attachments>\n- /same.txt\n</attachments>' },
{ role: 'assistant', content: '<attachments>\n- /same.txt\n</attachments>' },
]
const { attachments } = stripAttachmentsFromMessages(messages)
expect(attachments).toHaveLength(1)
})
})
// ---------------------------------------------------------------------------
// dedupeAttachments
// ---------------------------------------------------------------------------
describe('dedupeAttachments', () => {
test('deduplicates file attachments by path', () => {
const items: ContainerFileAttachment[] = [
{ type: 'file', path: '/a.txt' },
{ type: 'file', path: '/b.txt' },
{ type: 'file', path: '/a.txt' },
]
const result = dedupeAttachments(items)
expect(result).toHaveLength(2)
})
test('deduplicates image attachments by base64 prefix', () => {
const base64 = 'a'.repeat(100)
const result = dedupeAttachments([
{ type: 'image', base64 },
{ type: 'image', base64 },
])
expect(result).toHaveLength(1)
})
test('keeps different types separate', () => {
const result = dedupeAttachments([
{ type: 'file', path: '/a.txt' },
{ type: 'image', base64: 'abc' },
])
expect(result).toHaveLength(2)
})
})
describe('buildNativeImageParts', () => {
test('keeps inline data url and public url images', () => {
const attachments: GatewayInputAttachment[] = [
{ type: 'image', transport: 'inline_data_url', payload: 'data:image/png;base64,AAAA' },
{ type: 'image', transport: 'public_url', payload: 'https://example.com/demo.png' },
]
const parts = buildNativeImageParts(attachments)
expect(parts).toHaveLength(2)
expect(parts[0].image).toBe('data:image/png;base64,AAAA')
expect(parts[1].image).toBe('https://example.com/demo.png')
})
test('drops tool_file_ref images', () => {
const attachments: GatewayInputAttachment[] = [
{ type: 'image', transport: 'tool_file_ref', payload: '/data/media/image/demo.png' },
]
const parts = buildNativeImageParts(attachments)
expect(parts).toEqual([])
})
})
// ---------------------------------------------------------------------------
// AttachmentsStreamExtractor
// ---------------------------------------------------------------------------
describe('AttachmentsStreamExtractor', () => {
/** Helper: simulates streaming by feeding one character at a time. */
const feedCharByChar = (extractor: AttachmentsStreamExtractor, text: string) => {
let visibleText = ''
const attachments: ContainerFileAttachment[] = []
for (const ch of text) {
const result = extractor.push(ch)
visibleText += result.visibleText
attachments.push(...result.attachments)
}
const remainder = extractor.flushRemainder()
visibleText += remainder.visibleText
attachments.push(...remainder.attachments)
return { visibleText, attachments }
}
/** Helper: simulates streaming by feeding the entire string at once. */
const feedAtOnce = (extractor: AttachmentsStreamExtractor, text: string) => {
const result = extractor.push(text)
const remainder = extractor.flushRemainder()
return {
visibleText: result.visibleText + remainder.visibleText,
attachments: [...result.attachments, ...remainder.attachments],
}
}
test('passes through plain text (char-by-char)', () => {
const ext = new AttachmentsStreamExtractor()
const { visibleText, attachments } = feedCharByChar(ext, 'Hello world')
expect(visibleText).toBe('Hello world')
expect(attachments).toEqual([])
})
test('passes through plain text (all-at-once)', () => {
const ext = new AttachmentsStreamExtractor()
const { visibleText, attachments } = feedAtOnce(ext, 'Hello world')
expect(visibleText).toBe('Hello world')
expect(attachments).toEqual([])
})
test('extracts attachments block (char-by-char)', () => {
const ext = new AttachmentsStreamExtractor()
const input = 'Before<attachments>\n- /file.pdf\n</attachments>After'
const { visibleText, attachments } = feedCharByChar(ext, input)
expect(visibleText).toBe('BeforeAfter')
expect(attachments).toEqual([{ type: 'file', path: '/file.pdf' }])
})
test('extracts attachments block (all-at-once)', () => {
const ext = new AttachmentsStreamExtractor()
const input = 'Before<attachments>\n- /file.pdf\n</attachments>After'
const { visibleText, attachments } = feedAtOnce(ext, input)
expect(visibleText).toBe('BeforeAfter')
expect(attachments).toEqual([{ type: 'file', path: '/file.pdf' }])
})
test('extracts multiple paths from one block', () => {
const ext = new AttachmentsStreamExtractor()
const input = '<attachments>\n- /a.txt\n- /b.txt\n</attachments>'
const { attachments } = feedCharByChar(ext, input)
expect(attachments.map(a => a.path)).toEqual(['/a.txt', '/b.txt'])
})
test('handles multiple blocks in one stream', () => {
const ext = new AttachmentsStreamExtractor()
const input = 'A<attachments>\n- /x.txt\n</attachments>B<attachments>\n- /y.txt\n</attachments>C'
const { visibleText, attachments } = feedCharByChar(ext, input)
expect(visibleText).toBe('ABC')
expect(attachments.map(a => a.path)).toEqual(['/x.txt', '/y.txt'])
})
test('handles chunk boundaries splitting the opening tag', () => {
const ext = new AttachmentsStreamExtractor()
let visible = ''
const attachments: ContainerFileAttachment[] = []
// Feed the opening tag across two chunks
let r = ext.push('Hello <attach')
visible += r.visibleText
attachments.push(...r.attachments)
r = ext.push('ments>\n- /split.txt\n</attachments> Done')
visible += r.visibleText
attachments.push(...r.attachments)
const remainder = ext.flushRemainder()
visible += remainder.visibleText
attachments.push(...remainder.attachments)
expect(visible).toBe('Hello Done')
expect(attachments).toEqual([{ type: 'file', path: '/split.txt' }])
})
test('handles chunk boundaries splitting the closing tag', () => {
const ext = new AttachmentsStreamExtractor()
let visible = ''
const attachments: ContainerFileAttachment[] = []
let r = ext.push('<attachments>\n- /f.txt\n</attach')
visible += r.visibleText
attachments.push(...r.attachments)
r = ext.push('ments>Tail')
visible += r.visibleText
attachments.push(...r.attachments)
const remainder = ext.flushRemainder()
visible += remainder.visibleText
attachments.push(...remainder.attachments)
expect(visible).toBe('Tail')
expect(attachments).toEqual([{ type: 'file', path: '/f.txt' }])
})
test('flushRemainder returns raw text for unclosed block', () => {
const ext = new AttachmentsStreamExtractor()
ext.push('<attachments>\n- /orphan.txt\n')
const remainder = ext.flushRemainder()
// Unclosed block should be returned as visible text
expect(remainder.visibleText).toContain('<attachments>')
expect(remainder.visibleText).toContain('/orphan.txt')
expect(remainder.attachments).toEqual([])
})
test('text without any angle brackets passes through immediately', () => {
const ext = new AttachmentsStreamExtractor()
const r = ext.push('simple text without tags')
// Most of the text should be emitted (minus a small buffered tail)
const remainder = ext.flushRemainder()
const full = r.visibleText + remainder.visibleText
expect(full).toBe('simple text without tags')
})
})
-45
View File
@@ -1,45 +0,0 @@
import { describe, expect, test } from 'bun:test'
import { sseChunked } from '../utils/sse'
function parseChunkedSSE(payload: string): string {
const lines = payload.split('\n')
const dataLines = lines.filter(line => line.startsWith('data:'))
return dataLines.map(line => line.slice('data:'.length)).join('')
}
describe('sseChunked', () => {
test('reconstructs original payload losslessly', () => {
const input = JSON.stringify({
type: 'tool_call_end',
toolName: 'big_tool',
toolCallId: 'call-1',
// include whitespace and unicode so trimming/surrogate splitting bugs show up
result: ' leading spaces\tand tabs\nand unicode 😀😃😄 ',
blob: 'x'.repeat(200_000),
})
const chunked = sseChunked(input, 1024).toSSE()
const reconstructed = parseChunkedSSE(chunked)
expect(reconstructed).toBe(input)
})
test('chunkSize=1 does not produce invalid UTF-8 (surrogate pairs)', () => {
const input = `😀${'x'.repeat(1000)}😃`
const payload = sseChunked(input, 1).toSSE()
// Simulate the UTF-8 encode/decode step that happens over the network.
const encoded = new TextEncoder().encode(payload)
const decoded = new TextDecoder().decode(encoded)
expect(decoded).toBe(payload)
const reconstructed = parseChunkedSSE(decoded)
expect(reconstructed).toBe(input)
})
test('does not inject an extra space after data:', () => {
const input = ' abc'
const chunked = sseChunked(input, 2).toSSE()
expect(chunked.split('\n')[0]).toBe('data: a')
})
})
-94
View File
@@ -1,94 +0,0 @@
import { describe, expect, test } from 'bun:test'
import { getMCPTools } from '../tools/mcp'
describe('getMCPTools (unified endpoint)', () => {
test('loads tools from unified MCP HTTP endpoint', async () => {
const seenMethods: string[] = []
const seenAuthHeaders: string[] = []
const server = Bun.serve({
port: 0,
async fetch(request) {
seenAuthHeaders.push(request.headers.get('authorization') ?? '')
const body = await request.json().catch(() => ({} as Record<string, unknown>))
const method = typeof body?.method === 'string' ? body.method : ''
seenMethods.push(method)
if (method === 'initialize') {
return Response.json({
jsonrpc: '2.0',
id: body.id ?? null,
result: {
protocolVersion: '2025-06-18',
capabilities: {
tools: {
listChanged: false,
},
},
serverInfo: {
name: 'test-mcp',
version: '1.0.0',
},
},
})
}
if (method === 'notifications/initialized') {
return new Response(null, { status: 202 })
}
if (method === 'tools/list') {
return Response.json({
jsonrpc: '2.0',
id: body.id ?? null,
result: {
tools: [
{
name: 'search_memory',
description: 'Search memory',
inputSchema: {
type: 'object',
properties: {
query: { type: 'string' },
},
required: ['query'],
},
},
],
},
})
}
return Response.json({
jsonrpc: '2.0',
id: body.id ?? null,
error: {
code: -32601,
message: 'method not found',
},
})
},
})
try {
const endpoint = `http://127.0.0.1:${server.port}/bots/bot-1/tools`
const { tools, close } = await getMCPTools([{
type: 'http',
name: 'builtin',
url: endpoint,
headers: {
Authorization: 'Bearer test-token',
},
}])
expect(Object.keys(tools)).toContain('search_memory')
expect(seenMethods).toContain('initialize')
expect(seenMethods).toContain('tools/list')
expect(seenAuthHeaders.some(value => value === 'Bearer test-token')).toBe(true)
await close()
} finally {
server.stop(true)
}
})
})
-339
View File
@@ -1,339 +0,0 @@
interface JSONRPCRequest {
jsonrpc: string
id: string | number
method: string
params?: unknown
}
interface JSONRPCResponse<T = unknown> {
jsonrpc: string
id: string | number
result?: T
error?: {
code: number
message: string
}
}
interface ToolCallContent {
type: string
text?: string
}
interface ToolCallResult {
content: ToolCallContent[]
isError?: boolean
}
export interface FSFileEntry {
path: string
is_dir: boolean
size: number
mode: number
mod_time: string
}
export interface FSReadResult {
content: string
}
export interface FSReadBase64Result {
data: string
mime_type: string
}
export interface FSWriteResult {
ok: boolean
}
export interface FSListResult {
path: string
entries: FSFileEntry[]
}
export interface FSStatResult {
entry: FSFileEntry
}
export interface FSDeleteResult {
ok: boolean
}
export interface FSApplyPatchResult {
ok: boolean
}
export interface FSMkdirResult {
ok: boolean
}
export interface FSRenameResult {
ok: boolean
}
export interface GrepResult {
stdout: string
stderr: string
exit_code: number
}
export interface EchoResult {
text: string
}
export interface ToolInfo {
name: string
description?: string
inputSchema?: Record<string, unknown>
}
export interface ToolsListResult {
tools: ToolInfo[]
}
export interface UseContainerOptions {
url: string
fetch: (url: string, options?: RequestInit) => Promise<Response>
}
class JSONRPCClient {
private requestId = 0
constructor(
private url: string,
private fetch: (url: string, options?: RequestInit) => Promise<Response>
) {}
async call<T>(method: string, params?: unknown): Promise<T> {
const request: JSONRPCRequest = {
jsonrpc: '2.0',
id: ++this.requestId,
method,
params,
}
const response = await this.fetch(this.url, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(request),
})
if (!response.ok) {
throw new Error(`HTTP ${response.status}: ${response.statusText}`)
}
const jsonResponse: JSONRPCResponse<T> = await response.json()
if (jsonResponse.error) {
throw new Error(
`JSON-RPC Error ${jsonResponse.error.code}: ${jsonResponse.error.message}`
)
}
return jsonResponse.result as T
}
}
class MCPToolCaller {
constructor(private rpcClient: JSONRPCClient) {}
async callTool<T>(toolName: string, args: Record<string, unknown>): Promise<T> {
const result = await this.rpcClient.call<ToolCallResult>('tools/call', {
name: toolName,
arguments: args,
})
if (result.isError) {
const errorMessage = result.content?.[0]?.text || 'Tool execution failed'
throw new Error(errorMessage)
}
const textContent = result.content?.[0]?.text
if (textContent) {
try {
return JSON.parse(textContent) as T
} catch {
return textContent as T
}
}
throw new Error('No result content returned')
}
}
class FileSystemOperations {
constructor(private toolCaller: MCPToolCaller) {}
async read(path: string): Promise<string> {
const result = await this.toolCaller.callTool<FSReadResult>('fs.read', { path })
return result.content
}
async readBase64(path: string): Promise<FSReadBase64Result> {
return this.toolCaller.callTool<FSReadBase64Result>('fs.read_base64', { path })
}
async write(path: string, content: string): Promise<boolean> {
const result = await this.toolCaller.callTool<FSWriteResult>('fs.write', {
path,
content,
})
return result.ok
}
async list(path: string = '.', recursive: boolean = false): Promise<FSListResult> {
return this.toolCaller.callTool<FSListResult>('fs.list', { path, recursive })
}
async stat(path: string): Promise<FSFileEntry> {
const result = await this.toolCaller.callTool<FSStatResult>('fs.stat', { path })
return result.entry
}
async delete(path: string): Promise<boolean> {
const result = await this.toolCaller.callTool<FSDeleteResult>('fs.delete', { path })
return result.ok
}
async mkdir(path: string): Promise<boolean> {
const result = await this.toolCaller.callTool<FSMkdirResult>('fs.mkdir', { path })
return result.ok
}
async rename(source: string, destination: string): Promise<boolean> {
const result = await this.toolCaller.callTool<FSRenameResult>('fs.rename', {
source,
destination,
})
return result.ok
}
async applyPatch(path: string, patch: string): Promise<boolean> {
const result = await this.toolCaller.callTool<FSApplyPatchResult>('fs.apply_patch', {
path,
patch,
})
return result.ok
}
async exists(path: string): Promise<boolean> {
try {
await this.stat(path)
return true
} catch {
return false
}
}
async readJSON<T = unknown>(path: string): Promise<T> {
const content = await this.read(path)
return JSON.parse(content)
}
async writeJSON(path: string, data: unknown, pretty: boolean = true): Promise<boolean> {
const content = pretty ? JSON.stringify(data, null, 2) : JSON.stringify(data)
return this.write(path, content)
}
async append(path: string, content: string): Promise<boolean> {
const exists = await this.exists(path)
if (exists) {
const existing = await this.read(path)
return this.write(path, existing + content)
}
return this.write(path, content)
}
async copy(source: string, destination: string): Promise<boolean> {
const stat = await this.stat(source)
if (stat.is_dir) {
throw new Error('Directory copy not implemented. Use list + copy for each file.')
}
const content = await this.read(source)
return this.write(destination, content)
}
}
class SearchOperations {
constructor(private toolCaller: MCPToolCaller) {}
async grep(pattern: string, args: string[] = []): Promise<GrepResult> {
return this.toolCaller.callTool<GrepResult>('grep', { pattern, args })
}
async search(
pattern: string,
options: {
caseSensitive?: boolean
lineNumbers?: boolean
filesOnly?: boolean
} = {}
): Promise<string> {
const args: string[] = ['-r']
if (!options.caseSensitive) args.push('-i')
if (options.lineNumbers) args.push('-n')
if (options.filesOnly) args.push('-l')
const result = await this.grep(pattern, args)
return result.stdout
}
async findFiles(pattern: string): Promise<string[]> {
const result = await this.grep(pattern, ['-r', '-l'])
return result.stdout
.split('\n')
.map((line) => line.trim())
.filter((line) => line.length > 0)
}
}
class UtilityOperations {
constructor(private toolCaller: MCPToolCaller) {}
async echo(text: string): Promise<string> {
const result = await this.toolCaller.callTool<EchoResult>('echo', { text })
return result.text
}
async ping(): Promise<boolean> {
try {
const result = await this.echo('ping')
return result === 'ping'
} catch {
return false
}
}
}
export class ContainerClient {
private rpcClient: JSONRPCClient
private toolCaller: MCPToolCaller
public readonly fs: FileSystemOperations
public readonly search: SearchOperations
public readonly utils: UtilityOperations
constructor(options: UseContainerOptions) {
this.rpcClient = new JSONRPCClient(options.url, options.fetch)
this.toolCaller = new MCPToolCaller(this.rpcClient)
this.fs = new FileSystemOperations(this.toolCaller)
this.search = new SearchOperations(this.toolCaller)
this.utils = new UtilityOperations(this.toolCaller)
}
async listTools(): Promise<ToolsListResult> {
return this.rpcClient.call<ToolsListResult>('tools/list')
}
async callTool<T = unknown>(toolName: string, args: Record<string, unknown> = {}): Promise<T> {
return this.toolCaller.callTool<T>(toolName, args)
}
}
export const useContainer = (options: UseContainerOptions): ContainerClient => {
return new ContainerClient(options)
}
View File
+34
View File
@@ -0,0 +1,34 @@
# dependencies (bun install)
node_modules
# output
out
dist
*.tgz
# code coverage
coverage
*.lcov
# logs
logs
_.log
report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json
# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local
# caches
.eslintcache
.cache
*.tsbuildinfo
# IntelliJ based IDEs
.idea
# Finder (MacOS) folder config
.DS_Store
+2
View File
@@ -0,0 +1,2 @@
# @memoh/config
+26
View File
@@ -0,0 +1,26 @@
{
"name": "@memoh/agent",
"version": "0.1.0-beta.5",
"exports": {
".": "./src/index.ts"
},
"packageManager": "pnpm@10.27.0",
"module": "src/index.ts",
"type": "module",
"private": true,
"peerDependencies": {
"typescript": "^5"
},
"dependencies": {
"@ai-sdk/anthropic": "^3.0.9",
"@ai-sdk/google": "^3.0.6",
"@ai-sdk/mcp": "^1.0.6",
"@ai-sdk/openai": "^3.0.7",
"@mozilla/readability": "^0.6.0",
"ai": "^6.0.25",
"jsdom": "^27.4.0",
"toml": "^3.0.0",
"turndown": "^7.2.2",
"zod": "^4.3.6"
}
}
@@ -12,15 +12,15 @@ import {
AgentInput,
AgentParams,
AgentSkill,
AgentStreamAction,
allActions,
MCPConnection,
Schedule,
} from './types'
import { ModelInput, hasInputModality } from './types/model'
import { system, schedule, subagentSystem } from './prompts'
import { AuthFetcher } from './index'
import { AuthFetcher } from './types'
import { createModel } from './model'
import { AgentAction } from './types/action'
import {
extractAttachmentsFromText,
stripAttachmentsFromMessages,
@@ -115,7 +115,8 @@ export const createAgent = (
})
const response = await fetch(url, { method: 'POST', headers, body })
if (!response.ok) return ''
const data = await response.json().catch(() => ({}))
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const data = await response.json().catch(() => ({})) as any
const structured =
data?.result?.structuredContent ?? data?.result?.content?.[0]?.text
if (typeof structured === 'string') {
@@ -360,7 +361,7 @@ export const createAgent = (
return 'Model stream failed'
}
async function* stream(input: AgentInput): AsyncGenerator<AgentAction> {
async function* stream(input: AgentInput): AsyncGenerator<AgentStreamAction> {
const userPrompt = generateUserPrompt(input)
const messages = [...input.messages, userPrompt]
input.skills.forEach((skill) => enableSkill(skill))
+6
View File
@@ -0,0 +1,6 @@
export * from './agent'
export * from './types'
export * from './model'
export * from './utils'
export * from './tools'
export * from './prompts'
@@ -1,4 +1,4 @@
import { AuthFetcher } from '..'
import { AuthFetcher } from '../types'
import { AgentAction, AgentAuthContext, IdentityContext, ModelConfig } from '../types'
import { ToolSet } from 'ai'
import { getWebTools } from './web'
@@ -32,3 +32,8 @@ export const getTools = (
}
return tools
}
export * from './web'
export * from './subagent'
export * from './skill'
export * from './mcp'
@@ -1,6 +1,6 @@
import { HTTPMCPConnection, MCPConnection, SSEMCPConnection, StdioMCPConnection } from '../types'
import { createMCPClient } from '@ai-sdk/mcp'
import { AuthFetcher } from '../index'
import { AuthFetcher } from '../types'
import type { AgentAuthContext } from '../types/agent'
type MCPToolOptions = {
@@ -59,7 +59,7 @@ export const getMCPTools = async (connections: MCPConnection[], options: MCPTool
const text = await response.text().catch(() => '')
throw new Error(`mcp-stdio failed: ${response.status} ${text}`)
}
const data = await response.json().catch(() => ({} as { url?: string }))
const data = await response.json().catch(() => ({})) as { url?: string }
const rawUrl = typeof data?.url === 'string' ? data.url : ''
if (!rawUrl) {
throw new Error('mcp-stdio response missing url')
@@ -2,7 +2,7 @@ import { tool } from 'ai'
import { z } from 'zod'
import { createAgent } from '../agent'
import { ModelConfig, AgentAuthContext } from '../types'
import { AuthFetcher } from '..'
import { AuthFetcher } from '../types'
import { AgentAction, IdentityContext } from '../types/agent'
export interface SubagentToolParams {
@@ -67,7 +67,7 @@ export interface AgentEndAction extends BaseAction {
usages: (LanguageModelUsage | null)[]
}
export type AgentAction =
export type AgentStreamAction =
| AgentStartAction
| ReasoningStartAction
| ReasoningDeltaAction
+4
View File
@@ -0,0 +1,4 @@
export type AuthFetcher = (
url: string,
options?: RequestInit,
) => Promise<Response>
@@ -2,4 +2,6 @@ export * from './agent'
export * from './model'
export * from './schedule'
export * from './attachment'
export * from './mcp'
export * from './mcp'
export * from './auth'
export * from './action'
+2
View File
@@ -0,0 +1,2 @@
export * from './attachments'
export * from './headers'
+22
View File
@@ -0,0 +1,22 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "ESNext",
"lib": ["ES2022"],
"moduleResolution": "bundler",
"allowImportingTsExtensions": true,
"noEmit": true,
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
"resolveJsonModule": true,
"allowSyntheticDefaultImports": true,
"jsx": "react-jsx",
"outDir": "./dist",
"rootDir": "./src",
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist"]
}
+80 -26
View File
@@ -48,24 +48,15 @@ importers:
agent:
dependencies:
'@ai-sdk/anthropic':
specifier: ^3.0.9
version: 3.0.9(zod@4.3.5)
'@ai-sdk/google':
specifier: ^3.0.6
version: 3.0.6(zod@4.3.5)
'@ai-sdk/mcp':
specifier: ^1.0.6
version: 1.0.6(zod@4.3.5)
'@ai-sdk/openai':
specifier: ^3.0.7
version: 3.0.7(zod@4.3.5)
'@elysiajs/bearer':
specifier: ^1.4.2
version: 1.4.2(elysia@1.4.25(@sinclair/typebox@0.34.47)(@types/bun@1.3.9)(exact-mirror@0.2.6(@sinclair/typebox@0.34.47))(file-type@21.3.0)(openapi-types@12.1.3)(typescript@5.9.3))
'@elysiajs/cors':
specifier: ^1.4.1
version: 1.4.1(elysia@1.4.25(@sinclair/typebox@0.34.47)(@types/bun@1.3.9)(exact-mirror@0.2.6(@sinclair/typebox@0.34.47))(file-type@21.3.0)(openapi-types@12.1.3)(typescript@5.9.3))
'@memoh/agent':
specifier: workspace:*
version: link:../packages/agent
'@memoh/config':
specifier: workspace:*
version: link:../packages/config
@@ -113,6 +104,42 @@ importers:
specifier: ^3.5.0
version: 3.5.26(typescript@5.9.3)
packages/agent:
dependencies:
'@ai-sdk/anthropic':
specifier: ^3.0.9
version: 3.0.9(zod@4.3.6)
'@ai-sdk/google':
specifier: ^3.0.6
version: 3.0.6(zod@4.3.6)
'@ai-sdk/mcp':
specifier: ^1.0.6
version: 1.0.6(zod@4.3.6)
'@ai-sdk/openai':
specifier: ^3.0.7
version: 3.0.7(zod@4.3.6)
'@mozilla/readability':
specifier: ^0.6.0
version: 0.6.0
ai:
specifier: ^6.0.25
version: 6.0.25(zod@4.3.6)
jsdom:
specifier: ^27.4.0
version: 27.4.0
toml:
specifier: ^3.0.0
version: 3.0.0
turndown:
specifier: ^7.2.2
version: 7.2.2
typescript:
specifier: ^5
version: 5.9.3
zod:
specifier: ^4.3.6
version: 4.3.6
packages/cli:
dependencies:
'@memoh/sdk':
@@ -5131,6 +5158,9 @@ packages:
zod@4.3.5:
resolution: {integrity: sha512-k7Nwx6vuWx1IJ9Bjuf4Zt1PEllcwe7cls3VNzm4CQ1/hgtFUK2bRNG3rvnpPUhFjmqJKAKtjV576KnUkHocg/g==}
zod@4.3.6:
resolution: {integrity: sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==}
zwitch@2.0.4:
resolution: {integrity: sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==}
@@ -5138,11 +5168,11 @@ snapshots:
'@acemir/cssom@0.9.31': {}
'@ai-sdk/anthropic@3.0.9(zod@4.3.5)':
'@ai-sdk/anthropic@3.0.9(zod@4.3.6)':
dependencies:
'@ai-sdk/provider': 3.0.2
'@ai-sdk/provider-utils': 4.0.4(zod@4.3.5)
zod: 4.3.5
'@ai-sdk/provider-utils': 4.0.4(zod@4.3.6)
zod: 4.3.6
'@ai-sdk/gateway@3.0.10(zod@4.3.5)':
dependencies:
@@ -5151,24 +5181,31 @@ snapshots:
'@vercel/oidc': 3.1.0
zod: 4.3.5
'@ai-sdk/google@3.0.6(zod@4.3.5)':
'@ai-sdk/gateway@3.0.10(zod@4.3.6)':
dependencies:
'@ai-sdk/provider': 3.0.2
'@ai-sdk/provider-utils': 4.0.4(zod@4.3.5)
zod: 4.3.5
'@ai-sdk/provider-utils': 4.0.4(zod@4.3.6)
'@vercel/oidc': 3.1.0
zod: 4.3.6
'@ai-sdk/mcp@1.0.6(zod@4.3.5)':
'@ai-sdk/google@3.0.6(zod@4.3.6)':
dependencies:
'@ai-sdk/provider': 3.0.2
'@ai-sdk/provider-utils': 4.0.5(zod@4.3.5)
'@ai-sdk/provider-utils': 4.0.4(zod@4.3.6)
zod: 4.3.6
'@ai-sdk/mcp@1.0.6(zod@4.3.6)':
dependencies:
'@ai-sdk/provider': 3.0.2
'@ai-sdk/provider-utils': 4.0.5(zod@4.3.6)
pkce-challenge: 5.0.1
zod: 4.3.5
zod: 4.3.6
'@ai-sdk/openai@3.0.7(zod@4.3.5)':
'@ai-sdk/openai@3.0.7(zod@4.3.6)':
dependencies:
'@ai-sdk/provider': 3.0.2
'@ai-sdk/provider-utils': 4.0.4(zod@4.3.5)
zod: 4.3.5
'@ai-sdk/provider-utils': 4.0.4(zod@4.3.6)
zod: 4.3.6
'@ai-sdk/provider-utils@4.0.4(zod@4.3.5)':
dependencies:
@@ -5177,12 +5214,19 @@ snapshots:
eventsource-parser: 3.0.6
zod: 4.3.5
'@ai-sdk/provider-utils@4.0.5(zod@4.3.5)':
'@ai-sdk/provider-utils@4.0.4(zod@4.3.6)':
dependencies:
'@ai-sdk/provider': 3.0.2
'@standard-schema/spec': 1.1.0
eventsource-parser: 3.0.6
zod: 4.3.5
zod: 4.3.6
'@ai-sdk/provider-utils@4.0.5(zod@4.3.6)':
dependencies:
'@ai-sdk/provider': 3.0.2
'@standard-schema/spec': 1.1.0
eventsource-parser: 3.0.6
zod: 4.3.6
'@ai-sdk/provider@3.0.2':
dependencies:
@@ -7230,6 +7274,14 @@ snapshots:
'@opentelemetry/api': 1.9.0
zod: 4.3.5
ai@6.0.25(zod@4.3.6):
dependencies:
'@ai-sdk/gateway': 3.0.10(zod@4.3.6)
'@ai-sdk/provider': 3.0.2
'@ai-sdk/provider-utils': 4.0.4(zod@4.3.6)
'@opentelemetry/api': 1.9.0
zod: 4.3.6
ajv-draft-04@1.0.0(ajv@8.13.0):
optionalDependencies:
ajv: 8.13.0
@@ -9966,4 +10018,6 @@ snapshots:
zod@4.3.5: {}
zod@4.3.6: {}
zwitch@2.0.4: {}