mirror of
https://github.com/memohai/Memoh.git
synced 2026-04-27 07:16:19 +09:00
refactor: bind container lifecycle to bot and improve schedule trigger flow
- Add SetupBotContainer to the ContainerLifecycle interface so containers are automatically created when a bot is created, matching the existing cleanup-on-delete behavior.
- Refactor schedule tools to use bot-scoped API paths and pass identity context for proper authorization.
- Introduce a dedicated trigger-schedule endpoint in the chat resolver with an explicit schedule payload instead of reusing the generic chat path.
- Generate short-lived JWT tokens for schedule trigger callbacks with resolved bot owner identity.
- Validate required parameters in the NewLLMClient and NewOpenAIEmbedder constructors, returning errors instead of falling back to defaults.
- Add unit tests for schedule token generation and the chat resolver.
This commit is contained in:
@@ -20,29 +20,31 @@ type LLMClient struct {
|
||||
http *http.Client
|
||||
}
|
||||
|
||||
func NewLLMClient(log *slog.Logger, baseURL, apiKey, model string, timeout time.Duration) *LLMClient {
|
||||
func NewLLMClient(log *slog.Logger, baseURL, apiKey, model string, timeout time.Duration) (*LLMClient, error) {
|
||||
if strings.TrimSpace(baseURL) == "" {
|
||||
return nil, fmt.Errorf("llm client: base url is required")
|
||||
}
|
||||
if strings.TrimSpace(apiKey) == "" {
|
||||
return nil, fmt.Errorf("llm client: api key is required")
|
||||
}
|
||||
if strings.TrimSpace(model) == "" {
|
||||
return nil, fmt.Errorf("llm client: model is required")
|
||||
}
|
||||
if log == nil {
|
||||
log = slog.Default()
|
||||
}
|
||||
if baseURL == "" {
|
||||
baseURL = "https://api.openai.com/v1"
|
||||
}
|
||||
baseURL = strings.TrimRight(baseURL, "/")
|
||||
if model == "" {
|
||||
model = "gpt-4.1-nano"
|
||||
}
|
||||
if timeout <= 0 {
|
||||
timeout = 10 * time.Second
|
||||
}
|
||||
return &LLMClient{
|
||||
baseURL: baseURL,
|
||||
baseURL: strings.TrimRight(baseURL, "/"),
|
||||
apiKey: apiKey,
|
||||
model: model,
|
||||
logger: log.With(slog.String("client", "llm")),
|
||||
http: &http.Client{
|
||||
Timeout: timeout,
|
||||
},
|
||||
}
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *LLMClient) Extract(ctx context.Context, req ExtractRequest) (ExtractResponse, error) {
|
||||
|
||||
@@ -20,7 +20,10 @@ func TestLLMClientExtract(t *testing.T) {
|
||||
}))
|
||||
defer server.Close()
|
||||
|
||||
client := NewLLMClient(nil, server.URL, "test-key", "gpt-4.1-nano-2025-04-14", 0)
|
||||
client, err := NewLLMClient(nil, server.URL, "test-key", "gpt-4.1-nano-2025-04-14", 0)
|
||||
if err != nil {
|
||||
t.Fatalf("new llm client: %v", err)
|
||||
}
|
||||
resp, err := client.Extract(context.Background(), ExtractRequest{
|
||||
Messages: []Message{{Role: "user", Content: "hi"}},
|
||||
})
|
||||
|
||||
Reference in New Issue
Block a user