refactor: bind container lifecycle to bot and improve schedule trigger flow

- Add SetupBotContainer to the ContainerLifecycle interface so containers
  are automatically created when a bot is created, matching the existing
  cleanup-on-delete behavior (a sketch of the interface follows this list).
- Refactor schedule tools to use bot-scoped API paths and pass identity
  context for proper authorization.
- Introduce a dedicated trigger-schedule endpoint in the chat resolver with
  an explicit schedule payload instead of reusing the generic chat path
  (a hypothetical payload shape follows this list).
- Generate short-lived JWT tokens for schedule trigger callbacks carrying the
  resolved bot owner identity (a token-minting sketch follows this list).
- Validate required parameters in NewLLMClient and NewOpenAIEmbedder
  constructors, returning errors instead of falling back to defaults.
- Add unit tests for schedule token generation and chat resolver.
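
A minimal sketch of the ContainerLifecycle change, assuming a context-based
API: SetupBotContainer is named above, while the cleanup method's name and
both signatures are inferred from the described cleanup-on-delete behavior
rather than taken from the diff.

type ContainerLifecycle interface {
	// SetupBotContainer provisions the bot's container at creation time
	// (the behavior this commit adds).
	SetupBotContainer(ctx context.Context, botID string) error
	// CleanupBotContainer removes the container when the bot is deleted
	// (the pre-existing behavior the new method mirrors).
	CleanupBotContainer(ctx context.Context, botID string) error
}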
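
The explicit schedule payload for the new trigger-schedule endpoint is not
among the files shown below; a hypothetical shape, with every field name an
assumption, since the commit only states that the payload is explicit rather
than reusing the generic chat path:

// TriggerScheduleInput is a guess at the dedicated trigger payload.
type TriggerScheduleInput struct {
	ScheduleID string    `json:"scheduleId"` // schedule being fired
	BotID      string    `json:"botId"`      // bot the schedule belongs to
	Prompt     string    `json:"prompt"`     // message the trigger injects
	FiredAt    time.Time `json:"firedAt"`    // trigger timestamp
}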
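
For the short-lived trigger tokens, a sketch using
github.com/golang-jwt/jwt/v5 with HMAC signing; the helper name, claim set,
and TTL parameter are assumptions, not this commit's implementation:

import (
	"time"

	"github.com/golang-jwt/jwt/v5"
)

// newTriggerToken mints a short-lived token carrying the resolved bot
// owner identity for authorizing a schedule trigger callback.
func newTriggerToken(secret []byte, ownerID, botID string, ttl time.Duration) (string, error) {
	now := time.Now()
	claims := jwt.MapClaims{
		"sub": ownerID,             // resolved bot owner identity
		"bot": botID,               // hypothetical custom claim
		"iat": now.Unix(),          // issued-at
		"exp": now.Add(ttl).Unix(), // short expiry limits replay
	}
	return jwt.NewWithClaims(jwt.SigningMethodHS256, claims).SignedString(secret)
}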
BBQ
2026-02-07 12:03:24 +08:00
parent a9596ab3a8
commit 83b6ee608c
16 changed files with 583 additions and 72 deletions
+12 -10
@@ -20,29 +20,31 @@ type LLMClient struct {
 	http *http.Client
 }
 
-func NewLLMClient(log *slog.Logger, baseURL, apiKey, model string, timeout time.Duration) *LLMClient {
+func NewLLMClient(log *slog.Logger, baseURL, apiKey, model string, timeout time.Duration) (*LLMClient, error) {
+	if strings.TrimSpace(baseURL) == "" {
+		return nil, fmt.Errorf("llm client: base url is required")
+	}
+	if strings.TrimSpace(apiKey) == "" {
+		return nil, fmt.Errorf("llm client: api key is required")
+	}
+	if strings.TrimSpace(model) == "" {
+		return nil, fmt.Errorf("llm client: model is required")
+	}
 	if log == nil {
 		log = slog.Default()
 	}
-	if baseURL == "" {
-		baseURL = "https://api.openai.com/v1"
-	}
-	baseURL = strings.TrimRight(baseURL, "/")
-	if model == "" {
-		model = "gpt-4.1-nano"
-	}
 	if timeout <= 0 {
 		timeout = 10 * time.Second
 	}
 	return &LLMClient{
-		baseURL: baseURL,
+		baseURL: strings.TrimRight(baseURL, "/"),
 		apiKey:  apiKey,
 		model:   model,
 		logger:  log.With(slog.String("client", "llm")),
 		http: &http.Client{
 			Timeout: timeout,
 		},
-	}
+	}, nil
 }
 
 func (c *LLMClient) Extract(ctx context.Context, req ExtractRequest) (ExtractResponse, error) {
+4 -1
@@ -20,7 +20,10 @@ func TestLLMClientExtract(t *testing.T) {
 	}))
 	defer server.Close()
 
-	client := NewLLMClient(nil, server.URL, "test-key", "gpt-4.1-nano-2025-04-14", 0)
+	client, err := NewLLMClient(nil, server.URL, "test-key", "gpt-4.1-nano-2025-04-14", 0)
+	if err != nil {
+		t.Fatalf("new llm client: %v", err)
+	}
 	resp, err := client.Extract(context.Background(), ExtractRequest{
 		Messages: []Message{{Role: "user", Content: "hi"}},
 	})