refactor: provider & models (#277)

* refactor: move client_type to provider, replace model fields with config JSONB

- Move `client_type` from `models` to `llm_providers` table
- Add `icon` field to `llm_providers`
- Replace `dimensions`, `input_modalities`, `supports_reasoning` on `models`
  with a single `config` JSONB column containing `dimensions`,
  `compatibilities` (vision, tool-call, image-output, reasoning),
  and `context_window`
- Auto-imported models default to vision + tool-call + reasoning
- Update all backend consumers (agent, flow resolver, handlers, memory)
- Regenerate sqlc, swagger, and TypeScript SDK
- Update frontend forms, display, and i18n for new schema

* ui: show provider icon avatar in sidebar and detail header, remove icon input

* feat: add built-in provider registry with YAML definitions and enable toggle

- Add `enable` column to llm_providers (default true, backward-compatible)
- Create internal/registry package to load YAML provider/model definitions
  on startup and upsert into database (new providers disabled by default)
- Add conf/providers/ with OpenAI, Anthropic, Google YAML definitions
- Add RegistryConfig to TOML config (providers_dir, default conf/providers)
- Model listing APIs and conversation flow now filter by enabled providers
- Frontend: enable switch in provider form, green status dot in sidebar,
  enabled providers sorted to top

* fix: make 0041 migration idempotent for fresh databases

Guard data migration steps with column-existence checks so the
migration succeeds on databases created from the updated init schema.
This commit is contained in:
Acbox Liu
2026-03-22 17:24:45 +08:00
committed by GitHub
parent de62f94315
commit b88ca96064
60 changed files with 1599 additions and 1224 deletions
+19 -1
View File
@@ -71,6 +71,7 @@ import (
"github.com/memohai/memoh/internal/models"
"github.com/memohai/memoh/internal/policy"
"github.com/memohai/memoh/internal/providers"
"github.com/memohai/memoh/internal/registry"
"github.com/memohai/memoh/internal/schedule"
"github.com/memohai/memoh/internal/searchproviders"
"github.com/memohai/memoh/internal/server"
@@ -258,6 +259,7 @@ func runServe() {
),
fx.Invoke(
injectToolProviders,
startRegistrySync,
startMemoryProviderBootstrap,
startScheduleService,
startHeartbeatService,
@@ -805,6 +807,22 @@ func provideServer(params serverParams) *server.Server {
// lifecycle hooks
// ---------------------------------------------------------------------------
// startRegistrySync appends an fx OnStart hook that loads the built-in
// provider definitions (YAML files under the configured providers
// directory) and upserts them into the database via registry.Sync.
// Loading is best-effort: a load failure or an empty definition set is
// tolerated so the server can still boot without the registry.
func startRegistrySync(lc fx.Lifecycle, log *slog.Logger, cfg config.Config, queries *dbsqlc.Queries) {
	syncOnStart := func(ctx context.Context) error {
		definitions, loadErr := registry.Load(cfg.Registry.ProvidersPath())
		switch {
		case loadErr != nil:
			// Do not fail startup on a broken/missing registry directory;
			// just record the problem and continue without built-ins.
			log.Warn("registry: failed to load provider definitions", slog.Any("error", loadErr))
			return nil
		case len(definitions) == 0:
			// Nothing to upsert.
			return nil
		}
		return registry.Sync(ctx, log, queries, definitions)
	}
	lc.Append(fx.Hook{OnStart: syncOnStart})
}
func startMemoryProviderBootstrap(lc fx.Lifecycle, log *slog.Logger, mpService *memprovider.Service, registry *memprovider.Registry) {
mpService.SetRegistry(registry)
lc.Append(fx.Hook{
@@ -1019,7 +1037,7 @@ func (c *lazyLLMClient) resolve(ctx context.Context) (memprovider.LLM, error) {
if err != nil {
return nil, err
}
clientType := string(memoryModel.ClientType)
clientType := memoryProvider.ClientType
switch clientType {
case "openai-responses", "openai-completions", "anthropic-messages", "google-generative-ai":
default:
+19 -1
View File
@@ -72,6 +72,7 @@ import (
"github.com/memohai/memoh/internal/models"
"github.com/memohai/memoh/internal/policy"
"github.com/memohai/memoh/internal/providers"
"github.com/memohai/memoh/internal/registry"
"github.com/memohai/memoh/internal/schedule"
"github.com/memohai/memoh/internal/searchproviders"
"github.com/memohai/memoh/internal/server"
@@ -185,6 +186,7 @@ func runServe() {
),
fx.Invoke(
injectToolProviders,
startRegistrySync,
startMemoryProviderBootstrap,
startScheduleService,
startHeartbeatService,
@@ -270,6 +272,22 @@ func provideMemoryProviderRegistry(log *slog.Logger, chatService *conversation.S
return registry
}
// startRegistrySync appends an fx OnStart hook that loads the built-in
// provider definitions (YAML files under the configured providers
// directory) and upserts them into the database via registry.Sync.
// Loading is best-effort: a load failure or an empty definition set is
// tolerated so the server can still boot without the registry.
func startRegistrySync(lc fx.Lifecycle, log *slog.Logger, cfg config.Config, queries *dbsqlc.Queries) {
	syncOnStart := func(ctx context.Context) error {
		definitions, loadErr := registry.Load(cfg.Registry.ProvidersPath())
		switch {
		case loadErr != nil:
			// Do not fail startup on a broken/missing registry directory;
			// just record the problem and continue without built-ins.
			log.Warn("registry: failed to load provider definitions", slog.Any("error", loadErr))
			return nil
		case len(definitions) == 0:
			// Nothing to upsert.
			return nil
		}
		return registry.Sync(ctx, log, queries, definitions)
	}
	lc.Append(fx.Hook{OnStart: syncOnStart})
}
func startMemoryProviderBootstrap(lc fx.Lifecycle, log *slog.Logger, mpService *memprovider.Service, registry *memprovider.Registry) {
mpService.SetRegistry(registry)
lc.Append(fx.Hook{
@@ -947,7 +965,7 @@ func (c *lazyLLMClient) resolve(ctx context.Context) (memprovider.LLM, error) {
if err != nil {
return nil, err
}
clientType := string(memoryModel.ClientType)
clientType := memoryProvider.ClientType
switch clientType {
case "openai-responses", "openai-completions", "anthropic-messages", "google-generative-ai":
default: