refactor: provider & models (#277)

* refactor: move client_type to provider, replace model fields with config JSONB

- Move `client_type` from `models` to `llm_providers` table
- Add `icon` field to `llm_providers`
- Replace `dimensions`, `input_modalities`, `supports_reasoning` on `models`
  with a single `config` JSONB column containing `dimensions`,
  `compatibilities` (vision, tool-call, image-output, reasoning),
  and `context_window`
- Auto-imported models default to vision + tool-call + reasoning
- Update all backend consumers (agent, flow resolver, handlers, memory)
- Regenerate sqlc, swagger, and TypeScript SDK
- Update frontend forms, display, and i18n for new schema

* ui: show provider icon avatar in sidebar and detail header, remove icon input

* feat: add built-in provider registry with YAML definitions and enable toggle

- Add `enable` column to llm_providers (default true, backward-compatible)
- Create internal/registry package to load YAML provider/model definitions
  on startup and upsert into database (new providers disabled by default)
- Add conf/providers/ with OpenAI, Anthropic, Google YAML definitions
- Add RegistryConfig to TOML config (providers_dir, default conf/providers)
- Model listing APIs and conversation flow now filter by enabled providers
- Frontend: enable switch in provider form, green status dot in sidebar,
  enabled providers sorted to top

* fix: make 0041 migration idempotent for fresh databases

Guard data migration steps with column-existence checks so the
migration succeeds on databases created from the updated init schema.
This commit is contained in:
Acbox Liu
2026-03-22 17:24:45 +08:00
committed by GitHub
parent de62f94315
commit b88ca96064
60 changed files with 1599 additions and 1224 deletions
+34 -52
View File
@@ -2,7 +2,6 @@ package models
import (
"errors"
"fmt"
"github.com/google/uuid"
)
@@ -14,14 +13,6 @@ const (
ModelTypeEmbedding ModelType = "embedding"
)
// Input modality tokens accepted in Model.InputModalities.
// Used by Validate to reject unknown modalities on chat models.
const (
ModelInputText = "text"
ModelInputImage = "image"
ModelInputAudio = "audio"
ModelInputVideo = "video"
ModelInputFile = "file"
)
type ClientType string
const (
@@ -31,21 +22,31 @@ const (
ClientTypeGoogleGenerativeAI ClientType = "google-generative-ai"
)
type Model struct {
ModelID string `json:"model_id"`
Name string `json:"name"`
LlmProviderID string `json:"llm_provider_id"`
ClientType ClientType `json:"client_type,omitempty"`
InputModalities []string `json:"input_modalities,omitempty"`
SupportsReasoning bool `json:"supports_reasoning"`
Type ModelType `json:"type"`
Dimensions int `json:"dimensions"`
// Compatibility tokens stored in ModelConfig.Compatibilities.
// Validate rejects any token outside this set.
const (
CompatVision = "vision"
CompatToolCall = "tool-call"
CompatImageOutput = "image-output"
CompatReasoning = "reasoning"
)
// validCompatibilities is the allow-list of compatibility tokens a
// model config may declare; membership is checked during validation.
var validCompatibilities = map[string]struct{}{
	CompatVision:      {},
	CompatToolCall:    {},
	CompatImageOutput: {},
	CompatReasoning:   {},
}
// validInputModalities is the set of recognised input modality tokens.
var validInputModalities = map[string]struct{}{
ModelInputText: {}, ModelInputImage: {}, ModelInputAudio: {},
ModelInputVideo: {}, ModelInputFile: {},
// ModelConfig holds the JSONB config stored per model.
type ModelConfig struct {
Dimensions *int `json:"dimensions,omitempty"` // embedding vector size; Validate requires > 0 for embedding models
Compatibilities []string `json:"compatibilities,omitempty"` // capability tokens, e.g. CompatVision; validated against validCompatibilities
ContextWindow *int `json:"context_window,omitempty"` // maximum context length — presumably in tokens; TODO confirm unit
}
// Model describes an LLM or embedding model attached to a provider.
type Model struct {
ModelID string `json:"model_id"` // provider-scoped model identifier
Name string `json:"name"` // human-readable display name
LlmProviderID string `json:"llm_provider_id"` // owning provider; client_type now lives on the provider
Type ModelType `json:"type"` // ModelTypeChat or ModelTypeEmbedding (see Validate)
Config ModelConfig `json:"config"` // per-model JSONB config: dimensions, compatibilities, context window
}
func (m *Model) Validate() error {
@@ -61,41 +62,23 @@ func (m *Model) Validate() error {
if m.Type != ModelTypeChat && m.Type != ModelTypeEmbedding {
return errors.New("invalid model type")
}
if m.Type == ModelTypeChat {
if m.ClientType == "" {
return errors.New("client_type is required for chat models")
}
if !isValidClientType(m.ClientType) {
return fmt.Errorf("invalid client_type: %s", m.ClientType)
if m.Type == ModelTypeEmbedding {
if m.Config.Dimensions == nil || *m.Config.Dimensions <= 0 {
return errors.New("dimensions must be greater than 0 for embedding models")
}
}
if m.Type == ModelTypeEmbedding && m.Dimensions <= 0 {
return errors.New("dimensions must be greater than 0")
}
if m.Type == ModelTypeChat {
for _, mod := range m.InputModalities {
if _, ok := validInputModalities[mod]; !ok {
return fmt.Errorf("invalid input modality: %s", mod)
}
for _, c := range m.Config.Compatibilities {
if _, ok := validCompatibilities[c]; !ok {
return errors.New("invalid compatibility: " + c)
}
}
return nil
}
// HasInputModality reports whether mod appears in the model's declared
// input modalities.
func (m *Model) HasInputModality(mod string) bool {
	for i := range m.InputModalities {
		if m.InputModalities[i] == mod {
			return true
		}
	}
	return false
}
// IsMultimodal returns true if the model supports any input modality beyond text.
func (m *Model) IsMultimodal() bool {
for _, v := range m.InputModalities {
if v != ModelInputText {
// HasCompatibility checks whether the model config includes the given capability.
func (m *Model) HasCompatibility(c string) bool {
for _, v := range m.Config.Compatibilities {
if v == c {
return true
}
}
@@ -122,8 +105,7 @@ type GetResponse struct {
type UpdateRequest Model
type ListRequest struct {
Type ModelType `json:"type,omitempty"`
ClientType ClientType `json:"client_type,omitempty"`
Type ModelType `json:"type,omitempty"`
}
type DeleteRequest struct {