mirror of
https://github.com/memohai/Memoh.git
synced 2026-04-27 07:16:19 +09:00
b88ca96064
* refactor: move client_type to provider, replace model fields with config JSONB - Move `client_type` from `models` to `llm_providers` table - Add `icon` field to `llm_providers` - Replace `dimensions`, `input_modalities`, `supports_reasoning` on `models` with a single `config` JSONB column containing `dimensions`, `compatibilities` (vision, tool-call, image-output, reasoning), and `context_window` - Auto-imported models default to vision + tool-call + reasoning - Update all backend consumers (agent, flow resolver, handlers, memory) - Regenerate sqlc, swagger, and TypeScript SDK - Update frontend forms, display, and i18n for new schema * ui: show provider icon avatar in sidebar and detail header, remove icon input * feat: add built-in provider registry with YAML definitions and enable toggle - Add `enable` column to llm_providers (default true, backward-compatible) - Create internal/registry package to load YAML provider/model definitions on startup and upsert into database (new providers disabled by default) - Add conf/providers/ with OpenAI, Anthropic, Google YAML definitions - Add RegistryConfig to TOML config (providers_dir, default conf/providers) - Model listing APIs and conversation flow now filter by enabled providers - Frontend: enable switch in provider form, green status dot in sidebar, enabled providers sorted to top * fix: make 0041 migration idempotent for fresh databases Guard data migration steps with column-existence checks so the migration succeeds on databases created from the updated init schema.
141 lines
3.4 KiB
Go
141 lines
3.4 KiB
Go
package models
|
|
|
|
import (
|
|
"errors"
|
|
|
|
"github.com/google/uuid"
|
|
)
|
|
|
|
// ModelType identifies the functional category of a model.
type ModelType string

const (
	// ModelTypeChat marks conversational text-generation models.
	ModelTypeChat ModelType = "chat"
	// ModelTypeEmbedding marks vector-embedding models.
	ModelTypeEmbedding ModelType = "embedding"
)
|
|
|
|
// ClientType selects the client protocol used to talk to an LLM provider.
// It lives on the provider (llm_providers table), not on individual models.
type ClientType string

const (
	// ClientTypeOpenAIResponses targets the OpenAI Responses API.
	ClientTypeOpenAIResponses ClientType = "openai-responses"
	// ClientTypeOpenAICompletions targets the OpenAI Chat Completions API.
	ClientTypeOpenAICompletions ClientType = "openai-completions"
	// ClientTypeAnthropicMessages targets the Anthropic Messages API.
	ClientTypeAnthropicMessages ClientType = "anthropic-messages"
	// ClientTypeGoogleGenerativeAI targets the Google Generative AI API.
	ClientTypeGoogleGenerativeAI ClientType = "google-generative-ai"
)
|
|
|
|
// Compatibility tokens a model may declare in ModelConfig.Compatibilities.
const (
	CompatVision      = "vision"
	CompatToolCall    = "tool-call"
	CompatImageOutput = "image-output"
	CompatReasoning   = "reasoning"
)

// validCompatibilities enumerates accepted compatibility tokens.
// Used by (*Model).Validate to reject unknown tokens.
var validCompatibilities = map[string]struct{}{
	CompatVision: {}, CompatToolCall: {}, CompatImageOutput: {}, CompatReasoning: {},
}
|
|
|
|
// ModelConfig holds the JSONB config stored per model.
type ModelConfig struct {
	// Dimensions is the embedding vector size. Validate requires it to be
	// set and positive for embedding models; it is unused for chat models.
	Dimensions *int `json:"dimensions,omitempty"`
	// Compatibilities lists capability tokens drawn from the Compat*
	// constants (vision, tool-call, image-output, reasoning).
	Compatibilities []string `json:"compatibilities,omitempty"`
	// ContextWindow is the model's context window size, when known.
	// NOTE(review): presumably measured in tokens — confirm against consumers.
	ContextWindow *int `json:"context_window,omitempty"`
}
|
|
|
|
// Model describes a chat or embedding model attached to an LLM provider.
type Model struct {
	// ModelID is the provider-facing model identifier (e.g. an API model name).
	ModelID string `json:"model_id"`
	// Name is a human-readable display name.
	Name string `json:"name"`
	// LlmProviderID references the owning provider; Validate requires it
	// to be a valid UUID string.
	LlmProviderID string `json:"llm_provider_id"`
	// Type is either ModelTypeChat or ModelTypeEmbedding.
	Type ModelType `json:"type"`
	// Config carries the per-model settings persisted as JSONB.
	Config ModelConfig `json:"config"`
}
|
|
|
|
func (m *Model) Validate() error {
|
|
if m.ModelID == "" {
|
|
return errors.New("model ID is required")
|
|
}
|
|
if m.LlmProviderID == "" {
|
|
return errors.New("llm provider ID is required")
|
|
}
|
|
if _, err := uuid.Parse(m.LlmProviderID); err != nil {
|
|
return errors.New("llm provider ID must be a valid UUID")
|
|
}
|
|
if m.Type != ModelTypeChat && m.Type != ModelTypeEmbedding {
|
|
return errors.New("invalid model type")
|
|
}
|
|
if m.Type == ModelTypeEmbedding {
|
|
if m.Config.Dimensions == nil || *m.Config.Dimensions <= 0 {
|
|
return errors.New("dimensions must be greater than 0 for embedding models")
|
|
}
|
|
}
|
|
for _, c := range m.Config.Compatibilities {
|
|
if _, ok := validCompatibilities[c]; !ok {
|
|
return errors.New("invalid compatibility: " + c)
|
|
}
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// HasCompatibility checks whether the model config includes the given capability.
|
|
func (m *Model) HasCompatibility(c string) bool {
|
|
for _, v := range m.Config.Compatibilities {
|
|
if v == c {
|
|
return true
|
|
}
|
|
}
|
|
return false
|
|
}
|
|
|
|
// AddRequest is the payload for creating a model; it mirrors Model.
type AddRequest Model

// AddResponse is returned after a model has been created.
type AddResponse struct {
	// ID identifies the stored record.
	ID string `json:"id"`
	// ModelID is the provider-facing model identifier that was stored.
	ModelID string `json:"model_id"`
}
|
|
|
|
// GetRequest identifies a model to fetch by its record ID.
type GetRequest struct {
	ID string `json:"id"`
}

// GetResponse is a stored model together with its record identifiers.
// The outer ModelID field shadows the embedded Model.ModelID in JSON
// output (shallower fields win in encoding/json).
type GetResponse struct {
	ID      string `json:"id"`
	ModelID string `json:"model_id"`
	Model
}
|
|
|
|
// UpdateRequest is the payload for updating a model; it mirrors Model.
type UpdateRequest Model

// ListRequest filters the model listing.
type ListRequest struct {
	// Type restricts results to a model type; presumably the zero value
	// leaves the listing unfiltered — confirm against the handler.
	Type ModelType `json:"type,omitempty"`
}
|
|
|
|
// DeleteRequest identifies a model to delete, by record ID or by
// provider-facing model ID.
type DeleteRequest struct {
	ID      string `json:"id,omitempty"`
	ModelID string `json:"model_id,omitempty"`
}

// DeleteResponse acknowledges a deletion with a human-readable message.
type DeleteResponse struct {
	Message string `json:"message"`
}
|
|
|
|
// CountResponse reports a count of models.
type CountResponse struct {
	Count int64 `json:"count"`
}
|
|
|
|
// TestStatus represents the outcome of probing a model.
type TestStatus string

const (
	// TestStatusOK means the probe succeeded.
	TestStatusOK TestStatus = "ok"
	// TestStatusAuthError means the provider rejected the credentials.
	TestStatusAuthError TestStatus = "auth_error"
	// TestStatusModelNotSupported means the provider does not serve this model.
	TestStatusModelNotSupported TestStatus = "model_not_supported"
	// TestStatusError covers any other failure.
	TestStatusError TestStatus = "error"
)
|
|
|
|
// TestResponse is returned by POST /models/:id/test.
type TestResponse struct {
	// Status classifies the probe outcome (see the TestStatus constants).
	Status TestStatus `json:"status"`
	// Reachable reports whether the model was reachable at all.
	Reachable bool `json:"reachable"`
	// LatencyMs is the probe round-trip time in milliseconds, when measured.
	LatencyMs int64 `json:"latency_ms,omitempty"`
	// Message carries human-readable detail, typically on failure.
	Message string `json:"message,omitempty"`
}
|