feat: add thinking support (#100)

* feat: add thinking support

* feat: improve thinking block render in web and filter thinking content in channels

* fix: migrate
This commit is contained in:
Acbox Liu
2026-02-23 14:41:27 +08:00
committed by GitHub
parent 18535f97f2
commit 17cd077f34
31 changed files with 530 additions and 118 deletions
+6
View File
@@ -12,12 +12,18 @@ export const ClientTypeModel = z.enum([
'openai-responses', 'openai-completions', 'anthropic-messages', 'google-generative-ai',
])
// Optional reasoning ("thinking") configuration attached to a model config.
// `enabled` toggles reasoning; `effort` selects the thinking budget level.
// The whole object is optional so payloads without a `reasoning` field
// (pre-migration clients) still validate.
export const ReasoningConfigModel = z.object({
enabled: z.boolean(),
effort: z.enum(['low', 'medium', 'high']),
}).optional()
// Model endpoint configuration: which model to call, over which client
// protocol, which input modalities it accepts, and the credentials/base URL
// used to reach it.
export const ModelConfigModel = z.object({
modelId: z.string().min(1, 'Model ID is required'),
clientType: ClientTypeModel, // one of the ClientTypeModel enum values
input: z.array(z.enum(['text', 'image', 'audio', 'video', 'file'])),
apiKey: z.string().min(1, 'API key is required'),
baseUrl: z.string(), // may be empty — TODO confirm whether a default base URL applies upstream
reasoning: ReasoningConfigModel, // optional; absent means no reasoning settings
})
export const AllowedActionModel = z.enum(allActions)
+5 -1
View File
@@ -86,6 +86,7 @@ CREATE TABLE IF NOT EXISTS models (
client_type TEXT,
dimensions INTEGER,
input_modalities TEXT[] NOT NULL DEFAULT ARRAY['text']::TEXT[],
supports_reasoning BOOLEAN NOT NULL DEFAULT false,
type TEXT NOT NULL DEFAULT 'chat',
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
@@ -121,6 +122,8 @@ CREATE TABLE IF NOT EXISTS bots (
max_context_tokens INTEGER NOT NULL DEFAULT 0,
language TEXT NOT NULL DEFAULT 'auto',
allow_guest BOOLEAN NOT NULL DEFAULT false,
reasoning_enabled BOOLEAN NOT NULL DEFAULT false,
reasoning_effort TEXT NOT NULL DEFAULT 'medium',
max_inbox_items INTEGER NOT NULL DEFAULT 50,
chat_model_id UUID REFERENCES models(id) ON DELETE SET NULL,
memory_model_id UUID REFERENCES models(id) ON DELETE SET NULL,
@@ -130,7 +133,8 @@ CREATE TABLE IF NOT EXISTS bots (
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
CONSTRAINT bots_type_check CHECK (type IN ('personal', 'public')),
CONSTRAINT bots_status_check CHECK (status IN ('creating', 'ready', 'deleting'))
CONSTRAINT bots_status_check CHECK (status IN ('creating', 'ready', 'deleting')),
CONSTRAINT bots_reasoning_effort_check CHECK (reasoning_effort IN ('low', 'medium', 'high'))
);
CREATE INDEX IF NOT EXISTS idx_bots_owner_user_id ON bots(owner_user_id);
+8
View File
@@ -0,0 +1,8 @@
-- 0014_reasoning (rollback)
-- Remove reasoning support flag from models and reasoning settings from bots.
-- Every statement uses IF EXISTS so the rollback is idempotent and safe to
-- re-run. The CHECK constraint is dropped explicitly before its column
-- (Postgres would drop it implicitly with the column, but being explicit
-- keeps this rollback symmetric with the forward migration).
ALTER TABLE bots DROP CONSTRAINT IF EXISTS bots_reasoning_effort_check;
ALTER TABLE bots DROP COLUMN IF EXISTS reasoning_effort;
ALTER TABLE bots DROP COLUMN IF EXISTS reasoning_enabled;
ALTER TABLE models DROP COLUMN IF EXISTS supports_reasoning;
+17
View File
@@ -0,0 +1,17 @@
-- 0014_reasoning
-- Add reasoning support flag to models and reasoning settings to bots.
-- Columns use IF NOT EXISTS and NOT NULL defaults so the migration is
-- idempotent and applies cleanly to populated tables.
ALTER TABLE models ADD COLUMN IF NOT EXISTS supports_reasoning BOOLEAN NOT NULL DEFAULT false;
ALTER TABLE bots ADD COLUMN IF NOT EXISTS reasoning_enabled BOOLEAN NOT NULL DEFAULT false;
ALTER TABLE bots ADD COLUMN IF NOT EXISTS reasoning_effort TEXT NOT NULL DEFAULT 'medium';
-- Postgres has no ADD CONSTRAINT IF NOT EXISTS, so guard the constraint
-- creation with a pg_constraint lookup inside a DO block instead.
DO $$
BEGIN
IF NOT EXISTS (
SELECT 1 FROM pg_constraint WHERE conname = 'bots_reasoning_effort_check'
) THEN
ALTER TABLE bots ADD CONSTRAINT bots_reasoning_effort_check
CHECK (reasoning_effort IN ('low', 'medium', 'high'));
END IF;
END
$$;
+6 -6
View File
@@ -1,21 +1,21 @@
-- name: CreateBot :one
INSERT INTO bots (owner_user_id, type, display_name, avatar_url, is_active, metadata, status)
VALUES ($1, $2, $3, $4, $5, $6, $7)
RETURNING id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, max_inbox_items, language, allow_guest, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at;
RETURNING id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, max_inbox_items, language, allow_guest, reasoning_enabled, reasoning_effort, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at;
-- name: GetBotByID :one
SELECT id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, max_inbox_items, language, allow_guest, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at
SELECT id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, max_inbox_items, language, allow_guest, reasoning_enabled, reasoning_effort, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at
FROM bots
WHERE id = $1;
-- name: ListBotsByOwner :many
SELECT id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, max_inbox_items, language, allow_guest, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at
SELECT id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, max_inbox_items, language, allow_guest, reasoning_enabled, reasoning_effort, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at
FROM bots
WHERE owner_user_id = $1
ORDER BY created_at DESC;
-- name: ListBotsByMember :many
SELECT b.id, b.owner_user_id, b.type, b.display_name, b.avatar_url, b.is_active, b.status, b.max_context_load_time, b.max_context_tokens, b.max_inbox_items, b.language, b.allow_guest, b.chat_model_id, b.memory_model_id, b.embedding_model_id, b.search_provider_id, b.metadata, b.created_at, b.updated_at
SELECT b.id, b.owner_user_id, b.type, b.display_name, b.avatar_url, b.is_active, b.status, b.max_context_load_time, b.max_context_tokens, b.max_inbox_items, b.language, b.allow_guest, b.reasoning_enabled, b.reasoning_effort, b.chat_model_id, b.memory_model_id, b.embedding_model_id, b.search_provider_id, b.metadata, b.created_at, b.updated_at
FROM bots b
JOIN bot_members m ON m.bot_id = b.id
WHERE m.user_id = $1
@@ -29,14 +29,14 @@ SET display_name = $2,
metadata = $5,
updated_at = now()
WHERE id = $1
RETURNING id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, max_inbox_items, language, allow_guest, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at;
RETURNING id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, max_inbox_items, language, allow_guest, reasoning_enabled, reasoning_effort, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at;
-- name: UpdateBotOwner :one
UPDATE bots
SET owner_user_id = $2,
updated_at = now()
WHERE id = $1
RETURNING id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, max_inbox_items, language, allow_guest, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at;
RETURNING id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, max_inbox_items, language, allow_guest, reasoning_enabled, reasoning_effort, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at;
-- name: UpdateBotStatus :exec
UPDATE bots
+4 -1
View File
@@ -36,7 +36,7 @@ DELETE FROM llm_providers WHERE id = sqlc.arg(id);
SELECT COUNT(*) FROM llm_providers;
-- name: CreateModel :one
INSERT INTO models (model_id, name, llm_provider_id, client_type, dimensions, input_modalities, type)
INSERT INTO models (model_id, name, llm_provider_id, client_type, dimensions, input_modalities, supports_reasoning, type)
VALUES (
sqlc.arg(model_id),
sqlc.arg(name),
@@ -44,6 +44,7 @@ VALUES (
sqlc.narg(client_type),
sqlc.arg(dimensions),
sqlc.arg(input_modalities),
sqlc.arg(supports_reasoning),
sqlc.arg(type)
)
RETURNING *;
@@ -93,6 +94,7 @@ SET
client_type = sqlc.narg(client_type),
dimensions = sqlc.arg(dimensions),
input_modalities = sqlc.arg(input_modalities),
supports_reasoning = sqlc.arg(supports_reasoning),
type = sqlc.arg(type),
updated_at = now()
WHERE id = sqlc.arg(id)
@@ -107,6 +109,7 @@ SET
client_type = sqlc.narg(client_type),
dimensions = sqlc.arg(dimensions),
input_modalities = sqlc.arg(input_modalities),
supports_reasoning = sqlc.arg(supports_reasoning),
type = sqlc.arg(type),
updated_at = now()
WHERE model_id = sqlc.arg(model_id)
+9 -1
View File
@@ -6,6 +6,8 @@ SELECT
bots.max_inbox_items,
bots.language,
bots.allow_guest,
bots.reasoning_enabled,
bots.reasoning_effort,
chat_models.id AS chat_model_id,
memory_models.id AS memory_model_id,
embedding_models.id AS embedding_model_id,
@@ -25,13 +27,15 @@ WITH updated AS (
max_inbox_items = sqlc.arg(max_inbox_items),
language = sqlc.arg(language),
allow_guest = sqlc.arg(allow_guest),
reasoning_enabled = sqlc.arg(reasoning_enabled),
reasoning_effort = sqlc.arg(reasoning_effort),
chat_model_id = COALESCE(sqlc.narg(chat_model_id)::uuid, bots.chat_model_id),
memory_model_id = COALESCE(sqlc.narg(memory_model_id)::uuid, bots.memory_model_id),
embedding_model_id = COALESCE(sqlc.narg(embedding_model_id)::uuid, bots.embedding_model_id),
search_provider_id = COALESCE(sqlc.narg(search_provider_id)::uuid, bots.search_provider_id),
updated_at = now()
WHERE bots.id = sqlc.arg(id)
RETURNING bots.id, bots.max_context_load_time, bots.max_context_tokens, bots.max_inbox_items, bots.language, bots.allow_guest, bots.chat_model_id, bots.memory_model_id, bots.embedding_model_id, bots.search_provider_id
RETURNING bots.id, bots.max_context_load_time, bots.max_context_tokens, bots.max_inbox_items, bots.language, bots.allow_guest, bots.reasoning_enabled, bots.reasoning_effort, bots.chat_model_id, bots.memory_model_id, bots.embedding_model_id, bots.search_provider_id
)
SELECT
updated.id AS bot_id,
@@ -40,6 +44,8 @@ SELECT
updated.max_inbox_items,
updated.language,
updated.allow_guest,
updated.reasoning_enabled,
updated.reasoning_effort,
chat_models.id AS chat_model_id,
memory_models.id AS memory_model_id,
embedding_models.id AS embedding_model_id,
@@ -57,6 +63,8 @@ SET max_context_load_time = 1440,
max_inbox_items = 50,
language = 'auto',
allow_guest = false,
reasoning_enabled = false,
reasoning_effort = 'medium',
chat_model_id = NULL,
memory_model_id = NULL,
embedding_model_id = NULL,
+4 -2
View File
@@ -58,7 +58,7 @@ func (s *feishuOutboundStream) Push(ctx context.Context, event channel.StreamEve
}
return nil
case channel.StreamEventDelta:
if event.Delta == "" {
if event.Delta == "" || event.Phase == channel.StreamPhaseReasoning {
return nil
}
s.textBuffer.WriteString(event.Delta)
@@ -96,7 +96,9 @@ func (s *feishuOutboundStream) Push(ctx context.Context, event channel.StreamEve
Target: s.target,
Message: media,
})
case channel.StreamEventAgentStart, channel.StreamEventAgentEnd, channel.StreamEventPhaseStart, channel.StreamEventPhaseEnd, channel.StreamEventProcessingStarted, channel.StreamEventProcessingCompleted, channel.StreamEventProcessingFailed:
case channel.StreamEventPhaseStart, channel.StreamEventPhaseEnd:
return nil
case channel.StreamEventAgentStart, channel.StreamEventAgentEnd, channel.StreamEventProcessingStarted, channel.StreamEventProcessingCompleted, channel.StreamEventProcessingFailed:
return nil
case channel.StreamEventFinal:
if event.Final == nil || event.Final.Message.IsEmpty() {
+4 -2
View File
@@ -259,10 +259,12 @@ func (s *telegramOutboundStream) Push(ctx context.Context, event channel.StreamE
}
}
return nil
case channel.StreamEventProcessingFailed, channel.StreamEventAgentStart, channel.StreamEventAgentEnd, channel.StreamEventPhaseStart, channel.StreamEventPhaseEnd, channel.StreamEventProcessingStarted, channel.StreamEventProcessingCompleted:
case channel.StreamEventPhaseStart, channel.StreamEventPhaseEnd:
return nil
case channel.StreamEventProcessingFailed, channel.StreamEventAgentStart, channel.StreamEventAgentEnd, channel.StreamEventProcessingStarted, channel.StreamEventProcessingCompleted:
return nil
case channel.StreamEventDelta:
if event.Delta == "" {
if event.Delta == "" || event.Phase == channel.StreamPhaseReasoning {
return nil
}
s.mu.Lock()
+15
View File
@@ -140,12 +140,18 @@ func (r *Resolver) SetInboxService(service *inbox.Service) {
// --- gateway payload ---
// gatewayReasoningConfig carries a bot's reasoning settings in the gateway
// payload: whether reasoning is enabled and the requested effort level
// ('low' | 'medium' | 'high' — enforced by the bots_reasoning_effort_check
// constraint; presumably validated again gateway-side — TODO confirm).
type gatewayReasoningConfig struct {
Enabled bool `json:"enabled"`
Effort string `json:"effort"`
}
// gatewayModelConfig is the model section of the gateway request payload.
// Reasoning is a pointer with omitempty: it is only populated when the chat
// model supports reasoning AND the bot has it enabled, so the field is
// omitted from the serialized JSON entirely in all other cases.
type gatewayModelConfig struct {
ModelID string `json:"modelId"`
ClientType string `json:"clientType"`
Input []string `json:"input"`
APIKey string `json:"apiKey"`
BaseURL string `json:"baseUrl"`
Reasoning *gatewayReasoningConfig `json:"reasoning,omitempty"`
}
type gatewayIdentity struct {
@@ -393,6 +399,14 @@ func (r *Resolver) resolve(ctx context.Context, req conversation.ChatRequest) (r
req.Query,
)
var reasoning *gatewayReasoningConfig
if chatModel.SupportsReasoning && botSettings.ReasoningEnabled {
reasoning = &gatewayReasoningConfig{
Enabled: true,
Effort: botSettings.ReasoningEffort,
}
}
payload := gatewayRequest{
Model: gatewayModelConfig{
ModelID: chatModel.ModelID,
@@ -400,6 +414,7 @@ func (r *Resolver) resolve(ctx context.Context, req conversation.ChatRequest) (r
Input: chatModel.InputModalities,
APIKey: provider.ApiKey,
BaseURL: provider.BaseUrl,
Reasoning: reasoning,
},
ActiveContextTime: maxCtx,
Channels: nonNilStrings(req.Channels),
+30 -6
View File
@@ -14,7 +14,7 @@ import (
const createBot = `-- name: CreateBot :one
INSERT INTO bots (owner_user_id, type, display_name, avatar_url, is_active, metadata, status)
VALUES ($1, $2, $3, $4, $5, $6, $7)
RETURNING id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, max_inbox_items, language, allow_guest, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at
RETURNING id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, max_inbox_items, language, allow_guest, reasoning_enabled, reasoning_effort, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at
`
type CreateBotParams struct {
@@ -40,6 +40,8 @@ type CreateBotRow struct {
MaxInboxItems int32 `json:"max_inbox_items"`
Language string `json:"language"`
AllowGuest bool `json:"allow_guest"`
ReasoningEnabled bool `json:"reasoning_enabled"`
ReasoningEffort string `json:"reasoning_effort"`
ChatModelID pgtype.UUID `json:"chat_model_id"`
MemoryModelID pgtype.UUID `json:"memory_model_id"`
EmbeddingModelID pgtype.UUID `json:"embedding_model_id"`
@@ -73,6 +75,8 @@ func (q *Queries) CreateBot(ctx context.Context, arg CreateBotParams) (CreateBot
&i.MaxInboxItems,
&i.Language,
&i.AllowGuest,
&i.ReasoningEnabled,
&i.ReasoningEffort,
&i.ChatModelID,
&i.MemoryModelID,
&i.EmbeddingModelID,
@@ -108,7 +112,7 @@ func (q *Queries) DeleteBotMember(ctx context.Context, arg DeleteBotMemberParams
}
const getBotByID = `-- name: GetBotByID :one
SELECT id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, max_inbox_items, language, allow_guest, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at
SELECT id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, max_inbox_items, language, allow_guest, reasoning_enabled, reasoning_effort, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at
FROM bots
WHERE id = $1
`
@@ -126,6 +130,8 @@ type GetBotByIDRow struct {
MaxInboxItems int32 `json:"max_inbox_items"`
Language string `json:"language"`
AllowGuest bool `json:"allow_guest"`
ReasoningEnabled bool `json:"reasoning_enabled"`
ReasoningEffort string `json:"reasoning_effort"`
ChatModelID pgtype.UUID `json:"chat_model_id"`
MemoryModelID pgtype.UUID `json:"memory_model_id"`
EmbeddingModelID pgtype.UUID `json:"embedding_model_id"`
@@ -151,6 +157,8 @@ func (q *Queries) GetBotByID(ctx context.Context, id pgtype.UUID) (GetBotByIDRow
&i.MaxInboxItems,
&i.Language,
&i.AllowGuest,
&i.ReasoningEnabled,
&i.ReasoningEffort,
&i.ChatModelID,
&i.MemoryModelID,
&i.EmbeddingModelID,
@@ -219,7 +227,7 @@ func (q *Queries) ListBotMembers(ctx context.Context, botID pgtype.UUID) ([]BotM
}
const listBotsByMember = `-- name: ListBotsByMember :many
SELECT b.id, b.owner_user_id, b.type, b.display_name, b.avatar_url, b.is_active, b.status, b.max_context_load_time, b.max_context_tokens, b.max_inbox_items, b.language, b.allow_guest, b.chat_model_id, b.memory_model_id, b.embedding_model_id, b.search_provider_id, b.metadata, b.created_at, b.updated_at
SELECT b.id, b.owner_user_id, b.type, b.display_name, b.avatar_url, b.is_active, b.status, b.max_context_load_time, b.max_context_tokens, b.max_inbox_items, b.language, b.allow_guest, b.reasoning_enabled, b.reasoning_effort, b.chat_model_id, b.memory_model_id, b.embedding_model_id, b.search_provider_id, b.metadata, b.created_at, b.updated_at
FROM bots b
JOIN bot_members m ON m.bot_id = b.id
WHERE m.user_id = $1
@@ -239,6 +247,8 @@ type ListBotsByMemberRow struct {
MaxInboxItems int32 `json:"max_inbox_items"`
Language string `json:"language"`
AllowGuest bool `json:"allow_guest"`
ReasoningEnabled bool `json:"reasoning_enabled"`
ReasoningEffort string `json:"reasoning_effort"`
ChatModelID pgtype.UUID `json:"chat_model_id"`
MemoryModelID pgtype.UUID `json:"memory_model_id"`
EmbeddingModelID pgtype.UUID `json:"embedding_model_id"`
@@ -270,6 +280,8 @@ func (q *Queries) ListBotsByMember(ctx context.Context, userID pgtype.UUID) ([]L
&i.MaxInboxItems,
&i.Language,
&i.AllowGuest,
&i.ReasoningEnabled,
&i.ReasoningEffort,
&i.ChatModelID,
&i.MemoryModelID,
&i.EmbeddingModelID,
@@ -289,7 +301,7 @@ func (q *Queries) ListBotsByMember(ctx context.Context, userID pgtype.UUID) ([]L
}
const listBotsByOwner = `-- name: ListBotsByOwner :many
SELECT id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, max_inbox_items, language, allow_guest, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at
SELECT id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, max_inbox_items, language, allow_guest, reasoning_enabled, reasoning_effort, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at
FROM bots
WHERE owner_user_id = $1
ORDER BY created_at DESC
@@ -308,6 +320,8 @@ type ListBotsByOwnerRow struct {
MaxInboxItems int32 `json:"max_inbox_items"`
Language string `json:"language"`
AllowGuest bool `json:"allow_guest"`
ReasoningEnabled bool `json:"reasoning_enabled"`
ReasoningEffort string `json:"reasoning_effort"`
ChatModelID pgtype.UUID `json:"chat_model_id"`
MemoryModelID pgtype.UUID `json:"memory_model_id"`
EmbeddingModelID pgtype.UUID `json:"embedding_model_id"`
@@ -339,6 +353,8 @@ func (q *Queries) ListBotsByOwner(ctx context.Context, ownerUserID pgtype.UUID)
&i.MaxInboxItems,
&i.Language,
&i.AllowGuest,
&i.ReasoningEnabled,
&i.ReasoningEffort,
&i.ChatModelID,
&i.MemoryModelID,
&i.EmbeddingModelID,
@@ -362,7 +378,7 @@ UPDATE bots
SET owner_user_id = $2,
updated_at = now()
WHERE id = $1
RETURNING id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, max_inbox_items, language, allow_guest, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at
RETURNING id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, max_inbox_items, language, allow_guest, reasoning_enabled, reasoning_effort, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at
`
type UpdateBotOwnerParams struct {
@@ -383,6 +399,8 @@ type UpdateBotOwnerRow struct {
MaxInboxItems int32 `json:"max_inbox_items"`
Language string `json:"language"`
AllowGuest bool `json:"allow_guest"`
ReasoningEnabled bool `json:"reasoning_enabled"`
ReasoningEffort string `json:"reasoning_effort"`
ChatModelID pgtype.UUID `json:"chat_model_id"`
MemoryModelID pgtype.UUID `json:"memory_model_id"`
EmbeddingModelID pgtype.UUID `json:"embedding_model_id"`
@@ -408,6 +426,8 @@ func (q *Queries) UpdateBotOwner(ctx context.Context, arg UpdateBotOwnerParams)
&i.MaxInboxItems,
&i.Language,
&i.AllowGuest,
&i.ReasoningEnabled,
&i.ReasoningEffort,
&i.ChatModelID,
&i.MemoryModelID,
&i.EmbeddingModelID,
@@ -427,7 +447,7 @@ SET display_name = $2,
metadata = $5,
updated_at = now()
WHERE id = $1
RETURNING id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, max_inbox_items, language, allow_guest, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at
RETURNING id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, max_inbox_items, language, allow_guest, reasoning_enabled, reasoning_effort, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at
`
type UpdateBotProfileParams struct {
@@ -451,6 +471,8 @@ type UpdateBotProfileRow struct {
MaxInboxItems int32 `json:"max_inbox_items"`
Language string `json:"language"`
AllowGuest bool `json:"allow_guest"`
ReasoningEnabled bool `json:"reasoning_enabled"`
ReasoningEffort string `json:"reasoning_effort"`
ChatModelID pgtype.UUID `json:"chat_model_id"`
MemoryModelID pgtype.UUID `json:"memory_model_id"`
EmbeddingModelID pgtype.UUID `json:"embedding_model_id"`
@@ -482,6 +504,8 @@ func (q *Queries) UpdateBotProfile(ctx context.Context, arg UpdateBotProfilePara
&i.MaxInboxItems,
&i.Language,
&i.AllowGuest,
&i.ReasoningEnabled,
&i.ReasoningEffort,
&i.ChatModelID,
&i.MemoryModelID,
&i.EmbeddingModelID,
+1 -1
View File
@@ -590,7 +590,7 @@ WITH updated AS (
SET display_name = $1,
updated_at = now()
WHERE bots.id = $2
RETURNING id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, language, allow_guest, max_inbox_items, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at
RETURNING id, owner_user_id, type, display_name, avatar_url, is_active, status, max_context_load_time, max_context_tokens, language, allow_guest, reasoning_enabled, reasoning_effort, max_inbox_items, chat_model_id, memory_model_id, embedding_model_id, search_provider_id, metadata, created_at, updated_at
)
SELECT
updated.id AS id,
+3
View File
@@ -20,6 +20,8 @@ type Bot struct {
MaxContextTokens int32 `json:"max_context_tokens"`
Language string `json:"language"`
AllowGuest bool `json:"allow_guest"`
ReasoningEnabled bool `json:"reasoning_enabled"`
ReasoningEffort string `json:"reasoning_effort"`
MaxInboxItems int32 `json:"max_inbox_items"`
ChatModelID pgtype.UUID `json:"chat_model_id"`
MemoryModelID pgtype.UUID `json:"memory_model_id"`
@@ -222,6 +224,7 @@ type Model struct {
ClientType pgtype.Text `json:"client_type"`
Dimensions pgtype.Int4 `json:"dimensions"`
InputModalities []string `json:"input_modalities"`
SupportsReasoning bool `json:"supports_reasoning"`
Type string `json:"type"`
CreatedAt pgtype.Timestamptz `json:"created_at"`
UpdatedAt pgtype.Timestamptz `json:"updated_at"`
+37 -17
View File
@@ -83,7 +83,7 @@ func (q *Queries) CreateLlmProvider(ctx context.Context, arg CreateLlmProviderPa
}
const createModel = `-- name: CreateModel :one
INSERT INTO models (model_id, name, llm_provider_id, client_type, dimensions, input_modalities, type)
INSERT INTO models (model_id, name, llm_provider_id, client_type, dimensions, input_modalities, supports_reasoning, type)
VALUES (
$1,
$2,
@@ -91,9 +91,10 @@ VALUES (
$4,
$5,
$6,
$7
$7,
$8
)
RETURNING id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, type, created_at, updated_at
RETURNING id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, supports_reasoning, type, created_at, updated_at
`
type CreateModelParams struct {
@@ -103,6 +104,7 @@ type CreateModelParams struct {
ClientType pgtype.Text `json:"client_type"`
Dimensions pgtype.Int4 `json:"dimensions"`
InputModalities []string `json:"input_modalities"`
SupportsReasoning bool `json:"supports_reasoning"`
Type string `json:"type"`
}
@@ -114,6 +116,7 @@ func (q *Queries) CreateModel(ctx context.Context, arg CreateModelParams) (Model
arg.ClientType,
arg.Dimensions,
arg.InputModalities,
arg.SupportsReasoning,
arg.Type,
)
var i Model
@@ -125,6 +128,7 @@ func (q *Queries) CreateModel(ctx context.Context, arg CreateModelParams) (Model
&i.ClientType,
&i.Dimensions,
&i.InputModalities,
&i.SupportsReasoning,
&i.Type,
&i.CreatedAt,
&i.UpdatedAt,
@@ -236,7 +240,7 @@ func (q *Queries) GetLlmProviderByName(ctx context.Context, name string) (LlmPro
}
const getModelByID = `-- name: GetModelByID :one
SELECT id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, type, created_at, updated_at FROM models WHERE id = $1
SELECT id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, supports_reasoning, type, created_at, updated_at FROM models WHERE id = $1
`
func (q *Queries) GetModelByID(ctx context.Context, id pgtype.UUID) (Model, error) {
@@ -250,6 +254,7 @@ func (q *Queries) GetModelByID(ctx context.Context, id pgtype.UUID) (Model, erro
&i.ClientType,
&i.Dimensions,
&i.InputModalities,
&i.SupportsReasoning,
&i.Type,
&i.CreatedAt,
&i.UpdatedAt,
@@ -258,7 +263,7 @@ func (q *Queries) GetModelByID(ctx context.Context, id pgtype.UUID) (Model, erro
}
const getModelByModelID = `-- name: GetModelByModelID :one
SELECT id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, type, created_at, updated_at FROM models WHERE model_id = $1
SELECT id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, supports_reasoning, type, created_at, updated_at FROM models WHERE model_id = $1
`
func (q *Queries) GetModelByModelID(ctx context.Context, modelID string) (Model, error) {
@@ -272,6 +277,7 @@ func (q *Queries) GetModelByModelID(ctx context.Context, modelID string) (Model,
&i.ClientType,
&i.Dimensions,
&i.InputModalities,
&i.SupportsReasoning,
&i.Type,
&i.CreatedAt,
&i.UpdatedAt,
@@ -347,7 +353,7 @@ func (q *Queries) ListModelVariantsByModelUUID(ctx context.Context, modelUuid pg
}
const listModels = `-- name: ListModels :many
SELECT id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, type, created_at, updated_at FROM models
SELECT id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, supports_reasoning, type, created_at, updated_at FROM models
ORDER BY created_at DESC
`
@@ -368,6 +374,7 @@ func (q *Queries) ListModels(ctx context.Context) ([]Model, error) {
&i.ClientType,
&i.Dimensions,
&i.InputModalities,
&i.SupportsReasoning,
&i.Type,
&i.CreatedAt,
&i.UpdatedAt,
@@ -383,7 +390,7 @@ func (q *Queries) ListModels(ctx context.Context) ([]Model, error) {
}
const listModelsByClientType = `-- name: ListModelsByClientType :many
SELECT id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, type, created_at, updated_at FROM models
SELECT id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, supports_reasoning, type, created_at, updated_at FROM models
WHERE client_type = $1
ORDER BY created_at DESC
`
@@ -405,6 +412,7 @@ func (q *Queries) ListModelsByClientType(ctx context.Context, clientType pgtype.
&i.ClientType,
&i.Dimensions,
&i.InputModalities,
&i.SupportsReasoning,
&i.Type,
&i.CreatedAt,
&i.UpdatedAt,
@@ -420,7 +428,7 @@ func (q *Queries) ListModelsByClientType(ctx context.Context, clientType pgtype.
}
const listModelsByModelID = `-- name: ListModelsByModelID :many
SELECT id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, type, created_at, updated_at FROM models
SELECT id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, supports_reasoning, type, created_at, updated_at FROM models
WHERE model_id = $1
ORDER BY created_at DESC
`
@@ -442,6 +450,7 @@ func (q *Queries) ListModelsByModelID(ctx context.Context, modelID string) ([]Mo
&i.ClientType,
&i.Dimensions,
&i.InputModalities,
&i.SupportsReasoning,
&i.Type,
&i.CreatedAt,
&i.UpdatedAt,
@@ -457,7 +466,7 @@ func (q *Queries) ListModelsByModelID(ctx context.Context, modelID string) ([]Mo
}
const listModelsByProviderID = `-- name: ListModelsByProviderID :many
SELECT id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, type, created_at, updated_at FROM models
SELECT id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, supports_reasoning, type, created_at, updated_at FROM models
WHERE llm_provider_id = $1
ORDER BY created_at DESC
`
@@ -479,6 +488,7 @@ func (q *Queries) ListModelsByProviderID(ctx context.Context, llmProviderID pgty
&i.ClientType,
&i.Dimensions,
&i.InputModalities,
&i.SupportsReasoning,
&i.Type,
&i.CreatedAt,
&i.UpdatedAt,
@@ -494,7 +504,7 @@ func (q *Queries) ListModelsByProviderID(ctx context.Context, llmProviderID pgty
}
const listModelsByProviderIDAndType = `-- name: ListModelsByProviderIDAndType :many
SELECT id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, type, created_at, updated_at FROM models
SELECT id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, supports_reasoning, type, created_at, updated_at FROM models
WHERE llm_provider_id = $1
AND type = $2
ORDER BY created_at DESC
@@ -522,6 +532,7 @@ func (q *Queries) ListModelsByProviderIDAndType(ctx context.Context, arg ListMod
&i.ClientType,
&i.Dimensions,
&i.InputModalities,
&i.SupportsReasoning,
&i.Type,
&i.CreatedAt,
&i.UpdatedAt,
@@ -537,7 +548,7 @@ func (q *Queries) ListModelsByProviderIDAndType(ctx context.Context, arg ListMod
}
const listModelsByType = `-- name: ListModelsByType :many
SELECT id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, type, created_at, updated_at FROM models
SELECT id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, supports_reasoning, type, created_at, updated_at FROM models
WHERE type = $1
ORDER BY created_at DESC
`
@@ -559,6 +570,7 @@ func (q *Queries) ListModelsByType(ctx context.Context, type_ string) ([]Model,
&i.ClientType,
&i.Dimensions,
&i.InputModalities,
&i.SupportsReasoning,
&i.Type,
&i.CreatedAt,
&i.UpdatedAt,
@@ -623,10 +635,11 @@ SET
client_type = $4,
dimensions = $5,
input_modalities = $6,
type = $7,
supports_reasoning = $7,
type = $8,
updated_at = now()
WHERE id = $8
RETURNING id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, type, created_at, updated_at
WHERE id = $9
RETURNING id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, supports_reasoning, type, created_at, updated_at
`
type UpdateModelParams struct {
@@ -636,6 +649,7 @@ type UpdateModelParams struct {
ClientType pgtype.Text `json:"client_type"`
Dimensions pgtype.Int4 `json:"dimensions"`
InputModalities []string `json:"input_modalities"`
SupportsReasoning bool `json:"supports_reasoning"`
Type string `json:"type"`
ID pgtype.UUID `json:"id"`
}
@@ -648,6 +662,7 @@ func (q *Queries) UpdateModel(ctx context.Context, arg UpdateModelParams) (Model
arg.ClientType,
arg.Dimensions,
arg.InputModalities,
arg.SupportsReasoning,
arg.Type,
arg.ID,
)
@@ -660,6 +675,7 @@ func (q *Queries) UpdateModel(ctx context.Context, arg UpdateModelParams) (Model
&i.ClientType,
&i.Dimensions,
&i.InputModalities,
&i.SupportsReasoning,
&i.Type,
&i.CreatedAt,
&i.UpdatedAt,
@@ -676,10 +692,11 @@ SET
client_type = $4,
dimensions = $5,
input_modalities = $6,
type = $7,
supports_reasoning = $7,
type = $8,
updated_at = now()
WHERE model_id = $8
RETURNING id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, type, created_at, updated_at
WHERE model_id = $9
RETURNING id, model_id, name, llm_provider_id, client_type, dimensions, input_modalities, supports_reasoning, type, created_at, updated_at
`
type UpdateModelByModelIDParams struct {
@@ -689,6 +706,7 @@ type UpdateModelByModelIDParams struct {
ClientType pgtype.Text `json:"client_type"`
Dimensions pgtype.Int4 `json:"dimensions"`
InputModalities []string `json:"input_modalities"`
SupportsReasoning bool `json:"supports_reasoning"`
Type string `json:"type"`
ModelID string `json:"model_id"`
}
@@ -701,6 +719,7 @@ func (q *Queries) UpdateModelByModelID(ctx context.Context, arg UpdateModelByMod
arg.ClientType,
arg.Dimensions,
arg.InputModalities,
arg.SupportsReasoning,
arg.Type,
arg.ModelID,
)
@@ -713,6 +732,7 @@ func (q *Queries) UpdateModelByModelID(ctx context.Context, arg UpdateModelByMod
&i.ClientType,
&i.Dimensions,
&i.InputModalities,
&i.SupportsReasoning,
&i.Type,
&i.CreatedAt,
&i.UpdatedAt,
+26 -6
View File
@@ -18,6 +18,8 @@ SET max_context_load_time = 1440,
max_inbox_items = 50,
language = 'auto',
allow_guest = false,
reasoning_enabled = false,
reasoning_effort = 'medium',
chat_model_id = NULL,
memory_model_id = NULL,
embedding_model_id = NULL,
@@ -39,6 +41,8 @@ SELECT
bots.max_inbox_items,
bots.language,
bots.allow_guest,
bots.reasoning_enabled,
bots.reasoning_effort,
chat_models.id AS chat_model_id,
memory_models.id AS memory_model_id,
embedding_models.id AS embedding_model_id,
@@ -58,6 +62,8 @@ type GetSettingsByBotIDRow struct {
MaxInboxItems int32 `json:"max_inbox_items"`
Language string `json:"language"`
AllowGuest bool `json:"allow_guest"`
ReasoningEnabled bool `json:"reasoning_enabled"`
ReasoningEffort string `json:"reasoning_effort"`
ChatModelID pgtype.UUID `json:"chat_model_id"`
MemoryModelID pgtype.UUID `json:"memory_model_id"`
EmbeddingModelID pgtype.UUID `json:"embedding_model_id"`
@@ -74,6 +80,8 @@ func (q *Queries) GetSettingsByBotID(ctx context.Context, id pgtype.UUID) (GetSe
&i.MaxInboxItems,
&i.Language,
&i.AllowGuest,
&i.ReasoningEnabled,
&i.ReasoningEffort,
&i.ChatModelID,
&i.MemoryModelID,
&i.EmbeddingModelID,
@@ -90,13 +98,15 @@ WITH updated AS (
max_inbox_items = $3,
language = $4,
allow_guest = $5,
chat_model_id = COALESCE($6::uuid, bots.chat_model_id),
memory_model_id = COALESCE($7::uuid, bots.memory_model_id),
embedding_model_id = COALESCE($8::uuid, bots.embedding_model_id),
search_provider_id = COALESCE($9::uuid, bots.search_provider_id),
reasoning_enabled = $6,
reasoning_effort = $7,
chat_model_id = COALESCE($8::uuid, bots.chat_model_id),
memory_model_id = COALESCE($9::uuid, bots.memory_model_id),
embedding_model_id = COALESCE($10::uuid, bots.embedding_model_id),
search_provider_id = COALESCE($11::uuid, bots.search_provider_id),
updated_at = now()
WHERE bots.id = $10
RETURNING bots.id, bots.max_context_load_time, bots.max_context_tokens, bots.max_inbox_items, bots.language, bots.allow_guest, bots.chat_model_id, bots.memory_model_id, bots.embedding_model_id, bots.search_provider_id
WHERE bots.id = $12
RETURNING bots.id, bots.max_context_load_time, bots.max_context_tokens, bots.max_inbox_items, bots.language, bots.allow_guest, bots.reasoning_enabled, bots.reasoning_effort, bots.chat_model_id, bots.memory_model_id, bots.embedding_model_id, bots.search_provider_id
)
SELECT
updated.id AS bot_id,
@@ -105,6 +115,8 @@ SELECT
updated.max_inbox_items,
updated.language,
updated.allow_guest,
updated.reasoning_enabled,
updated.reasoning_effort,
chat_models.id AS chat_model_id,
memory_models.id AS memory_model_id,
embedding_models.id AS embedding_model_id,
@@ -122,6 +134,8 @@ type UpsertBotSettingsParams struct {
MaxInboxItems int32 `json:"max_inbox_items"`
Language string `json:"language"`
AllowGuest bool `json:"allow_guest"`
ReasoningEnabled bool `json:"reasoning_enabled"`
ReasoningEffort string `json:"reasoning_effort"`
ChatModelID pgtype.UUID `json:"chat_model_id"`
MemoryModelID pgtype.UUID `json:"memory_model_id"`
EmbeddingModelID pgtype.UUID `json:"embedding_model_id"`
@@ -136,6 +150,8 @@ type UpsertBotSettingsRow struct {
MaxInboxItems int32 `json:"max_inbox_items"`
Language string `json:"language"`
AllowGuest bool `json:"allow_guest"`
ReasoningEnabled bool `json:"reasoning_enabled"`
ReasoningEffort string `json:"reasoning_effort"`
ChatModelID pgtype.UUID `json:"chat_model_id"`
MemoryModelID pgtype.UUID `json:"memory_model_id"`
EmbeddingModelID pgtype.UUID `json:"embedding_model_id"`
@@ -149,6 +165,8 @@ func (q *Queries) UpsertBotSettings(ctx context.Context, arg UpsertBotSettingsPa
arg.MaxInboxItems,
arg.Language,
arg.AllowGuest,
arg.ReasoningEnabled,
arg.ReasoningEffort,
arg.ChatModelID,
arg.MemoryModelID,
arg.EmbeddingModelID,
@@ -163,6 +181,8 @@ func (q *Queries) UpsertBotSettings(ctx context.Context, arg UpsertBotSettingsPa
&i.MaxInboxItems,
&i.Language,
&i.AllowGuest,
&i.ReasoningEnabled,
&i.ReasoningEffort,
&i.ChatModelID,
&i.MemoryModelID,
&i.EmbeddingModelID,
+4
View File
@@ -52,6 +52,7 @@ func (s *Service) Create(ctx context.Context, req AddRequest) (AddResponse, erro
ModelID: model.ModelID,
LlmProviderID: llmProviderID,
InputModalities: inputMod,
SupportsReasoning: model.SupportsReasoning,
Type: string(model.Type),
}
if model.ClientType != "" {
@@ -217,6 +218,7 @@ func (s *Service) UpdateByID(ctx context.Context, id string, req UpdateRequest)
ID: uuid,
ModelID: model.ModelID,
InputModalities: inputMod,
SupportsReasoning: model.SupportsReasoning,
Type: string(model.Type),
}
if model.ClientType != "" {
@@ -270,6 +272,7 @@ func (s *Service) UpdateByModelID(ctx context.Context, modelID string, req Updat
params := sqlc.UpdateModelParams{
ID: current.ID,
InputModalities: inputMod,
SupportsReasoning: model.SupportsReasoning,
Type: string(model.Type),
}
if model.ClientType != "" {
@@ -364,6 +367,7 @@ func convertToGetResponse(dbModel sqlc.Model) GetResponse {
ModelID: dbModel.ModelID,
Model: Model{
ModelID: dbModel.ModelID,
SupportsReasoning: dbModel.SupportsReasoning,
Type: ModelType(dbModel.Type),
},
}
+1
View File
@@ -37,6 +37,7 @@ type Model struct {
LlmProviderID string `json:"llm_provider_id"`
ClientType ClientType `json:"client_type,omitempty"`
InputModalities []string `json:"input_modalities,omitempty"`
SupportsReasoning bool `json:"supports_reasoning"`
Type ModelType `json:"type"`
Dimensions int `json:"dimensions"`
}
+31 -3
View File
@@ -57,7 +57,7 @@ func (s *Service) UpsertBot(ctx context.Context, botID string, req UpsertRequest
}
isPersonalBot := strings.EqualFold(strings.TrimSpace(botRow.Type), "personal")
current := normalizeBotSetting(botRow.MaxContextLoadTime, botRow.MaxContextTokens, botRow.MaxInboxItems, botRow.Language, botRow.AllowGuest)
current := normalizeBotSetting(botRow.MaxContextLoadTime, botRow.MaxContextTokens, botRow.MaxInboxItems, botRow.Language, botRow.AllowGuest, botRow.ReasoningEnabled, botRow.ReasoningEffort)
if req.MaxContextLoadTime != nil && *req.MaxContextLoadTime > 0 {
current.MaxContextLoadTime = *req.MaxContextLoadTime
}
@@ -78,6 +78,12 @@ func (s *Service) UpsertBot(ctx context.Context, botID string, req UpsertRequest
} else if req.AllowGuest != nil {
current.AllowGuest = *req.AllowGuest
}
if req.ReasoningEnabled != nil {
current.ReasoningEnabled = *req.ReasoningEnabled
}
if req.ReasoningEffort != nil && isValidReasoningEffort(*req.ReasoningEffort) {
current.ReasoningEffort = *req.ReasoningEffort
}
chatModelUUID := pgtype.UUID{}
if value := strings.TrimSpace(req.ChatModelID); value != "" {
@@ -119,6 +125,8 @@ func (s *Service) UpsertBot(ctx context.Context, botID string, req UpsertRequest
MaxInboxItems: int32(current.MaxInboxItems),
Language: current.Language,
AllowGuest: current.AllowGuest,
ReasoningEnabled: current.ReasoningEnabled,
ReasoningEffort: current.ReasoningEffort,
ChatModelID: chatModelUUID,
MemoryModelID: memoryModelUUID,
EmbeddingModelID: embeddingModelUUID,
@@ -141,13 +149,15 @@ func (s *Service) Delete(ctx context.Context, botID string) error {
return s.queries.DeleteSettingsByBotID(ctx, pgID)
}
func normalizeBotSetting(maxContextLoadTime int32, maxContextTokens int32, maxInboxItems int32, language string, allowGuest bool) Settings {
func normalizeBotSetting(maxContextLoadTime int32, maxContextTokens int32, maxInboxItems int32, language string, allowGuest bool, reasoningEnabled bool, reasoningEffort string) Settings {
settings := Settings{
MaxContextLoadTime: int(maxContextLoadTime),
MaxContextTokens: int(maxContextTokens),
MaxInboxItems: int(maxInboxItems),
Language: strings.TrimSpace(language),
AllowGuest: allowGuest,
ReasoningEnabled: reasoningEnabled,
ReasoningEffort: strings.TrimSpace(reasoningEffort),
}
if settings.MaxContextLoadTime <= 0 {
settings.MaxContextLoadTime = DefaultMaxContextLoadTime
@@ -161,9 +171,21 @@ func normalizeBotSetting(maxContextLoadTime int32, maxContextTokens int32, maxIn
if settings.Language == "" {
settings.Language = DefaultLanguage
}
if !isValidReasoningEffort(settings.ReasoningEffort) {
settings.ReasoningEffort = DefaultReasoningEffort
}
return settings
}
// isValidReasoningEffort reports whether effort is one of the accepted
// reasoning effort levels: "low", "medium", or "high". The comparison is
// case-sensitive; any other value (including the empty string) is invalid.
func isValidReasoningEffort(effort string) bool {
	return effort == "low" || effort == "medium" || effort == "high"
}
func normalizeBotSettingsReadRow(row sqlc.GetSettingsByBotIDRow) Settings {
return normalizeBotSettingsFields(
row.MaxContextLoadTime,
@@ -171,6 +193,8 @@ func normalizeBotSettingsReadRow(row sqlc.GetSettingsByBotIDRow) Settings {
row.MaxInboxItems,
row.Language,
row.AllowGuest,
row.ReasoningEnabled,
row.ReasoningEffort,
row.ChatModelID,
row.MemoryModelID,
row.EmbeddingModelID,
@@ -185,6 +209,8 @@ func normalizeBotSettingsWriteRow(row sqlc.UpsertBotSettingsRow) Settings {
row.MaxInboxItems,
row.Language,
row.AllowGuest,
row.ReasoningEnabled,
row.ReasoningEffort,
row.ChatModelID,
row.MemoryModelID,
row.EmbeddingModelID,
@@ -198,12 +224,14 @@ func normalizeBotSettingsFields(
maxInboxItems int32,
language string,
allowGuest bool,
reasoningEnabled bool,
reasoningEffort string,
chatModelID pgtype.UUID,
memoryModelID pgtype.UUID,
embeddingModelID pgtype.UUID,
searchProviderID pgtype.UUID,
) Settings {
settings := normalizeBotSetting(maxContextLoadTime, maxContextTokens, maxInboxItems, language, allowGuest)
settings := normalizeBotSetting(maxContextLoadTime, maxContextTokens, maxInboxItems, language, allowGuest, reasoningEnabled, reasoningEffort)
if chatModelID.Valid {
settings.ChatModelID = uuid.UUID(chatModelID.Bytes).String()
}
+5
View File
@@ -4,6 +4,7 @@ const (
DefaultMaxContextLoadTime = 24 * 60
DefaultMaxInboxItems = 50
DefaultLanguage = "auto"
DefaultReasoningEffort = "medium"
)
type Settings struct {
@@ -16,6 +17,8 @@ type Settings struct {
MaxInboxItems int `json:"max_inbox_items"`
Language string `json:"language"`
AllowGuest bool `json:"allow_guest"`
ReasoningEnabled bool `json:"reasoning_enabled"`
ReasoningEffort string `json:"reasoning_effort"`
}
type UpsertRequest struct {
@@ -28,4 +31,6 @@ type UpsertRequest struct {
MaxInboxItems *int `json:"max_inbox_items,omitempty"`
Language string `json:"language,omitempty"`
AllowGuest *bool `json:"allow_guest,omitempty"`
ReasoningEnabled *bool `json:"reasoning_enabled,omitempty"`
ReasoningEffort *string `json:"reasoning_effort,omitempty"`
}
+26 -1
View File
@@ -17,7 +17,7 @@ import {
MCPConnection,
Schedule,
} from './types'
import { ModelInput, hasInputModality } from './types/model'
import { ClientType, ModelConfig, ModelInput, hasInputModality } from './types/model'
import { system, schedule, subagentSystem } from './prompts'
import { AuthFetcher } from './types'
import { createModel } from './model'
@@ -33,6 +33,25 @@ import { getTools } from './tools'
import { buildIdentityHeaders } from './utils/headers'
import { createFS } from './utils'
const ANTHROPIC_BUDGET: Record<string, number> = { low: 5000, medium: 16000, high: 50000 }
const GOOGLE_BUDGET: Record<string, number> = { low: 5000, medium: 16000, high: 50000 }
const buildProviderOptions = (config: ModelConfig): Record<string, Record<string, unknown>> | undefined => {
if (!config.reasoning?.enabled) return undefined
const effort = config.reasoning.effort ?? 'medium'
switch (config.clientType) {
case ClientType.AnthropicMessages:
return { anthropic: { thinking: { type: 'enabled' as const, budgetTokens: ANTHROPIC_BUDGET[effort] } } }
case ClientType.OpenAIResponses:
case ClientType.OpenAICompletions:
return { openai: { reasoningEffort: effort } }
case ClientType.GoogleGenerativeAI:
return { google: { thinkingConfig: { thinkingBudget: GOOGLE_BUDGET[effort] } } }
default:
return undefined
}
}
const buildStepUsages = (
steps: { usage: LanguageModelUsage; response: { messages: unknown[] } }[],
): (LanguageModelUsage | null)[] => {
@@ -77,6 +96,8 @@ export const createAgent = (
fetch: AuthFetcher,
) => {
const model = createModel(modelConfig)
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const providerOptions = buildProviderOptions(modelConfig) as any
const enabledSkills: AgentSkill[] = []
const fs = createFS({ fetch, botId: identity.botId })
@@ -181,6 +202,7 @@ export const createAgent = (
model,
messages,
system: systemPrompt,
...(providerOptions && { providerOptions }),
stopWhen: stepCountIs(Infinity),
prepareStep: () => {
return {
@@ -238,6 +260,7 @@ export const createAgent = (
model,
messages,
system: generateSubagentSystemPrompt(),
...(providerOptions && { providerOptions }),
stopWhen: stepCountIs(Infinity),
prepareStep: () => {
return {
@@ -281,6 +304,7 @@ export const createAgent = (
model,
messages,
system: await generateSystemPrompt(),
...(providerOptions && { providerOptions }),
stopWhen: stepCountIs(Infinity),
onFinish: async () => {
await close()
@@ -343,6 +367,7 @@ export const createAgent = (
model,
messages,
system: systemPrompt,
...(providerOptions && { providerOptions }),
stopWhen: stepCountIs(Infinity),
prepareStep: () => {
return {
+8
View File
@@ -13,12 +13,20 @@ export enum ModelInput {
File = 'file',
}
export type ReasoningEffort = 'low' | 'medium' | 'high'
export interface ReasoningConfig {
enabled: boolean
effort: ReasoningEffort
}
export interface ModelConfig {
apiKey: string
baseUrl: string
modelId: string
clientType: ClientType
input: ModelInput[]
reasoning?: ReasoningConfig
}
export const hasInputModality = (config: ModelConfig, modality: ModelInput): boolean =>
+7
View File
@@ -778,6 +778,7 @@ export type ModelsAddRequest = {
llm_provider_id?: string;
model_id?: string;
name?: string;
supports_reasoning?: boolean;
type?: ModelsModelType;
};
@@ -800,6 +801,7 @@ export type ModelsGetResponse = {
llm_provider_id?: string;
model_id?: string;
name?: string;
supports_reasoning?: boolean;
type?: ModelsModelType;
};
@@ -812,6 +814,7 @@ export type ModelsUpdateRequest = {
llm_provider_id?: string;
model_id?: string;
name?: string;
supports_reasoning?: boolean;
type?: ModelsModelType;
};
@@ -970,6 +973,8 @@ export type SettingsSettings = {
max_context_tokens?: number;
max_inbox_items?: number;
memory_model_id?: string;
reasoning_effort?: string;
reasoning_enabled?: boolean;
search_provider_id?: string;
};
@@ -982,6 +987,8 @@ export type SettingsUpsertRequest = {
max_context_tokens?: number;
max_inbox_items?: number;
memory_model_id?: string;
reasoning_effort?: string;
reasoning_enabled?: boolean;
search_provider_id?: string;
};
@@ -161,6 +161,18 @@
</label>
</div>
</div>
<!-- Supports Reasoning (chat only) -->
<div
v-if="selectedType === 'chat'"
class="flex items-center justify-between"
>
<Label>{{ $t('models.supportsReasoning') }}</Label>
<Switch
:model-value="supportsReasoning"
@update:model-value="(val) => supportsReasoning = !!val"
/>
</div>
</div>
</template>
</FormDialogShell>
@@ -182,6 +194,7 @@ import {
FormItem,
Checkbox,
Label,
Switch,
} from '@memoh/ui'
import { useForm } from 'vee-validate'
import { inject, computed, watch, nextTick, type Ref, ref } from 'vue'
@@ -199,6 +212,7 @@ import { useDialogMutation } from '@/composables/useDialogMutation'
const availableInputModalities = ['text', 'image', 'audio', 'video', 'file'] as const
const selectedModalities = ref<string[]>(['text'])
const supportsReasoning = ref(false)
const { t } = useI18n()
const { run } = useDialogMutation()
@@ -340,6 +354,7 @@ async function addModel(e: Event) {
if (type === 'chat') {
payload.input_modalities = selectedModalities.value.length > 0 ? selectedModalities.value : ['text']
payload.supports_reasoning = supportsReasoning.value
}
await run(
@@ -375,10 +390,12 @@ watch(open, async () => {
const { client_type, type, model_id, name, dimensions, input_modalities } = editInfo.value
form.resetForm({ values: { type: type || 'chat', client_type: client_type || '', model_id, name, dimensions } })
selectedModalities.value = input_modalities ?? ['text']
supportsReasoning.value = !!editInfo.value.supports_reasoning
userEditedName.value = !!(name && name !== model_id)
} else {
form.resetForm({ values: { type: 'chat', client_type: '', model_id: '', name: '', dimensions: undefined } })
selectedModalities.value = ['text']
supportsReasoning.value = false
userEditedName.value = false
}
}, {
@@ -109,6 +109,7 @@ export function extractTextFromContent(content: unknown): string {
if (!part || typeof part !== 'object') return ''
const value = part as Record<string, unknown>
const partType = String(value.type ?? '').toLowerCase()
if (partType === 'reasoning') return ''
if (partType === 'text' && typeof value.text === 'string') return value.text.trim()
if (partType === 'link' && typeof value.url === 'string') return value.url.trim()
if (partType === 'emoji' && typeof value.emoji === 'string') return value.emoji.trim()
@@ -127,3 +128,44 @@ export function extractTextFromContent(content: unknown): string {
return ''
}
/**
 * Extract every reasoning ("thinking") text from a message's content.
 *
 * Accepts content that is either a JSON-encoded string or an already-parsed
 * object; in both cases the reasoning parts are looked up under a `content`
 * wrapper when present. Returns an empty array for missing or unparseable
 * content.
 */
export function extractMessageReasoning(message: Message): string[] {
  const raw = message.content
  if (!raw) return []
  if (typeof raw === 'string') {
    let parsed: unknown
    try {
      parsed = JSON.parse(raw)
    } catch {
      // Not JSON — plain strings carry no structured reasoning parts.
      return []
    }
    const wrapper = parsed as { content?: unknown } | null
    return extractReasoningParts(wrapper?.content ?? parsed)
  }
  if (typeof raw !== 'object') return []
  const obj = raw as Record<string, unknown>
  if ('content' in obj && obj.content !== undefined && obj.content !== null) {
    return extractReasoningParts(obj.content)
  }
  return extractReasoningParts(raw)
}
/**
 * Collect the trimmed text of every part whose `type` is "reasoning"
 * (case-insensitive) from an array of content parts.
 *
 * A non-array object is unwrapped once via its `content` property when that
 * property is itself an array; anything else yields an empty result. Parts
 * without a non-empty string `text` are skipped.
 */
function extractReasoningParts(content: unknown): string[] {
  if (!Array.isArray(content)) {
    // Allow one level of { content: [...] } wrapping.
    const nested = content && typeof content === 'object'
      ? (content as Record<string, unknown>).content
      : undefined
    return Array.isArray(nested) ? extractReasoningParts(nested) : []
  }
  const texts: string[] = []
  for (const part of content) {
    if (!part || typeof part !== 'object') continue
    const value = part as Record<string, unknown>
    if (String(value.type ?? '').toLowerCase() !== 'reasoning') continue
    if (typeof value.text !== 'string') continue
    const trimmed = value.text.trim()
    if (trimmed !== '') texts.push(trimmed)
  }
  return texts
}
+6
View File
@@ -153,6 +153,7 @@
"dimensionsPlaceholder": "e.g. 1536",
"multimodal": "Multimodal",
"inputModalities": "Input Modalities",
"supportsReasoning": "Supports Reasoning",
"modality": {
"text": "Text",
"image": "Image",
@@ -367,6 +368,11 @@
"maxContextLoadTime": "Max Context Load Time",
"maxContextTokens": "Max Context Tokens",
"language": "Language",
"reasoningEnabled": "Enable Reasoning",
"reasoningEffort": "Reasoning Effort",
"reasoningEffortLow": "Low",
"reasoningEffortMedium": "Medium",
"reasoningEffortHigh": "High",
"allowGuest": "Allow Guest Access",
"allowGuestPersonalHint": "Personal bots do not support guest access. Use a public bot instead.",
"searchModel": "Search models…",
+6
View File
@@ -149,6 +149,7 @@
"dimensionsPlaceholder": "例如 1536",
"multimodal": "支持多模态",
"inputModalities": "输入模态",
"supportsReasoning": "支持推理",
"modality": {
"text": "文本",
"image": "图片",
@@ -363,6 +364,11 @@
"maxContextLoadTime": "最大上下文加载时间",
"maxContextTokens": "最大上下文Token数",
"language": "语言",
"reasoningEnabled": "启用推理",
"reasoningEffort": "推理等级",
"reasoningEffortLow": "低",
"reasoningEffortMedium": "中",
"reasoningEffortHigh": "高",
"allowGuest": "允许游客访问",
"allowGuestPersonalHint": "个人 Bot 不支持游客访问,请使用公开 Bot。",
"searchModel": "搜索模型…",
@@ -81,6 +81,47 @@
/>
</div>
<!-- Reasoning (only if chat model supports it) -->
<template v-if="chatModelSupportsReasoning">
<Separator />
<div class="space-y-4">
<div class="flex items-center justify-between">
<Label>{{ $t('bots.settings.reasoningEnabled') }}</Label>
<Switch
:model-value="form.reasoning_enabled"
@update:model-value="(val) => form.reasoning_enabled = !!val"
/>
</div>
<div
v-if="form.reasoning_enabled"
class="space-y-2"
>
<Label>{{ $t('bots.settings.reasoningEffort') }}</Label>
<Select
:model-value="form.reasoning_effort"
@update:model-value="(val) => form.reasoning_effort = val ?? 'medium'"
>
<SelectTrigger>
<SelectValue />
</SelectTrigger>
<SelectContent>
<SelectGroup>
<SelectItem value="low">
{{ $t('bots.settings.reasoningEffortLow') }}
</SelectItem>
<SelectItem value="medium">
{{ $t('bots.settings.reasoningEffortMedium') }}
</SelectItem>
<SelectItem value="high">
{{ $t('bots.settings.reasoningEffortHigh') }}
</SelectItem>
</SelectGroup>
</SelectContent>
</Select>
</div>
</div>
</template>
<!-- Allow Guest: only for public bot -->
<template v-if="isPublicBot">
<div class="flex items-center justify-between">
@@ -149,6 +190,12 @@ import {
Button,
Separator,
Spinner,
Select,
SelectContent,
SelectGroup,
SelectItem,
SelectTrigger,
SelectValue,
} from '@memoh/ui'
import { reactive, computed, watch } from 'vue'
import { useRouter } from 'vue-router'
@@ -237,6 +284,12 @@ const models = computed(() => modelData.value ?? [])
const providers = computed(() => providerData.value ?? [])
const searchProviders = computed(() => searchProviderData.value ?? [])
const chatModelSupportsReasoning = computed(() => {
if (!form.chat_model_id) return false
const m = models.value.find((m) => m.id === form.chat_model_id)
return !!m?.supports_reasoning
})
// ---- Form ----
const form = reactive<SettingsSettings>({
chat_model_id: '',
@@ -247,9 +300,10 @@ const form = reactive<SettingsSettings>({
max_context_tokens: 0,
language: '',
allow_guest: false,
reasoning_enabled: false,
reasoning_effort: 'medium',
})
//
watch(settings, (val) => {
if (val) {
form.chat_model_id = val.chat_model_id ?? ''
@@ -260,6 +314,8 @@ watch(settings, (val) => {
form.max_context_tokens = val.max_context_tokens ?? 0
form.language = val.language ?? ''
form.allow_guest = val.allow_guest ?? false
form.reasoning_enabled = val.reasoning_enabled ?? false
form.reasoning_effort = val.reasoning_effort || 'medium'
}
}, { immediate: true })
@@ -274,6 +330,8 @@ const hasChanges = computed(() => {
|| form.max_context_load_time !== (s.max_context_load_time ?? 0)
|| form.max_context_tokens !== (s.max_context_tokens ?? 0)
|| form.language !== (s.language ?? '')
|| form.reasoning_enabled !== (s.reasoning_enabled ?? false)
|| form.reasoning_effort !== (s.reasoning_effort || 'medium')
if (isPublicBot.value) {
changed = changed || form.allow_guest !== (s.allow_guest ?? false)
}
+20 -3
View File
@@ -16,6 +16,7 @@ import {
extractMessageText,
extractToolCalls,
extractAllToolResults,
extractMessageReasoning,
sendLocalChannelMessage,
streamLocalChannel,
streamMessageEvents,
@@ -153,9 +154,14 @@ export const useChatStore = defineStore('chat', () => {
const text = extractMessageText(raw)
const assetBlocks = buildAssetBlocks(raw)
if (!text && assetBlocks.length === 0) return null
const reasoningTexts = extractMessageReasoning(raw)
if (!text && assetBlocks.length === 0 && reasoningTexts.length === 0) return null
const blocks: ContentBlock[] = []
for (const r of reasoningTexts) {
blocks.push({ type: 'thinking', content: r, done: true })
}
if (text) blocks.push({ type: 'text', content: text })
blocks.push(...assetBlocks)
@@ -226,6 +232,7 @@ export const useChatStore = defineStore('chat', () => {
if (raw.role === 'assistant') {
const toolCalls = extractToolCalls(raw)
const text = extractMessageText(raw)
const reasoningTexts = extractMessageReasoning(raw)
if (toolCalls.length > 0) {
if (!pendingAssistant) {
@@ -240,6 +247,9 @@ export const useChatStore = defineStore('chat', () => {
...(channelTag && { platform: channelTag }),
}
}
for (const r of reasoningTexts) {
pendingAssistant.blocks.push({ type: 'thinking', content: r, done: true })
}
if (text) {
pendingAssistant.blocks.push({ type: 'text', content: text })
}
@@ -258,8 +268,10 @@ export const useChatStore = defineStore('chat', () => {
continue
}
// Assistant message without tool_calls
if (pendingAssistant && text) {
for (const r of reasoningTexts) {
pendingAssistant.blocks.push({ type: 'thinking', content: r, done: true })
}
pendingAssistant.blocks.push({ type: 'text', content: text })
flushPending()
continue
@@ -481,7 +493,12 @@ export const useChatStore = defineStore('chat', () => {
break
case 'reasoning_end':
if (session.thinkingBlockIdx >= 0 && session.assistantMsg.blocks[session.thinkingBlockIdx]?.type === 'thinking') {
;(session.assistantMsg.blocks[session.thinkingBlockIdx] as ThinkingBlock).done = true
const tb = session.assistantMsg.blocks[session.thinkingBlockIdx] as ThinkingBlock
if (tb.content.trim() === '') {
session.assistantMsg.blocks.splice(session.thinkingBlockIdx, 1)
} else {
tb.done = true
}
}
session.thinkingBlockIdx = -1
break
+21
View File
@@ -7728,6 +7728,9 @@ const docTemplate = `{
"name": {
"type": "string"
},
"supports_reasoning": {
"type": "boolean"
},
"type": {
"$ref": "#/definitions/models.ModelType"
}
@@ -7794,6 +7797,9 @@ const docTemplate = `{
"name": {
"type": "string"
},
"supports_reasoning": {
"type": "boolean"
},
"type": {
"$ref": "#/definitions/models.ModelType"
}
@@ -7834,6 +7840,9 @@ const docTemplate = `{
"name": {
"type": "string"
},
"supports_reasoning": {
"type": "boolean"
},
"type": {
"$ref": "#/definitions/models.ModelType"
}
@@ -8212,6 +8221,12 @@ const docTemplate = `{
"memory_model_id": {
"type": "string"
},
"reasoning_effort": {
"type": "string"
},
"reasoning_enabled": {
"type": "boolean"
},
"search_provider_id": {
"type": "string"
}
@@ -8244,6 +8259,12 @@ const docTemplate = `{
"memory_model_id": {
"type": "string"
},
"reasoning_effort": {
"type": "string"
},
"reasoning_enabled": {
"type": "boolean"
},
"search_provider_id": {
"type": "string"
}
+21
View File
@@ -7719,6 +7719,9 @@
"name": {
"type": "string"
},
"supports_reasoning": {
"type": "boolean"
},
"type": {
"$ref": "#/definitions/models.ModelType"
}
@@ -7785,6 +7788,9 @@
"name": {
"type": "string"
},
"supports_reasoning": {
"type": "boolean"
},
"type": {
"$ref": "#/definitions/models.ModelType"
}
@@ -7825,6 +7831,9 @@
"name": {
"type": "string"
},
"supports_reasoning": {
"type": "boolean"
},
"type": {
"$ref": "#/definitions/models.ModelType"
}
@@ -8203,6 +8212,12 @@
"memory_model_id": {
"type": "string"
},
"reasoning_effort": {
"type": "string"
},
"reasoning_enabled": {
"type": "boolean"
},
"search_provider_id": {
"type": "string"
}
@@ -8235,6 +8250,12 @@
"memory_model_id": {
"type": "string"
},
"reasoning_effort": {
"type": "string"
},
"reasoning_enabled": {
"type": "boolean"
},
"search_provider_id": {
"type": "string"
}
+14
View File
@@ -1265,6 +1265,8 @@ definitions:
type: string
name:
type: string
supports_reasoning:
type: boolean
type:
$ref: '#/definitions/models.ModelType'
type: object
@@ -1310,6 +1312,8 @@ definitions:
type: string
name:
type: string
supports_reasoning:
type: boolean
type:
$ref: '#/definitions/models.ModelType'
type: object
@@ -1337,6 +1341,8 @@ definitions:
type: string
name:
type: string
supports_reasoning:
type: boolean
type:
$ref: '#/definitions/models.ModelType'
type: object
@@ -1588,6 +1594,10 @@ definitions:
type: integer
memory_model_id:
type: string
reasoning_effort:
type: string
reasoning_enabled:
type: boolean
search_provider_id:
type: string
type: object
@@ -1609,6 +1619,10 @@ definitions:
type: integer
memory_model_id:
type: string
reasoning_effort:
type: string
reasoning_enabled:
type: boolean
search_provider_id:
type: string
type: object