feat: add per-message model and reasoning effort override

Allow users to select a different model and reasoning effort level
directly from the chat input toolbar, overriding the bot defaults
on a per-message basis. The backend accepts optional model_id and
reasoning_effort parameters via both WebSocket and HTTP APIs, with
request-level values taking priority over bot/session settings.

- Backend: extend wsClientMessage and LocalChannelMessageRequest with
  model_id/reasoning_effort fields; add ReasoningEffort to ChatRequest;
  update resolver to prioritize request-level reasoning effort
- Frontend: add ModelOptions and ReasoningEffortSelect shared components;
  refactor model-select to reuse ModelOptions; add model/reasoning
  selectors to chat input toolbar; initialize from bot settings
- Regenerate swagger spec and TypeScript SDK
This commit is contained in:
Acbox
2026-03-29 19:45:55 +08:00
parent 86d83108d9
commit 33f39c20ff
19 changed files with 593 additions and 150 deletions
@@ -28,10 +28,16 @@ export async function fetchMessages(
return (data as unknown as { items?: Message[] })?.items ?? []
}
export interface SendMessageOverrides {
modelId?: string
reasoningEffort?: string
}
export async function sendLocalChannelMessage(
botId: string,
text: string,
attachments?: ChatAttachment[],
overrides?: SendMessageOverrides,
): Promise<void> {
const msg: ChannelMessage = {}
const trimmedText = text.trim()
@@ -46,9 +52,12 @@ export async function sendLocalChannelMessage(
name: item.name ?? '',
}))
}
const body: Record<string, unknown> = { message: msg }
if (overrides?.modelId) body.model_id = overrides.modelId
if (overrides?.reasoningEffort) body.reasoning_effort = overrides.reasoningEffort
await postBotsByBotIdWebMessages({
path: { bot_id: botId },
body: { message: msg },
body: body as { message: ChannelMessage; model_id?: string; reasoning_effort?: string },
throwOnError: true,
})
}
@@ -6,6 +6,8 @@ export interface WSClientMessage {
text?: string
session_id?: string
attachments?: ChatAttachment[]
model_id?: string
reasoning_effort?: string
}
export interface ChatWebSocket {
+9
View File
@@ -166,6 +166,15 @@
"toolSpawnCount": "{count} tasks",
"unknownUser": "{platform} User",
"files": "Files",
"modelOverride": "Model",
"modelDefault": "Default",
"reasoningEffort": "Reasoning",
"reasoningOff": "Off",
"reasoningNone": "None",
"reasoningLow": "Low",
"reasoningMedium": "Medium",
"reasoningHigh": "High",
"reasoningXHigh": "X-High",
"sessions": "Sessions",
"newSession": "New Session",
"deleteSession": "Delete Session",
+9
View File
@@ -162,6 +162,15 @@
"toolSpawnCount": "{count} 个任务",
"unknownUser": "{platform}用户",
"files": "文件管理",
"modelOverride": "模型",
"modelDefault": "默认",
"reasoningEffort": "推理",
"reasoningOff": "关闭",
"reasoningNone": "无",
"reasoningLow": "低",
"reasoningMedium": "中",
"reasoningHigh": "高",
"reasoningXHigh": "极高",
"sessions": "会话",
"newSession": "新建会话",
"deleteSession": "删除会话",
@@ -209,67 +209,40 @@
/>
</div>
<!-- Reasoning (only if chat model supports it) -->
<template v-if="chatModelSupportsReasoning">
<!-- Reasoning -->
<Separator />
<div class="space-y-4">
<div class="flex items-center justify-between">
<Label>{{ $t('bots.settings.reasoningEnabled') }}</Label>
<Switch
:model-value="form.reasoning_enabled"
@update:model-value="(val) => form.reasoning_enabled = !!val"
/>
</div>
<div
v-if="form.reasoning_enabled"
class="space-y-2"
>
<div class="space-y-2">
<Label>{{ $t('bots.settings.reasoningEffort') }}</Label>
<Select
:model-value="form.reasoning_effort"
@update:model-value="(val) => form.reasoning_effort = val ?? 'medium'"
<Popover v-model:open="reasoningPopoverOpen">
<PopoverTrigger as-child>
<Button
variant="outline"
role="combobox"
:disabled="!chatModelSupportsReasoning"
class="w-full justify-between font-normal"
>
<SelectTrigger>
<SelectValue />
</SelectTrigger>
<SelectContent>
<SelectGroup>
<SelectItem
v-if="availableReasoningEfforts.includes('none')"
value="none"
<span class="flex items-center gap-2">
<Lightbulb
class="size-3.5"
:style="{ opacity: EFFORT_OPACITY[reasoningFormValue] ?? 0.5 }"
/>
{{ reasoningFormValue === 'off' ? $t('chat.reasoningOff') : $t(EFFORT_LABELS[reasoningFormValue] ?? reasoningFormValue) }}
</span>
<ChevronDown class="size-3.5 shrink-0 text-muted-foreground" />
</Button>
</PopoverTrigger>
<PopoverContent
class="w-[--reka-popover-trigger-width] p-0"
align="start"
>
{{ $t('bots.settings.reasoningEffortNone') }}
</SelectItem>
<SelectItem
v-if="availableReasoningEfforts.includes('low')"
value="low"
>
{{ $t('bots.settings.reasoningEffortLow') }}
</SelectItem>
<SelectItem
v-if="availableReasoningEfforts.includes('medium')"
value="medium"
>
{{ $t('bots.settings.reasoningEffortMedium') }}
</SelectItem>
<SelectItem
v-if="availableReasoningEfforts.includes('high')"
value="high"
>
{{ $t('bots.settings.reasoningEffortHigh') }}
</SelectItem>
<SelectItem
v-if="availableReasoningEfforts.includes('xhigh')"
value="xhigh"
>
{{ $t('bots.settings.reasoningEffortXHigh') }}
</SelectItem>
</SelectGroup>
</SelectContent>
</Select>
<ReasoningEffortSelect
v-model="reasoningFormValue"
:efforts="availableReasoningEfforts"
@update:model-value="reasoningPopoverOpen = false"
/>
</PopoverContent>
</Popover>
</div>
</div>
</template>
<!-- Save -->
<div class="flex justify-end">
@@ -321,23 +294,22 @@
import {
Label,
Input,
Switch,
Button,
Separator,
Spinner,
Select,
SelectContent,
SelectGroup,
SelectItem,
SelectTrigger,
SelectValue,
Popover,
PopoverTrigger,
PopoverContent,
} from '@memohai/ui'
import { reactive, computed, watch } from 'vue'
import { Lightbulb, ChevronDown } from 'lucide-vue-next'
import { reactive, computed, ref, watch } from 'vue'
import { useRouter } from 'vue-router'
import { toast } from 'vue-sonner'
import { useI18n } from 'vue-i18n'
import ConfirmPopover from '@/components/confirm-popover/index.vue'
import ModelSelect from './model-select.vue'
import ReasoningEffortSelect from './reasoning-effort-select.vue'
import { EFFORT_LABELS, EFFORT_OPACITY } from './reasoning-effort'
import SearchProviderSelect from './search-provider-select.vue'
import MemoryProviderSelect from './memory-provider-select.vue'
import TtsModelSelect from './tts-model-select.vue'
@@ -542,6 +514,20 @@ watch(availableReasoningEfforts, (efforts) => {
}
}, { immediate: true })
const reasoningPopoverOpen = ref(false)
const reasoningFormValue = computed({
get: () => form.reasoning_enabled ? form.reasoning_effort : 'off',
set: (v: string) => {
if (v === 'off') {
form.reasoning_enabled = false
} else {
form.reasoning_enabled = true
form.reasoning_effort = v
}
},
})
const { data: memoryStatusData, isLoading: isMemoryStatusLoading } = useQuery({
key: () => ['bot-memory-status', botIdRef.value, persistedMemoryProviderID.value],
query: async () => {
@@ -0,0 +1,162 @@
<!--
  model-options.vue — searchable, provider-grouped model picker list.
  Designed to be embedded inside a popover (bot-settings model select and
  the chat-input model override both reuse it). v-model carries the selected
  model id; clicking an option emits update:modelValue with that id.
-->
<template>
<!-- Search box pinned above the scrollable option list -->
<div class="flex items-center border-b px-3">
<Search
class="mr-2 size-3.5 shrink-0 text-muted-foreground"
/>
<input
v-model="searchTerm"
:placeholder="$t('bots.settings.searchModel')"
aria-label="Search models"
class="flex h-10 w-full bg-transparent py-3 text-xs outline-none placeholder:text-muted-foreground"
>
</div>
<div
class="max-h-64 overflow-y-auto"
role="listbox"
>
<!-- Empty state when the search term matches no model -->
<div
v-if="filteredGroups.length === 0"
class="py-6 text-center text-xs text-muted-foreground"
>
{{ $t('bots.settings.noModel') }}
</div>
<!-- One section per provider group -->
<div
v-for="group in filteredGroups"
:key="group.key"
class="p-1"
>
<div
v-if="group.label"
class="px-2 py-1.5 text-xs font-medium text-muted-foreground"
>
{{ group.label }}
</div>
<button
v-for="option in group.items"
:key="option.value"
type="button"
role="option"
:aria-selected="modelValue === option.value"
class="relative flex w-full cursor-pointer items-center gap-2 rounded-md px-2 py-1.5 text-xs outline-none hover:bg-accent hover:text-accent-foreground"
:class="{ 'bg-accent': modelValue === option.value }"
@click="$emit('update:modelValue', option.value)"
>
<!-- Check mark for the selected option; empty span keeps alignment otherwise -->
<Check
v-if="modelValue === option.value"
class="size-3.5 shrink-0"
/>
<span
v-else
class="size-3.5 shrink-0"
/>
<span class="truncate">{{ option.label }}</span>
<!-- Right-aligned metadata: capability icons, context window, raw model id -->
<span class="ml-auto flex items-center gap-1.5">
<ModelCapabilities
v-if="option.compatibilities?.length"
:compatibilities="option.compatibilities"
/>
<ContextWindowBadge :context-window="option.contextWindow" />
<span
v-if="option.description"
class="text-xs text-muted-foreground"
>
{{ option.description }}
</span>
</span>
</button>
</div>
</div>
</template>
<script setup lang="ts">
import { computed, ref, watch } from 'vue'
import { Search, Check } from 'lucide-vue-next'
import type { ModelsGetResponse, ProvidersGetResponse } from '@memohai/sdk'
import ModelCapabilities from '@/components/model-capabilities/index.vue'
import ContextWindowBadge from '@/components/context-window-badge/index.vue'
// Flattened, display-ready view of a model record, tagged with its
// provider group so the template can bucket options by provider.
export interface ModelOption {
value: string
label: string
description?: string
groupKey: string
groupLabel: string
keywords: string[]
compatibilities?: string[]
contextWindow?: number
}
// `open` mirrors the hosting popover's open state; it only drives the
// search-reset watcher below.
const props = defineProps<{
models: ModelsGetResponse[]
providers: ProvidersGetResponse[]
modelType: 'chat' | 'embedding'
open?: boolean
}>()
defineEmits<{
'update:modelValue': [value: string]
}>()
// Selected model id (two-way bound via v-model).
const modelValue = defineModel<string>({ default: '' })
const searchTerm = ref('')
// Clear the previous search every time the hosting popover reopens.
watch(() => props.open, (v) => {
if (v) searchTerm.value = ''
})
// provider id -> display name (falls back to the id when name is missing).
const providerMap = computed(() => {
const map = new Map<string, string>()
for (const p of props.providers) {
if (p.id) map.set(p.id, p.name ?? p.id)
}
return map
})
// Only models of the requested type ('chat' or 'embedding') are shown.
const typeFilteredModels = computed(() =>
props.models.filter((m) => m.type === props.modelType),
)
const options = computed<ModelOption[]>(() =>
typeFilteredModels.value.map((model) => {
const providerId = model.llm_provider_id ?? ''
// config is loosely typed upstream; narrow to the two fields used here.
// NOTE(review): assumes compatibilities/context_window keys — confirm
// against the models API schema.
const config = model.config as { compatibilities?: string[]; context_window?: number } | undefined
return {
// value prefers the record id; label prefers the display name, and
// the raw model_id becomes the description only when a name exists.
value: model.id || model.model_id || '',
label: model.name || model.model_id || '',
description: model.name ? model.model_id : undefined,
groupKey: providerId,
groupLabel: providerMap.value.get(providerId) ?? providerId,
keywords: [model.model_id ?? '', model.name ?? ''],
compatibilities: config?.compatibilities,
contextWindow: config?.context_window,
}
}),
)
// Case-insensitive substring match over label, description, and keywords.
const filteredOptions = computed(() => {
const keyword = searchTerm.value.trim().toLowerCase()
if (!keyword) return options.value
return options.value.filter((opt) => {
const terms = [opt.label, opt.description, ...opt.keywords]
.filter((t): t is string => Boolean(t))
.join(' ')
.toLowerCase()
return terms.includes(keyword)
})
})
// Bucket the filtered options by provider, preserving first-seen group order.
const filteredGroups = computed(() => {
const groups = new Map<string, { key: string; label: string; items: ModelOption[] }>()
for (const opt of filteredOptions.value) {
if (!groups.has(opt.groupKey)) {
groups.set(opt.groupKey, { key: opt.groupKey, label: opt.groupLabel, items: [] })
}
groups.get(opt.groupKey)!.items.push(opt)
}
return Array.from(groups.values())
})
</script>
@@ -1,38 +1,42 @@
<template>
<SearchableSelectPopover
v-model="selected"
:options="options"
:placeholder="placeholder || ''"
<Popover v-model:open="open">
<PopoverTrigger as-child>
<Button
variant="outline"
role="combobox"
:aria-expanded="open"
:aria-label="placeholder || 'Select model'"
:search-placeholder="$t('bots.settings.searchModel')"
search-aria-label="Search models"
:empty-text="$t('bots.settings.noModel')"
class="w-full justify-between font-normal"
>
<template #option-suffix="{ option }">
<span class="ml-auto flex items-center gap-1.5">
<ModelCapabilities
v-if="optionMeta(option)?.compatibilities?.length"
:compatibilities="optionMeta(option)!.compatibilities!"
<span class="truncate">
{{ displayLabel || placeholder }}
</span>
<Search
class="ml-2 size-3.5 shrink-0 text-muted-foreground"
/>
<ContextWindowBadge :context-window="optionMeta(option)?.context_window" />
<span
v-if="option.description"
class="text-xs text-muted-foreground"
</Button>
</PopoverTrigger>
<PopoverContent
class="w-[--reka-popover-trigger-width] p-0"
align="start"
>
{{ option.description }}
</span>
</span>
</template>
</SearchableSelectPopover>
<ModelOptions
v-model="selected"
:models="models"
:providers="providers"
:model-type="modelType"
:open="open"
/>
</PopoverContent>
</Popover>
</template>
<script setup lang="ts">
import { computed } from 'vue'
import type { ModelsGetResponse, ModelsModelConfig, ProvidersGetResponse } from '@memohai/sdk'
import SearchableSelectPopover from '@/components/searchable-select-popover/index.vue'
import type { SearchableSelectOption } from '@/components/searchable-select-popover/index.vue'
import ModelCapabilities from '@/components/model-capabilities/index.vue'
import ContextWindowBadge from '@/components/context-window-badge/index.vue'
import { computed, ref, watch } from 'vue'
import { Search } from 'lucide-vue-next'
import { Popover, PopoverTrigger, PopoverContent, Button } from '@memohai/ui'
import type { ModelsGetResponse, ProvidersGetResponse } from '@memohai/sdk'
import ModelOptions from './model-options.vue'
const props = defineProps<{
models: ModelsGetResponse[]
@@ -42,35 +46,14 @@ const props = defineProps<{
}>()
const selected = defineModel<string>({ default: '' })
const open = ref(false)
const typeFilteredModels = computed(() =>
props.models.filter((m) => m.type === props.modelType),
)
const providerMap = computed(() => {
const map = new Map<string, string>()
for (const p of props.providers) {
map.set(p.id, p.name ?? p.id)
}
return map
watch(selected, () => {
open.value = false
})
function optionMeta(option: SearchableSelectOption): ModelsModelConfig | undefined {
return option.meta as ModelsModelConfig | undefined
}
const options = computed<SearchableSelectOption[]>(() =>
typeFilteredModels.value.map((model) => {
const providerId = model.llm_provider_id
return {
value: model.id || model.model_id,
label: model.name || model.model_id,
description: model.name ? model.model_id : undefined,
group: providerId,
groupLabel: providerMap.value.get(providerId) ?? providerId,
keywords: [model.model_id, model.name ?? ''],
meta: model.config,
}
}),
)
const displayLabel = computed(() => {
const model = props.models.find((m) => (m.id || m.model_id) === selected.value)
return model?.name || model?.model_id || selected.value
})
</script>
@@ -0,0 +1,49 @@
<!--
  reasoning-effort-select.vue — flat listbox of reasoning-effort options.
  Renders a fixed "Off" entry followed by one button per key in `efforts`;
  v-model holds the current selection ('off' or an effort key) and each
  click emits update:modelValue so the host can also close its popover.
-->
<template>
<div
class="flex flex-col gap-0.5 p-1"
role="listbox"
>
<!-- Fixed "Off" option, always rendered ahead of the effort levels -->
<button
type="button"
role="option"
:aria-selected="modelValue === 'off'"
class="flex w-full items-center gap-2 rounded-md px-2 py-1.5 text-xs hover:bg-accent hover:text-accent-foreground"
:class="{ 'bg-accent': modelValue === 'off' }"
@click="$emit('update:modelValue', 'off')"
>
<Lightbulb class="size-3.5 shrink-0 opacity-10" />
{{ $t('chat.reasoningOff') }}
</button>
<!-- One option per available effort; bulb opacity scales with effort -->
<button
v-for="effort in efforts"
:key="effort"
type="button"
role="option"
:aria-selected="modelValue === effort"
class="flex w-full items-center gap-2 rounded-md px-2 py-1.5 text-xs hover:bg-accent hover:text-accent-foreground"
:class="{ 'bg-accent': modelValue === effort }"
@click="$emit('update:modelValue', effort)"
>
<Lightbulb
class="size-3.5 shrink-0"
:style="{ opacity: EFFORT_OPACITY[effort] ?? 0.5 }"
/>
<!-- Label falls back to the raw effort key when no i18n mapping exists -->
{{ $t(EFFORT_LABELS[effort] ?? effort) }}
</button>
</div>
</template>
<script setup lang="ts">
import { Lightbulb } from 'lucide-vue-next'
import { EFFORT_LABELS, EFFORT_OPACITY } from './reasoning-effort'
// efforts: ordered effort keys to display (e.g. ['low', 'medium', 'high']).
defineProps<{
efforts: string[]
}>()
defineEmits<{
'update:modelValue': [value: string]
}>()
// Current selection: 'off' or one of the `efforts` keys.
const modelValue = defineModel<string>({ default: '' })
</script>
@@ -0,0 +1,16 @@
/**
 * Shared lookup tables for the reasoning-effort UI controls.
 *
 * EFFORT_LABELS maps an effort key to its i18n message key; EFFORT_OPACITY
 * maps the same keys (plus 'off') to the Lightbulb icon opacity used by the
 * selectors. Consumers index with arbitrary strings and rely on `??`
 * fallbacks, so both stay typed as open `Record<string, …>` rather than
 * literal-keyed objects.
 */
export const EFFORT_LABELS: Record<string, string> = {
  // 'off' is included for parity with EFFORT_OPACITY; existing callers
  // special-case 'off' before lookup, so this entry is purely additive.
  off: 'chat.reasoningOff',
  none: 'chat.reasoningNone',
  low: 'chat.reasoningLow',
  medium: 'chat.reasoningMedium',
  high: 'chat.reasoningHigh',
  xhigh: 'chat.reasoningXHigh',
}

// Icon opacity per effort level: a brighter bulb means more reasoning.
export const EFFORT_OPACITY: Record<string, number> = {
  off: 0.1,
  none: 0.15,
  low: 0.35,
  medium: 0.6,
  high: 0.85,
  xhigh: 1,
}
@@ -141,6 +141,65 @@
align="block-end"
class="items-center py-1.5"
>
<!-- Model override selector -->
<Popover v-model:open="modelPopoverOpen">
<PopoverTrigger as-child>
<Button
type="button"
size="sm"
variant="ghost"
:disabled="!currentBotId || activeChatReadOnly"
class="gap-0.5 text-muted-foreground max-w-40"
>
<span class="truncate text-[11px]">{{ selectedModelLabel }}</span>
<ChevronDown class="size-3 shrink-0 opacity-50" />
</Button>
</PopoverTrigger>
<PopoverContent
class="w-96 p-0"
align="start"
>
<ModelOptions
v-model="overrideModelId"
:models="models"
:providers="providers"
model-type="chat"
:open="modelPopoverOpen"
@update:model-value="onModelSelected"
/>
</PopoverContent>
</Popover>
<!-- Reasoning effort selector -->
<Popover v-model:open="reasoningPopoverOpen">
<PopoverTrigger as-child>
<Button
type="button"
size="sm"
variant="ghost"
:disabled="!currentBotId || activeChatReadOnly || !activeModelSupportsReasoning"
class="gap-0.5 text-muted-foreground"
>
<Lightbulb
class="size-3.5 shrink-0"
:style="{ opacity: reasoningTriggerOpacity }"
/>
<span class="text-[11px]">{{ selectedReasoningLabel }}</span>
<ChevronDown class="size-3 shrink-0 opacity-50" />
</Button>
</PopoverTrigger>
<PopoverContent
class="w-40 p-0"
align="start"
>
<ReasoningEffortSelect
v-model="overrideReasoningEffort"
:efforts="availableReasoningEfforts"
@update:model-value="onReasoningSelected"
/>
</PopoverContent>
</Popover>
<Button
type="button"
size="sm"
@@ -242,25 +301,35 @@
</template>
<script setup lang="ts">
import { ref, computed, nextTick, onMounted, onBeforeUnmount, provide, useTemplateRef, watchEffect } from 'vue'
import { ref, computed, nextTick, onMounted, onBeforeUnmount, provide, useTemplateRef, watchEffect, watch } from 'vue'
import { useLocalStorage } from '@vueuse/core'
import { LoaderCircle, Image as ImageIcon, File as FileIcon, X, Paperclip, FolderOpen, Send } from 'lucide-vue-next'
import { ScrollArea, Button, InputGroup, InputGroupAddon, InputGroupTextarea } from '@memohai/ui'
import { LoaderCircle, Image as ImageIcon, File as FileIcon, X, Paperclip, FolderOpen, Send, ChevronDown, Lightbulb } from 'lucide-vue-next'
import { ScrollArea, Button, InputGroup, InputGroupAddon, InputGroupTextarea, Popover, PopoverContent, PopoverTrigger } from '@memohai/ui'
import { useChatStore } from '@/store/chat-list'
import { storeToRefs } from 'pinia'
import MessageItem from './message-item.vue'
import MediaGalleryLightbox from './media-gallery-lightbox.vue'
import FileManager from '@/components/file-manager/index.vue'
import ModelOptions from '@/pages/bots/components/model-options.vue'
import ReasoningEffortSelect from '@/pages/bots/components/reasoning-effort-select.vue'
import { EFFORT_LABELS, EFFORT_OPACITY } from '@/pages/bots/components/reasoning-effort'
import { useMediaGallery } from '../composables/useMediaGallery'
import { openInFileManagerKey } from '../composables/useFileManagerProvider'
import type { ChatAttachment } from '@/composables/api/useChat'
import { useScroll, useElementBounding } from '@vueuse/core'
import { useQuery } from '@pinia/colada'
import { getModels, getProviders, getBotsByBotIdSettings } from '@memohai/sdk'
import type { ModelsGetResponse, ProvidersGetResponse } from '@memohai/sdk'
import { useI18n } from 'vue-i18n'
const { t } = useI18n()
const chatStore = useChatStore()
const fileInput = ref<HTMLInputElement | null>(null)
const pendingFiles = ref<File[]>([])
const fileManagerOpen = ref(false)
const fileManagerRef = ref<InstanceType<typeof FileManager> | null>(null)
const modelPopoverOpen = ref(false)
const reasoningPopoverOpen = ref(false)
const FM_MIN_WIDTH = 320
const FM_MAX_WIDTH = 800
@@ -334,8 +403,104 @@ const {
loadingOlder,
loadingChats,
hasMoreOlder,
overrideModelId,
overrideReasoningEffort,
} = storeToRefs(chatStore)
const { data: modelData } = useQuery({
key: ['all-models'],
query: async () => {
const { data } = await getModels({ throwOnError: true })
return data
},
})
const { data: providerData } = useQuery({
key: ['all-providers'],
query: async () => {
const { data } = await getProviders({ throwOnError: true })
return data
},
})
const { data: botSettings } = useQuery({
key: () => ['bot-settings', currentBotId.value],
query: async () => {
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const { data } = await (getBotsByBotIdSettings as any)({
path: { bot_id: currentBotId.value! },
throwOnError: true,
})
return data as import('@memohai/sdk').SettingsSettings | undefined
},
enabled: () => !!currentBotId.value,
})
const models = computed<ModelsGetResponse[]>(() => modelData.value ?? [])
const providers = computed<ProvidersGetResponse[]>(() => providerData.value ?? [])
const activeModel = computed(() => {
const id = overrideModelId.value || botSettings.value?.chat_model_id || ''
return models.value.find((m) => m.id === id)
})
const activeModelSupportsReasoning = computed(() =>
!!activeModel.value?.config?.compatibilities?.includes('reasoning'),
)
const availableReasoningEfforts = computed(() => {
const efforts = ((activeModel.value?.config as { reasoning_efforts?: string[] } | undefined)?.reasoning_efforts ?? [])
.filter((e) => ['none', 'low', 'medium', 'high', 'xhigh'].includes(e))
return efforts.length > 0 ? efforts : ['low', 'medium', 'high']
})
const selectedModelLabel = computed(() => {
const m = models.value.find((m) => m.id === overrideModelId.value)
return m?.name || m?.model_id || t('chat.modelDefault')
})
const selectedReasoningLabel = computed(() => {
const v = overrideReasoningEffort.value
if (v === 'off') return t('chat.reasoningOff')
return t(EFFORT_LABELS[v] ?? 'chat.modelDefault')
})
const reasoningTriggerOpacity = computed(() =>
EFFORT_OPACITY[overrideReasoningEffort.value] ?? 0.5,
)
function initFromBotSettings() {
if (!botSettings.value) return
if (!overrideModelId.value) {
overrideModelId.value = botSettings.value.chat_model_id ?? ''
}
if (!overrideReasoningEffort.value) {
if (botSettings.value.reasoning_enabled && botSettings.value.reasoning_effort) {
overrideReasoningEffort.value = botSettings.value.reasoning_effort
} else {
overrideReasoningEffort.value = 'off'
}
}
}
watch(botSettings, () => initFromBotSettings(), { immediate: true })
watch(currentBotId, () => {
overrideModelId.value = ''
overrideReasoningEffort.value = ''
})
function onModelSelected() {
modelPopoverOpen.value = false
if (!activeModelSupportsReasoning.value) {
overrideReasoningEffort.value = 'off'
}
}
function onReasoningSelected() {
reasoningPopoverOpen.value = false
}
const {
items: galleryItems,
openIndex: galleryOpenIndex,
+9 -4
View File
@@ -106,8 +106,8 @@ export const useChatStore = defineStore('chat', () => {
const hasMoreOlder = ref(true)
const initializing = ref(false)
const bots = ref<Bot[]>([])
const overrideModelId = ref<string>('')
const overrideReasoningEffort = ref<string>('')
let abortFn: (() => void) | null = null
let messageEventsSince = ''
@@ -867,10 +867,13 @@ export const useChatStore = defineStore('chat', () => {
rejectPendingAssistantStream(abortError)
}
const modelId = overrideModelId.value || undefined
const re = overrideReasoningEffort.value
const reasoningEffort = (re && re !== 'off') ? re : undefined
if (activeWs?.connected) {
activeWs.send({ type: 'message', text: trimmed, session_id: sid, attachments })
activeWs.send({ type: 'message', text: trimmed, session_id: sid, attachments, model_id: modelId, reasoning_effort: reasoningEffort })
} else {
await sendLocalChannelMessage(bid, trimmed, attachments)
await sendLocalChannelMessage(bid, trimmed, attachments, { modelId, reasoningEffort })
}
await completion
@@ -927,6 +930,8 @@ export const useChatStore = defineStore('chat', () => {
loadingOlder,
hasMoreOlder,
initializing,
overrideModelId,
overrideReasoningEffort,
initialize,
selectBot,
selectSession,
+9 -2
View File
@@ -516,7 +516,7 @@ func (p *ChannelInboundProcessor) HandleInbound(ctx context.Context, cfg channel
return result
}
chunkCh, streamErrCh := p.runner.StreamChat(ctx, conversation.ChatRequest{
chatReq := conversation.ChatRequest{
BotID: identity.BotID,
ChatID: activeChatID,
SessionID: sessionID,
@@ -536,7 +536,14 @@ func (p *ChannelInboundProcessor) HandleInbound(ctx context.Context, cfg channel
UserMessagePersisted: false,
Attachments: attachments,
OutboundAssetCollector: assetCollector,
})
}
if mid, _ := msg.Metadata["model_id"].(string); strings.TrimSpace(mid) != "" {
chatReq.Model = strings.TrimSpace(mid)
}
if re, _ := msg.Metadata["reasoning_effort"].(string); strings.TrimSpace(re) != "" {
chatReq.ReasoningEffort = strings.TrimSpace(re)
}
chunkCh, streamErrCh := p.runner.StreamChat(ctx, chatReq)
var (
finalMessages []conversation.ModelMessage
+5 -1
View File
@@ -234,9 +234,13 @@ func (r *Resolver) resolve(ctx context.Context, req conversation.ChatRequest) (r
inlineImages := extractNativeImageParts(mergedAttachments)
reasoningEffort := ""
if chatModel.HasCompatibility(models.CompatReasoning) && botSettings.ReasoningEnabled {
if chatModel.HasCompatibility(models.CompatReasoning) {
if re := strings.TrimSpace(req.ReasoningEffort); re != "" {
reasoningEffort = re
} else if botSettings.ReasoningEnabled {
reasoningEffort = botSettings.ReasoningEffort
}
}
var reasoningConfig *models.ReasoningConfig
if reasoningEffort != "" {
+1
View File
@@ -244,6 +244,7 @@ type ChatRequest struct {
Query string `json:"query"`
Model string `json:"model,omitempty"`
Provider string `json:"provider,omitempty"`
ReasoningEffort string `json:"reasoning_effort,omitempty"`
Channels []string `json:"channels,omitempty"`
CurrentChannel string `json:"current_channel,omitempty"`
Messages []ModelMessage `json:"messages,omitempty"`
+18
View File
@@ -169,6 +169,8 @@ func formatLocalStreamEvent(event channel.StreamEvent) ([]byte, error) {
// LocalChannelMessageRequest is the request body for posting a local channel message.
type LocalChannelMessageRequest struct {
Message channel.Message `json:"message"`
ModelID string `json:"model_id,omitempty"`
ReasoningEffort string `json:"reasoning_effort,omitempty"`
}
// PostMessage godoc
@@ -234,6 +236,18 @@ func (h *LocalChannelHandler) PostMessage(c echo.Context) error {
ReceivedAt: time.Now().UTC(),
Source: "local",
}
if mid := strings.TrimSpace(req.ModelID); mid != "" {
if msg.Metadata == nil {
msg.Metadata = make(map[string]any)
}
msg.Metadata["model_id"] = mid
}
if re := strings.TrimSpace(req.ReasoningEffort); re != "" {
if msg.Metadata == nil {
msg.Metadata = make(map[string]any)
}
msg.Metadata["reasoning_effort"] = re
}
if err := h.channelManager.HandleInbound(c.Request().Context(), cfg, msg); err != nil {
return echo.NewHTTPError(http.StatusInternalServerError, err.Error())
}
@@ -249,6 +263,8 @@ type wsClientMessage struct {
Text string `json:"text,omitempty"`
SessionID string `json:"session_id,omitempty"`
Attachments []json.RawMessage `json:"attachments,omitempty"`
ModelID string `json:"model_id,omitempty"`
ReasoningEffort string `json:"reasoning_effort,omitempty"`
}
// wsWriter serialises all WebSocket writes through a single goroutine to
@@ -419,6 +435,8 @@ func (h *LocalChannelHandler) HandleWebSocket(c echo.Context) error {
CurrentChannel: h.channelType.String(),
Channels: []string{h.channelType.String()},
Attachments: chatAttachments,
Model: strings.TrimSpace(msg.ModelID),
ReasoningEffort: strings.TrimSpace(msg.ReasoningEffort),
}
if streamErr := h.resolver.StreamChatWS(streamCtx, req, eventCh, abortCh); streamErr != nil {
if ctx.Err() == nil {
+2
View File
@@ -835,6 +835,8 @@ export type HandlersListSnapshotsResponse = {
export type HandlersLocalChannelMessageRequest = {
message?: ChannelMessage;
model_id?: string;
reasoning_effort?: string;
};
export type HandlersLoginRequest = {
+6
View File
@@ -10855,6 +10855,12 @@ const docTemplate = `{
"properties": {
"message": {
"$ref": "#/definitions/channel.Message"
},
"model_id": {
"type": "string"
},
"reasoning_effort": {
"type": "string"
}
}
},
+6
View File
@@ -10846,6 +10846,12 @@
"properties": {
"message": {
"$ref": "#/definitions/channel.Message"
},
"model_id": {
"type": "string"
},
"reasoning_effort": {
"type": "string"
}
}
},
+4
View File
@@ -1375,6 +1375,10 @@ definitions:
properties:
message:
$ref: '#/definitions/channel.Message'
model_id:
type: string
reasoning_effort:
type: string
type: object
handlers.LoginRequest:
properties: