refactor: move client_type key from provider to model

This commit is contained in:
Acbox
2026-02-18 18:30:27 +08:00
parent 77e9f585a1
commit d6c47472b2
43 changed files with 552 additions and 1015 deletions
+24 -23
View File
@@ -81,14 +81,13 @@ const ensureModelsReady = async () => {
}
const renderProvidersTable = (providers: ProvidersGetResponse[], models: ModelsGetResponse[]) => {
const rows: string[][] = [['Provider', 'Type', 'Base URL', 'Models']]
const rows: string[][] = [['Provider', 'Base URL', 'Models']]
for (const provider of providers) {
const providerModels = models
.filter(m => getProviderId(m) === provider.id)
.map(m => `${getModelId(m)} (${getModelType(m)})`)
rows.push([
provider.name ?? '',
provider.client_type ?? '',
provider.base_url ?? '',
providerModels.join(', ') || '-',
])
@@ -259,21 +258,12 @@ provider
.command('create')
.description('Create provider')
.option('--name <name>')
.option('--type <type>')
.option('--base_url <url>')
.option('--api_key <key>')
.action(async (opts) => {
ensureAuth()
const questions = []
if (!opts.name) questions.push({ type: 'input', name: 'name', message: 'Provider name:' })
if (!opts.type) {
questions.push({
type: 'list',
name: 'client_type',
message: 'Client type:',
choices: ['openai', 'openai-compat', 'anthropic', 'google', 'azure', 'bedrock', 'mistral', 'xai', 'ollama', 'dashscope'],
})
}
if (!opts.base_url) questions.push({ type: 'input', name: 'base_url', message: 'Base URL:' })
if (!opts.api_key) questions.push({ type: 'password', name: 'api_key', message: 'API key:' })
const answers = questions.length ? await inquirer.prompt(questions) : {}
@@ -282,7 +272,6 @@ provider
await postProviders({
body: {
name: opts.name ?? answers.name,
client_type: opts.type ?? answers.client_type,
base_url: opts.base_url ?? answers.base_url,
api_key: opts.api_key ?? answers.api_key,
},
@@ -349,6 +338,7 @@ model
.option('--model_id <model_id>')
.option('--name <name>')
.option('--provider <provider>')
.option('--client_type <client_type>', 'Client type: openai-responses, openai-completions, anthropic-messages, google-generative-ai')
.option('--type <type>')
.option('--dimensions <dimensions>')
.option('--multimodal', 'Is multimodal')
@@ -376,6 +366,16 @@ model
const answers = questions.length ? await inquirer.prompt(questions) : {}
const modelId = opts.model_id ?? answers.model_id
const modelType = opts.type ?? answers.type
let clientType = opts.client_type
if (modelType === 'chat' && !clientType) {
const ctAnswer = await inquirer.prompt([{
type: 'list',
name: 'client_type',
message: 'Client type:',
choices: ['openai-responses', 'openai-completions', 'anthropic-messages', 'google-generative-ai'],
}])
clientType = ctAnswer.client_type
}
let dimensions = opts.dimensions ? Number.parseInt(opts.dimensions, 10) : undefined
if (modelType === 'embedding' && (!dimensions || Number.isNaN(dimensions))) {
const dimAnswer = await inquirer.prompt([{
@@ -392,17 +392,18 @@ model
const inputModalities = opts.multimodal ? ['text', 'image'] : ['text']
const spinner = ora('Creating model...').start()
try {
await postModels({
body: {
model_id: modelId,
name: opts.name ?? modelId,
llm_provider_id: provider.id,
input_modalities: inputModalities,
type: modelType,
dimensions,
},
throwOnError: true,
})
const body: Record<string, unknown> = {
model_id: modelId,
name: opts.name ?? modelId,
llm_provider_id: provider.id,
input_modalities: inputModalities,
type: modelType,
dimensions,
}
if (modelType === 'chat' && clientType) {
body.client_type = clientType
}
await postModels({ body: body as any, throwOnError: true })
spinner.succeed('Model created')
} catch (err: unknown) {
spinner.fail(getErrorMessage(err) || 'Failed to create model')
-1
View File
@@ -12,7 +12,6 @@ export type {
ChannelChannelIdentityBinding,
ModelsGetResponse,
ModelsModelType,
ProvidersClientType,
ProvidersGetResponse,
ScheduleListResponse,
ScheduleSchedule,
+2 -2
View File
@@ -1363,7 +1363,7 @@ export const getProvidersQueryKey = (options?: Options<GetProvidersData>) => cre
/**
* List all LLM providers
*
* Get a list of all configured LLM providers, optionally filtered by client type
* Get a list of all configured LLM providers
*/
export const getProvidersQuery = defineQueryOptions((options?: Options<GetProvidersData>) => ({
key: getProvidersQueryKey(options),
@@ -1398,7 +1398,7 @@ export const getProvidersCountQueryKey = (options?: Options<GetProvidersCountDat
/**
* Count providers
*
* Get the total count of providers, optionally filtered by client type
* Get the total count of providers
*/
export const getProvidersCountQuery = defineQueryOptions((options?: Options<GetProvidersCountData>) => ({
key: getProvidersCountQueryKey(options),
File diff suppressed because one or more lines are too long
+2 -2
View File
@@ -813,7 +813,7 @@ export const putModelsById = <ThrowOnError extends boolean = false>(options: Opt
/**
* List all LLM providers
*
* Get a list of all configured LLM providers, optionally filtered by client type
* Get a list of all configured LLM providers
*/
export const getProviders = <ThrowOnError extends boolean = false>(options?: Options<GetProvidersData, ThrowOnError>) => (options?.client ?? client).get<GetProvidersResponses, GetProvidersErrors, ThrowOnError>({ url: '/providers', ...options });
@@ -834,7 +834,7 @@ export const postProviders = <ThrowOnError extends boolean = false>(options: Opt
/**
* Count providers
*
* Get the total count of providers, optionally filtered by client type
* Get the total count of providers
*/
export const getProvidersCount = <ThrowOnError extends boolean = false>(options?: Options<GetProvidersCountData, ThrowOnError>) => (options?.client ?? client).get<GetProvidersCountResponses, GetProvidersCountErrors, ThrowOnError>({ url: '/providers/count', ...options });
+8 -26
View File
@@ -691,6 +691,7 @@ export type MessageMessageAsset = {
};
export type ModelsAddRequest = {
client_type?: ModelsClientType;
dimensions?: number;
input_modalities?: Array<string>;
llm_provider_id?: string;
@@ -704,11 +705,14 @@ export type ModelsAddResponse = {
model_id?: string;
};
export type ModelsClientType = 'openai-responses' | 'openai-completions' | 'anthropic-messages' | 'google-generative-ai';
export type ModelsCountResponse = {
count?: number;
};
export type ModelsGetResponse = {
client_type?: ModelsClientType;
dimensions?: number;
input_modalities?: Array<string>;
llm_provider_id?: string;
@@ -720,6 +724,7 @@ export type ModelsGetResponse = {
export type ModelsModelType = 'chat' | 'embedding';
export type ModelsUpdateRequest = {
client_type?: ModelsClientType;
dimensions?: number;
input_modalities?: Array<string>;
llm_provider_id?: string;
@@ -728,8 +733,6 @@ export type ModelsUpdateRequest = {
type?: ModelsModelType;
};
export type ProvidersClientType = 'openai' | 'openai-compat' | 'anthropic' | 'google' | 'azure' | 'bedrock' | 'mistral' | 'xai' | 'ollama' | 'dashscope';
export type ProvidersCountResponse = {
count?: number;
};
@@ -737,7 +740,6 @@ export type ProvidersCountResponse = {
export type ProvidersCreateRequest = {
api_key?: string;
base_url: string;
client_type: ProvidersClientType;
metadata?: {
[key: string]: unknown;
};
@@ -750,7 +752,6 @@ export type ProvidersGetResponse = {
*/
api_key?: string;
base_url?: string;
client_type?: string;
created_at?: string;
id?: string;
metadata?: {
@@ -763,7 +764,6 @@ export type ProvidersGetResponse = {
export type ProvidersUpdateRequest = {
api_key?: string;
base_url?: string;
client_type?: ProvidersClientType;
metadata?: {
[key: string]: unknown;
};
@@ -3671,7 +3671,7 @@ export type GetModelsData = {
*/
type?: string;
/**
* Client type (openai, openai-compat, anthropic, google, azure, bedrock, mistral, xai, ollama, dashscope)
* Client type (openai-responses, openai-completions, anthropic-messages, google-generative-ai)
*/
client_type?: string;
};
@@ -3999,20 +3999,11 @@ export type PutModelsByIdResponse = PutModelsByIdResponses[keyof PutModelsByIdRe
export type GetProvidersData = {
body?: never;
path?: never;
query?: {
/**
* Client type filter (openai, openai-compat, anthropic, google, azure, bedrock, mistral, xai, ollama, dashscope)
*/
client_type?: string;
};
query?: never;
url: '/providers';
};
export type GetProvidersErrors = {
/**
* Bad Request
*/
400: HandlersErrorResponse;
/**
* Internal Server Error
*/
@@ -4065,20 +4056,11 @@ export type PostProvidersResponse = PostProvidersResponses[keyof PostProvidersRe
export type GetProvidersCountData = {
body?: never;
path?: never;
query?: {
/**
* Client type filter (openai, openai-compat, anthropic, google, azure, bedrock, mistral, xai, ollama, dashscope)
*/
client_type?: string;
};
query?: never;
url: '/providers/count';
};
export type GetProvidersCountErrors = {
/**
* Bad Request
*/
400: HandlersErrorResponse;
/**
* Internal Server Error
*/
@@ -73,34 +73,6 @@
</FormControl>
</FormItem>
</FormField>
<FormField
v-slot="{ componentField }"
name="client_type"
>
<FormItem>
<Label class="mb-2">
{{ $t('common.type') }}
</Label>
<FormControl>
<Select v-bind="componentField">
<SelectTrigger class="w-full">
<SelectValue :placeholder="$t('common.typePlaceholder')" />
</SelectTrigger>
<SelectContent>
<SelectGroup>
<SelectItem
v-for="type in CLIENT_TYPES"
:key="type"
:value="type"
>
{{ type }}
</SelectItem>
</SelectGroup>
</SelectContent>
</Select>
</FormControl>
</FormItem>
</FormField>
</div>
<DialogFooter class="mt-8">
<DialogClose as-child>
@@ -139,12 +111,6 @@ import {
FormControl,
FormItem,
DialogDescription,
Select,
SelectTrigger,
SelectValue,
SelectContent,
SelectGroup,
SelectItem,
Separator,
Label,
Spinner,
@@ -154,12 +120,6 @@ import z from 'zod'
import { useForm } from 'vee-validate'
import { useMutation, useQueryCache } from '@pinia/colada'
import { postProviders } from '@memoh/sdk'
import type { ProvidersClientType } from '@memoh/sdk'
const CLIENT_TYPES: ProvidersClientType[] = [
'openai', 'openai-compat', 'anthropic', 'google',
'azure', 'bedrock', 'mistral', 'xai', 'ollama', 'dashscope',
]
const open = defineModel<boolean>('open')
@@ -175,7 +135,6 @@ const { mutate: providerFetch, isLoading } = useMutation({
const providerSchema = toTypedSchema(z.object({
api_key: z.string().min(1),
base_url: z.string().min(1),
client_type: z.string().min(1),
name: z.string().min(1),
metadata: z.object({
additionalProp1: z.object({}),
@@ -46,6 +46,55 @@
</FormItem>
</FormField>
<!-- Client Type (chat only) -->
<div v-if="selectedType === 'chat'">
<Label class="mb-2">
{{ $t('models.clientType') }}
</Label>
<Popover v-model:open="clientTypeOpen">
<PopoverTrigger as-child>
<Button
variant="outline"
role="combobox"
:aria-expanded="clientTypeOpen"
class="w-full justify-between font-normal mt-2"
>
<span class="truncate">
{{ selectedClientTypeLabel || $t('models.clientTypePlaceholder') }}
</span>
<FontAwesomeIcon
:icon="['fas', 'chevron-down']"
class="ml-2 size-3 shrink-0 text-muted-foreground"
/>
</Button>
</PopoverTrigger>
<PopoverContent
class="w-[--reka-popover-trigger-width] p-1"
align="start"
>
<button
v-for="ct in CLIENT_TYPE_LIST"
:key="ct.value"
class="relative flex w-full cursor-pointer items-center gap-2 rounded-md px-2 py-1.5 text-sm outline-none hover:bg-accent hover:text-accent-foreground"
:class="{ 'bg-accent': form.values.client_type === ct.value }"
@click="selectClientType(ct.value)"
>
<FontAwesomeIcon
v-if="form.values.client_type === ct.value"
:icon="['fas', 'check']"
class="size-3.5"
/>
<span
v-else
class="size-3.5"
/>
<span class="truncate">{{ ct.label }}</span>
<span class="ml-auto text-xs text-muted-foreground">{{ ct.hint }}</span>
</button>
</PopoverContent>
</Popover>
</div>
<!-- Model -->
<FormField
v-slot="{ componentField }"
@@ -167,6 +216,9 @@ import {
SelectTrigger,
SelectValue,
FormItem,
Popover,
PopoverTrigger,
PopoverContent,
Checkbox,
Separator,
Label,
@@ -179,12 +231,14 @@ import z from 'zod'
import { useMutation, useQueryCache } from '@pinia/colada'
import { postModels, putModelsModelByModelId } from '@memoh/sdk'
import type { ModelsGetResponse } from '@memoh/sdk'
import { CLIENT_TYPE_LIST, CLIENT_TYPE_META } from '@/constants/client-types'
const availableInputModalities = ['text', 'image', 'audio', 'video', 'file'] as const
const selectedModalities = ref<string[]>(['text'])
const formSchema = toTypedSchema(z.object({
type: z.string().min(1),
client_type: z.string().optional(),
model_id: z.string().min(1),
name: z.string().optional(),
dimensions: z.coerce.number().min(1).optional(),
@@ -192,20 +246,36 @@ const formSchema = toTypedSchema(z.object({
const form = useForm({
validationSchema: formSchema,
initialValues: {
type: 'chat',
},
})
const selectedType = computed(() => form.values.type || editInfo?.value?.type)
const selectedType = computed(() => form.values.type || 'chat')
const clientTypeOpen = ref(false)
const selectedClientTypeLabel = computed(() => {
const ct = form.values.client_type
if (!ct) return ''
return CLIENT_TYPE_META[ct]?.label ?? ct
})
function selectClientType(value: string) {
form.setFieldValue('client_type', value)
clientTypeOpen.value = false
}
const open = inject<Ref<boolean>>('openModel', ref(false))
const title = inject<Ref<'edit' | 'title'>>('openModelTitle', ref('title'))
const editInfo = inject<Ref<ModelsGetResponse | null>>('openModelState', ref(null))
// NOTE(review): comment text was garbled (non-ASCII stripped). Reconstructed from code:
// submission does not go through form.handleSubmit; validity is gated manually by canSubmit below.
const canSubmit = computed(() => {
if (title.value === 'edit') return true
const { type, model_id } = form.values
return !!type && !!model_id
const { type, model_id, client_type } = form.values
if (!type || !model_id) return false
if (type === 'chat' && !client_type) return false
return true
})
function toggleModality(mod: string, checked: boolean) {
@@ -216,14 +286,6 @@ function toggleModality(mod: string, checked: boolean) {
}
}
const emptyValues = {
type: '' as string,
model_id: '' as string,
name: '' as string,
dimensions: undefined as number | undefined,
}
// NOTE(review): garbled comment reconstructed — auto-fill Display Name from Model ID
// until the user edits the name manually (tracked by userEditedName).
const userEditedName = ref(false)
watch(
@@ -269,14 +331,16 @@ async function addModel(e: Event) {
const isEdit = title.value === 'edit' && !!editInfo?.value
const fallback = editInfo?.value
// NOTE(review): garbled comments reconstructed — merge form.values with editInfo fallbacks,
// since reopening the Dialog can remount fields and reset vee-validate state.
const type = form.values.type || (isEdit ? fallback!.type : '')
const type = form.values.type || (isEdit ? fallback!.type : 'chat')
const client_type = type === 'chat'
? (form.values.client_type || (isEdit ? fallback!.client_type : ''))
: undefined
const model_id = form.values.model_id || (isEdit ? fallback!.model_id : '')
const name = form.values.name ?? (isEdit ? fallback!.name : '')
const dimensions = form.values.dimensions ?? (isEdit ? fallback!.dimensions : undefined)
if (!type || !model_id) return
if (type === 'chat' && !client_type) return
try {
const payload: Record<string, unknown> = {
@@ -285,6 +349,10 @@ async function addModel(e: Event) {
llm_provider_id: id,
}
if (type === 'chat' && client_type) {
payload.client_type = client_type
}
if (name) {
payload.name = name
}
@@ -315,20 +383,26 @@ watch(open, async () => {
return
}
// NOTE(review): garbled comment reconstructed — wait for the Dialog's FormFields to mount
// (nextTick) before resetting the form, so resetForm targets live fields.
await nextTick()
if (editInfo?.value) {
const { type, model_id, name, dimensions, input_modalities } = editInfo.value
form.resetForm({ values: { type, model_id, name, dimensions } })
const { client_type, type, model_id, name, dimensions, input_modalities } = editInfo.value
form.resetForm({ values: { type: type || 'chat', client_type: client_type || '', model_id, name, dimensions } })
selectedModalities.value = input_modalities ?? ['text']
userEditedName.value = !!(name && name !== model_id)
} else {
form.resetForm({ values: { ...emptyValues } })
form.resetForm({ values: { type: 'chat', client_type: '', model_id: '', name: '', dimensions: undefined } })
selectedModalities.value = ['text']
userEditedName.value = false
}
}, {
immediate: true,
})
// Clear client_type when switching to embedding
watch(selectedType, (newType) => {
if (newType === 'embedding') {
form.setFieldValue('client_type', '')
}
})
</script>
@@ -0,0 +1,32 @@
import type { ModelsClientType } from '@memoh/sdk'
export interface ClientTypeMeta {
value: ModelsClientType
label: string
hint: string
}
export const CLIENT_TYPE_META: Record<string, ClientTypeMeta> = {
'openai-responses': {
value: 'openai-responses',
label: 'OpenAI Responses',
hint: '/v1/responses',
},
'openai-completions': {
value: 'openai-completions',
label: 'OpenAI Completions',
hint: '/v1/chat/completions',
},
'anthropic-messages': {
value: 'anthropic-messages',
label: 'Anthropic Messages',
hint: '/v1/messages',
},
'google-generative-ai': {
value: 'google-generative-ai',
label: 'Google Generative AI',
hint: 'Gemini API',
},
}
export const CLIENT_TYPE_LIST: ClientTypeMeta[] = Object.values(CLIENT_TYPE_META)
+2
View File
@@ -143,6 +143,8 @@
"deleteModelConfirm": "Are you sure you want to delete this model?",
"emptyTitle": "No Models",
"emptyDescription": "Click the button above to add a model for this provider",
"clientType": "Client Type",
"clientTypePlaceholder": "Select client type",
"model": "Model ID",
"modelPlaceholder": "e.g. gpt-4o",
"displayName": "Display Name",
+2
View File
@@ -139,6 +139,8 @@
"deleteModelConfirm": "确定要删除这个模型吗?",
"emptyTitle": "暂无模型",
"emptyDescription": "点击上方按钮为当前服务商添加模型",
"clientType": "客户端类型",
"clientTypePlaceholder": "选择客户端类型",
"model": "模型 ID",
"modelPlaceholder": "例如 gpt-4o",
"displayName": "显示名称",
@@ -6,6 +6,12 @@
<Badge variant="outline">
{{ model.type }}
</Badge>
<Badge
v-if="model.client_type"
variant="outline"
>
{{ model.client_type }}
</Badge>
</ItemDescription>
</ItemContent>
<ItemActions>
@@ -116,7 +116,6 @@ const emit = defineEmits<{
const providerSchema = toTypedSchema(z.object({
name: z.string().min(1),
base_url: z.string().min(1),
client_type: z.string().min(1),
api_key: z.string().optional(),
metadata: z.object({
additionalProp1: z.object({}),
@@ -132,8 +131,6 @@ watch(() => props.provider, (newVal) => {
form.setValues({
name: newVal.name,
base_url: newVal.base_url,
client_type: newVal.client_type,
// Keep key input empty by default so masked placeholders are never submitted back.
api_key: '',
})
}
@@ -144,12 +141,10 @@ const hasChanges = computed(() => {
const baseChanged = JSON.stringify({
name: form.values.name,
base_url: form.values.base_url,
client_type: form.values.client_type,
metadata: form.values.metadata,
}) !== JSON.stringify({
name: raw?.name,
base_url: raw?.base_url,
client_type: raw?.client_type,
metadata: { additionalProp1: {} },
})
@@ -161,7 +156,6 @@ const editProvider = form.handleSubmit(async (value) => {
const payload: Record<string, unknown> = {
name: value.name,
base_url: value.base_url,
client_type: value.client_type,
metadata: value.metadata,
}
if (value.api_key && value.api_key.trim() !== '') {
+2 -35
View File
@@ -14,12 +14,6 @@ import {
InputGroup, InputGroupAddon, InputGroupInput,
SidebarFooter,
Toggle,
Select,
SelectTrigger,
SelectValue,
SelectContent,
SelectGroup,
SelectItem,
Empty,
EmptyContent,
EmptyDescription,
@@ -28,21 +22,14 @@ import {
EmptyTitle,
} from '@memoh/ui'
import { getProviders } from '@memoh/sdk'
import type { ProvidersGetResponse, ProvidersClientType } from '@memoh/sdk'
import type { ProvidersGetResponse } from '@memoh/sdk'
import AddProvider from '@/components/add-provider/index.vue'
import { useQuery } from '@pinia/colada'
const CLIENT_TYPES: ProvidersClientType[] = [
'openai', 'openai-compat', 'anthropic', 'google',
'azure', 'bedrock', 'mistral', 'xai', 'ollama', 'dashscope',
]
const filterProvider = ref('')
const { data: providerData } = useQuery({
key: () => ['providers', filterProvider.value],
key: () => ['providers'],
query: async () => {
const { data } = await getProviders({
query: filterProvider.value ? { client_type: filterProvider.value } : undefined,
throwOnError: true,
})
return data
@@ -50,10 +37,6 @@ const { data: providerData } = useQuery({
})
const queryCache = useQueryCache()
watch(filterProvider, () => {
queryCache.invalidateQueries({ key: ['providers'] })
}, { immediate: true })
const curProvider = ref<ProvidersGetResponse>()
provide('curProvider', curProvider)
@@ -142,22 +125,6 @@ const openStatus = reactive({
</SidebarMenu>
</SidebarContent>
<SidebarFooter>
<Select v-model:model-value="filterProvider">
<SelectTrigger class="w-full">
<SelectValue :placeholder="$t('common.typePlaceholder')" />
</SelectTrigger>
<SelectContent>
<SelectGroup>
<SelectItem
v-for="type in CLIENT_TYPES"
:key="type"
:value="type"
>
{{ type }}
</SelectItem>
</SelectGroup>
</SelectContent>
</Select>
<AddProvider v-model:open="openStatus.provideOpen" />
</SidebarFooter>
</Sidebar>