mirror of
https://github.com/memohai/Memoh.git
synced 2026-04-25 07:00:48 +09:00
c9dcfe287f
* feat: expand speech provider support with new client types and configuration schema * feat: add icon support for speech providers and update related configurations * feat: add SVG support for Deepgram and Elevenlabs with Vue components * feat: except *-speech client type in llm provider * feat: enhance speech provider functionality with advanced settings and model import capabilities * chore: remove go.mod replace * feat: enhance speech provider functionality with advanced settings and model import capabilities * chore: update go module dependencies * feat: Ear and Mouth * fix: separate ear/mouth page * fix: separate audio domain and restore transcription templates Move speech and transcription internals into the audio domain, restore template-driven transcription providers, and regenerate Swagger/SDK so the frontend can stop hand-calling /transcription-* APIs. --------- Co-authored-by: aki <arisu@ieee.org>
30 lines
749 B
SQL
30 lines
749 B
SQL
-- 0071_split_transcription_providers
--
-- Add dedicated transcription provider client types.
-- The existing check constraint is dropped (IF EXISTS keeps the migration
-- idempotent on a fresh database) and recreated with the new
-- *-transcription client types added alongside the existing ones.
ALTER TABLE providers DROP CONSTRAINT IF EXISTS providers_client_type_check;

ALTER TABLE providers
    ADD CONSTRAINT providers_client_type_check CHECK (client_type IN (
        -- LLM client types
        'openai-responses',
        'openai-completions',
        'anthropic-messages',
        'google-generative-ai',
        'openai-codex',
        'github-copilot',
        -- Speech (text-to-speech) client types
        'edge-speech',
        'openai-speech',
        'openrouter-speech',
        'elevenlabs-speech',
        'deepgram-speech',
        'minimax-speech',
        'volcengine-speech',
        'alibabacloud-speech',
        'microsoft-speech',
        'google-speech',
        -- Transcription (speech-to-text) client types
        'openai-transcription',
        'openrouter-transcription',
        'elevenlabs-transcription',
        'deepgram-transcription',
        'google-transcription'
    ));