From 26f595da3a00dcb48d05d69f9a1846d3a04b110a Mon Sep 17 00:00:00 2001 From: Peter Dave Hello Date: Thu, 26 Feb 2026 23:58:23 +0800 Subject: [PATCH 1/2] Refactor OpenAI-compatible provider execution into shared core Consolidate OpenAI-compatible API request handling into a single shared module and route background dispatch through provider registry lookup. This removes duplicated streaming/parsing logic from openai-api and custom-api while keeping existing behavior. Add config migration to preserve existing API keys and custom mode entries by mapping them into providerSecrets and custom provider records. Keep legacy fallbacks for apiMode customUrl/custom apiKey to avoid user-visible regressions during rollout. Normalize apiMode objects at runtime and compare selection using stable identity fields so migrated and legacy session data continue to match correctly. --- src/background/index.mjs | 87 +- src/components/ConversationCard/index.jsx | 50 +- src/config/index.mjs | 648 ++++- src/config/openai-provider-mappings.mjs | 30 + src/services/apis/aiml-api.mjs | 12 - src/services/apis/chatglm-api.mjs | 14 - src/services/apis/custom-api.mjs | 103 +- src/services/apis/deepseek-api.mjs | 12 - src/services/apis/moonshot-api.mjs | 12 - src/services/apis/ollama-api.mjs | 36 - src/services/apis/openai-api.mjs | 379 +-- src/services/apis/openai-compatible-core.mjs | 166 ++ src/services/apis/openrouter-api.mjs | 12 - src/services/apis/provider-registry.mjs | 750 +++++ src/services/init-session.mjs | 8 +- src/services/wrappers.mjs | 7 +- src/utils/model-name-convert.mjs | 232 +- .../unit/config/migrate-user-config.test.mjs | 934 +++++++ tests/unit/config/user-config.test.mjs | 62 + tests/unit/services/apis/custom-api.test.mjs | 2 +- .../services/apis/openai-api-compat.test.mjs | 1554 ++++++++++- .../services/apis/provider-registry.test.mjs | 2425 +++++++++++++++++ .../unit/services/apis/thin-adapters.test.mjs | 66 +- .../unit/services/wrappers-register.test.mjs | 3 +- 
tests/unit/utils/model-name-convert.test.mjs | 758 ++++++ 25 files changed, 7797 insertions(+), 565 deletions(-) create mode 100644 src/config/openai-provider-mappings.mjs delete mode 100644 src/services/apis/aiml-api.mjs delete mode 100644 src/services/apis/chatglm-api.mjs delete mode 100644 src/services/apis/deepseek-api.mjs delete mode 100644 src/services/apis/moonshot-api.mjs delete mode 100644 src/services/apis/ollama-api.mjs create mode 100644 src/services/apis/openai-compatible-core.mjs delete mode 100644 src/services/apis/openrouter-api.mjs create mode 100644 src/services/apis/provider-registry.mjs create mode 100644 tests/unit/config/migrate-user-config.test.mjs create mode 100644 tests/unit/services/apis/provider-registry.test.mjs diff --git a/src/background/index.mjs b/src/background/index.mjs index 7fcaa4286..aa41ecd4f 100644 --- a/src/background/index.mjs +++ b/src/background/index.mjs @@ -5,18 +5,10 @@ import { sendMessageFeedback, } from '../services/apis/chatgpt-web' import { generateAnswersWithBingWebApi } from '../services/apis/bing-web.mjs' -import { - generateAnswersWithOpenAiApi, - generateAnswersWithGptCompletionApi, -} from '../services/apis/openai-api' -import { generateAnswersWithCustomApi } from '../services/apis/custom-api.mjs' -import { generateAnswersWithOllamaApi } from '../services/apis/ollama-api.mjs' +import { generateAnswersWithOpenAICompatibleApi } from '../services/apis/openai-api' import { generateAnswersWithAzureOpenaiApi } from '../services/apis/azure-openai-api.mjs' import { generateAnswersWithClaudeApi } from '../services/apis/claude-api.mjs' -import { generateAnswersWithChatGLMApi } from '../services/apis/chatglm-api.mjs' import { generateAnswersWithWaylaidwandererApi } from '../services/apis/waylaidwanderer-api.mjs' -import { generateAnswersWithOpenRouterApi } from '../services/apis/openrouter-api.mjs' -import { generateAnswersWithAimlApi } from '../services/apis/aiml-api.mjs' import { defaultConfig, getUserConfig, @@ 
-52,10 +44,8 @@ import { refreshMenu } from './menus.mjs' import { registerCommands } from './commands.mjs' import { generateAnswersWithBardWebApi } from '../services/apis/bard-web.mjs' import { generateAnswersWithClaudeWebApi } from '../services/apis/claude-web.mjs' -import { generateAnswersWithMoonshotCompletionApi } from '../services/apis/moonshot-api.mjs' import { generateAnswersWithMoonshotWebApi } from '../services/apis/moonshot-web.mjs' import { isUsingModelName } from '../utils/model-name-convert.mjs' -import { generateAnswersWithDeepSeekApi } from '../services/apis/deepseek-api.mjs' import { redactSensitiveFields } from './redact.mjs' const RECONNECT_CONFIG = { @@ -345,6 +335,20 @@ function setPortProxy(port, proxyTabId) { } } +function isUsingOpenAICompatibleApiSession(session) { + return ( + isUsingCustomModel(session) || + isUsingChatgptApiModel(session) || + isUsingMoonshotApiModel(session) || + isUsingChatGLMApiModel(session) || + isUsingDeepSeekApiModel(session) || + isUsingOllamaApiModel(session) || + isUsingOpenRouterApiModel(session) || + isUsingAimlApiModel(session) || + isUsingGptCompletionApiModel(session) + ) +} + async function executeApi(session, port, config) { console.log( `[background] executeApi called for model: ${session.modelName}, apiMode: ${session.apiMode}`, @@ -360,29 +364,7 @@ async function executeApi(session, port, config) { ) } try { - if (isUsingCustomModel(session)) { - console.debug('[background] Using Custom Model API') - if (!session.apiMode) - await generateAnswersWithCustomApi( - port, - session.question, - session, - config.customModelApiUrl.trim() || 'http://localhost:8000/v1/chat/completions', - config.customApiKey, - config.customModelName, - ) - else - await generateAnswersWithCustomApi( - port, - session.question, - session, - session.apiMode.customUrl?.trim() || - config.customModelApiUrl.trim() || - 'http://localhost:8000/v1/chat/completions', - session.apiMode.apiKey?.trim() || config.customApiKey, - 
session.apiMode.customName, - ) - } else if (isUsingChatgptWebModel(session)) { + if (isUsingChatgptWebModel(session)) { console.debug('[background] Using ChatGPT Web Model') let tabId if ( @@ -507,46 +489,15 @@ async function executeApi(session, port, config) { console.debug('[background] Using Gemini Web Model') const cookies = await getBardCookies() await generateAnswersWithBardWebApi(port, session.question, session, cookies) - } else if (isUsingChatgptApiModel(session)) { - console.debug('[background] Using OpenAI API Model') - await generateAnswersWithOpenAiApi(port, session.question, session, config.apiKey) + } else if (isUsingOpenAICompatibleApiSession(session)) { + console.debug('[background] Using OpenAI-compatible API provider') + await generateAnswersWithOpenAICompatibleApi(port, session.question, session, config) } else if (isUsingClaudeApiModel(session)) { console.debug('[background] Using Anthropic API Model') await generateAnswersWithClaudeApi(port, session.question, session) - } else if (isUsingMoonshotApiModel(session)) { - console.debug('[background] Using Moonshot API Model') - await generateAnswersWithMoonshotCompletionApi( - port, - session.question, - session, - config.moonshotApiKey, - ) - } else if (isUsingChatGLMApiModel(session)) { - console.debug('[background] Using ChatGLM API Model') - await generateAnswersWithChatGLMApi(port, session.question, session) - } else if (isUsingDeepSeekApiModel(session)) { - console.debug('[background] Using DeepSeek API Model') - await generateAnswersWithDeepSeekApi(port, session.question, session, config.deepSeekApiKey) - } else if (isUsingOllamaApiModel(session)) { - console.debug('[background] Using Ollama API Model') - await generateAnswersWithOllamaApi(port, session.question, session) - } else if (isUsingOpenRouterApiModel(session)) { - console.debug('[background] Using OpenRouter API Model') - await generateAnswersWithOpenRouterApi( - port, - session.question, - session, - config.openRouterApiKey, - ) 
- } else if (isUsingAimlApiModel(session)) { - console.debug('[background] Using AIML API Model') - await generateAnswersWithAimlApi(port, session.question, session, config.aimlApiKey) } else if (isUsingAzureOpenAiApiModel(session)) { console.debug('[background] Using Azure OpenAI API Model') await generateAnswersWithAzureOpenaiApi(port, session.question, session) - } else if (isUsingGptCompletionApiModel(session)) { - console.debug('[background] Using GPT Completion API Model') - await generateAnswersWithGptCompletionApi(port, session.question, session, config.apiKey) } else if (isUsingGithubThirdPartyApiModel(session)) { console.debug('[background] Using Github Third Party API Model') await generateAnswersWithWaylaidwandererApi(port, session.question, session) diff --git a/src/components/ConversationCard/index.jsx b/src/components/ConversationCard/index.jsx index 440bfffe1..d131ff8fe 100644 --- a/src/components/ConversationCard/index.jsx +++ b/src/components/ConversationCard/index.jsx @@ -7,7 +7,7 @@ import { apiModeToModelName, createElementAtPosition, getApiModesFromConfig, - isApiModeSelected, + getUniquelySelectedApiModeIndex, isFirefox, isMobile, isSafari, @@ -38,6 +38,7 @@ import { generateAnswersWithBingWebApi } from '../../services/apis/bing-web.mjs' import { handlePortError } from '../../services/wrappers.mjs' const logo = Browser.runtime.getURL('logo.png') +const UNMATCHED_API_MODE_VALUE = '__current-session-api-mode__' class ConversationItemData extends Object { /** @@ -67,9 +68,36 @@ function ConversationCard(props) { /** * @type {[ConversationItemData[], (conversationItemData: ConversationItemData[]) => void]} - */ + */ const [conversationItemData, setConversationItemData] = useState([]) const config = useConfig() + const currentAiName = + session.aiName || + modelNameToDesc( + session.apiMode && typeof session.apiMode === 'object' + ? 
apiModeToModelName(session.apiMode) + : session.modelName, + t, + config.customModelName, + ) || + t(Models.customModel.desc) + const selectedApiModeIndex = useMemo( + () => getUniquelySelectedApiModeIndex(apiModes, session, { sessionCompat: true }), + [apiModes, session], + ) + const selectedApiModeDesc = + selectedApiModeIndex !== -1 + ? modelNameToDesc( + apiModeToModelName(apiModes[selectedApiModeIndex]), + t, + config.customModelName, + ) + : '' + const selectedApiModeValue = selectedApiModeDesc + ? String(selectedApiModeIndex) + : !session.apiMode && session.modelName === 'customModel' + ? '-1' + : UNMATCHED_API_MODE_VALUE useLayoutEffect(() => { if (session.conversationRecords.length === 0) { @@ -379,11 +407,16 @@ function ConversationCard(props) { style={props.notClampSize ? {} : { width: 0, flexGrow: 1 }} className="normal-button" required + value={selectedApiModeValue} onChange={(e) => { + if (e.target.value === UNMATCHED_API_MODE_VALUE) return + let apiMode = null let modelName = 'customModel' if (e.target.value !== '-1') { - apiMode = apiModes[e.target.value] + const selectedApiMode = apiModes[Number(e.target.value)] + if (!selectedApiMode) return + apiMode = selectedApiMode modelName = apiModeToModelName(apiMode) } const newSession = { @@ -401,20 +434,23 @@ function ConversationCard(props) { else setSession(newSession) }} > + {selectedApiModeValue === UNMATCHED_API_MODE_VALUE && ( + + )} {apiModes.map((apiMode, index) => { const modelName = apiModeToModelName(apiMode) const desc = modelNameToDesc(modelName, t, config.customModelName) if (desc) { return ( - ) } })} - + {props.draggable && !completeDraggable && ( diff --git a/src/config/index.mjs b/src/config/index.mjs index cb1e0c3ea..3860b7407 100644 --- a/src/config/index.mjs +++ b/src/config/index.mjs @@ -7,6 +7,10 @@ import { modelNameToDesc, } from '../utils/model-name-convert.mjs' import { t } from 'i18next' +import { + LEGACY_SECRET_KEY_TO_PROVIDER_ID, + OPENAI_COMPATIBLE_GROUP_TO_PROVIDER_ID as 
API_MODE_GROUP_TO_PROVIDER_ID, +} from './openai-provider-mappings.mjs' export const TriggerMode = { always: 'Always', @@ -583,9 +587,13 @@ export const defaultConfig = { customName: '', customUrl: '', apiKey: '', + providerId: '', active: false, }, ], + customOpenAIProviders: [], + providerSecrets: {}, + configSchemaVersion: 1, activeSelectionTools: ['translate', 'translateToEn', 'summary', 'polish', 'code', 'ask'], customSelectionTools: [ { @@ -758,6 +766,603 @@ export async function getPreferredLanguageKey() { return config.preferredLanguage } +const CONFIG_SCHEMA_VERSION = 1 + +function normalizeText(value) { + return typeof value === 'string' ? value.trim() : '' +} + +function normalizeProviderId(value) { + return normalizeText(value) + .toLowerCase() + .replace(/[^a-z0-9]+/g, '-') + .replace(/^-+|-+$/g, '') +} + +function normalizeEndpointUrlForCompare(value) { + return normalizeText(value).replace(/\/+$/, '') +} + +function isPlainObject(value) { + return Boolean(value) && typeof value === 'object' && !Array.isArray(value) +} + +function areStringRecordValuesEqual(leftRecord, rightRecord) { + const leftIsRecord = isPlainObject(leftRecord) + const rightIsRecord = isPlainObject(rightRecord) + if (!leftIsRecord || !rightIsRecord) { + return !leftIsRecord && !rightIsRecord && leftRecord === rightRecord + } + const left = leftRecord + const right = rightRecord + const leftKeys = Object.keys(left) + const rightKeys = Object.keys(right) + if (leftKeys.length !== rightKeys.length) return false + for (const key of leftKeys) { + if (!Object.hasOwn(right, key)) return false + if (normalizeText(left[key]) !== normalizeText(right[key])) return false + } + return true +} + +function ensureUniqueProviderId(providerIdSet, preferredId) { + let id = preferredId || 'custom-provider' + let suffix = 2 + while (providerIdSet.has(id)) { + id = `${preferredId || 'custom-provider'}-${suffix}` + suffix += 1 + } + return id +} + +function normalizeCustomProviderForStorage(provider, 
index, providerIdSet) { + if (!provider || typeof provider !== 'object') return null + const originalRawId = normalizeText(provider.id) + const originalId = normalizeProviderId(provider.id) + const sourceProviderOriginalRawId = normalizeText(provider.sourceProviderId) + const sourceProviderId = normalizeProviderId(provider.sourceProviderId) + const preferredId = originalId || `custom-provider-${index + 1}` + const id = ensureUniqueProviderId(providerIdSet, preferredId) + providerIdSet.add(id) + return { + originalId, + originalRawId, + sourceProviderOriginalId: sourceProviderId, + sourceProviderOriginalRawId, + provider: { + id, + name: normalizeText(provider.name) || `Custom Provider ${index + 1}`, + baseUrl: normalizeText(provider.baseUrl), + chatCompletionsPath: normalizeText(provider.chatCompletionsPath) || '/v1/chat/completions', + completionsPath: normalizeText(provider.completionsPath) || '/v1/completions', + chatCompletionsUrl: normalizeText(provider.chatCompletionsUrl), + completionsUrl: normalizeText(provider.completionsUrl), + enabled: provider.enabled !== false, + allowLegacyResponseField: provider.allowLegacyResponseField !== false, + ...(sourceProviderId ? { sourceProviderId } : {}), + }, + } +} + +function migrateUserConfig(options) { + const migrated = { ...options } + let dirty = false + + if (migrated.customChatGptWebApiUrl === 'https://chat.openai.com') { + migrated.customChatGptWebApiUrl = 'https://chatgpt.com' + dirty = true + } + + const hasProviderSecretsRecord = isPlainObject(migrated.providerSecrets) + const providerSecrets = hasProviderSecretsRecord ? 
{ ...migrated.providerSecrets } : {} + if (!hasProviderSecretsRecord) { + dirty = true + } + for (const [legacyKey, providerId] of Object.entries(LEGACY_SECRET_KEY_TO_PROVIDER_ID)) { + const legacyKeyValue = normalizeText(migrated[legacyKey]) + const hasProviderSecret = Object.hasOwn(providerSecrets, providerId) + if (legacyKeyValue && !hasProviderSecret) { + providerSecrets[providerId] = legacyKeyValue + dirty = true + } + } + + const builtinProviderIds = new Set( + Object.values(API_MODE_GROUP_TO_PROVIDER_ID) + .map((providerId) => normalizeText(providerId)) + .filter((providerId) => providerId), + ) + const providerIdSet = new Set(builtinProviderIds) + const providerIdRenameLookup = new Map() + const providerIdRenames = [] + const rawCustomOpenAIProviders = Array.isArray(migrated.customOpenAIProviders) + ? migrated.customOpenAIProviders + : [] + const legacyCustomProviderIds = new Set( + rawCustomOpenAIProviders + .map((provider) => normalizeProviderId(provider?.id)) + .filter((providerId) => providerId), + ) + const normalizedProviderResults = rawCustomOpenAIProviders + .map((provider, index) => normalizeCustomProviderForStorage(provider, index, providerIdSet)) + .filter((result) => result && result.provider) + const unchangedProviderIds = new Set( + normalizedProviderResults + .filter( + ({ originalId, provider }) => originalId && originalId === normalizeProviderId(provider.id), + ) + .map(({ provider }) => normalizeProviderId(provider.id)) + .filter((id) => id), + ) + const customOpenAIProviders = normalizedProviderResults.map( + ({ originalId, originalRawId, sourceProviderOriginalRawId, provider }) => { + if (originalId && originalId !== provider.id) { + providerIdRenames.push({ oldId: originalId, oldRawId: originalRawId, newId: provider.id }) + if (!providerIdRenameLookup.has(originalId) && !unchangedProviderIds.has(originalId)) { + providerIdRenameLookup.set(originalId, provider.id) + } + dirty = true + } + if ( + normalizeText(sourceProviderOriginalRawId) 
&& + normalizeText(sourceProviderOriginalRawId) !== normalizeText(provider.sourceProviderId) + ) { + dirty = true + } + return provider + }, + ) + if (!Array.isArray(migrated.customOpenAIProviders)) dirty = true + + for (const { + sourceProviderOriginalId, + sourceProviderOriginalRawId, + provider, + } of normalizedProviderResults) { + const currentSourceProviderId = normalizeProviderId(provider?.sourceProviderId) + if (!currentSourceProviderId) { + continue + } + const renamedSourceProviderByRawId = providerIdRenames.find( + ({ oldRawId }) => + normalizeText(oldRawId) && + normalizeText(oldRawId) === normalizeText(sourceProviderOriginalRawId), + ) + const renamedSourceProviderId = + (renamedSourceProviderByRawId && !unchangedProviderIds.has(sourceProviderOriginalId) + ? renamedSourceProviderByRawId.newId + : '') || + (!builtinProviderIds.has(sourceProviderOriginalId) + ? providerIdRenameLookup.get(sourceProviderOriginalId) + : '') + if (renamedSourceProviderId && currentSourceProviderId !== renamedSourceProviderId) { + provider.sourceProviderId = renamedSourceProviderId + dirty = true + } + } + + for (let index = providerIdRenames.length - 1; index >= 0; index -= 1) { + const { + oldId: oldProviderId, + oldRawId: oldRawProviderId, + newId: newProviderId, + } = providerIdRenames[index] + if (oldProviderId === newProviderId) continue + if (!legacyCustomProviderIds.has(oldProviderId)) continue + const hasRawIdSecret = Object.hasOwn(providerSecrets, oldRawProviderId) + const hasNormalizedIdSecret = Object.hasOwn(providerSecrets, oldProviderId) + const usesBuiltinSecretSlot = builtinProviderIds.has(oldProviderId) + if (usesBuiltinSecretSlot && !hasRawIdSecret) continue + if (!usesBuiltinSecretSlot && !hasRawIdSecret && !hasNormalizedIdSecret) continue + const rawIdSecret = hasRawIdSecret ? providerSecrets[oldRawProviderId] : undefined + const normalizedIdSecret = hasNormalizedIdSecret ? 
providerSecrets[oldProviderId] : undefined + const oldSecret = usesBuiltinSecretSlot + ? rawIdSecret + : hasRawIdSecret && rawIdSecret !== '' + ? rawIdSecret + : hasNormalizedIdSecret + ? normalizedIdSecret + : rawIdSecret + if ( + !Object.hasOwn(providerSecrets, newProviderId) || + providerSecrets[newProviderId] !== oldSecret + ) { + providerSecrets[newProviderId] = oldSecret + dirty = true + } + if (hasRawIdSecret && oldRawProviderId !== oldProviderId) { + delete providerSecrets[oldRawProviderId] + dirty = true + } + } + + const activeCustomProviderIds = new Set( + customOpenAIProviders.map((provider) => normalizeText(provider?.id)).filter(Boolean), + ) + + for (const { originalRawId, provider } of normalizedProviderResults) { + const rawProviderId = normalizeText(originalRawId) + const normalizedProviderId = normalizeText(provider?.id) + if (!rawProviderId || !normalizedProviderId || rawProviderId === normalizedProviderId) continue + if (!Object.hasOwn(providerSecrets, rawProviderId)) continue + const rawSecret = providerSecrets[rawProviderId] + const shouldPreserveRawSecretSlot = + builtinProviderIds.has(rawProviderId) || activeCustomProviderIds.has(rawProviderId) + if (!Object.hasOwn(providerSecrets, normalizedProviderId)) { + providerSecrets[normalizedProviderId] = rawSecret + dirty = true + } + if (!shouldPreserveRawSecretSlot) { + delete providerSecrets[rawProviderId] + dirty = true + } + } + + const customApiModes = Array.isArray(migrated.customApiModes) + ? 
migrated.customApiModes.map((apiMode) => ({ ...apiMode })) + : [] + if (!Array.isArray(migrated.customApiModes)) dirty = true + + let customProviderCounter = customOpenAIProviders.length + let customApiModesDirty = false + let customProvidersDirty = false + const migratedCustomModeProviderIds = new Map() + const legacyCustomProviderSecret = normalizeText(providerSecrets['legacy-custom-default']) + const hasOwnProviderSecret = (providerId) => + Object.prototype.hasOwnProperty.call(providerSecrets, providerId) + const getCustomModeMigrationSignature = (apiMode) => + JSON.stringify({ + groupName: normalizeText(apiMode?.groupName), + itemName: normalizeText(apiMode?.itemName), + isCustom: Boolean(apiMode?.isCustom), + customName: normalizeText(apiMode?.customName), + customUrl: normalizeEndpointUrlForCompare(normalizeText(apiMode?.customUrl)), + providerId: normalizeProviderId( + typeof apiMode?.providerId === 'string' ? apiMode.providerId : '', + ), + apiKey: normalizeText(apiMode?.apiKey), + }) + const isProviderSecretCompatibleForCustomMode = (modeApiKey, providerSecret) => { + const effectiveModeKey = normalizeText(modeApiKey) || legacyCustomProviderSecret + if (effectiveModeKey) { + return !providerSecret || providerSecret === effectiveModeKey + } + return !providerSecret + } + const materializeCustomProviderForMode = (targetProviderId, preferredName) => { + customProviderCounter += 1 + const sourceProvider = customOpenAIProviders.find((item) => item.id === targetProviderId) + const providerName = + normalizeText(preferredName) || + normalizeText(sourceProvider?.name) || + `Custom Provider ${customProviderCounter}` + const preferredId = + normalizeProviderId(preferredName) || + normalizeProviderId(sourceProvider?.name) || + `custom-provider-${customProviderCounter}` + const providerId = ensureUniqueProviderId(providerIdSet, preferredId) + providerIdSet.add(providerId) + const provider = sourceProvider + ? 
{ + ...sourceProvider, + id: providerId, + name: providerName, + } + : { + id: providerId, + name: providerName, + baseUrl: '', + chatCompletionsPath: '/v1/chat/completions', + completionsPath: '/v1/completions', + chatCompletionsUrl: + normalizeText(migrated.customModelApiUrl) || defaultConfig.customModelApiUrl, + completionsUrl: '', + enabled: true, + allowLegacyResponseField: true, + } + customOpenAIProviders.push(provider) + customProvidersDirty = true + dirty = true + return providerId + } + const promoteCustomModeApiKeyToProvider = (apiMode, apiModeKey) => { + const targetProviderId = normalizeText(apiMode.providerId) || 'legacy-custom-default' + const existingProviderSecret = normalizeText(providerSecrets[targetProviderId]) + if (!hasOwnProviderSecret(targetProviderId)) { + providerSecrets[targetProviderId] = apiModeKey + dirty = true + return targetProviderId + } + if (existingProviderSecret === apiModeKey) { + return targetProviderId + } + const reassignedProviderId = materializeCustomProviderForMode( + targetProviderId, + apiMode.customName, + ) + providerSecrets[reassignedProviderId] = apiModeKey + dirty = true + return reassignedProviderId + } + for (const apiMode of customApiModes) { + if (!apiMode || typeof apiMode !== 'object') continue + if (apiMode.groupName !== 'customApiModelKeys') { + const nonCustomApiModeKey = normalizeText(apiMode.apiKey) + if (nonCustomApiModeKey) { + const targetProviderId = + API_MODE_GROUP_TO_PROVIDER_ID[normalizeText(apiMode.groupName)] || + normalizeText(apiMode.providerId) + if (targetProviderId) { + if (!hasOwnProviderSecret(targetProviderId)) { + providerSecrets[targetProviderId] = nonCustomApiModeKey + dirty = true + } + apiMode.apiKey = '' + customApiModesDirty = true + } + } + if (normalizeText(apiMode.providerId)) { + apiMode.providerId = '' + customApiModesDirty = true + } + continue + } + + const originalCustomModeSignature = getCustomModeMigrationSignature(apiMode) + const existingProviderIdRaw = typeof 
apiMode.providerId === 'string' ? apiMode.providerId : '' + const existingProviderId = normalizeProviderId(existingProviderIdRaw) + if (existingProviderId && existingProviderIdRaw !== existingProviderId) { + apiMode.providerId = existingProviderId + customApiModesDirty = true + } + let providerIdAssignedFromLegacyCustomUrl = false + const renamedProviderId = providerIdRenameLookup.get(existingProviderId) + if (renamedProviderId && normalizeText(apiMode.providerId) !== renamedProviderId) { + apiMode.providerId = renamedProviderId + customApiModesDirty = true + } + + if (!normalizeText(apiMode.providerId)) { + const customUrl = normalizeText(apiMode.customUrl) + const normalizedCustomUrl = normalizeEndpointUrlForCompare(customUrl) + if (customUrl) { + const apiModeKeyForMatch = normalizeText(apiMode.apiKey) + let provider = customOpenAIProviders.find((item) => { + if (normalizeEndpointUrlForCompare(item.chatCompletionsUrl) !== normalizedCustomUrl) + return false + const existingSecret = normalizeText(providerSecrets[item.id]) + return isProviderSecretCompatibleForCustomMode(apiModeKeyForMatch, existingSecret) + }) + if (!provider) { + customProviderCounter += 1 + const preferredId = + normalizeProviderId(apiMode.customName) || `custom-provider-${customProviderCounter}` + const providerId = ensureUniqueProviderId(providerIdSet, preferredId) + providerIdSet.add(providerId) + provider = { + id: providerId, + name: normalizeText(apiMode.customName) || `Custom Provider ${customProviderCounter}`, + baseUrl: '', + chatCompletionsPath: '/v1/chat/completions', + completionsPath: '/v1/completions', + chatCompletionsUrl: customUrl, + completionsUrl: '', + enabled: true, + allowLegacyResponseField: true, + } + customOpenAIProviders.push(provider) + customProvidersDirty = true + } + apiMode.providerId = provider.id + if (normalizeText(apiMode.customUrl)) { + apiMode.customUrl = '' + } + providerIdAssignedFromLegacyCustomUrl = true + } else { + apiMode.providerId = 
'legacy-custom-default' + } + customApiModesDirty = true + } + + const apiModeKey = normalizeText(apiMode.apiKey) + if (apiModeKey) { + const promotedProviderId = promoteCustomModeApiKeyToProvider(apiMode, apiModeKey) + if (normalizeText(apiMode.providerId) !== promotedProviderId) { + apiMode.providerId = promotedProviderId + customApiModesDirty = true + } + if (normalizeText(apiMode.apiKey)) { + // Mode-level custom keys are treated as legacy data; after migration, + // providerSecrets is the single source of truth. + apiMode.apiKey = '' + customApiModesDirty = true + } + } else if (legacyCustomProviderSecret && providerIdAssignedFromLegacyCustomUrl) { + if (!hasOwnProviderSecret(apiMode.providerId)) { + providerSecrets[apiMode.providerId] = legacyCustomProviderSecret + dirty = true + } + } + + migratedCustomModeProviderIds.set( + originalCustomModeSignature, + normalizeText(apiMode.providerId), + ) + } + + if (migrated.apiMode && typeof migrated.apiMode === 'object') { + const selectedApiMode = { ...migrated.apiMode } + let selectedApiModeDirty = false + const selectedIsCustom = selectedApiMode.groupName === 'customApiModelKeys' + let selectedProviderIdAssignedFromLegacyCustomUrl = false + const originalSelectedCustomModeSignature = selectedIsCustom + ? getCustomModeMigrationSignature(selectedApiMode) + : '' + + if (selectedIsCustom) { + const existingSelectedProviderIdRaw = + typeof selectedApiMode.providerId === 'string' ? 
selectedApiMode.providerId : '' + const existingSelectedProviderId = normalizeProviderId(existingSelectedProviderIdRaw) + if ( + existingSelectedProviderId && + existingSelectedProviderIdRaw !== existingSelectedProviderId + ) { + selectedApiMode.providerId = existingSelectedProviderId + selectedApiModeDirty = true + } + const renamedSelectedProviderId = providerIdRenameLookup.get(existingSelectedProviderId) + if ( + renamedSelectedProviderId && + normalizeText(selectedApiMode.providerId) !== renamedSelectedProviderId + ) { + selectedApiMode.providerId = renamedSelectedProviderId + selectedApiModeDirty = true + } + } + + if (selectedIsCustom) { + const migratedProviderId = migratedCustomModeProviderIds.get( + originalSelectedCustomModeSignature, + ) + if (migratedProviderId && normalizeText(selectedApiMode.providerId) !== migratedProviderId) { + selectedApiMode.providerId = migratedProviderId + selectedApiModeDirty = true + } + } + + if (selectedIsCustom && !normalizeText(selectedApiMode.providerId)) { + const customUrl = normalizeText(selectedApiMode.customUrl) + const normalizedCustomUrl = normalizeEndpointUrlForCompare(customUrl) + if (customUrl) { + const selectedApiModeKeyForMatch = normalizeText(selectedApiMode.apiKey) + let provider = customOpenAIProviders.find((item) => { + if (normalizeEndpointUrlForCompare(item.chatCompletionsUrl) !== normalizedCustomUrl) + return false + const existingSecret = normalizeText(providerSecrets[item.id]) + return isProviderSecretCompatibleForCustomMode(selectedApiModeKeyForMatch, existingSecret) + }) + if (!provider) { + customProviderCounter += 1 + const preferredId = + normalizeProviderId(selectedApiMode.customName) || + `custom-provider-${customProviderCounter}` + const providerId = ensureUniqueProviderId(providerIdSet, preferredId) + providerIdSet.add(providerId) + provider = { + id: providerId, + name: + normalizeText(selectedApiMode.customName) || + `Custom Provider ${customProviderCounter}`, + baseUrl: '', + 
chatCompletionsPath: '/v1/chat/completions', + completionsPath: '/v1/completions', + chatCompletionsUrl: customUrl, + completionsUrl: '', + enabled: true, + allowLegacyResponseField: true, + } + customOpenAIProviders.push(provider) + customProvidersDirty = true + } + selectedApiMode.providerId = provider.id + if (normalizeText(selectedApiMode.customUrl)) { + selectedApiMode.customUrl = '' + selectedApiModeDirty = true + } + selectedProviderIdAssignedFromLegacyCustomUrl = true + } else { + selectedApiMode.providerId = 'legacy-custom-default' + } + selectedApiModeDirty = true + } + + const selectedApiModeKey = normalizeText(selectedApiMode.apiKey) + const selectedTargetProviderId = selectedIsCustom + ? normalizeText(selectedApiMode.providerId) || 'legacy-custom-default' + : API_MODE_GROUP_TO_PROVIDER_ID[normalizeText(selectedApiMode.groupName)] || + normalizeText(selectedApiMode.providerId) + if ( + selectedIsCustom && + selectedProviderIdAssignedFromLegacyCustomUrl && + !selectedApiModeKey && + legacyCustomProviderSecret && + selectedTargetProviderId && + !hasOwnProviderSecret(selectedTargetProviderId) + ) { + providerSecrets[selectedTargetProviderId] = legacyCustomProviderSecret + dirty = true + } + if (selectedApiModeKey) { + const migratedProviderId = selectedIsCustom + ? migratedCustomModeProviderIds.get(originalSelectedCustomModeSignature) + : '' + if (migratedProviderId) { + if (normalizeText(selectedApiMode.providerId) !== migratedProviderId) { + selectedApiMode.providerId = migratedProviderId + selectedApiModeDirty = true + } + selectedApiMode.apiKey = '' + selectedApiModeDirty = true + } else { + const targetProviderId = selectedIsCustom + ? 
promoteCustomModeApiKeyToProvider(selectedApiMode, selectedApiModeKey) + : API_MODE_GROUP_TO_PROVIDER_ID[normalizeText(selectedApiMode.groupName)] || + normalizeText(selectedApiMode.providerId) + if (targetProviderId && normalizeText(selectedApiMode.providerId) !== targetProviderId) { + selectedApiMode.providerId = targetProviderId + selectedApiModeDirty = true + } + if (targetProviderId && !selectedIsCustom && !hasOwnProviderSecret(targetProviderId)) { + providerSecrets[targetProviderId] = selectedApiModeKey + dirty = true + } + if (targetProviderId) { + selectedApiMode.apiKey = '' + selectedApiModeDirty = true + } + } + } + + if (!selectedIsCustom && normalizeText(selectedApiMode.providerId)) { + selectedApiMode.providerId = '' + selectedApiModeDirty = true + } + + if (selectedApiModeDirty) { + migrated.apiMode = selectedApiMode + dirty = true + } + } + + if (customProvidersDirty) dirty = true + if (customApiModesDirty) dirty = true + + if (migrated.configSchemaVersion !== CONFIG_SCHEMA_VERSION) { + migrated.configSchemaVersion = CONFIG_SCHEMA_VERSION + dirty = true + } + + migrated.providerSecrets = providerSecrets + migrated.customOpenAIProviders = customOpenAIProviders + migrated.customApiModes = customApiModes + + // Reverse-sync providerSecrets to legacy fields for backward compatibility + // so that older extension versions can still read the keys. 
+ for (const [legacyKey, providerId] of Object.entries(LEGACY_SECRET_KEY_TO_PROVIDER_ID)) { + const hasProviderSecret = Object.hasOwn(providerSecrets, providerId) + const providerSecret = normalizeText(providerSecrets[providerId]) + if (providerSecret && normalizeText(migrated[legacyKey]) !== providerSecret) { + migrated[legacyKey] = providerSecret + dirty = true + } else if (hasProviderSecret && !providerSecret && normalizeText(migrated[legacyKey])) { + migrated[legacyKey] = '' + dirty = true + } + } + + return { migrated, dirty } +} + /** * get user config from local storage * @returns {Promise} @@ -769,8 +1374,6 @@ export async function getUserConfig() { 'claudeApiKey', 'customClaudeApiUrl', ]) - if (options.customChatGptWebApiUrl === 'https://chat.openai.com') - options.customChatGptWebApiUrl = 'https://chatgpt.com' // Migrate legacy Claude-named keys to Anthropic-named keys. // If both old/new keys coexist (for example after a partial migration), @@ -802,7 +1405,46 @@ export async function getUserConfig() { } } - return defaults(options, defaultConfig) + const { migrated, dirty } = migrateUserConfig(options) + if (dirty) { + const payload = {} + if (JSON.stringify(options.customApiModes) !== JSON.stringify(migrated.customApiModes)) { + payload.customApiModes = migrated.customApiModes + } + if ( + JSON.stringify(options.customOpenAIProviders) !== + JSON.stringify(migrated.customOpenAIProviders) + ) { + payload.customOpenAIProviders = migrated.customOpenAIProviders + } + if (!areStringRecordValuesEqual(options.providerSecrets, migrated.providerSecrets)) { + payload.providerSecrets = migrated.providerSecrets + } + if (options.configSchemaVersion !== migrated.configSchemaVersion) { + payload.configSchemaVersion = migrated.configSchemaVersion + } + if (migrated.customChatGptWebApiUrl !== undefined) { + if (options.customChatGptWebApiUrl !== migrated.customChatGptWebApiUrl) { + payload.customChatGptWebApiUrl = migrated.customChatGptWebApiUrl + } + } + if 
(migrated.apiMode !== undefined) { + if (JSON.stringify(options.apiMode ?? null) !== JSON.stringify(migrated.apiMode ?? null)) { + payload.apiMode = migrated.apiMode + } + } + for (const legacyKey of Object.keys(LEGACY_SECRET_KEY_TO_PROVIDER_ID)) { + if (migrated[legacyKey] !== undefined) { + if (options[legacyKey] !== migrated[legacyKey]) { + payload[legacyKey] = migrated[legacyKey] + } + } + } + if (Object.keys(payload).length > 0) { + await Browser.storage.local.set(payload).catch(() => {}) + } + } + return defaults(migrated, defaultConfig) } /** diff --git a/src/config/openai-provider-mappings.mjs b/src/config/openai-provider-mappings.mjs new file mode 100644 index 000000000..b7a534875 --- /dev/null +++ b/src/config/openai-provider-mappings.mjs @@ -0,0 +1,30 @@ +export const LEGACY_API_KEY_FIELD_BY_PROVIDER_ID = { + openai: 'apiKey', + deepseek: 'deepSeekApiKey', + moonshot: 'moonshotApiKey', + openrouter: 'openRouterApiKey', + aiml: 'aimlApiKey', + chatglm: 'chatglmApiKey', + ollama: 'ollamaApiKey', + 'legacy-custom-default': 'customApiKey', +} + +export const LEGACY_SECRET_KEY_TO_PROVIDER_ID = Object.fromEntries( + Object.entries(LEGACY_API_KEY_FIELD_BY_PROVIDER_ID).map(([providerId, legacyKey]) => [ + legacyKey, + providerId, + ]), +) + +export const OPENAI_COMPATIBLE_GROUP_TO_PROVIDER_ID = { + chatgptApiModelKeys: 'openai', + gptApiModelKeys: 'openai', + moonshotApiModelKeys: 'moonshot', + deepSeekApiModelKeys: 'deepseek', + openRouterApiModelKeys: 'openrouter', + aimlModelKeys: 'aiml', + aimlApiModelKeys: 'aiml', + chatglmApiModelKeys: 'chatglm', + ollamaApiModelKeys: 'ollama', + customApiModelKeys: 'legacy-custom-default', +} diff --git a/src/services/apis/aiml-api.mjs b/src/services/apis/aiml-api.mjs deleted file mode 100644 index fb10be47b..000000000 --- a/src/services/apis/aiml-api.mjs +++ /dev/null @@ -1,12 +0,0 @@ -import { generateAnswersWithOpenAiApiCompat } from './openai-api.mjs' - -/** - * @param {Browser.Runtime.Port} port - * @param {string} 
question - * @param {Session} session - * @param {string} apiKey - */ -export async function generateAnswersWithAimlApi(port, question, session, apiKey) { - const baseUrl = 'https://api.aimlapi.com/v1' - return generateAnswersWithOpenAiApiCompat(baseUrl, port, question, session, apiKey) -} diff --git a/src/services/apis/chatglm-api.mjs b/src/services/apis/chatglm-api.mjs deleted file mode 100644 index fa6899abe..000000000 --- a/src/services/apis/chatglm-api.mjs +++ /dev/null @@ -1,14 +0,0 @@ -import { getUserConfig } from '../../config/index.mjs' -// import { getToken } from '../../utils/jwt-token-generator.mjs' -import { generateAnswersWithOpenAiApiCompat } from './openai-api.mjs' - -/** - * @param {Runtime.Port} port - * @param {string} question - * @param {Session} session - */ -export async function generateAnswersWithChatGLMApi(port, question, session) { - const baseUrl = 'https://open.bigmodel.cn/api/paas/v4' - const config = await getUserConfig() - return generateAnswersWithOpenAiApiCompat(baseUrl, port, question, session, config.chatglmApiKey) -} diff --git a/src/services/apis/custom-api.mjs b/src/services/apis/custom-api.mjs index 62150d151..f0cd9095a 100644 --- a/src/services/apis/custom-api.mjs +++ b/src/services/apis/custom-api.mjs @@ -1,16 +1,4 @@ -// custom api version - -// There is a lot of duplicated code here, but it is very easy to refactor. -// The current state is mainly convenient for making targeted changes at any time, -// and it has not yet had a negative impact on maintenance. -// If necessary, I will refactor. 
- -import { getUserConfig } from '../../config/index.mjs' -import { fetchSSE } from '../../utils/fetch-sse.mjs' -import { getConversationPairs } from '../../utils/get-conversation-pairs.mjs' -import { isEmpty } from 'lodash-es' -import { pushRecord, setAbortController } from './shared.mjs' -import { getChatCompletionsTokenParams } from './openai-token-params.mjs' +import { generateAnswersWithOpenAICompatible } from './openai-compatible-core.mjs' /** * @param {Browser.Runtime.Port} port @@ -28,84 +16,15 @@ export async function generateAnswersWithCustomApi( apiKey, modelName, ) { - const { controller, messageListener, disconnectListener } = setAbortController(port) - - const config = await getUserConfig() - const prompt = getConversationPairs( - session.conversationRecords.slice(-config.maxConversationContextLength), - false, - ) - prompt.push({ role: 'user', content: question }) - - let answer = '' - let finished = false - const finish = () => { - finished = true - pushRecord(session, question, answer) - console.debug('conversation history', { content: session.conversationRecords }) - port.postMessage({ answer: null, done: true, session: session }) - } - await fetchSSE(apiUrl, { - method: 'POST', - signal: controller.signal, - headers: { - 'Content-Type': 'application/json', - Authorization: `Bearer ${apiKey}`, - }, - body: JSON.stringify({ - messages: prompt, - model: modelName, - stream: true, - ...getChatCompletionsTokenParams('custom', modelName, config.maxResponseTokenLength), - temperature: config.temperature, - }), - onMessage(message) { - console.debug('sse message', message) - if (finished) return - if (message.trim() === '[DONE]') { - finish() - return - } - let data - try { - data = JSON.parse(message) - } catch (error) { - console.debug('json error', error) - return - } - - if (data.response) answer = data.response - else { - const delta = data.choices?.[0]?.delta?.content - const content = data.choices?.[0]?.message?.content - const text = 
data.choices?.[0]?.text - if (delta !== undefined) { - answer += delta - } else if (typeof content === 'string') { - answer = content - } else if (text) { - answer += text - } - } - port.postMessage({ answer: answer, done: false, session: null }) - - if (data.choices?.[0]?.finish_reason) { - finish() - return - } - }, - async onStart() {}, - async onEnd() { - port.postMessage({ done: true }) - port.onMessage.removeListener(messageListener) - port.onDisconnect.removeListener(disconnectListener) - }, - async onError(resp) { - port.onMessage.removeListener(messageListener) - port.onDisconnect.removeListener(disconnectListener) - if (resp instanceof Error) throw resp - const error = await resp.json().catch(() => ({})) - throw new Error(!isEmpty(error) ? JSON.stringify(error) : `${resp.status} ${resp.statusText}`) - }, + await generateAnswersWithOpenAICompatible({ + port, + question, + session, + endpointType: 'chat', + requestUrl: apiUrl, + model: modelName, + apiKey, + provider: 'custom', + allowLegacyResponseField: true, }) } diff --git a/src/services/apis/deepseek-api.mjs b/src/services/apis/deepseek-api.mjs deleted file mode 100644 index 9e91b97a8..000000000 --- a/src/services/apis/deepseek-api.mjs +++ /dev/null @@ -1,12 +0,0 @@ -import { generateAnswersWithOpenAiApiCompat } from './openai-api.mjs' - -/** - * @param {Browser.Runtime.Port} port - * @param {string} question - * @param {Session} session - * @param {string} apiKey - */ -export async function generateAnswersWithDeepSeekApi(port, question, session, apiKey) { - const baseUrl = 'https://api.deepseek.com' - return generateAnswersWithOpenAiApiCompat(baseUrl, port, question, session, apiKey) -} diff --git a/src/services/apis/moonshot-api.mjs b/src/services/apis/moonshot-api.mjs deleted file mode 100644 index 157c3ecd4..000000000 --- a/src/services/apis/moonshot-api.mjs +++ /dev/null @@ -1,12 +0,0 @@ -import { generateAnswersWithOpenAiApiCompat } from './openai-api.mjs' - -/** - * @param {Browser.Runtime.Port} 
port - * @param {string} question - * @param {Session} session - * @param {string} apiKey - */ -export async function generateAnswersWithMoonshotCompletionApi(port, question, session, apiKey) { - const baseUrl = 'https://api.moonshot.cn/v1' - return generateAnswersWithOpenAiApiCompat(baseUrl, port, question, session, apiKey) -} diff --git a/src/services/apis/ollama-api.mjs b/src/services/apis/ollama-api.mjs deleted file mode 100644 index 01634fc64..000000000 --- a/src/services/apis/ollama-api.mjs +++ /dev/null @@ -1,36 +0,0 @@ -import { getUserConfig } from '../../config/index.mjs' -import { generateAnswersWithOpenAiApiCompat } from './openai-api.mjs' -import { getModelValue } from '../../utils/model-name-convert.mjs' - -/** - * @param {Browser.Runtime.Port} port - * @param {string} question - * @param {Session} session - */ -export async function generateAnswersWithOllamaApi(port, question, session) { - const config = await getUserConfig() - const model = getModelValue(session) - return generateAnswersWithOpenAiApiCompat( - config.ollamaEndpoint + '/v1', - port, - question, - session, - config.ollamaApiKey, - ).then(() => - fetch(config.ollamaEndpoint + '/api/generate', { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - Authorization: `Bearer ${config.ollamaApiKey}`, - }, - body: JSON.stringify({ - model, - prompt: 't', - options: { - num_predict: 1, - }, - keep_alive: config.ollamaKeepAliveTime === '-1' ? 
-1 : config.ollamaKeepAliveTime, - }), - }), - ) -} diff --git a/src/services/apis/openai-api.mjs b/src/services/apis/openai-api.mjs index 0131d8f7d..d591fb2df 100644 --- a/src/services/apis/openai-api.mjs +++ b/src/services/apis/openai-api.mjs @@ -1,12 +1,160 @@ -// api version - import { getUserConfig } from '../../config/index.mjs' -import { fetchSSE } from '../../utils/fetch-sse.mjs' -import { getConversationPairs } from '../../utils/get-conversation-pairs.mjs' -import { isEmpty } from 'lodash-es' -import { getCompletionPromptBase, pushRecord, setAbortController } from './shared.mjs' import { getModelValue } from '../../utils/model-name-convert.mjs' -import { getChatCompletionsTokenParams } from './openai-token-params.mjs' +import { generateAnswersWithOpenAICompatible } from './openai-compatible-core.mjs' +import { resolveOpenAICompatibleRequest } from './provider-registry.mjs' + +function normalizeBaseUrl(baseUrl) { + return String(baseUrl || '') + .trim() + .replace(/\/+$/, '') +} + +function normalizeBaseUrlWithoutVersionSuffix(baseUrl, fallback) { + return normalizeBaseUrl(baseUrl || fallback).replace(/\/v1$/i, '') +} + +function resolveModelName(session, config) { + if (session.modelName === 'customModel' && !session.apiMode) { + return config.customModelName + } + if ( + session.apiMode?.groupName === 'customApiModelKeys' && + session.apiMode?.customName && + session.apiMode.customName.trim() + ) { + return session.apiMode.customName.trim() + } + return getModelValue(session) +} + +function hasNativeOpenAIRequestUrl(requestUrl) { + const normalizedRequestUrl = normalizeBaseUrl(requestUrl) + if (!normalizedRequestUrl) return false + try { + const parsedRequestUrl = new URL(normalizedRequestUrl) + const normalizedPathname = parsedRequestUrl.pathname.replace(/\/+$/, '') || '/' + return ( + parsedRequestUrl.hostname.toLowerCase() === 'api.openai.com' && + (normalizedPathname === '/v1/chat/completions' || normalizedPathname === '/v1/completions') + ) + } catch 
{ + return false + } +} + +function shouldUseOpenAIRequestShaping(request) { + if (request?.providerId === 'openai') return true + + const hasOpenAILineage = + request?.provider?.sourceProviderId === 'openai' || request?.secretProviderId === 'openai' + if (!hasOpenAILineage) return false + + return hasNativeOpenAIRequestUrl(request?.requestUrl) +} + +function resolveProviderRequestShapingId(request) { + if (shouldUseOpenAIRequestShaping(request)) return 'openai' + return request?.providerId +} + +function resolveOllamaKeepAliveBaseUrl(request) { + const requestUrl = normalizeBaseUrl(request?.requestUrl) + if (requestUrl) { + try { + const parsedRequestUrl = new URL(requestUrl) + parsedRequestUrl.search = '' + parsedRequestUrl.hash = '' + const normalizedRequestPathname = parsedRequestUrl.pathname.replace(/\/+$/, '') || '/' + let keepAlivePathname = normalizedRequestPathname + .replace(/\/chat\/completions$/i, '') + .replace(/\/completions$/i, '') + if (keepAlivePathname === normalizedRequestPathname) { + keepAlivePathname = normalizedRequestPathname.replace(/\/[^/]+$/, '') || '/' + keepAlivePathname = keepAlivePathname.replace(/\/api$/i, '') || '/' + } + parsedRequestUrl.pathname = keepAlivePathname + const normalizedRequestBaseUrl = normalizeBaseUrlWithoutVersionSuffix( + parsedRequestUrl.toString(), + '', + ) + if (normalizedRequestBaseUrl) return normalizedRequestBaseUrl + } catch { + // Fall through to provider baseUrl fallback. 
+ } + } + + return normalizeBaseUrlWithoutVersionSuffix(request?.provider?.baseUrl, 'http://127.0.0.1:11434') +} + +function hasOllamaNativeChatPath(requestUrl) { + const normalizedRequestUrl = normalizeBaseUrl(requestUrl) + if (!normalizedRequestUrl) return false + try { + const parsedRequestUrl = new URL(normalizedRequestUrl) + const normalizedPathname = parsedRequestUrl.pathname.replace(/\/+$/, '') || '/' + return ( + /(^|\/)api\/chat$/i.test(normalizedPathname) || + /(^|\/)v1\/messages$/i.test(normalizedPathname) + ) + } catch { + return false + } +} + +function hasOllamaCompatChatCompletionsPath(requestUrl) { + const normalizedRequestUrl = normalizeBaseUrl(requestUrl) + if (!normalizedRequestUrl) return false + try { + const parsedRequestUrl = new URL(normalizedRequestUrl) + const normalizedPathname = parsedRequestUrl.pathname.replace(/\/+$/, '') || '/' + return /(^|\/)v1\/chat\/completions$/i.test(normalizedPathname) + } catch { + return false + } +} + +function shouldSendOllamaKeepAlive(request) { + if (request.providerId === 'ollama') return true + if (request.secretProviderId === 'ollama') { + return hasOllamaNativeChatPath(request.requestUrl) + } + if (request.provider?.sourceProviderId !== 'ollama') return false + if (hasOllamaNativeChatPath(request.requestUrl)) return true + return hasOllamaCompatChatCompletionsPath(request.requestUrl) +} + +async function touchOllamaKeepAlive(ollamaBaseUrl, keepAliveTime, model, apiKey) { + const controller = new AbortController() + const timeoutId = setTimeout(() => controller.abort(), 5000) + + try { + const normalizedOllamaBaseUrl = normalizeBaseUrlWithoutVersionSuffix( + ollamaBaseUrl, + 'http://127.0.0.1:11434', + ) + return await fetch(`${normalizedOllamaBaseUrl}/api/generate`, { + method: 'POST', + signal: controller.signal, + headers: { + 'Content-Type': 'application/json', + ...(apiKey ? 
{ Authorization: `Bearer ${apiKey}` } : {}), + }, + body: JSON.stringify({ + model, + prompt: 't', + options: { + num_predict: 1, + }, + keep_alive: keepAliveTime === '-1' ? -1 : keepAliveTime, + }), + }) + } catch (error) { + if (error?.name === 'AbortError') return null + throw error + } finally { + clearTimeout(timeoutId) + } +} /** * @param {Browser.Runtime.Port} port @@ -15,78 +163,19 @@ import { getChatCompletionsTokenParams } from './openai-token-params.mjs' * @param {string} apiKey */ export async function generateAnswersWithGptCompletionApi(port, question, session, apiKey) { - const { controller, messageListener, disconnectListener } = setAbortController(port) - const model = getModelValue(session) - const config = await getUserConfig() - const prompt = - (await getCompletionPromptBase()) + - getConversationPairs( - session.conversationRecords.slice(-config.maxConversationContextLength), - true, - ) + - `Human: ${question}\nAI: ` - const apiUrl = config.customOpenAiApiUrl - - let answer = '' - let finished = false - const finish = () => { - finished = true - pushRecord(session, question, answer) - console.debug('conversation history', { content: session.conversationRecords }) - port.postMessage({ answer: null, done: true, session: session }) - } - await fetchSSE(`${apiUrl}/v1/completions`, { - method: 'POST', - signal: controller.signal, - headers: { - 'Content-Type': 'application/json', - Authorization: `Bearer ${apiKey}`, - }, - body: JSON.stringify({ - prompt: prompt, - model, - stream: true, - max_tokens: config.maxResponseTokenLength, - temperature: config.temperature, - stop: '\nHuman', - }), - onMessage(message) { - console.debug('sse message', message) - if (finished) return - if (message.trim() === '[DONE]') { - finish() - return - } - let data - try { - data = JSON.parse(message) - } catch (error) { - console.debug('json error', error) - return - } - - answer += data.choices[0].text - port.postMessage({ answer: answer, done: false, session: null 
})
-
-      if (data.choices[0]?.finish_reason) {
-        finish()
-        return
-      }
-    },
-    async onStart() {},
-    async onEnd() {
-      port.postMessage({ done: true })
-      port.onMessage.removeListener(messageListener)
-      port.onDisconnect.removeListener(disconnectListener)
-    },
-    async onError(resp) {
-      port.onMessage.removeListener(messageListener)
-      port.onDisconnect.removeListener(disconnectListener)
-      if (resp instanceof Error) throw resp
-      const error = await resp.json().catch(() => ({}))
-      throw new Error(!isEmpty(error) ? JSON.stringify(error) : `${resp.status} ${resp.statusText}`)
-    },
+  const openAiBaseUrl = normalizeBaseUrlWithoutVersionSuffix(
+    (await getUserConfig()).customOpenAiApiUrl,
+    'https://api.openai.com',
+  )
+  await generateAnswersWithOpenAICompatible({
+    port,
+    question,
+    session,
+    endpointType: 'completion',
+    requestUrl: `${openAiBaseUrl}/v1/completions`,
+    model: getModelValue(session),
+    apiKey,
   })
 }
 
@@ -98,8 +187,12 @@ export async function generateAnswersWithGptCompletionApi(port, question, sessio
  */
 export async function generateAnswersWithOpenAiApi(port, question, session, apiKey) {
   const config = await getUserConfig()
+  const openAiBaseUrl = normalizeBaseUrlWithoutVersionSuffix(
+    config.customOpenAiApiUrl,
+    'https://api.openai.com',
+  )
   return generateAnswersWithOpenAiApiCompat(
-    config.customOpenAiApiUrl + '/v1',
+    `${openAiBaseUrl}/v1`,
     port,
     question,
     session,
@@ -118,89 +211,55 @@ export async function generateAnswersWithOpenAiApiCompat(
   extraBody = {},
   provider = 'compat',
 ) {
-  const { controller, messageListener, disconnectListener } = setAbortController(port)
-  const model = getModelValue(session)
+  await generateAnswersWithOpenAICompatible({
+    port,
+    question,
+    session,
+    endpointType: 'chat',
+    requestUrl: `${normalizeBaseUrl(baseUrl)}/chat/completions`,
+    model: getModelValue(session),
+    apiKey,
+    extraBody,
+    provider,
+  })
+}
 
-  const config = await getUserConfig()
-  const prompt = getConversationPairs(
-    
session.conversationRecords.slice(-config.maxConversationContextLength), - false, - ) - prompt.push({ role: 'user', content: question }) - const tokenParams = getChatCompletionsTokenParams(provider, model, config.maxResponseTokenLength) - const conflictingTokenParamKey = - 'max_completion_tokens' in tokenParams ? 'max_tokens' : 'max_completion_tokens' - // Avoid sending both token-limit fields when caller passes extraBody. - const safeExtraBody = { ...extraBody } - delete safeExtraBody[conflictingTokenParamKey] - - let answer = '' - let finished = false - const finish = () => { - finished = true - pushRecord(session, question, answer) - console.debug('conversation history', { content: session.conversationRecords }) - port.postMessage({ answer: null, done: true, session: session }) +/** + * Unified entry point for OpenAI-compatible providers. + * @param {Browser.Runtime.Port} port + * @param {string} question + * @param {Session} session + * @param {UserConfig} config + */ +export async function generateAnswersWithOpenAICompatibleApi(port, question, session, config) { + const request = resolveOpenAICompatibleRequest(config, session) + if (!request) { + throw new Error('Unknown OpenAI-compatible provider configuration') } - await fetchSSE(`${baseUrl}/chat/completions`, { - method: 'POST', - signal: controller.signal, - headers: { - 'Content-Type': 'application/json', - Authorization: `Bearer ${apiKey}`, - }, - body: JSON.stringify({ - messages: prompt, - model, - stream: true, - ...tokenParams, - temperature: config.temperature, - ...safeExtraBody, - }), - onMessage(message) { - console.debug('sse message', message) - if (finished) return - if (message.trim() === '[DONE]') { - finish() - return - } - let data - try { - data = JSON.parse(message) - } catch (error) { - console.debug('json error', error) - return - } - const delta = data.choices[0]?.delta?.content - const content = data.choices[0]?.message?.content - const text = data.choices[0]?.text - if (delta !== 
undefined) { - answer += delta - } else if (content) { - answer = content - } else if (text) { - answer += text - } - port.postMessage({ answer: answer, done: false, session: null }) - - if (data.choices[0]?.finish_reason) { - finish() - return - } - }, - async onStart() {}, - async onEnd() { - port.postMessage({ done: true }) - port.onMessage.removeListener(messageListener) - port.onDisconnect.removeListener(disconnectListener) - }, - async onError(resp) { - port.onMessage.removeListener(messageListener) - port.onDisconnect.removeListener(disconnectListener) - if (resp instanceof Error) throw resp - const error = await resp.json().catch(() => ({})) - throw new Error(!isEmpty(error) ? JSON.stringify(error) : `${resp.status} ${resp.statusText}`) - }, + const model = resolveModelName(session, config) + const providerRequestShapingId = resolveProviderRequestShapingId(request) + await generateAnswersWithOpenAICompatible({ + port, + question, + session, + endpointType: request.endpointType, + requestUrl: request.requestUrl, + model, + apiKey: request.apiKey, + provider: providerRequestShapingId, + allowLegacyResponseField: request.provider.allowLegacyResponseField, }) + + if (shouldSendOllamaKeepAlive(request)) { + const ollamaKeepAliveBaseUrl = resolveOllamaKeepAliveBaseUrl(request) + await touchOllamaKeepAlive( + ollamaKeepAliveBaseUrl, + config.ollamaKeepAliveTime, + model, + request.apiKey, + ).catch((error) => { + console.warn('Ollama keep_alive request failed:', error) + }) + } } diff --git a/src/services/apis/openai-compatible-core.mjs b/src/services/apis/openai-compatible-core.mjs new file mode 100644 index 000000000..1188835e8 --- /dev/null +++ b/src/services/apis/openai-compatible-core.mjs @@ -0,0 +1,166 @@ +import { getUserConfig } from '../../config/index.mjs' +import { fetchSSE } from '../../utils/fetch-sse.mjs' +import { getConversationPairs } from '../../utils/get-conversation-pairs.mjs' +import { isEmpty } from 'lodash-es' +import { 
getCompletionPromptBase, pushRecord, setAbortController } from './shared.mjs' +import { getChatCompletionsTokenParams } from './openai-token-params.mjs' + +function buildHeaders(apiKey, extraHeaders = {}) { + const headers = { + 'Content-Type': 'application/json', + ...extraHeaders, + } + if (apiKey) headers.Authorization = `Bearer ${apiKey}` + return headers +} + +function buildMessageAnswer(answer, data, allowLegacyResponseField) { + if (allowLegacyResponseField && typeof data?.response === 'string' && data.response) { + return data.response + } + + const delta = data?.choices?.[0]?.delta?.content + const content = data?.choices?.[0]?.message?.content + const text = data?.choices?.[0]?.text + if (typeof delta === 'string') return answer + delta + if (content) return content + if (typeof text === 'string' && text) return answer + text + return answer +} + +function hasFinished(data) { + return Boolean(data?.choices?.[0]?.finish_reason) +} + +/** + * @param {object} params + * @param {Browser.Runtime.Port} params.port + * @param {string} params.question + * @param {Session} params.session + * @param {'chat'|'completion'} params.endpointType + * @param {string} params.requestUrl + * @param {string} params.model + * @param {string} params.apiKey + * @param {string} [params.provider] + * @param {Record} [params.extraBody] + * @param {Record} [params.extraHeaders] + * @param {boolean} [params.allowLegacyResponseField] + */ +export async function generateAnswersWithOpenAICompatible({ + port, + question, + session, + endpointType, + requestUrl, + model, + apiKey, + provider = 'compat', + extraBody = {}, + extraHeaders = {}, + allowLegacyResponseField = false, +}) { + const { controller, messageListener, disconnectListener } = setAbortController(port) + const config = await getUserConfig() + + let requestBody + const conversationRecords = Array.isArray(session.conversationRecords) + ? 
session.conversationRecords + : [] + session.conversationRecords = conversationRecords + if (endpointType === 'completion') { + const prompt = + (await getCompletionPromptBase()) + + getConversationPairs(conversationRecords.slice(-config.maxConversationContextLength), true) + + `Human: ${question}\nAI: ` + requestBody = { + prompt, + model, + stream: true, + max_tokens: config.maxResponseTokenLength, + temperature: config.temperature, + stop: '\nHuman', + ...extraBody, + } + } else { + const messages = getConversationPairs( + conversationRecords.slice(-config.maxConversationContextLength), + false, + ) + messages.push({ role: 'user', content: question }) + const tokenParams = getChatCompletionsTokenParams( + provider, + model, + config.maxResponseTokenLength, + ) + const conflictingTokenParamKey = + 'max_completion_tokens' in tokenParams ? 'max_tokens' : 'max_completion_tokens' + const safeExtraBody = { ...extraBody } + delete safeExtraBody[conflictingTokenParamKey] + requestBody = { + messages, + model, + stream: true, + ...tokenParams, + temperature: config.temperature, + ...safeExtraBody, + } + } + + let answer = '' + let finished = false + const finish = () => { + if (finished) return + finished = true + if (answer !== '') { + pushRecord(session, question, answer) + console.debug('conversation history', { content: session.conversationRecords }) + } + port.postMessage({ answer: null, done: true, session: session }) + } + + await fetchSSE(requestUrl, { + method: 'POST', + signal: controller.signal, + headers: buildHeaders(apiKey, extraHeaders), + body: JSON.stringify(requestBody), + onMessage(message) { + console.debug('sse message', message) + if (finished) return + if (message.trim() === '[DONE]') { + finish() + return + } + let data + try { + data = JSON.parse(message) + } catch (error) { + console.debug('json error', error) + return + } + + answer = buildMessageAnswer(answer, data, allowLegacyResponseField) + port.postMessage({ answer: answer, done: false, 
session: null }) + + if (hasFinished(data)) { + finish() + } + }, + async onStart() {}, + async onEnd() { + if (!finished && answer !== '') { + finish() + } else if (!finished) { + port.postMessage({ answer: null, done: true, session: session }) + } + port.onMessage.removeListener(messageListener) + port.onDisconnect.removeListener(disconnectListener) + }, + async onError(resp) { + port.onMessage.removeListener(messageListener) + port.onDisconnect.removeListener(disconnectListener) + if (resp instanceof Error) throw resp + const error = await resp.json().catch(() => ({})) + throw new Error(!isEmpty(error) ? JSON.stringify(error) : `${resp.status} ${resp.statusText}`) + }, + }) +} diff --git a/src/services/apis/openrouter-api.mjs b/src/services/apis/openrouter-api.mjs deleted file mode 100644 index f514e1e29..000000000 --- a/src/services/apis/openrouter-api.mjs +++ /dev/null @@ -1,12 +0,0 @@ -import { generateAnswersWithOpenAiApiCompat } from './openai-api.mjs' - -/** - * @param {Browser.Runtime.Port} port - * @param {string} question - * @param {Session} session - * @param {string} apiKey - */ -export async function generateAnswersWithOpenRouterApi(port, question, session, apiKey) { - const baseUrl = 'https://openrouter.ai/api/v1' - return generateAnswersWithOpenAiApiCompat(baseUrl, port, question, session, apiKey) -} diff --git a/src/services/apis/provider-registry.mjs b/src/services/apis/provider-registry.mjs new file mode 100644 index 000000000..2ac41dd93 --- /dev/null +++ b/src/services/apis/provider-registry.mjs @@ -0,0 +1,750 @@ +import { + LEGACY_API_KEY_FIELD_BY_PROVIDER_ID, + OPENAI_COMPATIBLE_GROUP_TO_PROVIDER_ID, +} from '../../config/openai-provider-mappings.mjs' + +export { OPENAI_COMPATIBLE_GROUP_TO_PROVIDER_ID } + +const DEFAULT_CHAT_PATH = '/v1/chat/completions' +const DEFAULT_COMPLETION_PATH = '/v1/completions' + +const BUILTIN_PROVIDER_TEMPLATE = [ + { + id: 'openai', + name: 'OpenAI', + chatCompletionsPath: '/v1/chat/completions', + 
completionsPath: '/v1/completions', + builtin: true, + enabled: true, + }, + { + id: 'deepseek', + name: 'DeepSeek', + baseUrl: 'https://api.deepseek.com', + chatCompletionsPath: '/chat/completions', + completionsPath: '/completions', + builtin: true, + enabled: true, + }, + { + id: 'moonshot', + name: 'Kimi.Moonshot', + baseUrl: 'https://api.moonshot.cn/v1', + chatCompletionsPath: '/chat/completions', + completionsPath: '/completions', + builtin: true, + enabled: true, + }, + { + id: 'openrouter', + name: 'OpenRouter', + baseUrl: 'https://openrouter.ai/api/v1', + chatCompletionsPath: '/chat/completions', + completionsPath: '/completions', + builtin: true, + enabled: true, + }, + { + id: 'aiml', + name: 'AI/ML', + baseUrl: 'https://api.aimlapi.com/v1', + chatCompletionsPath: '/chat/completions', + completionsPath: '/completions', + builtin: true, + enabled: true, + }, + { + id: 'chatglm', + name: 'ChatGLM', + baseUrl: 'https://open.bigmodel.cn/api/paas/v4', + chatCompletionsPath: '/chat/completions', + completionsPath: '/completions', + builtin: true, + enabled: true, + }, + { + id: 'ollama', + name: 'Ollama', + chatCompletionsPath: '/chat/completions', + completionsPath: '/completions', + builtin: true, + enabled: true, + }, + { + id: 'legacy-custom-default', + name: 'Custom Model (Legacy)', + chatCompletionsPath: '/chat/completions', + completionsPath: '/completions', + builtin: true, + enabled: true, + allowLegacyResponseField: true, + }, +] + +function getModelNamePresetPart(modelName) { + const value = toStringOrEmpty(modelName) + const separatorIndex = value.indexOf('-') + return separatorIndex === -1 ? 
value : value.substring(0, separatorIndex) +} + +function resolveProviderIdFromLegacyModelName(modelName) { + const rawModelName = toStringOrEmpty(modelName) + if (!rawModelName) return null + if (rawModelName === 'customModel') return 'legacy-custom-default' + + const preset = getModelNamePresetPart(rawModelName) + + if ( + preset === 'gptApiInstruct' || + preset.startsWith('chatgptApi') || + preset === 'gptApiModelKeys' + ) { + return 'openai' + } + if (preset.startsWith('deepseek_') || preset === 'deepSeekApiModelKeys') return 'deepseek' + if (preset.startsWith('moonshot_') || preset === 'moonshotApiModelKeys') return 'moonshot' + if (preset.startsWith('openRouter_') || preset === 'openRouterApiModelKeys') return 'openrouter' + if (preset.startsWith('aiml_') || preset === 'aimlModelKeys' || preset === 'aimlApiModelKeys') { + return 'aiml' + } + if (preset === 'ollama' || preset === 'ollamaModel' || preset === 'ollamaApiModelKeys') { + return 'ollama' + } + if (preset.startsWith('chatglm') || preset === 'chatglmApiModelKeys') return 'chatglm' + if (preset === 'customApiModelKeys') return 'legacy-custom-default' + + return null +} + +function isLegacyCompletionModelName(modelName) { + const preset = getModelNamePresetPart(modelName) + return preset === 'gptApiInstruct' || preset === 'gptApiModelKeys' +} + +function toStringOrEmpty(value) { + return typeof value === 'string' ? 
value : '' +} + +function normalizeProviderId(value) { + return toStringOrEmpty(value) + .trim() + .toLowerCase() + .replace(/[^a-z0-9]+/g, '-') + .replace(/^-+|-+$/g, '') +} + +function normalizeEndpointUrlForCompare(value) { + return toStringOrEmpty(value).trim().replace(/\/+$/, '') +} + +function normalizeStableCustomApiModeIdentity(apiMode, providerId = apiMode?.providerId) { + if (!apiMode || typeof apiMode !== 'object') return null + return { + groupName: toStringOrEmpty(apiMode.groupName).trim(), + itemName: toStringOrEmpty(apiMode.itemName).trim(), + isCustom: Boolean(apiMode.isCustom), + customName: toStringOrEmpty(apiMode.customName).trim(), + providerId: normalizeProviderId(providerId), + } +} + +function getConfiguredCustomApiModes(config) { + const customApiModes = Array.isArray(config?.customApiModes) ? config.customApiModes : [] + const selectedApiMode = + config?.apiMode && typeof config.apiMode === 'object' ? [config.apiMode] : [] + const seen = new Set() + return [...customApiModes, ...selectedApiMode] + .filter((apiMode) => apiMode?.groupName === 'customApiModelKeys') + .filter((apiMode) => { + const signature = JSON.stringify({ + groupName: toStringOrEmpty(apiMode.groupName).trim(), + itemName: toStringOrEmpty(apiMode.itemName).trim(), + isCustom: Boolean(apiMode.isCustom), + customName: toStringOrEmpty(apiMode.customName).trim(), + providerId: normalizeProviderId(apiMode.providerId), + }) + if (seen.has(signature)) return false + seen.add(signature) + return true + }) +} + +function getConfiguredCustomApiModesForProvider(config, providerId) { + const normalizedProviderId = normalizeProviderId(providerId) + if (!normalizedProviderId) return [] + return getConfiguredCustomApiModes(config).filter( + (apiMode) => normalizeProviderId(apiMode?.providerId) === normalizedProviderId, + ) +} + +function findConfiguredCustomApiMode(config, sessionApiMode, providerId) { + const normalizedProviderId = normalizeProviderId(providerId) + const 
normalizedSessionApiMode = normalizeStableCustomApiModeIdentity(sessionApiMode, providerId) + if (!normalizedSessionApiMode || normalizedSessionApiMode.groupName !== 'customApiModelKeys') { + return null + } + + const providerCandidates = getConfiguredCustomApiModesForProvider(config, normalizedProviderId) + const exactCandidates = providerCandidates.filter((apiMode) => { + const normalizedCandidate = normalizeStableCustomApiModeIdentity(apiMode) + return ( + normalizedCandidate && + JSON.stringify(normalizedCandidate) === JSON.stringify(normalizedSessionApiMode) + ) + }) + + if (exactCandidates.length === 1) return exactCandidates[0] + return null +} + +function findConfiguredCustomApiModeBySessionLabel(config, sessionApiMode) { + if (!sessionApiMode || typeof sessionApiMode !== 'object') return null + if (toStringOrEmpty(sessionApiMode.groupName).trim() !== 'customApiModelKeys') return null + + const normalizedSessionLabel = { + groupName: toStringOrEmpty(sessionApiMode.groupName).trim(), + itemName: toStringOrEmpty(sessionApiMode.itemName).trim(), + isCustom: Boolean(sessionApiMode.isCustom), + customName: toStringOrEmpty(sessionApiMode.customName).trim(), + } + const allCandidates = getConfiguredCustomApiModes(config).filter((apiMode) => { + if (!apiMode || typeof apiMode !== 'object') return false + return ( + toStringOrEmpty(apiMode.groupName).trim() === normalizedSessionLabel.groupName && + toStringOrEmpty(apiMode.customName).trim() === normalizedSessionLabel.customName + ) + }) + + const exactCandidates = allCandidates.filter( + (apiMode) => + toStringOrEmpty(apiMode.itemName).trim() === normalizedSessionLabel.itemName && + Boolean(apiMode.isCustom) === normalizedSessionLabel.isCustom, + ) + if (exactCandidates.length === 1) return exactCandidates[0] + + const isLegacyCustomShape = !normalizedSessionLabel.itemName + if (isLegacyCustomShape && allCandidates.length === 1) return allCandidates[0] + + return null +} + +function trimSlashes(value) { + return 
toStringOrEmpty(value).trim().replace(/\/+$/, '') +} + +function normalizeBaseUrlWithoutVersionSuffix(value, fallback) { + return trimSlashes(value || fallback).replace(/\/v1$/i, '') +} + +function ensureLeadingSlash(value, fallback) { + const raw = toStringOrEmpty(value).trim() + if (!raw) return fallback + return raw.startsWith('/') ? raw : `/${raw}` +} + +function joinUrl(baseUrl, path) { + if (!baseUrl) return '' + return `${trimSlashes(baseUrl)}${ensureLeadingSlash(path, '')}` +} + +function buildBuiltinProviders(config) { + return BUILTIN_PROVIDER_TEMPLATE.map((provider) => { + if (provider.id === 'openai') { + const baseUrl = normalizeBaseUrlWithoutVersionSuffix( + config.customOpenAiApiUrl, + 'https://api.openai.com', + ) + return { + ...provider, + baseUrl, + } + } + if (provider.id === 'ollama') { + const baseUrl = normalizeBaseUrlWithoutVersionSuffix( + config.ollamaEndpoint, + 'http://127.0.0.1:11434', + ) + return { + ...provider, + baseUrl: `${baseUrl}/v1`, + } + } + if (provider.id === 'legacy-custom-default') { + return { + ...provider, + chatCompletionsUrl: + toStringOrEmpty(config.customModelApiUrl).trim() || + 'http://localhost:8000/v1/chat/completions', + } + } + return provider + }) +} + +function normalizeCustomProvider(provider, index) { + if (!provider || typeof provider !== 'object') return null + const id = toStringOrEmpty(provider.id).trim() || `custom-provider-${index + 1}` + const sourceProviderId = normalizeProviderId(provider.sourceProviderId) + const chatCompletionsPath = ensureLeadingSlash(provider.chatCompletionsPath, DEFAULT_CHAT_PATH) + const completionsPath = ensureLeadingSlash(provider.completionsPath, DEFAULT_COMPLETION_PATH) + const chatCompletionsUrl = toStringOrEmpty(provider.chatCompletionsUrl).trim() + const completionsUrl = toStringOrEmpty(provider.completionsUrl).trim() + let baseUrl = trimSlashes(provider.baseUrl) + + if (!chatCompletionsUrl && !completionsUrl) { + const usesDefaultV1Paths = + chatCompletionsPath === 
DEFAULT_CHAT_PATH && completionsPath === DEFAULT_COMPLETION_PATH + if (usesDefaultV1Paths) { + baseUrl = normalizeBaseUrlWithoutVersionSuffix(baseUrl, '') + } + } + return { + id, + name: toStringOrEmpty(provider.name).trim() || `Custom Provider ${index + 1}`, + baseUrl, + chatCompletionsPath, + completionsPath, + chatCompletionsUrl, + completionsUrl, + builtin: false, + enabled: provider.enabled !== false, + allowLegacyResponseField: provider.allowLegacyResponseField !== false, + ...(sourceProviderId ? { sourceProviderId } : {}), + } +} + +export function getCustomOpenAIProviders(config) { + const providers = Array.isArray(config.customOpenAIProviders) ? config.customOpenAIProviders : [] + return providers + .map((provider, index) => normalizeCustomProvider(provider, index)) + .filter((provider) => provider) +} + +export function getAllOpenAIProviders(config) { + const customProviders = getCustomOpenAIProviders(config) + return [...buildBuiltinProviders(config), ...customProviders] +} + +export function resolveProviderIdForSession(session) { + const apiMode = session?.apiMode + if (apiMode && typeof apiMode === 'object') { + const apiModeProviderId = toStringOrEmpty(apiMode.providerId).trim() + if (apiMode.groupName === 'customApiModelKeys' && apiModeProviderId) return apiModeProviderId + if (apiMode.groupName) { + const mappedProviderId = OPENAI_COMPATIBLE_GROUP_TO_PROVIDER_ID[apiMode.groupName] + if (mappedProviderId) return mappedProviderId + } + if (apiModeProviderId) return apiModeProviderId + } + if (session?.modelName === 'customModel') return 'legacy-custom-default' + const fromLegacyModelName = resolveProviderIdFromLegacyModelName(session?.modelName) + if (fromLegacyModelName) return fromLegacyModelName + return null +} + +export function resolveEndpointTypeForSession(session) { + const apiMode = session?.apiMode + if (apiMode && typeof apiMode === 'object') { + return apiMode.groupName === 'gptApiModelKeys' ? 
'completion' : 'chat' + } + return isLegacyCompletionModelName(session?.modelName) ? 'completion' : 'chat' +} + +export function getProviderById(config, providerId) { + if (!providerId) return null + const provider = getAllOpenAIProviders(config).find((item) => item.id === providerId) + if (!provider) return null + if (provider.enabled === false) return null + return provider +} + +function getConfiguredProviderSecret(config, providerId) { + if (!providerId) return '' + const hasProviderSecretsMap = + config?.providerSecrets && typeof config.providerSecrets === 'object' + if (hasProviderSecretsMap && Object.hasOwn(config.providerSecrets, providerId)) { + return toStringOrEmpty(config.providerSecrets[providerId]).trim() + } + const legacyKey = LEGACY_API_KEY_FIELD_BY_PROVIDER_ID[providerId] + return legacyKey ? toStringOrEmpty(config?.[legacyKey]).trim() : '' +} + +function hasConfiguredProviderSecretEntry(config, providerId) { + return Boolean( + providerId && + config?.providerSecrets && + typeof config.providerSecrets === 'object' && + Object.hasOwn(config.providerSecrets, providerId), + ) +} + +export function getProviderSecret(config, providerId, session) { + if (!providerId) return '' + const normalizedProviderId = normalizeProviderId(providerId) + const apiModeApiKey = + session?.apiMode && typeof session.apiMode === 'object' + ? 
toStringOrEmpty(session.apiMode.apiKey).trim() + : '' + const hasConfiguredSecretEntry = hasConfiguredProviderSecretEntry(config, normalizedProviderId) + const configuredSecret = getConfiguredProviderSecret(config, normalizedProviderId) + if (session?.apiMode?.groupName === 'customApiModelKeys') { + const configuredCustomApiMode = findConfiguredCustomApiMode( + config, + session.apiMode, + normalizedProviderId, + ) + if (configuredCustomApiMode) { + const configuredModeApiKey = toStringOrEmpty(configuredCustomApiMode.apiKey).trim() + if (configuredModeApiKey) return configuredModeApiKey + if (configuredSecret || hasConfiguredSecretEntry) return configuredSecret + return apiModeApiKey + } + const providerCandidates = getConfiguredCustomApiModesForProvider(config, normalizedProviderId) + if (providerCandidates.length > 0) { + const hasAnyModeSpecificKey = providerCandidates.some((apiMode) => + toStringOrEmpty(apiMode.apiKey).trim(), + ) + if (!hasAnyModeSpecificKey && (configuredSecret || hasConfiguredSecretEntry)) { + return configuredSecret + } + } + if (apiModeApiKey) return apiModeApiKey + return configuredSecret + } + if (configuredSecret || hasConfiguredSecretEntry) return configuredSecret + + return apiModeApiKey +} + +function resolveRecoveredCustomUrlApiKey(config, recoveredProviderId, fallbackProviderId, session) { + const normalizedRecoveredProviderId = normalizeProviderId(recoveredProviderId) + const normalizedFallbackProviderId = normalizeProviderId(fallbackProviderId) + if (!normalizedRecoveredProviderId) return '' + const sessionApiKey = toStringOrEmpty(session?.apiMode?.apiKey).trim() + const recoveredProvider = getProviderById(config, normalizedRecoveredProviderId) + const shouldIgnoreRecoveredConfiguredSecret = + session?.apiMode?.groupName === 'customApiModelKeys' && + normalizedFallbackProviderId === 'legacy-custom-default' && + recoveredProvider?.builtin === true + if ( + !normalizedFallbackProviderId || + normalizedFallbackProviderId === 
normalizedRecoveredProviderId + ) { + return getProviderSecret(config, normalizedRecoveredProviderId, session) + } + + const hasRecoveredConfiguredSecretEntry = hasConfiguredProviderSecretEntry( + config, + normalizedRecoveredProviderId, + ) + const recoveredConfiguredSecret = getConfiguredProviderSecret( + config, + normalizedRecoveredProviderId, + ) + if (shouldIgnoreRecoveredConfiguredSecret) { + const configuredCustomApiMode = findConfiguredCustomApiMode( + config, + session?.apiMode, + normalizedRecoveredProviderId, + ) + const configuredModeApiKey = toStringOrEmpty(configuredCustomApiMode?.apiKey).trim() + if (configuredModeApiKey) return configuredModeApiKey + if (hasRecoveredConfiguredSecretEntry) return recoveredConfiguredSecret + if (sessionApiKey) return sessionApiKey + return getProviderSecret(config, normalizedFallbackProviderId, session) + } + + const recoveredSecret = getProviderSecret(config, normalizedRecoveredProviderId, session) + if (recoveredSecret) return recoveredSecret + if (hasRecoveredConfiguredSecretEntry) return recoveredSecret + if (sessionApiKey) return sessionApiKey + + return getProviderSecret(config, normalizedFallbackProviderId, session) +} + +function getCustomProvidersMatchedByLegacySessionUrl(customProviders, session) { + const customUrl = normalizeEndpointUrlForCompare(session?.apiMode?.customUrl) + if (!customUrl) return [] + + return customProviders.filter((item) => { + if (item.enabled === false) return false + + const directChatCompletionsUrl = normalizeEndpointUrlForCompare(item.chatCompletionsUrl) + if (directChatCompletionsUrl && directChatCompletionsUrl === customUrl) return true + if (directChatCompletionsUrl) return false + + const derivedChatCompletionsUrl = + item.baseUrl && item.chatCompletionsPath + ? 
normalizeEndpointUrlForCompare(joinUrl(item.baseUrl, item.chatCompletionsPath)) + : '' + + return derivedChatCompletionsUrl && derivedChatCompletionsUrl === customUrl + }) +} + +function resolveCustomProviderByLegacySessionUrl(customProviders, config, session) { + const matchedProviders = getCustomProvidersMatchedByLegacySessionUrl(customProviders, session) + if (matchedProviders.length <= 1) { + return matchedProviders[0] || null + } + + const sessionApiKey = toStringOrEmpty(session?.apiMode?.apiKey).trim() + if (sessionApiKey) { + const matchedBySessionKey = matchedProviders.filter( + (item) => getConfiguredProviderSecret(config, item.id) === sessionApiKey, + ) + if (matchedBySessionKey.length === 1) { + return matchedBySessionKey[0] + } + } + + return null +} + +function hasAmbiguousCustomProviderMatchByLegacySessionUrl(customProviders, config, session) { + const matchedProviders = getCustomProvidersMatchedByLegacySessionUrl(customProviders, session) + if (matchedProviders.length <= 1) return false + + const sessionApiKey = toStringOrEmpty(session?.apiMode?.apiKey).trim() + if (!sessionApiKey) return true + + const matchedBySessionKey = matchedProviders.filter( + (item) => getConfiguredProviderSecret(config, item.id) === sessionApiKey, + ) + return matchedBySessionKey.length !== 1 +} + +function hasDisabledCustomProviderMatchByLegacySessionUrl(customProviders, session) { + const customUrl = normalizeEndpointUrlForCompare(session?.apiMode?.customUrl) + if (!customUrl) return false + + return customProviders.some((item) => { + if (item.enabled !== false) return false + + const directChatCompletionsUrl = normalizeEndpointUrlForCompare(item.chatCompletionsUrl) + if (directChatCompletionsUrl && directChatCompletionsUrl === customUrl) return true + if (directChatCompletionsUrl) return false + + const derivedChatCompletionsUrl = + item.baseUrl && item.chatCompletionsPath + ? 
normalizeEndpointUrlForCompare(joinUrl(item.baseUrl, item.chatCompletionsPath)) + : '' + + return derivedChatCompletionsUrl && derivedChatCompletionsUrl === customUrl + }) +} + +function hasEnabledCustomProviderMatchByLegacySessionUrl(customProviders, session) { + const customUrl = normalizeEndpointUrlForCompare(session?.apiMode?.customUrl) + if (!customUrl) return false + + return customProviders.some((item) => { + if (item.enabled === false) return false + + const directChatCompletionsUrl = normalizeEndpointUrlForCompare(item.chatCompletionsUrl) + if (directChatCompletionsUrl && directChatCompletionsUrl === customUrl) return true + if (directChatCompletionsUrl) return false + + const derivedChatCompletionsUrl = + item.baseUrl && item.chatCompletionsPath + ? normalizeEndpointUrlForCompare(joinUrl(item.baseUrl, item.chatCompletionsPath)) + : '' + + return derivedChatCompletionsUrl && derivedChatCompletionsUrl === customUrl + }) +} + +function resolveUrlFromProvider( + provider, + endpointType, + config, + session, + useLegacyCustomUrlFallback = false, +) { + if (!provider) return '' + + const apiModeCustomUrl = + endpointType === 'chat' && + session?.apiMode && + typeof session.apiMode === 'object' && + session.apiMode.groupName === 'customApiModelKeys' && + useLegacyCustomUrlFallback + ? 
toStringOrEmpty(session.apiMode.customUrl).trim() + : '' + if (apiModeCustomUrl) return apiModeCustomUrl + + if (endpointType === 'completion') { + if (provider.completionsUrl) return provider.completionsUrl + if (provider.baseUrl && provider.completionsPath) { + return joinUrl(provider.baseUrl, provider.completionsPath) + } + } else { + if (provider.chatCompletionsUrl) return provider.chatCompletionsUrl + if (provider.baseUrl && provider.chatCompletionsPath) { + return joinUrl(provider.baseUrl, provider.chatCompletionsPath) + } + } + + if (provider.id === 'legacy-custom-default') { + if (endpointType === 'completion') { + const baseUrl = normalizeBaseUrlWithoutVersionSuffix( + config.customOpenAiApiUrl, + 'https://api.openai.com', + ) + return `${baseUrl}/v1/completions` + } + return ( + toStringOrEmpty(config.customModelApiUrl).trim() || + 'http://localhost:8000/v1/chat/completions' + ) + } + + return '' +} + +export function resolveOpenAICompatibleRequest(config, session) { + const providerId = resolveProviderIdForSession(session) + if (!providerId) return null + let resolvedProviderId = providerId + let provider = null + let useLegacyCustomUrlFallback = false + let recoveredProviderId = '' + if (session?.apiMode?.groupName === 'customApiModelKeys') { + const customProviders = getCustomOpenAIProviders(config) + const hasAmbiguousLegacyCustomUrlMatch = hasAmbiguousCustomProviderMatchByLegacySessionUrl( + customProviders, + config, + session, + ) + const matchedByProviderId = customProviders.find( + (item) => item.enabled !== false && item.id === providerId, + ) + if (matchedByProviderId) { + provider = matchedByProviderId + resolvedProviderId = matchedByProviderId.id + } + const normalizedProviderId = normalizeProviderId(providerId) + if (!provider && normalizedProviderId) { + const matchedByNormalizedProviderId = customProviders.find( + (item) => item.enabled !== false && item.id === normalizedProviderId, + ) + if (matchedByNormalizedProviderId) { + provider = 
matchedByNormalizedProviderId + resolvedProviderId = matchedByNormalizedProviderId.id + } + } + if (!provider && !hasAmbiguousLegacyCustomUrlMatch) { + const matchedByCustomUrl = resolveCustomProviderByLegacySessionUrl( + customProviders, + config, + session, + ) + if (matchedByCustomUrl) { + provider = matchedByCustomUrl + resolvedProviderId = matchedByCustomUrl.id + } + } + if (!provider) { + const matchedConfiguredApiMode = findConfiguredCustomApiModeBySessionLabel( + config, + session?.apiMode, + ) + const matchedConfiguredProviderId = normalizeProviderId(matchedConfiguredApiMode?.providerId) + if (matchedConfiguredProviderId) { + const matchedConfiguredProvider = customProviders.find( + (item) => item.enabled !== false && item.id === matchedConfiguredProviderId, + ) + if (matchedConfiguredProvider) { + provider = matchedConfiguredProvider + resolvedProviderId = matchedConfiguredProvider.id + } else if (matchedConfiguredProviderId === 'legacy-custom-default') { + provider = getProviderById(config, matchedConfiguredProviderId) + if (provider) { + resolvedProviderId = matchedConfiguredProviderId + } + } + } + } + if (!provider && hasAmbiguousLegacyCustomUrlMatch) { + const matchedByCustomUrl = resolveCustomProviderByLegacySessionUrl( + customProviders, + config, + session, + ) + if (matchedByCustomUrl) { + provider = matchedByCustomUrl + resolvedProviderId = matchedByCustomUrl.id + } + } + if (!provider) { + const normalizedProviderId = normalizeProviderId(providerId) + const hasDisabledCustomProviderMatch = Array.isArray(config?.customOpenAIProviders) + ? 
config.customOpenAIProviders.some( + (item) => + normalizeProviderId(item?.id) === normalizedProviderId && item?.enabled === false, + ) + : false + const hasDisabledCustomProviderMatchByUrl = hasDisabledCustomProviderMatchByLegacySessionUrl( + customProviders, + session, + ) + const hasEnabledCustomProviderMatchByUrl = hasEnabledCustomProviderMatchByLegacySessionUrl( + customProviders, + session, + ) + const matchedBuiltinProvider = normalizedProviderId + ? getProviderById(config, normalizedProviderId) + : null + const canRecoverByLegacyCustomUrl = + resolveEndpointTypeForSession(session) === 'chat' && + toStringOrEmpty(session?.apiMode?.customUrl).trim() && + !hasDisabledCustomProviderMatch && + !hasDisabledCustomProviderMatchByUrl && + !hasEnabledCustomProviderMatchByUrl && + (matchedBuiltinProvider?.builtin !== true || + session?.apiMode?.groupName === 'customApiModelKeys') + if (normalizeProviderId(providerId) === 'legacy-custom-default') { + provider = getProviderById(config, providerId) + if ( + provider && + resolveEndpointTypeForSession(session) === 'chat' && + toStringOrEmpty(session?.apiMode?.customUrl).trim() + ) { + useLegacyCustomUrlFallback = true + } + } else if (canRecoverByLegacyCustomUrl) { + provider = getProviderById(config, 'legacy-custom-default') + if (provider) { + recoveredProviderId = resolvedProviderId + resolvedProviderId = 'legacy-custom-default' + useLegacyCustomUrlFallback = true + } + } else { + return null + } + } + } + if (!provider) { + provider = getProviderById(config, providerId) + } + if (!provider) return null + const endpointType = resolveEndpointTypeForSession(session) + const requestUrl = resolveUrlFromProvider( + provider, + endpointType, + config, + session, + useLegacyCustomUrlFallback, + ) + if (!requestUrl) return null + return { + providerId: resolvedProviderId, + secretProviderId: normalizeProviderId(recoveredProviderId || resolvedProviderId), + provider, + endpointType, + requestUrl, + apiKey: recoveredProviderId + 
? resolveRecoveredCustomUrlApiKey(config, recoveredProviderId, resolvedProviderId, session) + : getProviderSecret(config, resolvedProviderId, session), + } +} diff --git a/src/services/init-session.mjs b/src/services/init-session.mjs index 999d3165a..fac630a3f 100644 --- a/src/services/init-session.mjs +++ b/src/services/init-session.mjs @@ -1,5 +1,9 @@ import { v4 as uuidv4 } from 'uuid' -import { apiModeToModelName, modelNameToDesc } from '../utils/model-name-convert.mjs' +import { + apiModeToModelName, + modelNameToDesc, + normalizeApiMode, +} from '../utils/model-name-convert.mjs' import { t } from 'i18next' /** @@ -68,7 +72,7 @@ export function initSession({ ) : null, modelName, - apiMode, + apiMode: normalizeApiMode(apiMode), autoClean, isRetry: false, diff --git a/src/services/wrappers.mjs b/src/services/wrappers.mjs index c828f9038..aff0f2a5e 100644 --- a/src/services/wrappers.mjs +++ b/src/services/wrappers.mjs @@ -7,7 +7,11 @@ import { } from '../config/index.mjs' import Browser from 'webextension-polyfill' import { t } from 'i18next' -import { apiModeToModelName, modelNameToDesc } from '../utils/model-name-convert.mjs' +import { + apiModeToModelName, + modelNameToDesc, + normalizeApiMode, +} from '../utils/model-name-convert.mjs' export async function getChatGptAccessToken() { await clearOldAccessToken() @@ -103,6 +107,7 @@ export function registerPortListener(executor) { const config = await getUserConfig() if (!session.modelName) session.modelName = config.modelName if (!session.apiMode && session.modelName !== 'customModel') session.apiMode = config.apiMode + if (session.apiMode) session.apiMode = normalizeApiMode(session.apiMode) if (!session.aiName) session.aiName = modelNameToDesc( session.apiMode ? 
apiModeToModelName(session.apiMode) : session.modelName, diff --git a/src/utils/model-name-convert.mjs b/src/utils/model-name-convert.mjs index 04eeb4226..fcbecd786 100644 --- a/src/utils/model-name-convert.mjs +++ b/src/utils/model-name-convert.mjs @@ -76,12 +76,30 @@ export function modelNameToApiMode(modelName) { customName, customUrl: '', apiKey: '', + providerId: '', active: true, } } } +export function normalizeApiMode(apiMode) { + if (!apiMode || typeof apiMode !== 'object') return null + return { + ...apiMode, + groupName: apiMode.groupName || '', + itemName: apiMode.itemName || '', + isCustom: Boolean(apiMode.isCustom), + customName: apiMode.customName || '', + customUrl: apiMode.customUrl || '', + apiKey: apiMode.apiKey || '', + providerId: typeof apiMode.providerId === 'string' ? apiMode.providerId.trim() : '', + active: apiMode.active !== false, + } +} + export function apiModeToModelName(apiMode) { + apiMode = normalizeApiMode(apiMode) + if (!apiMode) return '' if (AlwaysCustomGroups.includes(apiMode.groupName)) return apiMode.groupName + '-' + apiMode.customName @@ -93,29 +111,111 @@ export function apiModeToModelName(apiMode) { return apiMode.itemName } +function resolveCanonicalActiveApiModeInfo(modelName, config) { + let normalizedModelName = modelName + if (normalizedModelName === 'azureOpenAi' && config.azureDeploymentName) + normalizedModelName += '-' + config.azureDeploymentName + if ( + (normalizedModelName === 'ollama' || normalizedModelName === 'ollamaModel') && + config.ollamaModelName + ) { + normalizedModelName = 'ollamaModel-' + config.ollamaModelName + } + const normalizedApiMode = modelNameToApiMode(normalizedModelName) + const canonicalModelName = normalizedApiMode + ? 
apiModeToModelName(normalizedApiMode) + : normalizedModelName + return { normalizedApiMode, canonicalModelName } +} + export function getApiModesFromConfig(config, onlyActive) { - const stringApiModes = config.customApiModes - .map((apiMode) => { - if (onlyActive) { - if (apiMode.active) return apiModeToModelName(apiMode) - } else return apiModeToModelName(apiMode) - return false + const normalizedCustomApiModes = ( + Array.isArray(config.customApiModes) ? config.customApiModes : [] + ) + .map((apiMode) => normalizeApiMode(apiMode)) + .filter((apiMode) => { + if (!apiMode || !apiMode.groupName) return false + if (AlwaysCustomGroups.includes(apiMode.groupName)) { + return Boolean(apiMode.customName && apiMode.customName.trim()) + } + return Boolean(apiMode.itemName) }) - .filter((apiMode) => apiMode) - const originalApiModes = config.activeApiModes + const activeApiModes = Array.isArray(config.activeApiModes) ? config.activeApiModes : [] + const customApiModeIndexesByCanonicalModelName = normalizedCustomApiModes.reduce( + (result, apiMode, index) => { + const canonicalModelName = apiModeToModelName(apiMode) + if (!canonicalModelName) return result + const currentIndexes = result.get(canonicalModelName) || [] + currentIndexes.push(index) + result.set(canonicalModelName, currentIndexes) + return result + }, + new Map(), + ) + const mergedCustomApiModes = normalizedCustomApiModes.map((apiMode) => ({ ...apiMode })) + const applyCanonicalLegacyItemName = (index, normalizedApiMode) => { + const apiMode = mergedCustomApiModes[index] + if ( + !apiMode || + apiMode.itemName || + !normalizedApiMode?.itemName || + apiMode.groupName !== normalizedApiMode.groupName + ) { + return + } + if ( + normalizedApiMode.itemName !== 'azureOpenAi' && + normalizedApiMode.itemName !== 'ollamaModel' + ) + return + mergedCustomApiModes[index] = { ...apiMode, itemName: normalizedApiMode.itemName } + } + const canonicalActiveApiModeNamesRepresentedByCustomRows = new Set() + 
activeApiModes.forEach((modelName) => { + const { normalizedApiMode, canonicalModelName } = resolveCanonicalActiveApiModeInfo( + modelName, + config, + ) + if (!canonicalModelName) return + const matchingCustomApiModeIndexes = + customApiModeIndexesByCanonicalModelName.get(canonicalModelName) || [] + if (matchingCustomApiModeIndexes.length === 0) return + if (matchingCustomApiModeIndexes.length === 1) { + mergedCustomApiModes[matchingCustomApiModeIndexes[0]].active = true + applyCanonicalLegacyItemName(matchingCustomApiModeIndexes[0], normalizedApiMode) + canonicalActiveApiModeNamesRepresentedByCustomRows.add(canonicalModelName) + return + } + const activeMatchingCustomApiModeIndexes = matchingCustomApiModeIndexes.filter( + (index) => mergedCustomApiModes[index].active, + ) + if (activeMatchingCustomApiModeIndexes.length > 0) { + activeMatchingCustomApiModeIndexes.forEach((index) => { + applyCanonicalLegacyItemName(index, normalizedApiMode) + }) + canonicalActiveApiModeNamesRepresentedByCustomRows.add(canonicalModelName) + } + }) + + const originalApiModes = activeApiModes .map((modelName) => { + const { normalizedApiMode, canonicalModelName } = resolveCanonicalActiveApiModeInfo( + modelName, + config, + ) // 'customModel' is always active - if (stringApiModes.includes(modelName) || modelName === 'customModel') { + if ( + canonicalActiveApiModeNamesRepresentedByCustomRows.has(canonicalModelName) || + modelName === 'customModel' + ) { return } - if (modelName === 'azureOpenAi') modelName += '-' + config.azureDeploymentName - if (modelName === 'ollama') modelName += '-' + config.ollamaModelName - return modelNameToApiMode(modelName) + return normalizedApiMode }) .filter((apiMode) => apiMode) return [ ...originalApiModes, - ...config.customApiModes.filter((apiMode) => (onlyActive ? apiMode.active : true)), + ...mergedCustomApiModes.filter((apiMode) => (onlyActive ? 
apiMode.active : true)), ] } @@ -123,11 +223,107 @@ export function getApiModesStringArrayFromConfig(config, onlyActive) { return getApiModesFromConfig(config, onlyActive).map(apiModeToModelName) } -export function isApiModeSelected(apiMode, configOrSession) { - return configOrSession.apiMode - ? JSON.stringify(configOrSession.apiMode, Object.keys(configOrSession.apiMode).sort()) === - JSON.stringify(apiMode, Object.keys(apiMode).sort()) - : configOrSession.modelName === apiModeToModelName(apiMode) +export function isApiModeSelected(apiMode, configOrSession, { sessionCompat = false } = {}) { + const normalizeForCompare = (value, { includeProviderState = true } = {}) => { + const normalized = normalizeApiMode(value) + if (!normalized) return null + const normalizedForCompare = { + groupName: normalized.groupName, + itemName: normalized.itemName, + isCustom: normalized.isCustom, + customName: normalized.customName, + } + if (includeProviderState) { + normalizedForCompare.providerId = normalized.providerId + normalizedForCompare.active = normalized.active + } + return JSON.stringify(normalizedForCompare) + } + + const matchesModelName = (targetApiMode) => { + const targetModelName = apiModeToModelName(targetApiMode) + if (!configOrSession?.modelName || !targetModelName) return false + if (configOrSession.modelName === targetModelName) return true + if (!targetApiMode?.active) return false + const { canonicalModelName } = resolveCanonicalActiveApiModeInfo( + configOrSession.modelName, + configOrSession, + ) + return canonicalModelName === targetModelName + } + + const isLegacyCompatibleSessionMatch = (targetApiMode, selectedApiMode, rawSelectedApiMode) => { + if (!targetApiMode || !selectedApiMode || !rawSelectedApiMode) return false + if (selectedApiMode.groupName !== targetApiMode.groupName) return false + + const isLegacyCustomSession = + selectedApiMode.groupName === 'customApiModelKeys' && + (!Object.hasOwn(rawSelectedApiMode, 'itemName') || + 
!Object.hasOwn(rawSelectedApiMode, 'isCustom')) + + if (isLegacyCustomSession) { + if (targetApiMode.groupName !== 'customApiModelKeys') return false + if (selectedApiMode.customName !== targetApiMode.customName) return false + } else if ( + selectedApiMode.itemName !== targetApiMode.itemName || + selectedApiMode.isCustom !== targetApiMode.isCustom || + selectedApiMode.customName !== targetApiMode.customName + ) { + return false + } + + if (!selectedApiMode.providerId) return true + if (selectedApiMode.providerId === targetApiMode.providerId) return true + if (!targetApiMode.providerId) return isLegacyCustomSession + return isLegacyCustomSession + } + + const targetApiMode = normalizeApiMode(apiMode) + if (!targetApiMode) return false + + if (!configOrSession?.apiMode) { + return matchesModelName(targetApiMode) + } + + const rawSelectedApiMode = configOrSession.apiMode + const selectedApiMode = normalizeApiMode(rawSelectedApiMode) + if (!selectedApiMode) { + return sessionCompat ? matchesModelName(targetApiMode) : false + } + + if (selectedApiMode) { + const selectedApiModeForCompare = normalizeForCompare(selectedApiMode) + const targetApiModeForCompare = normalizeForCompare(targetApiMode) + if (selectedApiModeForCompare && targetApiModeForCompare) { + if (selectedApiModeForCompare === targetApiModeForCompare) return true + if ( + sessionCompat && + // Historical sessions may carry stale providerId/active values after config migration. 
+ isLegacyCompatibleSessionMatch(targetApiMode, selectedApiMode, rawSelectedApiMode) + ) { + return true + } + } + } + + return false +} + +export function getUniquelySelectedApiModeIndex( + apiModes, + configOrSession, + { sessionCompat = false } = {}, +) { + if (!Array.isArray(apiModes) || apiModes.length === 0) return -1 + + let selectedIndex = -1 + for (const [index, apiMode] of apiModes.entries()) { + if (!isApiModeSelected(apiMode, configOrSession, { sessionCompat })) continue + if (selectedIndex !== -1) return -1 + selectedIndex = index + } + + return selectedIndex } // also match custom modelName, e.g. when modelName is bingFree4, configOrSession model is bingFree4-fast, it returns true diff --git a/tests/unit/config/migrate-user-config.test.mjs b/tests/unit/config/migrate-user-config.test.mjs new file mode 100644 index 000000000..8dda24e86 --- /dev/null +++ b/tests/unit/config/migrate-user-config.test.mjs @@ -0,0 +1,934 @@ +import assert from 'node:assert/strict' +import { beforeEach, test } from 'node:test' +import { getUserConfig } from '../../../src/config/index.mjs' + +function createCustomApiMode(overrides = {}) { + return { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'custom-model', + customUrl: '', + apiKey: '', + providerId: '', + active: true, + ...overrides, + } +} + +beforeEach(() => { + globalThis.__TEST_BROWSER_SHIM__.clearStorage() +}) + +test('getUserConfig promotes legacy customUrl into custom provider and migrates legacy custom key', async () => { + const customUrl = 'https://proxy.example.com/v1/chat/completions' + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + customApiKey: 'legacy-custom-key', + customApiModes: [ + createCustomApiMode({ + customName: 'My Proxy', + customUrl, + }), + ], + }) + + const config = await getUserConfig() + const migratedMode = config.customApiModes.find((mode) => mode.customName === 'My Proxy') + const migratedProvider = 
config.customOpenAIProviders.find( + (provider) => provider.id === migratedMode.providerId, + ) + + assert.equal(Boolean(migratedMode.providerId), true) + assert.equal(migratedMode.customUrl, '') + assert.equal(migratedProvider.chatCompletionsUrl, customUrl) + assert.equal(config.providerSecrets[migratedMode.providerId], 'legacy-custom-key') +}) + +test('getUserConfig keeps raw-id provider secret when custom provider id is renamed', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + providerSecrets: { + OpenAI: 'custom-provider-secret', + openai: 'builtin-provider-secret', + }, + customOpenAIProviders: [ + { + id: 'OpenAI', + name: 'My OpenAI Proxy', + chatCompletionsUrl: 'https://custom.example.com/v1/chat/completions', + }, + ], + customApiModes: [ + createCustomApiMode({ + customName: 'proxy-mode', + providerId: 'OpenAI', + }), + ], + }) + + const config = await getUserConfig() + const migratedProvider = config.customOpenAIProviders.find( + (provider) => provider.name === 'My OpenAI Proxy', + ) + const migratedMode = config.customApiModes.find((mode) => mode.customName === 'proxy-mode') + + assert.equal(migratedProvider.id, 'openai-2') + assert.equal(migratedMode.providerId, 'openai-2') + assert.equal(config.providerSecrets.openai, 'builtin-provider-secret') + assert.equal(config.providerSecrets['openai-2'], 'custom-provider-secret') +}) + +test('getUserConfig preserves normalized sourceProviderId on custom providers', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + customOpenAIProviders: [ + { + id: 'Selected Mode OpenAI', + name: 'Selected Mode OpenAI', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + sourceProviderId: ' OpenAI ', + }, + ], + }) + + const config = await getUserConfig() + const migratedProvider = config.customOpenAIProviders.find( + (provider) => provider.name === 'Selected Mode OpenAI', + ) + + 
assert.equal(migratedProvider.sourceProviderId, 'openai') +}) + +test('getUserConfig persists normalization-only sourceProviderId migrations', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + customOpenAIProviders: [ + { + id: 'Selected Mode OpenAI', + name: 'Selected Mode OpenAI', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + sourceProviderId: ' OpenAI ', + }, + ], + }) + + await getUserConfig() + const storedConfig = globalThis.__TEST_BROWSER_SHIM__.getStorage() + const storedProvider = storedConfig.customOpenAIProviders.find( + (provider) => provider.name === 'Selected Mode OpenAI', + ) + + assert.equal(storedProvider.sourceProviderId, 'openai') +}) + +test('getUserConfig remaps preserved custom sourceProviderId when provider ids are renamed', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + customOpenAIProviders: [ + { + id: '', + name: 'Generated Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + }, + { + id: 'custom-provider-1', + name: 'Renamed Proxy', + chatCompletionsUrl: 'https://proxy2.example.com/v1/chat/completions', + }, + { + id: 'Selected Mode Proxy', + name: 'Selected Mode Proxy', + chatCompletionsUrl: 'https://proxy3.example.com/v1/chat/completions', + sourceProviderId: 'custom-provider-1', + }, + ], + }) + + const config = await getUserConfig() + const migratedRenamedSourceProvider = config.customOpenAIProviders.find( + (provider) => provider.name === 'Renamed Proxy', + ) + const migratedMaterializedProvider = config.customOpenAIProviders.find( + (provider) => provider.name === 'Selected Mode Proxy', + ) + + assert.equal(migratedRenamedSourceProvider.id, 'custom-provider-1-2') + assert.equal(migratedMaterializedProvider.sourceProviderId, 'custom-provider-1-2') +}) + +test('getUserConfig preserves builtin sourceProviderId when unrelated custom provider collides', async () => { + 
globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + customOpenAIProviders: [ + { + id: 'OpenAI', + name: 'My OpenAI Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + }, + { + id: 'Selected Mode OpenAI', + name: 'Selected Mode OpenAI', + chatCompletionsUrl: 'https://proxy2.example.com/v1/chat/completions', + sourceProviderId: 'openai', + }, + ], + }) + + const config = await getUserConfig() + const migratedCollidingProvider = config.customOpenAIProviders.find( + (provider) => provider.name === 'My OpenAI Proxy', + ) + const migratedMaterializedProvider = config.customOpenAIProviders.find( + (provider) => provider.name === 'Selected Mode OpenAI', + ) + + assert.equal(migratedCollidingProvider.id, 'openai-2') + assert.equal(migratedMaterializedProvider.sourceProviderId, 'openai') +}) + +test('getUserConfig remaps colliding custom sourceProviderId instead of treating it as builtin', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + customOpenAIProviders: [ + { + id: 'OpenAI', + name: 'My OpenAI Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + }, + { + id: 'Selected Mode OpenAI Clone', + name: 'Selected Mode OpenAI Clone', + chatCompletionsUrl: 'https://proxy2.example.com/v1/chat/completions', + sourceProviderId: 'OpenAI', + }, + ], + }) + + const config = await getUserConfig() + const migratedCollidingProvider = config.customOpenAIProviders.find( + (provider) => provider.name === 'My OpenAI Proxy', + ) + const migratedMaterializedProvider = config.customOpenAIProviders.find( + (provider) => provider.name === 'Selected Mode OpenAI Clone', + ) + + assert.equal(migratedCollidingProvider.id, 'openai-2') + assert.equal(migratedMaterializedProvider.sourceProviderId, 'openai-2') +}) + +test('getUserConfig preserves unchanged duplicate raw-id sourceProviderId targets', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + 
configSchemaVersion: 0, + customOpenAIProviders: [ + { + id: 'foo', + name: 'Primary Foo Proxy', + chatCompletionsUrl: 'https://proxy-a.example.com/v1/chat/completions', + }, + { + id: 'foo', + name: 'Duplicate Foo Proxy', + chatCompletionsUrl: 'https://proxy-b.example.com/v1/chat/completions', + }, + { + id: 'Selected Mode Foo', + name: 'Selected Mode Foo', + chatCompletionsUrl: 'https://proxy-c.example.com/v1/chat/completions', + sourceProviderId: 'foo', + }, + ], + }) + + const config = await getUserConfig() + const primaryProvider = config.customOpenAIProviders.find( + (provider) => provider.name === 'Primary Foo Proxy', + ) + const duplicateProvider = config.customOpenAIProviders.find( + (provider) => provider.name === 'Duplicate Foo Proxy', + ) + const migratedMaterializedProvider = config.customOpenAIProviders.find( + (provider) => provider.name === 'Selected Mode Foo', + ) + + assert.equal(primaryProvider.id, 'foo') + assert.equal(duplicateProvider.id, 'foo-2') + assert.equal(migratedMaterializedProvider.sourceProviderId, 'foo') +}) + +test('getUserConfig does not reuse builtin provider secret for renamed colliding custom provider', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + providerSecrets: { + openai: 'builtin-provider-secret', + }, + customOpenAIProviders: [ + { + id: 'OpenAI', + name: 'My OpenAI Proxy', + chatCompletionsUrl: 'https://custom.example.com/v1/chat/completions', + }, + ], + customApiModes: [ + createCustomApiMode({ + customName: 'proxy-mode', + providerId: 'OpenAI', + }), + ], + }) + + const config = await getUserConfig() + const migratedProvider = config.customOpenAIProviders.find( + (provider) => provider.name === 'My OpenAI Proxy', + ) + const migratedMode = config.customApiModes.find((mode) => mode.customName === 'proxy-mode') + + assert.equal(migratedProvider.id, 'openai-2') + assert.equal(migratedMode.providerId, 'openai-2') + assert.equal(config.providerSecrets.openai, 
'builtin-provider-secret') + assert.equal(config.providerSecrets['openai-2'], undefined) +}) + +test('getUserConfig keeps original secret when colliding custom provider raw id is already normalized', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + providerSecrets: { + myproxy: 'shared-provider-secret', + }, + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'Primary Proxy', + chatCompletionsUrl: 'https://primary.example.com/v1/chat/completions', + }, + { + id: 'myproxy', + name: 'Duplicate Proxy', + chatCompletionsUrl: 'https://duplicate.example.com/v1/chat/completions', + }, + ], + }) + + const config = await getUserConfig() + const primaryProvider = config.customOpenAIProviders.find( + (provider) => provider.name === 'Primary Proxy', + ) + const duplicateProvider = config.customOpenAIProviders.find( + (provider) => provider.name === 'Duplicate Proxy', + ) + + assert.equal(primaryProvider.id, 'myproxy') + assert.equal(duplicateProvider.id, 'myproxy-2') + assert.equal(config.providerSecrets.myproxy, 'shared-provider-secret') + assert.equal(config.providerSecrets['myproxy-2'], 'shared-provider-secret') +}) + +test('getUserConfig prefers normalized provider secret when raw alias is explicitly empty', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + providerSecrets: { + 'My Proxy': '', + 'my-proxy': 'normalized-provider-secret', + }, + customOpenAIProviders: [ + { + id: 'my-proxy', + name: 'Primary Proxy', + chatCompletionsUrl: 'https://primary.example.com/v1/chat/completions', + }, + { + id: ' My Proxy ', + name: 'Duplicate Proxy', + chatCompletionsUrl: 'https://duplicate.example.com/v1/chat/completions', + }, + ], + }) + + const config = await getUserConfig() + const duplicateProvider = config.customOpenAIProviders.find( + (provider) => provider.name === 'Duplicate Proxy', + ) + + assert.equal(duplicateProvider.id, 'my-proxy-2') + 
assert.equal(config.providerSecrets['my-proxy'], 'normalized-provider-secret') + assert.equal(config.providerSecrets['my-proxy-2'], 'normalized-provider-secret') + assert.equal(Object.hasOwn(config.providerSecrets, 'My Proxy'), false) +}) + +test('getUserConfig keeps empty providerSecrets entry instead of restoring legacy key', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + providerSecrets: { + openai: '', + }, + apiKey: 'legacy-openai-key', + }) + + const config = await getUserConfig() + + assert.equal(config.providerSecrets.openai, '') + assert.equal(config.apiKey, '') +}) + +test('getUserConfig migrates raw-id provider secret when provider id is normalized only', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + providerSecrets: { + MyProxy: 'raw-provider-secret', + }, + customOpenAIProviders: [ + { + id: 'MyProxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + }, + ], + customApiModes: [ + createCustomApiMode({ + customName: 'proxy-mode', + providerId: 'MyProxy', + }), + ], + }) + + const config = await getUserConfig() + const migratedProvider = config.customOpenAIProviders.find( + (provider) => provider.name === 'My Proxy', + ) + const migratedMode = config.customApiModes.find((mode) => mode.customName === 'proxy-mode') + + assert.equal(migratedProvider.id, 'myproxy') + assert.equal(migratedMode.providerId, 'myproxy') + assert.equal(config.providerSecrets.myproxy, 'raw-provider-secret') +}) + +test('getUserConfig trims whitespace when normalizing custom provider ids in modes', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + providerSecrets: { + MyProxy: 'raw-provider-secret', + }, + customOpenAIProviders: [ + { + id: 'MyProxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + }, + ], + customApiModes: [ + createCustomApiMode({ + 
customName: 'proxy-mode', + providerId: ' myproxy ', + }), + ], + apiMode: createCustomApiMode({ + customName: 'selected-proxy-mode', + providerId: ' MyProxy ', + }), + }) + + const config = await getUserConfig() + const migratedMode = config.customApiModes.find((mode) => mode.customName === 'proxy-mode') + + assert.equal(migratedMode.providerId, 'myproxy') + assert.equal(config.apiMode.providerId, 'myproxy') + assert.equal(config.providerSecrets.myproxy, 'raw-provider-secret') +}) + +test('getUserConfig reuses existing custom provider when legacy customUrl only differs by trailing slash', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + }, + ], + customApiModes: [ + createCustomApiMode({ + customName: 'mode-with-slash', + customUrl: 'https://proxy.example.com/v1/chat/completions/', + }), + ], + }) + + const config = await getUserConfig() + const migratedMode = config.customApiModes.find((mode) => mode.customName === 'mode-with-slash') + + assert.equal(config.customOpenAIProviders.length, 1) + assert.equal(migratedMode.providerId, 'myproxy') + assert.equal(migratedMode.customUrl, '') +}) + +test('getUserConfig reuses existing custom provider for selected mode when legacy customUrl only differs by trailing slash', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + }, + ], + apiMode: createCustomApiMode({ + customName: 'selected-mode', + customUrl: 'https://proxy.example.com/v1/chat/completions/', + }), + }) + + const config = await getUserConfig() + + assert.equal(config.customOpenAIProviders.length, 1) + assert.equal(config.apiMode.providerId, 'myproxy') + assert.equal(config.apiMode.customUrl, '') +}) + 
+test('getUserConfig defaults custom provider allowLegacyResponseField to true when absent', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + }, + ], + customApiModes: [ + createCustomApiMode({ + customName: 'proxy-mode', + providerId: 'myproxy', + }), + ], + }) + + const config = await getUserConfig() + const migratedProvider = config.customOpenAIProviders.find( + (provider) => provider.id === 'myproxy', + ) + + assert.equal(migratedProvider.allowLegacyResponseField, true) +}) + +test('getUserConfig preserves explicit false allowLegacyResponseField on custom providers', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + allowLegacyResponseField: false, + }, + ], + customApiModes: [ + createCustomApiMode({ + customName: 'proxy-mode', + providerId: 'myproxy', + }), + ], + }) + + const config = await getUserConfig() + const migratedProvider = config.customOpenAIProviders.find( + (provider) => provider.id === 'myproxy', + ) + + assert.equal(migratedProvider.allowLegacyResponseField, false) +}) + +test('getUserConfig preserves distinct selected and listed custom mode apiKeys', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + providerSecrets: { + myproxy: 'provider-level-key', + }, + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + }, + ], + customApiModes: [ + createCustomApiMode({ + customName: 'mode-key-override', + providerId: 'myproxy', + apiKey: 'mode-level-key', + }), + ], + apiMode: createCustomApiMode({ + customName: 'selected-mode-key-override', + providerId: 
'myproxy', + apiKey: 'selected-mode-level-key', + }), + }) + + const config = await getUserConfig() + const migratedMode = config.customApiModes.find((mode) => mode.customName === 'mode-key-override') + const selectedProviderId = config.apiMode.providerId + + assert.equal(config.providerSecrets.myproxy, 'provider-level-key') + assert.notEqual(migratedMode.providerId, 'myproxy') + assert.notEqual(selectedProviderId, 'myproxy') + assert.notEqual(migratedMode.providerId, selectedProviderId) + assert.equal(config.providerSecrets[migratedMode.providerId], 'mode-level-key') + assert.equal(config.providerSecrets[selectedProviderId], 'selected-mode-level-key') + assert.equal(migratedMode.apiKey, '') + assert.equal(config.apiMode.apiKey, '') +}) + +test('getUserConfig splits conflicting custom mode apiKeys into separate providers', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + }, + ], + customApiModes: [ + createCustomApiMode({ + customName: 'mode-a', + providerId: 'myproxy', + apiKey: 'key-a', + }), + createCustomApiMode({ + customName: 'mode-b', + providerId: 'myproxy', + apiKey: 'key-b', + }), + ], + apiMode: createCustomApiMode({ + customName: 'mode-b', + providerId: 'myproxy', + apiKey: 'key-b', + }), + }) + + const config = await getUserConfig() + const modeA = config.customApiModes.find((mode) => mode.customName === 'mode-a') + const modeB = config.customApiModes.find((mode) => mode.customName === 'mode-b') + + assert.equal(modeA.providerId, 'myproxy') + assert.notEqual(modeB.providerId, 'myproxy') + assert.equal(config.apiMode.providerId, modeB.providerId) + assert.equal(config.providerSecrets.myproxy, 'key-a') + assert.equal(config.providerSecrets[modeB.providerId], 'key-b') + assert.equal(modeA.apiKey, '') + assert.equal(modeB.apiKey, '') + assert.equal(config.apiMode.apiKey, '') 
+}) + +test('getUserConfig materializes distinct providers for legacy custom default key conflicts', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + customModelApiUrl: 'https://legacy.example.com/v1/chat/completions', + customApiModes: [ + createCustomApiMode({ + customName: 'legacy-a', + apiKey: 'key-a', + }), + createCustomApiMode({ + customName: 'legacy-b', + apiKey: 'key-b', + }), + ], + apiMode: createCustomApiMode({ + customName: 'legacy-b', + apiKey: 'key-b', + }), + }) + + const config = await getUserConfig() + const modeA = config.customApiModes.find((mode) => mode.customName === 'legacy-a') + const modeB = config.customApiModes.find((mode) => mode.customName === 'legacy-b') + const materializedProvider = config.customOpenAIProviders.find( + (provider) => provider.id === modeB.providerId, + ) + + assert.equal(modeA.providerId, 'legacy-custom-default') + assert.notEqual(modeB.providerId, 'legacy-custom-default') + assert.equal(config.apiMode.providerId, modeB.providerId) + assert.equal(config.providerSecrets['legacy-custom-default'], 'key-a') + assert.equal(config.providerSecrets[modeB.providerId], 'key-b') + assert.equal( + materializedProvider.chatCompletionsUrl, + 'https://legacy.example.com/v1/chat/completions', + ) + assert.equal(modeA.apiKey, '') + assert.equal(modeB.apiKey, '') + assert.equal(config.apiMode.apiKey, '') +}) + +test('getUserConfig migrates custom mode apiKey into provider secret when provider secret is empty', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + }, + ], + customApiModes: [ + createCustomApiMode({ + customName: 'mode-key-source', + providerId: 'myproxy', + apiKey: 'mode-level-key', + }), + ], + }) + + const config = await getUserConfig() + const migratedMode = config.customApiModes.find((mode) => 
mode.customName === 'mode-key-source') + + assert.equal(config.providerSecrets.myproxy, 'mode-level-key') + assert.equal(migratedMode.apiKey, '') +}) + +test('getUserConfig keeps existing provider secret when imported legacy key differs', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + providerSecrets: { + openai: 'existing-secret', + }, + apiKey: 'imported-legacy-secret', + }) + + const config = await getUserConfig() + + assert.equal(config.providerSecrets.openai, 'existing-secret') +}) + +test('getUserConfig does not overwrite provider secret when imported legacy key is empty', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + providerSecrets: { + openai: 'existing-secret', + }, + apiKey: '', + }) + + const config = await getUserConfig() + + assert.equal(config.providerSecrets.openai, 'existing-secret') +}) + +test('getUserConfig clears non-custom mode providerId and migrates mode key to providerSecrets', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + customApiModes: [ + { + groupName: 'chatgptApiModelKeys', + itemName: 'chatgptApi35', + isCustom: false, + customName: '', + customUrl: '', + apiKey: 'sk-from-mode', + providerId: 'openai', + active: true, + }, + ], + }) + + const config = await getUserConfig() + const migratedMode = config.customApiModes.find( + (mode) => mode.groupName === 'chatgptApiModelKeys' && mode.itemName === 'chatgptApi35', + ) + + assert.equal(migratedMode.providerId, '') + assert.equal(migratedMode.apiKey, '') + assert.equal(config.providerSecrets.openai, 'sk-from-mode') +}) + +test('getUserConfig keeps empty providerSecrets entry when migrating non-custom mode key', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + providerSecrets: { + openai: '', + }, + customApiModes: [ + { + groupName: 'chatgptApiModelKeys', + itemName: 'chatgptApi35', + isCustom: false, + 
customName: '', + customUrl: '', + apiKey: 'sk-from-mode', + providerId: 'openai', + active: true, + }, + ], + }) + + const config = await getUserConfig() + const migratedMode = config.customApiModes.find( + (mode) => mode.groupName === 'chatgptApiModelKeys' && mode.itemName === 'chatgptApi35', + ) + + assert.equal(migratedMode.providerId, '') + assert.equal(migratedMode.apiKey, '') + assert.equal(config.providerSecrets.openai, '') +}) + +test('getUserConfig writes current config schema version during migration', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + }) + + const config = await getUserConfig() + const storage = globalThis.__TEST_BROWSER_SHIM__.getStorage() + + assert.equal(config.configSchemaVersion, 1) + assert.equal(storage.configSchemaVersion, 1) +}) + +test('getUserConfig creates separate providers when same URL has different API keys', async () => { + const customUrl = 'https://proxy.example.com/v1/chat/completions' + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + customApiModes: [ + createCustomApiMode({ + customName: 'mode-a', + customUrl, + apiKey: 'key-a', + }), + createCustomApiMode({ + customName: 'mode-b', + customUrl, + apiKey: 'key-b', + }), + ], + }) + + const config = await getUserConfig() + const modeA = config.customApiModes.find((mode) => mode.customName === 'mode-a') + const modeB = config.customApiModes.find((mode) => mode.customName === 'mode-b') + + assert.notEqual( + modeA.providerId, + modeB.providerId, + 'modes with different keys should get separate providers', + ) + assert.equal(config.providerSecrets[modeA.providerId], 'key-a') + assert.equal(config.providerSecrets[modeB.providerId], 'key-b') + assert.equal(config.customOpenAIProviders.length, 2) +}) + +test('getUserConfig does not merge keyless mode into keyed provider for same URL', async () => { + const customUrl = 'https://proxy.example.com/v1/chat/completions' + 
globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + customApiModes: [ + createCustomApiMode({ + customName: 'mode-keyed', + customUrl, + apiKey: 'key-a', + }), + createCustomApiMode({ + customName: 'mode-keyless', + customUrl, + apiKey: '', + }), + ], + }) + + const config = await getUserConfig() + const keyedMode = config.customApiModes.find((mode) => mode.customName === 'mode-keyed') + const keylessMode = config.customApiModes.find((mode) => mode.customName === 'mode-keyless') + + assert.notEqual( + keyedMode.providerId, + keylessMode.providerId, + 'keyless mode should not be merged into a keyed provider', + ) + assert.equal(config.providerSecrets[keyedMode.providerId], 'key-a') + assert.equal(config.providerSecrets[keylessMode.providerId] || '', '') +}) + +test('getUserConfig keeps selected keyless mode separate from keyed provider for same URL', async () => { + const customUrl = 'https://proxy.example.com/v1/chat/completions' + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + customApiModes: [ + createCustomApiMode({ + customName: 'mode-keyed', + customUrl, + apiKey: 'key-a', + }), + ], + apiMode: createCustomApiMode({ + customName: 'selected-keyless', + customUrl, + apiKey: '', + }), + }) + + const config = await getUserConfig() + const keyedMode = config.customApiModes.find((mode) => mode.customName === 'mode-keyed') + + assert.notEqual( + keyedMode.providerId, + config.apiMode.providerId, + 'selected keyless mode should not reuse keyed provider', + ) + assert.equal(config.providerSecrets[keyedMode.providerId], 'key-a') + assert.equal(config.providerSecrets[config.apiMode.providerId] || '', '') +}) + +test('getUserConfig reverse-syncs providerSecrets to legacy fields for backward compatibility', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 0, + customApiModes: [ + { + groupName: 'chatgptApiModelKeys', + itemName: 'chatgptApi35', + isCustom: false, + customName: 
'', + customUrl: '', + apiKey: 'sk-from-mode', + providerId: '', + active: true, + }, + ], + }) + + const config = await getUserConfig() + const storage = globalThis.__TEST_BROWSER_SHIM__.getStorage() + + assert.equal(config.providerSecrets.openai, 'sk-from-mode') + assert.equal(storage.apiKey, 'sk-from-mode', 'legacy apiKey field should be reverse-synced') +}) + +test('getUserConfig converges missing provider migration keys when schema version is current', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 1, + }) + + await getUserConfig() + const storageAfterFirst = globalThis.__TEST_BROWSER_SHIM__.getStorage() + + assert.deepEqual(storageAfterFirst.providerSecrets, {}) + assert.deepEqual(storageAfterFirst.customApiModes, []) + assert.deepEqual(storageAfterFirst.customOpenAIProviders, []) + + const snapshot = JSON.stringify(storageAfterFirst) + await getUserConfig() + const storageAfterSecond = globalThis.__TEST_BROWSER_SHIM__.getStorage() + + assert.equal(JSON.stringify(storageAfterSecond), snapshot) +}) + +test('getUserConfig normalizes providerSecrets when legacy data is not a plain object', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + configSchemaVersion: 1, + providerSecrets: ['invalid-shape'], + }) + + await getUserConfig() + const storage = globalThis.__TEST_BROWSER_SHIM__.getStorage() + + assert.deepEqual(storage.providerSecrets, {}) +}) diff --git a/tests/unit/config/user-config.test.mjs b/tests/unit/config/user-config.test.mjs index f22d73609..4e9709152 100644 --- a/tests/unit/config/user-config.test.mjs +++ b/tests/unit/config/user-config.test.mjs @@ -15,8 +15,10 @@ test('getUserConfig migrates legacy chat.openai.com URL to chatgpt.com', async ( }) const config = await getUserConfig() + const storage = globalThis.__TEST_BROWSER_SHIM__.getStorage() assert.equal(config.customChatGptWebApiUrl, 'https://chatgpt.com') + assert.equal(storage.customChatGptWebApiUrl, 'https://chatgpt.com') }) 
test('getUserConfig keeps modern chatgpt.com URL unchanged', async () => { @@ -141,6 +143,66 @@ test('getUserConfig returns migrated Anthropic values when storage.remove fails' assert.equal(Object.hasOwn(storage, 'customClaudeApiUrl'), false) }) +test('getUserConfig preserves empty provider secret when provider id is normalized', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + customOpenAIProviders: [ + { + id: ' My Proxy ', + name: 'My Proxy', + baseUrl: 'https://proxy.example.com', + chatCompletionsPath: '/v1/chat/completions', + completionsPath: '/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + 'My Proxy': '', + }, + }) + + const config = await getUserConfig() + const storage = globalThis.__TEST_BROWSER_SHIM__.getStorage() + + assert.equal(config.providerSecrets['my-proxy'], '') + assert.equal(Object.hasOwn(config.providerSecrets, 'My Proxy'), false) + assert.equal(storage.providerSecrets['my-proxy'], '') + assert.equal(Object.hasOwn(storage.providerSecrets, 'My Proxy'), false) +}) + +test('getUserConfig preserves empty provider secret when duplicate provider ids are renamed', async () => { + globalThis.__TEST_BROWSER_SHIM__.replaceStorage({ + customOpenAIProviders: [ + { + id: 'my-proxy', + name: 'Existing Proxy', + baseUrl: 'https://proxy-a.example.com', + chatCompletionsPath: '/v1/chat/completions', + completionsPath: '/v1/completions', + enabled: true, + }, + { + id: ' My Proxy ', + name: 'Renamed Proxy', + baseUrl: 'https://proxy-b.example.com', + chatCompletionsPath: '/v1/chat/completions', + completionsPath: '/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + 'My Proxy': '', + }, + }) + + const config = await getUserConfig() + const storage = globalThis.__TEST_BROWSER_SHIM__.getStorage() + + assert.equal(config.providerSecrets['my-proxy-2'], '') + assert.equal(Object.hasOwn(config.providerSecrets, 'My Proxy'), false) + assert.equal(storage.providerSecrets['my-proxy-2'], '') + 
assert.equal(Object.hasOwn(storage.providerSecrets, 'My Proxy'), false) +}) + test('clearOldAccessToken clears expired token older than 30 days', async (t) => { const now = 1_700_000_000_000 t.mock.method(Date, 'now', () => now) diff --git a/tests/unit/services/apis/custom-api.test.mjs b/tests/unit/services/apis/custom-api.test.mjs index 8f8dea842..3717b94a9 100644 --- a/tests/unit/services/apis/custom-api.test.mjs +++ b/tests/unit/services/apis/custom-api.test.mjs @@ -72,7 +72,7 @@ test('aggregates delta.content SSE chunks and finishes on finish_reason', async port.postedMessages.some((m) => m.done === true && m.session === session), true, ) - assert.deepEqual(port.postedMessages.at(-1), { done: true }) + assert.deepEqual(port.postedMessages.at(-1), { answer: null, done: true, session }) assert.deepEqual(session.conversationRecords.at(-1), { question: 'CurrentQ', answer: 'Hello', diff --git a/tests/unit/services/apis/openai-api-compat.test.mjs b/tests/unit/services/apis/openai-api-compat.test.mjs index 39b890598..1bf854d0b 100644 --- a/tests/unit/services/apis/openai-api-compat.test.mjs +++ b/tests/unit/services/apis/openai-api-compat.test.mjs @@ -4,6 +4,7 @@ import { generateAnswersWithOpenAiApi, generateAnswersWithOpenAiApiCompat, generateAnswersWithGptCompletionApi, + generateAnswersWithOpenAICompatibleApi, } from '../../../../src/services/apis/openai-api.mjs' import { createFakePort } from '../../helpers/port.mjs' import { createMockSseResponse } from '../../helpers/sse-response.mjs' @@ -89,11 +90,229 @@ test('generateAnswersWithOpenAiApiCompat sends expected request and aggregates S port.postedMessages.some((message) => message.done === true && message.session === session), true, ) - assert.deepEqual(port.postedMessages.at(-1), { done: true }) + assert.deepEqual(port.postedMessages.at(-1), { answer: null, done: true, session }) assert.deepEqual(session.conversationRecords.at(-1), { question: 'CurrentQ', answer: 'Hello' }) }) 
-test('generateAnswersWithOpenAiApiCompat uses max_completion_tokens for OpenAI latest gpt-5 compat models', async (t) => { +test('generateAnswersWithOpenAiApiCompat emits fallback done message when stream ends without finish reason', async (t) => { + t.mock.method(console, 'debug', () => {}) + setStorage({ + maxConversationContextLength: 3, + maxResponseTokenLength: 256, + temperature: 0.25, + }) + + const session = { + modelName: 'chatgptApi4oMini', + conversationRecords: [], + isRetry: false, + } + const port = createFakePort() + + t.mock.method(globalThis, 'fetch', async () => + createMockSseResponse(['data: {"choices":[{"delta":{"content":"Partial"}}]}\n\n']), + ) + + await generateAnswersWithOpenAiApiCompat( + 'https://api.example.com/v1', + port, + 'CurrentQ', + session, + 'sk-test', + ) + + assert.equal( + port.postedMessages.some((message) => message.done === false && message.answer === 'Partial'), + true, + ) + assert.equal( + port.postedMessages.some((message) => message.done === true && message.session === session), + true, + ) + assert.deepEqual(port.postedMessages.at(-1), { answer: null, done: true, session }) + assert.deepEqual(session.conversationRecords.at(-1), { + question: 'CurrentQ', + answer: 'Partial', + }) +}) + +test('generateAnswersWithOpenAiApiCompat does not record an empty answer when stream ends before first chunk', async (t) => { + t.mock.method(console, 'debug', () => {}) + setStorage({ + maxConversationContextLength: 3, + maxResponseTokenLength: 256, + temperature: 0.25, + }) + + const session = { + modelName: 'chatgptApi4oMini', + conversationRecords: [], + isRetry: false, + } + const port = createFakePort() + + t.mock.method(globalThis, 'fetch', async () => createMockSseResponse([])) + + await generateAnswersWithOpenAiApiCompat( + 'https://api.example.com/v1', + port, + 'CurrentQ', + session, + 'sk-test', + ) + + assert.equal( + port.postedMessages.some((message) => message.done === true && message.session === session), + true, + ) 
+ assert.deepEqual(port.postedMessages.at(-1), { answer: null, done: true, session }) + assert.deepEqual(session.conversationRecords, []) + assert.deepEqual(port.listenerCounts(), { onMessage: 0, onDisconnect: 0 }) +}) + +test('generateAnswersWithOpenAiApiCompat does not record an empty answer when stream only sends [DONE]', async (t) => { + t.mock.method(console, 'debug', () => {}) + setStorage({ + maxConversationContextLength: 3, + maxResponseTokenLength: 256, + temperature: 0.25, + }) + + const session = { + modelName: 'chatgptApi4oMini', + conversationRecords: [], + isRetry: false, + } + const port = createFakePort() + + t.mock.method(globalThis, 'fetch', async () => createMockSseResponse(['data: [DONE]\n\n'])) + + await generateAnswersWithOpenAiApiCompat( + 'https://api.example.com/v1', + port, + 'CurrentQ', + session, + 'sk-test', + ) + + assert.equal( + port.postedMessages.some((message) => message.done === true && message.session === session), + true, + ) + assert.deepEqual(port.postedMessages.at(-1), { answer: null, done: true, session }) + assert.deepEqual(session.conversationRecords, []) +}) + +test('generateAnswersWithOpenAiApiCompat does not record an empty answer when finish_reason arrives without content', async (t) => { + t.mock.method(console, 'debug', () => {}) + setStorage({ + maxConversationContextLength: 3, + maxResponseTokenLength: 256, + temperature: 0.25, + }) + + const session = { + modelName: 'chatgptApi4oMini', + conversationRecords: [], + isRetry: false, + } + const port = createFakePort() + + t.mock.method(globalThis, 'fetch', async () => + createMockSseResponse(['data: {"choices":[{"delta":{},"finish_reason":"stop"}]}\n\n']), + ) + + await generateAnswersWithOpenAiApiCompat( + 'https://api.example.com/v1', + port, + 'CurrentQ', + session, + 'sk-test', + ) + + assert.equal( + port.postedMessages.some((message) => message.done === true && message.session === session), + true, + ) + assert.deepEqual(port.postedMessages.at(-1), { answer: 
null, done: true, session }) + assert.deepEqual(session.conversationRecords, []) +}) + +test('generateAnswersWithOpenAiApiCompat ignores null deltas without coercing them into the answer', async (t) => { + t.mock.method(console, 'debug', () => {}) + setStorage({ + maxConversationContextLength: 3, + maxResponseTokenLength: 256, + temperature: 0.25, + }) + + const session = { + modelName: 'chatgptApi4oMini', + conversationRecords: [], + isRetry: false, + } + const port = createFakePort() + + t.mock.method(globalThis, 'fetch', async () => + createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"Hel"}}]}\n\n', + 'data: {"choices":[{"delta":{"content":null}}]}\n\n', + 'data: {"choices":[{"delta":{"content":"lo"},"finish_reason":"stop"}]}\n\n', + ]), + ) + + await generateAnswersWithOpenAiApiCompat( + 'https://api.example.com/v1', + port, + 'CurrentQ', + session, + 'sk-test', + ) + + assert.equal( + port.postedMessages.some((message) => message.done === false && message.answer === 'Helnull'), + false, + ) + assert.deepEqual(session.conversationRecords.at(-1), { question: 'CurrentQ', answer: 'Hello' }) +}) + +test('generateAnswersWithOpenAiApiCompat treats missing conversationRecords as an empty history', async (t) => { + t.mock.method(console, 'debug', () => {}) + setStorage({ + maxConversationContextLength: 3, + maxResponseTokenLength: 256, + temperature: 0.25, + }) + + const session = { + modelName: 'chatgptApi4oMini', + conversationRecords: null, + isRetry: false, + } + const port = createFakePort() + + let capturedInit + t.mock.method(globalThis, 'fetch', async (_input, init) => { + capturedInit = init + return createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"Hello"},"finish_reason":"stop"}]}\n\n', + ]) + }) + + await generateAnswersWithOpenAiApiCompat( + 'https://api.example.com/v1', + port, + 'CurrentQ', + session, + 'sk-test', + ) + + const body = JSON.parse(capturedInit.body) + assert.deepEqual(body.messages, [{ role: 'user', content: 
'CurrentQ' }]) + assert.deepEqual(session.conversationRecords.at(-1), { question: 'CurrentQ', answer: 'Hello' }) +}) + +test('generateAnswersWithOpenAiApiCompat uses max_completion_tokens for OpenAI gpt-5 models', async (t) => { t.mock.method(console, 'debug', () => {}) setStorage({ maxConversationContextLength: 3, @@ -101,45 +320,970 @@ test('generateAnswersWithOpenAiApiCompat uses max_completion_tokens for OpenAI l temperature: 0.2, }) let capturedInit - t.mock.method(globalThis, 'fetch', async (_input, init) => { + t.mock.method(globalThis, 'fetch', async (_input, init) => { + capturedInit = init + return createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + ]) + }) + + for (const modelName of gpt5LatestCompatModelNames) { + capturedInit = undefined + const session = { + modelName, + conversationRecords: [], + isRetry: false, + } + const port = createFakePort() + + await generateAnswersWithOpenAiApiCompat( + 'https://api.example.com/v1', + port, + 'CurrentQ', + session, + 'sk-test', + {}, + 'openai', + ) + + const body = JSON.parse(capturedInit.body) + assert.equal(body.max_completion_tokens, 321) + assert.equal(Object.hasOwn(body, 'max_tokens'), false) + } +}) + +test('generateAnswersWithOpenAiApiCompat uses latest mapped gpt-5 API model values', async (t) => { + t.mock.method(console, 'debug', () => {}) + setStorage({ + maxConversationContextLength: 3, + maxResponseTokenLength: 111, + temperature: 0.2, + }) + let capturedInit + t.mock.method(globalThis, 'fetch', async (_input, init) => { + capturedInit = init + return createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + ]) + }) + + for (const [modelName, expectedModel] of gpt5LatestMappedModels) { + capturedInit = undefined + const session = { + modelName, + conversationRecords: [], + isRetry: false, + } + const port = createFakePort() + + await generateAnswersWithOpenAiApiCompat( + 
'https://api.example.com/v1', + port, + 'CurrentQ', + session, + 'sk-test', + {}, + 'openai', + ) + + const body = JSON.parse(capturedInit.body) + assert.equal(body.model, expectedModel) + assert.equal(body.max_completion_tokens, 111) + assert.equal(Object.hasOwn(body, 'max_tokens'), false) + } +}) + +test('generateAnswersWithOpenAiApi uses OpenAI token params for a latest mapped gpt-5 model', async (t) => { + t.mock.method(console, 'debug', () => {}) + setStorage({ + customOpenAiApiUrl: 'https://api.openai.example.com', + maxConversationContextLength: 3, + maxResponseTokenLength: 222, + temperature: 0.2, + }) + + const session = { + modelName: 'chatgptApi5_2Latest', + conversationRecords: [], + isRetry: false, + } + const port = createFakePort() + + let capturedInput + let capturedInit + t.mock.method(globalThis, 'fetch', async (input, init) => { + capturedInput = input + capturedInit = init + return createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + ]) + }) + + await generateAnswersWithOpenAiApi(port, 'CurrentQ', session, 'sk-test') + + const body = JSON.parse(capturedInit.body) + assert.equal(capturedInput, 'https://api.openai.example.com/v1/chat/completions') + assert.equal(body.model, 'gpt-5.2-chat-latest') + assert.equal(body.max_completion_tokens, 222) + assert.equal(Object.hasOwn(body, 'max_tokens'), false) +}) + +test('generateAnswersWithOpenAiApi uses max_completion_tokens for GPT-5.4 mini', async (t) => { + t.mock.method(console, 'debug', () => {}) + setStorage({ + customOpenAiApiUrl: 'https://api.openai.example.com', + maxConversationContextLength: 3, + maxResponseTokenLength: 444, + temperature: 0.3, + }) + + const session = { + modelName: 'chatgptApi5_4Mini', + conversationRecords: [], + isRetry: false, + } + const port = createFakePort() + + let capturedInput + let capturedInit + t.mock.method(globalThis, 'fetch', async (input, init) => { + capturedInput = input + capturedInit = init + return 
createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + ]) + }) + + await generateAnswersWithOpenAiApi(port, 'CurrentQ', session, 'sk-test') + + const body = JSON.parse(capturedInit.body) + assert.equal(capturedInput, 'https://api.openai.example.com/v1/chat/completions') + assert.equal(body.model, 'gpt-5.4-mini') + assert.equal(body.max_completion_tokens, 444) + assert.equal(Object.hasOwn(body, 'max_tokens'), false) +}) + +test('generateAnswersWithOpenAiApi uses max_completion_tokens for GPT-5.4 nano', async (t) => { + t.mock.method(console, 'debug', () => {}) + setStorage({ + customOpenAiApiUrl: 'https://api.openai.example.com', + maxConversationContextLength: 3, + maxResponseTokenLength: 555, + temperature: 0.3, + }) + + const session = { + modelName: 'chatgptApi5_4Nano', + conversationRecords: [], + isRetry: false, + } + const port = createFakePort() + + let capturedInput + let capturedInit + t.mock.method(globalThis, 'fetch', async (input, init) => { + capturedInput = input + capturedInit = init + return createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + ]) + }) + + await generateAnswersWithOpenAiApi(port, 'CurrentQ', session, 'sk-test') + + const body = JSON.parse(capturedInit.body) + assert.equal(capturedInput, 'https://api.openai.example.com/v1/chat/completions') + assert.equal(body.model, 'gpt-5.4-nano') + assert.equal(body.max_completion_tokens, 555) + assert.equal(Object.hasOwn(body, 'max_tokens'), false) +}) + +test('generateAnswersWithOpenAICompatibleApi keeps OpenAI GPT-5 token params for materialized OpenAI providers even when built-in OpenAI URL points to a proxy', async (t) => { + t.mock.method(console, 'debug', () => {}) + setStorage({ + customOpenAiApiUrl: 'https://proxy.example.com/v1', + maxConversationContextLength: 3, + maxResponseTokenLength: 666, + temperature: 0.3, + }) + + const config = { + customOpenAIProviders: [ + { + id: 
'selected-mode-openai', + name: 'Selected Mode (OpenAI)', + baseUrl: 'https://api.openai.com/v1', + chatCompletionsPath: '/chat/completions', + completionsPath: '/completions', + sourceProviderId: 'openai', + enabled: true, + }, + ], + providerSecrets: { + 'selected-mode-openai': 'proxy-key', + }, + } + const session = { + modelName: 'customModel', + conversationRecords: [], + isRetry: false, + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'selected-mode-openai', + customName: 'gpt-5.4-mini', + customUrl: '', + apiKey: '', + active: true, + }, + } + const port = createFakePort() + + let capturedInput + let capturedInit + t.mock.method(globalThis, 'fetch', async (input, init) => { + capturedInput = input + capturedInit = init + return createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + ]) + }) + + await generateAnswersWithOpenAICompatibleApi(port, 'CurrentQ', session, config) + + const body = JSON.parse(capturedInit.body) + assert.equal(capturedInput, 'https://api.openai.com/v1/chat/completions') + assert.equal(body.model, 'gpt-5.4-mini') + assert.equal(body.max_completion_tokens, 666) + assert.equal(Object.hasOwn(body, 'max_tokens'), false) +}) + +test('generateAnswersWithOpenAICompatibleApi keeps generic compat token params for repointed OpenAI-lineage providers', async (t) => { + t.mock.method(console, 'debug', () => {}) + setStorage({ + maxConversationContextLength: 3, + maxResponseTokenLength: 666, + temperature: 0.2, + }) + + const config = { + customOpenAIProviders: [ + { + id: 'selected-mode-openai-proxy', + name: 'Selected Mode (OpenAI Proxy)', + baseUrl: 'https://proxy.example.com/v1', + chatCompletionsPath: '/chat/completions', + completionsPath: '/completions', + sourceProviderId: 'openai', + enabled: true, + }, + ], + providerSecrets: { + 'selected-mode-openai-proxy': 'proxy-key', + }, + } + const session = { + modelName: 'customModel', + 
conversationRecords: [], + isRetry: false, + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'selected-mode-openai-proxy', + customName: 'gpt-5.4-mini', + customUrl: '', + apiKey: '', + active: true, + }, + } + const port = createFakePort() + + let capturedInput + let capturedInit + t.mock.method(globalThis, 'fetch', async (input, init) => { + capturedInput = input + capturedInit = init + return createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + ]) + }) + + await generateAnswersWithOpenAICompatibleApi(port, 'CurrentQ', session, config) + + const body = JSON.parse(capturedInit.body) + assert.equal(capturedInput, 'https://proxy.example.com/v1/chat/completions') + assert.equal(body.model, 'gpt-5.4-mini') + assert.equal(body.max_tokens, 666) + assert.equal(Object.hasOwn(body, 'max_completion_tokens'), false) +}) + +test('generateAnswersWithOpenAICompatibleApi keeps generic compat token params for custom providers without source lineage', async (t) => { + t.mock.method(console, 'debug', () => {}) + setStorage({ + maxConversationContextLength: 3, + maxResponseTokenLength: 444, + temperature: 0.2, + }) + + const config = { + customOpenAIProviders: [ + { + id: 'selected-mode-compat', + name: 'Selected Mode (Compat)', + baseUrl: 'https://compat.example.com/v1', + chatCompletionsPath: '/chat/completions', + completionsPath: '/completions', + enabled: true, + }, + ], + providerSecrets: { + 'selected-mode-compat': 'compat-key', + }, + } + const session = { + modelName: 'customModel', + conversationRecords: [], + isRetry: false, + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'selected-mode-compat', + customName: 'gpt-5.4-mini', + customUrl: '', + apiKey: '', + active: true, + }, + } + const port = createFakePort() + + let capturedInit + t.mock.method(globalThis, 'fetch', async (_input, init) => { + 
capturedInit = init + return createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + ]) + }) + + await generateAnswersWithOpenAICompatibleApi(port, 'CurrentQ', session, config) + + const body = JSON.parse(capturedInit.body) + assert.equal(body.max_tokens, 444) + assert.equal(Object.hasOwn(body, 'max_completion_tokens'), false) +}) + +test('generateAnswersWithOpenAICompatibleApi keeps Ollama keep_alive for materialized Ollama providers with native chat path', async (t) => { + t.mock.method(console, 'debug', () => {}) + t.mock.method(console, 'warn', () => {}) + setStorage({ + maxConversationContextLength: 3, + maxResponseTokenLength: 333, + temperature: 0.2, + }) + + const config = { + customOpenAIProviders: [ + { + id: 'selected-mode-ollama', + name: 'Selected Mode (Ollama)', + baseUrl: 'http://127.0.0.1:11434', + chatCompletionsPath: '/api/chat', + completionsPath: '/completions', + sourceProviderId: 'ollama', + enabled: true, + }, + ], + ollamaKeepAliveTime: '5m', + } + const session = { + modelName: 'customModel', + conversationRecords: [], + isRetry: false, + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'selected-mode-ollama', + customName: 'llama3.2', + customUrl: '', + apiKey: '', + active: true, + }, + } + const port = createFakePort() + + const calls = [] + t.mock.method(globalThis, 'fetch', async (input, init) => { + calls.push([input, init]) + if (String(input).endsWith('/api/generate')) { + return { ok: true } + } + return createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + ]) + }) + + await generateAnswersWithOpenAICompatibleApi(port, 'CurrentQ', session, config) + + assert.equal(calls.length, 2) + assert.equal(calls[0][0], 'http://127.0.0.1:11434/api/chat') + assert.equal(calls[1][0], 'http://127.0.0.1:11434/api/generate') +}) + +test('generateAnswersWithOpenAICompatibleApi keeps Ollama 
keep_alive for materialized Ollama providers with standard compat chat path', async (t) => { + t.mock.method(console, 'debug', () => {}) + t.mock.method(console, 'warn', () => {}) + setStorage({ + maxConversationContextLength: 3, + maxResponseTokenLength: 333, + temperature: 0.2, + }) + + const config = { + customOpenAIProviders: [ + { + id: 'selected-mode-ollama-compat', + name: 'Selected Mode (Ollama Compat)', + baseUrl: 'http://127.0.0.1:11434/v1', + chatCompletionsPath: '/chat/completions', + completionsPath: '/completions', + sourceProviderId: 'ollama', + enabled: true, + }, + ], + ollamaKeepAliveTime: '5m', + } + const session = { + modelName: 'customModel', + conversationRecords: [], + isRetry: false, + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'selected-mode-ollama-compat', + customName: 'llama3.2', + customUrl: '', + apiKey: '', + active: true, + }, + } + const port = createFakePort() + + const requestedUrls = [] + t.mock.method(globalThis, 'fetch', async (input) => { + requestedUrls.push(String(input)) + if (String(input).endsWith('/api/generate')) { + return { ok: true } + } + return createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + ]) + }) + + await generateAnswersWithOpenAICompatibleApi(port, 'CurrentQ', session, config) + + assert.equal(requestedUrls.includes('http://127.0.0.1:11434/v1/chat/completions'), true) + assert.equal(requestedUrls.includes('http://127.0.0.1:11434/api/generate'), true) +}) + +test('generateAnswersWithOpenAICompatibleApi keeps Ollama keep_alive for edited standard compat endpoint', async (t) => { + t.mock.method(console, 'debug', () => {}) + t.mock.method(console, 'warn', () => {}) + setStorage({ + maxConversationContextLength: 3, + maxResponseTokenLength: 333, + temperature: 0.2, + }) + + const config = { + customOpenAIProviders: [ + { + id: 'selected-mode-edited-ollama', + name: 'Selected Mode (Edited 
Ollama)', + baseUrl: '', + chatCompletionsUrl: 'http://edited-ollama:11434/v1/chat/completions', + completionsUrl: 'http://edited-ollama:11434/v1/completions', + sourceProviderId: 'ollama', + enabled: true, + }, + ], + ollamaKeepAliveTime: '5m', + } + const session = { + modelName: 'customModel', + conversationRecords: [], + isRetry: false, + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'selected-mode-edited-ollama', + customName: 'llama3.2', + customUrl: '', + apiKey: '', + active: true, + }, + } + const port = createFakePort() + + const requestedUrls = [] + t.mock.method(globalThis, 'fetch', async (input) => { + requestedUrls.push(String(input)) + if (String(input).endsWith('/api/generate')) { + return { ok: true } + } + return createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + ]) + }) + + await generateAnswersWithOpenAICompatibleApi(port, 'CurrentQ', session, config) + + assert.equal(requestedUrls.includes('http://edited-ollama:11434/v1/chat/completions'), true) + assert.equal(requestedUrls.includes('http://edited-ollama:11434/api/generate'), true) +}) + +test('generateAnswersWithOpenAICompatibleApi routes materialized Ollama keep_alive to request host for native path', async (t) => { + t.mock.method(console, 'debug', () => {}) + t.mock.method(console, 'warn', () => {}) + setStorage({ + maxConversationContextLength: 3, + maxResponseTokenLength: 333, + temperature: 0.2, + }) + + const config = { + customOpenAIProviders: [ + { + id: 'selected-mode-ollama-remote', + name: 'Selected Mode (Ollama Remote)', + baseUrl: 'http://remote-ollama:11434', + chatCompletionsPath: '/api/chat', + completionsPath: '/completions', + sourceProviderId: 'ollama', + enabled: true, + }, + ], + ollamaKeepAliveTime: '5m', + } + const session = { + modelName: 'customModel', + conversationRecords: [], + isRetry: false, + apiMode: { + groupName: 'customApiModelKeys', + itemName: 
'customModel', + isCustom: true, + providerId: 'selected-mode-ollama-remote', + customName: 'llama3.2', + customUrl: '', + apiKey: '', + active: true, + }, + } + const port = createFakePort() + + const requestedUrls = [] + t.mock.method(globalThis, 'fetch', async (input) => { + requestedUrls.push(String(input)) + if (String(input).endsWith('/api/generate')) { + return { ok: true } + } + return createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + ]) + }) + + await generateAnswersWithOpenAICompatibleApi(port, 'CurrentQ', session, config) + + assert.equal(requestedUrls.includes('http://remote-ollama:11434/api/chat'), true) + assert.equal(requestedUrls.includes('http://remote-ollama:11434/api/generate'), true) + assert.equal(requestedUrls.includes('http://127.0.0.1:11434/api/generate'), false) +}) + +test('generateAnswersWithOpenAICompatibleApi prefers resolved request url for native materialized Ollama keep_alive', async (t) => { + t.mock.method(console, 'debug', () => {}) + t.mock.method(console, 'warn', () => {}) + setStorage({ + maxConversationContextLength: 3, + maxResponseTokenLength: 333, + temperature: 0.2, + }) + + const config = { + customOpenAIProviders: [ + { + id: 'selected-mode-ollama-direct', + name: 'Selected Mode (Ollama Direct)', + baseUrl: 'http://base-ollama:11434', + chatCompletionsPath: '/api/chat', + completionsPath: '/completions', + chatCompletionsUrl: 'http://direct-ollama:11434/custom/v1/messages', + sourceProviderId: 'ollama', + enabled: true, + }, + ], + ollamaKeepAliveTime: '5m', + } + const session = { + modelName: 'customModel', + conversationRecords: [], + isRetry: false, + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'selected-mode-ollama-direct', + customName: 'llama3.2', + customUrl: '', + apiKey: '', + active: true, + }, + } + const port = createFakePort() + + const requestedUrls = [] + t.mock.method(globalThis, 'fetch', 
async (input) => { + requestedUrls.push(String(input)) + if (String(input).endsWith('/api/generate')) { + return { ok: true } + } + return createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + ]) + }) + + await generateAnswersWithOpenAICompatibleApi(port, 'CurrentQ', session, config) + + assert.equal(requestedUrls.includes('http://direct-ollama:11434/custom/v1/messages'), true) + assert.equal(requestedUrls.includes('http://direct-ollama:11434/custom/api/generate'), true) + assert.equal(requestedUrls.includes('http://base-ollama:11434/api/generate'), false) +}) + +test('generateAnswersWithOpenAICompatibleApi strips query string before native Ollama keep_alive routing', async (t) => { + t.mock.method(console, 'debug', () => {}) + t.mock.method(console, 'warn', () => {}) + setStorage({ + maxConversationContextLength: 3, + maxResponseTokenLength: 333, + temperature: 0.2, + }) + + const config = { + customOpenAIProviders: [ + { + id: 'selected-mode-ollama-query', + name: 'Selected Mode (Ollama Query)', + baseUrl: 'http://base-ollama:11434', + chatCompletionsPath: '/api/chat', + completionsPath: '/completions', + chatCompletionsUrl: 'http://query-ollama:11434/ollama/v1/messages?token=abc', + sourceProviderId: 'ollama', + enabled: true, + }, + ], + ollamaKeepAliveTime: '5m', + } + const session = { + modelName: 'customModel', + conversationRecords: [], + isRetry: false, + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'selected-mode-ollama-query', + customName: 'llama3.2', + customUrl: '', + apiKey: '', + active: true, + }, + } + const port = createFakePort() + + const requestedUrls = [] + t.mock.method(globalThis, 'fetch', async (input) => { + requestedUrls.push(String(input)) + if (String(input).endsWith('/api/generate')) { + return { ok: true } + } + return createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + 
]) + }) + + await generateAnswersWithOpenAICompatibleApi(port, 'CurrentQ', session, config) + + assert.equal( + requestedUrls.includes('http://query-ollama:11434/ollama/v1/messages?token=abc'), + true, + ) + assert.equal(requestedUrls.includes('http://query-ollama:11434/ollama/api/generate'), true) + assert.equal( + requestedUrls.some((url) => url.includes('/messages?token=abc/api/generate')), + false, + ) +}) + +test('generateAnswersWithOpenAICompatibleApi strips non-standard v1 Ollama chat path before keep_alive', async (t) => { + t.mock.method(console, 'debug', () => {}) + t.mock.method(console, 'warn', () => {}) + setStorage({ + maxConversationContextLength: 3, + maxResponseTokenLength: 333, + temperature: 0.2, + }) + + const config = { + customOpenAIProviders: [ + { + id: 'selected-mode-ollama-messages', + name: 'Selected Mode (Ollama Messages)', + baseUrl: 'http://base-ollama:11434/v1', + chatCompletionsUrl: 'http://messages-ollama:11434/v1/messages', + sourceProviderId: 'ollama', + enabled: true, + }, + ], + ollamaKeepAliveTime: '5m', + } + const session = { + modelName: 'customModel', + conversationRecords: [], + isRetry: false, + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'selected-mode-ollama-messages', + customName: 'llama3.2', + customUrl: '', + apiKey: '', + active: true, + }, + } + const port = createFakePort() + + const requestedUrls = [] + t.mock.method(globalThis, 'fetch', async (input) => { + requestedUrls.push(String(input)) + if (String(input).endsWith('/api/generate')) { + return { ok: true } + } + return createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + ]) + }) + + await generateAnswersWithOpenAICompatibleApi(port, 'CurrentQ', session, config) + + assert.equal(requestedUrls.includes('http://messages-ollama:11434/v1/messages'), true) + assert.equal(requestedUrls.includes('http://messages-ollama:11434/api/generate'), true) + 
assert.equal( + requestedUrls.includes('http://messages-ollama:11434/v1/messages/api/generate'), + false, + ) +}) + +test('generateAnswersWithOpenAICompatibleApi strips non-standard Ollama chat suffix before keep_alive', async (t) => { + t.mock.method(console, 'debug', () => {}) + t.mock.method(console, 'warn', () => {}) + setStorage({ + maxConversationContextLength: 3, + maxResponseTokenLength: 333, + temperature: 0.2, + }) + + const config = { + customOpenAIProviders: [ + { + id: 'selected-mode-ollama-chat', + name: 'Selected Mode (Ollama Chat)', + baseUrl: 'http://base-ollama:11434/v1', + chatCompletionsUrl: 'http://chat-ollama:11434/api/chat', + sourceProviderId: 'ollama', + enabled: true, + }, + ], + ollamaKeepAliveTime: '5m', + } + const session = { + modelName: 'customModel', + conversationRecords: [], + isRetry: false, + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'selected-mode-ollama-chat', + customName: 'llama3.2', + customUrl: '', + apiKey: '', + active: true, + }, + } + const port = createFakePort() + + const requestedUrls = [] + t.mock.method(globalThis, 'fetch', async (input) => { + requestedUrls.push(String(input)) + if (String(input).endsWith('/api/generate')) { + return { ok: true } + } + return createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + ]) + }) + + await generateAnswersWithOpenAICompatibleApi(port, 'CurrentQ', session, config) + + assert.equal(requestedUrls.includes('http://chat-ollama:11434/api/chat'), true) + assert.equal(requestedUrls.includes('http://chat-ollama:11434/api/generate'), true) + assert.equal(requestedUrls.includes('http://chat-ollama:11434/api/chat/api/generate'), false) +}) + +test('generateAnswersWithOpenAICompatibleApi strips nested non-standard Ollama chat path before keep_alive', async (t) => { + t.mock.method(console, 'debug', () => {}) + t.mock.method(console, 'warn', () => {}) + setStorage({ + 
maxConversationContextLength: 3, + maxResponseTokenLength: 333, + temperature: 0.2, + }) + + const config = { + customOpenAIProviders: [ + { + id: 'selected-mode-ollama-nested', + name: 'Selected Mode (Ollama Nested)', + baseUrl: 'http://base-ollama:11434/v1', + chatCompletionsUrl: 'http://nested-ollama:11434/custom/v1/messages', + sourceProviderId: 'ollama', + enabled: true, + }, + ], + ollamaKeepAliveTime: '5m', + } + const session = { + modelName: 'customModel', + conversationRecords: [], + isRetry: false, + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'selected-mode-ollama-nested', + customName: 'llama3.2', + customUrl: '', + apiKey: '', + active: true, + }, + } + const port = createFakePort() + + const requestedUrls = [] + t.mock.method(globalThis, 'fetch', async (input) => { + requestedUrls.push(String(input)) + if (String(input).endsWith('/api/generate')) { + return { ok: true } + } + return createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + ]) + }) + + await generateAnswersWithOpenAICompatibleApi(port, 'CurrentQ', session, config) + + assert.equal(requestedUrls.includes('http://nested-ollama:11434/custom/v1/messages'), true) + assert.equal(requestedUrls.includes('http://nested-ollama:11434/custom/api/generate'), true) + assert.equal( + requestedUrls.includes('http://nested-ollama:11434/custom/v1/messages/api/generate'), + false, + ) +}) + +test('generateAnswersWithOpenAICompatibleApi uses secretProviderId lineage for recovered OpenAI sessions', async (t) => { + t.mock.method(console, 'debug', () => {}) + setStorage({ + maxConversationContextLength: 3, + maxResponseTokenLength: 777, + temperature: 0.2, + }) + + const config = { + providerSecrets: { + openai: 'legacy-openai-key', + }, + } + const session = { + modelName: 'customModel', + conversationRecords: [], + isRetry: false, + apiMode: { + groupName: 'customApiModelKeys', + itemName: 
'customModel', + isCustom: true, + providerId: 'openai', + customName: 'gpt-5.4-mini', + customUrl: 'https://proxy.example.com/v1/chat/completions', + apiKey: 'legacy-openai-key', + active: true, + }, + } + const port = createFakePort() + + let capturedInput + let capturedInit + t.mock.method(globalThis, 'fetch', async (input, init) => { + capturedInput = input capturedInit = init return createMockSseResponse([ 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', ]) }) - for (const modelName of gpt5LatestCompatModelNames) { - capturedInit = undefined - const session = { - modelName, - conversationRecords: [], - isRetry: false, - } - const port = createFakePort() - - await generateAnswersWithOpenAiApiCompat( - 'https://api.example.com/v1', - port, - 'CurrentQ', - session, - 'sk-test', - {}, - 'openai', - ) + await generateAnswersWithOpenAICompatibleApi(port, 'CurrentQ', session, config) - const body = JSON.parse(capturedInit.body) - assert.equal(body.max_completion_tokens, 321) - assert.equal(Object.hasOwn(body, 'max_tokens'), false) - } + const body = JSON.parse(capturedInit.body) + assert.equal(capturedInput, 'https://proxy.example.com/v1/chat/completions') + assert.equal(body.max_tokens, 777) + assert.equal(Object.hasOwn(body, 'max_completion_tokens'), false) }) -test('generateAnswersWithOpenAiApiCompat uses latest mapped gpt-5 API model values', async (t) => { +test('generateAnswersWithOpenAICompatibleApi keeps OpenAI token params for recovered sessions on native OpenAI URL even when built-in OpenAI URL points to a proxy', async (t) => { t.mock.method(console, 'debug', () => {}) setStorage({ maxConversationContextLength: 3, - maxResponseTokenLength: 111, + maxResponseTokenLength: 777, temperature: 0.2, }) + + const config = { + providerSecrets: { + openai: 'legacy-openai-key', + }, + customOpenAiApiUrl: 'https://proxy.example.com/v1', + } + const session = { + modelName: 'customModel', + conversationRecords: [], + isRetry: false, + 
apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'openai', + customName: 'gpt-5.4-mini', + customUrl: 'https://api.openai.com/v1/chat/completions', + apiKey: 'legacy-openai-key', + active: true, + }, + } + const port = createFakePort() + let capturedInit t.mock.method(globalThis, 'fetch', async (_input, init) => { capturedInit = init @@ -148,80 +1292,86 @@ test('generateAnswersWithOpenAiApiCompat uses latest mapped gpt-5 API model valu ]) }) - for (const [modelName, expectedModel] of gpt5LatestMappedModels) { - capturedInit = undefined - const session = { - modelName, - conversationRecords: [], - isRetry: false, - } - const port = createFakePort() - - await generateAnswersWithOpenAiApiCompat( - 'https://api.example.com/v1', - port, - 'CurrentQ', - session, - 'sk-test', - {}, - 'openai', - ) + await generateAnswersWithOpenAICompatibleApi(port, 'CurrentQ', session, config) - const body = JSON.parse(capturedInit.body) - assert.equal(body.model, expectedModel) - assert.equal(body.max_completion_tokens, 111) - assert.equal(Object.hasOwn(body, 'max_tokens'), false) - } + const body = JSON.parse(capturedInit.body) + assert.equal(body.max_completion_tokens, 777) + assert.equal(Object.hasOwn(body, 'max_tokens'), false) }) -test('generateAnswersWithOpenAiApi uses OpenAI token params for a latest mapped gpt-5 model', async (t) => { +test('generateAnswersWithOpenAICompatibleApi keeps generic compat token params for recovered OpenAI sessions on proxy URL even when built-in OpenAI URL matches that proxy', async (t) => { t.mock.method(console, 'debug', () => {}) setStorage({ - customOpenAiApiUrl: 'https://api.openai.example.com', maxConversationContextLength: 3, - maxResponseTokenLength: 222, + maxResponseTokenLength: 777, temperature: 0.2, }) + const config = { + providerSecrets: { + openai: 'legacy-openai-key', + }, + customOpenAiApiUrl: 'https://proxy.example.com/v1', + } const session = { - modelName: 
'chatgptApi5_2Latest', + modelName: 'customModel', conversationRecords: [], isRetry: false, + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'openai', + customName: 'gpt-5.4-mini', + customUrl: 'https://proxy.example.com/v1/chat/completions', + apiKey: 'legacy-openai-key', + active: true, + }, } const port = createFakePort() - let capturedInput let capturedInit - t.mock.method(globalThis, 'fetch', async (input, init) => { - capturedInput = input + t.mock.method(globalThis, 'fetch', async (_input, init) => { capturedInit = init return createMockSseResponse([ 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', ]) }) - await generateAnswersWithOpenAiApi(port, 'CurrentQ', session, 'sk-test') + await generateAnswersWithOpenAICompatibleApi(port, 'CurrentQ', session, config) const body = JSON.parse(capturedInit.body) - assert.equal(capturedInput, 'https://api.openai.example.com/v1/chat/completions') - assert.equal(body.model, 'gpt-5.2-chat-latest') - assert.equal(body.max_completion_tokens, 222) - assert.equal(Object.hasOwn(body, 'max_tokens'), false) + assert.equal(body.max_tokens, 777) + assert.equal(Object.hasOwn(body, 'max_completion_tokens'), false) }) -test('generateAnswersWithOpenAiApi uses max_completion_tokens for GPT-5.4 mini', async (t) => { +test('generateAnswersWithOpenAICompatibleApi keeps generic compat token params for recovered OpenAI sessions', async (t) => { t.mock.method(console, 'debug', () => {}) setStorage({ - customOpenAiApiUrl: 'https://api.openai.example.com', maxConversationContextLength: 3, - maxResponseTokenLength: 444, - temperature: 0.3, + maxResponseTokenLength: 777, + temperature: 0.2, }) + const config = { + providerSecrets: { + openai: 'legacy-openai-key', + }, + } const session = { - modelName: 'chatgptApi5_4Mini', + modelName: 'customModel', conversationRecords: [], isRetry: false, + apiMode: { + groupName: 'customApiModelKeys', + itemName: 
'customModel', + isCustom: true, + providerId: 'openai', + customName: 'gpt-5.4-mini', + customUrl: 'https://proxy.example.com/v1/chat/completions', + apiKey: 'legacy-openai-key', + active: true, + }, } const port = createFakePort() @@ -235,48 +1385,107 @@ test('generateAnswersWithOpenAiApi uses max_completion_tokens for GPT-5.4 mini', ]) }) - await generateAnswersWithOpenAiApi(port, 'CurrentQ', session, 'sk-test') + await generateAnswersWithOpenAICompatibleApi(port, 'CurrentQ', session, config) const body = JSON.parse(capturedInit.body) - assert.equal(capturedInput, 'https://api.openai.example.com/v1/chat/completions') - assert.equal(body.model, 'gpt-5.4-mini') - assert.equal(body.max_completion_tokens, 444) - assert.equal(Object.hasOwn(body, 'max_tokens'), false) + assert.equal(capturedInput, 'https://proxy.example.com/v1/chat/completions') + assert.equal(body.max_tokens, 777) + assert.equal(Object.hasOwn(body, 'max_completion_tokens'), false) }) -test('generateAnswersWithOpenAiApi uses max_completion_tokens for GPT-5.4 nano', async (t) => { +test('generateAnswersWithOpenAICompatibleApi keeps Ollama keep_alive for recovered Ollama sessions with native path', async (t) => { t.mock.method(console, 'debug', () => {}) + t.mock.method(console, 'warn', () => {}) setStorage({ - customOpenAiApiUrl: 'https://api.openai.example.com', maxConversationContextLength: 3, - maxResponseTokenLength: 555, - temperature: 0.3, + maxResponseTokenLength: 333, + temperature: 0.2, }) + const config = { + ollamaKeepAliveTime: '5m', + } const session = { - modelName: 'chatgptApi5_4Nano', + modelName: 'customModel', conversationRecords: [], isRetry: false, + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'ollama', + customName: 'llama3.2', + customUrl: 'http://recovered-ollama:11434/api/chat', + apiKey: '', + active: true, + }, } const port = createFakePort() - let capturedInput - let capturedInit - t.mock.method(globalThis, 'fetch', 
async (input, init) => { - capturedInput = input - capturedInit = init + const requestedUrls = [] + t.mock.method(globalThis, 'fetch', async (input) => { + requestedUrls.push(String(input)) + if (String(input).endsWith('/api/generate')) { + return { ok: true } + } return createMockSseResponse([ 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', ]) }) - await generateAnswersWithOpenAiApi(port, 'CurrentQ', session, 'sk-test') + await generateAnswersWithOpenAICompatibleApi(port, 'CurrentQ', session, config) - const body = JSON.parse(capturedInit.body) - assert.equal(capturedInput, 'https://api.openai.example.com/v1/chat/completions') - assert.equal(body.model, 'gpt-5.4-nano') - assert.equal(body.max_completion_tokens, 555) - assert.equal(Object.hasOwn(body, 'max_tokens'), false) + assert.equal(requestedUrls.includes('http://recovered-ollama:11434/api/chat'), true) + assert.equal(requestedUrls.includes('http://recovered-ollama:11434/api/generate'), true) +}) + +test('generateAnswersWithOpenAICompatibleApi skips Ollama keep_alive for recovered compat sessions', async (t) => { + t.mock.method(console, 'debug', () => {}) + t.mock.method(console, 'warn', () => {}) + setStorage({ + maxConversationContextLength: 3, + maxResponseTokenLength: 333, + temperature: 0.2, + }) + + const config = { + ollamaKeepAliveTime: '5m', + } + const session = { + modelName: 'customModel', + conversationRecords: [], + isRetry: false, + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'ollama', + customName: 'llama3.2', + customUrl: 'https://proxy.example.com/v1/chat/completions', + apiKey: '', + active: true, + }, + } + const port = createFakePort() + + const requestedUrls = [] + t.mock.method(globalThis, 'fetch', async (input) => { + requestedUrls.push(String(input)) + if (String(input).endsWith('/api/generate')) { + return { ok: true } + } + return createMockSseResponse([ + 'data: 
{"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + ]) + }) + + await generateAnswersWithOpenAICompatibleApi(port, 'CurrentQ', session, config) + + assert.equal(requestedUrls.includes('https://proxy.example.com/v1/chat/completions'), true) + assert.equal( + requestedUrls.some((url) => url.endsWith('/api/generate')), + false, + ) }) test('generateAnswersWithOpenAiApiCompat keeps max_tokens for latest mapped gpt-5 models in compat provider', async (t) => { @@ -689,3 +1898,172 @@ test('generateAnswersWithGptCompletionApi builds completion prompt and appends a ) assert.deepEqual(session.conversationRecords.at(-1), { question: 'NowQ', answer: 'AB' }) }) + +test('generateAnswersWithGptCompletionApi avoids duplicate /v1 when customOpenAiApiUrl already has /v1', async (t) => { + t.mock.method(console, 'debug', () => {}) + setStorage({ + customOpenAiApiUrl: 'https://api.example.com/v1/', + maxConversationContextLength: 5, + maxResponseTokenLength: 300, + temperature: 0.5, + }) + + const session = { + modelName: 'gptApiInstruct', + conversationRecords: [], + isRetry: false, + } + const port = createFakePort() + + let capturedInput + t.mock.method(globalThis, 'fetch', async (input) => { + capturedInput = input + return createMockSseResponse(['data: {"choices":[{"text":"Done","finish_reason":"stop"}]}\n\n']) + }) + + await generateAnswersWithGptCompletionApi(port, 'NowQ', session, 'sk-completion') + + assert.equal(capturedInput, 'https://api.example.com/v1/completions') +}) + +test('generateAnswersWithChatgptApi avoids duplicate /v1 when customOpenAiApiUrl already has /v1', async (t) => { + t.mock.method(console, 'debug', () => {}) + setStorage({ + customOpenAiApiUrl: 'https://api.example.com/v1/', + maxConversationContextLength: 2, + maxResponseTokenLength: 128, + temperature: 0.2, + }) + + const session = { + modelName: 'chatgptApi4oMini', + conversationRecords: [], + isRetry: false, + } + const port = createFakePort() + + let capturedInput + 
t.mock.method(globalThis, 'fetch', async (input) => { + capturedInput = input + return createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + ]) + }) + + await generateAnswersWithOpenAiApi(port, 'NowQ', session, 'sk-chat') + + assert.equal(capturedInput, 'https://api.example.com/v1/chat/completions') +}) + +test('generateAnswersWithOpenAICompatibleApi uses default Ollama endpoint for keepAlive when empty', async (t) => { + t.mock.method(console, 'debug', () => {}) + t.mock.method(console, 'warn', () => {}) + setStorage({ + maxConversationContextLength: 2, + maxResponseTokenLength: 64, + temperature: 0.2, + }) + + const config = { + ollamaEndpoint: '', + providerSecrets: {}, + customOpenAIProviders: [], + } + const session = { + modelName: 'ollama', + apiMode: { + groupName: 'ollamaApiModelKeys', + itemName: 'ollama', + isCustom: false, + customName: '', + customUrl: '', + apiKey: '', + providerId: '', + active: true, + }, + conversationRecords: [], + isRetry: false, + } + const port = createFakePort() + const requestedUrls = [] + + t.mock.method(globalThis, 'fetch', async (input) => { + requestedUrls.push(String(input)) + if (String(input).endsWith('/chat/completions')) { + return createMockSseResponse([ + 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + ]) + } + return { ok: true } + }) + + await generateAnswersWithOpenAICompatibleApi(port, 'NowQ', session, config) + + assert.equal(requestedUrls.includes('http://127.0.0.1:11434/v1/chat/completions'), true) + assert.equal(requestedUrls.includes('http://127.0.0.1:11434/api/generate'), true) +}) + +test('generateAnswersWithOpenAICompatibleApi ignores non-string legacy response chunks', async (t) => { + t.mock.method(console, 'debug', () => {}) + setStorage({ + maxConversationContextLength: 2, + maxResponseTokenLength: 64, + temperature: 0.2, + }) + + const config = { + providerSecrets: { + 'my-provider': 'sk-custom', + }, + 
customOpenAIProviders: [ + { + id: 'my-provider', + name: 'My Provider', + baseUrl: 'https://api.example.com', + chatCompletionsPath: '/v1/chat/completions', + completionsPath: '/v1/completions', + enabled: true, + allowLegacyResponseField: true, + }, + ], + } + const session = { + modelName: 'customModel', + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'my-model', + customUrl: '', + apiKey: '', + providerId: 'my-provider', + active: true, + }, + conversationRecords: [], + isRetry: false, + } + const port = createFakePort() + + t.mock.method(globalThis, 'fetch', async () => + createMockSseResponse([ + 'data: {"response":false}\n\n', + 'data: {"choices":[{"delta":{"content":"OK"},"finish_reason":"stop"}]}\n\n', + ]), + ) + + await generateAnswersWithOpenAICompatibleApi(port, 'NowQ', session, config) + + assert.equal( + port.postedMessages.some((message) => message.done === false && message.answer === 'false'), + false, + ) + assert.equal( + port.postedMessages.some((message) => message.done === false && message.answer === 'falseOK'), + false, + ) + assert.equal( + port.postedMessages.some((message) => message.done === false && message.answer === 'OK'), + true, + ) + assert.deepEqual(session.conversationRecords.at(-1), { question: 'NowQ', answer: 'OK' }) +}) diff --git a/tests/unit/services/apis/provider-registry.test.mjs b/tests/unit/services/apis/provider-registry.test.mjs new file mode 100644 index 000000000..434587690 --- /dev/null +++ b/tests/unit/services/apis/provider-registry.test.mjs @@ -0,0 +1,2425 @@ +import assert from 'node:assert/strict' +import { test } from 'node:test' +import { + getCustomOpenAIProviders, + getProviderById, + resolveEndpointTypeForSession, + resolveOpenAICompatibleRequest, +} from '../../../../src/services/apis/provider-registry.mjs' + +test('resolveEndpointTypeForSession prefers apiMode when present', () => { + const session = { + apiMode: { + groupName: 
'chatgptApiModelKeys', + itemName: 'gpt-4o-mini', + }, + modelName: 'gptApiInstruct', + } + + assert.equal(resolveEndpointTypeForSession(session), 'chat') +}) + +test('resolveEndpointTypeForSession returns completion for gptApiModelKeys apiMode', () => { + const session = { + apiMode: { + groupName: 'gptApiModelKeys', + itemName: 'text-davinci-003', + }, + modelName: 'chatgptApi4oMini', + } + + assert.equal(resolveEndpointTypeForSession(session), 'completion') +}) + +test('resolveEndpointTypeForSession falls back to legacy modelName when apiMode is missing', () => { + const session = { + modelName: 'gptApiInstruct-text-davinci-003', + } + + assert.equal(resolveEndpointTypeForSession(session), 'completion') +}) + +test('resolveOpenAICompatibleRequest resolves custom provider from normalized session provider id', () => { + const config = { + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + myproxy: 'proxy-key', + }, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: ' MyProxy ', + customName: 'proxy-model', + customUrl: '', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'myproxy') + assert.equal(resolved.requestUrl, 'https://proxy.example.com/v1/chat/completions') + assert.equal(resolved.apiKey, 'proxy-key') +}) + +test('getCustomOpenAIProviders defaults allowLegacyResponseField to true when absent', () => { + const config = { + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + myproxy: 'proxy-key', + }, + } + const providers = getCustomOpenAIProviders(config) + + 
assert.equal(providers[0].allowLegacyResponseField, true) +}) + +test('getCustomOpenAIProviders preserves explicit false allowLegacyResponseField', () => { + const config = { + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + allowLegacyResponseField: false, + }, + ], + providerSecrets: { + myproxy: 'proxy-key', + }, + } + const providers = getCustomOpenAIProviders(config) + + assert.equal(providers[0].allowLegacyResponseField, false) +}) + +test('resolveOpenAICompatibleRequest resolves provider secret when session providerId is not canonical', () => { + const config = { + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + myproxy: 'proxy-key', + }, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'proxy-model', + customUrl: '', + apiKey: '', + providerId: 'myproxy', + active: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'MyProxy', + customName: 'proxy-model', + customUrl: '', + apiKey: '', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.apiKey, 'proxy-key') +}) + +test('resolveOpenAICompatibleRequest treats empty providerSecrets entries as authoritative', () => { + const config = { + providerSecrets: { + openai: '', + }, + apiKey: 'legacy-openai-key', + } + const session = { + apiMode: { + groupName: 'chatgptApiModelKeys', + itemName: 'gpt-4o-mini', + apiKey: 'stale-session-key', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + 
assert.equal(resolved.providerId, 'openai') + assert.equal(resolved.apiKey, '') +}) + +test('resolveOpenAICompatibleRequest keeps empty configured custom provider secret entry over legacy session key', () => { + const config = { + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + myproxy: '', + }, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'myproxy', + customName: 'proxy-model', + apiKey: '', + active: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'myproxy', + customName: 'proxy-model', + customUrl: '', + apiKey: 'legacy-session-key', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.apiKey, '') +}) + +test('resolveOpenAICompatibleRequest keeps empty configured custom provider secret entry when no legacy session key exists', () => { + const config = { + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + myproxy: '', + }, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'myproxy', + customName: 'proxy-model', + apiKey: '', + active: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'myproxy', + customName: 'proxy-model', + customUrl: '', + apiKey: '', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + 
assert.equal(resolved.apiKey, '') +}) + +test('resolveOpenAICompatibleRequest preserves orphan custom session key override when mode is not in config', () => { + const config = { + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + myproxy: 'new-provider-key', + }, + customApiModes: [], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'myproxy', + customName: 'proxy-model', + customUrl: '', + apiKey: 'stale-session-key', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.apiKey, 'stale-session-key') +}) + +test('resolveOpenAICompatibleRequest falls back to session customUrl when referenced provider is missing', () => { + const config = { + customOpenAIProviders: [], + customApiModes: [], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'missing-provider', + customName: 'proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions', + apiKey: 'session-key', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'legacy-custom-default') + assert.equal(resolved.provider.id, 'legacy-custom-default') + assert.equal(getProviderById(config, resolved.providerId)?.id, 'legacy-custom-default') + assert.equal(resolved.requestUrl, 'https://proxy.example.com/v1/chat/completions') + assert.equal(resolved.apiKey, 'session-key') +}) + +test('resolveOpenAICompatibleRequest keeps original provider secret when missing provider session has no key', () => { + const config = { + customOpenAIProviders: [], + customApiModes: [], + providerSecrets: { + 'missing-provider': 
'missing-provider-key', + }, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'missing-provider', + customName: 'proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions', + apiKey: '', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'legacy-custom-default') + assert.equal(resolved.requestUrl, 'https://proxy.example.com/v1/chat/completions') + assert.equal(resolved.apiKey, 'missing-provider-key') +}) + +test('resolveOpenAICompatibleRequest keeps explicit empty stale provider secret over legacy fallback secret', () => { + const config = { + customOpenAIProviders: [], + customApiModes: [], + providerSecrets: { + 'missing-provider': '', + 'legacy-custom-default': 'legacy-provider-key', + }, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'missing-provider', + customName: 'proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions', + apiKey: '', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'legacy-custom-default') + assert.equal(resolved.requestUrl, 'https://proxy.example.com/v1/chat/completions') + assert.equal(resolved.apiKey, '') +}) + +test('resolveOpenAICompatibleRequest falls back to legacy custom provider secret when missing provider has no secret', () => { + const config = { + customOpenAIProviders: [], + customApiModes: [], + providerSecrets: { + 'legacy-custom-default': 'legacy-provider-key', + }, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'missing-provider', + customName: 'proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions', + apiKey: '', + active: true, + }, + } + + const resolved = 
resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'legacy-custom-default') + assert.equal(resolved.requestUrl, 'https://proxy.example.com/v1/chat/completions') + assert.equal(resolved.apiKey, 'legacy-provider-key') +}) + +test('resolveOpenAICompatibleRequest preserves per-session customUrl for direct legacy sessions', () => { + const config = { + customOpenAIProviders: [], + customApiModes: [], + customModelApiUrl: 'https://new.example.com/v1/chat/completions', + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'legacy-custom-default', + customName: 'legacy-custom', + customUrl: 'https://stale.example.com/v1/chat/completions', + apiKey: '', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'legacy-custom-default') + assert.equal(resolved.requestUrl, 'https://stale.example.com/v1/chat/completions') +}) + +test('resolveOpenAICompatibleRequest keeps failing when missing provider has no customUrl', () => { + const config = { + customOpenAIProviders: [], + customApiModes: [], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'missing-provider', + customName: 'proxy-model', + customUrl: '', + apiKey: 'session-key', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved, null) +}) + +test('resolveOpenAICompatibleRequest does not use custom session fallback outside customApiModelKeys', () => { + const config = { + customOpenAIProviders: [], + customApiModes: [], + } + const session = { + apiMode: { + groupName: 'gptApiModelKeys', + itemName: 'text-davinci-003', + providerId: 'missing-provider', + customUrl: 'https://proxy.example.com/v1/chat/completions', + apiKey: 'session-key', + }, + modelName: 'gptApiInstruct-text-davinci-003', + } 
+ + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'openai') + assert.equal(resolved.requestUrl, 'https://api.openai.com/v1/completions') +}) + +test('resolveOpenAICompatibleRequest preserves normalized OpenAI source provider on materialized custom providers', () => { + const config = { + customOpenAIProviders: [ + { + id: 'selected-mode-openai', + name: 'Selected Mode (OpenAI)', + baseUrl: 'https://proxy.example.com/v1', + chatCompletionsPath: '/chat/completions', + completionsPath: '/completions', + sourceProviderId: 'openai', + enabled: true, + }, + ], + providerSecrets: { + 'selected-mode-openai': 'proxy-key', + }, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'selected-mode-openai', + customName: 'gpt-5.4-mini', + customUrl: '', + apiKey: '', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'selected-mode-openai') + assert.equal(resolved.provider.sourceProviderId, 'openai') +}) + +test('resolveOpenAICompatibleRequest preserves normalized Ollama source provider on materialized custom providers', () => { + const config = { + customOpenAIProviders: [ + { + id: 'selected-mode-ollama', + name: 'Selected Mode (Ollama)', + baseUrl: 'http://127.0.0.1:11434/v1', + chatCompletionsPath: '/chat/completions', + completionsPath: '/completions', + sourceProviderId: 'ollama', + enabled: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'selected-mode-ollama', + customName: 'llama3.2', + customUrl: '', + apiKey: '', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'selected-mode-ollama') + assert.equal(resolved.provider.sourceProviderId, 'ollama') +}) + 
+test('resolveOpenAICompatibleRequest prefers configured provider secret over stale custom session key', () => { + const config = { + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + myproxy: 'new-provider-key', + }, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'proxy-model', + customUrl: '', + apiKey: '', + providerId: 'myproxy', + active: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'myproxy', + customName: 'proxy-model', + customUrl: '', + apiKey: 'stale-session-key', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.apiKey, 'new-provider-key') +}) + +test('resolveOpenAICompatibleRequest deduplicates selected custom mode when config copies differ only by apiKey', () => { + const config = { + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + myproxy: 'updated-mode-key', + }, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'proxy-model', + customUrl: '', + apiKey: '', + providerId: 'myproxy', + active: true, + }, + ], + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'proxy-model', + customUrl: '', + apiKey: 'updated-mode-key', + providerId: 'myproxy', + active: true, + }, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 
'myproxy', + customName: 'proxy-model', + customUrl: '', + apiKey: 'stale-session-key', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.apiKey, 'updated-mode-key') +}) + +test('resolveOpenAICompatibleRequest matches configured custom mode when session providerId needs normalization', () => { + const config = { + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + myproxy: 'new-provider-key', + }, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'proxy-model', + customUrl: '', + apiKey: '', + providerId: 'myproxy', + active: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: ' MyProxy ', + customName: 'proxy-model', + customUrl: '', + apiKey: 'stale-session-key', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.apiKey, 'new-provider-key') +}) + +test('resolveOpenAICompatibleRequest recovers custom provider from legacy customUrl when provider uses baseUrl and path', () => { + const config = { + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + baseUrl: 'https://proxy.example.com', + chatCompletionsPath: '/v1/chat/completions', + completionsPath: '/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + myproxy: 'proxy-key', + }, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'OpenAI', + customName: 'proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions/', + apiKey: '', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 
'myproxy') + assert.equal(resolved.requestUrl, 'https://proxy.example.com/v1/chat/completions') + assert.equal(resolved.apiKey, 'proxy-key') +}) + +test('resolveOpenAICompatibleRequest recovers by legacy customUrl when provider direct chat url changed but derived url still matches', () => { + const config = { + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://direct.example.com/v1/chat/completions', + baseUrl: 'https://derived.example.com', + chatCompletionsPath: '/v1/chat/completions', + completionsUrl: 'https://direct.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + myproxy: 'proxy-key', + }, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'OpenAI', + customName: 'proxy-model', + customUrl: 'https://derived.example.com/v1/chat/completions', + apiKey: '', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'legacy-custom-default') + assert.equal(resolved.requestUrl, 'https://derived.example.com/v1/chat/completions') + assert.equal(resolved.apiKey, '') +}) + +test('resolveOpenAICompatibleRequest does not fall back when the referenced custom provider is disabled', () => { + const config = { + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + enabled: false, + }, + ], + providerSecrets: { + 'legacy-custom-default': 'legacy-provider-key', + }, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'myproxy', + customName: 'proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions', + apiKey: '', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved, null) +}) + +test('resolveOpenAICompatibleRequest does not fall back when customUrl points at a 
disabled custom provider', () => { + const config = { + customOpenAIProviders: [ + { + id: 'disabled-provider', + name: 'Disabled Provider', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + enabled: false, + }, + ], + customApiModes: [], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'missing-provider', + customName: 'proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions', + apiKey: 'session-key', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved, null) +}) + +test('resolveOpenAICompatibleRequest uses recovered provider url instead of stale legacy customUrl', () => { + const config = { + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://new.example.com/v1/chat/completions', + completionsUrl: 'https://new.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + myproxy: 'provider-key', + }, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'proxy-model', + customUrl: '', + apiKey: '', + providerId: 'myproxy', + active: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: '', + isCustom: false, + providerId: '', + customName: 'proxy-model', + customUrl: 'https://old.example.com/v1/chat/completions', + apiKey: 'stale-session-key', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'myproxy') + assert.equal(resolved.requestUrl, 'https://new.example.com/v1/chat/completions') + assert.equal(resolved.apiKey, 'provider-key') +}) + +test('resolveOpenAICompatibleRequest preserves per-session customUrl for unrecovered legacy custom sessions', () => { + const config = { + customModelApiUrl: 
'https://global-default.example.com/v1/chat/completions', + providerSecrets: { + 'legacy-custom-default': 'legacy-key', + }, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: '', + customName: 'orphaned-self-hosted', + customUrl: 'https://self-hosted.example.com/v1/chat/completions', + apiKey: 'session-key', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'legacy-custom-default') + assert.equal(resolved.requestUrl, 'https://self-hosted.example.com/v1/chat/completions') + assert.equal(resolved.apiKey, 'session-key') +}) + +test('resolveOpenAICompatibleRequest uses global customModelApiUrl for direct legacy sessions without customUrl', () => { + const config = { + customModelApiUrl: 'https://global-default.example.com/v1/chat/completions', + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'legacy-custom-default', + customName: 'legacy-custom', + customUrl: '', + apiKey: '', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'legacy-custom-default') + assert.equal(resolved.requestUrl, 'https://global-default.example.com/v1/chat/completions') +}) + +test('resolveOpenAICompatibleRequest uses recovered provider url when configured provider reuses legacy-custom-default id', () => { + const config = { + customOpenAIProviders: [ + { + id: 'legacy-custom-default', + name: 'Recovered Legacy Provider', + chatCompletionsUrl: 'https://new.example.com/v1/chat/completions', + completionsUrl: 'https://new.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + 'legacy-custom-default': 'provider-key', + }, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: '', + isCustom: false, + customName: 'proxy-model', + customUrl: '', + apiKey: '', + providerId: 'legacy-custom-default', + active: true, + }, + ], + } + const session = { + apiMode: { + 
groupName: 'customApiModelKeys', + itemName: '', + isCustom: false, + providerId: '', + customName: 'proxy-model', + customUrl: 'https://old.example.com/v1/chat/completions', + apiKey: 'stale-session-key', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'legacy-custom-default') + assert.equal(resolved.requestUrl, 'https://new.example.com/v1/chat/completions') + assert.equal(resolved.apiKey, 'provider-key') +}) + +test('resolveOpenAICompatibleRequest uses global legacy custom url when label recovery lands on legacy-custom-default', () => { + const config = { + customModelApiUrl: 'https://global-default.example.com/v1/chat/completions', + providerSecrets: { + 'legacy-custom-default': 'legacy-key', + }, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: '', + isCustom: false, + customName: 'legacy-proxy', + customUrl: '', + apiKey: '', + providerId: 'legacy-custom-default', + active: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: '', + isCustom: false, + providerId: '', + customName: 'legacy-proxy', + customUrl: 'https://saved-session.example.com/v1/chat/completions', + apiKey: 'session-key', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'legacy-custom-default') + assert.equal(resolved.requestUrl, 'https://global-default.example.com/v1/chat/completions') + assert.equal(resolved.apiKey, 'legacy-key') +}) + +test('resolveOpenAICompatibleRequest falls back to provider secret when custom mode label changes', () => { + const config = { + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + myproxy: 'updated-provider-key', + }, + customApiModes: [ + { + groupName: 
'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'renamed-model', + customUrl: '', + apiKey: '', + providerId: 'myproxy', + active: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'myproxy', + customName: 'old-model-name', + customUrl: '', + apiKey: 'stale-session-key', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.apiKey, 'updated-provider-key') +}) + +test('resolveOpenAICompatibleRequest does not treat the only provider mode as a renamed session mode', () => { + const config = { + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + myproxy: 'provider-key', + }, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'replacement-mode', + customUrl: '', + apiKey: 'replacement-mode-key', + providerId: 'myproxy', + active: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'myproxy', + customName: 'deleted-mode', + customUrl: '', + apiKey: 'session-key', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.apiKey, 'session-key') +}) + +test('resolveOpenAICompatibleRequest preserves session key when multiple custom modes share one provider', () => { + const config = { + customOpenAIProviders: [ + { + id: 'shared-provider', + name: 'Shared Provider', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + 'shared-provider': 
'provider-key', + }, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'mode-a', + customUrl: '', + apiKey: '', + providerId: 'shared-provider', + active: true, + }, + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'mode-b', + customUrl: '', + apiKey: 'mode-b-key', + providerId: 'shared-provider', + active: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'shared-provider', + customName: 'old-session-name', + customUrl: '', + apiKey: 'session-key', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.apiKey, 'session-key') +}) + +test('resolveOpenAICompatibleRequest matches the correct custom mode by customName when multiple modes share one provider', () => { + const config = { + customOpenAIProviders: [ + { + id: 'shared-provider', + name: 'Shared Provider', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + 'shared-provider': 'provider-key', + }, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'mode-a', + customUrl: '', + apiKey: '', + providerId: 'shared-provider', + active: true, + }, + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'mode-b', + customUrl: '', + apiKey: 'mode-b-key', + providerId: 'shared-provider', + active: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'shared-provider', + customName: 'mode-b', + customUrl: '', + apiKey: 'stale-session-key', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.apiKey, 'mode-b-key') +}) + 
+test('resolveOpenAICompatibleRequest uses provider secret when multiple custom modes share one provider but none has a mode-specific key', () => { + const config = { + customOpenAIProviders: [ + { + id: 'shared-provider', + name: 'Shared Provider', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + 'shared-provider': 'provider-key', + }, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'mode-a', + customUrl: '', + apiKey: '', + providerId: 'shared-provider', + active: true, + }, + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'mode-b', + customUrl: '', + apiKey: '', + providerId: 'shared-provider', + active: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'shared-provider', + customName: 'old-session-name', + customUrl: '', + apiKey: 'stale-session-key', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.apiKey, 'provider-key') +}) + +test('resolveOpenAICompatibleRequest preserves session key when multiple custom modes share one provider without configured keys', () => { + const config = { + customOpenAIProviders: [ + { + id: 'shared-provider', + name: 'Shared Provider', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: {}, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'mode-a', + customUrl: '', + apiKey: '', + providerId: 'shared-provider', + active: true, + }, + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'mode-b', + customUrl: '', + apiKey: '', + providerId: 
'shared-provider', + active: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'shared-provider', + customName: 'old-session-name', + customUrl: '', + apiKey: 'session-key', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.apiKey, 'session-key') +}) + +test('resolveOpenAICompatibleRequest falls back to provider secret for custom provider when mode-level key is empty', () => { + const config = { + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + myproxy: 'provider-key', + }, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'proxy-model', + customUrl: '', + apiKey: '', + providerId: 'myproxy', + active: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'myproxy', + customName: 'proxy-model', + customUrl: '', + apiKey: '', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.apiKey, 'provider-key') +}) + +test('resolveOpenAICompatibleRequest prefers configured custom mode key over provider secret', () => { + const config = { + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + myproxy: 'provider-key', + }, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'proxy-model', + customUrl: '', + apiKey: 'mode-key', + providerId: 'myproxy', + active: true, + }, + ], + } + const session 
= { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'myproxy', + customName: 'proxy-model', + customUrl: '', + apiKey: 'stale-session-key', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.apiKey, 'mode-key') +}) + +test('resolveOpenAICompatibleRequest preserves session key when matched custom mode has no saved key', () => { + const config = { + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: {}, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'proxy-model', + customUrl: '', + apiKey: '', + providerId: 'myproxy', + active: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'myproxy', + customName: 'proxy-model', + customUrl: '', + apiKey: 'session-key', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.apiKey, 'session-key') +}) + +test('resolveOpenAICompatibleRequest ignores active-state differences when matching configured custom mode', () => { + const config = { + customOpenAIProviders: [ + { + id: 'myproxy', + name: 'My Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + myproxy: 'updated-provider-key', + }, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'proxy-model', + customUrl: '', + apiKey: '', + providerId: 'myproxy', + active: false, + }, + ], + } + const session = { + apiMode: { + groupName: 
'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'myproxy', + customName: 'proxy-model', + customUrl: '', + apiKey: 'stale-session-key', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.apiKey, 'updated-provider-key') +}) + +test('resolveOpenAICompatibleRequest falls back to provider secret when providerId was migrated but provider still resolves', () => { + const config = { + customOpenAIProviders: [ + { + id: 'openai-2', + name: 'Legacy OpenAI Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + 'openai-2': 'updated-provider-key', + }, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'renamed-model', + customUrl: '', + apiKey: '', + providerId: 'openai-2', + active: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'openai', + customName: 'old-model-name', + customUrl: 'https://proxy.example.com/v1/chat/completions/', + apiKey: 'stale-session-key', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'openai-2') + assert.equal(resolved.apiKey, 'updated-provider-key') +}) + +test('resolveOpenAICompatibleRequest resolves custom provider by legacy customUrl when session provider id collides with builtin id', () => { + const config = { + customOpenAIProviders: [ + { + id: 'openai-2', + name: 'Legacy OpenAI Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + 'openai-2': 'proxy-key', + }, + } + const session = { + apiMode: { + groupName: 
'customApiModelKeys', + providerId: 'openai', + customName: 'proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions/', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'openai-2') + assert.equal(resolved.secretProviderId, 'openai-2') + assert.equal(resolved.requestUrl, 'https://proxy.example.com/v1/chat/completions') + assert.equal(resolved.apiKey, 'proxy-key') +}) + +test('resolveOpenAICompatibleRequest recovers orphaned custom session with builtin-like stale provider id', () => { + const config = { + customModelApiUrl: 'https://fallback.example.com/v1/chat/completions', + providerSecrets: { + 'legacy-custom-default': 'legacy-key', + }, + customOpenAIProviders: [], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'openai', + customName: 'orphaned-proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions/', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'legacy-custom-default') + assert.equal(resolved.secretProviderId, 'openai') + assert.equal(resolved.provider.id, 'legacy-custom-default') + assert.equal(resolved.requestUrl, 'https://proxy.example.com/v1/chat/completions/') + assert.equal(resolved.apiKey, 'legacy-key') +}) + +test('resolveOpenAICompatibleRequest keeps configured mode key during builtin-like customUrl recovery', () => { + const config = { + customModelApiUrl: 'https://fallback.example.com/v1/chat/completions', + providerSecrets: { + 'legacy-custom-default': 'legacy-key', + }, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'openai', + customName: 'orphaned-proxy-model', + customUrl: '', + apiKey: 'configured-mode-key', + active: true, + }, + ], + customOpenAIProviders: [], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + 
isCustom: true, + providerId: 'openai', + customName: 'orphaned-proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions/', + apiKey: '', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'legacy-custom-default') + assert.equal(resolved.secretProviderId, 'openai') + assert.equal(resolved.requestUrl, 'https://proxy.example.com/v1/chat/completions/') + assert.equal(resolved.apiKey, 'configured-mode-key') +}) + +test('resolveOpenAICompatibleRequest uses recovered secret slot for builtin-like stale provider id', () => { + const config = { + customModelApiUrl: 'https://fallback.example.com/v1/chat/completions', + providerSecrets: { + openai: 'recovered-shared-key', + 'legacy-custom-default': 'legacy-key', + }, + customOpenAIProviders: [], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'openai', + customName: 'orphaned-proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions/', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'legacy-custom-default') + assert.equal(resolved.secretProviderId, 'openai') + assert.equal(resolved.requestUrl, 'https://proxy.example.com/v1/chat/completions/') + assert.equal(resolved.apiKey, 'recovered-shared-key') +}) + +test('resolveOpenAICompatibleRequest prefers recovered secret slot over stale session key for builtin-like stale provider id', () => { + const config = { + customModelApiUrl: 'https://fallback.example.com/v1/chat/completions', + providerSecrets: { + openai: 'recovered-shared-key', + 'legacy-custom-default': 'legacy-key', + }, + customOpenAIProviders: [], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'openai', + customName: 'orphaned-proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions/', + apiKey: 'session-key', + }, + } + + const resolved = 
resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'legacy-custom-default') + assert.equal(resolved.secretProviderId, 'openai') + assert.equal(resolved.requestUrl, 'https://proxy.example.com/v1/chat/completions/') + assert.equal(resolved.apiKey, 'recovered-shared-key') +}) + +test('resolveOpenAICompatibleRequest keeps explicit empty recovered secret entry over stale session key', () => { + const config = { + customModelApiUrl: 'https://fallback.example.com/v1/chat/completions', + providerSecrets: { + openai: '', + 'legacy-custom-default': 'legacy-key', + }, + customOpenAIProviders: [], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'openai', + customName: 'orphaned-proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions/', + apiKey: 'session-key', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'legacy-custom-default') + assert.equal(resolved.secretProviderId, 'openai') + assert.equal(resolved.requestUrl, 'https://proxy.example.com/v1/chat/completions/') + assert.equal(resolved.apiKey, '') +}) + +test('resolveOpenAICompatibleRequest keeps empty builtin-like secret entry over legacy fallback during customUrl recovery', () => { + const config = { + customModelApiUrl: 'https://fallback.example.com/v1/chat/completions', + providerSecrets: { + openai: '', + 'legacy-custom-default': 'legacy-key', + }, + customOpenAIProviders: [], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'openai', + customName: 'orphaned-proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions/', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'legacy-custom-default') + assert.equal(resolved.requestUrl, 'https://proxy.example.com/v1/chat/completions/') + assert.equal(resolved.apiKey, '') +}) + 
+test('resolveOpenAICompatibleRequest matches legacy customUrl session by mode-level apiKey', () => { + const config = { + customOpenAIProviders: [ + { + id: 'proxy-a', + name: 'Proxy A', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + { + id: 'proxy-b', + name: 'Proxy B', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + 'proxy-a': 'key-a', + 'proxy-b': 'key-b', + }, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'openai', + customName: 'proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions', + apiKey: 'key-b', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'proxy-b') + assert.equal(resolved.apiKey, 'key-b') +}) + +test('resolveOpenAICompatibleRequest resolves renamed custom provider before falling back to builtin provider', () => { + const config = { + customOpenAIProviders: [ + { + id: 'openai-2', + name: 'Legacy OpenAI Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + 'openai-2': 'proxy-key', + }, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'proxy-model', + customUrl: '', + apiKey: '', + providerId: 'openai-2', + active: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'openai', + customName: 'proxy-model', + customUrl: '', + apiKey: 'stale-session-key', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 
'openai-2') + assert.equal(resolved.requestUrl, 'https://proxy.example.com/v1/chat/completions') + assert.equal(resolved.apiKey, 'proxy-key') +}) + +test('resolveOpenAICompatibleRequest does not fall back to builtin provider when custom provider cannot be safely recovered', () => { + const config = { + providerSecrets: { + openai: 'builtin-openai-key', + }, + customOpenAIProviders: [ + { + id: 'openai-2', + name: 'Legacy OpenAI Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'renamed-proxy-model', + customUrl: '', + apiKey: '', + providerId: 'openai-2', + active: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'openai', + customName: 'missing-session-label', + customUrl: '', + apiKey: 'stale-session-key', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved, null) +}) + +test('resolveOpenAICompatibleRequest does not recover builtin-like stale provider id when customUrl matches disabled custom provider', () => { + const config = { + customModelApiUrl: 'https://fallback.example.com/v1/chat/completions', + providerSecrets: { + 'legacy-custom-default': 'legacy-key', + }, + customOpenAIProviders: [ + { + id: 'openai-2', + name: 'Disabled Legacy OpenAI Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: false, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'openai', + customName: 'disabled-proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions/', + }, + } + + const resolved = 
resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved, null) +}) + +test('resolveOpenAICompatibleRequest recovers orphaned custom session when enabled provider direct url changed but derived url still matches', () => { + const config = { + customModelApiUrl: 'https://fallback.example.com/v1/chat/completions', + providerSecrets: { + 'legacy-custom-default': 'legacy-key', + }, + customOpenAIProviders: [ + { + id: 'openai-2', + name: 'Renamed Proxy', + chatCompletionsUrl: 'https://new-proxy.example.com/v1/chat/completions', + baseUrl: 'https://old-proxy.example.com', + chatCompletionsPath: '/v1/chat/completions', + completionsUrl: 'https://new-proxy.example.com/v1/completions', + enabled: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'openai', + customName: 'orphaned-proxy-model', + customUrl: 'https://old-proxy.example.com/v1/chat/completions', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'legacy-custom-default') + assert.equal(resolved.requestUrl, 'https://old-proxy.example.com/v1/chat/completions') + assert.equal(resolved.apiKey, 'legacy-key') +}) + +test('resolveOpenAICompatibleRequest recovers orphaned custom session when disabled provider direct url changed but derived url still matches', () => { + const config = { + customModelApiUrl: 'https://fallback.example.com/v1/chat/completions', + providerSecrets: { + 'legacy-custom-default': 'legacy-key', + }, + customOpenAIProviders: [ + { + id: 'openai-2', + name: 'Disabled Renamed Proxy', + chatCompletionsUrl: 'https://new-proxy.example.com/v1/chat/completions', + baseUrl: 'https://old-proxy.example.com', + chatCompletionsPath: '/v1/chat/completions', + completionsUrl: 'https://new-proxy.example.com/v1/completions', + enabled: false, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'openai', + customName: 
'orphaned-proxy-model', + customUrl: 'https://old-proxy.example.com/v1/chat/completions', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'legacy-custom-default') + assert.equal(resolved.requestUrl, 'https://old-proxy.example.com/v1/chat/completions') + assert.equal(resolved.apiKey, 'legacy-key') +}) + +test('resolveOpenAICompatibleRequest recovers legacy custom default provider from label-matched configured mode', () => { + const config = { + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'legacy-proxy', + customUrl: '', + apiKey: '', + providerId: 'legacy-custom-default', + active: true, + }, + ], + customModelApiUrl: 'https://legacy-proxy.example.com/v1/chat/completions', + providerSecrets: { + 'legacy-custom-default': 'legacy-key', + }, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'openai', + customName: 'legacy-proxy', + customUrl: '', + apiKey: 'stale-session-key', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'legacy-custom-default') + assert.equal(resolved.requestUrl, 'https://legacy-proxy.example.com/v1/chat/completions') + assert.equal(resolved.apiKey, 'legacy-key') +}) + +test('resolveOpenAICompatibleRequest uses global legacy custom url when label recovery hits legacy-custom-default with stale session customUrl', () => { + const config = { + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'legacy-proxy', + customUrl: '', + apiKey: '', + providerId: 'legacy-custom-default', + active: true, + }, + ], + customModelApiUrl: 'https://new-legacy-proxy.example.com/v1/chat/completions', + providerSecrets: { + 'legacy-custom-default': 'legacy-key', + }, + } + const session = { + apiMode: { + 
groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'missing-proxy', + customName: 'legacy-proxy', + customUrl: 'https://old-legacy-proxy.example.com/v1/chat/completions', + apiKey: 'stale-session-key', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'legacy-custom-default') + assert.equal(resolved.requestUrl, 'https://new-legacy-proxy.example.com/v1/chat/completions') + assert.notEqual(resolved.requestUrl, session.apiMode.customUrl) + assert.equal(resolved.apiKey, 'legacy-key') +}) + +test('resolveOpenAICompatibleRequest prefers label recovery when multiple providers share a legacy custom url', () => { + const sharedUrl = 'https://shared-proxy.example.com/v1/chat/completions' + const config = { + customOpenAIProviders: [ + { + id: 'proxy-a', + name: 'Proxy A', + chatCompletionsUrl: sharedUrl, + completionsUrl: 'https://shared-proxy.example.com/v1/completions', + enabled: true, + }, + { + id: 'proxy-b', + name: 'Proxy B', + chatCompletionsUrl: sharedUrl, + completionsUrl: 'https://shared-proxy.example.com/v1/completions', + enabled: true, + }, + ], + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'proxy-b-mode', + customUrl: '', + apiKey: '', + providerId: 'proxy-b', + active: true, + }, + ], + providerSecrets: { + 'proxy-b': 'proxy-b-key', + }, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'missing-provider', + customName: 'proxy-b-mode', + customUrl: sharedUrl, + apiKey: '', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'proxy-b') + assert.equal(resolved.requestUrl, sharedUrl) + assert.equal(resolved.apiKey, 'proxy-b-key') +}) + +test('resolveOpenAICompatibleRequest recovers renamed custom provider for 
legacy session without itemName and isCustom', () => { + const config = { + customOpenAIProviders: [ + { + id: 'openai-2', + name: 'Legacy OpenAI Proxy', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + 'openai-2': 'proxy-key', + }, + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'proxy-model', + customUrl: '', + apiKey: '', + providerId: 'openai-2', + active: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'openai', + customName: 'proxy-model', + customUrl: '', + apiKey: 'stale-session-key', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'openai-2') + assert.equal(resolved.requestUrl, 'https://proxy.example.com/v1/chat/completions') + assert.equal(resolved.apiKey, 'proxy-key') +}) + +test('resolveOpenAICompatibleRequest keeps fail-closed behavior when legacy label recovery is ambiguous', () => { + const config = { + customOpenAIProviders: [ + { + id: 'proxy-a', + name: 'Proxy A', + chatCompletionsUrl: 'https://proxy-a.example.com/v1/chat/completions', + completionsUrl: 'https://proxy-a.example.com/v1/completions', + enabled: true, + }, + { + id: 'proxy-b', + name: 'Proxy B', + chatCompletionsUrl: 'https://proxy-b.example.com/v1/chat/completions', + completionsUrl: 'https://proxy-b.example.com/v1/completions', + enabled: true, + }, + ], + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'shared-label', + customUrl: '', + apiKey: '', + providerId: 'proxy-a', + active: true, + }, + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'shared-label', + customUrl: '', + apiKey: '', + providerId: 'proxy-b', + active: true, + }, + ], + } + 
const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'openai', + customName: 'shared-label', + customUrl: '', + apiKey: 'stale-session-key', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved, null) +}) + +test('resolveOpenAICompatibleRequest fails closed when shared legacy custom url remains ambiguous', () => { + const sharedUrl = 'https://shared-proxy.example.com/v1/chat/completions' + const config = { + customOpenAIProviders: [ + { + id: 'proxy-a', + name: 'Proxy A', + chatCompletionsUrl: sharedUrl, + completionsUrl: 'https://shared-proxy.example.com/v1/completions', + enabled: true, + }, + { + id: 'proxy-b', + name: 'Proxy B', + chatCompletionsUrl: sharedUrl, + completionsUrl: 'https://shared-proxy.example.com/v1/completions', + enabled: true, + }, + ], + customApiModes: [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'shared-label', + customUrl: '', + apiKey: '', + providerId: 'proxy-a', + active: true, + }, + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'shared-label', + customUrl: '', + apiKey: '', + providerId: 'proxy-b', + active: true, + }, + ], + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + providerId: 'missing-provider', + customName: 'shared-label', + customUrl: sharedUrl, + apiKey: '', + active: true, + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved, null) +}) + +test('resolveOpenAICompatibleRequest fails closed when legacy customUrl has only provider secrets and no session key signal', () => { + const config = { + customOpenAIProviders: [ + { + id: 'proxy-a', + name: 'Proxy A', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + { + id: 
'proxy-b', + name: 'Proxy B', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + 'legacy-custom-default': 'key-b', + 'proxy-a': 'key-a', + 'proxy-b': 'key-b', + }, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'openai', + customName: 'proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions', + apiKey: '', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved, null) +}) + +test('resolveOpenAICompatibleRequest fails closed when legacy customUrl session has no key signal', () => { + const config = { + customOpenAIProviders: [ + { + id: 'proxy-a', + name: 'Proxy A', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + { + id: 'proxy-b', + name: 'Proxy B', + chatCompletionsUrl: 'https://proxy.example.com/v1/chat/completions', + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + 'proxy-a': 'key-a', + 'proxy-b': 'key-b', + }, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'openai', + customName: 'proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions', + apiKey: '', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved, null) +}) + +test('resolveOpenAICompatibleRequest fails closed when legacy customUrl key signal matches multiple providers', () => { + const sharedUrl = 'https://proxy.example.com/v1/chat/completions' + const config = { + customOpenAIProviders: [ + { + id: 'proxy-a', + name: 'Proxy A', + chatCompletionsUrl: sharedUrl, + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + { + id: 'proxy-b', + name: 'Proxy B', + 
chatCompletionsUrl: sharedUrl, + completionsUrl: 'https://proxy.example.com/v1/completions', + enabled: true, + }, + ], + providerSecrets: { + 'proxy-a': 'shared-key', + 'proxy-b': 'shared-key', + }, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'openai', + customName: 'proxy-model', + customUrl: sharedUrl, + apiKey: 'shared-key', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved, null) +}) + +test('resolveOpenAICompatibleRequest avoids duplicate /v1 for OpenAI base URL with /v1 suffix', () => { + const config = { + customOpenAiApiUrl: 'https://api.openai.com/v1/', + providerSecrets: { + openai: 'openai-key', + }, + } + const session = { + apiMode: { + groupName: 'chatgptApiModelKeys', + itemName: 'chatgptApi4oMini', + providerId: '', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'openai') + assert.equal(resolved.requestUrl, 'https://api.openai.com/v1/chat/completions') +}) + +test('resolveOpenAICompatibleRequest avoids duplicate /v1 for OpenAI completion URL with /v1 suffix', () => { + const config = { + customOpenAiApiUrl: 'https://api.openai.com/v1/', + providerSecrets: { + openai: 'openai-key', + }, + } + const session = { + apiMode: { + groupName: 'gptApiModelKeys', + itemName: 'gptApiInstruct', + providerId: '', + }, + } + + const resolved = resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'openai') + assert.equal(resolved.endpointType, 'completion') + assert.equal(resolved.requestUrl, 'https://api.openai.com/v1/completions') +}) + +test('resolveOpenAICompatibleRequest avoids duplicate /v1 for Ollama endpoint with /v1 suffix', () => { + const config = { + ollamaEndpoint: 'http://127.0.0.1:11434/v1/', + } + const session = { + apiMode: { + groupName: 'ollamaApiModelKeys', + itemName: 'ollama', + providerId: '', + }, + } + + const resolved = 
resolveOpenAICompatibleRequest(config, session) + + assert.equal(resolved.providerId, 'ollama') + assert.equal(resolved.requestUrl, 'http://127.0.0.1:11434/v1/chat/completions') +}) diff --git a/tests/unit/services/apis/thin-adapters.test.mjs b/tests/unit/services/apis/thin-adapters.test.mjs index 1b3318d34..385fd25f7 100644 --- a/tests/unit/services/apis/thin-adapters.test.mjs +++ b/tests/unit/services/apis/thin-adapters.test.mjs @@ -3,11 +3,7 @@ import { beforeEach, test } from 'node:test' import { createFakePort } from '../../helpers/port.mjs' import { createMockSseResponse } from '../../helpers/sse-response.mjs' -import { generateAnswersWithAimlApi } from '../../../../src/services/apis/aiml-api.mjs' -import { generateAnswersWithDeepSeekApi } from '../../../../src/services/apis/deepseek-api.mjs' -import { generateAnswersWithMoonshotCompletionApi } from '../../../../src/services/apis/moonshot-api.mjs' -import { generateAnswersWithOpenRouterApi } from '../../../../src/services/apis/openrouter-api.mjs' -import { generateAnswersWithChatGLMApi } from '../../../../src/services/apis/chatglm-api.mjs' +import { generateAnswersWithOpenAICompatibleApi } from '../../../../src/services/apis/openai-api.mjs' const setStorage = (values) => { globalThis.__TEST_BROWSER_SHIM__.replaceStorage(values) @@ -23,8 +19,8 @@ const commonStorage = { temperature: 0.5, } -const makeSession = () => ({ - modelName: 'chatgptApi4oMini', +const makeSession = (apiMode) => ({ + apiMode, conversationRecords: [], isRetry: false, }) @@ -34,47 +30,54 @@ const sseChunks = ['data: {"choices":[{"delta":{"content":"OK"},"finish_reason": const adapters = [ { name: 'aiml-api', - fn: (port, q, session) => generateAnswersWithAimlApi(port, q, session, 'aiml-key'), + apiMode: { groupName: 'aimlModelKeys', itemName: 'aiml_openai_o3_2025_04_16' }, + providerId: 'aiml', expectedBaseUrl: 'https://api.aimlapi.com/v1', expectedApiKey: 'aiml-key', - storage: commonStorage, }, { name: 'deepseek-api', - fn: (port, q, 
session) => generateAnswersWithDeepSeekApi(port, q, session, 'ds-key'), + apiMode: { groupName: 'deepSeekApiModelKeys', itemName: 'deepseek_chat' }, + providerId: 'deepseek', expectedBaseUrl: 'https://api.deepseek.com', expectedApiKey: 'ds-key', - storage: commonStorage, }, { name: 'moonshot-api', - fn: (port, q, session) => generateAnswersWithMoonshotCompletionApi(port, q, session, 'ms-key'), + apiMode: { groupName: 'moonshotApiModelKeys', itemName: 'moonshot_kimi_latest' }, + providerId: 'moonshot', expectedBaseUrl: 'https://api.moonshot.cn/v1', expectedApiKey: 'ms-key', - storage: commonStorage, }, { name: 'openrouter-api', - fn: (port, q, session) => generateAnswersWithOpenRouterApi(port, q, session, 'or-key'), + apiMode: { groupName: 'openRouterApiModelKeys', itemName: 'openRouter_openai_o3' }, + providerId: 'openrouter', expectedBaseUrl: 'https://openrouter.ai/api/v1', expectedApiKey: 'or-key', - storage: commonStorage, }, { name: 'chatglm-api', - fn: (port, q, session) => generateAnswersWithChatGLMApi(port, q, session), + apiMode: { groupName: 'chatglmApiModelKeys', itemName: 'chatglmTurbo' }, + providerId: 'chatglm', expectedBaseUrl: 'https://open.bigmodel.cn/api/paas/v4', expectedApiKey: 'glm-key', - storage: { ...commonStorage, chatglmApiKey: 'glm-key' }, }, ] for (const adapter of adapters) { test(`${adapter.name}: passes correct base URL and API key`, async (t) => { t.mock.method(console, 'debug', () => {}) - setStorage(adapter.storage) - const session = makeSession() + const config = { + ...commonStorage, + providerSecrets: { + [adapter.providerId]: adapter.expectedApiKey, + }, + } + setStorage(config) + + const session = makeSession(adapter.apiMode) const port = createFakePort() let capturedInput, capturedInit @@ -84,7 +87,7 @@ for (const adapter of adapters) { return createMockSseResponse(sseChunks) }) - await adapter.fn(port, 'Q', session) + await generateAnswersWithOpenAICompatibleApi(port, 'Q', session, config) assert.equal(capturedInput, 
`${adapter.expectedBaseUrl}/chat/completions`) // Verify API key reaches the Authorization header @@ -93,14 +96,21 @@ for (const adapter of adapters) { test(`${adapter.name}: delegates to compat layer and produces output`, async (t) => { t.mock.method(console, 'debug', () => {}) - setStorage(adapter.storage) - const session = makeSession() + const config = { + ...commonStorage, + providerSecrets: { + [adapter.providerId]: adapter.expectedApiKey, + }, + } + setStorage(config) + + const session = makeSession(adapter.apiMode) const port = createFakePort() t.mock.method(globalThis, 'fetch', async () => createMockSseResponse(sseChunks)) - await adapter.fn(port, 'Q', session) + await generateAnswersWithOpenAICompatibleApi(port, 'Q', session, config) assert.equal( port.postedMessages.some((m) => m.done === true && m.session === session), @@ -115,9 +125,13 @@ for (const adapter of adapters) { test('chatglm-api: reads chatglmApiKey from config', async (t) => { t.mock.method(console, 'debug', () => {}) - setStorage({ ...commonStorage, chatglmApiKey: 'glm-secret' }) + const config = { ...commonStorage, chatglmApiKey: 'glm-secret' } + setStorage(config) - const session = makeSession() + const session = makeSession({ + groupName: 'chatglmApiModelKeys', + itemName: 'chatglmTurbo', + }) const port = createFakePort() let capturedInit @@ -126,7 +140,7 @@ test('chatglm-api: reads chatglmApiKey from config', async (t) => { return createMockSseResponse(sseChunks) }) - await generateAnswersWithChatGLMApi(port, 'Q', session) + await generateAnswersWithOpenAICompatibleApi(port, 'Q', session, config) assert.equal(capturedInit.headers.Authorization, 'Bearer glm-secret') }) diff --git a/tests/unit/services/wrappers-register.test.mjs b/tests/unit/services/wrappers-register.test.mjs index c1786e783..66ab7171e 100644 --- a/tests/unit/services/wrappers-register.test.mjs +++ b/tests/unit/services/wrappers-register.test.mjs @@ -43,6 +43,7 @@ import { getBardCookies, getClaudeSessionKey, } from 
'../../../src/services/wrappers.mjs' +import { normalizeApiMode } from '../../../src/utils/model-name-convert.mjs' const setStorage = (values) => { globalThis.__TEST_BROWSER_SHIM__.replaceStorage(values) @@ -176,7 +177,7 @@ test('registerPortListener defaults apiMode from config for non-custom models', port.emitMessage({ session: { conversationRecords: [] } }) const session = await execDone - assert.deepEqual(session.apiMode, apiMode) + assert.deepEqual(session.apiMode, normalizeApiMode(apiMode)) }) test('registerPortListener sets aiName when not provided', async (t) => { diff --git a/tests/unit/utils/model-name-convert.test.mjs b/tests/unit/utils/model-name-convert.test.mjs index fb1fe4880..c604c1d09 100644 --- a/tests/unit/utils/model-name-convert.test.mjs +++ b/tests/unit/utils/model-name-convert.test.mjs @@ -4,6 +4,7 @@ import { apiModeToModelName, getApiModesFromConfig, getApiModesStringArrayFromConfig, + getUniquelySelectedApiModeIndex, isApiModeSelected, isInApiModeGroup, isUsingModelName, @@ -13,6 +14,7 @@ import { modelNameToPresetPart, modelNameToValue, getModelValue, + normalizeApiMode, } from '../../../src/utils/model-name-convert.mjs' import { ModelGroups } from '../../../src/config/index.mjs' @@ -97,6 +99,394 @@ test('getApiModesFromConfig merges active and custom API modes correctly', () => ) }) +test('getApiModesFromConfig keeps AlwaysCustomGroups modes when itemName is empty', () => { + const config = { + activeApiModes: ['customModel'], + customApiModes: [ + { + groupName: 'ollamaApiModelKeys', + itemName: '', + isCustom: true, + customName: 'llama3.2', + customUrl: '', + apiKey: '', + providerId: '', + active: true, + }, + ], + azureDeploymentName: 'deploy-a', + ollamaModelName: 'llama4', + } + + const onlyActive = getApiModesFromConfig(config, true) + + assert.equal( + onlyActive.some( + (mode) => mode.groupName === 'ollamaApiModelKeys' && mode.customName === 'llama3.2', + ), + true, + ) + assert.equal(apiModeToModelName(onlyActive[0]), 
'ollamaApiModelKeys-llama3.2') +}) + +test('getApiModesFromConfig drops nameless Azure row instead of hiding the legacy active mode', () => { + const config = { + activeApiModes: ['azureOpenAi'], + customApiModes: [ + { + groupName: 'azureOpenAiApiModelKeys', + itemName: '', + isCustom: true, + customName: '', + customUrl: '', + apiKey: '', + providerId: 'blank-azure-provider', + active: true, + }, + ], + azureDeploymentName: '', + ollamaModelName: 'llama4', + } + + const allModes = getApiModesFromConfig(config, false) + const onlyActive = getApiModesFromConfig(config, true) + + assert.equal( + allModes.some((mode) => mode.providerId === 'blank-azure-provider'), + false, + ) + assert.equal( + allModes.some((mode) => mode.itemName === 'azureOpenAi'), + true, + ) + assert.equal( + onlyActive.some((mode) => mode.itemName === 'azureOpenAi'), + true, + ) +}) + +test('getApiModesFromConfig drops nameless Ollama row instead of hiding the legacy active mode', () => { + const config = { + activeApiModes: ['ollamaModel'], + customApiModes: [ + { + groupName: 'ollamaApiModelKeys', + itemName: '', + isCustom: true, + customName: '', + customUrl: '', + apiKey: '', + providerId: 'blank-ollama-provider', + active: true, + }, + ], + azureDeploymentName: 'deploy-a', + ollamaModelName: '', + } + + const allModes = getApiModesFromConfig(config, false) + const onlyActive = getApiModesFromConfig(config, true) + + assert.equal( + allModes.some((mode) => mode.providerId === 'blank-ollama-provider'), + false, + ) + assert.equal( + allModes.some((mode) => mode.itemName === 'ollamaModel'), + true, + ) + assert.equal( + onlyActive.some((mode) => mode.itemName === 'ollamaModel'), + true, + ) +}) + +test('getApiModesFromConfig deduplicates migrated Ollama legacy row against kept AlwaysCustomGroups mode', () => { + const config = { + activeApiModes: ['ollamaModel-llama3.2'], + customApiModes: [ + { + groupName: 'ollamaApiModelKeys', + itemName: '', + isCustom: true, + customName: 'llama3.2', + 
customUrl: '', + apiKey: '', + providerId: '', + active: true, + }, + ], + azureDeploymentName: 'deploy-a', + ollamaModelName: 'llama3.2', + } + + const allModes = getApiModesFromConfig(config, false) + assert.equal( + allModes.filter((mode) => apiModeToModelName(mode) === 'ollamaApiModelKeys-llama3.2').length, + 1, + ) +}) + +test('getApiModesFromConfig preserves active state when inactive Ollama custom row matches active legacy mode', () => { + const config = { + activeApiModes: ['ollamaModel-llama3.2'], + customApiModes: [ + { + groupName: 'ollamaApiModelKeys', + itemName: '', + isCustom: true, + customName: 'llama3.2', + customUrl: 'http://localhost:11434/api/chat', + apiKey: '', + providerId: 'preserved-ollama-provider', + sourceProviderId: 'ollama', + active: false, + }, + ], + azureDeploymentName: 'deploy-a', + ollamaModelName: 'llama3.2', + } + + const allModes = getApiModesFromConfig(config, false) + const onlyActive = getApiModesFromConfig(config, true) + const preservedMode = allModes.find( + (mode) => apiModeToModelName(mode) === 'ollamaApiModelKeys-llama3.2', + ) + + assert.equal( + allModes.filter((mode) => apiModeToModelName(mode) === 'ollamaApiModelKeys-llama3.2').length, + 1, + ) + assert.equal(preservedMode.active, true) + assert.equal(preservedMode.itemName, 'ollamaModel') + assert.equal(preservedMode.providerId, 'preserved-ollama-provider') + assert.equal(preservedMode.customUrl, 'http://localhost:11434/api/chat') + assert.equal(isApiModeSelected(preservedMode, { modelName: 'ollamaModel-llama3.2' }), true) + assert.equal( + onlyActive.some((mode) => apiModeToModelName(mode) === 'ollamaApiModelKeys-llama3.2'), + true, + ) +}) + +test('getApiModesFromConfig keeps legacy Ollama row when multiple inactive custom providers share the same mode name', () => { + const config = { + activeApiModes: ['ollamaModel-llama3.2'], + customApiModes: [ + { + groupName: 'ollamaApiModelKeys', + itemName: '', + isCustom: true, + customName: 'llama3.2', + customUrl: 
'http://ollama-a:11434/api/chat', + apiKey: '', + providerId: 'ollama-provider-a', + sourceProviderId: 'ollama', + active: false, + }, + { + groupName: 'ollamaApiModelKeys', + itemName: '', + isCustom: true, + customName: 'llama3.2', + customUrl: 'http://ollama-b:11434/api/chat', + apiKey: '', + providerId: 'ollama-provider-b', + sourceProviderId: 'ollama', + active: false, + }, + ], + azureDeploymentName: 'deploy-a', + ollamaModelName: 'llama3.2', + } + + const allModes = getApiModesFromConfig(config, false) + const onlyActive = getApiModesFromConfig(config, true) + const legacyMode = allModes.find( + (mode) => + apiModeToModelName(mode) === 'ollamaApiModelKeys-llama3.2' && + mode.providerId === '' && + mode.itemName === 'ollamaModel', + ) + + assert.equal( + allModes.filter((mode) => apiModeToModelName(mode) === 'ollamaApiModelKeys-llama3.2').length, + 3, + ) + assert.equal(legacyMode.active, true) + assert.equal( + allModes.some((mode) => mode.providerId === 'ollama-provider-a' && mode.active), + false, + ) + assert.equal( + allModes.some((mode) => mode.providerId === 'ollama-provider-b' && mode.active), + false, + ) + assert.equal(onlyActive.length, 1) + assert.equal(onlyActive[0].providerId, '') + assert.equal(onlyActive[0].itemName, 'ollamaModel') +}) + +test('getApiModesFromConfig does not add a legacy Ollama row when one matching custom provider is already active', () => { + const config = { + activeApiModes: ['ollamaModel-llama3.2'], + customApiModes: [ + { + groupName: 'ollamaApiModelKeys', + itemName: '', + isCustom: true, + customName: 'llama3.2', + customUrl: 'http://ollama-a:11434/api/chat', + apiKey: '', + providerId: 'ollama-provider-a', + sourceProviderId: 'ollama', + active: true, + }, + { + groupName: 'ollamaApiModelKeys', + itemName: '', + isCustom: true, + customName: 'llama3.2', + customUrl: 'http://ollama-b:11434/api/chat', + apiKey: '', + providerId: 'ollama-provider-b', + sourceProviderId: 'ollama', + active: false, + }, + ], + 
azureDeploymentName: 'deploy-a', + ollamaModelName: 'llama3.2', + } + + const allModes = getApiModesFromConfig(config, false) + const onlyActive = getApiModesFromConfig(config, true) + const activeMode = allModes.find((mode) => mode.providerId === 'ollama-provider-a') + const inactiveMode = allModes.find((mode) => mode.providerId === 'ollama-provider-b') + + assert.equal( + allModes.filter((mode) => apiModeToModelName(mode) === 'ollamaApiModelKeys-llama3.2').length, + 2, + ) + assert.equal( + allModes.some((mode) => mode.providerId === '' && mode.itemName === 'ollamaModel'), + false, + ) + assert.equal( + allModes.some((mode) => mode.providerId === 'ollama-provider-a' && mode.active), + true, + ) + assert.equal( + allModes.some((mode) => mode.providerId === 'ollama-provider-b' && mode.active), + false, + ) + assert.equal(activeMode.itemName, 'ollamaModel') + assert.equal(inactiveMode.itemName, '') + assert.equal(isApiModeSelected(activeMode, { modelName: 'ollamaModel-llama3.2' }), true) + assert.equal(isApiModeSelected(inactiveMode, { modelName: 'ollamaModel-llama3.2' }), false) + assert.equal(onlyActive.length, 1) + assert.equal(onlyActive[0].providerId, 'ollama-provider-a') +}) + +test('getApiModesFromConfig deduplicates migrated Azure legacy row against kept AlwaysCustomGroups mode', () => { + const config = { + activeApiModes: ['azureOpenAi-deploy-a'], + customApiModes: [ + { + groupName: 'azureOpenAiApiModelKeys', + itemName: '', + isCustom: true, + customName: 'deploy-a', + customUrl: '', + apiKey: '', + providerId: '', + active: true, + }, + ], + azureDeploymentName: 'deploy-a', + ollamaModelName: 'llama4', + } + + const allModes = getApiModesFromConfig(config, false) + assert.equal( + allModes.filter((mode) => apiModeToModelName(mode) === 'azureOpenAiApiModelKeys-deploy-a') + .length, + 1, + ) +}) + +test('getApiModesFromConfig preserves active state when inactive Azure custom row matches active legacy mode', () => { + const config = { + activeApiModes: 
['azureOpenAi-deploy-a'], + customApiModes: [ + { + groupName: 'azureOpenAiApiModelKeys', + itemName: '', + isCustom: true, + customName: 'deploy-a', + customUrl: 'https://azure.example.com/openai/deployments/deploy-a/chat/completions', + apiKey: '', + providerId: 'preserved-azure-provider', + sourceProviderId: 'openai', + active: false, + }, + ], + azureDeploymentName: 'deploy-a', + ollamaModelName: 'llama4', + } + + const allModes = getApiModesFromConfig(config, false) + const onlyActive = getApiModesFromConfig(config, true) + const preservedMode = allModes.find( + (mode) => apiModeToModelName(mode) === 'azureOpenAiApiModelKeys-deploy-a', + ) + + assert.equal( + allModes.filter((mode) => apiModeToModelName(mode) === 'azureOpenAiApiModelKeys-deploy-a') + .length, + 1, + ) + assert.equal(preservedMode.active, true) + assert.equal(preservedMode.itemName, 'azureOpenAi') + assert.equal(preservedMode.providerId, 'preserved-azure-provider') + assert.equal( + preservedMode.customUrl, + 'https://azure.example.com/openai/deployments/deploy-a/chat/completions', + ) + assert.equal(isApiModeSelected(preservedMode, { modelName: 'azureOpenAi-deploy-a' }), true) + assert.equal( + onlyActive.some((mode) => apiModeToModelName(mode) === 'azureOpenAiApiModelKeys-deploy-a'), + true, + ) +}) + +test('getApiModesFromConfig does not synthesize undefined legacy Azure or Ollama names', () => { + const config = { + activeApiModes: ['azureOpenAi', 'ollamaModel'], + customApiModes: [], + azureDeploymentName: '', + ollamaModelName: '', + } + + const allModes = getApiModesFromConfig(config, false) + const onlyActive = getApiModesFromConfig(config, true) + + assert.equal( + allModes.some((mode) => apiModeToModelName(mode).includes('undefined')), + false, + ) + assert.equal( + onlyActive.some((mode) => apiModeToModelName(mode).includes('undefined')), + false, + ) + assert.equal( + allModes.some((mode) => mode.itemName === 'azureOpenAi'), + true, + ) + assert.equal( + allModes.some((mode) => 
mode.itemName === 'ollamaModel'), + true, + ) +}) + test('isUsingModelName matches base model for custom model names', () => { assert.equal(isUsingModelName('bingFree4', { modelName: 'bingFree4-fast' }), true) assert.equal(isUsingModelName('claude2WebFree', { modelName: 'chatgptFree35' }), false) @@ -316,3 +706,371 @@ test('isUsingModelName returns true for exact apiMode match', () => { test('isUsingModelName resolves ModelGroups presetPart to first value', () => { assert.equal(isUsingModelName('bingFree4', { modelName: 'bingWebModelKeys-custom' }), true) }) + +test('normalizeApiMode trims providerId', () => { + const normalized = normalizeApiMode({ + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'mode-a', + providerId: ' myproxy ', + }) + + assert.equal(normalized.providerId, 'myproxy') +}) + +test('isApiModeSelected matches apiMode when providerId differs only by whitespace', () => { + const apiMode = { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'mode-a', + providerId: 'myproxy', + } + const session = { + apiMode: { + ...apiMode, + providerId: ' myproxy ', + }, + } + + assert.equal(isApiModeSelected(apiMode, session), true) +}) + +test('isApiModeSelected returns false when either side apiMode is invalid', () => { + const validApiMode = { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'mode-a', + providerId: 'myproxy', + } + + assert.equal( + isApiModeSelected(validApiMode, { + apiMode: 'customApiModelKeys-customModel', + }), + false, + ) + assert.equal( + isApiModeSelected('customApiModelKeys-customModel', { + apiMode: validApiMode, + }), + false, + ) + assert.equal( + isApiModeSelected('customApiModelKeys-customModel', { + apiMode: 'customApiModelKeys-customModel', + }), + false, + ) +}) + +test('isApiModeSelected returns false when apiMode differs only by active state', () => { + const apiMode = { + groupName: 
'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'mode-a', + providerId: 'myproxy', + active: false, + } + const session = { + apiMode: { + ...apiMode, + active: true, + }, + } + + assert.equal(isApiModeSelected(apiMode, session), false) +}) + +test('isApiModeSelected matches legacy session missing providerId when sessionCompat is enabled', () => { + const apiMode = { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'mode-a', + providerId: 'myproxy', + active: true, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'mode-a', + }, + } + + assert.equal(isApiModeSelected(apiMode, session), false) + assert.equal(isApiModeSelected(apiMode, session, { sessionCompat: true }), true) +}) + +test('isApiModeSelected ignores active state difference for sessionCompat fallback', () => { + const apiMode = { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'mode-a', + providerId: 'myproxy', + active: false, + } + const session = { + apiMode: { + ...apiMode, + active: true, + }, + } + + assert.equal(isApiModeSelected(apiMode, session, { sessionCompat: true }), true) +}) + +test('isApiModeSelected keeps provider mismatch fail-closed for non-legacy sessionCompat fallback', () => { + const apiMode = { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'mode-a', + providerId: 'provider-a', + active: true, + } + const session = { + apiMode: { + ...apiMode, + providerId: 'provider-b', + }, + } + + assert.equal(isApiModeSelected(apiMode, session, { sessionCompat: true }), false) +}) + +test('isApiModeSelected keeps modern custom session provider mismatch fail-closed with customUrl and apiKey', () => { + const apiMode = { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'shared-name', + providerId: 
'provider-a', + active: true, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'shared-name', + providerId: 'provider-b', + customUrl: 'https://proxy.example.com/v1/chat/completions', + apiKey: 'modern-session-key', + active: true, + }, + } + + assert.equal(isApiModeSelected(apiMode, session, { sessionCompat: true }), false) +}) + +test('isApiModeSelected matches legacy custom session missing itemName and isCustom', () => { + const apiMode = { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'proxy-model', + providerId: 'openai-2', + active: true, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + providerId: 'openai', + customName: 'proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions', + apiKey: 'stale-session-key', + }, + } + + assert.equal(isApiModeSelected(apiMode, session, { sessionCompat: true }), true) +}) + +test('isApiModeSelected falls back to modelName when sessionCompat apiMode compare misses', () => { + const apiMode = { + groupName: 'bingWebModelKeys', + itemName: 'bingFree4', + isCustom: false, + customName: '', + active: true, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'different-mode', + providerId: 'provider-b', + }, + modelName: 'bingFree4', + } + + assert.equal(isApiModeSelected(apiMode, session, { sessionCompat: true }), false) +}) + +test('isApiModeSelected falls back to modelName when session apiMode is a non-object string', () => { + const apiMode = { + groupName: 'bingWebModelKeys', + itemName: 'bingFree4', + isCustom: false, + customName: '', + active: true, + } + const session = { + apiMode: 'bingFree4', + modelName: 'bingFree4', + } + + assert.equal(isApiModeSelected(apiMode, session), false) + assert.equal(isApiModeSelected(apiMode, session, { sessionCompat: true }), true) +}) + 
+test('isApiModeSelected does not double-match via legacy compat and modelName fallback', () => { + const legacyCustomMode = { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'proxy-model', + providerId: 'openai-2', + active: true, + } + const bingMode = { + groupName: 'bingWebModelKeys', + itemName: 'bingFree4', + isCustom: false, + customName: '', + active: true, + } + const session = { + apiMode: { + groupName: 'customApiModelKeys', + customName: 'proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions', + apiKey: 'stale-session-key', + }, + modelName: 'bingFree4', + } + + assert.equal(isApiModeSelected(legacyCustomMode, session, { sessionCompat: true }), true) + assert.equal(isApiModeSelected(bingMode, session, { sessionCompat: true }), false) +}) + +test('getUniquelySelectedApiModeIndex returns -1 when legacy session matches multiple custom modes', () => { + const apiModes = [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'proxy-model', + providerId: 'provider-a', + active: true, + }, + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'proxy-model', + providerId: 'provider-b', + active: true, + }, + ] + const session = { + apiMode: { + groupName: 'customApiModelKeys', + customName: 'proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions', + apiKey: 'stale-session-key', + }, + } + + assert.equal(getUniquelySelectedApiModeIndex(apiModes, session, { sessionCompat: true }), -1) +}) + +test('getUniquelySelectedApiModeIndex returns matching index for a unique legacy session match', () => { + const apiModes = [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'proxy-model', + providerId: 'provider-a', + active: true, + }, + { + groupName: 'bingWebModelKeys', + itemName: 'bingFree4', + isCustom: false, + customName: '', + active: true, + }, 
+ ] + const session = { + apiMode: { + groupName: 'customApiModelKeys', + customName: 'proxy-model', + customUrl: 'https://proxy.example.com/v1/chat/completions', + apiKey: 'stale-session-key', + }, + } + + assert.equal(getUniquelySelectedApiModeIndex(apiModes, session, { sessionCompat: true }), 0) +}) + +test('getUniquelySelectedApiModeIndex keeps modern custom session pinned to matching provider', () => { + const apiModes = [ + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'shared-name', + providerId: 'provider-a', + active: true, + }, + { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'shared-name', + providerId: 'provider-b', + active: true, + }, + ] + const session = { + apiMode: { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'shared-name', + providerId: 'provider-b', + customUrl: 'https://proxy.example.com/v1/chat/completions', + apiKey: 'modern-session-key', + active: true, + }, + } + + assert.equal(getUniquelySelectedApiModeIndex(apiModes, session, { sessionCompat: true }), 1) +}) + +test('isApiModeSelected returns true when apiMode active state is equal', () => { + const apiMode = { + groupName: 'customApiModelKeys', + itemName: 'customModel', + isCustom: true, + customName: 'mode-a', + providerId: 'myproxy', + active: true, + } + const session = { + apiMode: { + ...apiMode, + }, + } + + assert.equal(isApiModeSelected(apiMode, session), true) +}) From 2d049c357e7d0472815e398033323dc83f33edf5 Mon Sep 17 00:00:00 2001 From: Peter Dave Hello Date: Fri, 27 Feb 2026 01:37:14 +0800 Subject: [PATCH 2/2] Add custom provider editor to API modes Split provider management from API mode saving so users can add and edit custom OpenAI-compatible providers in a dedicated editor. 
Require a full chat-completions endpoint URL for provider setup and derive the paired completions endpoint with shared popup utilities backed by unit tests. Keep API mode provider binding explicit at save time and preserve provider-secret synchronization in General settings for backward compatibility. Add provider-related locale keys across supported locales. --- src/_locales/de/main.json | 15 +- src/_locales/en/main.json | 15 +- src/_locales/es/main.json | 15 +- src/_locales/fr/main.json | 15 +- src/_locales/in/main.json | 15 +- src/_locales/it/main.json | 15 +- src/_locales/ja/main.json | 15 +- src/_locales/ko/main.json | 15 +- src/_locales/pt/main.json | 15 +- src/_locales/ru/main.json | 15 +- src/_locales/tr/main.json | 15 +- src/_locales/zh-hans/main.json | 15 +- src/_locales/zh-hant/main.json | 15 +- src/components/ConversationCard/index.jsx | 41 +- src/popup/Popup.jsx | 148 +- src/popup/popup-config-utils.mjs | 31 + src/popup/sections/ApiModes.jsx | 632 +++++- src/popup/sections/GeneralPart.jsx | 620 +++++- .../sections/api-modes-provider-utils.mjs | 735 +++++++ src/popup/sections/general-balance-utils.mjs | 224 ++ .../general-provider-override-utils.mjs | 166 ++ src/popup/sections/provider-secret-utils.mjs | 465 ++++ .../popup/api-modes-provider-utils.test.mjs | 1909 +++++++++++++++++ tests/unit/popup/api-modes.test.mjs | 49 + .../unit/popup/general-balance-utils.test.mjs | 343 +++ .../unit/popup/general-part-override.test.mjs | 425 ++++ tests/unit/popup/popup-config-update.test.mjs | 189 ++ .../unit/popup/provider-secret-utils.test.mjs | 930 ++++++++ .../services/apis/provider-registry.test.mjs | 60 + 29 files changed, 6919 insertions(+), 243 deletions(-) create mode 100644 src/popup/popup-config-utils.mjs create mode 100644 src/popup/sections/api-modes-provider-utils.mjs create mode 100644 src/popup/sections/general-balance-utils.mjs create mode 100644 src/popup/sections/general-provider-override-utils.mjs create mode 100644 
src/popup/sections/provider-secret-utils.mjs create mode 100644 tests/unit/popup/api-modes-provider-utils.test.mjs create mode 100644 tests/unit/popup/api-modes.test.mjs create mode 100644 tests/unit/popup/general-balance-utils.test.mjs create mode 100644 tests/unit/popup/general-part-override.test.mjs create mode 100644 tests/unit/popup/popup-config-update.test.mjs create mode 100644 tests/unit/popup/provider-secret-utils.test.mjs diff --git a/src/_locales/de/main.json b/src/_locales/de/main.json index b3845deb5..7f1a3eb13 100644 --- a/src/_locales/de/main.json +++ b/src/_locales/de/main.json @@ -79,6 +79,7 @@ "ChatGPT (GPT-4-32k)": "ChatGPT (GPT-4-32k)", "GPT-3.5": "GPT-3.5", "Custom Model": "Benutzerdefiniertes Modell", + "Custom Provider": "Benutzerdefinierter Anbieter", "Balanced": "Ausgeglichen", "Creative": "Kreativ", "Precise": "Präzise", @@ -96,6 +97,7 @@ "Pin": "Anheften", "Unpin": "Loslösen", "Delete Conversation": "Konversation löschen", + "Delete": "Löschen", "Clear conversations": "Konversationen löschen", "Settings": "Einstellungen", "Feature Pages": "Funktionsseiten", @@ -115,6 +117,7 @@ "Modules": "Module", "API Params": "API-Parameter", "API Url": "API-URL", + "Provider": "Anbieter", "Others": "Andere", "API Modes": "API-Modi", "Disable web mode history for better privacy protection, but it will result in unavailable conversations after a period of time": "Deaktivieren Sie die Verlaufsfunktion im Webmodus für besseren Datenschutz. 
Beachten Sie jedoch, dass die Gespräche nach einer gewissen Zeit nicht mehr verfügbar sind", @@ -138,6 +141,7 @@ "Anthropic API Key": "Anthropic-API-Schlüssel", "Cancel": "Abbrechen", "Name is required": "Name ist erforderlich", + "Please enter a full Chat Completions URL": "Bitte geben Sie eine vollständige Chat Completions URL ein", "Prompt template should include {{selection}}": "Die Vorlage sollte {{selection}} enthalten", "Save": "Speichern", "Name": "Name", @@ -145,6 +149,11 @@ "Prompt Template": "Vorlagen-Template", "Explain this: {{selection}}": "Erkläre das: {{selection}}", "New": "Neu", + "Edit": "Bearbeiten", + "This provider is still used by other API modes": "Dieser Anbieter wird noch von anderen API-Modi verwendet", + "Loading saved conversations…": "Gespeicherte Unterhaltungen werden geladen…", + "Select a provider": "Anbieter auswählen", + "Please select a provider": "Bitte einen Anbieter auswählen", "Always display floating window, disable sidebar for all site adapters": "Immer das schwebende Fenster anzeigen, die Seitenleiste für alle Website-Adapter deaktivieren", "Allow ESC to close all floating windows": "ESC-Taste zum Schließen aller schwebenden Fenster zulassen", "Export All Data": "Alle Daten exportieren", @@ -199,5 +208,9 @@ "OpenAI (GPT-5.4)": "OpenAI (GPT-5.4)", "OpenAI (GPT-5.4 mini)": "OpenAI (GPT-5.4 mini)", "OpenAI (GPT-5.4 nano)": "OpenAI (GPT-5.4 nano)", - "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)" + "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)", + "This provider is still used by other API modes or saved conversations": "Dieser Anbieter wird noch von anderen API-Modi oder gespeicherten Unterhaltungen verwendet", + "This API key is set on the selected custom mode. Editing it here will create a dedicated provider for that mode.": "Dieser API-Schlüssel ist für den ausgewählten benutzerdefinierten Modus festgelegt. 
Wenn Sie ihn hier bearbeiten, wird ein dedizierter Provider für diesen Modus erstellt.", + "Use shared key": "Gemeinsamen Schlüssel verwenden", + "This provider endpoint is still needed by saved conversations": "Dieser Anbieter-Endpunkt wird noch von gespeicherten Unterhaltungen benötigt" } diff --git a/src/_locales/en/main.json b/src/_locales/en/main.json index a6f0d29e2..c79c7428c 100644 --- a/src/_locales/en/main.json +++ b/src/_locales/en/main.json @@ -79,6 +79,7 @@ "ChatGPT (GPT-4-32k)": "ChatGPT (GPT-4-32k)", "GPT-3.5": "GPT-3.5", "Custom Model": "Custom Model", + "Custom Provider": "Custom Provider", "Balanced": "Balanced", "Creative": "Creative", "Precise": "Precise", @@ -96,6 +97,7 @@ "Pin": "Pin", "Unpin": "Unpin", "Delete Conversation": "Delete Conversation", + "Delete": "Delete", "Clear conversations": "Clear conversations", "Settings": "Settings", "Feature Pages": "Feature Pages", @@ -115,6 +117,7 @@ "Modules": "Modules", "API Params": "API Params", "API Url": "API Url", + "Provider": "Provider", "Others": "Others", "API Modes": "API Modes", "Disable web mode history for better privacy protection, but it will result in unavailable conversations after a period of time": "Disable web mode history for better privacy protection, but it will result in unavailable conversations after a period of time", @@ -138,6 +141,7 @@ "Anthropic API Key": "Anthropic API Key", "Cancel": "Cancel", "Name is required": "Name is required", + "Please enter a full Chat Completions URL": "Please enter a full Chat Completions URL", "Prompt template should include {{selection}}": "Prompt template should include {{selection}}", "Save": "Save", "Name": "Name", @@ -145,6 +149,11 @@ "Prompt Template": "Prompt Template", "Explain this: {{selection}}": "Explain this: {{selection}}", "New": "New", + "Edit": "Edit", + "This provider is still used by other API modes": "This provider is still used by other API modes", + "Loading saved conversations…": "Loading saved conversations…", + 
"Select a provider": "Select a provider", + "Please select a provider": "Please select a provider", "Always display floating window, disable sidebar for all site adapters": "Always display floating window, disable sidebar for all site adapters", "Allow ESC to close all floating windows": "Allow ESC to close all floating windows", "Export All Data": "Export All Data", @@ -200,5 +209,9 @@ "OpenAI (GPT-5.4)": "OpenAI (GPT-5.4)", "OpenAI (GPT-5.4 mini)": "OpenAI (GPT-5.4 mini)", "OpenAI (GPT-5.4 nano)": "OpenAI (GPT-5.4 nano)", - "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)" + "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)", + "This provider is still used by other API modes or saved conversations": "This provider is still used by other API modes or saved conversations", + "This API key is set on the selected custom mode. Editing it here will create a dedicated provider for that mode.": "This API key is set on the selected custom mode. Editing it here will create a dedicated provider for that mode.", + "Use shared key": "Use shared key", + "This provider endpoint is still needed by saved conversations": "This provider endpoint is still needed by saved conversations" } diff --git a/src/_locales/es/main.json b/src/_locales/es/main.json index 7f916a9ca..c236e94c9 100644 --- a/src/_locales/es/main.json +++ b/src/_locales/es/main.json @@ -79,6 +79,7 @@ "ChatGPT (GPT-4-32k)": "ChatGPT (GPT-4-32k)", "GPT-3.5": "GPT-3.5", "Custom Model": "Modelo personalizado", + "Custom Provider": "Proveedor personalizado", "Balanced": "Equilibrado", "Creative": "Creativo", "Precise": "Preciso", @@ -96,6 +97,7 @@ "Pin": "Fijar", "Unpin": "Desfijar", "Delete Conversation": "Eliminar conversación", + "Delete": "Eliminar", "Clear conversations": "Borrar todas las conversaciones", "Settings": "Configuración", "Feature Pages": "Páginas de características", @@ -115,6 +117,7 @@ "Modules": "Módulos", "API Params": "Parámetros de la API", "API Url": "URL de la API", 
+ "Provider": "Proveedor", "Others": "Otros", "API Modes": "Modos de la API", "Disable web mode history for better privacy protection, but it will result in unavailable conversations after a period of time": "Desactivar el historial del modo web para una mejor protección de la privacidad, pero esto resultará en conversaciones no disponibles después de un período de tiempo.", @@ -138,6 +141,7 @@ "Anthropic API Key": "Clave API de Anthropic", "Cancel": "Cancelar", "Name is required": "Se requiere un nombre", + "Please enter a full Chat Completions URL": "Introduzca una URL completa de Chat Completions", "Prompt template should include {{selection}}": "La plantilla de sugerencias debe incluir {{selection}}", "Save": "Guardar", "Name": "Nombre", @@ -145,6 +149,11 @@ "Prompt Template": "Plantilla de sugerencias", "Explain this: {{selection}}": "Explicar esto: {{selection}}", "New": "Nuevo", + "Edit": "Editar", + "This provider is still used by other API modes": "Este proveedor aún está siendo utilizado por otros modos de API", + "Loading saved conversations…": "Cargando conversaciones guardadas…", + "Select a provider": "Selecciona un proveedor", + "Please select a provider": "Selecciona un proveedor", "Always display floating window, disable sidebar for all site adapters": "Mostrar siempre la ventana flotante, desactivar la barra lateral para todos los adaptadores de sitios", "Allow ESC to close all floating windows": "Permitir que ESC cierre todas las ventanas flotantes", "Export All Data": "Exportar todos los datos", @@ -199,5 +208,9 @@ "OpenAI (GPT-5.4)": "OpenAI (GPT-5.4)", "OpenAI (GPT-5.4 mini)": "OpenAI (GPT-5.4 mini)", "OpenAI (GPT-5.4 nano)": "OpenAI (GPT-5.4 nano)", - "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)" + "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)", + "This provider is still used by other API modes or saved conversations": "Este proveedor todavía está siendo utilizado por otros modos de API o conversaciones 
guardadas", + "This API key is set on the selected custom mode. Editing it here will create a dedicated provider for that mode.": "Esta clave de API está configurada en el modo personalizado seleccionado. Si la editas aquí, se creará un proveedor dedicado para ese modo.", + "Use shared key": "Usar clave compartida", + "This provider endpoint is still needed by saved conversations": "Las conversaciones guardadas todavía necesitan este endpoint del proveedor" } diff --git a/src/_locales/fr/main.json b/src/_locales/fr/main.json index c93419436..526ee5082 100644 --- a/src/_locales/fr/main.json +++ b/src/_locales/fr/main.json @@ -79,6 +79,7 @@ "ChatGPT (GPT-4-32k)": "ChatGPT (GPT-4-32k)", "GPT-3.5": "GPT-3.5", "Custom Model": "Modèle personnalisé", + "Custom Provider": "Fournisseur personnalisé", "Balanced": "Équilibré", "Creative": "Créatif", "Precise": "Précis", @@ -96,6 +97,7 @@ "Pin": "Épingler", "Unpin": "Détacher", "Delete Conversation": "Supprimer la conversation", + "Delete": "Supprimer", "Clear conversations": "Effacer les conversations", "Settings": "Paramètres", "Feature Pages": "Pages de fonctionnalités", @@ -115,6 +117,7 @@ "Modules": "Modules", "API Params": "Paramètres de l'API", "API Url": "URL de l'API", + "Provider": "Fournisseur", "Others": "Autres", "API Modes": "Modes de l'API", "Disable web mode history for better privacy protection, but it will result in unavailable conversations after a period of time": "Désactivez l'historique du mode web pour une meilleure protection de la vie privée, mais cela entraînera des conversations non disponibles après un certain temps", @@ -138,6 +141,7 @@ "Anthropic API Key": "Clé API Anthropic", "Cancel": "Annuler", "Name is required": "Le nom est requis", + "Please enter a full Chat Completions URL": "Veuillez saisir une URL complète de Chat Completions", "Prompt template should include {{selection}}": "Le modèle de suggestion doit inclure {{selection}}", "Save": "Enregistrer", "Name": "Nom", @@ -145,6 +149,11 @@ 
"Prompt Template": "Modèle de suggestion", "Explain this: {{selection}}": "Expliquer ceci : {{selection}}", "New": "Nouveau", + "Edit": "Modifier", + "This provider is still used by other API modes": "Ce fournisseur est encore utilisé par d’autres modes API", + "Loading saved conversations…": "Chargement des conversations enregistrées…", + "Select a provider": "Sélectionnez un fournisseur", + "Please select a provider": "Veuillez sélectionner un fournisseur", "Always display floating window, disable sidebar for all site adapters": "Toujours afficher la fenêtre flottante, désactiver la barre latérale pour tous les adaptateurs de site", "Allow ESC to close all floating windows": "Autoriser la touche ESC pour fermer toutes les fenêtres flottantes", "Export All Data": "Exporter toutes les données", @@ -199,5 +208,9 @@ "OpenAI (GPT-5.4)": "OpenAI (GPT-5.4)", "OpenAI (GPT-5.4 mini)": "OpenAI (GPT-5.4 mini)", "OpenAI (GPT-5.4 nano)": "OpenAI (GPT-5.4 nano)", - "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)" + "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)", + "This provider is still used by other API modes or saved conversations": "Ce fournisseur est encore utilise par d'autres modes d'API ou des conversations enregistrees", + "This API key is set on the selected custom mode. Editing it here will create a dedicated provider for that mode.": "Cette clé API est définie sur le mode personnalisé sélectionné. 
La modifier ici créera un fournisseur dédié pour ce mode.", + "Use shared key": "Utiliser la clé partagée", + "This provider endpoint is still needed by saved conversations": "Ce point de terminaison du fournisseur est encore nécessaire pour les conversations enregistrées" } diff --git a/src/_locales/in/main.json b/src/_locales/in/main.json index 1fa5b7d99..bfec9e9e4 100644 --- a/src/_locales/in/main.json +++ b/src/_locales/in/main.json @@ -79,6 +79,7 @@ "ChatGPT (GPT-4-32k)": "ChatGPT (GPT-4-32k)", "GPT-3.5": "GPT-3.5", "Custom Model": "Model Kustom", + "Custom Provider": "Penyedia Kustom", "Balanced": "Seimbang", "Creative": "Kreatif", "Precise": "Tepat", @@ -96,6 +97,7 @@ "Pin": "Sematkan", "Unpin": "Lepas Sematan", "Delete Conversation": "Hapus Percakapan", + "Delete": "Hapus", "Clear conversations": "Hapus Percakapan", "Settings": "Pengaturan", "Feature Pages": "Halaman Fitur", @@ -115,6 +117,7 @@ "Modules": "Modul", "API Params": "Parameter API", "API Url": "URL API", + "Provider": "Penyedia", "Others": "Lainnya", "API Modes": "Mode API", "Disable web mode history for better privacy protection, but it will result in unavailable conversations after a period of time": "Nonaktifkan riwayat mode web untuk perlindungan privasi yang lebih baik, tetapi ini akan menyebabkan percakapan tidak tersedia setelah jangka waktu tertentu", @@ -138,6 +141,7 @@ "Anthropic API Key": "Kunci API Anthropic", "Cancel": "Batal", "Name is required": "Nama diperlukan", + "Please enter a full Chat Completions URL": "Masukkan URL Chat Completions lengkap", "Prompt template should include {{selection}}": "Template prompt harus mencakup {{selection}}", "Save": "Simpan", "Name": "Nama", @@ -145,6 +149,11 @@ "Prompt Template": "Template Prompt", "Explain this: {{selection}}": "Jelaskan ini: {{selection}}", "New": "Baru", + "Edit": "Edit", + "This provider is still used by other API modes": "Penyedia ini masih digunakan oleh mode API lain", + "Loading saved conversations…": "Memuat percakapan 
tersimpan…", + "Select a provider": "Pilih penyedia", + "Please select a provider": "Silakan pilih penyedia", "Always display floating window, disable sidebar for all site adapters": "Selalu tampilkan jendela mengambang, nonaktifkan sidebar untuk semua adapter situs", "Allow ESC to close all floating windows": "Izinkan ESC untuk menutup semua jendela mengambang", "Export All Data": "Ekspor Semua Data", @@ -199,5 +208,9 @@ "OpenAI (GPT-5.4)": "OpenAI (GPT-5.4)", "OpenAI (GPT-5.4 mini)": "OpenAI (GPT-5.4 mini)", "OpenAI (GPT-5.4 nano)": "OpenAI (GPT-5.4 nano)", - "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)" + "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)", + "This provider is still used by other API modes or saved conversations": "Penyedia ini masih digunakan oleh mode API lain atau percakapan yang tersimpan", + "This API key is set on the selected custom mode. Editing it here will create a dedicated provider for that mode.": "Kunci API ini ditetapkan pada mode kustom yang dipilih. 
Mengeditnya di sini akan membuat provider khusus untuk mode tersebut.", + "Use shared key": "Gunakan kunci bersama", + "This provider endpoint is still needed by saved conversations": "Endpoint penyedia ini masih diperlukan oleh percakapan yang tersimpan" } diff --git a/src/_locales/it/main.json b/src/_locales/it/main.json index 400fa5461..3a6e0378d 100644 --- a/src/_locales/it/main.json +++ b/src/_locales/it/main.json @@ -79,6 +79,7 @@ "ChatGPT (GPT-4-32k)": "ChatGPT (GPT-4-32k)", "GPT-3.5": "GPT-3.5", "Custom Model": "Modello personalizzato", + "Custom Provider": "Provider personalizzato", "Balanced": "Bilanciato", "Creative": "Creativo", "Precise": "Preciso", @@ -96,6 +97,7 @@ "Pin": "Fissa", "Unpin": "Sblocca", "Delete Conversation": "Elimina la conversazione", + "Delete": "Elimina", "Clear conversations": "Pulisci le conversazioni", "Settings": "Impostazioni", "Feature Pages": "Pagine delle funzionalità", @@ -115,6 +117,7 @@ "Modules": "Moduli", "API Params": "Parametri API", "API Url": "URL API", + "Provider": "Provider", "Others": "Altri", "API Modes": "Modalità API", "Disable web mode history for better privacy protection, but it will result in unavailable conversations after a period of time": "Disabilita la cronologia della modalità web per una migliore protezione della privacy, ma ciò comporterà conversazioni non disponibili dopo un certo periodo di tempo", @@ -138,6 +141,7 @@ "Anthropic API Key": "Chiave API Anthropic", "Cancel": "Annulla", "Name is required": "Il nome è obbligatorio", + "Please enter a full Chat Completions URL": "Inserisci un URL completo di Chat Completions", "Prompt template should include {{selection}}": "Il modello di prompt dovrebbe includere {{selection}}", "Save": "Salva", "Name": "Nome", @@ -145,6 +149,11 @@ "Prompt Template": "Modello di prompt", "Explain this: {{selection}}": "Spiega questo: {{selection}}", "New": "Nuovo", + "Edit": "Modifica", + "This provider is still used by other API modes": "Questo provider è ancora 
utilizzato da altre modalità API", + "Loading saved conversations…": "Caricamento delle conversazioni salvate…", + "Select a provider": "Seleziona un provider", + "Please select a provider": "Seleziona un provider", "Always display floating window, disable sidebar for all site adapters": "Mostra sempre la finestra flottante, disabilita la barra laterale per tutti gli adattatori del sito", "Allow ESC to close all floating windows": "Consenti ESC per chiudere tutte le finestre flottanti", "Export All Data": "Esporta tutti i dati", @@ -199,5 +208,9 @@ "OpenAI (GPT-5.4)": "OpenAI (GPT-5.4)", "OpenAI (GPT-5.4 mini)": "OpenAI (GPT-5.4 mini)", "OpenAI (GPT-5.4 nano)": "OpenAI (GPT-5.4 nano)", - "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)" + "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)", + "This provider is still used by other API modes or saved conversations": "Questo provider è ancora utilizzato da altre modalità API o conversazioni salvate", + "This API key is set on the selected custom mode. Editing it here will create a dedicated provider for that mode.": "Questa chiave API è impostata nella modalità personalizzata selezionata. 
Modificandola qui verrà creato un provider dedicato per quella modalità.", + "Use shared key": "Usa la chiave condivisa", + "This provider endpoint is still needed by saved conversations": "L'endpoint di questo provider è ancora necessario per le conversazioni salvate" } diff --git a/src/_locales/ja/main.json b/src/_locales/ja/main.json index ac19edc8e..a30a6ca76 100644 --- a/src/_locales/ja/main.json +++ b/src/_locales/ja/main.json @@ -79,6 +79,7 @@ "ChatGPT (GPT-4-32k)": "ChatGPT (GPT-4-32k)", "GPT-3.5": "GPT-3.5", "Custom Model": "カスタムモデル", + "Custom Provider": "カスタムプロバイダー", "Balanced": "バランスの取れた", "Creative": "創造的な", "Precise": "正確な", @@ -96,6 +97,7 @@ "Pin": "ピン留め", "Unpin": "ピン留め解除", "Delete Conversation": "会話を削除", + "Delete": "削除", "Clear conversations": "会話をクリア", "Settings": "設定", "Feature Pages": "機能ページ", @@ -115,6 +117,7 @@ "Modules": "モジュール", "API Params": "APIパラメータ", "API Url": "API URL", + "Provider": "プロバイダー", "Others": "その他", "API Modes": "APIモード", "Disable web mode history for better privacy protection, but it will result in unavailable conversations after a period of time": "プライバシー保護の向上のためにWebモードの履歴を無効にしますが、一定期間後に会話が利用できなくなります", @@ -138,6 +141,7 @@ "Anthropic API Key": "Anthropic API キー", "Cancel": "キャンセル", "Name is required": "名前は必須です", + "Please enter a full Chat Completions URL": "完全な Chat Completions URL を入力してください", "Prompt template should include {{selection}}": "プロンプトテンプレートには {{selection}} を含める必要があります", "Save": "保存", "Name": "名前", @@ -145,6 +149,11 @@ "Prompt Template": "プロンプトテンプレート", "Explain this: {{selection}}": "これを説明する: {{selection}}", "New": "新規", + "Edit": "編集", + "This provider is still used by other API modes": "このプロバイダーは他の API モードでまだ使用されています", + "Loading saved conversations…": "保存済みの会話を読み込み中…", + "Select a provider": "プロバイダーを選択", + "Please select a provider": "プロバイダーを選択してください", "Always display floating window, disable sidebar for all site adapters": "常にフローティングウィンドウを表示し、すべてのサイトアダプターでサイドバーを無効にします", "Allow ESC to close all floating 
windows": "ESCキーですべてのフローティングウィンドウを閉じる", "Export All Data": "すべてのデータをエクスポート", @@ -199,5 +208,9 @@ "OpenAI (GPT-5.4)": "OpenAI (GPT-5.4)", "OpenAI (GPT-5.4 mini)": "OpenAI (GPT-5.4 mini)", "OpenAI (GPT-5.4 nano)": "OpenAI (GPT-5.4 nano)", - "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)" + "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)", + "This provider is still used by other API modes or saved conversations": "このプロバイダーは他の API モードまたは保存済みの会話でまだ使用されています", + "This API key is set on the selected custom mode. Editing it here will create a dedicated provider for that mode.": "この API キーは選択中のカスタムモードに設定されています。ここで編集すると、そのモード専用のプロバイダーが作成されます。", + "Use shared key": "共有キーを使用", + "This provider endpoint is still needed by saved conversations": "このプロバイダーのエンドポイントは保存済みの会話でまだ必要です" } diff --git a/src/_locales/ko/main.json b/src/_locales/ko/main.json index 4348a7c16..08acb9009 100644 --- a/src/_locales/ko/main.json +++ b/src/_locales/ko/main.json @@ -79,6 +79,7 @@ "ChatGPT (GPT-4-32k)": "ChatGPT (GPT-4-32k)", "GPT-3.5": "GPT-3.5", "Custom Model": "사용자 정의 모델", + "Custom Provider": "사용자 정의 공급자", "Balanced": "균형 잡힌", "Creative": "창의적인", "Precise": "정확한", @@ -96,6 +97,7 @@ "Pin": "고정", "Unpin": "고정 해제", "Delete Conversation": "대화 삭제", + "Delete": "삭제", "Clear conversations": "대화 기록 지우기", "Settings": "설정", "Feature Pages": "기능 페이지", @@ -115,6 +117,7 @@ "Modules": "모듈", "API Params": "API 매개변수", "API Url": "API 주소", + "Provider": "공급자", "Others": "기타", "API Modes": "API 모드", "Disable web mode history for better privacy protection, but it will result in unavailable conversations after a period of time": "개인 정보 보호를 위해 웹 모드 기록을 비활성화하지만 일정 시간 이후에 대화를 사용할 수 없게 됩니다.", @@ -138,6 +141,7 @@ "Anthropic API Key": "Anthropic API 키", "Cancel": "취소", "Name is required": "이름은 필수입니다", + "Please enter a full Chat Completions URL": "전체 Chat Completions URL을 입력하세요", "Prompt template should include {{selection}}": "프롬프트 템플릿에는 {{selection}} 이 포함되어야 합니다", "Save": "저장", "Name": 
"이름", @@ -145,6 +149,11 @@ "Prompt Template": "프롬프트 템플릿", "Explain this: {{selection}}": "이것을 설명하세요: {{selection}}", "New": "새로 만들기", + "Edit": "편집", + "This provider is still used by other API modes": "이 공급자는 아직 다른 API 모드에서 사용 중입니다", + "Loading saved conversations…": "저장된 대화를 불러오는 중…", + "Select a provider": "공급업체 선택", + "Please select a provider": "공급업체를 선택하세요", "Always display floating window, disable sidebar for all site adapters": "항상 떠다니는 창을 표시하고 모든 사이트 어댑터의 사이드바를 비활성화합니다", "Allow ESC to close all floating windows": "ESC를 눌러 모든 떠다니는 창을 닫도록 허용", "Export All Data": "모든 데이터 내보내기", @@ -199,5 +208,9 @@ "OpenAI (GPT-5.4)": "OpenAI (GPT-5.4)", "OpenAI (GPT-5.4 mini)": "OpenAI (GPT-5.4 mini)", "OpenAI (GPT-5.4 nano)": "OpenAI (GPT-5.4 nano)", - "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)" + "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)", + "This provider is still used by other API modes or saved conversations": "이 공급자는 아직 다른 API 모드 또는 저장된 대화에서 사용 중입니다", + "This API key is set on the selected custom mode. Editing it here will create a dedicated provider for that mode.": "이 API 키는 선택한 사용자 지정 모드에 설정되어 있습니다. 
여기서 편집하면 해당 모드 전용 provider가 생성됩니다.", + "Use shared key": "공유 키 사용", + "This provider endpoint is still needed by saved conversations": "저장된 대화에 이 공급자 엔드포인트가 아직 필요합니다" } diff --git a/src/_locales/pt/main.json b/src/_locales/pt/main.json index d3a5bc560..dd4088388 100644 --- a/src/_locales/pt/main.json +++ b/src/_locales/pt/main.json @@ -79,6 +79,7 @@ "ChatGPT (GPT-4-32k)": "ChatGPT (GPT-4-32k)", "GPT-3.5": "GPT-3.5", "Custom Model": "Modelo Personalizado", + "Custom Provider": "Provedor Personalizado", "Balanced": "Equilibrado", "Creative": "Criativo", "Precise": "Preciso", @@ -96,6 +97,7 @@ "Pin": "Fixar", "Unpin": "Desafixar", "Delete Conversation": "Excluir Conversa", + "Delete": "Excluir", "Clear conversations": "Limpar conversas", "Settings": "Configurações", "Feature Pages": "Páginas de Recursos", @@ -115,6 +117,7 @@ "Modules": "Módulos", "API Params": "Parâmetros da API", "API Url": "URL da API", + "Provider": "Provedor", "Others": "Outros", "API Modes": "Modos da API", "Disable web mode history for better privacy protection, but it will result in unavailable conversations after a period of time": "Desative o histórico do modo web para uma melhor proteção de privacidade, mas isso resultará em conversas indisponíveis após um certo tempo.", @@ -138,6 +141,7 @@ "Anthropic API Key": "Chave API Anthropic", "Cancel": "Cancelar", "Name is required": "Nome é obrigatório", + "Please enter a full Chat Completions URL": "Insira uma URL completa de Chat Completions", "Prompt template should include {{selection}}": "O modelo de prompt deve incluir {{selection}}", "Save": "Salvar", "Name": "Nome", @@ -145,6 +149,11 @@ "Prompt Template": "Modelo de Prompt", "Explain this: {{selection}}": "Explique isso: {{selection}}", "New": "Novo", + "Edit": "Editar", + "This provider is still used by other API modes": "Este provedor ainda está sendo usado por outros modos de API", + "Loading saved conversations…": "Carregando conversas salvas…", + "Select a provider": "Selecione um 
provedor", + "Please select a provider": "Selecione um provedor", "Always display floating window, disable sidebar for all site adapters": "Sempre exibir janela flutuante, desativar barra lateral para todos os adaptadores de site", "Allow ESC to close all floating windows": "Permitir ESC para fechar todas as janelas flutuantes", "Export All Data": "Exportar Todos os Dados", @@ -199,5 +208,9 @@ "OpenAI (GPT-5.4)": "OpenAI (GPT-5.4)", "OpenAI (GPT-5.4 mini)": "OpenAI (GPT-5.4 mini)", "OpenAI (GPT-5.4 nano)": "OpenAI (GPT-5.4 nano)", - "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)" + "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)", + "This provider is still used by other API modes or saved conversations": "Este provedor ainda está sendo usado por outros modos de API ou conversas salvas", + "This API key is set on the selected custom mode. Editing it here will create a dedicated provider for that mode.": "Esta chave de API está definida no modo personalizado selecionado. 
Editá-la aqui vai criar um provedor dedicado para esse modo.", + "Use shared key": "Usar chave compartilhada", + "This provider endpoint is still needed by saved conversations": "As conversas salvas ainda precisam deste endpoint do provedor" } diff --git a/src/_locales/ru/main.json b/src/_locales/ru/main.json index b6852bbdf..91b043049 100644 --- a/src/_locales/ru/main.json +++ b/src/_locales/ru/main.json @@ -79,6 +79,7 @@ "ChatGPT (GPT-4-32k)": "ChatGPT (GPT-4-32к)", "GPT-3.5": "GPT-3.5", "Custom Model": "Пользовательская модель", + "Custom Provider": "Пользовательский провайдер", "Balanced": "Сбалансированный", "Creative": "Креативный", "Precise": "Точный", @@ -96,6 +97,7 @@ "Pin": "Закрепить", "Unpin": "Открепить", "Delete Conversation": "Удалить беседу", + "Delete": "Удалить", "Clear conversations": "Очистить историю бесед", "Settings": "Настройки", "Feature Pages": "Страницы функций", @@ -115,6 +117,7 @@ "Modules": "Модули", "API Params": "Параметры API", "API Url": "URL API", + "Provider": "Провайдер", "Others": "Другие", "API Modes": "Режимы API", "Disable web mode history for better privacy protection, but it will result in unavailable conversations after a period of time": "Отключить историю веб-режима для лучшей защиты конфиденциальности, но это приведет к недоступности разговоров после определенного времени", @@ -138,6 +141,7 @@ "Anthropic API Key": "Ключ API Anthropic", "Cancel": "Отмена", "Name is required": "Имя обязательно", + "Please enter a full Chat Completions URL": "Введите полный URL Chat Completions", "Prompt template should include {{selection}}": "Шаблон запроса должен включать {{selection}}", "Save": "Сохранить", "Name": "Имя", @@ -145,6 +149,11 @@ "Prompt Template": "Шаблон запроса", "Explain this: {{selection}}": "Объяснить это: {{selection}}", "New": "Новый", + "Edit": "Редактировать", + "This provider is still used by other API modes": "Этот провайдер всё ещё используется другими режимами API", + "Loading saved conversations…": "Загрузка 
сохранённых разговоров…", + "Select a provider": "Выберите провайдера", + "Please select a provider": "Выберите провайдера", "Always display floating window, disable sidebar for all site adapters": "Всегда отображать плавающее окно, отключить боковую панель для всех адаптеров сайтов", "Allow ESC to close all floating windows": "Разрешить ESC для закрытия всех плавающих окон", "Export All Data": "Экспорт всех данных", @@ -199,5 +208,9 @@ "OpenAI (GPT-5.4)": "OpenAI (GPT-5.4)", "OpenAI (GPT-5.4 mini)": "OpenAI (GPT-5.4 mini)", "OpenAI (GPT-5.4 nano)": "OpenAI (GPT-5.4 nano)", - "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)" + "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)", + "This provider is still used by other API modes or saved conversations": "Этот провайдер всё ещё используется другими режимами API или сохранёнными диалогами", + "This API key is set on the selected custom mode. Editing it here will create a dedicated provider for that mode.": "Этот API-ключ задан в выбранном пользовательском режиме. 
Если изменить его здесь, будет создан отдельный провайдер для этого режима.", + "Use shared key": "Использовать общий ключ", + "This provider endpoint is still needed by saved conversations": "Сохранённым диалогам всё ещё нужен этот endpoint провайдера" } diff --git a/src/_locales/tr/main.json b/src/_locales/tr/main.json index 4bf6bb9db..e019ce2e7 100644 --- a/src/_locales/tr/main.json +++ b/src/_locales/tr/main.json @@ -79,6 +79,7 @@ "ChatGPT (GPT-4-32k)": "ChatGPT (GPT-4-32k)", "GPT-3.5": "GPT-3.5", "Custom Model": "Özel Model", + "Custom Provider": "Özel Sağlayıcı", "Balanced": "Dengeli", "Creative": "Yaratıcı", "Precise": "Duyarlı", @@ -96,6 +97,7 @@ "Pin": "Sabitle", "Unpin": "Sabitlemeyi Kaldır", "Delete Conversation": "Konuşmayı Sil", + "Delete": "Sil", "Clear conversations": "Konuşmaları temizle", "Settings": "Ayarlar", "Feature Pages": "Özellik Sayfaları", @@ -115,6 +117,7 @@ "Modules": "Modüller", "API Params": "API Parametreleri", "API Url": "API Url'si", + "Provider": "Sağlayıcı", "Others": "Diğerleri", "API Modes": "API Modları", "Disable web mode history for better privacy protection, but it will result in unavailable conversations after a period of time": "Daha iyi gizlilik koruması için web modu geçmişini devre dışı bırakın, ancak bir süre sonra kullanılamayan konuşmalara neden olacaktır", @@ -138,6 +141,7 @@ "Anthropic API Key": "Anthropic API Anahtarı", "Cancel": "İptal", "Name is required": "İsim gereklidir", + "Please enter a full Chat Completions URL": "Lütfen tam bir Chat Completions URL'si girin", "Prompt template should include {{selection}}": "Prompt şablonu {{selection}} içermelidir", "Save": "Kaydet", "Name": "İsim", @@ -145,6 +149,11 @@ "Prompt Template": "Prompt Şablonu", "Explain this: {{selection}}": "Bunu açıkla: {{selection}}", "New": "Yeni", + "Edit": "Düzenle", + "This provider is still used by other API modes": "Bu sağlayıcı hâlâ diğer API modları tarafından kullanılıyor", + "Loading saved conversations…": "Kaydedilmiş sohbetler 
yükleniyor…", + "Select a provider": "Bir sağlayıcı seçin", + "Please select a provider": "Lütfen bir sağlayıcı seçin", "Always display floating window, disable sidebar for all site adapters": "Her zaman kayan pencereyi görüntüle, tüm site adaptörleri için kenar çubuğunu devre dışı bırak", "Allow ESC to close all floating windows": "ESC tuşuyla tüm kayan pencereleri kapatmaya izin ver", "Export All Data": "Tüm Verileri Dışa Aktar", @@ -199,5 +208,9 @@ "OpenAI (GPT-5.4)": "OpenAI (GPT-5.4)", "OpenAI (GPT-5.4 mini)": "OpenAI (GPT-5.4 mini)", "OpenAI (GPT-5.4 nano)": "OpenAI (GPT-5.4 nano)", - "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)" + "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)", + "This provider is still used by other API modes or saved conversations": "Bu sağlayıcı hâlâ diğer API modları veya kaydedilmiş konuşmalar tarafından kullanılıyor", + "This API key is set on the selected custom mode. Editing it here will create a dedicated provider for that mode.": "Bu API anahtarı seçili özel modda ayarlanmış. 
Burada düzenlemek bu mod için özel bir sağlayıcı oluşturur.", + "Use shared key": "Paylaşılan anahtarı kullan", + "This provider endpoint is still needed by saved conversations": "Bu sağlayıcı uç noktası hâlâ kaydedilmiş konuşmalar için gerekli" } diff --git a/src/_locales/zh-hans/main.json b/src/_locales/zh-hans/main.json index 01c4299e3..47a9e9ac7 100644 --- a/src/_locales/zh-hans/main.json +++ b/src/_locales/zh-hans/main.json @@ -79,6 +79,7 @@ "ChatGPT (GPT-4-32k)": "ChatGPT (GPT-4-32k)", "GPT-3.5": "GPT-3.5", "Custom Model": "自定义模型", + "Custom Provider": "自定义提供商", "Balanced": "平衡", "Creative": "有创造力", "Precise": "精确", @@ -96,6 +97,7 @@ "Pin": "固定侧边", "Unpin": "收缩侧边", "Delete Conversation": "删除对话", + "Delete": "删除", "Clear conversations": "清空记录", "Settings": "设置", "Feature Pages": "功能页", @@ -115,6 +117,7 @@ "Modules": "模块", "API Params": "API参数", "API Url": "API地址", + "Provider": "提供商", "Others": "其他", "API Modes": "API模式", "Disable web mode history for better privacy protection, but it will result in unavailable conversations after a period of time": "禁用网页版模式历史记录以获得更好的隐私保护, 但会导致对话在一段时间后不可用", @@ -138,6 +141,7 @@ "Anthropic API Key": "Anthropic API 密钥", "Cancel": "取消", "Name is required": "名称是必须的", + "Please enter a full Chat Completions URL": "请输入完整的 Chat Completions URL", "Prompt template should include {{selection}}": "提示模板应该包含 {{selection}}", "Save": "保存", "Name": "名称", @@ -145,6 +149,11 @@ "Prompt Template": "提示模板", "Explain this: {{selection}}": "解释这个: {{selection}}", "New": "新建", + "Edit": "编辑", + "This provider is still used by other API modes": "此提供商仍被其他 API 模式使用中", + "Loading saved conversations…": "正在载入已保存的对话…", + "Select a provider": "选择提供商", + "Please select a provider": "请选择提供商", "Always display floating window, disable sidebar for all site adapters": "总是显示浮动窗口, 禁用所有站点适配器的侧边栏", "Allow ESC to close all floating windows": "允许按ESC关闭所有浮动窗口", "Export All Data": "导出所有数据", @@ -206,5 +215,9 @@ "OpenAI (GPT-5.4)": "OpenAI (GPT-5.4)", "OpenAI (GPT-5.4 mini)": 
"OpenAI (GPT-5.4 mini)", "OpenAI (GPT-5.4 nano)": "OpenAI (GPT-5.4 nano)", - "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)" + "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)", + "This provider is still used by other API modes or saved conversations": "此提供商仍被其他 API 模式或已保存的对话使用", + "This API key is set on the selected custom mode. Editing it here will create a dedicated provider for that mode.": "此 API 密钥设定在所选的自定义模式上。在这里编辑会为该模式建立专用提供商。", + "Use shared key": "使用共享提供商密钥", + "This provider endpoint is still needed by saved conversations": "已保存的对话仍需要此提供商端点" } diff --git a/src/_locales/zh-hant/main.json b/src/_locales/zh-hant/main.json index 25686ca23..5cab7412e 100644 --- a/src/_locales/zh-hant/main.json +++ b/src/_locales/zh-hant/main.json @@ -79,6 +79,7 @@ "ChatGPT (GPT-4-32k)": "ChatGPT (GPT-4-32k)", "GPT-3.5": "GPT-3.5", "Custom Model": "自訂模型", + "Custom Provider": "自訂供應商", "Balanced": "平衡", "Creative": "有創意", "Precise": "精確", @@ -96,6 +97,7 @@ "Pin": "固定側邊", "Unpin": "取消固定側邊", "Delete Conversation": "刪除對話", + "Delete": "刪除", "Clear conversations": "清空對話記錄", "Settings": "設定", "Feature Pages": "功能頁面", @@ -115,6 +117,7 @@ "Modules": "模組", "API Params": "API 參數", "API Url": "API 網址", + "Provider": "供應商", "Others": "其他", "API Modes": "API 模式", "Disable web mode history for better privacy protection, but it will result in unavailable conversations after a period of time": "停用網頁版模式歷史記錄以提升隱私保護,但會導致對話記錄在一段時間後無法使用", @@ -138,6 +141,7 @@ "Anthropic API Key": "Anthropic API 金鑰", "Cancel": "取消", "Name is required": "名稱是必填的", + "Please enter a full Chat Completions URL": "請輸入完整的 Chat Completions URL", "Prompt template should include {{selection}}": "提示範本應該包含 {{selection}}", "Save": "儲存", "Name": "名稱", @@ -145,6 +149,11 @@ "Prompt Template": "提示範本", "Explain this: {{selection}}": "解釋這個: {{selection}}", "New": "新增", + "Edit": "編輯", + "This provider is still used by other API modes": "此供應商仍被其他 API 模式使用中", + "Loading saved 
conversations…": "正在載入已儲存的對話…", + "Select a provider": "選擇供應商", + "Please select a provider": "請選擇供應商", "Always display floating window, disable sidebar for all site adapters": "總是顯示浮動視窗,停用所有網站適配器的側邊欄", "Allow ESC to close all floating windows": "允許按 ESC 關閉所有浮動視窗", "Export All Data": "匯出所有資料", @@ -201,5 +210,9 @@ "OpenAI (GPT-5.4)": "OpenAI (GPT-5.4)", "OpenAI (GPT-5.4 mini)": "OpenAI (GPT-5.4 mini)", "OpenAI (GPT-5.4 nano)": "OpenAI (GPT-5.4 nano)", - "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)" + "Anthropic (Claude Sonnet 4.6)": "Anthropic (Claude Sonnet 4.6)", + "This provider is still used by other API modes or saved conversations": "此供應商仍被其他 API 模式或已儲存的對話使用中", + "This API key is set on the selected custom mode. Editing it here will create a dedicated provider for that mode.": "此 API 金鑰已設在目前選取的自訂模式。在這裡編輯會為該模式建立專用供應商。", + "Use shared key": "使用共用供應商金鑰", + "This provider endpoint is still needed by saved conversations": "已儲存的對話仍需要此供應商端點" } diff --git a/src/components/ConversationCard/index.jsx b/src/components/ConversationCard/index.jsx index d131ff8fe..bcaf3f2ab 100644 --- a/src/components/ConversationCard/index.jsx +++ b/src/components/ConversationCard/index.jsx @@ -36,6 +36,10 @@ import { initSession } from '../../services/init-session.mjs' import { findLastIndex } from 'lodash-es' import { generateAnswersWithBingWebApi } from '../../services/apis/bing-web.mjs' import { handlePortError } from '../../services/wrappers.mjs' +import { + getApiModeDisplayLabel, + getConversationAiName, +} from '../../popup/sections/api-modes-provider-utils.mjs' const logo = Browser.runtime.getURL('logo.png') const UNMATCHED_API_MODE_VALUE = '__current-session-api-mode__' @@ -71,29 +75,19 @@ function ConversationCard(props) { */ const [conversationItemData, setConversationItemData] = useState([]) const config = useConfig() - const currentAiName = - session.aiName || - modelNameToDesc( - session.apiMode && typeof session.apiMode === 'object' - ? 
apiModeToModelName(session.apiMode) - : session.modelName, - t, - config.customModelName, - ) || - t(Models.customModel.desc) + const customOpenAIProviders = Array.isArray(config.customOpenAIProviders) + ? config.customOpenAIProviders + : [] + const currentAiName = getConversationAiName(session, t, customOpenAIProviders) const selectedApiModeIndex = useMemo( () => getUniquelySelectedApiModeIndex(apiModes, session, { sessionCompat: true }), [apiModes, session], ) - const selectedApiModeDesc = + const selectedApiModeLabel = selectedApiModeIndex !== -1 - ? modelNameToDesc( - apiModeToModelName(apiModes[selectedApiModeIndex]), - t, - config.customModelName, - ) + ? getApiModeDisplayLabel(apiModes[selectedApiModeIndex], t, customOpenAIProviders) : '' - const selectedApiModeValue = selectedApiModeDesc + const selectedApiModeValue = selectedApiModeLabel ? String(selectedApiModeIndex) : !session.apiMode && session.modelName === 'customModel' ? '-1' @@ -423,11 +417,9 @@ function ConversationCard(props) { ...session, modelName, apiMode, - aiName: modelNameToDesc( - apiMode ? apiModeToModelName(apiMode) : modelName, - t, - config.customModelName, - ), + aiName: apiMode + ? getApiModeDisplayLabel(apiMode, t, customOpenAIProviders) + : modelNameToDesc(modelName, t, config.customModelName), } if (config.autoRegenAfterSwitchModel && conversationItemData.length > 0) getRetryFn(newSession)() @@ -440,8 +432,7 @@ function ConversationCard(props) { )} {apiModes.map((apiMode, index) => { - const modelName = apiModeToModelName(apiMode) - const desc = modelNameToDesc(modelName, t, config.customModelName) + const desc = getApiModeDisplayLabel(apiMode, t, customOpenAIProviders) if (desc) { return (