From 94f754d7f48a225614df10e931fc9248c3311135 Mon Sep 17 00:00:00 2001
From: Roo Code
Date: Fri, 17 Apr 2026 19:36:30 +0000
Subject: [PATCH] fix: omit temperature from OpenAI-compatible requests when
 not explicitly set

When no custom temperature is configured, the OpenAI handler was always
sending temperature: 0 in API requests. Some OpenAI-compatible APIs
(e.g. Kimi Coding) require specific temperature values and reject others.

This change omits the temperature parameter entirely when the user has not
explicitly set one, letting upstream APIs use their own model-specific
defaults. When a user configures a custom temperature, it is still sent.

Fixes #12141
---
 src/api/providers/__tests__/openai.spec.ts | 69 +++++++++++++++++++++-
 src/api/providers/openai.ts                | 15 ++++-
 2 files changed, 82 insertions(+), 2 deletions(-)

diff --git a/src/api/providers/__tests__/openai.spec.ts b/src/api/providers/__tests__/openai.spec.ts
index 73b542dbc73..cd6d9fd2eac 100644
--- a/src/api/providers/__tests__/openai.spec.ts
+++ b/src/api/providers/__tests__/openai.spec.ts
@@ -497,6 +497,74 @@ describe("OpenAiHandler", () => {
 			const callArgs = mockCreate.mock.calls[0][0]
 			expect(callArgs.max_completion_tokens).toBe(4096)
 		})
+
+		it("should omit temperature when modelTemperature is not set (streaming)", async () => {
+			// When no custom temperature is configured, temperature should not be sent
+			// so that OpenAI-compatible APIs (e.g. Kimi) can use their own defaults
+			const handlerNoTemp = new OpenAiHandler({
+				...mockOptions,
+				// modelTemperature is not set (undefined)
+			})
+			const stream = handlerNoTemp.createMessage(systemPrompt, messages)
+			for await (const _chunk of stream) {
+			}
+			expect(mockCreate).toHaveBeenCalled()
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs).not.toHaveProperty("temperature")
+		})
+
+		it("should include temperature when modelTemperature is explicitly set (streaming)", async () => {
+			const handlerWithTemp = new OpenAiHandler({
+				...mockOptions,
+				modelTemperature: 0.6,
+			})
+			const stream = handlerWithTemp.createMessage(systemPrompt, messages)
+			for await (const _chunk of stream) {
+			}
+			expect(mockCreate).toHaveBeenCalled()
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs.temperature).toBe(0.6)
+		})
+
+		it("should include temperature 0 when modelTemperature is explicitly set to 0 (streaming)", async () => {
+			const handlerWithZeroTemp = new OpenAiHandler({
+				...mockOptions,
+				modelTemperature: 0,
+			})
+			const stream = handlerWithZeroTemp.createMessage(systemPrompt, messages)
+			for await (const _chunk of stream) {
+			}
+			expect(mockCreate).toHaveBeenCalled()
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs.temperature).toBe(0)
+		})
+
+		it("should omit temperature when modelTemperature is not set (non-streaming)", async () => {
+			const handlerNoTemp = new OpenAiHandler({
+				...mockOptions,
+				openAiStreamingEnabled: false,
+			})
+			const stream = handlerNoTemp.createMessage(systemPrompt, messages)
+			for await (const _chunk of stream) {
+			}
+			expect(mockCreate).toHaveBeenCalled()
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs).not.toHaveProperty("temperature")
+		})
+
+		it("should include temperature when modelTemperature is explicitly set (non-streaming)", async () => {
+			const handlerWithTemp = new OpenAiHandler({
+				...mockOptions,
+				openAiStreamingEnabled: false,
+				modelTemperature: 0.6,
+			})
+			const stream = handlerWithTemp.createMessage(systemPrompt, messages)
+			for await (const _chunk of stream) {
+			}
+			expect(mockCreate).toHaveBeenCalled()
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs.temperature).toBe(0.6)
+		})
 	})
 
 	describe("error handling", () => {
@@ -632,7 +700,6 @@ describe("OpenAiHandler", () => {
 				],
 				stream: true,
 				stream_options: { include_usage: true },
-				temperature: 0,
 				tools: undefined,
 				tool_choice: undefined,
 				parallel_tool_calls: true,
diff --git a/src/api/providers/openai.ts b/src/api/providers/openai.ts
index 33b29abcafe..58db12330e0 100644
--- a/src/api/providers/openai.ts
+++ b/src/api/providers/openai.ts
@@ -154,7 +154,13 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 
 		const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
 			model: modelId,
-			temperature: this.options.modelTemperature ?? (deepseekReasoner ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0),
+			// Only include temperature when explicitly configured or for deepseek reasoner models.
+			// Omitting it lets OpenAI-compatible APIs (e.g. Kimi) use their own model-specific defaults.
+			...(this.options.modelTemperature != null
+				? { temperature: this.options.modelTemperature }
+				: deepseekReasoner
+					? { temperature: DEEP_SEEK_DEFAULT_TEMPERATURE }
+					: {}),
 			messages: convertedMessages,
 			stream: true as const,
 			...(isGrokXAI ? {} : { stream_options: { include_usage: true } }),
@@ -223,6 +229,13 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandl
 		} else {
 			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
 				model: modelId,
+				// Only include temperature when explicitly configured or for deepseek reasoner models.
+				// Omitting it lets OpenAI-compatible APIs (e.g. Kimi) use their own model-specific defaults.
+				...(this.options.modelTemperature != null
+					? { temperature: this.options.modelTemperature }
+					: deepseekReasoner
+						? { temperature: DEEP_SEEK_DEFAULT_TEMPERATURE }
+						: {}),
+				messages: deepseekReasoner
+					? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
+					: [systemMessage, ...convertToOpenAiMessages(messages)],
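
---

Note for reviewers, outside the patch itself: the fix hinges on how a conditional object spread behaves, so here is a minimal standalone sketch of that technique. The `buildParams` helper, the trimmed-down parameter shape, and the `0.6` constant are illustrative assumptions for this sketch, not code or values taken from the repository:

```typescript
// Sketch of the conditional-spread pattern used in the diff (assumed names).
const DEEP_SEEK_DEFAULT_TEMPERATURE = 0.6 // illustrative value only

function buildParams(modelTemperature: number | null | undefined, deepseekReasoner: boolean) {
	return {
		model: "example-model",
		// `!= null` filters only undefined and null, so an explicit 0 still
		// produces `temperature: 0`; spreading `{}` adds no key at all.
		...(modelTemperature != null
			? { temperature: modelTemperature }
			: deepseekReasoner
				? { temperature: DEEP_SEEK_DEFAULT_TEMPERATURE }
				: {}),
	}
}

console.log("temperature" in buildParams(undefined, false)) // false - key is absent
console.log(buildParams(0, false).temperature) // 0 - explicit zero is preserved
console.log(buildParams(undefined, true).temperature) // 0.6 - reasoner fallback
```

The point of spreading `{}` rather than assigning `temperature: undefined` is that the key is guaranteed absent from the request object regardless of how the HTTP layer serializes it, which is what lets strict upstreams apply their own model default.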