src/api/providers/__tests__/openai.spec.ts (68 additions, 1 deletion)

@@ -497,6 +497,74 @@ describe("OpenAiHandler", () => {
 			const callArgs = mockCreate.mock.calls[0][0]
 			expect(callArgs.max_completion_tokens).toBe(4096)
 		})
+
+		it("should omit temperature when modelTemperature is not set (streaming)", async () => {
+			// When no custom temperature is configured, temperature should not be sent
+			// so that OpenAI-compatible APIs (e.g. Kimi) can use their own defaults
+			const handlerNoTemp = new OpenAiHandler({
+				...mockOptions,
+				// modelTemperature is not set (undefined)
+			})
+			const stream = handlerNoTemp.createMessage(systemPrompt, messages)
+			for await (const _chunk of stream) {
+			}
+			expect(mockCreate).toHaveBeenCalled()
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs).not.toHaveProperty("temperature")
+		})
+
+		it("should include temperature when modelTemperature is explicitly set (streaming)", async () => {
+			const handlerWithTemp = new OpenAiHandler({
+				...mockOptions,
+				modelTemperature: 0.6,
+			})
+			const stream = handlerWithTemp.createMessage(systemPrompt, messages)
+			for await (const _chunk of stream) {
+			}
+			expect(mockCreate).toHaveBeenCalled()
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs.temperature).toBe(0.6)
+		})
+
+		it("should include temperature 0 when modelTemperature is explicitly set to 0 (streaming)", async () => {
+			const handlerWithZeroTemp = new OpenAiHandler({
+				...mockOptions,
+				modelTemperature: 0,
+			})
+			const stream = handlerWithZeroTemp.createMessage(systemPrompt, messages)
+			for await (const _chunk of stream) {
+			}
+			expect(mockCreate).toHaveBeenCalled()
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs.temperature).toBe(0)
+		})
+
+		it("should omit temperature when modelTemperature is not set (non-streaming)", async () => {
+			const handlerNoTemp = new OpenAiHandler({
+				...mockOptions,
+				openAiStreamingEnabled: false,
+			})
+			const stream = handlerNoTemp.createMessage(systemPrompt, messages)
+			for await (const _chunk of stream) {
+			}
+			expect(mockCreate).toHaveBeenCalled()
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs).not.toHaveProperty("temperature")
+		})
+
+		it("should include temperature when modelTemperature is explicitly set (non-streaming)", async () => {
+			const handlerWithTemp = new OpenAiHandler({
+				...mockOptions,
+				openAiStreamingEnabled: false,
+				modelTemperature: 0.6,
+			})
+			const stream = handlerWithTemp.createMessage(systemPrompt, messages)
+			for await (const _chunk of stream) {
+			}
+			expect(mockCreate).toHaveBeenCalled()
+			const callArgs = mockCreate.mock.calls[0][0]
+			expect(callArgs.temperature).toBe(0.6)
+		})
 	})
 
 	describe("error handling", () => {
@@ -632,7 +700,6 @@ describe("OpenAiHandler", () => {
 				],
 				stream: true,
 				stream_options: { include_usage: true },
-				temperature: 0,
 				tools: undefined,
 				tool_choice: undefined,
 				parallel_tool_calls: true,
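A note on the assertion style above: the omit-case tests use `.not.toHaveProperty("temperature")` rather than checking for an `undefined` value because, under Jest-compatible matcher semantics, a key explicitly set to `undefined` still exists on the object and counts as present. That is exactly why the implementation must drop the key via conditional spread instead of passing `temperature: undefined`. A minimal standalone sketch (the `buildRequest` helper is hypothetical; Vitest is assumed as the runner):

```typescript
import { describe, expect, it } from "vitest"

// Hypothetical helper mirroring the conditional-spread pattern from the diff.
function buildRequest(modelTemperature?: number) {
	return {
		model: "gpt-4",
		...(modelTemperature != null ? { temperature: modelTemperature } : {}),
	}
}

describe("temperature key presence (illustration)", () => {
	it("omits the key entirely when no temperature is configured", () => {
		expect(buildRequest()).not.toHaveProperty("temperature")
	})

	it("keeps an explicit 0, which a truthiness check would have dropped", () => {
		expect(buildRequest(0)).toHaveProperty("temperature", 0)
	})

	it("treats a key set to undefined as still present", () => {
		// This is why `temperature: undefined` would not satisfy the omit-case tests.
		expect({ temperature: undefined }).toHaveProperty("temperature")
	})
})
```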
src/api/providers/openai.ts (14 additions, 1 deletion)

@@ -154,7 +154,13 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandler {
 
 			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
 				model: modelId,
-				temperature: this.options.modelTemperature ?? (deepseekReasoner ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0),
+				// Only include temperature when explicitly configured or for deepseek reasoner models.
+				// Omitting it lets OpenAI-compatible APIs (e.g. Kimi) use their own model-specific defaults.
+				...(this.options.modelTemperature != null
+					? { temperature: this.options.modelTemperature }
+					: deepseekReasoner
+						? { temperature: DEEP_SEEK_DEFAULT_TEMPERATURE }
+						: {}),
 				messages: convertedMessages,
 				stream: true as const,
 				...(isGrokXAI ? {} : { stream_options: { include_usage: true } }),
@@ -223,6 +229,13 @@ export class OpenAiHandler extends BaseProvider implements SingleCompletionHandler {
 		} else {
 			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
 				model: modelId,
+				// Only include temperature when explicitly configured or for deepseek reasoner models.
+				// Omitting it lets OpenAI-compatible APIs (e.g. Kimi) use their own model-specific defaults.
+				...(this.options.modelTemperature != null
+					? { temperature: this.options.modelTemperature }
+					: deepseekReasoner
+						? { temperature: DEEP_SEEK_DEFAULT_TEMPERATURE }
+						: {}),
 				messages: deepseekReasoner
 					? convertToR1Format([{ role: "user", content: systemPrompt }, ...messages])
 					: [systemMessage, ...convertToOpenAiMessages(messages)],
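The essence of the change: the old code always sent a temperature, falling back to `0` via `??` and thereby overriding whatever default an OpenAI-compatible provider would otherwise apply. The new conditional spread leaves the key out of the request payload entirely unless the user configured a temperature or the model is a DeepSeek reasoner. A condensed before/after sketch (the constant's value and the standalone function names are illustrative; only `DEEP_SEEK_DEFAULT_TEMPERATURE` and the option shape come from the diff):

```typescript
// Value assumed for illustration; the real constant lives in the provider code.
const DEEP_SEEK_DEFAULT_TEMPERATURE = 0.6

interface Options {
	modelTemperature?: number | null
}

// Before: a temperature is always present, defaulting to 0.
function before(options: Options, deepseekReasoner: boolean) {
	return {
		temperature: options.modelTemperature ?? (deepseekReasoner ? DEEP_SEEK_DEFAULT_TEMPERATURE : 0),
	}
}

// After: the key is spread in only when explicitly configured
// (or for DeepSeek reasoner models), so the API's own default applies otherwise.
function after(options: Options, deepseekReasoner: boolean) {
	return {
		...(options.modelTemperature != null
			? { temperature: options.modelTemperature }
			: deepseekReasoner
				? { temperature: DEEP_SEEK_DEFAULT_TEMPERATURE }
				: {}),
	}
}

console.log(before({}, false)) // { temperature: 0 }  -> 0 is serialized and sent
console.log(after({}, false)) // {}                   -> key absent; server default wins
console.log(after({ modelTemperature: 0 }, false)) // { temperature: 0 } -> explicit 0 kept
```

The `!= null` comparison (rather than a truthiness check) is what keeps an explicit `temperature: 0` intact, matching the dedicated zero-temperature test above.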