5 changes: 5 additions & 0 deletions core/llm/index.ts
@@ -964,6 +964,11 @@ export abstract class BaseLLM implements ILLM {
  ) {
    let completion = "";
    for await (const message of this.streamChat(messages, signal, options)) {
      // Skip thinking/reasoning messages so the returned completion only
      // contains the visible assistant response, not internal reasoning text.
      if (message.role === "thinking") {
        continue;
      }
      completion += renderChatMessage(message);
    }
    return { role: "assistant" as const, content: completion };
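For context, a minimal standalone sketch of the accumulation behavior this change enforces, assuming a simplified message shape rather than Continue's actual ChatMessage types: only chunks with a visible assistant role are appended, so internal reasoning never reaches the returned completion.

// Minimal sketch with assumed simplified types; not the repo's real ChatMessage.
type SketchMessage = { role: "thinking" | "assistant"; content: string };

async function* sampleStream(): AsyncGenerator<SketchMessage> {
  // Mixed stream: one reasoning chunk followed by the visible answer.
  yield { role: "thinking", content: "Considering quicksort vs. mergesort..." };
  yield { role: "assistant", content: "Sorting Algorithm Help" };
}

async function collectVisibleText(): Promise<string> {
  let completion = "";
  for await (const message of sampleStream()) {
    if (message.role === "thinking") {
      continue; // drop internal reasoning, as in the change above
    }
    completion += message.content; // keep only the visible assistant text
  }
  return completion; // => "Sorting Algorithm Help"
}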
18 changes: 18 additions & 0 deletions core/util/chatDescriber.test.ts
@@ -65,5 +65,23 @@ describe("ChatDescriber", () => {
        ChatDescriber.describe(testLLM, completionOptions, message),
      ).rejects.toThrow();
    });

    it("should exclude thinking/reasoning content from the generated title", async () => {
      const message = "Help me write a sorting algorithm";

      // Simulate a model that emits a thinking chunk followed by an assistant chunk
      testLLM.chatStreams = [
        [
          {
            role: "thinking",
            content:
              "Here is my thinking process, I need to consider various sorting algorithms...",
          },
          { role: "assistant", content: "Sorting Algorithm Help" },
        ],
      ];

      const result = await ChatDescriber.describe(testLLM, {}, message);

      // The title should come from the assistant chunk, not the thinking chunk
      expect(result).toBe("Sorting Algorithm Help");
      expect(result).not.toContain("thinking process");
    });
  });
});
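As a rough illustration of the queued-stream test double the assertions above rely on, a hypothetical mock (the repo's actual test LLM helper may differ) could shift one prepared chunk array per streamChat call:

// Hypothetical mock, for illustration only; not the repo's actual test helper.
type MockMessage = { role: "thinking" | "assistant"; content: string };

class MockStreamingLLM {
  chatStreams: MockMessage[][] = [];

  async *streamChat(): AsyncGenerator<MockMessage> {
    // Consume the next queued stream and replay its chunks in order.
    const next = this.chatStreams.shift() ?? [];
    for (const chunk of next) {
      yield chunk;
    }
  }
}

With a double like this, ChatDescriber.describe would see the thinking chunk first and, after the BaseLLM change, ignore it when building the session title.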