7 changes: 4 additions & 3 deletions packages/typescript/ai-anthropic/src/adapters/text.ts
@@ -290,10 +290,13 @@ export class AnthropicTextAdapter<
'service_tier',
'stop_sequences',
'system',
'temperature',
'thinking',
'tool_choice',
'top_k',
'top_p',
]

for (const key of validKeys) {
if (key in modelOptions) {
const value = modelOptions[key]
@@ -312,7 +315,7 @@ export class AnthropicTextAdapter<
validProviderOptions.thinking?.type === 'enabled'
? validProviderOptions.thinking.budget_tokens
: undefined
const defaultMaxTokens = options.maxTokens || 1024
const defaultMaxTokens = modelOptions?.max_tokens || 1024
const maxTokens =
thinkingBudget && thinkingBudget >= defaultMaxTokens
? thinkingBudget + 1
@@ -321,8 +324,6 @@
const requestParams: InternalTextProviderOptions = {
model: options.model,
max_tokens: maxTokens,
temperature: options.temperature,
top_p: options.topP,
messages: formattedMessages,
system: options.systemPrompts?.join('\n'),
tools: tools,
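Net effect for Anthropic callers: sampling parameters now ride inside modelOptions under Anthropic's native snake_case names, and the max_tokens default is read from modelOptions rather than a top-level maxTokens. A minimal sketch of the new call shape — the model id and message are illustrative; only the option keys come from the validKeys list above:

// Illustrative options object; the modelOptions keys mirror the adapter's validKeys whitelist.
const anthropicOptions = {
  model: 'claude-sonnet-4-5',
  messages: [{ role: 'user', content: 'Hello' }],
  modelOptions: {
    temperature: 0.7, // newly whitelisted in validKeys
    top_p: 0.9,       // provider-native snake_case
    max_tokens: 2048, // feeds defaultMaxTokens; falls back to 1024 when absent
  },
}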
3 changes: 0 additions & 3 deletions packages/typescript/ai-gemini/src/adapters/text.ts
@@ -812,9 +812,6 @@ export class GeminiTextAdapter<
contents: this.formatMessages(options.messages),
config: {
...modelOpts,
temperature: options.temperature,
topP: options.topP,
maxOutputTokens: options.maxTokens,
thinkingConfig: thinkingConfig
? {
...thinkingConfig,
packages/typescript/ai-gemini/src/text/text-provider-options.ts
@@ -24,8 +24,13 @@ This will be enforced on the GenerateContentRequest.contents and GenerateContent

export interface GeminiCommonConfigOptions {
/**
* Configuration options for model generation and outputs.
* Controls the randomness of the output. Range: [0.0, 2.0]. Higher values produce more random output.
*/
temperature?: number
/**
* Nucleus sampling probability threshold. Range: (0.0, 1.0). Alter this or temperature, not both.
*/
topP?: number
Comment on lines 25 to +33
⚠️ Potential issue | 🟡 Minor

🧩 Analysis chain

🏁 Script executed:

#!/bin/bash
set -euo pipefail

# Expect: adapter no longer contains a direct maxTokens -> maxOutputTokens mapping,
# and provider option types should expose maxOutputTokens if modelOptions is now the path.
rg -nP --type=ts -C3 '\bmaxTokens\b|\bmaxOutputTokens\b' packages/typescript/ai-gemini packages/typescript/ai/src

Repository: TanStack/ai

Length of output: 11787


🏁 Script executed:

cat -n packages/typescript/ai-gemini/src/text/text-provider-options.ts | head -100

Repository: TanStack/ai

Length of output: 5727


🏁 Script executed:

fd -t f "text.ts" packages/typescript/ai-gemini/src/adapters --exec cat -n {} \; | head -150

Repository: TanStack/ai

Length of output: 5836


🏁 Script executed:

rg -n "mapCommonOptionsToGemini" packages/typescript/ai-gemini/src --context 20 -A 30

Repository: TanStack/ai

Length of output: 8331


🏁 Script executed:

rg -n "ExternalTextProviderOptions" packages/typescript/ai-gemini/src

Repository: TanStack/ai

Length of output: 447


🏁 Script executed:

sed -n '253,260p' packages/typescript/ai-gemini/src/text/text-provider-options.ts

Repository: TanStack/ai

Length of output: 299


🏁 Script executed:

cat -n packages/typescript/ai-gemini/src/text/text-provider-options.ts | tail -50

Repository: TanStack/ai

Length of output: 1928


🏁 Script executed:

rg -n "maxTokens" packages/typescript/ai-gemini/src/adapters/text.ts -B 2 -A 2

Repository: TanStack/ai

Length of output: 37


🏁 Script executed:

grep -n "maxOutputTokens" packages/typescript/ai-gemini/src/text/text-provider-options.ts

Repository: TanStack/ai

Length of output: 370


🏁 Script executed:

grep -n "maxTokens" packages/typescript/ai-gemini/tests/gemini-adapter.test.ts -B 3 -A 3

Repository: TanStack/ai

Length of output: 492


🏁 Script executed:

rg -n "maxTokens\|maxOutputTokens" packages/typescript/ai-gemini/src/adapters/ -B 2 -A 2

Repository: TanStack/ai

Length of output: 37


🏁 Script executed:

rg -n "maxTokens" packages/typescript/ai-gemini/tests/gemini-adapter.test.ts | grep -A 5 "expect.*maxOutputTokens"

Repository: TanStack/ai

Length of output: 37


🏁 Script executed:

cat -n packages/typescript/ai-gemini/tests/gemini-adapter.test.ts | sed -n '104,125p'

Repository: TanStack/ai

Length of output: 795


🏁 Script executed:

cat -n packages/typescript/ai-gemini/tests/gemini-adapter.test.ts | sed -n '200,230p'

Repository: TanStack/ai

Length of output: 1315


🏁 Script executed:

rg -n "class BaseTextAdapter\|chatStream" packages/typescript/ai/src/adapters -B 2 -A 15 | head -80

Repository: TanStack/ai

Length of output: 130


🏁 Script executed:

find packages -name "*.ts" -type f | xargs grep -l "class BaseTextAdapter" | head -5

Repository: TanStack/ai

Length of output: 108


🏁 Script executed:

fd -t f "index.ts" packages/typescript/ai/src/adapters | head -10

Repository: TanStack/ai

Length of output: 177


🏁 Script executed:

cat -n packages/typescript/ai/src/activities/chat/adapter.ts | head -150

Repository: TanStack/ai

Length of output: 5884


🏁 Script executed:

rg -n "maxTokens" packages/typescript/ai/src/activities/chat/adapter.ts -B 3 -A 3

Repository: TanStack/ai

Length of output: 37


🏁 Script executed:

rg -n "mapCommonOptionsToGemini" packages/typescript/ai-gemini/src/adapters/text.ts -B 5 -A 40 | grep -A 40 "private mapCommonOptionsToGemini"

Repository: TanStack/ai

Length of output: 1576


🏁 Script executed:

rg -n "temperature\|topP\|maxTokens" packages/typescript/ai/src/activities/chat/adapter.ts | head -20

Repository: TanStack/ai

Length of output: 37


🏁 Script executed:

rg -n "maxTokens" packages/typescript/ai-gemini/src/ --type ts -B 2 -A 2 | grep -v test | grep -v ".test.ts"

Repository: TanStack/ai

Length of output: 37


🏁 Script executed:

rg -n "\.maxTokens" packages/typescript/ai/src/activities/chat/ --type ts -B 2 -A 2 | head -30

Repository: TanStack/ai

Length of output: 1522


🏁 Script executed:

cat -n packages/typescript/ai-gemini/tests/gemini-adapter.test.ts | sed -n '95,130p'

Repository: TanStack/ai

Length of output: 1352


🏁 Script executed:

rg -n "maxTokens.*maxOutputTokens\|maxOutputTokens.*maxTokens" packages/typescript/ai-gemini/ --type ts

Repository: TanStack/ai

Length of output: 37


🏁 Script executed:

rg -n "const.*maxOutputTokens\|maxOutputTokens.*=" packages/typescript/ai-gemini/src --type ts -B 2 -A 2

Repository: TanStack/ai

Length of output: 37


🏁 Script executed:

cat -n packages/typescript/ai/src/activities/chat/index.ts | sed -n '570,620p'

Repository: TanStack/ai

Length of output: 1964


🏁 Script executed:

rg -n "maxTokens.*maxOutputTokens" packages/typescript/ai-gemini/ -B 5 -A 5

Repository: TanStack/ai

Length of output: 37


🏁 Script executed:

cat -n packages/typescript/ai-gemini/src/adapters/text.ts | sed -n '800,835p'

Repository: TanStack/ai

Length of output: 1395


🏁 Script executed:

rg -n "temperature\|topP" packages/typescript/ai-gemini/src/adapters/text.ts -B 2 -A 2

Repository: TanStack/ai

Length of output: 37


🏁 Script executed:

fd -t f "test*.ts" packages/typescript/ai-gemini | xargs grep -l "maxTokens.*maxOutputTokens" | head -1

Repository: TanStack/ai

Length of output: 37


Add maxOutputTokens to GeminiCommonConfigOptions to support token limiting through modelOptions.

The adapter's mapCommonOptionsToGemini method spreads modelOptions directly into Gemini's config object (line 814). Since temperature and topP are now part of GeminiCommonConfigOptions, the provider-native field maxOutputTokens should be exposed alongside them for type-safe token-limit control through modelOptions.

🔧 Proposed fix
 export interface GeminiCommonConfigOptions {
   /**
    * Controls the randomness of the output. Range: [0.0, 2.0]. Higher values produce more random output.
    */
   temperature?: number
   /**
    * Nucleus sampling probability threshold. Range: (0.0, 1.0). Alter this or temperature, not both.
    */
   topP?: number
+  /**
+   * Maximum number of tokens that can be generated in the response.
+   */
+  maxOutputTokens?: number
   /**
    * The set of character sequences (up to 5) that will stop output generation. If specified, the API will stop at the first appearance of a stop_sequence. The stop sequence will not be included as part of the response.
    */
   stopSequences?: Array<string>
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
export interface GeminiCommonConfigOptions {
/**
* Configuration options for model generation and outputs.
* Controls the randomness of the output. Range: [0.0, 2.0]. Higher values produce more random output.
*/
temperature?: number
/**
* Nucleus sampling probability threshold. Range: (0.0, 1.0). Alter this or temperature, not both.
*/
topP?: number
export interface GeminiCommonConfigOptions {
/**
* Controls the randomness of the output. Range: [0.0, 2.0]. Higher values produce more random output.
*/
temperature?: number
/**
* Nucleus sampling probability threshold. Range: (0.0, 1.0). Alter this or temperature, not both.
*/
topP?: number
/**
* Maximum number of tokens that can be generated in the response.
*/
maxOutputTokens?: number
/**
* The set of character sequences (up to 5) that will stop output generation. If specified, the API will stop at the first appearance of a stop_sequence. The stop sequence will not be included as part of the response.
*/
stopSequences?: Array<string>
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@packages/typescript/ai-gemini/src/text/text-provider-options.ts` around lines 25-33, the GeminiCommonConfigOptions interface is missing the provider-native maxOutputTokens field, so type-safe token limits passed via modelOptions are not recognized. Add an optional maxOutputTokens?: number property to GeminiCommonConfigOptions so that when mapCommonOptionsToGemini spreads modelOptions into Gemini's config, the token limit is typed and passed through correctly.

/**
* The set of character sequences (up to 5) that will stop output generation. If specified, the API will stop at the first appearance of a stop_sequence. The stop sequence will not be included as part of the response.
*/
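If the reviewer's maxOutputTokens suggestion lands, all three generation knobs become typed fields on GeminiCommonConfigOptions and flow through the config spread shown in the adapter diff above. A sketch under that assumption — model id and message are illustrative:

const geminiOptions = {
  model: 'gemini-2.0-flash',
  messages: [{ role: 'user', content: 'Hello' }],
  modelOptions: {
    temperature: 1.0,      // range [0.0, 2.0]
    topP: 0.95,            // alter this or temperature, not both
    maxOutputTokens: 1024, // assumes the suggested field is adopted
  },
}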
8 changes: 1 addition & 7 deletions packages/typescript/ai-grok/src/adapters/text.ts
@@ -450,10 +450,7 @@ export class GrokTextAdapter<
options: TextOptions,
): OpenAI_SDK.Chat.Completions.ChatCompletionCreateParamsStreaming {
const modelOptions = options.modelOptions as
| Omit<
InternalTextProviderOptions,
'max_tokens' | 'tools' | 'temperature' | 'input' | 'top_p'
>
| Omit<InternalTextProviderOptions, 'tools' | 'input'>
| undefined

if (modelOptions) {
@@ -487,9 +484,6 @@
return {
model: options.model,
messages,
temperature: options.temperature,
max_tokens: options.maxTokens,
top_p: options.topP,
tools: tools as Array<OpenAI_SDK.Chat.Completions.ChatCompletionTool>,
stream: true,
stream_options: { include_usage: true },
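The widened Omit is what makes the removals above safe: temperature, max_tokens, and top_p are no longer stripped from the modelOptions type, so they type-check and reach the request through the existing modelOptions handling. A stand-in sketch of the type relationship — the interface body is an assumption; only the Omit keys come from the diff:

// Stand-in for the real InternalTextProviderOptions; the members are assumed.
interface InternalTextProviderOptions {
  temperature?: number
  max_tokens?: number
  top_p?: number
  tools?: Array<unknown>
  input?: string
}

// Before, sampling params were excluded alongside 'tools' and 'input';
// now only the latter two stay out of modelOptions.
type GrokModelOptions = Omit<InternalTextProviderOptions, 'tools' | 'input'>

const grokModelOptions: GrokModelOptions = {
  temperature: 0.2, // accepted post-change
  max_tokens: 512,
  top_p: 0.9,
}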
8 changes: 1 addition & 7 deletions packages/typescript/ai-groq/src/adapters/text.ts
@@ -442,10 +442,7 @@ export class GroqTextAdapter<
options: TextOptions,
): ChatCompletionCreateParamsStreaming {
const modelOptions = options.modelOptions as
| Omit<
InternalTextProviderOptions,
'max_tokens' | 'tools' | 'temperature' | 'input' | 'top_p'
>
| Omit<InternalTextProviderOptions, 'tools'>
| undefined

if (modelOptions) {
@@ -475,9 +472,6 @@
return {
model: options.model,
messages,
temperature: options.temperature,
max_tokens: options.maxTokens,
top_p: options.topP,
tools,
stream: true,
}
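Groq mirrors the Grok change, with its Omit excluding only 'tools'. Callers set sampling parameters the same way — in modelOptions (model id illustrative):

const groqOptions = {
  model: 'llama-3.3-70b-versatile',
  messages: [{ role: 'user', content: 'Summarize this.' }],
  modelOptions: { temperature: 0.3, max_tokens: 256, top_p: 1 },
}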
9 changes: 1 addition & 8 deletions packages/typescript/ai-ollama/src/adapters/text.ts
@@ -577,16 +577,9 @@ export class OllamaTextAdapter<TModel extends string> extends BaseTextAdapter<
| OllamaTextProviderOptions
| undefined

const ollamaOptions = {
temperature: options.temperature,
top_p: options.topP,
num_predict: options.maxTokens,
...modelOptions,
}

return {
model,
options: ollamaOptions,
options: modelOptions,
messages: this.formatMessages(options.messages),
tools: this.convertToolsToOllamaFormat(options.tools),
...(options.systemPrompts?.length
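With the intermediate ollamaOptions object gone, modelOptions is handed to Ollama verbatim, so callers use Ollama's native option names — notably num_predict, which the old code mapped from maxTokens. A sketch (model id illustrative):

const ollamaCallOptions = {
  model: 'llama3.2',
  messages: [{ role: 'user', content: 'Hi' }],
  modelOptions: {
    temperature: 0.8,
    top_p: 0.9,
    num_predict: 128, // Ollama's output-token cap, formerly mapped from maxTokens
  },
}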
55 changes: 12 additions & 43 deletions packages/typescript/ai-ollama/src/meta/models-meta.ts
@@ -1,4 +1,4 @@
import type { Tool, ToolCall } from 'ollama'
import type { Options, Tool, ToolCall } from 'ollama'

export interface OllamaModelMeta<TModelOptions = unknown> {
name: string
@@ -12,37 +12,7 @@ export interface OllamaModelMeta<TModelOptions = unknown> {
context?: number
}

interface OllamaOptions {
numa: boolean
num_ctx: number
num_batch: number
num_gpu: number
main_gpu: number
low_vram: boolean
f16_kv: boolean
logits_all: boolean
vocab_only: boolean
use_mmap: boolean
use_mlock: boolean
embedding_only: boolean
num_thread: number
num_keep: number
seed: number
num_predict: number
top_k: number
tfs_z: number
typical_p: number
repeat_last_n: number
repeat_penalty: number
presence_penalty: number
frequency_penalty: number
mirostat: number
mirostat_tau: number
mirostat_eta: number
penalize_newline: boolean
stop: Array<string>
}

// ollama model for reference
// interface ChatRequest {
// model: string
// messages?: Message[]
@@ -55,18 +25,17 @@ interface OllamaOptions {
// top_logprobs?: number
// options?: Partial<Options>
// }

export interface OllamaChatRequest {
// model: string
// messages?: Message[]
// model: string (extended later)
// messages?: Message[] (extended later)
stream?: boolean
format?: string | object
keep_alive?: string | number
// tools?: Tool[]
// think?: boolean | 'high' | 'medium' | 'low'
// tools?: Tool[] (extended later)
// think?: boolean | 'high' | 'medium' | 'low' (extended later)
logprobs?: boolean
top_logprobs?: number
options?: Partial<OllamaOptions>
options?: Partial<Options>
}

export interface OllamaChatRequestThinking {
@@ -81,6 +50,7 @@ export interface OllamaChatRequestTools {
tools?: Array<Tool>
}

// ollama model for reference
// interface Message {
// role: string
// content: string
@@ -89,18 +59,17 @@ export interface OllamaChatRequestTools {
// tool_calls?: ToolCall[]
// tool_name?: string
// }

export interface OllamaChatRequestMessages<
TMessageExtension extends OllamaMessageExtension = {},
> {
messages?: Array<
{
role: string
content: string
// thinking?: string
// images?: Uint8Array[] | string[]
// tool_calls?: ToolCall[]
// tool_name?: string
// thinking?: string (extended later)
// images?: Uint8Array[] | string[] (extended later)
// tool_calls?: ToolCall[] (extended later)
// tool_name?: string (extended later)
} & TMessageExtension
>
}
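Swapping the hand-rolled OllamaOptions for the SDK's own Options type keeps this surface from drifting out of sync with the installed ollama package. A sketch of typing request options against it, using only fields the deleted interface also carried:

import type { Options } from 'ollama'

const requestOptions: Partial<Options> = {
  num_ctx: 4096,    // context window size
  num_predict: 256, // output-token cap
  top_k: 40,
  stop: ['\n\n'],
}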
13 changes: 1 addition & 12 deletions packages/typescript/ai-openai/src/adapters/text.ts
@@ -846,15 +846,7 @@ export class OpenAITextAdapter<
*/
private mapTextOptionsToOpenAI(options: TextOptions) {
const modelOptions = options.modelOptions as
| Omit<
InternalTextProviderOptions,
| 'max_output_tokens'
| 'tools'
| 'metadata'
| 'temperature'
| 'input'
| 'top_p'
>
| Omit<InternalTextProviderOptions, 'tools' | 'metadata' | 'input'>
| undefined
const input = this.convertMessagesToInput(options.messages)
if (modelOptions) {
@@ -874,9 +866,6 @@
'stream'
> = {
model: options.model,
temperature: options.temperature,
max_output_tokens: options.maxTokens,
top_p: options.topP,
metadata: options.metadata,
instructions: options.systemPrompts?.join('\n'),
...modelOptions,
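Since requestParams spreads ...modelOptions after the explicitly mapped fields, provider-native parameters now come only from modelOptions — including the Responses-style max_output_tokens that was previously mapped from maxTokens. A sketch (model id illustrative):

const openaiOptions = {
  model: 'gpt-4.1-mini',
  messages: [{ role: 'user', content: 'Hello' }],
  modelOptions: {
    temperature: 0.5,
    top_p: 0.9,
    max_output_tokens: 800, // Responses API name, formerly mapped from maxTokens
  },
}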
18 changes: 4 additions & 14 deletions packages/typescript/ai-openrouter/src/adapters/text.ts
@@ -671,22 +671,12 @@ export class OpenRouterTextAdapter<
})
}

// Spread modelOptions first, then conditionally override with explicit
// top-level options so undefined values don't clobber modelOptions. Fixes
// #310, where the reverse order silently dropped user-set values.
const { variant, ...restModelOptions } = modelOptions ?? {}

const request: ChatRequest = {
...modelOptions,
model:
options.model +
(modelOptions?.variant ? `:${modelOptions.variant}` : ''),
...restModelOptions,
model: options.model + (variant ? `:${variant}` : ''),
messages,
...(options.temperature !== undefined && {
temperature: options.temperature,
}),
...(options.maxTokens !== undefined && {
maxCompletionTokens: options.maxTokens,
}),
...(options.topP !== undefined && { topP: options.topP }),
tools: options.tools
? convertToolsToProviderFormat(options.tools)
: undefined,
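The destructure-then-spread shape matters here because variant is routing metadata, not a request field: it decorates the model id and must stay out of the ChatRequest body. A minimal sketch of that mechanic (model id and values illustrative):

const routerModelOptions = { variant: 'nitro', temperature: 0.6 }
const { variant, ...restModelOptions } = routerModelOptions

const model = 'meta-llama/llama-3.1-70b-instruct' + (variant ? `:${variant}` : '')
// model === 'meta-llama/llama-3.1-70b-instruct:nitro'
// restModelOptions ({ temperature: 0.6 }) is what gets spread into the
// ChatRequest, so variant never leaks into the request payload.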