diff --git a/.changeset/add-ai-mistral.md b/.changeset/add-ai-mistral.md
new file mode 100644
index 000000000..4e1199e98
--- /dev/null
+++ b/.changeset/add-ai-mistral.md
@@ -0,0 +1,5 @@
+---
+'@tanstack/ai-mistral': minor
+---
+
+Add new `@tanstack/ai-mistral` adapter package for Mistral models using the `@mistralai/mistralai` SDK. Supports streaming chat, tool calling, vision input (Pixtral / Mistral Medium / Small), structured output via JSON Schema, and reasoning streams (Magistral) — emitted as AG-UI `REASONING_*` events. Includes model metadata for Mistral Large, Medium, Small, Ministral 3B/8B, Codestral, Pixtral, Magistral, and Open Mistral Nemo.
diff --git a/docs/adapters/mistral.md b/docs/adapters/mistral.md
new file mode 100644
index 000000000..e678b46be
--- /dev/null
+++ b/docs/adapters/mistral.md
@@ -0,0 +1,329 @@
+---
+title: Mistral
+id: mistral-adapter
+order: 7
+description: "Use Mistral models with TanStack AI — Mistral Large, Mistral Medium, Pixtral vision models, Magistral reasoning models, and Codestral via @tanstack/ai-mistral."
+keywords:
+  - tanstack ai
+  - mistral
+  - mistral large
+  - pixtral
+  - magistral
+  - codestral
+  - adapter
+  - llm
+---
+
+The Mistral adapter provides access to Mistral's chat models, including Mistral Large, the multimodal Pixtral family, the Magistral reasoning models, and the Codestral code-specialized model.
+
+## Installation
+
+```bash
+npm install @tanstack/ai-mistral
+```
+
+## Basic Usage
+
+```typescript
+import { chat } from "@tanstack/ai";
+import { mistralText } from "@tanstack/ai-mistral";
+
+const stream = chat({
+  adapter: mistralText("mistral-large-latest"),
+  messages: [{ role: "user", content: "Hello!" }],
+});
+```
+
+## Basic Usage - Custom API Key
+
+```typescript
+import { chat } from "@tanstack/ai";
+import { createMistralText } from "@tanstack/ai-mistral";
+
+const adapter = createMistralText(
+  "mistral-large-latest",
+  process.env.MISTRAL_API_KEY!,
+);
+
+const stream = chat({
+  adapter,
+  messages: [{ role: "user", content: "Hello!" }],
+});
+```
+
+## Configuration
+
+```typescript
+import {
+  createMistralText,
+  type MistralTextConfig,
+} from "@tanstack/ai-mistral";
+
+const config: Omit<MistralTextConfig, "apiKey"> = {
+  serverURL: "https://api.mistral.ai", // Optional, this is the default
+  defaultHeaders: {
+    "X-Custom-Header": "value",
+  },
+};
+
+const adapter = createMistralText(
+  "mistral-large-latest",
+  process.env.MISTRAL_API_KEY!,
+  config,
+);
+```
+
+## Example: Chat Completion
+
+```typescript
+import { chat, toServerSentEventsResponse } from "@tanstack/ai";
+import { mistralText } from "@tanstack/ai-mistral";
+
+export async function POST(request: Request) {
+  const { messages } = await request.json();
+
+  const stream = chat({
+    adapter: mistralText("mistral-large-latest"),
+    messages,
+  });
+
+  return toServerSentEventsResponse(stream);
+}
+```
+
+## Example: With Tools
+
+```typescript
+import { chat, toolDefinition } from "@tanstack/ai";
+import { mistralText } from "@tanstack/ai-mistral";
+import { z } from "zod";
+
+const getWeatherDef = toolDefinition({
+  name: "get_weather",
+  description: "Get the current weather for a location",
+  inputSchema: z.object({
+    location: z.string(),
+  }),
+});
+
+const getWeather = getWeatherDef.server(async ({ location }) => {
+  return { temperature: 72, conditions: "sunny" };
+});
+
+const stream = chat({
+  adapter: mistralText("mistral-large-latest"),
+  messages: [{ role: "user", content: "What's the weather in Paris?" }],
+  tools: [getWeather],
+});
+```
+
+## Example: Multimodal (Vision)
+
+Use a vision-capable model — `pixtral-large-latest`, `pixtral-12b-2409`, `mistral-medium-latest`, or `mistral-small-latest` — to send images alongside text:
+
+```typescript
+import { chat } from "@tanstack/ai";
+import { mistralText } from "@tanstack/ai-mistral";
+
+const stream = chat({
+  adapter: mistralText("pixtral-large-latest"),
+  messages: [
+    {
+      role: "user",
+      content: [
+        { type: "text", content: "What's in this image?" },
+        {
+          type: "image",
+          source: {
+            type: "url",
+            value: "https://example.com/photo.jpg",
+          },
+        },
+      ],
+    },
+  ],
+});
+```
+
+For data-URL or base64 images, set `source.type` to `"data"` and provide `mimeType`:
+
+```typescript
+{
+  type: "image",
+  source: {
+    type: "data",
+    mimeType: "image/png",
+    value: base64String,
+  },
+}
+```
+
+See [Multimodal Content](../advanced/multimodal-content) for the full content-part shape.
+
+## Example: Reasoning (Magistral)
+
+Magistral models (`magistral-medium-latest`, `magistral-small-latest`) stream their reasoning as separate events before the final answer. The adapter emits AG-UI `REASONING_*` chunks for the thinking content and `TEXT_MESSAGE_*` chunks for the answer:
+
+```typescript
+import { chat } from "@tanstack/ai";
+import { mistralText } from "@tanstack/ai-mistral";
+
+const stream = chat({
+  adapter: mistralText("magistral-medium-latest"),
+  messages: [{ role: "user", content: "Why is the sky blue?" }],
+});
+
+for await (const chunk of stream) {
+  if (chunk.type === "REASONING_MESSAGE_CONTENT") {
+    process.stdout.write(`[thinking] ${chunk.delta}`);
+  } else if (chunk.type === "TEXT_MESSAGE_CONTENT") {
+    process.stdout.write(chunk.delta);
+  }
+}
+```
+
+Reasoning events are always closed before any text or tool output begins, so consumers see a complete `REASONING_START → REASONING_MESSAGE_START → REASONING_MESSAGE_CONTENT* → REASONING_MESSAGE_END → REASONING_END` sequence first.
+
+See [Thinking & Reasoning](../chat/thinking-content) for the cross-provider event spec.
+
+## Example: Structured Output
+
+Generate JSON that conforms to a Zod schema using Mistral's `json_schema` response format:
+
+```typescript
+import { generate } from "@tanstack/ai";
+import { mistralText } from "@tanstack/ai-mistral";
+import { z } from "zod";
+
+const recipeSchema = z.object({
+  name: z.string(),
+  ingredients: z.array(z.string()),
+  steps: z.array(z.string()),
+});
+
+const result = await generate({
+  adapter: mistralText("mistral-large-latest"),
+  messages: [
+    { role: "user", content: "Give me a chocolate chip cookie recipe." },
+  ],
+  outputSchema: recipeSchema,
+});
+
+console.log(result.data); // typed as z.infer<typeof recipeSchema>
+```
+
+See [Structured Outputs](../chat/structured-outputs) for the full guide.
+
+## Model Options
+
+Mistral exposes provider-specific options via `modelOptions`:
+
+```typescript
+const stream = chat({
+  adapter: mistralText("mistral-large-latest"),
+  messages,
+  temperature: 0.7,
+  topP: 0.9,
+  maxTokens: 1024,
+  modelOptions: {
+    random_seed: 42,
+    stop: ["END"],
+    safe_prompt: true,
+    frequency_penalty: 0.5,
+    presence_penalty: 0.5,
+    parallel_tool_calls: true,
+    tool_choice: "auto",
+  },
+});
+```
+
+> Pass `temperature`, `topP`, and `maxTokens` at the top level — not inside `modelOptions`.
+
+## Environment Variables
+
+Set your API key in environment variables:
+
+```bash
+MISTRAL_API_KEY=...
+```
+
+Get a key from the [Mistral Console](https://console.mistral.ai/).
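+
+## Example: Inspecting Stream Events
+
+Every chunk yielded by `chat` is a plain AG-UI event object, so you can branch on `chunk.type` to watch tool calls stream in alongside text. A minimal sketch (illustrative only; `getWeather` is the tool defined in the tools example above, and the event fields shown are the ones this adapter emits):
+
+```typescript
+import { chat } from "@tanstack/ai";
+import { mistralText } from "@tanstack/ai-mistral";
+
+const stream = chat({
+  adapter: mistralText("mistral-large-latest"),
+  messages: [{ role: "user", content: "What's the weather in Paris?" }],
+  tools: [getWeather],
+});
+
+for await (const chunk of stream) {
+  if (chunk.type === "TOOL_CALL_START") {
+    console.log(`calling ${chunk.toolCallName}`);
+  } else if (chunk.type === "TOOL_CALL_ARGS") {
+    process.stdout.write(chunk.delta); // raw JSON argument fragments
+  } else if (chunk.type === "TOOL_CALL_END") {
+    console.log("args:", chunk.input); // fully parsed argument object
+  } else if (chunk.type === "TEXT_MESSAGE_CONTENT") {
+    process.stdout.write(chunk.delta);
+  }
+}
+```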
+
+## Supported Models
+
+### Chat
+
+- `mistral-large-latest` — Flagship general-purpose model (128k context)
+- `mistral-medium-latest` — Multimodal mid-tier model with vision
+- `mistral-small-latest` — Fast, affordable multimodal model with vision
+- `ministral-8b-latest` — 8B edge model
+- `ministral-3b-latest` — 3B edge model
+- `open-mistral-nemo` — Open 12B model
+
+### Code
+
+- `codestral-latest` — Code-specialized model (256k context)
+
+### Vision
+
+- `pixtral-large-latest` — Large vision model
+- `pixtral-12b-2409` — 12B vision model
+
+### Reasoning
+
+Reasoning content is streamed as `REASONING_*` events before the final answer.
+
+- `magistral-medium-latest` — Mid-tier reasoning model
+- `magistral-small-latest` — Small reasoning model
+
+See [Mistral's model comparison](https://docs.mistral.ai/getting-started/models/compare) for full details.
+
+## API Reference
+
+### `mistralText(model, config?)`
+
+Creates a Mistral text adapter using the `MISTRAL_API_KEY` environment variable.
+
+**Parameters:**
+
+- `model` — The model name (e.g., `'mistral-large-latest'`)
+- `config.serverURL?` — Custom base URL (optional)
+- `config.defaultHeaders?` — Headers to attach to every request (optional)
+
+**Returns:** A Mistral text adapter instance.
+
+### `createMistralText(model, apiKey, config?)`
+
+Creates a Mistral text adapter with an explicit API key.
+
+**Parameters:**
+
+- `model` — The model name
+- `apiKey` — Your Mistral API key
+- `config.serverURL?` — Custom base URL (optional)
+- `config.defaultHeaders?` — Headers to attach to every request (optional)
+
+**Returns:** A Mistral text adapter instance.
+
+## Limitations
+
+- **Embeddings**: Use the [Mistral SDK](https://github.com/mistralai/client-ts) directly for `mistral-embed`.
+- **Image / Audio / Video Generation**: Mistral does not provide these endpoints. Use OpenAI, Gemini, or fal.ai.
+- **Text-to-Speech / Transcription**: Not supported. Use OpenAI or ElevenLabs.
+
+## Next Steps
+
+- [Getting Started](../getting-started/quick-start) — Learn the basics
+- [Tools Guide](../tools/tools) — Define and call tools
+- [Structured Outputs](../chat/structured-outputs) — Generate typed JSON
+- [Multimodal Content](../advanced/multimodal-content) — Send images and other modalities
+- [Other Adapters](./openai) — Explore other providers
+
+## Provider Tools
+
+Mistral does not currently expose provider-specific tool factories.
+Define your own tools with `toolDefinition()` from `@tanstack/ai`.
+
+See [Tools](../tools/tools) for the general tool-definition flow, or
+[Provider Tools](../tools/provider-tools) for other providers'
+native-tool offerings.
diff --git a/docs/config.json b/docs/config.json
index f24a5fa0a..183ab65ae 100644
--- a/docs/config.json
+++ b/docs/config.json
@@ -274,6 +274,10 @@
         "label": "Groq",
         "to": "adapters/groq"
       },
+      {
+        "label": "Mistral",
+        "to": "adapters/mistral"
+      },
       {
         "label": "ElevenLabs",
         "to": "adapters/elevenlabs"
diff --git a/packages/typescript/ai-mistral/README.md b/packages/typescript/ai-mistral/README.md
new file mode 100644
index 000000000..977720c8d
--- /dev/null
+++ b/packages/typescript/ai-mistral/README.md
@@ -0,0 +1,93 @@
+# @tanstack/ai-mistral
+
+Mistral adapter for TanStack AI.
+
+## Installation
+
+```bash
+npm install @tanstack/ai-mistral
+# or
+pnpm add @tanstack/ai-mistral
+# or
+yarn add @tanstack/ai-mistral
+```
+
+## Setup
+
+Get your API key from [Mistral Console](https://console.mistral.ai/) and set it as an environment variable:
+
+```bash
+export MISTRAL_API_KEY="..."
+```
+
+## Usage
+
+### Text/Chat Adapter
+
+```typescript
+import { mistralText } from '@tanstack/ai-mistral'
+import { generate } from '@tanstack/ai'
+
+const adapter = mistralText('mistral-large-latest')
+
+const result = await generate({
+  adapter,
+  model: 'mistral-large-latest',
+  messages: [
+    { role: 'user', content: 'Explain quantum computing in simple terms' },
+  ],
+})
+
+console.log(result.text)
+```
+
+### With Explicit API Key
+
+```typescript
+import { createMistralText } from '@tanstack/ai-mistral'
+
+const adapter = createMistralText(
+  'mistral-large-latest',
+  process.env.MISTRAL_API_KEY!,
+)
+```
+
+## Supported Models
+
+### Chat Models
+
+- `mistral-large-latest` - Frontier flagship model (128k context)
+- `mistral-medium-latest` - Balanced multimodal model (vision)
+- `mistral-small-latest` - Fast, affordable multimodal model (vision)
+- `ministral-8b-latest` - 8B edge model
+- `ministral-3b-latest` - 3B edge model
+- `codestral-latest` - Code-specialized model (256k context)
+- `pixtral-large-latest` - Large vision model
+- `pixtral-12b-2409` - 12B vision model
+- `magistral-medium-latest` - Reasoning model
+- `magistral-small-latest` - Small reasoning model
+- `open-mistral-nemo` - Open 12B model
+
+See [Mistral model comparison](https://docs.mistral.ai/getting-started/models/compare) for full details.
+
+## Features
+
+- ✅ Streaming chat completions
+- ✅ Structured output (JSON Schema)
+- ✅ Function/tool calling
+- ✅ Reasoning (magistral-* models — streamed as `REASONING_*` events)
+- ✅ Multimodal input (text + images) — requires a vision-capable model (`pixtral-large-latest`, `pixtral-12b-2409`, `mistral-medium-latest`, or `mistral-small-latest`)
+- ❌ Embeddings (use [@mistralai/mistralai](https://github.com/mistralai/client-ts) directly)
+- ❌ Image generation
+
+## Tree-Shakeable Adapters
+
+This package uses tree-shakeable adapters, so you only import what you need:
+
+```typescript
+import { mistralText } from '@tanstack/ai-mistral'
+```
+
+## License
+
+MIT
diff --git a/packages/typescript/ai-mistral/package.json b/packages/typescript/ai-mistral/package.json
new file mode 100644
index 000000000..c94dfc12e
--- /dev/null
+++ b/packages/typescript/ai-mistral/package.json
@@ -0,0 +1,59 @@
+{
+  "name": "@tanstack/ai-mistral",
+  "version": "0.1.0",
+  "type": "module",
+  "description": "Mistral adapter for TanStack AI",
+  "author": "",
+  "license": "MIT",
+  "repository": {
+    "type": "git",
+    "url": "git+https://github.com/TanStack/ai.git",
+    "directory": "packages/typescript/ai-mistral"
+  },
+  "module": "./dist/esm/index.js",
+  "types": "./dist/esm/index.d.ts",
+  "exports": {
+    ".": {
+      "types": "./dist/esm/index.d.ts",
+      "import": "./dist/esm/index.js"
+    },
+    "./adapters/text": {
+      "types": "./dist/esm/adapters/text.d.ts",
+      "import": "./dist/esm/adapters/text.js"
+    }
+  },
+  "files": [
+    "dist",
+    "src"
+  ],
+  "scripts": {
+    "build": "vite build",
+    "clean": "premove ./build ./dist",
+    "lint:fix": "eslint ./src --fix",
+    "test:build": "publint --strict",
+    "test:eslint": "eslint ./src",
+    "test:lib": "vitest run",
+    "test:lib:dev": "pnpm test:lib --watch",
+    "test:types": "tsc"
+  },
+  "keywords": [
+    "ai",
+    "mistral",
+    "tanstack",
+    "adapter"
+  ],
"devDependencies": { + "@tanstack/ai": "workspace:*", + "@tanstack/ai-client": "workspace:*", + "@vitest/coverage-v8": "4.0.14", + "vite": "^7.2.7", + "zod": "^4.2.0" + }, + "peerDependencies": { + "@tanstack/ai": "workspace:^", + "zod": "^4.0.0" + }, + "dependencies": { + "@mistralai/mistralai": "^2.2.0" + } +} diff --git a/packages/typescript/ai-mistral/src/adapters/text.ts b/packages/typescript/ai-mistral/src/adapters/text.ts new file mode 100644 index 000000000..cb69b82ae --- /dev/null +++ b/packages/typescript/ai-mistral/src/adapters/text.ts @@ -0,0 +1,1142 @@ +import { BaseTextAdapter } from '@tanstack/ai/adapters' +import { convertToolsToProviderFormat } from '../tools' +import { + createMistralClient, + generateId, + getMistralApiKeyFromEnv, + makeMistralStructuredOutputCompatible, + transformNullsToUndefined, +} from '../utils' +import type { + ContentPart, + Modality, + ModelMessage, + StreamChunk, + TextOptions, +} from '@tanstack/ai' +import type { + MISTRAL_CHAT_MODELS, + MistralChatModelProviderOptionsByName, + MistralModelInputModalitiesByName, +} from '../model-meta' +import type { + StructuredOutputOptions, + StructuredOutputResult, +} from '@tanstack/ai/adapters' +import type { Mistral } from '@mistralai/mistralai' +import type { ChatCompletionStreamRequest } from '@mistralai/mistralai/models/components' +import type { + ExternalTextProviderOptions, + InternalTextProviderOptions, +} from '../text/text-provider-options' +import type { + ChatCompletionContentPart, + ChatCompletionMessageParam, + MistralImageMetadata, + MistralMessageMetadataByModality, +} from '../message-types' +import type { MistralClientConfig } from '../utils' + +/** Cast an event object to StreamChunk. Adapters construct events with string + * literal types which are structurally compatible with the EventType enum. */ +const asChunk = (chunk: Record) => + chunk as unknown as StreamChunk + +/** + * Parse the accumulated streaming arguments for a tool call. Throws a clear + * error if the JSON is malformed — silently substituting `{}` would let a + * tool fire with empty inputs, masking truncated streams or mis-shaped output. + */ +function parseToolCallInput(toolCall: { + id: string + name: string + arguments: string +}): unknown { + if (!toolCall.arguments) return {} + try { + return transformNullsToUndefined(JSON.parse(toolCall.arguments)) + } catch (cause) { + const preview = toolCall.arguments.slice(0, 200) + const ellipsis = toolCall.arguments.length > 200 ? '...' : '' + throw new Error( + `Failed to parse tool call arguments for tool '${toolCall.name}' (id: ${toolCall.id}). Arguments: ${preview}${ellipsis}`, + { cause }, + ) + } +} + +/** + * Configuration for Mistral text adapter. + */ +export type MistralTextConfig = MistralClientConfig + +/** + * Alias for TextProviderOptions for external use. + */ +export type MistralTextProviderOptions = ExternalTextProviderOptions + +// =========================== +// Type Resolution Helpers +// =========================== + +type ResolveProviderOptions = + TModel extends keyof MistralChatModelProviderOptionsByName + ? MistralChatModelProviderOptionsByName[TModel] + : MistralTextProviderOptions + +type ResolveInputModalities = + TModel extends keyof MistralModelInputModalitiesByName + ? MistralModelInputModalitiesByName[TModel] + : readonly ['text'] + +// =========================== +// Wire-format chunk types +// =========================== + +/** + * Snake-case shape of a Mistral chat completion stream chunk as returned on the + * wire. 
We bypass the SDK's `chat.stream` because its Zod validation rejects + * tool-call argument deltas that omit `function.name` (only the first chunk in + * a tool call carries the name). + */ +interface MistralRawToolCall { + id?: string + type?: string + index?: number + function?: { + name?: string + arguments?: string | Record + } +} + +interface MistralRawChoice { + index?: number + delta?: { + role?: string | null + content?: + | string + | Array<{ + type: string + text?: string + // Mistral magistral models stream reasoning as content parts of + // type 'thinking' whose `thinking` field is itself an array of + // text/reference chunks. See Mistral SDK ThinkChunk type. + thinking?: Array<{ type: string; text?: string }> + }> + | null + // Some OpenAI-compatible deployments (DeepSeek, Groq for reasoning + // models, and aimock-based test environments) emit reasoning via a + // separate `reasoning_content` delta field rather than as a content + // part. Accept both shapes — they cannot collide because real Mistral + // never sets the OpenAI-compat field, and aimock never sets the + // thinking content part. + reasoning_content?: string | null + tool_calls?: Array + } + finish_reason?: string | null +} + +interface MistralRawChunk { + id?: string + model?: string + choices?: Array + usage?: { + prompt_tokens?: number + completion_tokens?: number + total_tokens?: number + } +} + +// =========================== +// Adapter Implementation +// =========================== + +/** + * Mistral Text (Chat) Adapter. + * + * Tree-shakeable adapter for Mistral chat/text completion functionality. + */ +export class MistralTextAdapter< + TModel extends (typeof MISTRAL_CHAT_MODELS)[number], + TProviderOptions extends Record = ResolveProviderOptions, + TInputModalities extends ReadonlyArray = + ResolveInputModalities, +> extends BaseTextAdapter< + TModel, + TProviderOptions, + TInputModalities, + MistralMessageMetadataByModality +> { + readonly name = 'mistral' as const + + private client: Mistral + private rawConfig: MistralClientConfig + + constructor(config: MistralTextConfig, model: TModel) { + super(config, model) + // The SDK client is retained for `structuredOutput` (non-streaming). The + // streaming path bypasses the SDK and uses `fetchRawMistralStream` because + // the SDK's Zod schemas reject partial tool-call argument deltas. + this.client = createMistralClient(config) + this.rawConfig = config + } + + async *chatStream( + options: TextOptions, + ): AsyncIterable { + const requestParams = this.mapTextOptionsToMistral(options) + const timestamp = Date.now() + + const aguiState = { + runId: options.runId ?? generateId(this.name), + threadId: options.threadId ?? 
generateId(this.name), + messageId: generateId(this.name), + timestamp, + hasEmittedRunStarted: false, + } + + try { + const stream = this.fetchRawMistralStream(requestParams, this.rawConfig) + yield* this.processMistralStreamChunks(stream, options, aguiState) + } catch (error: unknown) { + const err = error as Error & { code?: string } + + if (!aguiState.hasEmittedRunStarted) { + aguiState.hasEmittedRunStarted = true + yield asChunk({ + type: 'RUN_STARTED', + runId: aguiState.runId, + threadId: aguiState.threadId, + model: options.model, + timestamp, + }) + } + + yield asChunk({ + type: 'RUN_ERROR', + runId: aguiState.runId, + model: options.model, + timestamp, + message: err.message || 'Unknown error', + code: err.code, + error: { + message: err.message || 'Unknown error', + code: err.code, + }, + }) + + throw err + } + } + + /** + * Generate structured output using Mistral's JSON Schema response format. + */ + async structuredOutput( + options: StructuredOutputOptions, + ): Promise> { + const { chatOptions, outputSchema } = options + const { stream: _stream, ...nonStreamParams } = + this.mapTextOptionsToMistral(chatOptions) + + const jsonSchema = makeMistralStructuredOutputCompatible( + outputSchema, + outputSchema.required || [], + ) + + const response = await this.client.chat.complete({ + ...nonStreamParams, + responseFormat: { + type: 'json_schema', + jsonSchema: { + name: 'structured_output', + schemaDefinition: jsonSchema, + strict: true, + }, + }, + }) + + const rawText = response.choices[0]?.message?.content + const textContent = typeof rawText === 'string' ? rawText : '' + + let parsed: unknown + try { + parsed = JSON.parse(textContent) + } catch { + throw new Error( + `Failed to parse structured output as JSON. Content: ${textContent.slice(0, 200)}${textContent.length > 200 ? '...' : ''}`, + ) + } + + return { + data: transformNullsToUndefined(parsed), + rawText: textContent, + } + } + + /** + * Processes streaming chunks from the Mistral API and yields AG-UI stream events. + */ + private async *processMistralStreamChunks( + stream: AsyncIterable, + options: TextOptions, + aguiState: { + runId: string + threadId: string + messageId: string + timestamp: number + hasEmittedRunStarted: boolean + }, + ): AsyncIterable { + let accumulatedContent = '' + const timestamp = aguiState.timestamp + let hasEmittedTextMessageStart = false + let hasEmittedTextMessageEnd = false + let hasEmittedToolCall = false + let hasEmittedRunFinished = false + let lastChunkModel = options.model + + // Reasoning lifecycle (magistral-* models stream `thinking` content + // parts before any text). Mirrors the anthropic adapter's pattern: + // open REASONING_* events on the first thinking delta, close them when + // text/tool content begins or the run finishes. 
+ let reasoningMessageId: string | null = null + let hasClosedReasoning = false + + const toolCallsInProgress = new Map< + number, + { + id: string + name: string + arguments: string + started: boolean + ended: boolean + } + >() + + try { + for await (const chunk of stream) { + lastChunkModel = chunk.model || options.model + const choice = chunk.choices?.[0] + if (!choice) continue + + const chunkModel = chunk.model || options.model + + if (!aguiState.hasEmittedRunStarted) { + aguiState.hasEmittedRunStarted = true + yield asChunk({ + type: 'RUN_STARTED', + runId: aguiState.runId, + threadId: aguiState.threadId, + model: chunkModel, + timestamp, + }) + } + + const delta = choice.delta + const { text: deltaContent, thinking: deltaThinkingFromContent } = + this.extractDeltaParts(delta?.content) + // Reasoning may also arrive as a separate top-level field + // (`delta.reasoning_content`) on OpenAI-compatible deployments. + const deltaThinking = + deltaThinkingFromContent + + (typeof delta?.reasoning_content === 'string' + ? delta.reasoning_content + : '') + const deltaToolCalls = delta?.tool_calls + + // Emit reasoning events FIRST so they always precede the matching + // text or tool deltas in the same chunk. + if (deltaThinking) { + if (reasoningMessageId === null) { + reasoningMessageId = generateId(this.name) + yield asChunk({ + type: 'REASONING_START', + messageId: reasoningMessageId, + model: chunkModel, + timestamp, + }) + yield asChunk({ + type: 'REASONING_MESSAGE_START', + messageId: reasoningMessageId, + role: 'reasoning', + model: chunkModel, + timestamp, + }) + } + yield asChunk({ + type: 'REASONING_MESSAGE_CONTENT', + messageId: reasoningMessageId, + model: chunkModel, + timestamp, + delta: deltaThinking, + }) + } + + // Close reasoning before any text/tool output starts in this chunk. + const aboutToEmitOutput = + !!deltaContent || (!!deltaToolCalls && deltaToolCalls.length > 0) + if ( + reasoningMessageId !== null && + !hasClosedReasoning && + aboutToEmitOutput + ) { + hasClosedReasoning = true + yield asChunk({ + type: 'REASONING_MESSAGE_END', + messageId: reasoningMessageId, + model: chunkModel, + timestamp, + }) + yield asChunk({ + type: 'REASONING_END', + messageId: reasoningMessageId, + model: chunkModel, + timestamp, + }) + } + + if (deltaContent) { + if (!hasEmittedTextMessageStart) { + hasEmittedTextMessageStart = true + yield asChunk({ + type: 'TEXT_MESSAGE_START', + messageId: aguiState.messageId, + model: chunkModel, + timestamp, + role: 'assistant', + }) + } + + accumulatedContent += deltaContent + + yield asChunk({ + type: 'TEXT_MESSAGE_CONTENT', + messageId: aguiState.messageId, + model: chunkModel, + timestamp, + delta: deltaContent, + content: accumulatedContent, + }) + } + + if (deltaToolCalls) { + for (let i = 0; i < deltaToolCalls.length; i++) { + const toolCallDelta = deltaToolCalls[i]! + const index = toolCallDelta.index ?? i + + if (!toolCallsInProgress.has(index)) { + toolCallsInProgress.set(index, { + id: toolCallDelta.id || '', + name: toolCallDelta.function?.name || '', + arguments: '', + started: false, + ended: false, + }) + } + + const toolCall = toolCallsInProgress.get(index)! + + if (toolCallDelta.id) toolCall.id = toolCallDelta.id + if (toolCallDelta.function?.name) { + toolCall.name = toolCallDelta.function.name + } + + const rawArgs = toolCallDelta.function?.arguments + const argsDelta = + rawArgs === undefined + ? undefined + : typeof rawArgs === 'string' + ? 
rawArgs + : JSON.stringify(rawArgs) + + if (argsDelta !== undefined) { + toolCall.arguments += argsDelta + } + + const justStarted = + !!toolCall.id && !!toolCall.name && !toolCall.started + if (justStarted) { + toolCall.started = true + yield asChunk({ + type: 'TOOL_CALL_START', + toolCallId: toolCall.id, + toolCallName: toolCall.name, + toolName: toolCall.name, + model: chunkModel, + timestamp, + index, + }) + // Replay any args buffered before id+name arrived (including + // this chunk's argsDelta, if any). + if (toolCall.arguments.length > 0) { + yield asChunk({ + type: 'TOOL_CALL_ARGS', + toolCallId: toolCall.id, + model: chunkModel, + timestamp, + delta: toolCall.arguments, + }) + } + } else if (argsDelta !== undefined && toolCall.started) { + yield asChunk({ + type: 'TOOL_CALL_ARGS', + toolCallId: toolCall.id, + model: chunkModel, + timestamp, + delta: argsDelta, + }) + } + } + } + + const finishReason = choice.finish_reason + if (finishReason) { + if (finishReason === 'tool_calls' || toolCallsInProgress.size > 0) { + for (const [, toolCall] of toolCallsInProgress) { + if ( + !toolCall.started || + !toolCall.id || + !toolCall.name || + toolCall.ended + ) { + continue + } + + const parsedInput = parseToolCallInput(toolCall) + + toolCall.ended = true + hasEmittedToolCall = true + yield asChunk({ + type: 'TOOL_CALL_END', + toolCallId: toolCall.id, + toolCallName: toolCall.name, + toolName: toolCall.name, + model: chunkModel, + timestamp, + input: parsedInput, + }) + } + } + + const computedFinishReason = + finishReason === 'tool_calls' || hasEmittedToolCall + ? 'tool_calls' + : finishReason === 'length' + ? 'length' + : 'stop' + + // If the run finished while reasoning was still open (no text or + // tool output ever followed), close reasoning before TEXT/RUN + // finalization events. + if (reasoningMessageId !== null && !hasClosedReasoning) { + hasClosedReasoning = true + yield asChunk({ + type: 'REASONING_MESSAGE_END', + messageId: reasoningMessageId, + model: chunkModel, + timestamp, + }) + yield asChunk({ + type: 'REASONING_END', + messageId: reasoningMessageId, + model: chunkModel, + timestamp, + }) + } + + if (hasEmittedTextMessageStart && !hasEmittedTextMessageEnd) { + hasEmittedTextMessageEnd = true + yield asChunk({ + type: 'TEXT_MESSAGE_END', + messageId: aguiState.messageId, + model: chunkModel, + timestamp, + }) + } + + const usage = chunk.usage + hasEmittedRunFinished = true + yield asChunk({ + type: 'RUN_FINISHED', + runId: aguiState.runId, + threadId: aguiState.threadId, + model: chunkModel, + timestamp, + usage: usage + ? { + promptTokens: usage.prompt_tokens || 0, + completionTokens: usage.completion_tokens || 0, + totalTokens: usage.total_tokens || 0, + } + : undefined, + finishReason: computedFinishReason, + }) + } + } + + // Stream ended cleanly without finish_reason — flush any open + // lifecycle events so consumers don't see orphaned starts. This + // happens for abrupt `[DONE]` or upstream cuts. 
+      if (!hasEmittedRunFinished) {
+        if (reasoningMessageId !== null && !hasClosedReasoning) {
+          hasClosedReasoning = true
+          yield asChunk({
+            type: 'REASONING_MESSAGE_END',
+            messageId: reasoningMessageId,
+            model: lastChunkModel,
+            timestamp,
+          })
+          yield asChunk({
+            type: 'REASONING_END',
+            messageId: reasoningMessageId,
+            model: lastChunkModel,
+            timestamp,
+          })
+        }
+        for (const [, toolCall] of toolCallsInProgress) {
+          if (toolCall.started && !toolCall.ended) {
+            toolCall.ended = true
+            hasEmittedToolCall = true
+            yield asChunk({
+              type: 'TOOL_CALL_END',
+              toolCallId: toolCall.id,
+              toolCallName: toolCall.name,
+              toolName: toolCall.name,
+              model: lastChunkModel,
+              timestamp,
+              input: parseToolCallInput(toolCall),
+            })
+          }
+        }
+        if (hasEmittedTextMessageStart && !hasEmittedTextMessageEnd) {
+          hasEmittedTextMessageEnd = true
+          yield asChunk({
+            type: 'TEXT_MESSAGE_END',
+            messageId: aguiState.messageId,
+            model: lastChunkModel,
+            timestamp,
+          })
+        }
+        hasEmittedRunFinished = true
+        yield asChunk({
+          type: 'RUN_FINISHED',
+          runId: aguiState.runId,
+          threadId: aguiState.threadId,
+          model: lastChunkModel,
+          timestamp,
+          usage: undefined,
+          finishReason: hasEmittedToolCall ? 'tool_calls' : 'stop',
+        })
+      }
+    } catch (error: unknown) {
+      // Lifecycle cleanup (TEXT_MESSAGE_END / TOOL_CALL_END / REASONING_END)
+      // on error path so consumers don't see orphaned starts. RUN_ERROR is
+      // emitted by the outer chatStream catch — emitting it here would
+      // duplicate the event.
+      if (reasoningMessageId !== null && !hasClosedReasoning) {
+        hasClosedReasoning = true
+        yield asChunk({
+          type: 'REASONING_MESSAGE_END',
+          messageId: reasoningMessageId,
+          model: lastChunkModel,
+          timestamp,
+        })
+        yield asChunk({
+          type: 'REASONING_END',
+          messageId: reasoningMessageId,
+          model: lastChunkModel,
+          timestamp,
+        })
+      }
+      if (hasEmittedTextMessageStart && !hasEmittedTextMessageEnd) {
+        hasEmittedTextMessageEnd = true
+        yield asChunk({
+          type: 'TEXT_MESSAGE_END',
+          messageId: aguiState.messageId,
+          model: lastChunkModel,
+          timestamp,
+        })
+      }
+      for (const [, toolCall] of toolCallsInProgress) {
+        if (toolCall.started && !toolCall.ended) {
+          toolCall.ended = true
+          // Best-effort parse for the partial args; if invalid, surface
+          // empty input rather than throwing inside the cleanup path.
+          let partialInput: unknown = {}
+          try {
+            partialInput = toolCall.arguments
+              ? transformNullsToUndefined(JSON.parse(toolCall.arguments))
+              : {}
+          } catch {
+            partialInput = {}
+          }
+          yield asChunk({
+            type: 'TOOL_CALL_END',
+            toolCallId: toolCall.id,
+            toolCallName: toolCall.name,
+            toolName: toolCall.name,
+            model: lastChunkModel,
+            timestamp,
+            input: partialInput,
+          })
+        }
+      }
+      throw error
+    }
+  }
+
+  /**
+   * Makes a raw fetch request to the Mistral chat completions endpoint and
+   * parses the SSE stream manually, bypassing the SDK's Zod validation which
+   * rejects streaming tool call chunks that omit `name` in argument deltas.
+   */
+  private async *fetchRawMistralStream(
+    params: ChatCompletionStreamRequest,
+    config: MistralClientConfig,
+  ): AsyncGenerator<MistralRawChunk> {
+    const serverURL = (config.serverURL ?? 'https://api.mistral.ai')
+      .replace(/\/+$/, '')
+      .replace(/\/v1$/, '')
+    const url = `${serverURL}/v1/chat/completions`
+
+    const body = this.toWireBody(params)
+
+    const headers: Record<string, string> = {
+      'Content-Type': 'application/json',
+      Authorization: `Bearer ${config.apiKey}`,
+      ...config.defaultHeaders,
+    }
+
+    const response = await fetch(url, {
+      method: 'POST',
+      headers,
+      body: JSON.stringify(body),
+    })
+
+    if (!response.ok) {
+      const errorText = await response.text()
+      throw new Error(`Mistral API error ${response.status}: ${errorText}`)
+    }
+
+    if (!response.body) {
+      throw new Error(
+        'Mistral API returned a response with no body. This may indicate a proxy or runtime that does not support streaming.',
+      )
+    }
+
+    const reader = response.body.getReader()
+    const decoder = new TextDecoder()
+    let buffer = ''
+
+    try {
+      for (;;) {
+        const { done, value } = await reader.read()
+        if (done) break
+
+        buffer += decoder.decode(value, { stream: true })
+        const lines = buffer.split('\n')
+        buffer = lines.pop()!
+
+        for (const line of lines) {
+          const trimmed = line.trim()
+          if (!trimmed.startsWith('data:')) continue
+          const data = trimmed.slice(5).trimStart()
+          if (data === '[DONE]') return
+
+          let parsed: unknown
+          try {
+            parsed = JSON.parse(data)
+          } catch (e) {
+            if (e instanceof SyntaxError) {
+              console.warn(
+                `[mistral] skipped unparseable SSE chunk: ${data.slice(0, 200)}`,
+              )
+              continue
+            }
+            throw e
+          }
+
+          // Mistral signals mid-stream errors via an `error` field. Surface
+          // them as RUN_ERROR rather than swallowing them as empty chunks.
+          if (
+            parsed &&
+            typeof parsed === 'object' &&
+            'error' in parsed &&
+            !('choices' in parsed)
+          ) {
+            const errPayload = (parsed as { error: unknown }).error
+            const message =
+              typeof errPayload === 'string'
+                ? errPayload
+                : errPayload &&
+                    typeof errPayload === 'object' &&
+                    'message' in errPayload
+                  ? String((errPayload as { message: unknown }).message)
+                  : JSON.stringify(errPayload)
+            throw new Error(`Mistral stream error: ${message}`)
+          }
+
+          yield parsed as MistralRawChunk
+        }
+      }
+    } finally {
+      await reader.cancel().catch(() => {})
+      reader.releaseLock()
+    }
+  }
+
+  /**
+   * Converts the SDK's camelCase `ChatCompletionStreamRequest` into the
+   * snake_case wire body, including converting messages.
+   */
+  private toWireBody(
+    params: ChatCompletionStreamRequest,
+  ): Record<string, unknown> {
+    const {
+      messages,
+      maxTokens,
+      topP,
+      randomSeed,
+      responseFormat,
+      toolChoice,
+      parallelToolCalls,
+      frequencyPenalty,
+      presencePenalty,
+      safePrompt,
+      stream: _stream,
+      ...rest
+    } = params
+
+    return {
+      ...rest,
+      messages: messages.map(messageToWire),
+      stream: true,
+      // Opt in to usage on the final streaming chunk.
+      stream_options: { include_usage: true },
+      ...(maxTokens != null && { max_tokens: maxTokens }),
+      ...(topP != null && { top_p: topP }),
+      ...(randomSeed != null && { random_seed: randomSeed }),
+      ...(responseFormat != null && { response_format: responseFormat }),
+      ...(toolChoice != null && { tool_choice: toolChoice }),
+      ...(parallelToolCalls != null && {
+        parallel_tool_calls: parallelToolCalls,
+      }),
+      ...(frequencyPenalty != null && { frequency_penalty: frequencyPenalty }),
+      ...(presencePenalty != null && { presence_penalty: presencePenalty }),
+      ...(safePrompt != null && { safe_prompt: safePrompt }),
+    }
+  }
+
+  /**
+   * Splits a Mistral delta content payload into text and reasoning deltas.
+   * Mistral reasoning models (magistral-*) stream reasoning content as
+   * `{ type: 'thinking', thinking: [{ type: 'text', text }, ...] }` content
+   * parts. A single delta may contain text only, thinking only, or — rarely —
+   * both (when a step transitions); both fields are returned so the caller
+   * can sequence REASONING and TEXT lifecycle events in order.
+   */
+  private extractDeltaParts(
+    content:
+      | string
+      | Array<{
+          type: string
+          text?: string
+          thinking?: Array<{ type: string; text?: string }>
+        }>
+      | null
+      | undefined,
+  ): { text: string; thinking: string } {
+    if (!content) return { text: '', thinking: '' }
+    if (typeof content === 'string') return { text: content, thinking: '' }
+
+    let text = ''
+    let thinking = ''
+    for (const part of content) {
+      if (part.type === 'text' && typeof part.text === 'string') {
+        text += part.text
+      } else if (part.type === 'thinking' && Array.isArray(part.thinking)) {
+        for (const inner of part.thinking) {
+          if (inner.type === 'text' && typeof inner.text === 'string') {
+            thinking += inner.text
+          }
+        }
+      }
+    }
+    return { text, thinking }
+  }
+
+  /**
+   * Maps common TextOptions to Mistral Chat Completions request parameters.
+   */
+  private mapTextOptionsToMistral(
+    options: TextOptions,
+  ): ChatCompletionStreamRequest {
+    const modelOptions = options.modelOptions as
+      | Omit<
+          InternalTextProviderOptions,
+          'messages' | 'model' | 'stream' | 'tools'
+        >
+      | undefined
+
+    const tools = options.tools
+      ? convertToolsToProviderFormat(options.tools)
+      : undefined
+
+    const messages: Array<ChatCompletionMessageParam> = []
+
+    if (options.systemPrompts && options.systemPrompts.length > 0) {
+      messages.push({
+        role: 'system',
+        content: options.systemPrompts.join('\n'),
+      })
+    }
+
+    for (const message of options.messages) {
+      messages.push(this.convertMessageToMistral(message))
+    }
+
+    return {
+      model: options.model,
+      messages: messages as ChatCompletionStreamRequest['messages'],
+      temperature:
+        options.temperature ?? modelOptions?.temperature ?? undefined,
+      maxTokens: options.maxTokens,
+      topP: options.topP ?? modelOptions?.top_p ?? undefined,
+      tools: tools as ChatCompletionStreamRequest['tools'],
+      stream: true,
+      ...(modelOptions && {
+        ...(modelOptions.stop != null && { stop: modelOptions.stop }),
+        ...(modelOptions.random_seed != null && {
+          randomSeed: modelOptions.random_seed,
+        }),
+        ...(modelOptions.response_format != null && {
+          responseFormat:
+            modelOptions.response_format as ChatCompletionStreamRequest['responseFormat'],
+        }),
+        ...(modelOptions.tool_choice != null && {
+          toolChoice:
+            modelOptions.tool_choice as ChatCompletionStreamRequest['toolChoice'],
+        }),
+        ...(modelOptions.parallel_tool_calls != null && {
+          parallelToolCalls: modelOptions.parallel_tool_calls,
+        }),
+        ...(modelOptions.frequency_penalty != null && {
+          frequencyPenalty: modelOptions.frequency_penalty,
+        }),
+        ...(modelOptions.presence_penalty != null && {
+          presencePenalty: modelOptions.presence_penalty,
+        }),
+        ...(modelOptions.n != null && { n: modelOptions.n }),
+        ...(modelOptions.prediction != null && {
+          prediction: modelOptions.prediction,
+        }),
+        ...(modelOptions.safe_prompt != null && {
+          safePrompt: modelOptions.safe_prompt,
+        }),
+      }),
+    }
+  }
+
+  /**
+   * Converts a TanStack AI ModelMessage to a Mistral ChatCompletionMessageParam.
+   */
+  private convertMessageToMistral(
+    message: ModelMessage,
+  ): ChatCompletionMessageParam {
+    if (message.role === 'tool') {
+      if (!message.toolCallId) {
+        throw new Error('Missing toolCallId for tool message')
+      }
+      return {
+        role: 'tool',
+        toolCallId: message.toolCallId,
+        content:
+          typeof message.content === 'string'
+            ? message.content
+            : JSON.stringify(message.content),
+      }
+    }
+
+    if (message.role === 'assistant') {
+      const toolCalls = message.toolCalls?.map((tc) => ({
+        id: tc.id,
+        type: 'function' as const,
+        function: {
+          name: tc.function.name,
+          arguments:
+            typeof tc.function.arguments === 'string'
+              ? tc.function.arguments
+              : JSON.stringify(tc.function.arguments),
+        },
+      }))
+
+      return {
+        role: 'assistant',
+        content: this.extractTextContent(message.content),
+        ...(toolCalls && toolCalls.length > 0 ? { toolCalls } : {}),
+      }
+    }
+
+    const contentParts = this.normalizeContent(message.content)
+
+    if (contentParts.length === 1 && contentParts[0]?.type === 'text') {
+      return {
+        role: 'user',
+        content: contentParts[0].content,
+      }
+    }
+
+    const parts = contentParts.map((part) =>
+      this.convertContentPartToMistral(part),
+    )
+
+    return {
+      role: 'user',
+      content: parts.length > 0 ? parts : '',
+    }
+  }
+
+  /**
+   * Converts a ContentPart to a Mistral content part. Throws for unsupported
+   * part types.
+   */
+  private convertContentPartToMistral(
+    part: ContentPart,
+  ): ChatCompletionContentPart {
+    if (part.type === 'text') {
+      return { type: 'text', text: part.content }
+    }
+
+    if (part.type === 'image') {
+      const imageMetadata = part.metadata as MistralImageMetadata | undefined
+      const imageValue = part.source.value
+      const imageUrl =
+        part.source.type === 'data' && !imageValue.startsWith('data:')
+          ? `data:${part.source.mimeType};base64,${imageValue}`
+          : imageValue
+      return {
+        type: 'image_url',
+        imageUrl: imageMetadata?.detail
+          ? { url: imageUrl, detail: imageMetadata.detail }
+          : imageUrl,
+      }
+    }
+
+    throw new Error(
+      `Mistral text adapter does not support content part of type '${(part as ContentPart).type}'. Supported types: text, image. Use a vision-capable model (pixtral-large-latest, pixtral-12b-2409, mistral-medium-latest, or mistral-small-latest) for images.`,
+    )
+  }
+
+  /**
+   * Normalizes message content to an array of ContentPart.
+   */
+  private normalizeContent(
+    content: string | null | Array<ContentPart>,
+  ): Array<ContentPart> {
+    if (content === null) return []
+    if (typeof content === 'string') return [{ type: 'text', content }]
+    return content
+  }
+
+  /**
+   * Extracts text content from a content value that may be string, null, or ContentPart array.
+   */
+  private extractTextContent(
+    content: string | null | Array<ContentPart>,
+  ): string {
+    if (content === null) return ''
+    if (typeof content === 'string') return content
+    return content
+      .filter((p) => p.type === 'text')
+      .map((p) => p.content)
+      .join('')
+  }
+}
+
+/**
+ * Snake-cases a Mistral SDK message into the wire format expected by the API.
+ */
+function messageToWire(msg: ChatCompletionStreamRequest['messages'][number]) {
+  if (msg.role === 'tool') {
+    return {
+      role: 'tool',
+      tool_call_id: msg.toolCallId,
+      content: msg.content,
+      ...(msg.name !== undefined ? { name: msg.name } : {}),
+    }
+  }
+  if (msg.role === 'assistant') {
+    const base: Record<string, unknown> = {
+      role: 'assistant',
+      content: msg.content ?? null,
+    }
+    if (msg.toolCalls && msg.toolCalls.length > 0) {
+      base.tool_calls = msg.toolCalls.map((tc) => ({
+        id: tc.id,
+        type: tc.type ?? 'function',
+        function: tc.function,
+      }))
+    }
+    if (msg.prefix !== undefined) base.prefix = msg.prefix
+    return base
+  }
+  if (msg.role === 'user' && Array.isArray(msg.content)) {
+    return {
+      role: 'user',
+      content: msg.content.map((part) => {
+        if (part.type === 'image_url') {
+          return { type: 'image_url', image_url: part.imageUrl }
+        }
+        if (part.type === 'document_url') {
+          return { type: 'document_url', document_url: part.documentUrl }
+        }
+        return part
+      }),
+    }
+  }
+  return msg
+}
+
+/**
+ * Creates a Mistral text adapter with explicit API key.
+ *
+ * @param model - The model name (e.g., 'mistral-large-latest')
+ * @param apiKey - Your Mistral API key
+ * @param config - Optional additional configuration
+ * @returns Configured Mistral text adapter instance
+ *
+ * @example
+ * ```typescript
+ * const adapter = createMistralText('mistral-large-latest', 'api_key');
+ * ```
+ */
+export function createMistralText<
+  TModel extends (typeof MISTRAL_CHAT_MODELS)[number],
+>(
+  model: TModel,
+  apiKey: string,
+  config?: Omit<MistralTextConfig, 'apiKey'>,
+): MistralTextAdapter<TModel> {
+  return new MistralTextAdapter({ apiKey, ...config }, model)
+}
+
+/**
+ * Creates a Mistral text adapter using the `MISTRAL_API_KEY` environment variable.
+ *
+ * @param model - The model name (e.g., 'mistral-large-latest')
+ * @param config - Optional configuration (excluding apiKey)
+ * @returns Configured Mistral text adapter instance
+ * @throws Error if MISTRAL_API_KEY is not found in environment
+ *
+ * @example
+ * ```typescript
+ * const adapter = mistralText('mistral-large-latest');
+ * ```
+ */
+export function mistralText<
+  TModel extends (typeof MISTRAL_CHAT_MODELS)[number],
+>(
+  model: TModel,
+  config?: Omit<MistralTextConfig, 'apiKey'>,
+): MistralTextAdapter<TModel> {
+  const apiKey = getMistralApiKeyFromEnv()
+  return createMistralText(model, apiKey, config)
+}
diff --git a/packages/typescript/ai-mistral/src/index.ts b/packages/typescript/ai-mistral/src/index.ts
new file mode 100644
index 000000000..d57f85f81
--- /dev/null
+++ b/packages/typescript/ai-mistral/src/index.ts
@@ -0,0 +1,33 @@
+/**
+ * @module @tanstack/ai-mistral
+ *
+ * Mistral provider adapter for TanStack AI.
+ * Provides tree-shakeable adapters for Mistral's Chat Completions API.
+ */
+
+// Text (Chat) adapter
+export {
+  MistralTextAdapter,
+  createMistralText,
+  mistralText,
+  type MistralTextConfig,
+  type MistralTextProviderOptions,
+} from './adapters/text'
+
+// Types
+export type {
+  MistralChatModelProviderOptionsByName,
+  MistralModelInputModalitiesByName,
+  ResolveProviderOptions,
+  ResolveInputModalities,
+  MistralChatModels,
+} from './model-meta'
+export { MISTRAL_CHAT_MODELS } from './model-meta'
+export type {
+  MistralTextMetadata,
+  MistralImageMetadata,
+  MistralAudioMetadata,
+  MistralVideoMetadata,
+  MistralDocumentMetadata,
+  MistralMessageMetadataByModality,
+} from './message-types'
diff --git a/packages/typescript/ai-mistral/src/message-types.ts b/packages/typescript/ai-mistral/src/message-types.ts
new file mode 100644
index 000000000..2b150282c
--- /dev/null
+++ b/packages/typescript/ai-mistral/src/message-types.ts
@@ -0,0 +1,194 @@
+/**
+ * Mistral-specific message types for the Chat Completions API.
+ *
+ * These types mirror the shape expected by the Mistral SDK (`@mistralai/mistralai`)
+ * and are used internally by the adapter to avoid tight coupling to the SDK's
+ * exported types.
+ *
+ * @see https://docs.mistral.ai/api/
+ */
+
+export interface ChatCompletionContentPartText {
+  /** The text content. */
+  text: string
+
+  /** The type of the content part. */
*/ + type: 'text' +} + +export interface ChatCompletionContentPartImage { + imageUrl: string | { url: string; detail?: 'auto' | 'low' | 'high' } + + /** The type of the content part. */ + type: 'image_url' +} + +export interface ChatCompletionContentPartDocumentUrl { + documentUrl: string + + /** The type of the content part. */ + type: 'document_url' +} + +export type ChatCompletionContentPart = + | ChatCompletionContentPartText + | ChatCompletionContentPartImage + | ChatCompletionContentPartDocumentUrl + +export interface ChatCompletionMessageToolCall { + /** The ID of the tool call. */ + id: string + + /** The type of the tool. Currently only `function` is supported. */ + type?: 'function' + + /** The function that the model called. */ + function: { + /** The name of the function to call. */ + name: string + + /** Arguments generated by the model as a JSON string. */ + arguments: string + } +} + +export type FunctionParameters = { [key: string]: unknown } + +export interface FunctionDefinition { + /** + * The name of the function. Must be a-z, A-Z, 0-9, or contain underscores and + * dashes, with a maximum length of 64. + */ + name: string + + /** A description of what the function does. */ + description?: string + + /** Function parameters defined as a JSON Schema object. */ + parameters?: FunctionParameters + + /** Whether to enable strict schema adherence. */ + strict?: boolean +} + +export interface ChatCompletionTool { + /** The type of the tool. */ + type: 'function' + + function: FunctionDefinition +} + +export interface ChatCompletionNamedToolChoice { + type: 'function' + function: { + name: string + } +} + +/** + * Controls which (if any) tool is called by the model. + * + * - `none` — never call tools + * - `auto` — model decides + * - `any` / `required` — model must call one or more tools + * - Named tool choice — forces a specific tool + */ +export type ChatCompletionToolChoiceOption = + | 'none' + | 'auto' + | 'any' + | 'required' + | ChatCompletionNamedToolChoice + +export interface ChatCompletionSystemMessageParam { + role: 'system' + content: string | Array +} + +export interface ChatCompletionUserMessageParam { + role: 'user' + content: string | Array +} + +export interface ChatCompletionAssistantMessageParam { + role: 'assistant' + content?: string | Array | null + toolCalls?: Array + prefix?: boolean +} + +export interface ChatCompletionToolMessageParam { + role: 'tool' + content: string | Array + toolCallId: string + name?: string +} + +export type ChatCompletionMessageParam = + | ChatCompletionSystemMessageParam + | ChatCompletionUserMessageParam + | ChatCompletionAssistantMessageParam + | ChatCompletionToolMessageParam + +export interface ResponseFormatText { + type: 'text' +} + +export interface ResponseFormatJsonObject { + type: 'json_object' +} + +export interface ResponseFormatJsonSchema { + type: 'json_schema' + jsonSchema: { + name: string + description?: string + schemaDefinition: { [key: string]: unknown } + strict?: boolean + } +} + +/** + * Metadata for Mistral text content parts. + */ +export type MistralTextMetadata = Record + +/** + * Metadata for Mistral image content parts. + */ +export interface MistralImageMetadata { + /** + * Specifies the detail level of the image. + * @default 'auto' + */ + detail?: 'auto' | 'low' | 'high' +} + +/** + * Metadata for Mistral audio content parts. + * Mistral does not currently support audio input. + */ +export type MistralAudioMetadata = Record + +/** + * Metadata for Mistral video content parts. 
+ * Mistral does not currently support video input. + */ +export type MistralVideoMetadata = Record + +/** + * Metadata for Mistral document content parts. + * Used with document understanding models via `document_url` parts. + */ +export type MistralDocumentMetadata = Record + +/** + * Map of modality types to their Mistral-specific metadata types. + */ +export interface MistralMessageMetadataByModality { + text: MistralTextMetadata + image: MistralImageMetadata + audio: MistralAudioMetadata + video: MistralVideoMetadata + document: MistralDocumentMetadata +} diff --git a/packages/typescript/ai-mistral/src/model-meta.ts b/packages/typescript/ai-mistral/src/model-meta.ts new file mode 100644 index 000000000..45ba0a56e --- /dev/null +++ b/packages/typescript/ai-mistral/src/model-meta.ts @@ -0,0 +1,285 @@ +import type { MistralTextProviderOptions } from './text/text-provider-options' + +/** Provider options for vision-capable Mistral models (pixtral-*). */ +export type MistralVisionProviderOptions = MistralTextProviderOptions + +/** Provider options for reasoning-capable Mistral models (magistral-*). */ +export type MistralReasoningProviderOptions = MistralTextProviderOptions + +/** + * Internal metadata structure describing a Mistral model's capabilities + * and approximate pricing (USD per million tokens). + */ +interface ModelMeta { + name: string + context_window?: number + max_completion_tokens?: number + pricing: { + input?: { normal: number; cached?: number } + output?: { normal: number } + } + supports: { + input: Array<'text' | 'image' | 'audio'> + output: Array<'text'> + endpoints: Array<'chat' | 'embeddings'> + + features: Array< + | 'streaming' + | 'tools' + | 'json_object' + | 'json_schema' + | 'reasoning' + | 'vision' + | 'code' + > + } + providerOptions?: TProviderOptions +} + +const MISTRAL_LARGE_LATEST = { + name: 'mistral-large-latest', + context_window: 131_072, + max_completion_tokens: 8_192, + pricing: { + input: { normal: 0.5 }, + output: { normal: 1.5 }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object', 'json_schema'], + }, +} as const satisfies ModelMeta + +const MISTRAL_MEDIUM_LATEST = { + name: 'mistral-medium-latest', + context_window: 131_072, + max_completion_tokens: 8_192, + pricing: { + input: { normal: 0.4 }, + output: { normal: 2 }, + }, + supports: { + input: ['text', 'image'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object', 'json_schema', 'vision'], + }, +} as const satisfies ModelMeta + +const MISTRAL_SMALL_LATEST = { + name: 'mistral-small-latest', + context_window: 131_072, + max_completion_tokens: 8_192, + pricing: { + input: { normal: 0.1 }, + output: { normal: 0.3 }, + }, + supports: { + input: ['text', 'image'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object', 'json_schema', 'vision'], + }, +} as const satisfies ModelMeta + +const MINISTRAL_8B_LATEST = { + name: 'ministral-8b-latest', + context_window: 131_072, + max_completion_tokens: 8_192, + pricing: { + input: { normal: 0.1 }, + output: { normal: 0.1 }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object', 'json_schema'], + }, +} as const satisfies ModelMeta + +const MINISTRAL_3B_LATEST = { + name: 'ministral-3b-latest', + context_window: 131_072, + max_completion_tokens: 8_192, + pricing: { + input: { normal: 0.04 }, + output: { normal: 0.04 }, + }, + 
supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object', 'json_schema'], + }, +} as const satisfies ModelMeta + +const CODESTRAL_LATEST = { + name: 'codestral-latest', + context_window: 256_000, + max_completion_tokens: 8_192, + pricing: { + input: { normal: 0.3 }, + output: { normal: 0.9 }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object', 'json_schema', 'code'], + }, +} as const satisfies ModelMeta + +const PIXTRAL_LARGE_LATEST = { + name: 'pixtral-large-latest', + context_window: 131_072, + max_completion_tokens: 8_192, + pricing: { + input: { normal: 2 }, + output: { normal: 6 }, + }, + supports: { + input: ['text', 'image'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object', 'json_schema', 'vision'], + }, +} as const satisfies ModelMeta + +const PIXTRAL_12B_2409 = { + name: 'pixtral-12b-2409', + context_window: 131_072, + max_completion_tokens: 8_192, + pricing: { + input: { normal: 0.15 }, + output: { normal: 0.15 }, + }, + supports: { + input: ['text', 'image'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object', 'vision'], + }, +} as const satisfies ModelMeta + +const MAGISTRAL_MEDIUM_LATEST = { + name: 'magistral-medium-latest', + context_window: 40_000, + max_completion_tokens: 40_000, + pricing: { + input: { normal: 2 }, + output: { normal: 5 }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'reasoning', 'json_object', 'json_schema'], + }, +} as const satisfies ModelMeta + +const MAGISTRAL_SMALL_LATEST = { + name: 'magistral-small-latest', + context_window: 40_000, + max_completion_tokens: 40_000, + pricing: { + input: { normal: 0.5 }, + output: { normal: 1.5 }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'reasoning', 'json_object', 'json_schema'], + }, +} as const satisfies ModelMeta + +const OPEN_MISTRAL_NEMO = { + name: 'open-mistral-nemo', + context_window: 131_072, + max_completion_tokens: 8_192, + pricing: { + input: { normal: 0.15 }, + output: { normal: 0.15 }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object'], + }, +} as const satisfies ModelMeta + +/** + * All supported Mistral chat model identifiers. + */ +export const MISTRAL_CHAT_MODELS = [ + MISTRAL_LARGE_LATEST.name, + MISTRAL_MEDIUM_LATEST.name, + MISTRAL_SMALL_LATEST.name, + MINISTRAL_8B_LATEST.name, + MINISTRAL_3B_LATEST.name, + CODESTRAL_LATEST.name, + PIXTRAL_LARGE_LATEST.name, + PIXTRAL_12B_2409.name, + MAGISTRAL_MEDIUM_LATEST.name, + MAGISTRAL_SMALL_LATEST.name, + OPEN_MISTRAL_NEMO.name, +] as const + +/** + * Union type of all supported Mistral chat model names. + */ +export type MistralChatModels = (typeof MISTRAL_CHAT_MODELS)[number] + +/** + * Type-only map from Mistral chat model name to its supported input modalities. 
+ */ +export type MistralModelInputModalitiesByName = { + [MISTRAL_LARGE_LATEST.name]: typeof MISTRAL_LARGE_LATEST.supports.input + [MISTRAL_MEDIUM_LATEST.name]: typeof MISTRAL_MEDIUM_LATEST.supports.input + [MISTRAL_SMALL_LATEST.name]: typeof MISTRAL_SMALL_LATEST.supports.input + [MINISTRAL_8B_LATEST.name]: typeof MINISTRAL_8B_LATEST.supports.input + [MINISTRAL_3B_LATEST.name]: typeof MINISTRAL_3B_LATEST.supports.input + [CODESTRAL_LATEST.name]: typeof CODESTRAL_LATEST.supports.input + [PIXTRAL_LARGE_LATEST.name]: typeof PIXTRAL_LARGE_LATEST.supports.input + [PIXTRAL_12B_2409.name]: typeof PIXTRAL_12B_2409.supports.input + [MAGISTRAL_MEDIUM_LATEST.name]: typeof MAGISTRAL_MEDIUM_LATEST.supports.input + [MAGISTRAL_SMALL_LATEST.name]: typeof MAGISTRAL_SMALL_LATEST.supports.input + [OPEN_MISTRAL_NEMO.name]: typeof OPEN_MISTRAL_NEMO.supports.input +} + +/** + * Type-only map from Mistral chat model name to its provider options type. + */ +export type MistralChatModelProviderOptionsByName = { + [MISTRAL_LARGE_LATEST.name]: MistralTextProviderOptions + [MISTRAL_MEDIUM_LATEST.name]: MistralVisionProviderOptions + [MISTRAL_SMALL_LATEST.name]: MistralVisionProviderOptions + [MINISTRAL_8B_LATEST.name]: MistralTextProviderOptions + [MINISTRAL_3B_LATEST.name]: MistralTextProviderOptions + [CODESTRAL_LATEST.name]: MistralTextProviderOptions + [PIXTRAL_LARGE_LATEST.name]: MistralVisionProviderOptions + [PIXTRAL_12B_2409.name]: MistralVisionProviderOptions + [MAGISTRAL_MEDIUM_LATEST.name]: MistralReasoningProviderOptions + [MAGISTRAL_SMALL_LATEST.name]: MistralReasoningProviderOptions + [OPEN_MISTRAL_NEMO.name]: MistralTextProviderOptions +} + +/** + * Resolves the provider options type for a specific Mistral model. + */ +export type ResolveProviderOptions = + TModel extends keyof MistralChatModelProviderOptionsByName + ? MistralChatModelProviderOptionsByName[TModel] + : MistralTextProviderOptions + +/** + * Resolve input modalities for a specific model. + */ +export type ResolveInputModalities = + TModel extends keyof MistralModelInputModalitiesByName + ? MistralModelInputModalitiesByName[TModel] + : readonly ['text'] diff --git a/packages/typescript/ai-mistral/src/text/text-provider-options.ts b/packages/typescript/ai-mistral/src/text/text-provider-options.ts new file mode 100644 index 000000000..223d32797 --- /dev/null +++ b/packages/typescript/ai-mistral/src/text/text-provider-options.ts @@ -0,0 +1,103 @@ +import type { + ChatCompletionMessageParam, + ChatCompletionTool, + ChatCompletionToolChoiceOption, + ResponseFormatJsonObject, + ResponseFormatJsonSchema, + ResponseFormatText, +} from '../message-types' + +/** + * Mistral-specific provider options for text/chat models. + * + * @see https://docs.mistral.ai/api/ + */ +export interface MistralTextProviderOptions { + /** + * Sampling temperature. The default varies by model; lower values make output + * more deterministic. We recommend altering this OR `top_p`, not both. + */ + temperature?: number | null + + /** + * Nucleus sampling — consider the tokens with `top_p` probability mass. + */ + top_p?: number | null + + /** + * The maximum number of tokens to generate. + */ + max_tokens?: number | null + + /** + * Stop sequences where the API will stop generating further tokens. + */ + stop?: string | Array | null + + /** + * A seed for deterministic sampling. Repeated requests with the same seed + * and parameters should return the same result (best-effort). + */ + random_seed?: number | null + + /** + * Specifies the format the model must output. 
+ */ + response_format?: + | ResponseFormatText + | ResponseFormatJsonSchema + | ResponseFormatJsonObject + | null + + /** + * Controls which (if any) tool is called by the model. + */ + tool_choice?: ChatCompletionToolChoiceOption | null + + /** + * Whether parallel tool calls are allowed during tool use. + */ + parallel_tool_calls?: boolean | null + + /** + * Number between -2.0 and 2.0. Positive values penalize tokens based on + * their frequency in the text so far. + */ + frequency_penalty?: number | null + + /** + * Number between -2.0 and 2.0. Positive values penalize tokens based on + * whether they appear in the text so far. + */ + presence_penalty?: number | null + + /** + * How many chat completion choices to generate for each input message. + */ + n?: number | null + + /** + * Prediction — used to speed up generation with speculative decoding. + */ + prediction?: { type: 'content'; content: string } | null + + /** + * Safe prompt — enables safety guarding injected into the system prompt. + */ + safe_prompt?: boolean | null +} + +/** + * Internal options interface used for validation within the adapter. + */ +export interface InternalTextProviderOptions extends MistralTextProviderOptions { + messages: Array + model: string + stream?: boolean | null + tools?: Array +} + +/** + * External provider options (what users pass in). + */ +export type ExternalTextProviderOptions = MistralTextProviderOptions diff --git a/packages/typescript/ai-mistral/src/tools/function-tool.ts b/packages/typescript/ai-mistral/src/tools/function-tool.ts new file mode 100644 index 000000000..8e2da9d89 --- /dev/null +++ b/packages/typescript/ai-mistral/src/tools/function-tool.ts @@ -0,0 +1,42 @@ +import { makeMistralStructuredOutputCompatible } from '../utils/schema-converter' +import type { JSONSchema, Tool } from '@tanstack/ai' +import type { ChatCompletionTool } from '../message-types' + +export type FunctionTool = ChatCompletionTool + +/** + * Converts a standard Tool to Mistral ChatCompletionTool format. + * + * Tool schemas are already converted to JSON Schema in the ai layer. + * We apply Mistral-specific transformations for strict mode: + * - All properties in required array + * - Optional fields made nullable + * - additionalProperties: false + */ +export function convertFunctionToolToAdapterFormat(tool: Tool): FunctionTool { + const baseSchema = (tool.inputSchema ?? { + type: 'object', + properties: {}, + required: [], + }) as JSONSchema + + const inputSchema: JSONSchema = + baseSchema.type === 'object' && !baseSchema.properties + ? 
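+        // Object schemas that omit `properties` get an explicit empty map so
+        // the strict-mode normalization below has something to walk.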
{ ...baseSchema, properties: {} } + : { ...baseSchema } + + const jsonSchema = makeMistralStructuredOutputCompatible( + inputSchema, + inputSchema.required || [], + ) + + return { + type: 'function', + function: { + name: tool.name, + description: tool.description, + parameters: jsonSchema, + strict: true, + }, + } satisfies FunctionTool +} diff --git a/packages/typescript/ai-mistral/src/tools/index.ts b/packages/typescript/ai-mistral/src/tools/index.ts new file mode 100644 index 000000000..c90334153 --- /dev/null +++ b/packages/typescript/ai-mistral/src/tools/index.ts @@ -0,0 +1,5 @@ +export { + convertFunctionToolToAdapterFormat, + type FunctionTool, +} from './function-tool' +export { convertToolsToProviderFormat } from './tool-converter' diff --git a/packages/typescript/ai-mistral/src/tools/tool-converter.ts b/packages/typescript/ai-mistral/src/tools/tool-converter.ts new file mode 100644 index 000000000..c852f0643 --- /dev/null +++ b/packages/typescript/ai-mistral/src/tools/tool-converter.ts @@ -0,0 +1,14 @@ +import { convertFunctionToolToAdapterFormat } from './function-tool' +import type { FunctionTool } from './function-tool' +import type { Tool } from '@tanstack/ai' + +/** + * Converts an array of standard Tools to Mistral-specific format. + */ +export function convertToolsToProviderFormat( + tools: Array, +): Array { + return tools.map((tool) => { + return convertFunctionToolToAdapterFormat(tool) + }) +} diff --git a/packages/typescript/ai-mistral/src/utils/client.ts b/packages/typescript/ai-mistral/src/utils/client.ts new file mode 100644 index 000000000..f619571dd --- /dev/null +++ b/packages/typescript/ai-mistral/src/utils/client.ts @@ -0,0 +1,70 @@ +import { HTTPClient, Mistral } from '@mistralai/mistralai' + +export interface MistralClientConfig { + /** Mistral API key. */ + apiKey: string + + /** Optional server URL override. */ + serverURL?: string + + /** Optional request timeout (ms). */ + timeoutMs?: number + + /** Optional default headers to include with every request. */ + defaultHeaders?: Record +} + +/** + * Creates a Mistral SDK client instance. + */ +export function createMistralClient(config: MistralClientConfig): Mistral { + const { apiKey, serverURL, timeoutMs, defaultHeaders } = config + + let httpClient: HTTPClient | undefined + if (defaultHeaders && Object.keys(defaultHeaders).length > 0) { + httpClient = new HTTPClient() + httpClient.addHook('beforeRequest', (req) => { + for (const [key, value] of Object.entries(defaultHeaders)) { + req.headers.set(key, value) + } + return req + }) + } + + return new Mistral({ + apiKey, + ...(serverURL !== undefined ? { serverURL } : {}), + ...(timeoutMs !== undefined ? { timeoutMs } : {}), + ...(httpClient !== undefined ? { httpClient } : {}), + }) +} + +/** + * Gets Mistral API key from environment variables. + * @throws Error if MISTRAL_API_KEY is not found + */ +export function getMistralApiKeyFromEnv(): string { + let key: string | undefined + + if (typeof process !== 'undefined' && typeof process.env !== 'undefined') { + key = process.env.MISTRAL_API_KEY + } else { + const g = globalThis as { window?: { env?: Record } } + key = g.window?.env?.MISTRAL_API_KEY + } + + if (!key) { + throw new Error( + 'MISTRAL_API_KEY is required. In Node.js set it as an environment variable; in browser environments inject it via window.env.MISTRAL_API_KEY or use the factory function with an explicit API key.', + ) + } + + return key +} + +/** + * Generates a unique ID with a prefix. 
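+ * Uses `crypto.randomUUID()`, so a runtime with the Web Crypto API is
+ * required (Node 19+, modern browsers, edge runtimes).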
+ */ +export function generateId(prefix: string): string { + return `${prefix}-${crypto.randomUUID()}` +} diff --git a/packages/typescript/ai-mistral/src/utils/index.ts b/packages/typescript/ai-mistral/src/utils/index.ts new file mode 100644 index 000000000..1cb28cc0a --- /dev/null +++ b/packages/typescript/ai-mistral/src/utils/index.ts @@ -0,0 +1,10 @@ +export { + createMistralClient, + getMistralApiKeyFromEnv, + generateId, + type MistralClientConfig, +} from './client' +export { + makeMistralStructuredOutputCompatible, + transformNullsToUndefined, +} from './schema-converter' diff --git a/packages/typescript/ai-mistral/src/utils/schema-converter.ts b/packages/typescript/ai-mistral/src/utils/schema-converter.ts new file mode 100644 index 000000000..f5b3d442d --- /dev/null +++ b/packages/typescript/ai-mistral/src/utils/schema-converter.ts @@ -0,0 +1,134 @@ +/** + * Recursively transform null values to undefined in an object. + * + * This is needed because Mistral's structured output may require optional + * fields to be declared nullable. When Mistral returns null for optional + * fields, we convert them back to undefined to match the original Zod schema. + */ +export function transformNullsToUndefined(obj: T): T { + if (obj === null) { + return undefined as unknown as T + } + + if (Array.isArray(obj)) { + // Preserve array length and indices — converting null elements to + // undefined slots rather than dropping them. `Array` schemas + // depend on positional alignment. + return obj.map((item) => transformNullsToUndefined(item)) as unknown as T + } + + if ( + typeof obj === 'object' && + Object.getPrototypeOf(obj) === Object.prototype + ) { + // Preserve every key — `null` values become `undefined` values, but the + // key itself is not removed. Schemas distinguishing absent vs explicit + // null rely on this. + const result: Record = {} + for (const [key, value] of Object.entries(obj as Record)) { + result[key] = transformNullsToUndefined(value) + } + return result as T + } + + return obj +} + +/** + * Transform a JSON schema to be compatible with Mistral's structured output + * requirements when `strict: true` is used. + * + * Mistral (in strict mode) requires: + * - All properties must be in the `required` array + * - Optional fields should have null added to their type union + * - additionalProperties must be false for objects + */ +export function makeMistralStructuredOutputCompatible( + schema: Record, + originalRequired: Array = [], +): Record { + const result = { ...schema } + + if (result.type === 'object') { + if (!result.properties) { + result.properties = {} + } + const properties = { ...result.properties } + const allPropertyNames = Object.keys(properties) + + for (const propName of allPropertyNames) { + const prop = properties[propName] + const wasOptional = !originalRequired.includes(propName) + + if (prop.type === 'object' && prop.properties) { + const converted = makeMistralStructuredOutputCompatible( + prop, + prop.required || [], + ) + if (wasOptional) { + properties[propName] = { + ...converted, + type: Array.isArray(converted.type) + ? converted.type.includes('null') + ? 
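+                  // 'null' is already in the type union; keep it as-is.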
converted.type + : [...converted.type, 'null'] + : [converted.type, 'null'], + } + } else { + properties[propName] = converted + } + } else if (prop.type === 'array' && prop.items) { + const converted = { + ...prop, + items: makeMistralStructuredOutputCompatible( + prop.items, + prop.items.required || [], + ), + } + if (wasOptional) { + properties[propName] = { + ...converted, + type: Array.isArray(converted.type) + ? converted.type.includes('null') + ? converted.type + : [...converted.type, 'null'] + : [converted.type, 'null'], + } + } else { + properties[propName] = converted + } + } else if (wasOptional) { + if (prop.type && !Array.isArray(prop.type)) { + properties[propName] = { + ...prop, + type: [prop.type, 'null'], + } + } else if (Array.isArray(prop.type) && !prop.type.includes('null')) { + properties[propName] = { + ...prop, + type: [...prop.type, 'null'], + } + } else if (!prop.type) { + properties[propName] = { anyOf: [prop, { type: 'null' }] } + } + } + } + + result.properties = properties + if (allPropertyNames.length > 0) { + result.required = allPropertyNames + } else { + delete result.required + } + result.additionalProperties = false + } + + if (result.type === 'array' && result.items) { + result.items = makeMistralStructuredOutputCompatible( + result.items, + result.items.required || [], + ) + } + + return result +} diff --git a/packages/typescript/ai-mistral/tests/mistral-adapter.test.ts b/packages/typescript/ai-mistral/tests/mistral-adapter.test.ts new file mode 100644 index 000000000..9edf18923 --- /dev/null +++ b/packages/typescript/ai-mistral/tests/mistral-adapter.test.ts @@ -0,0 +1,1237 @@ +import { + describe, + it, + expect, + vi, + afterEach, + beforeEach, + type Mock, +} from 'vitest' +import { createMistralText, mistralText } from '../src/adapters/text' +import { transformNullsToUndefined } from '../src/utils/schema-converter' +import type { StreamChunk, Tool, TextOptions } from '@tanstack/ai' +import type { MistralTextProviderOptions } from '../src/adapters/text' + +/** + * Builds chat options for tests. `chatStream`'s `TextOptions` requires fields + * (e.g. `logger`) that the adapter only consults when provided; the cast + * lets tests focus on the inputs they actually exercise without rebuilding + * a full options object on every call. + */ +function chatOpts( + opts: Partial> & { + model: string + messages: Array<{ + role: 'user' | 'assistant' | 'tool' + content: unknown + toolCallId?: string + toolCalls?: Array + }> + }, +): TextOptions { + return opts as unknown as TextOptions +} + +// Declare mocks at module level +let mockComplete: Mock<(...args: Array) => unknown> + +// Mock the Mistral SDK (constructor still used for structuredOutput) +vi.mock('@mistralai/mistralai', () => { + return { + Mistral: class { + chat = { + complete: (...args: Array) => mockComplete(...args), + } + HTTPClient = class {} + }, + HTTPClient: class { + addHook() {} + }, + } +}) + +function toApiChunk(chunk: Record): Record { + const choices = (chunk.choices as Array>) ?? [] + const result: Record = { + id: chunk.id, + model: chunk.model, + object: 'chat.completion.chunk', + created: 0, + choices: choices.map((choice) => { + const delta = (choice.delta as Record) ?? {} + const toolCalls = delta.toolCalls as + | Array> + | undefined + return { + index: choice.index ?? 0, + delta: { + role: delta.role, + content: delta.content, + ...(toolCalls ? { tool_calls: toolCalls } : {}), + }, + finish_reason: choice.finishReason ?? 
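+        // An absent finishReason maps to an explicit null, matching the
+        // snake_case wire format the adapter parses.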
null, + } + }), + } + if (chunk.usage) { + const u = chunk.usage as Record + result.usage = { + prompt_tokens: u.promptTokens, + completion_tokens: u.completionTokens, + total_tokens: u.totalTokens, + } + } + return result +} + +function setupMockStream(chunks: Array>) { + const sseBody = + chunks.map((c) => `data: ${JSON.stringify(toApiChunk(c))}`).join('\n\n') + + '\n\ndata: [DONE]\n\n' + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + status: 200, + body: new ReadableStream({ + start(controller) { + controller.enqueue(new TextEncoder().encode(sseBody)) + controller.close() + }, + }), + }), + ) + mockComplete = vi.fn() +} + +const weatherTool: Tool = { + name: 'lookup_weather', + description: 'Return the forecast for a location', +} + +describe('Mistral adapters', () => { + afterEach(() => { + vi.unstubAllEnvs() + vi.unstubAllGlobals() + }) + + describe('Text adapter', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + it('creates a text adapter with explicit API key', () => { + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + + expect(adapter).toBeDefined() + expect(adapter.kind).toBe('text') + expect(adapter.name).toBe('mistral') + expect(adapter.model).toBe('mistral-large-latest') + }) + + it('creates a text adapter from environment variable', () => { + vi.stubEnv('MISTRAL_API_KEY', 'env-api-key') + + const adapter = mistralText('ministral-8b-latest') + + expect(adapter).toBeDefined() + expect(adapter.kind).toBe('text') + expect(adapter.model).toBe('ministral-8b-latest') + }) + + it('throws if MISTRAL_API_KEY is not set when using mistralText', () => { + vi.stubEnv('MISTRAL_API_KEY', '') + + expect(() => mistralText('mistral-large-latest')).toThrow( + 'MISTRAL_API_KEY is required', + ) + }) + + it('allows custom serverURL override', () => { + const adapter = createMistralText( + 'mistral-large-latest', + 'test-api-key', + { + serverURL: 'https://custom.api.example.com', + }, + ) + + expect(adapter).toBeDefined() + }) + }) +}) + +describe('Mistral AG-UI event emission', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + afterEach(() => { + vi.unstubAllEnvs() + vi.unstubAllGlobals() + }) + + it('emits RUN_STARTED as the first event', async () => { + const streamChunks = [ + { + id: 'cmpl-123', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { content: 'Hello' }, + finishReason: null, + }, + ], + }, + { + id: 'cmpl-123', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: {}, + finishReason: 'stop', + }, + ], + usage: { + promptTokens: 5, + completionTokens: 1, + totalTokens: 6, + }, + }, + ] + + setupMockStream(streamChunks) + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + const chunks: Array = [] + + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Hello' }], + }), + )) { + chunks.push(chunk) + } + + expect(chunks[0]?.type).toBe('RUN_STARTED') + if (chunks[0]?.type === 'RUN_STARTED') { + expect(chunks[0].runId).toBeDefined() + expect(chunks[0].model).toBe('mistral-large-latest') + } + }) + + it('emits TEXT_MESSAGE_START before TEXT_MESSAGE_CONTENT', async () => { + const streamChunks = [ + { + id: 'cmpl-123', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { content: 'Hello' }, + finishReason: null, + }, + ], + }, + { + id: 'cmpl-123', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: {}, + finishReason: 'stop', + }, + ], + 
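+        // Usage arrives on the terminal chunk, alongside finish_reason.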
usage: { + promptTokens: 5, + completionTokens: 1, + totalTokens: 6, + }, + }, + ] + + setupMockStream(streamChunks) + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + const chunks: Array = [] + + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Hello' }], + }), + )) { + chunks.push(chunk) + } + + const textStartIndex = chunks.findIndex( + (c) => c.type === 'TEXT_MESSAGE_START', + ) + const textContentIndex = chunks.findIndex( + (c) => c.type === 'TEXT_MESSAGE_CONTENT', + ) + + expect(textStartIndex).toBeGreaterThan(-1) + expect(textContentIndex).toBeGreaterThan(-1) + expect(textStartIndex).toBeLessThan(textContentIndex) + }) + + it('emits TEXT_MESSAGE_END and RUN_FINISHED at the end', async () => { + const streamChunks = [ + { + id: 'cmpl-123', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { content: 'Hello' }, + finishReason: null, + }, + ], + }, + { + id: 'cmpl-123', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: {}, + finishReason: 'stop', + }, + ], + usage: { + promptTokens: 5, + completionTokens: 1, + totalTokens: 6, + }, + }, + ] + + setupMockStream(streamChunks) + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + const chunks: Array = [] + + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Hello' }], + }), + )) { + chunks.push(chunk) + } + + const textEndChunk = chunks.find((c) => c.type === 'TEXT_MESSAGE_END') + expect(textEndChunk).toBeDefined() + + const runFinishedChunk = chunks.find((c) => c.type === 'RUN_FINISHED') + expect(runFinishedChunk).toBeDefined() + if (runFinishedChunk?.type === 'RUN_FINISHED') { + expect(runFinishedChunk.finishReason).toBe('stop') + expect(runFinishedChunk.usage).toMatchObject({ + promptTokens: 5, + completionTokens: 1, + totalTokens: 6, + }) + } + }) + + it('emits AG-UI tool call events', async () => { + const streamChunks = [ + { + id: 'cmpl-456', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { + toolCalls: [ + { + index: 0, + id: 'call_abc123', + type: 'function', + function: { + name: 'lookup_weather', + arguments: '{"location":', + }, + }, + ], + }, + finishReason: null, + }, + ], + }, + { + id: 'cmpl-456', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { + toolCalls: [ + { + index: 0, + function: { + arguments: '"Berlin"}', + }, + }, + ], + }, + finishReason: null, + }, + ], + }, + { + id: 'cmpl-456', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: {}, + finishReason: 'tool_calls', + }, + ], + usage: { + promptTokens: 10, + completionTokens: 5, + totalTokens: 15, + }, + }, + ] + + setupMockStream(streamChunks) + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + const chunks: Array = [] + + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Weather in Berlin?' 
}], + tools: [weatherTool], + }), + )) { + chunks.push(chunk) + } + + const toolStartChunk = chunks.find((c) => c.type === 'TOOL_CALL_START') + expect(toolStartChunk).toBeDefined() + if (toolStartChunk?.type === 'TOOL_CALL_START') { + expect(toolStartChunk.toolCallId).toBe('call_abc123') + expect(toolStartChunk.toolName).toBe('lookup_weather') + } + + const toolArgsChunks = chunks.filter((c) => c.type === 'TOOL_CALL_ARGS') + expect(toolArgsChunks.length).toBeGreaterThan(0) + + const toolEndChunk = chunks.find((c) => c.type === 'TOOL_CALL_END') + expect(toolEndChunk).toBeDefined() + if (toolEndChunk?.type === 'TOOL_CALL_END') { + expect(toolEndChunk.toolCallId).toBe('call_abc123') + expect(toolEndChunk.toolName).toBe('lookup_weather') + expect(toolEndChunk.input).toEqual({ location: 'Berlin' }) + } + + const runFinishedChunk = chunks.find((c) => c.type === 'RUN_FINISHED') + expect(runFinishedChunk).toBeDefined() + if (runFinishedChunk?.type === 'RUN_FINISHED') { + expect(runFinishedChunk.finishReason).toBe('tool_calls') + } + }) + + it('emits RUN_ERROR on stream error', async () => { + const firstChunk = JSON.stringify({ + id: 'cmpl-123', + model: 'mistral-large-latest', + choices: [{ index: 0, delta: { content: 'Hello' }, finish_reason: null }], + }) + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + status: 200, + body: new ReadableStream({ + start(controller) { + controller.enqueue( + new TextEncoder().encode(`data: ${firstChunk}\n\n`), + ) + controller.error(new Error('Stream interrupted')) + }, + }), + }), + ) + mockComplete = vi.fn() + + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + const chunks: Array = [] + let thrownError: Error | undefined + + try { + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Hello' }], + }), + )) { + chunks.push(chunk) + } + } catch (err) { + thrownError = err as Error + } + + expect(thrownError).toBeDefined() + expect(thrownError?.message).toBe('Stream interrupted') + + const runErrorChunk = chunks.find((c) => c.type === 'RUN_ERROR') + expect(runErrorChunk).toBeDefined() + if (runErrorChunk?.type === 'RUN_ERROR') { + expect(runErrorChunk.error?.message).toBe('Stream interrupted') + } + }) + + it('streams content with correct accumulated values', async () => { + const streamChunks = [ + { + id: 'cmpl-stream', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { content: 'Hello ' }, + finishReason: null, + }, + ], + }, + { + id: 'cmpl-stream', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { content: 'world' }, + finishReason: null, + }, + ], + }, + { + id: 'cmpl-stream', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: {}, + finishReason: 'stop', + }, + ], + usage: { + promptTokens: 5, + completionTokens: 2, + totalTokens: 7, + }, + }, + ] + + setupMockStream(streamChunks) + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + const chunks: Array = [] + + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Say hello' }], + }), + )) { + chunks.push(chunk) + } + + const contentChunks = chunks.filter( + (c) => c.type === 'TEXT_MESSAGE_CONTENT', + ) + expect(contentChunks.length).toBe(2) + + const firstContent = contentChunks[0] + if (firstContent?.type === 'TEXT_MESSAGE_CONTENT') { + expect(firstContent.delta).toBe('Hello ') + 
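+      // `delta` is this chunk's increment; `content` is the running total.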
expect(firstContent.content).toBe('Hello ') + } + + const secondContent = contentChunks[1] + if (secondContent?.type === 'TEXT_MESSAGE_CONTENT') { + expect(secondContent.delta).toBe('world') + expect(secondContent.content).toBe('Hello world') + } + }) + + it('emits exactly one RUN_ERROR on stream error (no duplicates from inner+outer catch)', async () => { + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + status: 200, + body: new ReadableStream({ + start(controller) { + controller.enqueue( + new TextEncoder().encode( + `data: ${JSON.stringify( + toApiChunk({ + id: 'cmpl-dup', + model: 'mistral-large-latest', + choices: [ + { index: 0, delta: { content: 'x' }, finishReason: null }, + ], + }), + )}\n\n`, + ), + ) + controller.error(new Error('boom')) + }, + }), + }), + ) + + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + const chunks: Array = [] + + try { + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Hi' }], + }), + )) { + chunks.push(chunk) + } + } catch { + // expected + } + + const runErrors = chunks.filter((c) => c.type === 'RUN_ERROR') + expect(runErrors).toHaveLength(1) + }) + + it('flushes TEXT_MESSAGE_END and RUN_FINISHED when stream ends without finish_reason', async () => { + // Stream emits content, then [DONE] without ever sending a finish_reason + // chunk. Consumers must still receive matched lifecycle events. + const sseBody = `data: ${JSON.stringify( + toApiChunk({ + id: 'cmpl-cut', + model: 'mistral-large-latest', + choices: [ + { index: 0, delta: { content: 'partial' }, finishReason: null }, + ], + }), + )}\n\ndata: [DONE]\n\n` + + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + status: 200, + body: new ReadableStream({ + start(controller) { + controller.enqueue(new TextEncoder().encode(sseBody)) + controller.close() + }, + }), + }), + ) + mockComplete = vi.fn() + + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + const chunks: Array = [] + + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Go' }], + }), + )) { + chunks.push(chunk) + } + + const types: Array = chunks.map((c) => c.type) + expect(types).toContain('TEXT_MESSAGE_START') + expect(types).toContain('TEXT_MESSAGE_END') + expect(types).toContain('RUN_FINISHED') + expect(types.indexOf('TEXT_MESSAGE_END')).toBeLessThan( + types.indexOf('RUN_FINISHED'), + ) + }) + + it('replays buffered tool-call args when arguments arrive before id and name', async () => { + // First delta carries arguments fragment but no id/name; second delta + // brings id+name; third closes the call. Consumers tracking ARGS deltas + // must see the buffered prefix replayed once START is emitted. 
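+    // (TOOL_CALL_START carries the tool name, so it cannot fire until the
+    // name arrives; hence the buffering.)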
+ setupMockStream([ + { + id: 'cmpl-replay', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { + toolCalls: [ + { + index: 0, + function: { arguments: '{"loc' }, + }, + ], + }, + finishReason: null, + }, + ], + }, + { + id: 'cmpl-replay', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { + toolCalls: [ + { + index: 0, + id: 'tc_1', + function: { name: 'lookup_weather' }, + }, + ], + }, + finishReason: null, + }, + ], + }, + { + id: 'cmpl-replay', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { + toolCalls: [ + { index: 0, function: { arguments: 'ation":"Berlin"}' } }, + ], + }, + finishReason: 'tool_calls', + }, + ], + }, + ]) + + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + const chunks: Array = [] + + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Weather in Berlin?' }], + tools: [weatherTool], + }), + )) { + chunks.push(chunk) + } + + const argsChunks = chunks.filter((c) => c.type === 'TOOL_CALL_ARGS') + const concatenated = argsChunks + .map((c) => + c.type === 'TOOL_CALL_ARGS' ? (c as { delta: string }).delta : '', + ) + .join('') + expect(concatenated).toBe('{"location":"Berlin"}') + + // The TOOL_CALL_END input must reflect the full JSON + const endChunk = chunks.find((c) => c.type === 'TOOL_CALL_END') + if (endChunk?.type === 'TOOL_CALL_END') { + expect(endChunk.input).toEqual({ location: 'Berlin' }) + } + }) + + it('throws on malformed tool-call arguments rather than silently substituting {}', async () => { + setupMockStream([ + { + id: 'cmpl-bad', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { + toolCalls: [ + { + index: 0, + id: 'tc_bad', + function: { name: 'lookup_weather', arguments: '{not json' }, + }, + ], + }, + finishReason: 'tool_calls', + }, + ], + }, + ]) + + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + let caught: Error | undefined + try { + for await (const _chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'x' }], + tools: [weatherTool], + }), + )) { + // drain + } + } catch (err) { + caught = err as Error + } + + expect(caught).toBeDefined() + expect(caught?.message).toMatch(/Failed to parse tool call arguments/) + expect(caught?.message).toMatch(/lookup_weather/) + }) + + it('sends stream_options.include_usage so Mistral returns usage on streaming', async () => { + let capturedBody: unknown + vi.stubGlobal( + 'fetch', + vi.fn(async (_url: string, init?: { body?: string }) => { + capturedBody = init?.body ? 
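+          // Snapshot the serialized request so the assertions below can
+          // inspect the exact wire payload.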
JSON.parse(init.body) : undefined + return { + ok: true, + status: 200, + body: new ReadableStream({ + start(controller) { + controller.enqueue(new TextEncoder().encode('data: [DONE]\n\n')) + controller.close() + }, + }), + } + }), + ) + mockComplete = vi.fn() + + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + for await (const _chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Hi' }], + }), + )) { + // drain + } + + expect(capturedBody).toMatchObject({ + stream: true, + stream_options: { include_usage: true }, + }) + }) + + it('reads temperature and top_p from modelOptions when not set at top level', async () => { + let capturedBody: { temperature?: number; top_p?: number } | undefined + vi.stubGlobal( + 'fetch', + vi.fn(async (_url: string, init?: { body?: string }) => { + capturedBody = init?.body ? JSON.parse(init.body) : undefined + return { + ok: true, + status: 200, + body: new ReadableStream({ + start(controller) { + controller.enqueue(new TextEncoder().encode('data: [DONE]\n\n')) + controller.close() + }, + }), + } + }), + ) + mockComplete = vi.fn() + + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + for await (const _chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Hi' }], + modelOptions: { temperature: 0.42, top_p: 0.9 }, + }), + )) { + // drain + } + + expect(capturedBody?.temperature).toBe(0.42) + expect(capturedBody?.top_p).toBe(0.9) + }) + + it('throws a clear error for unsupported content part types', async () => { + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + status: 200, + body: new ReadableStream({ + start(controller) { + controller.enqueue(new TextEncoder().encode('data: [DONE]\n\n')) + controller.close() + }, + }), + }), + ) + mockComplete = vi.fn() + + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + let caught: Error | undefined + try { + for await (const _chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [ + { + role: 'user', + content: [ + { + type: 'audio', + source: { type: 'url', value: 'https://example.com/a.mp3' }, + }, + ], + }, + ], + }), + )) { + // drain + } + } catch (err) { + caught = err as Error + } + + expect(caught).toBeDefined() + expect(caught?.message).toMatch( + /Mistral text adapter does not support content part of type 'audio'/, + ) + }) +}) + +describe('Mistral reasoning (magistral-* models)', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + afterEach(() => { + vi.unstubAllEnvs() + vi.unstubAllGlobals() + }) + + it('emits REASONING_* events when delta.content contains thinking parts, before any TEXT_MESSAGE_*', async () => { + // Magistral streaming format: delta.content is an array containing + // `{ type: 'thinking', thinking: [{ type: 'text', text: '...' }] }`. + // We build the SSE body by hand because `toApiChunk` strips non-text parts. + const sseChunks: Array> = [ + { + id: 'cmpl-think-1', + model: 'magistral-medium-latest', + object: 'chat.completion.chunk', + created: 0, + choices: [ + { + index: 0, + delta: { + content: [ + { + type: 'thinking', + thinking: [{ type: 'text', text: 'Let me think... 
' }], + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'cmpl-think-1', + model: 'magistral-medium-latest', + object: 'chat.completion.chunk', + created: 0, + choices: [ + { + index: 0, + delta: { + content: [ + { + type: 'thinking', + thinking: [{ type: 'text', text: 'the answer is 42.' }], + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'cmpl-think-1', + model: 'magistral-medium-latest', + object: 'chat.completion.chunk', + created: 0, + choices: [ + { + index: 0, + delta: { content: 'The answer is 42.' }, + finish_reason: null, + }, + ], + }, + { + id: 'cmpl-think-1', + model: 'magistral-medium-latest', + object: 'chat.completion.chunk', + created: 0, + choices: [{ index: 0, delta: {}, finish_reason: 'stop' }], + }, + ] + + const sseBody = + sseChunks.map((c) => `data: ${JSON.stringify(c)}`).join('\n\n') + + '\n\ndata: [DONE]\n\n' + + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + status: 200, + body: new ReadableStream({ + start(controller) { + controller.enqueue(new TextEncoder().encode(sseBody)) + controller.close() + }, + }), + }), + ) + mockComplete = vi.fn() + + const adapter = createMistralText('magistral-medium-latest', 'test-api-key') + const chunks: Array = [] + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'magistral-medium-latest', + messages: [{ role: 'user', content: 'What is the answer?' }], + }), + )) { + chunks.push(chunk) + } + + const types: Array = chunks.map((c) => c.type) + + // Reasoning lifecycle is present and ordered correctly + expect(types).toContain('REASONING_START') + expect(types).toContain('REASONING_MESSAGE_START') + expect(types).toContain('REASONING_MESSAGE_CONTENT') + expect(types).toContain('REASONING_MESSAGE_END') + expect(types).toContain('REASONING_END') + + expect(types.indexOf('REASONING_START')).toBeLessThan( + types.indexOf('REASONING_MESSAGE_CONTENT'), + ) + // REASONING_END must precede TEXT_MESSAGE_START + expect(types.indexOf('REASONING_END')).toBeLessThan( + types.indexOf('TEXT_MESSAGE_START'), + ) + + // Reasoning content reassembles correctly + const reasoningDeltas = chunks.filter( + (c) => c.type === 'REASONING_MESSAGE_CONTENT', + ) + const reasoningText = reasoningDeltas + .map((c) => + c.type === 'REASONING_MESSAGE_CONTENT' + ? (c as { delta: string }).delta + : '', + ) + .join('') + expect(reasoningText).toBe('Let me think... the answer is 42.') + }) + + it('emits REASONING_* events when the upstream uses delta.reasoning_content (OpenAI-compat / aimock format)', async () => { + // OpenAI-compatible deployments (DeepSeek, Groq for reasoning models, + // and the aimock test backend) stream reasoning via delta.reasoning_content + // rather than as a thinking content part. The adapter must accept both + // shapes for the e2e suite to run against aimock. + const sseChunks: Array> = [ + { + id: 'cmpl-rc', + model: 'magistral-medium-latest', + object: 'chat.completion.chunk', + created: 0, + choices: [ + { + index: 0, + delta: { reasoning_content: 'Considering options...' }, + finish_reason: null, + }, + ], + }, + { + id: 'cmpl-rc', + model: 'magistral-medium-latest', + object: 'chat.completion.chunk', + created: 0, + choices: [ + { + index: 0, + delta: { content: 'Final answer.' 
}, + finish_reason: null, + }, + ], + }, + { + id: 'cmpl-rc', + model: 'magistral-medium-latest', + object: 'chat.completion.chunk', + created: 0, + choices: [{ index: 0, delta: {}, finish_reason: 'stop' }], + }, + ] + const sseBody = + sseChunks.map((c) => `data: ${JSON.stringify(c)}`).join('\n\n') + + '\n\ndata: [DONE]\n\n' + + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + status: 200, + body: new ReadableStream({ + start(controller) { + controller.enqueue(new TextEncoder().encode(sseBody)) + controller.close() + }, + }), + }), + ) + mockComplete = vi.fn() + + const adapter = createMistralText('magistral-medium-latest', 'test-api-key') + const chunks: Array = [] + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'magistral-medium-latest', + messages: [{ role: 'user', content: 'Decide.' }], + }), + )) { + chunks.push(chunk) + } + + const types: Array = chunks.map((c) => c.type) + expect(types).toContain('REASONING_MESSAGE_CONTENT') + expect(types.indexOf('REASONING_END')).toBeLessThan( + types.indexOf('TEXT_MESSAGE_START'), + ) + + const reasoningText = chunks + .filter((c) => c.type === 'REASONING_MESSAGE_CONTENT') + .map((c) => + c.type === 'REASONING_MESSAGE_CONTENT' + ? (c as { delta: string }).delta + : '', + ) + .join('') + expect(reasoningText).toBe('Considering options...') + }) + + it('closes reasoning lifecycle if the run finishes while still in thinking', async () => { + const sseChunks: Array> = [ + { + id: 'cmpl-think-only', + model: 'magistral-medium-latest', + object: 'chat.completion.chunk', + created: 0, + choices: [ + { + index: 0, + delta: { + content: [ + { + type: 'thinking', + thinking: [{ type: 'text', text: 'pondering...' }], + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'cmpl-think-only', + model: 'magistral-medium-latest', + object: 'chat.completion.chunk', + created: 0, + choices: [{ index: 0, delta: {}, finish_reason: 'stop' }], + }, + ] + const sseBody = + sseChunks.map((c) => `data: ${JSON.stringify(c)}`).join('\n\n') + + '\n\ndata: [DONE]\n\n' + + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + status: 200, + body: new ReadableStream({ + start(controller) { + controller.enqueue(new TextEncoder().encode(sseBody)) + controller.close() + }, + }), + }), + ) + mockComplete = vi.fn() + + const adapter = createMistralText('magistral-medium-latest', 'test-api-key') + const chunks: Array = [] + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'magistral-medium-latest', + messages: [{ role: 'user', content: 'Just think.' 
}], + }), + )) { + chunks.push(chunk) + } + + const types: Array = chunks.map((c) => c.type) + expect(types).toContain('REASONING_END') + expect(types).toContain('RUN_FINISHED') + expect(types.indexOf('REASONING_END')).toBeLessThan( + types.indexOf('RUN_FINISHED'), + ) + // No TEXT_MESSAGE_START — the run was reasoning-only + expect(types).not.toContain('TEXT_MESSAGE_START') + }) +}) + +describe('transformNullsToUndefined (regression coverage)', () => { + it('preserves array length and indices — null elements become undefined slots', () => { + const input = ['a', null, 'b', null] + const out = transformNullsToUndefined(input) + expect(out).toHaveLength(4) + expect(out[0]).toBe('a') + expect(out[1]).toBeUndefined() + expect(out[2]).toBe('b') + expect(out[3]).toBeUndefined() + }) + + it('preserves object keys whose values were null — value becomes undefined, key remains', () => { + const input = { a: 1, b: null, c: 'x' } + const out = transformNullsToUndefined(input) as Record + expect(Object.keys(out).sort()).toEqual(['a', 'b', 'c']) + expect(out.a).toBe(1) + expect(out.b).toBeUndefined() + expect(out.c).toBe('x') + }) + + it('recurses into nested arrays and objects', () => { + const input = { items: [{ x: null, y: 1 }, null, { x: 2, y: null }] } + const out = transformNullsToUndefined(input) as { + items: Array<{ x: unknown; y: unknown } | undefined> + } + expect(out.items).toHaveLength(3) + expect(out.items[0]).toEqual({ x: undefined, y: 1 }) + expect(out.items[1]).toBeUndefined() + expect(out.items[2]).toEqual({ x: 2, y: undefined }) + }) +}) diff --git a/packages/typescript/ai-mistral/tsconfig.json b/packages/typescript/ai-mistral/tsconfig.json new file mode 100644 index 000000000..9028fa3bd --- /dev/null +++ b/packages/typescript/ai-mistral/tsconfig.json @@ -0,0 +1,8 @@ +{ + "extends": "../../../tsconfig.json", + "compilerOptions": { + "outDir": "dist" + }, + "include": ["src/**/*.ts", "src/**/*.tsx", "tests/**/*.ts"], + "exclude": ["node_modules", "dist", "**/*.config.ts"] +} diff --git a/packages/typescript/ai-mistral/vite.config.ts b/packages/typescript/ai-mistral/vite.config.ts new file mode 100644 index 000000000..77bcc2e60 --- /dev/null +++ b/packages/typescript/ai-mistral/vite.config.ts @@ -0,0 +1,36 @@ +import { defineConfig, mergeConfig } from 'vitest/config' +import { tanstackViteConfig } from '@tanstack/vite-config' +import packageJson from './package.json' + +const config = defineConfig({ + test: { + name: packageJson.name, + dir: './', + watch: false, + globals: true, + environment: 'node', + include: ['tests/**/*.test.ts'], + coverage: { + provider: 'v8', + reporter: ['text', 'json', 'html', 'lcov'], + exclude: [ + 'node_modules/', + 'dist/', + 'tests/', + '**/*.test.ts', + '**/*.config.ts', + '**/types.ts', + ], + include: ['src/**/*.ts'], + }, + }, +}) + +export default mergeConfig( + config, + tanstackViteConfig({ + entry: ['./src/index.ts'], + srcDir: './src', + cjs: false, + }), +) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index eb41b0817..3791b4fa6 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1278,6 +1278,28 @@ importers: specifier: 4.0.14 version: 4.0.14(vitest@4.1.4) + packages/typescript/ai-mistral: + dependencies: + '@mistralai/mistralai': + specifier: ^2.2.0 + version: 2.2.0 + devDependencies: + '@tanstack/ai': + specifier: workspace:* + version: link:../ai + '@tanstack/ai-client': + specifier: workspace:* + version: link:../ai-client + '@vitest/coverage-v8': + specifier: 4.0.14 + version: 4.0.14(vitest@4.1.4) + vite: + specifier: ^7.2.7 + version: 
7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + zod: + specifier: ^4.2.0 + version: 4.3.6 + packages/typescript/ai-ollama: dependencies: ollama: @@ -1693,6 +1715,9 @@ importers: '@tanstack/ai-groq': specifier: workspace:* version: link:../../packages/typescript/ai-groq + '@tanstack/ai-mistral': + specifier: workspace:* + version: link:../../packages/typescript/ai-mistral '@tanstack/ai-ollama': specifier: workspace:* version: link:../../packages/typescript/ai-ollama @@ -3381,6 +3406,9 @@ packages: '@microsoft/tsdoc@0.15.1': resolution: {integrity: sha512-4aErSrCR/On/e5G2hDP0wjooqDdauzEbIq8hIkIe5pXV0rtWJZvdCEKL0ykZxex+IxIwBp0eGeV48hQN07dXtw==} + '@mistralai/mistralai@2.2.0': + resolution: {integrity: sha512-JQUGIXjFWnw/J9LpTSf/ZXwVW3Sh8FBAcfTo5QvAHqkl4CfSiIwnjRJhMoAFcP6ncCe84YPU1ncDGX+p3OXnfg==} + '@msgpack/msgpack@3.1.3': resolution: {integrity: sha512-47XIizs9XZXvuJgoaJUIE2lFoID8ugvc0jzSHP+Ptfk8nTbnR8g788wv48N03Kx0UkAv559HWRQ3yzOgzlRNUA==} engines: {node: '>= 18'} @@ -11987,6 +12015,11 @@ packages: resolution: {integrity: sha512-zK7YHHz4ZXpW89AHXUPbQVGKI7uvkd3hzusTdotCg1UxyaVtg0zFJSTfW/Dq5f7OBBVnq6cZIaC8Ti4hb6dtCA==} engines: {node: '>= 14'} + zod-to-json-schema@3.25.2: + resolution: {integrity: sha512-O/PgfnpT1xKSDeQYSCfRI5Gy3hPf91mKVDuYLUHZJMiDFptvP41MSnWofm8dnCm0256ZNfZIM7DSzuSMAFnjHA==} + peerDependencies: + zod: ^3.25.28 || ^4 + zod@3.25.76: resolution: {integrity: sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==} @@ -13312,6 +13345,15 @@ snapshots: '@microsoft/tsdoc@0.15.1': {} + '@mistralai/mistralai@2.2.0': + dependencies: + ws: 8.19.0 + zod: 4.3.6 + zod-to-json-schema: 3.25.2(zod@4.3.6) + transitivePeerDependencies: + - bufferutil + - utf-8-validate + '@msgpack/msgpack@3.1.3': {} '@napi-rs/wasm-runtime@0.2.12': @@ -14609,17 +14651,17 @@ snapshots: '@rolldown/pluginutils@1.0.0-rc.17': {} - '@rollup/plugin-alias@5.1.1(rollup@4.57.1)': + '@rollup/plugin-alias@5.1.1(rollup@4.60.1)': optionalDependencies: - rollup: 4.57.1 + rollup: 4.60.1 '@rollup/plugin-alias@6.0.0(rollup@4.57.1)': optionalDependencies: rollup: 4.57.1 - '@rollup/plugin-commonjs@28.0.9(rollup@4.57.1)': + '@rollup/plugin-commonjs@28.0.9(rollup@4.60.1)': dependencies: - '@rollup/pluginutils': 5.3.0(rollup@4.57.1) + '@rollup/pluginutils': 5.3.0(rollup@4.60.1) commondir: 1.0.1 estree-walker: 2.0.2 fdir: 6.5.0(picomatch@4.0.4) @@ -14627,7 +14669,7 @@ snapshots: magic-string: 0.30.21 picomatch: 4.0.4 optionalDependencies: - rollup: 4.57.1 + rollup: 4.60.1 '@rollup/plugin-commonjs@29.0.0(rollup@4.57.1)': dependencies: @@ -14649,12 +14691,26 @@ snapshots: optionalDependencies: rollup: 4.57.1 + '@rollup/plugin-inject@5.0.5(rollup@4.60.1)': + dependencies: + '@rollup/pluginutils': 5.3.0(rollup@4.60.1) + estree-walker: 2.0.2 + magic-string: 0.30.21 + optionalDependencies: + rollup: 4.60.1 + '@rollup/plugin-json@6.1.0(rollup@4.57.1)': dependencies: '@rollup/pluginutils': 5.3.0(rollup@4.57.1) optionalDependencies: rollup: 4.57.1 + '@rollup/plugin-json@6.1.0(rollup@4.60.1)': + dependencies: + '@rollup/pluginutils': 5.3.0(rollup@4.60.1) + optionalDependencies: + rollup: 4.60.1 + '@rollup/plugin-node-resolve@16.0.3(rollup@4.57.1)': dependencies: '@rollup/pluginutils': 5.3.0(rollup@4.57.1) @@ -14665,6 +14721,16 @@ snapshots: optionalDependencies: rollup: 4.57.1 + '@rollup/plugin-node-resolve@16.0.3(rollup@4.60.1)': + dependencies: + '@rollup/pluginutils': 5.3.0(rollup@4.60.1) + '@types/resolve': 1.20.2 + deepmerge: 4.3.1 + is-module: 
1.0.0 + resolve: 1.22.11 + optionalDependencies: + rollup: 4.60.1 + '@rollup/plugin-replace@6.0.3(rollup@4.57.1)': dependencies: '@rollup/pluginutils': 5.3.0(rollup@4.57.1) @@ -14672,6 +14738,13 @@ snapshots: optionalDependencies: rollup: 4.57.1 + '@rollup/plugin-replace@6.0.3(rollup@4.60.1)': + dependencies: + '@rollup/pluginutils': 5.3.0(rollup@4.60.1) + magic-string: 0.30.21 + optionalDependencies: + rollup: 4.60.1 + '@rollup/plugin-terser@0.4.4(rollup@4.57.1)': dependencies: serialize-javascript: 6.0.2 @@ -14680,6 +14753,14 @@ snapshots: optionalDependencies: rollup: 4.57.1 + '@rollup/plugin-terser@0.4.4(rollup@4.60.1)': + dependencies: + serialize-javascript: 6.0.2 + smob: 1.5.0 + terser: 5.44.1 + optionalDependencies: + rollup: 4.60.1 + '@rollup/pluginutils@5.3.0(rollup@4.57.1)': dependencies: '@types/estree': 1.0.8 @@ -17078,10 +17159,10 @@ snapshots: '@unrs/resolver-binding-win32-x64-msvc@1.11.1': optional: true - '@vercel/nft@0.30.4(rollup@4.57.1)': + '@vercel/nft@0.30.4(rollup@4.60.1)': dependencies: '@mapbox/node-pre-gyp': 2.0.3 - '@rollup/pluginutils': 5.3.0(rollup@4.57.1) + '@rollup/pluginutils': 5.3.0(rollup@4.60.1) acorn: 8.15.0 acorn-import-attributes: 1.9.5(acorn@8.15.0) async-sema: 3.1.1 @@ -19287,14 +19368,14 @@ snapshots: h3@2.0.1-rc.14(crossws@0.4.4(srvx@0.10.1)): dependencies: rou3: 0.7.12 - srvx: 0.11.2 + srvx: 0.11.15 optionalDependencies: crossws: 0.4.4(srvx@0.10.1) h3@2.0.1-rc.14(crossws@0.4.5(srvx@0.11.15)): dependencies: rou3: 0.7.12 - srvx: 0.11.2 + srvx: 0.11.15 optionalDependencies: crossws: 0.4.5(srvx@0.11.15) @@ -20745,14 +20826,14 @@ snapshots: nitropack@2.12.9(rolldown@1.0.0-rc.17): dependencies: '@cloudflare/kv-asset-handler': 0.4.1 - '@rollup/plugin-alias': 5.1.1(rollup@4.57.1) - '@rollup/plugin-commonjs': 28.0.9(rollup@4.57.1) - '@rollup/plugin-inject': 5.0.5(rollup@4.57.1) - '@rollup/plugin-json': 6.1.0(rollup@4.57.1) - '@rollup/plugin-node-resolve': 16.0.3(rollup@4.57.1) - '@rollup/plugin-replace': 6.0.3(rollup@4.57.1) - '@rollup/plugin-terser': 0.4.4(rollup@4.57.1) - '@vercel/nft': 0.30.4(rollup@4.57.1) + '@rollup/plugin-alias': 5.1.1(rollup@4.60.1) + '@rollup/plugin-commonjs': 28.0.9(rollup@4.60.1) + '@rollup/plugin-inject': 5.0.5(rollup@4.60.1) + '@rollup/plugin-json': 6.1.0(rollup@4.60.1) + '@rollup/plugin-node-resolve': 16.0.3(rollup@4.60.1) + '@rollup/plugin-replace': 6.0.3(rollup@4.60.1) + '@rollup/plugin-terser': 0.4.4(rollup@4.60.1) + '@vercel/nft': 0.30.4(rollup@4.60.1) archiver: 7.0.1 c12: 3.3.2(magicast@0.5.2) chokidar: 4.0.3 @@ -20794,8 +20875,8 @@ snapshots: pkg-types: 2.3.0 pretty-bytes: 7.1.0 radix3: 1.1.2 - rollup: 4.57.1 - rollup-plugin-visualizer: 6.0.5(rolldown@1.0.0-rc.17)(rollup@4.57.1) + rollup: 4.60.1 + rollup-plugin-visualizer: 6.0.5(rolldown@1.0.0-rc.17)(rollup@4.60.1) scule: 1.3.0 semver: 7.7.4 serve-placeholder: 2.0.2 @@ -21980,6 +22061,16 @@ snapshots: rolldown: 1.0.0-rc.17 rollup: 4.57.1 + rollup-plugin-visualizer@6.0.5(rolldown@1.0.0-rc.17)(rollup@4.60.1): + dependencies: + open: 8.4.2 + picomatch: 4.0.4 + source-map: 0.7.6 + yargs: 17.7.2 + optionalDependencies: + rolldown: 1.0.0-rc.17 + rollup: 4.60.1 + rollup@4.53.3: dependencies: '@types/estree': 1.0.8 @@ -23376,8 +23467,8 @@ snapshots: esbuild: 0.25.12 fdir: 6.5.0(picomatch@4.0.4) picomatch: 4.0.4 - postcss: 8.5.6 - rollup: 4.57.1 + postcss: 8.5.9 + rollup: 4.60.1 tinyglobby: 0.2.16 optionalDependencies: '@types/node': 24.10.3 @@ -23818,6 +23909,10 @@ snapshots: compress-commons: 6.0.2 readable-stream: 4.7.0 + zod-to-json-schema@3.25.2(zod@4.3.6): + dependencies: + 
zod: 4.3.6 + zod@3.25.76: {} zod@4.2.1: {} diff --git a/testing/e2e/package.json b/testing/e2e/package.json index 0dc700b0d..d7e37e151 100644 --- a/testing/e2e/package.json +++ b/testing/e2e/package.json @@ -19,6 +19,7 @@ "@tanstack/ai-gemini": "workspace:*", "@tanstack/ai-grok": "workspace:*", "@tanstack/ai-groq": "workspace:*", + "@tanstack/ai-mistral": "workspace:*", "@tanstack/ai-ollama": "workspace:*", "@tanstack/ai-openai": "workspace:*", "@tanstack/ai-openrouter": "workspace:*", diff --git a/testing/e2e/src/lib/feature-support.ts b/testing/e2e/src/lib/feature-support.ts index db0696e4e..f4ecb0cbf 100644 --- a/testing/e2e/src/lib/feature-support.ts +++ b/testing/e2e/src/lib/feature-support.ts @@ -16,6 +16,7 @@ export const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'mistral', ]), 'one-shot-text': new Set([ 'openai', @@ -25,8 +26,9 @@ export const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'mistral', ]), - reasoning: new Set(['openai', 'anthropic', 'gemini']), + reasoning: new Set(['openai', 'anthropic', 'gemini', 'mistral']), 'multi-turn': new Set([ 'openai', 'anthropic', @@ -35,6 +37,7 @@ export const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'mistral', ]), 'tool-calling': new Set([ 'openai', @@ -44,6 +47,7 @@ export const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'mistral', ]), 'parallel-tool-calls': new Set([ 'openai', @@ -52,6 +56,7 @@ export const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'mistral', ]), // Gemini excluded: approval flow timing issues with Gemini's streaming format 'tool-approval': new Set([ @@ -61,6 +66,7 @@ export const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'mistral', ]), // Ollama excluded: aimock doesn't support content+toolCalls for /api/chat format 'text-tool-text': new Set([ @@ -70,6 +76,7 @@ export const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'mistral', ]), 'structured-output': new Set([ 'openai', @@ -79,6 +86,7 @@ export const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'mistral', ]), 'agentic-structured': new Set([ 'openai', @@ -88,7 +96,9 @@ export const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'mistral', ]), + // Mistral excluded: mistral-large-latest is text-only; vision requires pixtral 'multimodal-image': new Set([ 'openai', 'anthropic', @@ -110,6 +120,7 @@ export const matrix: Record> = { 'ollama', 'grok', 'openrouter', + 'mistral', ]), 'summarize-stream': new Set([ 'openai', @@ -118,6 +129,7 @@ export const matrix: Record> = { 'ollama', 'grok', 'openrouter', + 'mistral', ]), // Gemini excluded: aimock doesn't mock Gemini's Imagen predict endpoint format 'image-gen': new Set(['openai', 'grok']), diff --git a/testing/e2e/src/lib/features.ts b/testing/e2e/src/lib/features.ts index 15000cd7e..6e3360fc2 100644 --- a/testing/e2e/src/lib/features.ts +++ b/testing/e2e/src/lib/features.ts @@ -23,6 +23,7 @@ export const featureConfigs: Record = { modelOverrides: { openai: 'o3', anthropic: 'claude-sonnet-4-5', + mistral: 'magistral-medium-latest', }, }, 'multi-turn': { diff --git a/testing/e2e/src/lib/providers.ts b/testing/e2e/src/lib/providers.ts index 35b720b61..4b40d17b2 100644 --- a/testing/e2e/src/lib/providers.ts +++ b/testing/e2e/src/lib/providers.ts @@ -7,6 +7,7 @@ import { createOllamaChat } from '@tanstack/ai-ollama' import { createGroqText } from '@tanstack/ai-groq' import { createGrokText } from '@tanstack/ai-grok' import { createOpenRouterText } from '@tanstack/ai-openrouter' +import { createMistralText } from '@tanstack/ai-mistral' import type { 
Provider } from '@/lib/types' const LLMOCK_DEFAULT_BASE = process.env.LLMOCK_URL || 'http://127.0.0.1:4010' @@ -20,6 +21,7 @@ const defaultModels: Record = { groq: 'llama-3.3-70b-versatile', grok: 'grok-3', openrouter: 'openai/gpt-4o', + mistral: 'mistral-large-latest', } export function createTextAdapter( @@ -92,6 +94,13 @@ export function createTextAdapter( : openaiUrl, }), }), + mistral: () => + createChatOptions({ + adapter: createMistralText(model as 'mistral-large-latest', DUMMY_KEY, { + serverURL: base, + defaultHeaders: testHeaders, + }), + }), } return factories[provider]() diff --git a/testing/e2e/src/lib/types.ts b/testing/e2e/src/lib/types.ts index 00c848157..46ce8e05e 100644 --- a/testing/e2e/src/lib/types.ts +++ b/testing/e2e/src/lib/types.ts @@ -8,6 +8,7 @@ export type Provider = | 'grok' | 'groq' | 'openrouter' + | 'mistral' export type Feature = | 'chat' @@ -37,6 +38,7 @@ export const ALL_PROVIDERS: Provider[] = [ 'grok', 'groq', 'openrouter', + 'mistral', ] export const ALL_FEATURES: Feature[] = [ diff --git a/testing/e2e/tests/test-matrix.ts b/testing/e2e/tests/test-matrix.ts index 425b49518..59d7a7575 100644 --- a/testing/e2e/tests/test-matrix.ts +++ b/testing/e2e/tests/test-matrix.ts @@ -21,6 +21,7 @@ export const providers: Provider[] = [ 'groq', 'grok', 'openrouter', + 'mistral', ] export { isSupported }
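For reference, a minimal sketch of the strict-mode schema transformation introduced in `schema-converter.ts`. This is an illustration, not part of the diff: the import path assumes the package root re-exports `src/utils` (its barrel file suggests so, but the root index is not shown here), and the commented result follows from the implementation above.

```typescript
import { makeMistralStructuredOutputCompatible } from '@tanstack/ai-mistral'

// Only `location` is required; `unit` is optional.
const schema = {
  type: 'object',
  properties: {
    location: { type: 'string' },
    unit: { type: 'string' },
  },
  required: ['location'],
}

const strict = makeMistralStructuredOutputCompatible(schema, schema.required)
// strict is:
// {
//   type: 'object',
//   properties: {
//     location: { type: 'string' },
//     unit: { type: ['string', 'null'] }, // optional field made nullable
//   },
//   required: ['location', 'unit'],       // every property listed
//   additionalProperties: false,
// }
//
// On the way back, transformNullsToUndefined converts the nulls Mistral
// returns for such optional fields into undefined, matching the caller's
// original Zod schema.
```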