From a87d24253edcdeab2c017641369551a9b2de42b8 Mon Sep 17 00:00:00 2001 From: Tim Raderschad Date: Fri, 17 Apr 2026 10:05:45 +0200 Subject: [PATCH 1/9] feat: add Mistral provider adapter for TanStack AI - Implemented MistralTextAdapter and related functions for chat completions. - Defined Mistral-specific message types and metadata structures. - Created model metadata for various Mistral models with pricing and capabilities. - Added text provider options and validation for Mistral text models. - Developed utility functions for Mistral client configuration and schema conversion. - Implemented function tool conversion for Mistral-specific formats. - Added tests for Mistral adapters, including event emissions and error handling. - Configured TypeScript and Vite for the new package. --- .changeset/add-ai-mistral.md | 5 + packages/typescript/ai-mistral/README.md | 89 +++ packages/typescript/ai-mistral/package.json | 52 ++ .../ai-mistral/src/adapters/text.ts | 641 ++++++++++++++++++ packages/typescript/ai-mistral/src/index.ts | 33 + .../ai-mistral/src/message-types.ts | 194 ++++++ .../typescript/ai-mistral/src/model-meta.ts | 269 ++++++++ .../src/text/text-provider-options.ts | 114 ++++ .../ai-mistral/src/tools/function-tool.ts | 43 ++ .../typescript/ai-mistral/src/tools/index.ts | 5 + .../ai-mistral/src/tools/tool-converter.ts | 14 + .../typescript/ai-mistral/src/utils/client.ts | 53 ++ .../typescript/ai-mistral/src/utils/index.ts | 10 + .../ai-mistral/src/utils/schema-converter.ts | 102 +++ .../ai-mistral/tests/mistral-adapter.test.ts | 498 ++++++++++++++ packages/typescript/ai-mistral/tsconfig.json | 9 + packages/typescript/ai-mistral/vite.config.ts | 36 + .../typescript/ai-mistral/vitest.config.ts | 22 + pnpm-lock.yaml | 149 +++- 19 files changed, 2308 insertions(+), 30 deletions(-) create mode 100644 .changeset/add-ai-mistral.md create mode 100644 packages/typescript/ai-mistral/README.md create mode 100644 packages/typescript/ai-mistral/package.json create mode 
100644 packages/typescript/ai-mistral/src/adapters/text.ts create mode 100644 packages/typescript/ai-mistral/src/index.ts create mode 100644 packages/typescript/ai-mistral/src/message-types.ts create mode 100644 packages/typescript/ai-mistral/src/model-meta.ts create mode 100644 packages/typescript/ai-mistral/src/text/text-provider-options.ts create mode 100644 packages/typescript/ai-mistral/src/tools/function-tool.ts create mode 100644 packages/typescript/ai-mistral/src/tools/index.ts create mode 100644 packages/typescript/ai-mistral/src/tools/tool-converter.ts create mode 100644 packages/typescript/ai-mistral/src/utils/client.ts create mode 100644 packages/typescript/ai-mistral/src/utils/index.ts create mode 100644 packages/typescript/ai-mistral/src/utils/schema-converter.ts create mode 100644 packages/typescript/ai-mistral/tests/mistral-adapter.test.ts create mode 100644 packages/typescript/ai-mistral/tsconfig.json create mode 100644 packages/typescript/ai-mistral/vite.config.ts create mode 100644 packages/typescript/ai-mistral/vitest.config.ts diff --git a/.changeset/add-ai-mistral.md b/.changeset/add-ai-mistral.md new file mode 100644 index 000000000..294086f32 --- /dev/null +++ b/.changeset/add-ai-mistral.md @@ -0,0 +1,5 @@ +--- +'@tanstack/ai-mistral': minor +--- + +Add new `@tanstack/ai-mistral` adapter package for Mistral models using the `@mistralai/mistralai` SDK. Supports streaming chat, tool calling, vision input (Pixtral / Mistral Medium / Small), and structured output via JSON Schema. Includes model metadata for Mistral Large, Medium, Small, Ministral 3B/8B, Codestral, Pixtral, Magistral, and Open Mistral Nemo. diff --git a/packages/typescript/ai-mistral/README.md b/packages/typescript/ai-mistral/README.md new file mode 100644 index 000000000..bb2dce25f --- /dev/null +++ b/packages/typescript/ai-mistral/README.md @@ -0,0 +1,89 @@ +# @tanstack/ai-mistral + +Mistral adapter for TanStack AI. 
+ +## Installation + +```bash +npm install @tanstack/ai-mistral +# or +pnpm add @tanstack/ai-mistral +# or +yarn add @tanstack/ai-mistral +``` + +## Setup + +Get your API key from [Mistral Console](https://console.mistral.ai/) and set it as an environment variable: + +```bash +export MISTRAL_API_KEY="..." +``` + +## Usage + +### Text/Chat Adapter + +```typescript +import { mistralText } from '@tanstack/ai-mistral' +import { generate } from '@tanstack/ai' + +const adapter = mistralText('mistral-large-latest') + +const result = await generate({ + adapter, + model: 'mistral-large-latest', + messages: [ + { role: 'user', content: 'Explain quantum computing in simple terms' }, + ], +}) + +console.log(result.text) +``` + +### With Explicit API Key + +```typescript +import { createMistralText } from '@tanstack/ai-mistral' + +const adapter = createMistralText('mistral-large-latest', 'api_key') +``` + +## Supported Models + +### Chat Models + +- `mistral-large-latest` - Frontier flagship model (128k context) +- `mistral-medium-latest` - Balanced multimodal model (vision) +- `mistral-small-latest` - Fast, affordable multimodal model (vision) +- `ministral-8b-latest` - 8B edge model +- `ministral-3b-latest` - 3B edge model +- `codestral-latest` - Code-specialized model (256k context) +- `pixtral-large-latest` - Large vision model +- `pixtral-12b-2409` - 12B vision model +- `magistral-medium-latest` - Reasoning model +- `magistral-small-latest` - Small reasoning model +- `open-mistral-nemo` - Open 12B model + +See [Mistral model comparison](https://docs.mistral.ai/getting-started/models/compare) for full details. 
+ +## Features + +- ✅ Streaming chat completions +- ✅ Structured output (JSON Schema) +- ✅ Function/tool calling +- ✅ Multimodal input (text + images for vision models) +- ❌ Embeddings (use the Mistral SDK directly) +- ❌ Image generation + +## Tree-Shakeable Adapters + +This package uses tree-shakeable adapters, so you only import what you need: + +```typescript +import { mistralText } from '@tanstack/ai-mistral' +``` + +## License + +MIT diff --git a/packages/typescript/ai-mistral/package.json b/packages/typescript/ai-mistral/package.json new file mode 100644 index 000000000..53ea79415 --- /dev/null +++ b/packages/typescript/ai-mistral/package.json @@ -0,0 +1,52 @@ +{ + "name": "@tanstack/ai-mistral", + "version": "0.1.0", + "type": "module", + "description": "Mistral adapter for TanStack AI", + "author": "", + "license": "MIT", + "repository": { + "type": "git", + "url": "git+https://github.com/TanStack/ai.git", + "directory": "packages/typescript/ai-mistral" + }, + "module": "./dist/esm/index.js", + "types": "./dist/esm/index.d.ts", + "exports": { + ".": { + "types": "./dist/esm/index.d.ts", + "import": "./dist/esm/index.js" + } + }, + "files": [ + "dist", + "src" + ], + "scripts": { + "build": "vite build", + "clean": "premove ./build ./dist", + "lint:fix": "eslint ./src --fix", + "test:build": "publint --strict", + "test:eslint": "eslint ./src", + "test:lib": "vitest run", + "test:lib:dev": "pnpm test:lib --watch", + "test:types": "tsc" + }, + "keywords": [ + "ai", + "mistral", + "tanstack", + "adapter" + ], + "devDependencies": { + "@vitest/coverage-v8": "4.0.14", + "vite": "^7.2.7" + }, + "peerDependencies": { + "@tanstack/ai": "workspace:^", + "zod": "^4.0.0" + }, + "dependencies": { + "@mistralai/mistralai": "^2.2.0" + } +} diff --git a/packages/typescript/ai-mistral/src/adapters/text.ts b/packages/typescript/ai-mistral/src/adapters/text.ts new file mode 100644 index 000000000..bfc2f1f12 --- /dev/null +++ 
b/packages/typescript/ai-mistral/src/adapters/text.ts @@ -0,0 +1,641 @@ +import { BaseTextAdapter } from '@tanstack/ai/adapters' +import { validateTextProviderOptions } from '../text/text-provider-options' +import { convertToolsToProviderFormat } from '../tools' +import { + createMistralClient, + generateId, + getMistralApiKeyFromEnv, + makeMistralStructuredOutputCompatible, + transformNullsToUndefined, +} from '../utils' +import type { + MISTRAL_CHAT_MODELS, + ResolveInputModalities, + ResolveProviderOptions, +} from '../model-meta' +import type { + StructuredOutputOptions, + StructuredOutputResult, +} from '@tanstack/ai/adapters' +import type { Mistral } from '@mistralai/mistralai' +import type { + ContentPart, + ModelMessage, + StreamChunk, + TextOptions, +} from '@tanstack/ai' +import type { InternalTextProviderOptions } from '../text/text-provider-options' +import type { + ChatCompletionContentPart, + ChatCompletionMessageParam, + MistralImageMetadata, + MistralMessageMetadataByModality, +} from '../message-types' +import type { MistralClientConfig } from '../utils' + +/** + * Configuration for Mistral text adapter. + */ +export interface MistralTextConfig extends MistralClientConfig {} + +/** + * Alias for TextProviderOptions for external use. + */ +export type { ExternalTextProviderOptions as MistralTextProviderOptions } from '../text/text-provider-options' + +/** + * Minimal shape of a Mistral stream chunk used by the adapter. 
+ */ +interface MistralStreamChunk { + id?: string + model?: string + choices: Array<{ + index?: number + delta: { + role?: string | null + content?: string | Array<{ type: string; text?: string }> | null + toolCalls?: Array<{ + id?: string + type?: string + index?: number + function: { + name?: string + arguments?: string | Record + } + }> | null + } + finishReason?: string | null + }> + usage?: { + promptTokens?: number + completionTokens?: number + totalTokens?: number + } +} + +interface MistralStreamEvent { + data: MistralStreamChunk +} + +/** + * Mistral Text (Chat) Adapter. + * + * Tree-shakeable adapter for Mistral chat/text completion functionality. + */ +export class MistralTextAdapter< + TModel extends (typeof MISTRAL_CHAT_MODELS)[number], +> extends BaseTextAdapter< + TModel, + ResolveProviderOptions, + ResolveInputModalities, + MistralMessageMetadataByModality +> { + readonly kind = 'text' as const + readonly name = 'mistral' as const + + private client: Mistral + + constructor(config: MistralTextConfig, model: TModel) { + super({}, model) + this.client = createMistralClient(config) + } + + async *chatStream( + options: TextOptions>, + ): AsyncIterable { + const requestParams = this.mapTextOptionsToMistral(options) + const timestamp = Date.now() + + const aguiState = { + runId: generateId(this.name), + messageId: generateId(this.name), + timestamp, + hasEmittedRunStarted: false, + } + + try { + const stream = (await this.client.chat.stream( + requestParams as any, + )) as unknown as AsyncIterable + + yield* this.processMistralStreamChunks(stream, options, aguiState) + } catch (error: unknown) { + const err = error as Error & { code?: string } + + if (!aguiState.hasEmittedRunStarted) { + aguiState.hasEmittedRunStarted = true + yield { + type: 'RUN_STARTED', + runId: aguiState.runId, + model: options.model, + timestamp, + } + } + + yield { + type: 'RUN_ERROR', + runId: aguiState.runId, + model: options.model, + timestamp, + error: { + message: 
err.message || 'Unknown error', + code: err.code, + }, + } + + console.error('>>> chatStream: Fatal error during response creation <<<') + console.error('>>> Error message:', err.message) + console.error('>>> Error stack:', err.stack) + console.error('>>> Full error:', err) + } + } + + /** + * Generate structured output using Mistral's JSON Schema response format. + */ + async structuredOutput( + options: StructuredOutputOptions>, + ): Promise> { + const { chatOptions, outputSchema } = options + const requestParams = this.mapTextOptionsToMistral(chatOptions) + + const jsonSchema = makeMistralStructuredOutputCompatible( + outputSchema, + outputSchema.required || [], + ) + + try { + const { stream: _stream, ...nonStreamParams } = requestParams + const response = (await this.client.chat.complete({ + ...nonStreamParams, + responseFormat: { + type: 'json_schema', + jsonSchema: { + name: 'structured_output', + schemaDefinition: jsonSchema, + strict: true, + }, + }, + } as any)) as { + choices?: Array<{ message?: { content?: string | null } }> + } + + const rawText = response.choices?.[0]?.message?.content || '' + const textContent = + typeof rawText === 'string' ? rawText : String(rawText) + + let parsed: unknown + try { + parsed = JSON.parse(textContent) + } catch { + throw new Error( + `Failed to parse structured output as JSON. Content: ${textContent.slice(0, 200)}${textContent.length > 200 ? '...' : ''}`, + ) + } + + const transformed = transformNullsToUndefined(parsed) + + return { + data: transformed, + rawText: textContent, + } + } catch (error: unknown) { + const err = error as Error + console.error('>>> structuredOutput: Error during response creation <<<') + console.error('>>> Error message:', err.message) + throw error + } + } + + /** + * Processes streaming chunks from the Mistral API and yields AG-UI stream events. 
+ */ + private async *processMistralStreamChunks( + stream: AsyncIterable, + options: TextOptions, + aguiState: { + runId: string + messageId: string + timestamp: number + hasEmittedRunStarted: boolean + }, + ): AsyncIterable { + let accumulatedContent = '' + const timestamp = aguiState.timestamp + let hasEmittedTextMessageStart = false + + const toolCallsInProgress = new Map< + number, + { + id: string + name: string + arguments: string + started: boolean + } + >() + + try { + for await (const event of stream) { + const chunk = event.data + const choice = chunk.choices[0] + + if (!choice) continue + + if (!aguiState.hasEmittedRunStarted) { + aguiState.hasEmittedRunStarted = true + yield { + type: 'RUN_STARTED', + runId: aguiState.runId, + model: chunk.model || options.model, + timestamp, + } + } + + const delta = choice.delta + const deltaContent = this.extractDeltaText(delta.content) + const deltaToolCalls = delta.toolCalls + + if (deltaContent) { + if (!hasEmittedTextMessageStart) { + hasEmittedTextMessageStart = true + yield { + type: 'TEXT_MESSAGE_START', + messageId: aguiState.messageId, + model: chunk.model || options.model, + timestamp, + role: 'assistant', + } + } + + accumulatedContent += deltaContent + + yield { + type: 'TEXT_MESSAGE_CONTENT', + messageId: aguiState.messageId, + model: chunk.model || options.model, + timestamp, + delta: deltaContent, + content: accumulatedContent, + } + } + + if (deltaToolCalls) { + for (let i = 0; i < deltaToolCalls.length; i++) { + const toolCallDelta = deltaToolCalls[i]! + const index = toolCallDelta.index ?? i + + if (!toolCallsInProgress.has(index)) { + toolCallsInProgress.set(index, { + id: toolCallDelta.id || '', + name: toolCallDelta.function.name || '', + arguments: '', + started: false, + }) + } + + const toolCall = toolCallsInProgress.get(index)! 
+ + if (toolCallDelta.id) { + toolCall.id = toolCallDelta.id + } + if (toolCallDelta.function.name) { + toolCall.name = toolCallDelta.function.name + } + if (toolCallDelta.function.arguments !== undefined) { + const argsDelta = + typeof toolCallDelta.function.arguments === 'string' + ? toolCallDelta.function.arguments + : JSON.stringify(toolCallDelta.function.arguments) + toolCall.arguments += argsDelta + } + + if (toolCall.id && toolCall.name && !toolCall.started) { + toolCall.started = true + yield { + type: 'TOOL_CALL_START', + toolCallId: toolCall.id, + toolName: toolCall.name, + model: chunk.model || options.model, + timestamp, + index, + } + } + + if (toolCallDelta.function.arguments !== undefined && toolCall.started) { + const argsDelta = + typeof toolCallDelta.function.arguments === 'string' + ? toolCallDelta.function.arguments + : JSON.stringify(toolCallDelta.function.arguments) + yield { + type: 'TOOL_CALL_ARGS', + toolCallId: toolCall.id, + model: chunk.model || options.model, + timestamp, + delta: argsDelta, + } + } + } + } + + if (choice.finishReason) { + if ( + choice.finishReason === 'tool_calls' || + toolCallsInProgress.size > 0 + ) { + for (const [, toolCall] of toolCallsInProgress) { + if (!toolCall.started || !toolCall.id || !toolCall.name) { + continue + } + + let parsedInput: unknown = {} + try { + parsedInput = toolCall.arguments + ? JSON.parse(toolCall.arguments) + : {} + } catch { + parsedInput = {} + } + + yield { + type: 'TOOL_CALL_END', + toolCallId: toolCall.id, + toolName: toolCall.name, + model: chunk.model || options.model, + timestamp, + input: parsedInput, + } + } + } + + const computedFinishReason = + choice.finishReason === 'tool_calls' || + toolCallsInProgress.size > 0 + ? 'tool_calls' + : choice.finishReason === 'length' + ? 
'length' + : 'stop' + + if (hasEmittedTextMessageStart) { + yield { + type: 'TEXT_MESSAGE_END', + messageId: aguiState.messageId, + model: chunk.model || options.model, + timestamp, + } + } + + const usage = chunk.usage + + yield { + type: 'RUN_FINISHED', + runId: aguiState.runId, + model: chunk.model || options.model, + timestamp, + usage: usage + ? { + promptTokens: usage.promptTokens || 0, + completionTokens: usage.completionTokens || 0, + totalTokens: usage.totalTokens || 0, + } + : undefined, + finishReason: computedFinishReason, + } + } + } + } catch (error: unknown) { + const err = error as Error & { code?: string } + console.log('[Mistral Adapter] Stream ended with error:', err.message) + + yield { + type: 'RUN_ERROR', + runId: aguiState.runId, + model: options.model, + timestamp, + error: { + message: err.message || 'Unknown error occurred', + code: err.code, + }, + } + } + } + + /** + * Extracts text from a Mistral delta content, which can be a string or an + * array of content chunks. + */ + private extractDeltaText( + content: string | Array<{ type: string; text?: string }> | null | undefined, + ): string { + if (!content) return '' + if (typeof content === 'string') return content + return content + .filter((c) => c.type === 'text' && typeof c.text === 'string') + .map((c) => c.text!) + .join('') + } + + /** + * Maps common TextOptions to Mistral Chat Completions request parameters. + */ + private mapTextOptionsToMistral(options: TextOptions): { + model: string + messages: Array + temperature?: number | null + maxTokens?: number | null + topP?: number | null + tools?: Array + stream: true + } { + const modelOptions = options.modelOptions as + | Omit< + InternalTextProviderOptions, + 'max_tokens' | 'tools' | 'temperature' | 'top_p' + > + | undefined + + if (modelOptions) { + validateTextProviderOptions({ + ...modelOptions, + model: options.model, + } as InternalTextProviderOptions) + } + + const tools = options.tools + ? 
convertToolsToProviderFormat(options.tools) + : undefined + + const messages: Array = [] + + if (options.systemPrompts && options.systemPrompts.length > 0) { + messages.push({ + role: 'system', + content: options.systemPrompts.join('\n'), + }) + } + + for (const message of options.messages) { + messages.push(this.convertMessageToMistral(message)) + } + + return { + model: options.model, + messages, + temperature: options.temperature, + maxTokens: options.maxTokens, + topP: options.topP, + tools, + stream: true, + } + } + + /** + * Converts a TanStack AI ModelMessage to a Mistral ChatCompletionMessageParam. + */ + private convertMessageToMistral( + message: ModelMessage, + ): ChatCompletionMessageParam { + if (message.role === 'tool') { + return { + role: 'tool', + toolCallId: message.toolCallId || '', + content: + typeof message.content === 'string' + ? message.content + : JSON.stringify(message.content), + } + } + + if (message.role === 'assistant') { + const toolCalls = message.toolCalls?.map((tc) => ({ + id: tc.id, + type: 'function' as const, + function: { + name: tc.function.name, + arguments: + typeof tc.function.arguments === 'string' + ? tc.function.arguments + : JSON.stringify(tc.function.arguments), + }, + })) + + return { + role: 'assistant', + content: this.extractTextContent(message.content), + ...(toolCalls && toolCalls.length > 0 ? { toolCalls } : {}), + } + } + + const contentParts = this.normalizeContent(message.content) + + if (contentParts.length === 1 && contentParts[0]?.type === 'text') { + return { + role: 'user', + content: contentParts[0].content, + } + } + + const parts: Array = [] + for (const part of contentParts) { + if (part.type === 'text') { + parts.push({ type: 'text', text: part.content }) + } else if (part.type === 'image') { + const imageMetadata = part.metadata as MistralImageMetadata | undefined + const imageValue = part.source.value + const imageUrl = + part.source.type === 'data' && !imageValue.startsWith('data:') + ? 
`data:${part.source.mimeType};base64,${imageValue}` + : imageValue + parts.push({ + type: 'image_url', + imageUrl: imageMetadata?.detail + ? { url: imageUrl, detail: imageMetadata.detail } + : imageUrl, + }) + } + } + + return { + role: 'user', + content: parts.length > 0 ? parts : '', + } + } + + /** + * Normalizes message content to an array of ContentPart. + */ + private normalizeContent( + content: string | null | Array, + ): Array { + if (content === null) { + return [] + } + if (typeof content === 'string') { + return [{ type: 'text', content: content }] + } + return content + } + + /** + * Extracts text content from a content value that may be string, null, or ContentPart array. + */ + private extractTextContent( + content: string | null | Array, + ): string { + if (content === null) { + return '' + } + if (typeof content === 'string') { + return content + } + return content + .filter((p) => p.type === 'text') + .map((p) => p.content) + .join('') + } +} + +/** + * Creates a Mistral text adapter with explicit API key. + * + * @param model - The model name (e.g., 'mistral-large-latest') + * @param apiKey - Your Mistral API key + * @param config - Optional additional configuration + * @returns Configured Mistral text adapter instance + * + * @example + * ```typescript + * const adapter = createMistralText('mistral-large-latest', 'api_key'); + * ``` + */ +export function createMistralText< + TModel extends (typeof MISTRAL_CHAT_MODELS)[number], +>( + model: TModel, + apiKey: string, + config?: Omit, +): MistralTextAdapter { + return new MistralTextAdapter({ apiKey, ...config }, model) +} + +/** + * Creates a Mistral text adapter using the `MISTRAL_API_KEY` environment variable. 
+ * + * @param model - The model name (e.g., 'mistral-large-latest') + * @param config - Optional configuration (excluding apiKey) + * @returns Configured Mistral text adapter instance + * @throws Error if MISTRAL_API_KEY is not found in environment + * + * @example + * ```typescript + * const adapter = mistralText('mistral-large-latest'); + * ``` + */ +export function mistralText< + TModel extends (typeof MISTRAL_CHAT_MODELS)[number], +>( + model: TModel, + config?: Omit, +): MistralTextAdapter { + const apiKey = getMistralApiKeyFromEnv() + return createMistralText(model, apiKey, config) +} diff --git a/packages/typescript/ai-mistral/src/index.ts b/packages/typescript/ai-mistral/src/index.ts new file mode 100644 index 000000000..d57f85f81 --- /dev/null +++ b/packages/typescript/ai-mistral/src/index.ts @@ -0,0 +1,33 @@ +/** + * @module @tanstack/ai-mistral + * + * Mistral provider adapter for TanStack AI. + * Provides tree-shakeable adapters for Mistral's Chat Completions API. + */ + +// Text (Chat) adapter +export { + MistralTextAdapter, + createMistralText, + mistralText, + type MistralTextConfig, + type MistralTextProviderOptions, +} from './adapters/text' + +// Types +export type { + MistralChatModelProviderOptionsByName, + MistralModelInputModalitiesByName, + ResolveProviderOptions, + ResolveInputModalities, + MistralChatModels, +} from './model-meta' +export { MISTRAL_CHAT_MODELS } from './model-meta' +export type { + MistralTextMetadata, + MistralImageMetadata, + MistralAudioMetadata, + MistralVideoMetadata, + MistralDocumentMetadata, + MistralMessageMetadataByModality, +} from './message-types' diff --git a/packages/typescript/ai-mistral/src/message-types.ts b/packages/typescript/ai-mistral/src/message-types.ts new file mode 100644 index 000000000..553ad9867 --- /dev/null +++ b/packages/typescript/ai-mistral/src/message-types.ts @@ -0,0 +1,194 @@ +/** + * Mistral-specific message types for the Chat Completions API. 
+ * + * These types mirror the shape expected by the Mistral SDK (`@mistralai/mistralai`) + * and are used internally by the adapter to avoid tight coupling to the SDK's + * exported types. + * + * @see https://docs.mistral.ai/api/ + */ + +export interface ChatCompletionContentPartText { + /** The text content. */ + text: string + + /** The type of the content part. */ + type: 'text' +} + +export interface ChatCompletionContentPartImage { + imageUrl: string | { url: string; detail?: 'auto' | 'low' | 'high' } + + /** The type of the content part. */ + type: 'image_url' +} + +export interface ChatCompletionContentPartDocumentUrl { + documentUrl: string + + /** The type of the content part. */ + type: 'document_url' +} + +export type ChatCompletionContentPart = + | ChatCompletionContentPartText + | ChatCompletionContentPartImage + | ChatCompletionContentPartDocumentUrl + +export interface ChatCompletionMessageToolCall { + /** The ID of the tool call. */ + id: string + + /** The type of the tool. Currently only `function` is supported. */ + type?: 'function' + + /** The function that the model called. */ + function: { + /** The name of the function to call. */ + name: string + + /** Arguments generated by the model as a JSON string. */ + arguments: string + } +} + +export type FunctionParameters = { [key: string]: unknown } + +export interface FunctionDefinition { + /** + * The name of the function. Must be a-z, A-Z, 0-9, or contain underscores and + * dashes, with a maximum length of 64. + */ + name: string + + /** A description of what the function does. */ + description?: string + + /** Function parameters defined as a JSON Schema object. */ + parameters?: FunctionParameters + + /** Whether to enable strict schema adherence. */ + strict?: boolean +} + +export interface ChatCompletionTool { + /** The type of the tool. 
*/ + type: 'function' + + function: FunctionDefinition +} + +export interface ChatCompletionNamedToolChoice { + type: 'function' + function: { + name: string + } +} + +/** + * Controls which (if any) tool is called by the model. + * + * - `none` — never call tools + * - `auto` — model decides + * - `any` / `required` — model must call one or more tools + * - Named tool choice — forces a specific tool + */ +export type ChatCompletionToolChoiceOption = + | 'none' + | 'auto' + | 'any' + | 'required' + | ChatCompletionNamedToolChoice + +export interface ChatCompletionSystemMessageParam { + role: 'system' + content: string | Array +} + +export interface ChatCompletionUserMessageParam { + role: 'user' + content: string | Array +} + +export interface ChatCompletionAssistantMessageParam { + role: 'assistant' + content?: string | Array | null + toolCalls?: Array + prefix?: boolean +} + +export interface ChatCompletionToolMessageParam { + role: 'tool' + content: string | Array + toolCallId: string + name?: string +} + +export type ChatCompletionMessageParam = + | ChatCompletionSystemMessageParam + | ChatCompletionUserMessageParam + | ChatCompletionAssistantMessageParam + | ChatCompletionToolMessageParam + +export interface ResponseFormatText { + type: 'text' +} + +export interface ResponseFormatJsonObject { + type: 'json_object' +} + +export interface ResponseFormatJsonSchema { + type: 'json_schema' + jsonSchema: { + name: string + description?: string + schemaDefinition?: { [key: string]: unknown } + strict?: boolean + } +} + +/** + * Metadata for Mistral text content parts. + */ +export interface MistralTextMetadata {} + +/** + * Metadata for Mistral image content parts. + */ +export interface MistralImageMetadata { + /** + * Specifies the detail level of the image. + * @default 'auto' + */ + detail?: 'auto' | 'low' | 'high' +} + +/** + * Metadata for Mistral audio content parts. + * Mistral does not currently support audio input. 
+ */ +export interface MistralAudioMetadata {} + +/** + * Metadata for Mistral video content parts. + * Mistral does not currently support video input. + */ +export interface MistralVideoMetadata {} + +/** + * Metadata for Mistral document content parts. + * Used with document understanding models via `document_url` parts. + */ +export interface MistralDocumentMetadata {} + +/** + * Map of modality types to their Mistral-specific metadata types. + */ +export interface MistralMessageMetadataByModality { + text: MistralTextMetadata + image: MistralImageMetadata + audio: MistralAudioMetadata + video: MistralVideoMetadata + document: MistralDocumentMetadata +} diff --git a/packages/typescript/ai-mistral/src/model-meta.ts b/packages/typescript/ai-mistral/src/model-meta.ts new file mode 100644 index 000000000..33991193c --- /dev/null +++ b/packages/typescript/ai-mistral/src/model-meta.ts @@ -0,0 +1,269 @@ +import type { MistralTextProviderOptions } from './text/text-provider-options' + +/** + * Internal metadata structure describing a Mistral model's capabilities + * and approximate pricing (USD per million tokens). 
+ */ +interface ModelMeta { + name: string + context_window?: number + max_completion_tokens?: number + pricing: { + input?: { normal: number; cached?: number } + output?: { normal: number } + } + supports: { + input: Array<'text' | 'image' | 'audio'> + output: Array<'text'> + endpoints: Array<'chat' | 'embeddings'> + + features: Array< + | 'streaming' + | 'tools' + | 'json_object' + | 'json_schema' + | 'reasoning' + | 'vision' + | 'code' + > + } + providerOptions?: TProviderOptions +} + +const MISTRAL_LARGE_LATEST = { + name: 'mistral-large-latest', + context_window: 131_072, + max_completion_tokens: 8_192, + pricing: { + input: { normal: 2 }, + output: { normal: 6 }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object', 'json_schema'], + }, +} as const satisfies ModelMeta + +const MISTRAL_MEDIUM_LATEST = { + name: 'mistral-medium-latest', + context_window: 131_072, + max_completion_tokens: 8_192, + pricing: { + input: { normal: 0.4 }, + output: { normal: 2 }, + }, + supports: { + input: ['text', 'image'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object', 'json_schema', 'vision'], + }, +} as const satisfies ModelMeta + +const MISTRAL_SMALL_LATEST = { + name: 'mistral-small-latest', + context_window: 131_072, + max_completion_tokens: 8_192, + pricing: { + input: { normal: 0.1 }, + output: { normal: 0.3 }, + }, + supports: { + input: ['text', 'image'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object', 'json_schema', 'vision'], + }, +} as const satisfies ModelMeta + +const MINISTRAL_8B_LATEST = { + name: 'ministral-8b-latest', + context_window: 131_072, + max_completion_tokens: 8_192, + pricing: { + input: { normal: 0.1 }, + output: { normal: 0.1 }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object', 'json_schema'], + }, +} as 
const satisfies ModelMeta + +const MINISTRAL_3B_LATEST = { + name: 'ministral-3b-latest', + context_window: 131_072, + max_completion_tokens: 8_192, + pricing: { + input: { normal: 0.04 }, + output: { normal: 0.04 }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object', 'json_schema'], + }, +} as const satisfies ModelMeta + +const CODESTRAL_LATEST = { + name: 'codestral-latest', + context_window: 256_000, + max_completion_tokens: 8_192, + pricing: { + input: { normal: 0.3 }, + output: { normal: 0.9 }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object', 'json_schema', 'code'], + }, +} as const satisfies ModelMeta + +const PIXTRAL_LARGE_LATEST = { + name: 'pixtral-large-latest', + context_window: 131_072, + max_completion_tokens: 8_192, + pricing: { + input: { normal: 2 }, + output: { normal: 6 }, + }, + supports: { + input: ['text', 'image'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object', 'json_schema', 'vision'], + }, +} as const satisfies ModelMeta + +const PIXTRAL_12B_2409 = { + name: 'pixtral-12b-2409', + context_window: 131_072, + max_completion_tokens: 8_192, + pricing: { + input: { normal: 0.15 }, + output: { normal: 0.15 }, + }, + supports: { + input: ['text', 'image'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object', 'vision'], + }, +} as const satisfies ModelMeta + +const MAGISTRAL_MEDIUM_LATEST = { + name: 'magistral-medium-latest', + context_window: 40_000, + max_completion_tokens: 40_000, + pricing: { + input: { normal: 2 }, + output: { normal: 5 }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'reasoning', 'json_object', 'json_schema'], + }, +} as const satisfies ModelMeta + +const MAGISTRAL_SMALL_LATEST = { + name: 'magistral-small-latest', 
+ context_window: 40_000, + max_completion_tokens: 40_000, + pricing: { + input: { normal: 0.5 }, + output: { normal: 1.5 }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'reasoning', 'json_object', 'json_schema'], + }, +} as const satisfies ModelMeta + +const OPEN_MISTRAL_NEMO = { + name: 'open-mistral-nemo', + context_window: 131_072, + max_completion_tokens: 8_192, + pricing: { + input: { normal: 0.15 }, + output: { normal: 0.15 }, + }, + supports: { + input: ['text'], + output: ['text'], + endpoints: ['chat'], + features: ['streaming', 'tools', 'json_object'], + }, +} as const satisfies ModelMeta + +/** + * All supported Mistral chat model identifiers. + */ +export const MISTRAL_CHAT_MODELS = [ + MISTRAL_LARGE_LATEST.name, + MISTRAL_MEDIUM_LATEST.name, + MISTRAL_SMALL_LATEST.name, + MINISTRAL_8B_LATEST.name, + MINISTRAL_3B_LATEST.name, + CODESTRAL_LATEST.name, + PIXTRAL_LARGE_LATEST.name, + PIXTRAL_12B_2409.name, + MAGISTRAL_MEDIUM_LATEST.name, + MAGISTRAL_SMALL_LATEST.name, + OPEN_MISTRAL_NEMO.name, +] as const + +/** + * Union type of all supported Mistral chat model names. + */ +export type MistralChatModels = (typeof MISTRAL_CHAT_MODELS)[number] + +/** + * Type-only map from Mistral chat model name to its supported input modalities. 
+ */ +export type MistralModelInputModalitiesByName = { + [MISTRAL_LARGE_LATEST.name]: typeof MISTRAL_LARGE_LATEST.supports.input + [MISTRAL_MEDIUM_LATEST.name]: typeof MISTRAL_MEDIUM_LATEST.supports.input + [MISTRAL_SMALL_LATEST.name]: typeof MISTRAL_SMALL_LATEST.supports.input + [MINISTRAL_8B_LATEST.name]: typeof MINISTRAL_8B_LATEST.supports.input + [MINISTRAL_3B_LATEST.name]: typeof MINISTRAL_3B_LATEST.supports.input + [CODESTRAL_LATEST.name]: typeof CODESTRAL_LATEST.supports.input + [PIXTRAL_LARGE_LATEST.name]: typeof PIXTRAL_LARGE_LATEST.supports.input + [PIXTRAL_12B_2409.name]: typeof PIXTRAL_12B_2409.supports.input + [MAGISTRAL_MEDIUM_LATEST.name]: typeof MAGISTRAL_MEDIUM_LATEST.supports.input + [MAGISTRAL_SMALL_LATEST.name]: typeof MAGISTRAL_SMALL_LATEST.supports.input + [OPEN_MISTRAL_NEMO.name]: typeof OPEN_MISTRAL_NEMO.supports.input +} + +/** + * Type-only map from Mistral chat model name to its provider options type. + */ +export type MistralChatModelProviderOptionsByName = { + [K in (typeof MISTRAL_CHAT_MODELS)[number]]: MistralTextProviderOptions +} + +/** + * Resolves the provider options type for a specific Mistral model. + */ +export type ResolveProviderOptions = + TModel extends keyof MistralChatModelProviderOptionsByName + ? MistralChatModelProviderOptionsByName[TModel] + : MistralTextProviderOptions + +/** + * Resolve input modalities for a specific model. + */ +export type ResolveInputModalities = + TModel extends keyof MistralModelInputModalitiesByName + ? 
MistralModelInputModalitiesByName[TModel] + : readonly ['text'] diff --git a/packages/typescript/ai-mistral/src/text/text-provider-options.ts b/packages/typescript/ai-mistral/src/text/text-provider-options.ts new file mode 100644 index 000000000..8f27dea53 --- /dev/null +++ b/packages/typescript/ai-mistral/src/text/text-provider-options.ts @@ -0,0 +1,114 @@ +import type { + ChatCompletionMessageParam, + ChatCompletionTool, + ChatCompletionToolChoiceOption, + ResponseFormatJsonObject, + ResponseFormatJsonSchema, + ResponseFormatText, +} from '../message-types' + +/** + * Mistral-specific provider options for text/chat models. + * + * @see https://docs.mistral.ai/api/ + */ +export interface MistralTextProviderOptions { + /** + * Sampling temperature. The default varies by model; lower values make output + * more deterministic. We recommend altering this OR `top_p`, not both. + */ + temperature?: number | null + + /** + * Nucleus sampling — consider the tokens with `top_p` probability mass. + */ + top_p?: number | null + + /** + * The maximum number of tokens to generate. + */ + max_tokens?: number | null + + /** + * Stop sequences where the API will stop generating further tokens. + */ + stop?: string | Array | null + + /** + * A seed for deterministic sampling. Repeated requests with the same seed + * and parameters should return the same result (best-effort). + */ + random_seed?: number | null + + /** + * Specifies the format the model must output. + */ + response_format?: + | ResponseFormatText + | ResponseFormatJsonSchema + | ResponseFormatJsonObject + | null + + /** + * Controls which (if any) tool is called by the model. + */ + tool_choice?: ChatCompletionToolChoiceOption | null + + /** + * Whether parallel tool calls are allowed during tool use. + */ + parallel_tool_calls?: boolean | null + + /** + * Number between -2.0 and 2.0. Positive values penalize tokens based on + * their frequency in the text so far. 
+ */ + frequency_penalty?: number | null + + /** + * Number between -2.0 and 2.0. Positive values penalize tokens based on + * whether they appear in the text so far. + */ + presence_penalty?: number | null + + /** + * How many chat completion choices to generate for each input message. + */ + n?: number | null + + /** + * Prediction — used to speed up generation with speculative decoding. + */ + prediction?: { type: 'content'; content: string } | null + + /** + * Safe prompt — enables safety guarding injected into the system prompt. + */ + safe_prompt?: boolean | null +} + +/** + * Internal options interface used for validation within the adapter. + */ +export interface InternalTextProviderOptions + extends MistralTextProviderOptions { + messages: Array + model: string + stream?: boolean | null + tools?: Array +} + +/** + * External provider options (what users pass in). + */ +export type ExternalTextProviderOptions = MistralTextProviderOptions + +/** + * Validates text provider options. + * Basic validation stub — Mistral API handles detailed validation. + */ +export function validateTextProviderOptions( + _options: InternalTextProviderOptions, +): void { + // Mistral API handles detailed validation +} diff --git a/packages/typescript/ai-mistral/src/tools/function-tool.ts b/packages/typescript/ai-mistral/src/tools/function-tool.ts new file mode 100644 index 000000000..c7d82dd7e --- /dev/null +++ b/packages/typescript/ai-mistral/src/tools/function-tool.ts @@ -0,0 +1,43 @@ +import { makeMistralStructuredOutputCompatible } from '../utils/schema-converter' +import type { JSONSchema, Tool } from '@tanstack/ai' +import type { ChatCompletionTool } from '../message-types' + +export type FunctionTool = ChatCompletionTool + +/** + * Converts a standard Tool to Mistral ChatCompletionTool format. + * + * Tool schemas are already converted to JSON Schema in the ai layer. 
+ * We apply Mistral-specific transformations for strict mode: + * - All properties in required array + * - Optional fields made nullable + * - additionalProperties: false + */ +export function convertFunctionToolToAdapterFormat(tool: Tool): FunctionTool { + const inputSchema = (tool.inputSchema ?? { + type: 'object', + properties: {}, + required: [], + }) as JSONSchema + + if (inputSchema.type === 'object' && !inputSchema.properties) { + inputSchema.properties = {} + } + + const jsonSchema = makeMistralStructuredOutputCompatible( + inputSchema, + inputSchema.required || [], + ) + + jsonSchema.additionalProperties = false + + return { + type: 'function', + function: { + name: tool.name, + description: tool.description, + parameters: jsonSchema, + strict: true, + }, + } satisfies FunctionTool +} diff --git a/packages/typescript/ai-mistral/src/tools/index.ts b/packages/typescript/ai-mistral/src/tools/index.ts new file mode 100644 index 000000000..c90334153 --- /dev/null +++ b/packages/typescript/ai-mistral/src/tools/index.ts @@ -0,0 +1,5 @@ +export { + convertFunctionToolToAdapterFormat, + type FunctionTool, +} from './function-tool' +export { convertToolsToProviderFormat } from './tool-converter' diff --git a/packages/typescript/ai-mistral/src/tools/tool-converter.ts b/packages/typescript/ai-mistral/src/tools/tool-converter.ts new file mode 100644 index 000000000..c852f0643 --- /dev/null +++ b/packages/typescript/ai-mistral/src/tools/tool-converter.ts @@ -0,0 +1,14 @@ +import { convertFunctionToolToAdapterFormat } from './function-tool' +import type { FunctionTool } from './function-tool' +import type { Tool } from '@tanstack/ai' + +/** + * Converts an array of standard Tools to Mistral-specific format. 
+ */ +export function convertToolsToProviderFormat( + tools: Array, +): Array { + return tools.map((tool) => { + return convertFunctionToolToAdapterFormat(tool) + }) +} diff --git a/packages/typescript/ai-mistral/src/utils/client.ts b/packages/typescript/ai-mistral/src/utils/client.ts new file mode 100644 index 000000000..10b234207 --- /dev/null +++ b/packages/typescript/ai-mistral/src/utils/client.ts @@ -0,0 +1,53 @@ +import { Mistral } from '@mistralai/mistralai' + +export interface MistralClientConfig { + /** Mistral API key. */ + apiKey: string + + /** Optional server URL override. */ + serverURL?: string + + /** Optional request timeout (ms). */ + timeoutMs?: number +} + +/** + * Creates a Mistral SDK client instance. + */ +export function createMistralClient(config: MistralClientConfig): Mistral { + const { apiKey, serverURL, timeoutMs } = config + return new Mistral({ + apiKey, + ...(serverURL ? { serverURL } : {}), + ...(timeoutMs ? { timeoutMs } : {}), + }) +} + +/** + * Gets Mistral API key from environment variables. + * @throws Error if MISTRAL_API_KEY is not found + */ +export function getMistralApiKeyFromEnv(): string { + const env = + typeof globalThis !== 'undefined' && (globalThis as any).window?.env + ? (globalThis as any).window.env + : typeof process !== 'undefined' + ? process.env + : undefined + const key = env?.MISTRAL_API_KEY + + if (!key) { + throw new Error( + 'MISTRAL_API_KEY is required. Please set it in your environment variables or use the factory function with an explicit API key.', + ) + } + + return key +} + +/** + * Generates a unique ID with a prefix. 
+ */ +export function generateId(prefix: string): string { + return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}` +} diff --git a/packages/typescript/ai-mistral/src/utils/index.ts b/packages/typescript/ai-mistral/src/utils/index.ts new file mode 100644 index 000000000..1cb28cc0a --- /dev/null +++ b/packages/typescript/ai-mistral/src/utils/index.ts @@ -0,0 +1,10 @@ +export { + createMistralClient, + getMistralApiKeyFromEnv, + generateId, + type MistralClientConfig, +} from './client' +export { + makeMistralStructuredOutputCompatible, + transformNullsToUndefined, +} from './schema-converter' diff --git a/packages/typescript/ai-mistral/src/utils/schema-converter.ts b/packages/typescript/ai-mistral/src/utils/schema-converter.ts new file mode 100644 index 000000000..e26d12c45 --- /dev/null +++ b/packages/typescript/ai-mistral/src/utils/schema-converter.ts @@ -0,0 +1,102 @@ +/** + * Recursively transform null values to undefined in an object. + * + * This is needed because Mistral's structured output may require optional + * fields to be declared nullable. When Mistral returns null for optional + * fields, we convert them back to undefined to match the original Zod schema. + */ +export function transformNullsToUndefined(obj: T): T { + if (obj === null) { + return undefined as unknown as T + } + + if (Array.isArray(obj)) { + return obj.map((item) => transformNullsToUndefined(item)) as unknown as T + } + + if (typeof obj === 'object') { + const result: Record = {} + for (const [key, value] of Object.entries(obj as Record)) { + const transformed = transformNullsToUndefined(value) + if (transformed !== undefined) { + result[key] = transformed + } + } + return result as T + } + + return obj +} + +/** + * Transform a JSON schema to be compatible with Mistral's structured output + * requirements when `strict: true` is used. 
+ * + * Mistral (in strict mode) requires: + * - All properties must be in the `required` array + * - Optional fields should have null added to their type union + * - additionalProperties must be false for objects + */ +export function makeMistralStructuredOutputCompatible( + schema: Record, + originalRequired: Array = [], +): Record { + const result = { ...schema } + + if (result.type === 'object') { + if (!result.properties) { + result.properties = {} + } + const properties = { ...result.properties } + const allPropertyNames = Object.keys(properties) + + for (const propName of allPropertyNames) { + const prop = properties[propName] + const wasOptional = !originalRequired.includes(propName) + + if (prop.type === 'object' && prop.properties) { + properties[propName] = makeMistralStructuredOutputCompatible( + prop, + prop.required || [], + ) + } else if (prop.type === 'array' && prop.items) { + properties[propName] = { + ...prop, + items: makeMistralStructuredOutputCompatible( + prop.items, + prop.items.required || [], + ), + } + } else if (wasOptional) { + if (prop.type && !Array.isArray(prop.type)) { + properties[propName] = { + ...prop, + type: [prop.type, 'null'], + } + } else if (Array.isArray(prop.type) && !prop.type.includes('null')) { + properties[propName] = { + ...prop, + type: [...prop.type, 'null'], + } + } + } + } + + result.properties = properties + if (allPropertyNames.length > 0) { + result.required = allPropertyNames + } else { + delete result.required + } + result.additionalProperties = false + } + + if (result.type === 'array' && result.items) { + result.items = makeMistralStructuredOutputCompatible( + result.items, + result.items.required || [], + ) + } + + return result +} diff --git a/packages/typescript/ai-mistral/tests/mistral-adapter.test.ts b/packages/typescript/ai-mistral/tests/mistral-adapter.test.ts new file mode 100644 index 000000000..8befe6cf0 --- /dev/null +++ b/packages/typescript/ai-mistral/tests/mistral-adapter.test.ts @@ -0,0 
+1,498 @@ +import { + describe, + it, + expect, + vi, + afterEach, + beforeEach, + type Mock, +} from 'vitest' +import { createMistralText, mistralText } from '../src/adapters/text' +import type { StreamChunk, Tool } from '@tanstack/ai' + +// Declare mocks at module level +let mockStream: Mock<(...args: Array) => unknown> +let mockComplete: Mock<(...args: Array) => unknown> + +// Mock the Mistral SDK +vi.mock('@mistralai/mistralai', () => { + return { + Mistral: class { + chat = { + stream: (...args: Array) => mockStream(...args), + complete: (...args: Array) => mockComplete(...args), + } + }, + } +}) + +// Helper to create async iterable from chunks +function createAsyncIterable(chunks: Array): AsyncIterable { + return { + [Symbol.asyncIterator]() { + let index = 0 + return { + async next() { + if (index < chunks.length) { + return { value: chunks[index++]!, done: false } + } + return { value: undefined as T, done: true } + }, + } + }, + } +} + +function setupMockStream(chunks: Array>) { + mockStream = vi + .fn() + .mockImplementation(() => + Promise.resolve( + createAsyncIterable(chunks.map((data) => ({ data }))), + ), + ) + mockComplete = vi.fn() +} + +const weatherTool: Tool = { + name: 'lookup_weather', + description: 'Return the forecast for a location', +} + +describe('Mistral adapters', () => { + afterEach(() => { + vi.unstubAllEnvs() + }) + + describe('Text adapter', () => { + it('creates a text adapter with explicit API key', () => { + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + + expect(adapter).toBeDefined() + expect(adapter.kind).toBe('text') + expect(adapter.name).toBe('mistral') + expect(adapter.model).toBe('mistral-large-latest') + }) + + it('creates a text adapter from environment variable', () => { + vi.stubEnv('MISTRAL_API_KEY', 'env-api-key') + + const adapter = mistralText('ministral-8b-latest') + + expect(adapter).toBeDefined() + expect(adapter.kind).toBe('text') + 
expect(adapter.model).toBe('ministral-8b-latest') + }) + + it('throws if MISTRAL_API_KEY is not set when using mistralText', () => { + vi.stubEnv('MISTRAL_API_KEY', '') + + expect(() => mistralText('mistral-large-latest')).toThrow( + 'MISTRAL_API_KEY is required', + ) + }) + + it('allows custom serverURL override', () => { + const adapter = createMistralText( + 'mistral-large-latest', + 'test-api-key', + { + serverURL: 'https://custom.api.example.com', + }, + ) + + expect(adapter).toBeDefined() + }) + }) +}) + +describe('Mistral AG-UI event emission', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + afterEach(() => { + vi.unstubAllEnvs() + }) + + it('emits RUN_STARTED as the first event', async () => { + const streamChunks = [ + { + id: 'cmpl-123', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { content: 'Hello' }, + finishReason: null, + }, + ], + }, + { + id: 'cmpl-123', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: {}, + finishReason: 'stop', + }, + ], + usage: { + promptTokens: 5, + completionTokens: 1, + totalTokens: 6, + }, + }, + ] + + setupMockStream(streamChunks) + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + expect(chunks[0]?.type).toBe('RUN_STARTED') + if (chunks[0]?.type === 'RUN_STARTED') { + expect(chunks[0].runId).toBeDefined() + expect(chunks[0].model).toBe('mistral-large-latest') + } + }) + + it('emits TEXT_MESSAGE_START before TEXT_MESSAGE_CONTENT', async () => { + const streamChunks = [ + { + id: 'cmpl-123', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { content: 'Hello' }, + finishReason: null, + }, + ], + }, + { + id: 'cmpl-123', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: {}, + finishReason: 'stop', + }, + ], 
+ usage: { + promptTokens: 5, + completionTokens: 1, + totalTokens: 6, + }, + }, + ] + + setupMockStream(streamChunks) + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const textStartIndex = chunks.findIndex( + (c) => c.type === 'TEXT_MESSAGE_START', + ) + const textContentIndex = chunks.findIndex( + (c) => c.type === 'TEXT_MESSAGE_CONTENT', + ) + + expect(textStartIndex).toBeGreaterThan(-1) + expect(textContentIndex).toBeGreaterThan(-1) + expect(textStartIndex).toBeLessThan(textContentIndex) + }) + + it('emits TEXT_MESSAGE_END and RUN_FINISHED at the end', async () => { + const streamChunks = [ + { + id: 'cmpl-123', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { content: 'Hello' }, + finishReason: null, + }, + ], + }, + { + id: 'cmpl-123', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: {}, + finishReason: 'stop', + }, + ], + usage: { + promptTokens: 5, + completionTokens: 1, + totalTokens: 6, + }, + }, + ] + + setupMockStream(streamChunks) + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const textEndChunk = chunks.find((c) => c.type === 'TEXT_MESSAGE_END') + expect(textEndChunk).toBeDefined() + + const runFinishedChunk = chunks.find((c) => c.type === 'RUN_FINISHED') + expect(runFinishedChunk).toBeDefined() + if (runFinishedChunk?.type === 'RUN_FINISHED') { + expect(runFinishedChunk.finishReason).toBe('stop') + expect(runFinishedChunk.usage).toMatchObject({ + promptTokens: 5, + completionTokens: 1, + totalTokens: 6, + }) + } + }) + + it('emits AG-UI tool call events', async 
() => { + const streamChunks = [ + { + id: 'cmpl-456', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { + toolCalls: [ + { + index: 0, + id: 'call_abc123', + type: 'function', + function: { + name: 'lookup_weather', + arguments: '{"location":', + }, + }, + ], + }, + finishReason: null, + }, + ], + }, + { + id: 'cmpl-456', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { + toolCalls: [ + { + index: 0, + function: { + arguments: '"Berlin"}', + }, + }, + ], + }, + finishReason: null, + }, + ], + }, + { + id: 'cmpl-456', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: {}, + finishReason: 'tool_calls', + }, + ], + usage: { + promptTokens: 10, + completionTokens: 5, + totalTokens: 15, + }, + }, + ] + + setupMockStream(streamChunks) + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Weather in Berlin?' 
}], + tools: [weatherTool], + })) { + chunks.push(chunk) + } + + const toolStartChunk = chunks.find((c) => c.type === 'TOOL_CALL_START') + expect(toolStartChunk).toBeDefined() + if (toolStartChunk?.type === 'TOOL_CALL_START') { + expect(toolStartChunk.toolCallId).toBe('call_abc123') + expect(toolStartChunk.toolName).toBe('lookup_weather') + } + + const toolArgsChunks = chunks.filter((c) => c.type === 'TOOL_CALL_ARGS') + expect(toolArgsChunks.length).toBeGreaterThan(0) + + const toolEndChunk = chunks.find((c) => c.type === 'TOOL_CALL_END') + expect(toolEndChunk).toBeDefined() + if (toolEndChunk?.type === 'TOOL_CALL_END') { + expect(toolEndChunk.toolCallId).toBe('call_abc123') + expect(toolEndChunk.toolName).toBe('lookup_weather') + expect(toolEndChunk.input).toEqual({ location: 'Berlin' }) + } + + const runFinishedChunk = chunks.find((c) => c.type === 'RUN_FINISHED') + if (runFinishedChunk?.type === 'RUN_FINISHED') { + expect(runFinishedChunk.finishReason).toBe('tool_calls') + } + }) + + it('emits RUN_ERROR on stream error', async () => { + const streamChunks = [ + { + data: { + id: 'cmpl-123', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { content: 'Hello' }, + finishReason: null, + }, + ], + }, + }, + ] + + const errorIterable = { + [Symbol.asyncIterator]() { + let index = 0 + return { + async next() { + if (index < streamChunks.length) { + return { value: streamChunks[index++]!, done: false } + } + throw new Error('Stream interrupted') + }, + } + }, + } + + mockStream = vi.fn().mockResolvedValue(errorIterable) + mockComplete = vi.fn() + + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + + const runErrorChunk = chunks.find((c) => c.type === 'RUN_ERROR') + expect(runErrorChunk).toBeDefined() + if (runErrorChunk?.type === 
'RUN_ERROR') { + expect(runErrorChunk.error.message).toBe('Stream interrupted') + } + }) + + it('streams content with correct accumulated values', async () => { + const streamChunks = [ + { + id: 'cmpl-stream', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { content: 'Hello ' }, + finishReason: null, + }, + ], + }, + { + id: 'cmpl-stream', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { content: 'world' }, + finishReason: null, + }, + ], + }, + { + id: 'cmpl-stream', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: {}, + finishReason: 'stop', + }, + ], + usage: { + promptTokens: 5, + completionTokens: 2, + totalTokens: 7, + }, + }, + ] + + setupMockStream(streamChunks) + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + const chunks: Array = [] + + for await (const chunk of adapter.chatStream({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Say hello' }], + })) { + chunks.push(chunk) + } + + const contentChunks = chunks.filter( + (c) => c.type === 'TEXT_MESSAGE_CONTENT', + ) + expect(contentChunks.length).toBe(2) + + const firstContent = contentChunks[0] + if (firstContent?.type === 'TEXT_MESSAGE_CONTENT') { + expect(firstContent.delta).toBe('Hello ') + expect(firstContent.content).toBe('Hello ') + } + + const secondContent = contentChunks[1] + if (secondContent?.type === 'TEXT_MESSAGE_CONTENT') { + expect(secondContent.delta).toBe('world') + expect(secondContent.content).toBe('Hello world') + } + }) +}) diff --git a/packages/typescript/ai-mistral/tsconfig.json b/packages/typescript/ai-mistral/tsconfig.json new file mode 100644 index 000000000..ea11c1096 --- /dev/null +++ b/packages/typescript/ai-mistral/tsconfig.json @@ -0,0 +1,9 @@ +{ + "extends": "../../../tsconfig.json", + "compilerOptions": { + "outDir": "dist", + "rootDir": "src" + }, + "include": ["src/**/*.ts", "src/**/*.tsx"], + "exclude": ["node_modules", "dist", "**/*.config.ts"] +} 
diff --git a/packages/typescript/ai-mistral/vite.config.ts b/packages/typescript/ai-mistral/vite.config.ts new file mode 100644 index 000000000..77bcc2e60 --- /dev/null +++ b/packages/typescript/ai-mistral/vite.config.ts @@ -0,0 +1,36 @@ +import { defineConfig, mergeConfig } from 'vitest/config' +import { tanstackViteConfig } from '@tanstack/vite-config' +import packageJson from './package.json' + +const config = defineConfig({ + test: { + name: packageJson.name, + dir: './', + watch: false, + globals: true, + environment: 'node', + include: ['tests/**/*.test.ts'], + coverage: { + provider: 'v8', + reporter: ['text', 'json', 'html', 'lcov'], + exclude: [ + 'node_modules/', + 'dist/', + 'tests/', + '**/*.test.ts', + '**/*.config.ts', + '**/types.ts', + ], + include: ['src/**/*.ts'], + }, + }, +}) + +export default mergeConfig( + config, + tanstackViteConfig({ + entry: ['./src/index.ts'], + srcDir: './src', + cjs: false, + }), +) diff --git a/packages/typescript/ai-mistral/vitest.config.ts b/packages/typescript/ai-mistral/vitest.config.ts new file mode 100644 index 000000000..fa2531743 --- /dev/null +++ b/packages/typescript/ai-mistral/vitest.config.ts @@ -0,0 +1,22 @@ +import { defineConfig } from 'vitest/config' + +export default defineConfig({ + test: { + globals: true, + environment: 'node', + include: ['tests/**/*.test.ts'], + coverage: { + provider: 'v8', + reporter: ['text', 'json', 'html', 'lcov'], + exclude: [ + 'node_modules/', + 'dist/', + 'tests/', + '**/*.test.ts', + '**/*.config.ts', + '**/types.ts', + ], + include: ['src/**/*.ts'], + }, + }, +}) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index f3b0e616a..00c581564 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1139,6 +1139,25 @@ importers: specifier: 4.0.14 version: 4.0.14(vitest@4.1.4) + packages/typescript/ai-mistral: + dependencies: + '@mistralai/mistralai': + specifier: ^2.2.0 + version: 2.2.0 + '@tanstack/ai': + specifier: workspace:^ + version: link:../ai + zod: + specifier: ^4.0.0 + 
version: 4.3.6 + devDependencies: + '@vitest/coverage-v8': + specifier: 4.0.14 + version: 4.0.14(vitest@4.1.4) + vite: + specifier: ^7.2.7 + version: 7.3.2(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + packages/typescript/ai-ollama: dependencies: ollama: @@ -3209,6 +3228,9 @@ packages: '@microsoft/tsdoc@0.15.1': resolution: {integrity: sha512-4aErSrCR/On/e5G2hDP0wjooqDdauzEbIq8hIkIe5pXV0rtWJZvdCEKL0ykZxex+IxIwBp0eGeV48hQN07dXtw==} + '@mistralai/mistralai@2.2.0': + resolution: {integrity: sha512-JQUGIXjFWnw/J9LpTSf/ZXwVW3Sh8FBAcfTo5QvAHqkl4CfSiIwnjRJhMoAFcP6ncCe84YPU1ncDGX+p3OXnfg==} + '@msgpack/msgpack@3.1.3': resolution: {integrity: sha512-47XIizs9XZXvuJgoaJUIE2lFoID8ugvc0jzSHP+Ptfk8nTbnR8g788wv48N03Kx0UkAv559HWRQ3yzOgzlRNUA==} engines: {node: '>= 18'} @@ -10861,6 +10883,11 @@ packages: resolution: {integrity: sha512-zK7YHHz4ZXpW89AHXUPbQVGKI7uvkd3hzusTdotCg1UxyaVtg0zFJSTfW/Dq5f7OBBVnq6cZIaC8Ti4hb6dtCA==} engines: {node: '>= 14'} + zod-to-json-schema@3.25.2: + resolution: {integrity: sha512-O/PgfnpT1xKSDeQYSCfRI5Gy3hPf91mKVDuYLUHZJMiDFptvP41MSnWofm8dnCm0256ZNfZIM7DSzuSMAFnjHA==} + peerDependencies: + zod: ^3.25.28 || ^4 + zod@3.25.76: resolution: {integrity: sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==} @@ -12147,6 +12174,15 @@ snapshots: '@microsoft/tsdoc@0.15.1': {} + '@mistralai/mistralai@2.2.0': + dependencies: + ws: 8.19.0 + zod: 4.3.6 + zod-to-json-schema: 3.25.2(zod@4.3.6) + transitivePeerDependencies: + - bufferutil + - utf-8-validate + '@msgpack/msgpack@3.1.3': {} '@napi-rs/wasm-runtime@0.2.12': @@ -12714,25 +12750,25 @@ snapshots: '@rolldown/pluginutils@1.0.0-beta.53': {} - '@rollup/plugin-alias@5.1.1(rollup@4.57.1)': + '@rollup/plugin-alias@5.1.1(rollup@4.60.1)': optionalDependencies: - rollup: 4.57.1 + rollup: 4.60.1 '@rollup/plugin-alias@6.0.0(rollup@4.57.1)': optionalDependencies: rollup: 4.57.1 - '@rollup/plugin-commonjs@28.0.9(rollup@4.57.1)': + 
'@rollup/plugin-commonjs@28.0.9(rollup@4.60.1)': dependencies: - '@rollup/pluginutils': 5.3.0(rollup@4.57.1) + '@rollup/pluginutils': 5.3.0(rollup@4.60.1) commondir: 1.0.1 estree-walker: 2.0.2 - fdir: 6.5.0(picomatch@4.0.3) + fdir: 6.5.0(picomatch@4.0.4) is-reference: 1.2.1 magic-string: 0.30.21 - picomatch: 4.0.3 + picomatch: 4.0.4 optionalDependencies: - rollup: 4.57.1 + rollup: 4.60.1 '@rollup/plugin-commonjs@29.0.0(rollup@4.57.1)': dependencies: @@ -12754,12 +12790,26 @@ snapshots: optionalDependencies: rollup: 4.57.1 + '@rollup/plugin-inject@5.0.5(rollup@4.60.1)': + dependencies: + '@rollup/pluginutils': 5.3.0(rollup@4.60.1) + estree-walker: 2.0.2 + magic-string: 0.30.21 + optionalDependencies: + rollup: 4.60.1 + '@rollup/plugin-json@6.1.0(rollup@4.57.1)': dependencies: '@rollup/pluginutils': 5.3.0(rollup@4.57.1) optionalDependencies: rollup: 4.57.1 + '@rollup/plugin-json@6.1.0(rollup@4.60.1)': + dependencies: + '@rollup/pluginutils': 5.3.0(rollup@4.60.1) + optionalDependencies: + rollup: 4.60.1 + '@rollup/plugin-node-resolve@16.0.3(rollup@4.57.1)': dependencies: '@rollup/pluginutils': 5.3.0(rollup@4.57.1) @@ -12770,6 +12820,16 @@ snapshots: optionalDependencies: rollup: 4.57.1 + '@rollup/plugin-node-resolve@16.0.3(rollup@4.60.1)': + dependencies: + '@rollup/pluginutils': 5.3.0(rollup@4.60.1) + '@types/resolve': 1.20.2 + deepmerge: 4.3.1 + is-module: 1.0.0 + resolve: 1.22.11 + optionalDependencies: + rollup: 4.60.1 + '@rollup/plugin-replace@6.0.3(rollup@4.57.1)': dependencies: '@rollup/pluginutils': 5.3.0(rollup@4.57.1) @@ -12777,6 +12837,13 @@ snapshots: optionalDependencies: rollup: 4.57.1 + '@rollup/plugin-replace@6.0.3(rollup@4.60.1)': + dependencies: + '@rollup/pluginutils': 5.3.0(rollup@4.60.1) + magic-string: 0.30.21 + optionalDependencies: + rollup: 4.60.1 + '@rollup/plugin-terser@0.4.4(rollup@4.57.1)': dependencies: serialize-javascript: 6.0.2 @@ -12785,6 +12852,14 @@ snapshots: optionalDependencies: rollup: 4.57.1 + 
'@rollup/plugin-terser@0.4.4(rollup@4.60.1)': + dependencies: + serialize-javascript: 6.0.2 + smob: 1.5.0 + terser: 5.44.1 + optionalDependencies: + rollup: 4.60.1 + '@rollup/pluginutils@5.3.0(rollup@4.57.1)': dependencies: '@types/estree': 1.0.8 @@ -14128,7 +14203,7 @@ snapshots: ansis: 4.2.0 diff: 8.0.2 pathe: 2.0.3 - tinyglobby: 0.2.15 + tinyglobby: 0.2.16 transitivePeerDependencies: - supports-color @@ -15052,7 +15127,7 @@ snapshots: debug: 4.4.3 minimatch: 9.0.5 semver: 7.7.4 - tinyglobby: 0.2.15 + tinyglobby: 0.2.16 ts-api-utils: 2.1.0(typescript@5.9.3) typescript: 5.9.3 transitivePeerDependencies: @@ -15146,10 +15221,10 @@ snapshots: '@unrs/resolver-binding-win32-x64-msvc@1.11.1': optional: true - '@vercel/nft@0.30.4(rollup@4.57.1)': + '@vercel/nft@0.30.4(rollup@4.60.1)': dependencies: '@mapbox/node-pre-gyp': 2.0.3 - '@rollup/pluginutils': 5.3.0(rollup@4.57.1) + '@rollup/pluginutils': 5.3.0(rollup@4.60.1) acorn: 8.15.0 acorn-import-attributes: 1.9.5(acorn@8.15.0) async-sema: 3.1.1 @@ -15158,7 +15233,7 @@ snapshots: glob: 10.5.0 graceful-fs: 4.2.11 node-gyp-build: 4.8.4 - picomatch: 4.0.3 + picomatch: 4.0.4 resolve-from: 5.0.0 transitivePeerDependencies: - encoding @@ -18715,14 +18790,14 @@ snapshots: nitropack@2.12.9(rolldown@1.0.0-beta.53): dependencies: '@cloudflare/kv-asset-handler': 0.4.1 - '@rollup/plugin-alias': 5.1.1(rollup@4.57.1) - '@rollup/plugin-commonjs': 28.0.9(rollup@4.57.1) - '@rollup/plugin-inject': 5.0.5(rollup@4.57.1) - '@rollup/plugin-json': 6.1.0(rollup@4.57.1) - '@rollup/plugin-node-resolve': 16.0.3(rollup@4.57.1) - '@rollup/plugin-replace': 6.0.3(rollup@4.57.1) - '@rollup/plugin-terser': 0.4.4(rollup@4.57.1) - '@vercel/nft': 0.30.4(rollup@4.57.1) + '@rollup/plugin-alias': 5.1.1(rollup@4.60.1) + '@rollup/plugin-commonjs': 28.0.9(rollup@4.60.1) + '@rollup/plugin-inject': 5.0.5(rollup@4.60.1) + '@rollup/plugin-json': 6.1.0(rollup@4.60.1) + '@rollup/plugin-node-resolve': 16.0.3(rollup@4.60.1) + '@rollup/plugin-replace': 6.0.3(rollup@4.60.1) 
+ '@rollup/plugin-terser': 0.4.4(rollup@4.60.1) + '@vercel/nft': 0.30.4(rollup@4.60.1) archiver: 7.0.1 c12: 3.3.2(magicast@0.5.2) chokidar: 4.0.3 @@ -18764,8 +18839,8 @@ snapshots: pkg-types: 2.3.0 pretty-bytes: 7.1.0 radix3: 1.1.2 - rollup: 4.57.1 - rollup-plugin-visualizer: 6.0.5(rolldown@1.0.0-beta.53)(rollup@4.57.1) + rollup: 4.60.1 + rollup-plugin-visualizer: 6.0.5(rolldown@1.0.0-beta.53)(rollup@4.60.1) scule: 1.3.0 semver: 7.7.4 serve-placeholder: 2.0.2 @@ -19822,6 +19897,16 @@ snapshots: rolldown: 1.0.0-beta.53 rollup: 4.57.1 + rollup-plugin-visualizer@6.0.5(rolldown@1.0.0-beta.53)(rollup@4.60.1): + dependencies: + open: 8.4.2 + picomatch: 4.0.3 + source-map: 0.7.6 + yargs: 17.7.2 + optionalDependencies: + rolldown: 1.0.0-beta.53 + rollup: 4.60.1 + rollup@4.53.3: dependencies: '@types/estree': 1.0.8 @@ -20562,7 +20647,7 @@ snapshots: ts-declaration-location@1.0.7(typescript@5.9.3): dependencies: - picomatch: 4.0.3 + picomatch: 4.0.4 typescript: 5.9.3 ts-interface-checker@0.1.13: {} @@ -20784,11 +20869,11 @@ snapshots: magic-string: 0.30.21 mlly: 1.8.0 pathe: 2.0.3 - picomatch: 4.0.3 + picomatch: 4.0.4 pkg-types: 2.3.0 scule: 1.3.0 strip-literal: 3.1.0 - tinyglobby: 0.2.15 + tinyglobby: 0.2.16 unplugin: 2.3.11 unplugin-utils: 0.3.1 @@ -21185,11 +21270,11 @@ snapshots: vite@6.4.1(@types/node@24.10.3)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2): dependencies: esbuild: 0.25.12 - fdir: 6.5.0(picomatch@4.0.3) - picomatch: 4.0.3 - postcss: 8.5.6 - rollup: 4.57.1 - tinyglobby: 0.2.15 + fdir: 6.5.0(picomatch@4.0.4) + picomatch: 4.0.4 + postcss: 8.5.9 + rollup: 4.60.1 + tinyglobby: 0.2.16 optionalDependencies: '@types/node': 24.10.3 fsevents: 2.3.3 @@ -21617,6 +21702,10 @@ snapshots: compress-commons: 6.0.2 readable-stream: 4.7.0 + zod-to-json-schema@3.25.2(zod@4.3.6): + dependencies: + zod: 4.3.6 + zod@3.25.76: {} zod@4.2.1: {} From 56da4e6f2e809a50a340b846d356a2483991ede5 Mon Sep 17 00:00:00 2001 From: Tim Raderschad Date: Sun, 19 Apr 
2026 08:08:04 +0200 Subject: [PATCH 2/9] feat: implement Mistral text adapter with enhanced streaming and error handling --- packages/typescript/ai-mistral/package.json | 4 + .../ai-mistral/src/adapters/text.ts | 376 ++++++++++++++---- .../ai-mistral/src/message-types.ts | 10 +- .../typescript/ai-mistral/src/model-meta.ts | 24 +- .../ai-mistral/src/tools/function-tool.ts | 11 +- .../typescript/ai-mistral/src/utils/client.ts | 51 ++- .../ai-mistral/src/utils/schema-converter.ts | 36 +- .../ai-mistral/tests/mistral-adapter.test.ts | 144 ++++--- pnpm-lock.yaml | 3 + testing/e2e/package.json | 1 + testing/e2e/src/lib/feature-support.ts | 12 + testing/e2e/src/lib/providers.ts | 9 + testing/e2e/src/lib/types.ts | 2 + testing/e2e/tests/test-matrix.ts | 124 +----- 14 files changed, 520 insertions(+), 287 deletions(-) diff --git a/packages/typescript/ai-mistral/package.json b/packages/typescript/ai-mistral/package.json index 53ea79415..286bcf4fd 100644 --- a/packages/typescript/ai-mistral/package.json +++ b/packages/typescript/ai-mistral/package.json @@ -16,6 +16,10 @@ ".": { "types": "./dist/esm/index.d.ts", "import": "./dist/esm/index.js" + }, + "./adapters/text": { + "types": "./dist/esm/adapters/text.d.ts", + "import": "./dist/esm/adapters/text.js" } }, "files": [ diff --git a/packages/typescript/ai-mistral/src/adapters/text.ts b/packages/typescript/ai-mistral/src/adapters/text.ts index bfc2f1f12..be8165795 100644 --- a/packages/typescript/ai-mistral/src/adapters/text.ts +++ b/packages/typescript/ai-mistral/src/adapters/text.ts @@ -33,10 +33,121 @@ import type { } from '../message-types' import type { MistralClientConfig } from '../utils' +function messagesToSnakeCase( + messages: Array, +): Array { + return messages.map((msg) => { + if (msg.role === 'tool') { + return { + role: 'tool', + tool_call_id: msg.toolCallId, + content: msg.content, + ...(msg.name !== undefined ? 
{ name: msg.name } : {}), + } + } + if (msg.role === 'assistant') { + const base: Record = { + role: 'assistant', + content: msg.content ?? null, + } + if (msg.toolCalls && msg.toolCalls.length > 0) { + base.tool_calls = msg.toolCalls.map((tc) => ({ + id: tc.id, + type: tc.type ?? 'function', + function: tc.function, + })) + } + if (msg.prefix !== undefined) base.prefix = msg.prefix + return base + } + if (msg.role === 'user' && Array.isArray(msg.content)) { + return { + role: 'user', + content: msg.content.map((part) => { + if (part.type === 'image_url') { + return { type: 'image_url', image_url: part.imageUrl } + } + if (part.type === 'document_url') { + return { type: 'document_url', document_url: part.documentUrl } + } + return part + }), + } + } + return msg + }) +} + +function rawChunkToCamelCase(raw: Record): MistralStreamChunk { + const rawChoices = (raw.choices as Array>) ?? [] + return { + id: raw.id as string | undefined, + model: raw.model as string | undefined, + choices: rawChoices.map((choice) => { + const delta = (choice.delta as Record) ?? {} + const rawToolCalls = delta.tool_calls as + | Array> + | undefined + return { + index: choice.index as number | undefined, + delta: { + role: delta.role as string | null | undefined, + content: delta.content as + | string + | Array<{ type: string; text?: string }> + | null + | undefined, + toolCalls: rawToolCalls?.map((tc) => ({ + id: tc.id as string | undefined, + type: tc.type as string | undefined, + index: tc.index as number | undefined, + function: tc.function as { + name?: string + arguments?: string | Record + }, + })), + }, + finishReason: (choice.finish_reason as string | null | undefined) ?? null, + } + }), + usage: raw.usage + ? (() => { + const u = raw.usage as Record + return { + promptTokens: (u.prompt_tokens as number | undefined) ?? 0, + completionTokens: (u.completion_tokens as number | undefined) ?? 0, + totalTokens: (u.total_tokens as number | undefined) ?? 
0, + } + })() + : undefined, + } +} + +interface RawStreamParams { + model: string + messages: Array + temperature?: number | null + maxTokens?: number | null + topP?: number | null + tools?: unknown + stop?: unknown + randomSeed?: number | null + responseFormat?: unknown + toolChoice?: unknown + parallelToolCalls?: boolean | null + frequencyPenalty?: number | null + presencePenalty?: number | null + n?: number | null + prediction?: unknown + safePrompt?: boolean | null + stream?: true + [key: string]: unknown +} + /** * Configuration for Mistral text adapter. */ -export interface MistralTextConfig extends MistralClientConfig {} +export type MistralTextConfig = MistralClientConfig /** * Alias for TextProviderOptions for external use. @@ -90,14 +201,19 @@ export class MistralTextAdapter< ResolveInputModalities, MistralMessageMetadataByModality > { - readonly kind = 'text' as const readonly name = 'mistral' as const private client: Mistral + private rawConfig: MistralClientConfig constructor(config: MistralTextConfig, model: TModel) { - super({}, model) + super(config, model) + // Retained for structuredOutput (see structuredOutput method); not used on + // streaming paths, which go through fetchRawMistralStream instead. E2E tests + // route Mistral through llmock via providers.ts (serverURL: base), so the + // custom SSE path remains covered. 
this.client = createMistralClient(config) + this.rawConfig = config } async *chatStream( @@ -111,13 +227,11 @@ export class MistralTextAdapter< messageId: generateId(this.name), timestamp, hasEmittedRunStarted: false, + hasEmittedRunError: false, } try { - const stream = (await this.client.chat.stream( - requestParams as any, - )) as unknown as AsyncIterable - + const stream = this.fetchRawMistralStream(requestParams, this.rawConfig) yield* this.processMistralStreamChunks(stream, options, aguiState) } catch (error: unknown) { const err = error as Error & { code?: string } @@ -132,21 +246,20 @@ export class MistralTextAdapter< } } - yield { - type: 'RUN_ERROR', - runId: aguiState.runId, - model: options.model, - timestamp, - error: { - message: err.message || 'Unknown error', - code: err.code, - }, + if (!aguiState.hasEmittedRunError) { + yield { + type: 'RUN_ERROR', + runId: aguiState.runId, + model: options.model, + timestamp, + error: { + message: err.message || 'Unknown error', + code: err.code, + }, + } } - console.error('>>> chatStream: Fatal error during response creation <<<') - console.error('>>> Error message:', err.message) - console.error('>>> Error stack:', err.stack) - console.error('>>> Full error:', err) + throw err } } @@ -164,46 +277,39 @@ export class MistralTextAdapter< outputSchema.required || [], ) - try { - const { stream: _stream, ...nonStreamParams } = requestParams - const response = (await this.client.chat.complete({ - ...nonStreamParams, - responseFormat: { - type: 'json_schema', - jsonSchema: { - name: 'structured_output', - schemaDefinition: jsonSchema, - strict: true, - }, + const { stream: _stream, ...nonStreamParams } = requestParams + const response = (await this.client.chat.complete({ + ...nonStreamParams, + responseFormat: { + type: 'json_schema', + jsonSchema: { + name: 'structured_output', + schemaDefinition: jsonSchema, + strict: true, }, - } as any)) as { - choices?: Array<{ message?: { content?: string | null } }> - } + }, + 
} as Parameters[0])) as { + choices?: Array<{ message?: { content?: string | null } }> + } - const rawText = response.choices?.[0]?.message?.content || '' - const textContent = - typeof rawText === 'string' ? rawText : String(rawText) - - let parsed: unknown - try { - parsed = JSON.parse(textContent) - } catch { - throw new Error( - `Failed to parse structured output as JSON. Content: ${textContent.slice(0, 200)}${textContent.length > 200 ? '...' : ''}`, - ) - } + const rawText = response.choices?.[0]?.message?.content || '' + const textContent = + typeof rawText === 'string' ? rawText : String(rawText) - const transformed = transformNullsToUndefined(parsed) + let parsed: unknown + try { + parsed = JSON.parse(textContent) + } catch { + throw new Error( + `Failed to parse structured output as JSON. Content: ${textContent.slice(0, 200)}${textContent.length > 200 ? '...' : ''}`, + ) + } - return { - data: transformed, - rawText: textContent, - } - } catch (error: unknown) { - const err = error as Error - console.error('>>> structuredOutput: Error during response creation <<<') - console.error('>>> Error message:', err.message) - throw error + const transformed = transformNullsToUndefined(parsed) + + return { + data: transformed, + rawText: textContent, } } @@ -218,11 +324,13 @@ export class MistralTextAdapter< messageId: string timestamp: number hasEmittedRunStarted: boolean + hasEmittedRunError: boolean }, ): AsyncIterable { let accumulatedContent = '' const timestamp = aguiState.timestamp let hasEmittedTextMessageStart = false + let hasEmittedToolCall = false const toolCallsInProgress = new Map< number, @@ -231,6 +339,7 @@ export class MistralTextAdapter< name: string arguments: string started: boolean + ended: boolean } >() @@ -290,6 +399,7 @@ export class MistralTextAdapter< name: toolCallDelta.function.name || '', arguments: '', started: false, + ended: false, }) } @@ -301,11 +411,14 @@ export class MistralTextAdapter< if (toolCallDelta.function.name) { 
toolCall.name = toolCallDelta.function.name } - if (toolCallDelta.function.arguments !== undefined) { - const argsDelta = - typeof toolCallDelta.function.arguments === 'string' + const argsDelta = + toolCallDelta.function.arguments !== undefined + ? typeof toolCallDelta.function.arguments === 'string' ? toolCallDelta.function.arguments : JSON.stringify(toolCallDelta.function.arguments) + : undefined + + if (argsDelta !== undefined) { toolCall.arguments += argsDelta } @@ -321,11 +434,7 @@ export class MistralTextAdapter< } } - if (toolCallDelta.function.arguments !== undefined && toolCall.started) { - const argsDelta = - typeof toolCallDelta.function.arguments === 'string' - ? toolCallDelta.function.arguments - : JSON.stringify(toolCallDelta.function.arguments) + if (argsDelta !== undefined && toolCall.started) { yield { type: 'TOOL_CALL_ARGS', toolCallId: toolCall.id, @@ -343,7 +452,7 @@ export class MistralTextAdapter< toolCallsInProgress.size > 0 ) { for (const [, toolCall] of toolCallsInProgress) { - if (!toolCall.started || !toolCall.id || !toolCall.name) { + if (!toolCall.started || !toolCall.id || !toolCall.name || toolCall.ended) { continue } @@ -356,6 +465,8 @@ export class MistralTextAdapter< parsedInput = {} } + toolCall.ended = true + hasEmittedToolCall = true yield { type: 'TOOL_CALL_END', toolCallId: toolCall.id, @@ -368,8 +479,7 @@ export class MistralTextAdapter< } const computedFinishReason = - choice.finishReason === 'tool_calls' || - toolCallsInProgress.size > 0 + choice.finishReason === 'tool_calls' || hasEmittedToolCall ? 'tool_calls' : choice.finishReason === 'length' ? 
'length' @@ -404,8 +514,8 @@ export class MistralTextAdapter< } } catch (error: unknown) { const err = error as Error & { code?: string } - console.log('[Mistral Adapter] Stream ended with error:', err.message) + aguiState.hasEmittedRunError = true yield { type: 'RUN_ERROR', runId: aguiState.runId, @@ -416,6 +526,107 @@ export class MistralTextAdapter< code: err.code, }, } + throw err + } + } + + /** + * Makes a raw fetch request to the Mistral chat completions endpoint and + * parses the SSE stream manually, bypassing the SDK's Zod validation which + * rejects streaming tool call chunks that omit `name` in argument deltas. + */ + private async *fetchRawMistralStream( + params: RawStreamParams, + config: MistralClientConfig, + ): AsyncGenerator { + const serverURL = (config.serverURL ?? 'https://api.mistral.ai') + .replace(/\/+$/, '') + .replace(/\/v1$/, '') + const url = `${serverURL}/v1/chat/completions` + + const { + stream: _stream, + messages, + maxTokens, + topP, + randomSeed, + responseFormat, + toolChoice, + parallelToolCalls, + frequencyPenalty, + presencePenalty, + safePrompt, + ...rest + } = params + + const body: Record = { + ...rest, + messages: messagesToSnakeCase(messages), + stream: true, + ...(maxTokens != null && { max_tokens: maxTokens }), + ...(topP != null && { top_p: topP }), + ...(randomSeed != null && { random_seed: randomSeed }), + ...(responseFormat != null && { response_format: responseFormat }), + ...(toolChoice != null && { tool_choice: toolChoice }), + ...(parallelToolCalls != null && { + parallel_tool_calls: parallelToolCalls, + }), + ...(frequencyPenalty != null && { + frequency_penalty: frequencyPenalty, + }), + ...(presencePenalty != null && { + presence_penalty: presencePenalty, + }), + ...(safePrompt != null && { safe_prompt: safePrompt }), + } + + const headers: Record = { + 'Content-Type': 'application/json', + Authorization: `Bearer ${config.apiKey}`, + ...config.defaultHeaders, + } + + const response = await fetch(url, { + 
method: 'POST', + headers, + body: JSON.stringify(body), + }) + + if (!response.ok || !response.body) { + const errorText = await response.text() + throw new Error(`Mistral API error ${response.status}: ${errorText}`) + } + + const reader = response.body.getReader() + const decoder = new TextDecoder() + let buffer = '' + + try { + while (true) { + const { done, value } = await reader.read() + if (done) break + + buffer += decoder.decode(value, { stream: true }) + const lines = buffer.split('\n') + buffer = lines.pop()! + + for (const line of lines) { + const trimmed = line.trim() + if (!trimmed.startsWith('data:')) continue + const data = trimmed.slice(5).trimStart() + if (data === '[DONE]') return + + try { + const raw = JSON.parse(data) as Record + yield { data: rawChunkToCamelCase(raw) } + } catch { + // skip malformed chunks + } + } + } + } finally { + await reader.cancel().catch(() => {}) + reader.releaseLock() } } @@ -437,15 +648,7 @@ export class MistralTextAdapter< /** * Maps common TextOptions to Mistral Chat Completions request parameters. 
*/ - private mapTextOptionsToMistral(options: TextOptions): { - model: string - messages: Array - temperature?: number | null - maxTokens?: number | null - topP?: number | null - tools?: Array - stream: true - } { + private mapTextOptionsToMistral(options: TextOptions) { const modelOptions = options.modelOptions as | Omit< InternalTextProviderOptions, @@ -484,7 +687,19 @@ export class MistralTextAdapter< maxTokens: options.maxTokens, topP: options.topP, tools, - stream: true, + stream: true as const, + ...(modelOptions && { + ...(modelOptions.stop !== undefined && { stop: modelOptions.stop }), + ...(modelOptions.random_seed !== undefined && { randomSeed: modelOptions.random_seed }), + ...(modelOptions.response_format !== undefined && { responseFormat: modelOptions.response_format }), + ...(modelOptions.tool_choice !== undefined && { toolChoice: modelOptions.tool_choice }), + ...(modelOptions.parallel_tool_calls !== undefined && { parallelToolCalls: modelOptions.parallel_tool_calls }), + ...(modelOptions.frequency_penalty !== undefined && { frequencyPenalty: modelOptions.frequency_penalty }), + ...(modelOptions.presence_penalty !== undefined && { presencePenalty: modelOptions.presence_penalty }), + ...(modelOptions.n !== undefined && { n: modelOptions.n }), + ...(modelOptions.prediction !== undefined && { prediction: modelOptions.prediction }), + ...(modelOptions.safe_prompt !== undefined && { safePrompt: modelOptions.safe_prompt }), + }), } } @@ -495,9 +710,12 @@ export class MistralTextAdapter< message: ModelMessage, ): ChatCompletionMessageParam { if (message.role === 'tool') { + if (!message.toolCallId) { + throw new Error('Missing toolCallId for tool message') + } return { role: 'tool', - toolCallId: message.toolCallId || '', + toolCallId: message.toolCallId, content: typeof message.content === 'string' ? 
message.content diff --git a/packages/typescript/ai-mistral/src/message-types.ts b/packages/typescript/ai-mistral/src/message-types.ts index 553ad9867..2b150282c 100644 --- a/packages/typescript/ai-mistral/src/message-types.ts +++ b/packages/typescript/ai-mistral/src/message-types.ts @@ -143,7 +143,7 @@ export interface ResponseFormatJsonSchema { jsonSchema: { name: string description?: string - schemaDefinition?: { [key: string]: unknown } + schemaDefinition: { [key: string]: unknown } strict?: boolean } } @@ -151,7 +151,7 @@ export interface ResponseFormatJsonSchema { /** * Metadata for Mistral text content parts. */ -export interface MistralTextMetadata {} +export type MistralTextMetadata = Record /** * Metadata for Mistral image content parts. @@ -168,19 +168,19 @@ export interface MistralImageMetadata { * Metadata for Mistral audio content parts. * Mistral does not currently support audio input. */ -export interface MistralAudioMetadata {} +export type MistralAudioMetadata = Record /** * Metadata for Mistral video content parts. * Mistral does not currently support video input. */ -export interface MistralVideoMetadata {} +export type MistralVideoMetadata = Record /** * Metadata for Mistral document content parts. * Used with document understanding models via `document_url` parts. */ -export interface MistralDocumentMetadata {} +export type MistralDocumentMetadata = Record /** * Map of modality types to their Mistral-specific metadata types. diff --git a/packages/typescript/ai-mistral/src/model-meta.ts b/packages/typescript/ai-mistral/src/model-meta.ts index 33991193c..1c78b8dfb 100644 --- a/packages/typescript/ai-mistral/src/model-meta.ts +++ b/packages/typescript/ai-mistral/src/model-meta.ts @@ -1,5 +1,11 @@ import type { MistralTextProviderOptions } from './text/text-provider-options' +/** Provider options for vision-capable Mistral models (pixtral-*). 
*/ +export type MistralVisionProviderOptions = MistralTextProviderOptions + +/** Provider options for reasoning-capable Mistral models (magistral-*). */ +export type MistralReasoningProviderOptions = MistralTextProviderOptions + /** * Internal metadata structure describing a Mistral model's capabilities * and approximate pricing (USD per million tokens). @@ -32,11 +38,11 @@ interface ModelMeta { const MISTRAL_LARGE_LATEST = { name: 'mistral-large-latest', - context_window: 131_072, + context_window: 256_000, max_completion_tokens: 8_192, pricing: { - input: { normal: 2 }, - output: { normal: 6 }, + input: { normal: 0.5 }, + output: { normal: 1.5 }, }, supports: { input: ['text'], @@ -249,7 +255,17 @@ export type MistralModelInputModalitiesByName = { * Type-only map from Mistral chat model name to its provider options type. */ export type MistralChatModelProviderOptionsByName = { - [K in (typeof MISTRAL_CHAT_MODELS)[number]]: MistralTextProviderOptions + [MISTRAL_LARGE_LATEST.name]: MistralTextProviderOptions + [MISTRAL_MEDIUM_LATEST.name]: MistralVisionProviderOptions + [MISTRAL_SMALL_LATEST.name]: MistralVisionProviderOptions + [MINISTRAL_8B_LATEST.name]: MistralTextProviderOptions + [MINISTRAL_3B_LATEST.name]: MistralTextProviderOptions + [CODESTRAL_LATEST.name]: MistralTextProviderOptions + [PIXTRAL_LARGE_LATEST.name]: MistralVisionProviderOptions + [PIXTRAL_12B_2409.name]: MistralVisionProviderOptions + [MAGISTRAL_MEDIUM_LATEST.name]: MistralReasoningProviderOptions + [MAGISTRAL_SMALL_LATEST.name]: MistralReasoningProviderOptions + [OPEN_MISTRAL_NEMO.name]: MistralTextProviderOptions } /** diff --git a/packages/typescript/ai-mistral/src/tools/function-tool.ts b/packages/typescript/ai-mistral/src/tools/function-tool.ts index c7d82dd7e..8e2da9d89 100644 --- a/packages/typescript/ai-mistral/src/tools/function-tool.ts +++ b/packages/typescript/ai-mistral/src/tools/function-tool.ts @@ -14,23 +14,22 @@ export type FunctionTool = ChatCompletionTool * - 
additionalProperties: false */ export function convertFunctionToolToAdapterFormat(tool: Tool): FunctionTool { - const inputSchema = (tool.inputSchema ?? { + const baseSchema = (tool.inputSchema ?? { type: 'object', properties: {}, required: [], }) as JSONSchema - if (inputSchema.type === 'object' && !inputSchema.properties) { - inputSchema.properties = {} - } + const inputSchema: JSONSchema = + baseSchema.type === 'object' && !baseSchema.properties + ? { ...baseSchema, properties: {} } + : { ...baseSchema } const jsonSchema = makeMistralStructuredOutputCompatible( inputSchema, inputSchema.required || [], ) - jsonSchema.additionalProperties = false - return { type: 'function', function: { diff --git a/packages/typescript/ai-mistral/src/utils/client.ts b/packages/typescript/ai-mistral/src/utils/client.ts index 10b234207..1d54a39d0 100644 --- a/packages/typescript/ai-mistral/src/utils/client.ts +++ b/packages/typescript/ai-mistral/src/utils/client.ts @@ -1,4 +1,4 @@ -import { Mistral } from '@mistralai/mistralai' +import { Mistral, HTTPClient } from '@mistralai/mistralai' export interface MistralClientConfig { /** Mistral API key. */ @@ -9,17 +9,33 @@ export interface MistralClientConfig { /** Optional request timeout (ms). */ timeoutMs?: number + + /** Optional default headers to include with every request. */ + defaultHeaders?: Record } /** * Creates a Mistral SDK client instance. */ export function createMistralClient(config: MistralClientConfig): Mistral { - const { apiKey, serverURL, timeoutMs } = config + const { apiKey, serverURL, timeoutMs, defaultHeaders } = config + + let httpClient: HTTPClient | undefined + if (defaultHeaders && Object.keys(defaultHeaders).length > 0) { + httpClient = new HTTPClient() + httpClient.addHook('beforeRequest', (req) => { + for (const [key, value] of Object.entries(defaultHeaders)) { + req.headers.set(key, value) + } + return req + }) + } + return new Mistral({ apiKey, - ...(serverURL ? { serverURL } : {}), - ...(timeoutMs ? 
{ timeoutMs } : {}), + ...(serverURL !== undefined ? { serverURL } : {}), + ...(timeoutMs !== undefined ? { timeoutMs } : {}), + ...(httpClient !== undefined ? { httpClient } : {}), }) } @@ -28,26 +44,35 @@ export function createMistralClient(config: MistralClientConfig): Mistral { * @throws Error if MISTRAL_API_KEY is not found */ export function getMistralApiKeyFromEnv(): string { - const env = - typeof globalThis !== 'undefined' && (globalThis as any).window?.env - ? (globalThis as any).window.env - : typeof process !== 'undefined' - ? process.env - : undefined - const key = env?.MISTRAL_API_KEY + let key: string | undefined + + if (typeof process !== 'undefined' && typeof process.env !== 'undefined') { + key = process.env.MISTRAL_API_KEY + } else { + const g = globalThis as { window?: { env?: Record } } + key = g.window?.env?.MISTRAL_API_KEY + } if (!key) { throw new Error( - 'MISTRAL_API_KEY is required. Please set it in your environment variables or use the factory function with an explicit API key.', + 'MISTRAL_API_KEY is required. In Node.js set it as an environment variable; in browser environments inject it via window.env.MISTRAL_API_KEY or use the factory function with an explicit API key.', ) } return key } +function uuidv4Fallback(): string { + return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => { + const r = (Math.random() * 16) | 0 + return (c === 'x' ? r : (r & 0x3) | 0x8).toString(16) + }) +} + /** * Generates a unique ID with a prefix. */ export function generateId(prefix: string): string { - return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}` + const uuid = globalThis.crypto?.randomUUID?.() ?? 
uuidv4Fallback() + return `${prefix}-${uuid}` } diff --git a/packages/typescript/ai-mistral/src/utils/schema-converter.ts b/packages/typescript/ai-mistral/src/utils/schema-converter.ts index e26d12c45..c5849be08 100644 --- a/packages/typescript/ai-mistral/src/utils/schema-converter.ts +++ b/packages/typescript/ai-mistral/src/utils/schema-converter.ts @@ -11,10 +11,12 @@ export function transformNullsToUndefined(obj: T): T { } if (Array.isArray(obj)) { - return obj.map((item) => transformNullsToUndefined(item)) as unknown as T + return obj + .map((item) => transformNullsToUndefined(item)) + .filter((item) => item !== undefined) as unknown as T } - if (typeof obj === 'object') { + if (typeof obj === 'object' && Object.getPrototypeOf(obj) === Object.prototype) { const result: Record = {} for (const [key, value] of Object.entries(obj as Record)) { const transformed = transformNullsToUndefined(value) @@ -55,18 +57,42 @@ export function makeMistralStructuredOutputCompatible( const wasOptional = !originalRequired.includes(propName) if (prop.type === 'object' && prop.properties) { - properties[propName] = makeMistralStructuredOutputCompatible( + const converted = makeMistralStructuredOutputCompatible( prop, prop.required || [], ) + if (wasOptional) { + properties[propName] = { + ...converted, + type: Array.isArray(converted.type) + ? converted.type.includes('null') + ? converted.type + : [...converted.type, 'null'] + : [converted.type, 'null'], + } + } else { + properties[propName] = converted + } } else if (prop.type === 'array' && prop.items) { - properties[propName] = { + const converted = { ...prop, items: makeMistralStructuredOutputCompatible( prop.items, prop.items.required || [], ), } + if (wasOptional) { + properties[propName] = { + ...converted, + type: Array.isArray(converted.type) + ? converted.type.includes('null') + ? 
converted.type + : [...converted.type, 'null'] + : [converted.type, 'null'], + } + } else { + properties[propName] = converted + } } else if (wasOptional) { if (prop.type && !Array.isArray(prop.type)) { properties[propName] = { @@ -78,6 +104,8 @@ export function makeMistralStructuredOutputCompatible( ...prop, type: [...prop.type, 'null'], } + } else if (!prop.type) { + properties[propName] = { anyOf: [prop, { type: 'null' }] } } } } diff --git a/packages/typescript/ai-mistral/tests/mistral-adapter.test.ts b/packages/typescript/ai-mistral/tests/mistral-adapter.test.ts index 8befe6cf0..042ce0c0a 100644 --- a/packages/typescript/ai-mistral/tests/mistral-adapter.test.ts +++ b/packages/typescript/ai-mistral/tests/mistral-adapter.test.ts @@ -11,46 +11,74 @@ import { createMistralText, mistralText } from '../src/adapters/text' import type { StreamChunk, Tool } from '@tanstack/ai' // Declare mocks at module level -let mockStream: Mock<(...args: Array) => unknown> let mockComplete: Mock<(...args: Array) => unknown> -// Mock the Mistral SDK +// Mock the Mistral SDK (constructor still used for structuredOutput) vi.mock('@mistralai/mistralai', () => { return { Mistral: class { chat = { - stream: (...args: Array) => mockStream(...args), complete: (...args: Array) => mockComplete(...args), } + HTTPClient = class {} + }, + HTTPClient: class { + addHook() {} }, } }) -// Helper to create async iterable from chunks -function createAsyncIterable(chunks: Array): AsyncIterable { - return { - [Symbol.asyncIterator]() { - let index = 0 +function toApiChunk(chunk: Record): Record { + const choices = (chunk.choices as Array>) ?? [] + const result: Record = { + id: chunk.id, + model: chunk.model, + object: 'chat.completion.chunk', + created: 0, + choices: choices.map((choice) => { + const delta = (choice.delta as Record) ?? 
{} + const toolCalls = delta.toolCalls as + | Array> + | undefined return { - async next() { - if (index < chunks.length) { - return { value: chunks[index++]!, done: false } - } - return { value: undefined as T, done: true } + index: choice.index ?? 0, + delta: { + role: delta.role, + content: delta.content, + ...(toolCalls ? { tool_calls: toolCalls } : {}), }, + finish_reason: choice.finishReason ?? null, } - }, + }), + } + if (chunk.usage) { + const u = chunk.usage as Record + result.usage = { + prompt_tokens: u.promptTokens, + completion_tokens: u.completionTokens, + total_tokens: u.totalTokens, + } } + return result } function setupMockStream(chunks: Array>) { - mockStream = vi - .fn() - .mockImplementation(() => - Promise.resolve( - createAsyncIterable(chunks.map((data) => ({ data }))), - ), - ) + const sseBody = + chunks.map((c) => `data: ${JSON.stringify(toApiChunk(c))}`).join('\n\n') + + '\n\ndata: [DONE]\n\n' + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + status: 200, + body: new ReadableStream({ + start(controller) { + controller.enqueue(new TextEncoder().encode(sseBody)) + controller.close() + }, + }), + }), + ) mockComplete = vi.fn() } @@ -62,9 +90,14 @@ const weatherTool: Tool = { describe('Mistral adapters', () => { afterEach(() => { vi.unstubAllEnvs() + vi.unstubAllGlobals() }) describe('Text adapter', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + it('creates a text adapter with explicit API key', () => { const adapter = createMistralText('mistral-large-latest', 'test-api-key') @@ -113,6 +146,7 @@ describe('Mistral AG-UI event emission', () => { afterEach(() => { vi.unstubAllEnvs() + vi.unstubAllGlobals() }) it('emits RUN_STARTED as the first event', async () => { @@ -369,55 +403,53 @@ describe('Mistral AG-UI event emission', () => { } const runFinishedChunk = chunks.find((c) => c.type === 'RUN_FINISHED') + expect(runFinishedChunk).toBeDefined() if (runFinishedChunk?.type === 'RUN_FINISHED') { 
expect(runFinishedChunk.finishReason).toBe('tool_calls') } }) it('emits RUN_ERROR on stream error', async () => { - const streamChunks = [ - { - data: { - id: 'cmpl-123', - model: 'mistral-large-latest', - choices: [ - { - index: 0, - delta: { content: 'Hello' }, - finishReason: null, - }, - ], - }, - }, - ] - - const errorIterable = { - [Symbol.asyncIterator]() { - let index = 0 - return { - async next() { - if (index < streamChunks.length) { - return { value: streamChunks[index++]!, done: false } - } - throw new Error('Stream interrupted') + const firstChunk = JSON.stringify({ + id: 'cmpl-123', + model: 'mistral-large-latest', + choices: [{ index: 0, delta: { content: 'Hello' }, finish_reason: null }], + }) + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + status: 200, + body: new ReadableStream({ + start(controller) { + controller.enqueue( + new TextEncoder().encode(`data: ${firstChunk}\n\n`), + ) + controller.error(new Error('Stream interrupted')) }, - } - }, - } - - mockStream = vi.fn().mockResolvedValue(errorIterable) + }), + }), + ) mockComplete = vi.fn() const adapter = createMistralText('mistral-large-latest', 'test-api-key') const chunks: Array = [] + let thrownError: Error | undefined - for await (const chunk of adapter.chatStream({ - model: 'mistral-large-latest', - messages: [{ role: 'user', content: 'Hello' }], - })) { - chunks.push(chunk) + try { + for await (const chunk of adapter.chatStream({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Hello' }], + })) { + chunks.push(chunk) + } + } catch (err) { + thrownError = err as Error } + expect(thrownError).toBeDefined() + expect(thrownError?.message).toBe('Stream interrupted') + const runErrorChunk = chunks.find((c) => c.type === 'RUN_ERROR') expect(runErrorChunk).toBeDefined() if (runErrorChunk?.type === 'RUN_ERROR') { diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 00c581564..6788d1031 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1570,6 +1570,9 
@@ importers: '@tanstack/ai-groq': specifier: workspace:* version: link:../../packages/typescript/ai-groq + '@tanstack/ai-mistral': + specifier: workspace:* + version: link:../../packages/typescript/ai-mistral '@tanstack/ai-ollama': specifier: workspace:* version: link:../../packages/typescript/ai-ollama diff --git a/testing/e2e/package.json b/testing/e2e/package.json index 0dc700b0d..ba622965d 100644 --- a/testing/e2e/package.json +++ b/testing/e2e/package.json @@ -17,6 +17,7 @@ "@tanstack/ai-anthropic": "workspace:*", "@tanstack/ai-client": "workspace:*", "@tanstack/ai-gemini": "workspace:*", + "@tanstack/ai-mistral": "workspace:*", "@tanstack/ai-grok": "workspace:*", "@tanstack/ai-groq": "workspace:*", "@tanstack/ai-ollama": "workspace:*", diff --git a/testing/e2e/src/lib/feature-support.ts b/testing/e2e/src/lib/feature-support.ts index 3609b42c8..112de8cf3 100644 --- a/testing/e2e/src/lib/feature-support.ts +++ b/testing/e2e/src/lib/feature-support.ts @@ -9,6 +9,7 @@ const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'mistral', ]), 'one-shot-text': new Set([ 'openai', @@ -18,6 +19,7 @@ const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'mistral', ]), reasoning: new Set(['openai', 'anthropic', 'gemini']), 'multi-turn': new Set([ @@ -28,6 +30,7 @@ const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'mistral', ]), 'tool-calling': new Set([ 'openai', @@ -37,6 +40,7 @@ const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'mistral', ]), 'parallel-tool-calls': new Set([ 'openai', @@ -45,6 +49,7 @@ const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'mistral', ]), // Gemini excluded: approval flow timing issues with Gemini's streaming format 'tool-approval': new Set([ @@ -54,6 +59,7 @@ const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'mistral', ]), // Ollama excluded: aimock doesn't support content+toolCalls for /api/chat format 'text-tool-text': new Set([ @@ -63,6 +69,7 @@ const matrix: Record> = { 'groq', 'grok', 'openrouter', 
+ 'mistral', ]), 'structured-output': new Set([ 'openai', @@ -72,6 +79,7 @@ const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'mistral', ]), 'agentic-structured': new Set([ 'openai', @@ -81,7 +89,9 @@ const matrix: Record> = { 'groq', 'grok', 'openrouter', + 'mistral', ]), + // Mistral excluded: mistral-large-latest is text-only; vision requires pixtral 'multimodal-image': new Set([ 'openai', 'anthropic', @@ -103,6 +113,7 @@ const matrix: Record> = { 'ollama', 'grok', 'openrouter', + 'mistral', ]), 'summarize-stream': new Set([ 'openai', @@ -111,6 +122,7 @@ const matrix: Record> = { 'ollama', 'grok', 'openrouter', + 'mistral', ]), // Gemini excluded: aimock doesn't mock Gemini's Imagen predict endpoint format 'image-gen': new Set(['openai', 'grok']), diff --git a/testing/e2e/src/lib/providers.ts b/testing/e2e/src/lib/providers.ts index 35b720b61..4b40d17b2 100644 --- a/testing/e2e/src/lib/providers.ts +++ b/testing/e2e/src/lib/providers.ts @@ -7,6 +7,7 @@ import { createOllamaChat } from '@tanstack/ai-ollama' import { createGroqText } from '@tanstack/ai-groq' import { createGrokText } from '@tanstack/ai-grok' import { createOpenRouterText } from '@tanstack/ai-openrouter' +import { createMistralText } from '@tanstack/ai-mistral' import type { Provider } from '@/lib/types' const LLMOCK_DEFAULT_BASE = process.env.LLMOCK_URL || 'http://127.0.0.1:4010' @@ -20,6 +21,7 @@ const defaultModels: Record = { groq: 'llama-3.3-70b-versatile', grok: 'grok-3', openrouter: 'openai/gpt-4o', + mistral: 'mistral-large-latest', } export function createTextAdapter( @@ -92,6 +94,13 @@ export function createTextAdapter( : openaiUrl, }), }), + mistral: () => + createChatOptions({ + adapter: createMistralText(model as 'mistral-large-latest', DUMMY_KEY, { + serverURL: base, + defaultHeaders: testHeaders, + }), + }), } return factories[provider]() diff --git a/testing/e2e/src/lib/types.ts b/testing/e2e/src/lib/types.ts index 00c848157..46ce8e05e 100644 --- 
a/testing/e2e/src/lib/types.ts +++ b/testing/e2e/src/lib/types.ts @@ -8,6 +8,7 @@ export type Provider = | 'grok' | 'groq' | 'openrouter' + | 'mistral' export type Feature = | 'chat' @@ -37,6 +38,7 @@ export const ALL_PROVIDERS: Provider[] = [ 'grok', 'groq', 'openrouter', + 'mistral', ] export const ALL_FEATURES: Feature[] = [ diff --git a/testing/e2e/tests/test-matrix.ts b/testing/e2e/tests/test-matrix.ts index a5f33f025..1803ef7c4 100644 --- a/testing/e2e/tests/test-matrix.ts +++ b/testing/e2e/tests/test-matrix.ts @@ -1,4 +1,7 @@ import type { Provider, Feature } from '../src/lib/types' +import { isSupported } from '../src/lib/feature-support' + +export { isSupported } export const providers: Provider[] = [ 'openai', @@ -8,128 +11,9 @@ export const providers: Provider[] = [ 'groq', 'grok', 'openrouter', + 'mistral', ] -const supportMatrix: Record> = { - chat: new Set([ - 'openai', - 'anthropic', - 'gemini', - 'ollama', - 'groq', - 'grok', - 'openrouter', - ]), - 'one-shot-text': new Set([ - 'openai', - 'anthropic', - 'gemini', - 'ollama', - 'groq', - 'grok', - 'openrouter', - ]), - reasoning: new Set(['openai', 'anthropic', 'gemini']), - 'multi-turn': new Set([ - 'openai', - 'anthropic', - 'gemini', - 'ollama', - 'groq', - 'grok', - 'openrouter', - ]), - 'tool-calling': new Set([ - 'openai', - 'anthropic', - 'gemini', - 'ollama', - 'groq', - 'grok', - 'openrouter', - ]), - 'parallel-tool-calls': new Set([ - 'openai', - 'anthropic', - 'gemini', - 'groq', - 'grok', - 'openrouter', - ]), - 'tool-approval': new Set([ - 'openai', - 'anthropic', - 'ollama', - 'groq', - 'grok', - 'openrouter', - ]), - 'text-tool-text': new Set([ - 'openai', - 'anthropic', - 'gemini', - 'groq', - 'grok', - 'openrouter', - ]), - 'structured-output': new Set([ - 'openai', - 'anthropic', - 'gemini', - 'ollama', - 'groq', - 'grok', - 'openrouter', - ]), - 'agentic-structured': new Set([ - 'openai', - 'anthropic', - 'gemini', - 'ollama', - 'groq', - 'grok', - 'openrouter', - ]), - 
'multimodal-image': new Set([ - 'openai', - 'anthropic', - 'gemini', - 'grok', - 'openrouter', - ]), - 'multimodal-structured': new Set([ - 'openai', - 'anthropic', - 'gemini', - 'grok', - 'openrouter', - ]), - summarize: new Set([ - 'openai', - 'anthropic', - 'gemini', - 'ollama', - 'grok', - 'openrouter', - ]), - 'summarize-stream': new Set([ - 'openai', - 'anthropic', - 'gemini', - 'ollama', - 'grok', - 'openrouter', - ]), - 'image-gen': new Set(['openai', 'grok']), - tts: new Set(['openai']), - transcription: new Set(['openai']), - 'video-gen': new Set(['openai']), -} - -export function isSupported(provider: Provider, feature: Feature): boolean { - return supportMatrix[feature]?.has(provider) ?? false -} - /** Get only the providers that support a given feature */ export function providersFor(feature: Feature): Provider[] { return providers.filter((p) => isSupported(p, feature)) From 018ed5eb84f7099f07a29e7ad760c3806cabe529 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Fri, 24 Apr 2026 09:04:35 +0000 Subject: [PATCH 3/9] ci: apply automated fixes --- .../ai-mistral/src/adapters/text.ts | 45 ++++++++++++++----- .../src/text/text-provider-options.ts | 3 +- .../ai-mistral/src/utils/schema-converter.ts | 5 ++- 3 files changed, 38 insertions(+), 15 deletions(-) diff --git a/packages/typescript/ai-mistral/src/adapters/text.ts b/packages/typescript/ai-mistral/src/adapters/text.ts index be8165795..9ba8daba6 100644 --- a/packages/typescript/ai-mistral/src/adapters/text.ts +++ b/packages/typescript/ai-mistral/src/adapters/text.ts @@ -107,7 +107,8 @@ function rawChunkToCamelCase(raw: Record): MistralStreamChunk { }, })), }, - finishReason: (choice.finish_reason as string | null | undefined) ?? null, + finishReason: + (choice.finish_reason as string | null | undefined) ?? 
null, } }), usage: raw.usage @@ -293,8 +294,7 @@ export class MistralTextAdapter< } const rawText = response.choices?.[0]?.message?.content || '' - const textContent = - typeof rawText === 'string' ? rawText : String(rawText) + const textContent = typeof rawText === 'string' ? rawText : String(rawText) let parsed: unknown try { @@ -452,7 +452,12 @@ export class MistralTextAdapter< toolCallsInProgress.size > 0 ) { for (const [, toolCall] of toolCallsInProgress) { - if (!toolCall.started || !toolCall.id || !toolCall.name || toolCall.ended) { + if ( + !toolCall.started || + !toolCall.id || + !toolCall.name || + toolCall.ended + ) { continue } @@ -690,15 +695,31 @@ export class MistralTextAdapter< stream: true as const, ...(modelOptions && { ...(modelOptions.stop !== undefined && { stop: modelOptions.stop }), - ...(modelOptions.random_seed !== undefined && { randomSeed: modelOptions.random_seed }), - ...(modelOptions.response_format !== undefined && { responseFormat: modelOptions.response_format }), - ...(modelOptions.tool_choice !== undefined && { toolChoice: modelOptions.tool_choice }), - ...(modelOptions.parallel_tool_calls !== undefined && { parallelToolCalls: modelOptions.parallel_tool_calls }), - ...(modelOptions.frequency_penalty !== undefined && { frequencyPenalty: modelOptions.frequency_penalty }), - ...(modelOptions.presence_penalty !== undefined && { presencePenalty: modelOptions.presence_penalty }), + ...(modelOptions.random_seed !== undefined && { + randomSeed: modelOptions.random_seed, + }), + ...(modelOptions.response_format !== undefined && { + responseFormat: modelOptions.response_format, + }), + ...(modelOptions.tool_choice !== undefined && { + toolChoice: modelOptions.tool_choice, + }), + ...(modelOptions.parallel_tool_calls !== undefined && { + parallelToolCalls: modelOptions.parallel_tool_calls, + }), + ...(modelOptions.frequency_penalty !== undefined && { + frequencyPenalty: modelOptions.frequency_penalty, + }), + ...(modelOptions.presence_penalty 
!== undefined && { + presencePenalty: modelOptions.presence_penalty, + }), ...(modelOptions.n !== undefined && { n: modelOptions.n }), - ...(modelOptions.prediction !== undefined && { prediction: modelOptions.prediction }), - ...(modelOptions.safe_prompt !== undefined && { safePrompt: modelOptions.safe_prompt }), + ...(modelOptions.prediction !== undefined && { + prediction: modelOptions.prediction, + }), + ...(modelOptions.safe_prompt !== undefined && { + safePrompt: modelOptions.safe_prompt, + }), }), } } diff --git a/packages/typescript/ai-mistral/src/text/text-provider-options.ts b/packages/typescript/ai-mistral/src/text/text-provider-options.ts index 8f27dea53..65d2a57b0 100644 --- a/packages/typescript/ai-mistral/src/text/text-provider-options.ts +++ b/packages/typescript/ai-mistral/src/text/text-provider-options.ts @@ -90,8 +90,7 @@ export interface MistralTextProviderOptions { /** * Internal options interface used for validation within the adapter. */ -export interface InternalTextProviderOptions - extends MistralTextProviderOptions { +export interface InternalTextProviderOptions extends MistralTextProviderOptions { messages: Array model: string stream?: boolean | null diff --git a/packages/typescript/ai-mistral/src/utils/schema-converter.ts b/packages/typescript/ai-mistral/src/utils/schema-converter.ts index c5849be08..b941b1396 100644 --- a/packages/typescript/ai-mistral/src/utils/schema-converter.ts +++ b/packages/typescript/ai-mistral/src/utils/schema-converter.ts @@ -16,7 +16,10 @@ export function transformNullsToUndefined(obj: T): T { .filter((item) => item !== undefined) as unknown as T } - if (typeof obj === 'object' && Object.getPrototypeOf(obj) === Object.prototype) { + if ( + typeof obj === 'object' && + Object.getPrototypeOf(obj) === Object.prototype + ) { const result: Record = {} for (const [key, value] of Object.entries(obj as Record)) { const transformed = transformNullsToUndefined(value) From 325a627c79a7b2971518ee6a067c86cc2a390d94 Mon Sep 
17 00:00:00 2001 From: Tim Raderschad Date: Fri, 24 Apr 2026 20:20:11 +0200 Subject: [PATCH 4/9] fix(e2e): re-add @tanstack/ai-mistral dependency to package.json Co-authored-by: Copilot --- .../ai-mistral/src/adapters/text.ts | 95 +++++++++++-------- testing/e2e/package.json | 2 +- 2 files changed, 54 insertions(+), 43 deletions(-) diff --git a/packages/typescript/ai-mistral/src/adapters/text.ts b/packages/typescript/ai-mistral/src/adapters/text.ts index 9ba8daba6..28a69d131 100644 --- a/packages/typescript/ai-mistral/src/adapters/text.ts +++ b/packages/typescript/ai-mistral/src/adapters/text.ts @@ -8,6 +8,12 @@ import { makeMistralStructuredOutputCompatible, transformNullsToUndefined, } from '../utils' +import type { + ContentPart, + ModelMessage, + StreamChunk, + TextOptions, +} from '@tanstack/ai' import type { MISTRAL_CHAT_MODELS, ResolveInputModalities, @@ -18,12 +24,6 @@ import type { StructuredOutputResult, } from '@tanstack/ai/adapters' import type { Mistral } from '@mistralai/mistralai' -import type { - ContentPart, - ModelMessage, - StreamChunk, - TextOptions, -} from '@tanstack/ai' import type { InternalTextProviderOptions } from '../text/text-provider-options' import type { ChatCompletionContentPart, @@ -189,6 +189,11 @@ interface MistralStreamEvent { data: MistralStreamChunk } +/** Cast an event object to StreamChunk. Adapters construct events with string + * literal types which are structurally compatible with the EventType enum. */ +const asChunk = (chunk: Record) => + chunk as unknown as StreamChunk + /** * Mistral Text (Chat) Adapter. * @@ -224,11 +229,11 @@ export class MistralTextAdapter< const timestamp = Date.now() const aguiState = { - runId: generateId(this.name), + runId: options.runId ?? generateId(this.name), + threadId: options.threadId ?? 
generateId(this.name), messageId: generateId(this.name), timestamp, hasEmittedRunStarted: false, - hasEmittedRunError: false, } try { @@ -239,26 +244,27 @@ export class MistralTextAdapter< if (!aguiState.hasEmittedRunStarted) { aguiState.hasEmittedRunStarted = true - yield { + yield asChunk({ type: 'RUN_STARTED', runId: aguiState.runId, + threadId: aguiState.threadId, model: options.model, timestamp, - } + }) } - if (!aguiState.hasEmittedRunError) { - yield { - type: 'RUN_ERROR', - runId: aguiState.runId, - model: options.model, - timestamp, - error: { - message: err.message || 'Unknown error', - code: err.code, - }, - } - } + yield asChunk({ + type: 'RUN_ERROR', + runId: aguiState.runId, + model: options.model, + timestamp, + message: err.message || 'Unknown error', + code: err.code, + error: { + message: err.message || 'Unknown error', + code: err.code, + }, + }) throw err } @@ -321,10 +327,10 @@ export class MistralTextAdapter< options: TextOptions, aguiState: { runId: string + threadId: string messageId: string timestamp: number hasEmittedRunStarted: boolean - hasEmittedRunError: boolean }, ): AsyncIterable { let accumulatedContent = '' @@ -352,12 +358,13 @@ export class MistralTextAdapter< if (!aguiState.hasEmittedRunStarted) { aguiState.hasEmittedRunStarted = true - yield { + yield asChunk({ type: 'RUN_STARTED', runId: aguiState.runId, + threadId: aguiState.threadId, model: chunk.model || options.model, timestamp, - } + }) } const delta = choice.delta @@ -367,25 +374,25 @@ export class MistralTextAdapter< if (deltaContent) { if (!hasEmittedTextMessageStart) { hasEmittedTextMessageStart = true - yield { + yield asChunk({ type: 'TEXT_MESSAGE_START', messageId: aguiState.messageId, model: chunk.model || options.model, timestamp, role: 'assistant', - } + }) } accumulatedContent += deltaContent - yield { + yield asChunk({ type: 'TEXT_MESSAGE_CONTENT', messageId: aguiState.messageId, model: chunk.model || options.model, timestamp, delta: deltaContent, content: 
accumulatedContent, - } + }) } if (deltaToolCalls) { @@ -424,24 +431,25 @@ export class MistralTextAdapter< if (toolCall.id && toolCall.name && !toolCall.started) { toolCall.started = true - yield { + yield asChunk({ type: 'TOOL_CALL_START', toolCallId: toolCall.id, + toolCallName: toolCall.name, toolName: toolCall.name, model: chunk.model || options.model, timestamp, index, - } + }) } if (argsDelta !== undefined && toolCall.started) { - yield { + yield asChunk({ type: 'TOOL_CALL_ARGS', toolCallId: toolCall.id, model: chunk.model || options.model, timestamp, delta: argsDelta, - } + }) } } } @@ -472,14 +480,15 @@ export class MistralTextAdapter< toolCall.ended = true hasEmittedToolCall = true - yield { + yield asChunk({ type: 'TOOL_CALL_END', toolCallId: toolCall.id, + toolCallName: toolCall.name, toolName: toolCall.name, model: chunk.model || options.model, timestamp, input: parsedInput, - } + }) } } @@ -491,19 +500,20 @@ export class MistralTextAdapter< : 'stop' if (hasEmittedTextMessageStart) { - yield { + yield asChunk({ type: 'TEXT_MESSAGE_END', messageId: aguiState.messageId, model: chunk.model || options.model, timestamp, - } + }) } const usage = chunk.usage - yield { + yield asChunk({ type: 'RUN_FINISHED', runId: aguiState.runId, + threadId: aguiState.threadId, model: chunk.model || options.model, timestamp, usage: usage @@ -514,23 +524,24 @@ export class MistralTextAdapter< } : undefined, finishReason: computedFinishReason, - } + }) } } } catch (error: unknown) { const err = error as Error & { code?: string } - aguiState.hasEmittedRunError = true - yield { + yield asChunk({ type: 'RUN_ERROR', runId: aguiState.runId, model: options.model, timestamp, + message: err.message || 'Unknown error occurred', + code: err.code, error: { message: err.message || 'Unknown error occurred', code: err.code, }, - } + }) throw err } } diff --git a/testing/e2e/package.json b/testing/e2e/package.json index ba622965d..d7e37e151 100644 --- a/testing/e2e/package.json +++ 
b/testing/e2e/package.json @@ -17,9 +17,9 @@ "@tanstack/ai-anthropic": "workspace:*", "@tanstack/ai-client": "workspace:*", "@tanstack/ai-gemini": "workspace:*", - "@tanstack/ai-mistral": "workspace:*", "@tanstack/ai-grok": "workspace:*", "@tanstack/ai-groq": "workspace:*", + "@tanstack/ai-mistral": "workspace:*", "@tanstack/ai-ollama": "workspace:*", "@tanstack/ai-openai": "workspace:*", "@tanstack/ai-openrouter": "workspace:*", From ea0a3673b26df9b9ba28db10afeba723d599b4fc Mon Sep 17 00:00:00 2001 From: Tim Raderschad Date: Sun, 26 Apr 2026 08:10:55 +0200 Subject: [PATCH 5/9] refactor: reorder imports and simplify UUID generation in client utility --- .../ai-mistral/src/adapters/text.ts | 555 +++++++++--------- .../typescript/ai-mistral/src/utils/client.ts | 12 +- 2 files changed, 268 insertions(+), 299 deletions(-) diff --git a/packages/typescript/ai-mistral/src/adapters/text.ts b/packages/typescript/ai-mistral/src/adapters/text.ts index 28a69d131..ade190a46 100644 --- a/packages/typescript/ai-mistral/src/adapters/text.ts +++ b/packages/typescript/ai-mistral/src/adapters/text.ts @@ -10,21 +10,26 @@ import { } from '../utils' import type { ContentPart, + Modality, ModelMessage, StreamChunk, TextOptions, } from '@tanstack/ai' import type { MISTRAL_CHAT_MODELS, - ResolveInputModalities, - ResolveProviderOptions, + MistralChatModelProviderOptionsByName, + MistralModelInputModalitiesByName, } from '../model-meta' import type { StructuredOutputOptions, StructuredOutputResult, } from '@tanstack/ai/adapters' import type { Mistral } from '@mistralai/mistralai' -import type { InternalTextProviderOptions } from '../text/text-provider-options' +import type { ChatCompletionStreamRequest } from '@mistralai/mistralai/models/components' +import type { + ExternalTextProviderOptions, + InternalTextProviderOptions, +} from '../text/text-provider-options' import type { ChatCompletionContentPart, ChatCompletionMessageParam, @@ -33,166 +38,79 @@ import type { } from 
'../message-types' import type { MistralClientConfig } from '../utils' -function messagesToSnakeCase( - messages: Array, -): Array { - return messages.map((msg) => { - if (msg.role === 'tool') { - return { - role: 'tool', - tool_call_id: msg.toolCallId, - content: msg.content, - ...(msg.name !== undefined ? { name: msg.name } : {}), - } - } - if (msg.role === 'assistant') { - const base: Record = { - role: 'assistant', - content: msg.content ?? null, - } - if (msg.toolCalls && msg.toolCalls.length > 0) { - base.tool_calls = msg.toolCalls.map((tc) => ({ - id: tc.id, - type: tc.type ?? 'function', - function: tc.function, - })) - } - if (msg.prefix !== undefined) base.prefix = msg.prefix - return base - } - if (msg.role === 'user' && Array.isArray(msg.content)) { - return { - role: 'user', - content: msg.content.map((part) => { - if (part.type === 'image_url') { - return { type: 'image_url', image_url: part.imageUrl } - } - if (part.type === 'document_url') { - return { type: 'document_url', document_url: part.documentUrl } - } - return part - }), - } - } - return msg - }) -} - -function rawChunkToCamelCase(raw: Record): MistralStreamChunk { - const rawChoices = (raw.choices as Array>) ?? [] - return { - id: raw.id as string | undefined, - model: raw.model as string | undefined, - choices: rawChoices.map((choice) => { - const delta = (choice.delta as Record) ?? {} - const rawToolCalls = delta.tool_calls as - | Array> - | undefined - return { - index: choice.index as number | undefined, - delta: { - role: delta.role as string | null | undefined, - content: delta.content as - | string - | Array<{ type: string; text?: string }> - | null - | undefined, - toolCalls: rawToolCalls?.map((tc) => ({ - id: tc.id as string | undefined, - type: tc.type as string | undefined, - index: tc.index as number | undefined, - function: tc.function as { - name?: string - arguments?: string | Record - }, - })), - }, - finishReason: - (choice.finish_reason as string | null | undefined) ?? 
null, - } - }), - usage: raw.usage - ? (() => { - const u = raw.usage as Record - return { - promptTokens: (u.prompt_tokens as number | undefined) ?? 0, - completionTokens: (u.completion_tokens as number | undefined) ?? 0, - totalTokens: (u.total_tokens as number | undefined) ?? 0, - } - })() - : undefined, - } -} - -interface RawStreamParams { - model: string - messages: Array - temperature?: number | null - maxTokens?: number | null - topP?: number | null - tools?: unknown - stop?: unknown - randomSeed?: number | null - responseFormat?: unknown - toolChoice?: unknown - parallelToolCalls?: boolean | null - frequencyPenalty?: number | null - presencePenalty?: number | null - n?: number | null - prediction?: unknown - safePrompt?: boolean | null - stream?: true - [key: string]: unknown -} +/** Cast an event object to StreamChunk. Adapters construct events with string + * literal types which are structurally compatible with the EventType enum. */ +const asChunk = (chunk: Record) => + chunk as unknown as StreamChunk /** * Configuration for Mistral text adapter. */ -export type MistralTextConfig = MistralClientConfig +export interface MistralTextConfig extends MistralClientConfig {} /** * Alias for TextProviderOptions for external use. */ -export type { ExternalTextProviderOptions as MistralTextProviderOptions } from '../text/text-provider-options' +export type MistralTextProviderOptions = ExternalTextProviderOptions + +// =========================== +// Type Resolution Helpers +// =========================== + +type ResolveProviderOptions = + TModel extends keyof MistralChatModelProviderOptionsByName + ? MistralChatModelProviderOptionsByName[TModel] + : MistralTextProviderOptions + +type ResolveInputModalities = + TModel extends keyof MistralModelInputModalitiesByName + ? 
MistralModelInputModalitiesByName[TModel] + : readonly ['text'] + +// =========================== +// Wire-format chunk types +// =========================== /** - * Minimal shape of a Mistral stream chunk used by the adapter. + * Snake-case shape of a Mistral chat completion stream chunk as returned on the + * wire. We bypass the SDK's `chat.stream` because its Zod validation rejects + * tool-call argument deltas that omit `function.name` (only the first chunk in + * a tool call carries the name). */ -interface MistralStreamChunk { +interface MistralRawToolCall { id?: string - model?: string - choices: Array<{ - index?: number - delta: { - role?: string | null - content?: string | Array<{ type: string; text?: string }> | null - toolCalls?: Array<{ - id?: string - type?: string - index?: number - function: { - name?: string - arguments?: string | Record - } - }> | null - } - finishReason?: string | null - }> - usage?: { - promptTokens?: number - completionTokens?: number - totalTokens?: number + type?: string + index?: number + function?: { + name?: string + arguments?: string | Record } } -interface MistralStreamEvent { - data: MistralStreamChunk +interface MistralRawChoice { + index?: number + delta?: { + role?: string | null + content?: string | Array<{ type: string; text?: string }> | null + tool_calls?: Array + } + finish_reason?: string | null } -/** Cast an event object to StreamChunk. Adapters construct events with string - * literal types which are structurally compatible with the EventType enum. */ -const asChunk = (chunk: Record) => - chunk as unknown as StreamChunk +interface MistralRawChunk { + id?: string + model?: string + choices?: Array + usage?: { + prompt_tokens?: number + completion_tokens?: number + total_tokens?: number + } +} + +// =========================== +// Adapter Implementation +// =========================== /** * Mistral Text (Chat) Adapter. 
@@ -201,10 +119,13 @@ const asChunk = (chunk: Record) => */ export class MistralTextAdapter< TModel extends (typeof MISTRAL_CHAT_MODELS)[number], + TProviderOptions extends Record = ResolveProviderOptions, + TInputModalities extends + ReadonlyArray = ResolveInputModalities, > extends BaseTextAdapter< TModel, - ResolveProviderOptions, - ResolveInputModalities, + TProviderOptions, + TInputModalities, MistralMessageMetadataByModality > { readonly name = 'mistral' as const @@ -214,16 +135,15 @@ export class MistralTextAdapter< constructor(config: MistralTextConfig, model: TModel) { super(config, model) - // Retained for structuredOutput (see structuredOutput method); not used on - // streaming paths, which go through fetchRawMistralStream instead. E2E tests - // route Mistral through llmock via providers.ts (serverURL: base), so the - // custom SSE path remains covered. + // The SDK client is retained for `structuredOutput` (non-streaming). The + // streaming path bypasses the SDK and uses `fetchRawMistralStream` because + // the SDK's Zod schemas reject partial tool-call argument deltas. this.client = createMistralClient(config) this.rawConfig = config } async *chatStream( - options: TextOptions>, + options: TextOptions, ): AsyncIterable { const requestParams = this.mapTextOptionsToMistral(options) const timestamp = Date.now() @@ -274,18 +194,18 @@ export class MistralTextAdapter< * Generate structured output using Mistral's JSON Schema response format. 
*/ async structuredOutput( - options: StructuredOutputOptions>, + options: StructuredOutputOptions, ): Promise> { const { chatOptions, outputSchema } = options - const requestParams = this.mapTextOptionsToMistral(chatOptions) + const { stream: _stream, ...nonStreamParams } = + this.mapTextOptionsToMistral(chatOptions) const jsonSchema = makeMistralStructuredOutputCompatible( outputSchema, outputSchema.required || [], ) - const { stream: _stream, ...nonStreamParams } = requestParams - const response = (await this.client.chat.complete({ + const response = await this.client.chat.complete({ ...nonStreamParams, responseFormat: { type: 'json_schema', @@ -295,12 +215,10 @@ export class MistralTextAdapter< strict: true, }, }, - } as Parameters[0])) as { - choices?: Array<{ message?: { content?: string | null } }> - } + }) - const rawText = response.choices?.[0]?.message?.content || '' - const textContent = typeof rawText === 'string' ? rawText : String(rawText) + const rawText = response.choices[0]?.message?.content + const textContent = typeof rawText === 'string' ? rawText : '' let parsed: unknown try { @@ -311,10 +229,8 @@ export class MistralTextAdapter< ) } - const transformed = transformNullsToUndefined(parsed) - return { - data: transformed, + data: transformNullsToUndefined(parsed), rawText: textContent, } } @@ -323,7 +239,7 @@ export class MistralTextAdapter< * Processes streaming chunks from the Mistral API and yields AG-UI stream events. 
*/ private async *processMistralStreamChunks( - stream: AsyncIterable, + stream: AsyncIterable, options: TextOptions, aguiState: { runId: string @@ -350,26 +266,26 @@ export class MistralTextAdapter< >() try { - for await (const event of stream) { - const chunk = event.data - const choice = chunk.choices[0] - + for await (const chunk of stream) { + const choice = chunk.choices?.[0] if (!choice) continue + const chunkModel = chunk.model || options.model + if (!aguiState.hasEmittedRunStarted) { aguiState.hasEmittedRunStarted = true yield asChunk({ type: 'RUN_STARTED', runId: aguiState.runId, threadId: aguiState.threadId, - model: chunk.model || options.model, + model: chunkModel, timestamp, }) } const delta = choice.delta - const deltaContent = this.extractDeltaText(delta.content) - const deltaToolCalls = delta.toolCalls + const deltaContent = this.extractDeltaText(delta?.content) + const deltaToolCalls = delta?.tool_calls if (deltaContent) { if (!hasEmittedTextMessageStart) { @@ -377,7 +293,7 @@ export class MistralTextAdapter< yield asChunk({ type: 'TEXT_MESSAGE_START', messageId: aguiState.messageId, - model: chunk.model || options.model, + model: chunkModel, timestamp, role: 'assistant', }) @@ -388,7 +304,7 @@ export class MistralTextAdapter< yield asChunk({ type: 'TEXT_MESSAGE_CONTENT', messageId: aguiState.messageId, - model: chunk.model || options.model, + model: chunkModel, timestamp, delta: deltaContent, content: accumulatedContent, @@ -403,7 +319,7 @@ export class MistralTextAdapter< if (!toolCallsInProgress.has(index)) { toolCallsInProgress.set(index, { id: toolCallDelta.id || '', - name: toolCallDelta.function.name || '', + name: toolCallDelta.function?.name || '', arguments: '', started: false, ended: false, @@ -412,18 +328,18 @@ export class MistralTextAdapter< const toolCall = toolCallsInProgress.get(index)! 
- if (toolCallDelta.id) { - toolCall.id = toolCallDelta.id - } - if (toolCallDelta.function.name) { + if (toolCallDelta.id) toolCall.id = toolCallDelta.id + if (toolCallDelta.function?.name) { toolCall.name = toolCallDelta.function.name } + + const rawArgs = toolCallDelta.function?.arguments const argsDelta = - toolCallDelta.function.arguments !== undefined - ? typeof toolCallDelta.function.arguments === 'string' - ? toolCallDelta.function.arguments - : JSON.stringify(toolCallDelta.function.arguments) - : undefined + rawArgs === undefined + ? undefined + : typeof rawArgs === 'string' + ? rawArgs + : JSON.stringify(rawArgs) if (argsDelta !== undefined) { toolCall.arguments += argsDelta @@ -436,7 +352,7 @@ export class MistralTextAdapter< toolCallId: toolCall.id, toolCallName: toolCall.name, toolName: toolCall.name, - model: chunk.model || options.model, + model: chunkModel, timestamp, index, }) @@ -446,7 +362,7 @@ export class MistralTextAdapter< yield asChunk({ type: 'TOOL_CALL_ARGS', toolCallId: toolCall.id, - model: chunk.model || options.model, + model: chunkModel, timestamp, delta: argsDelta, }) @@ -454,11 +370,9 @@ export class MistralTextAdapter< } } - if (choice.finishReason) { - if ( - choice.finishReason === 'tool_calls' || - toolCallsInProgress.size > 0 - ) { + const finishReason = choice.finish_reason + if (finishReason) { + if (finishReason === 'tool_calls' || toolCallsInProgress.size > 0) { for (const [, toolCall] of toolCallsInProgress) { if ( !toolCall.started || @@ -485,7 +399,7 @@ export class MistralTextAdapter< toolCallId: toolCall.id, toolCallName: toolCall.name, toolName: toolCall.name, - model: chunk.model || options.model, + model: chunkModel, timestamp, input: parsedInput, }) @@ -493,9 +407,9 @@ export class MistralTextAdapter< } const computedFinishReason = - choice.finishReason === 'tool_calls' || hasEmittedToolCall + finishReason === 'tool_calls' || hasEmittedToolCall ? 
'tool_calls' - : choice.finishReason === 'length' + : finishReason === 'length' ? 'length' : 'stop' @@ -503,24 +417,23 @@ export class MistralTextAdapter< yield asChunk({ type: 'TEXT_MESSAGE_END', messageId: aguiState.messageId, - model: chunk.model || options.model, + model: chunkModel, timestamp, }) } const usage = chunk.usage - yield asChunk({ type: 'RUN_FINISHED', runId: aguiState.runId, threadId: aguiState.threadId, - model: chunk.model || options.model, + model: chunkModel, timestamp, usage: usage ? { - promptTokens: usage.promptTokens || 0, - completionTokens: usage.completionTokens || 0, - totalTokens: usage.totalTokens || 0, + promptTokens: usage.prompt_tokens || 0, + completionTokens: usage.completion_tokens || 0, + totalTokens: usage.total_tokens || 0, } : undefined, finishReason: computedFinishReason, @@ -552,49 +465,15 @@ export class MistralTextAdapter< * rejects streaming tool call chunks that omit `name` in argument deltas. */ private async *fetchRawMistralStream( - params: RawStreamParams, + params: ChatCompletionStreamRequest, config: MistralClientConfig, - ): AsyncGenerator { + ): AsyncGenerator { const serverURL = (config.serverURL ?? 
'https://api.mistral.ai') .replace(/\/+$/, '') .replace(/\/v1$/, '') const url = `${serverURL}/v1/chat/completions` - const { - stream: _stream, - messages, - maxTokens, - topP, - randomSeed, - responseFormat, - toolChoice, - parallelToolCalls, - frequencyPenalty, - presencePenalty, - safePrompt, - ...rest - } = params - - const body: Record = { - ...rest, - messages: messagesToSnakeCase(messages), - stream: true, - ...(maxTokens != null && { max_tokens: maxTokens }), - ...(topP != null && { top_p: topP }), - ...(randomSeed != null && { random_seed: randomSeed }), - ...(responseFormat != null && { response_format: responseFormat }), - ...(toolChoice != null && { tool_choice: toolChoice }), - ...(parallelToolCalls != null && { - parallel_tool_calls: parallelToolCalls, - }), - ...(frequencyPenalty != null && { - frequency_penalty: frequencyPenalty, - }), - ...(presencePenalty != null && { - presence_penalty: presencePenalty, - }), - ...(safePrompt != null && { safe_prompt: safePrompt }), - } + const body = this.toWireBody(params) const headers: Record = { 'Content-Type': 'application/json', @@ -608,17 +487,17 @@ export class MistralTextAdapter< body: JSON.stringify(body), }) - if (!response.ok || !response.body) { + if (!response.ok) { const errorText = await response.text() throw new Error(`Mistral API error ${response.status}: ${errorText}`) } - const reader = response.body.getReader() + const reader = response.body!.getReader() const decoder = new TextDecoder() let buffer = '' try { - while (true) { + for (;;) { const { done, value } = await reader.read() if (done) break @@ -633,8 +512,7 @@ export class MistralTextAdapter< if (data === '[DONE]') return try { - const raw = JSON.parse(data) as Record - yield { data: rawChunkToCamelCase(raw) } + yield JSON.parse(data) as MistralRawChunk } catch { // skip malformed chunks } @@ -646,12 +524,56 @@ export class MistralTextAdapter< } } + /** + * Converts the SDK's camelCase `ChatCompletionStreamRequest` into the + * 
snake_case wire body, including converting messages. + */ + private toWireBody( + params: ChatCompletionStreamRequest, + ): Record { + const { + messages, + maxTokens, + topP, + randomSeed, + responseFormat, + toolChoice, + parallelToolCalls, + frequencyPenalty, + presencePenalty, + safePrompt, + stream: _stream, + ...rest + } = params + + return { + ...rest, + messages: messages.map(messageToWire), + stream: true, + ...(maxTokens != null && { max_tokens: maxTokens }), + ...(topP != null && { top_p: topP }), + ...(randomSeed != null && { random_seed: randomSeed }), + ...(responseFormat != null && { response_format: responseFormat }), + ...(toolChoice != null && { tool_choice: toolChoice }), + ...(parallelToolCalls != null && { + parallel_tool_calls: parallelToolCalls, + }), + ...(frequencyPenalty != null && { frequency_penalty: frequencyPenalty }), + ...(presencePenalty != null && { presence_penalty: presencePenalty }), + ...(safePrompt != null && { safe_prompt: safePrompt }), + } + } + /** * Extracts text from a Mistral delta content, which can be a string or an * array of content chunks. */ private extractDeltaText( - content: string | Array<{ type: string; text?: string }> | null | undefined, + content: + | string + | Array<{ type: string; text?: string }> + | null + | undefined, ): string { if (!content) return '' if (typeof content === 'string') return content @@ -664,7 +586,9 @@ export class MistralTextAdapter< /** * Maps common TextOptions to Mistral Chat Completions request parameters. 
*/ - private mapTextOptionsToMistral(options: TextOptions) { + private mapTextOptionsToMistral( + options: TextOptions, + ): ChatCompletionStreamRequest { const modelOptions = options.modelOptions as | Omit< InternalTextProviderOptions, @@ -698,37 +622,39 @@ export class MistralTextAdapter< return { model: options.model, - messages, + messages: messages as ChatCompletionStreamRequest['messages'], temperature: options.temperature, maxTokens: options.maxTokens, - topP: options.topP, - tools, - stream: true as const, + topP: options.topP ?? undefined, + tools: tools as ChatCompletionStreamRequest['tools'], + stream: true, ...(modelOptions && { - ...(modelOptions.stop !== undefined && { stop: modelOptions.stop }), - ...(modelOptions.random_seed !== undefined && { + ...(modelOptions.stop != null && { stop: modelOptions.stop }), + ...(modelOptions.random_seed != null && { randomSeed: modelOptions.random_seed, }), - ...(modelOptions.response_format !== undefined && { - responseFormat: modelOptions.response_format, + ...(modelOptions.response_format != null && { + responseFormat: + modelOptions.response_format as ChatCompletionStreamRequest['responseFormat'], }), - ...(modelOptions.tool_choice !== undefined && { - toolChoice: modelOptions.tool_choice, + ...(modelOptions.tool_choice != null && { + toolChoice: + modelOptions.tool_choice as ChatCompletionStreamRequest['toolChoice'], }), - ...(modelOptions.parallel_tool_calls !== undefined && { + ...(modelOptions.parallel_tool_calls != null && { parallelToolCalls: modelOptions.parallel_tool_calls, }), - ...(modelOptions.frequency_penalty !== undefined && { + ...(modelOptions.frequency_penalty != null && { frequencyPenalty: modelOptions.frequency_penalty, }), - ...(modelOptions.presence_penalty !== undefined && { + ...(modelOptions.presence_penalty != null && { presencePenalty: modelOptions.presence_penalty, }), - ...(modelOptions.n !== undefined && { n: modelOptions.n }), - ...(modelOptions.prediction !== undefined && { + 
...(modelOptions.n != null && { n: modelOptions.n }), + ...(modelOptions.prediction != null && { prediction: modelOptions.prediction, }), - ...(modelOptions.safe_prompt !== undefined && { + ...(modelOptions.safe_prompt != null && { safePrompt: modelOptions.safe_prompt, }), }), @@ -786,22 +712,8 @@ export class MistralTextAdapter< const parts: Array = [] for (const part of contentParts) { - if (part.type === 'text') { - parts.push({ type: 'text', text: part.content }) - } else if (part.type === 'image') { - const imageMetadata = part.metadata as MistralImageMetadata | undefined - const imageValue = part.source.value - const imageUrl = - part.source.type === 'data' && !imageValue.startsWith('data:') - ? `data:${part.source.mimeType};base64,${imageValue}` - : imageValue - parts.push({ - type: 'image_url', - imageUrl: imageMetadata?.detail - ? { url: imageUrl, detail: imageMetadata.detail } - : imageUrl, - }) - } + const converted = this.convertContentPartToMistral(part) + if (converted) parts.push(converted) } return { @@ -810,18 +722,43 @@ export class MistralTextAdapter< } } + /** + * Converts a ContentPart to a Mistral content part. Returns undefined for + * unsupported part types. + */ + private convertContentPartToMistral( + part: ContentPart, + ): ChatCompletionContentPart | undefined { + if (part.type === 'text') { + return { type: 'text', text: part.content } + } + + if (part.type === 'image') { + const imageMetadata = part.metadata as MistralImageMetadata | undefined + const imageValue = part.source.value + const imageUrl = + part.source.type === 'data' && !imageValue.startsWith('data:') + ? `data:${part.source.mimeType};base64,${imageValue}` + : imageValue + return { + type: 'image_url', + imageUrl: imageMetadata?.detail + ? { url: imageUrl, detail: imageMetadata.detail } + : imageUrl, + } + } + + return undefined + } + /** * Normalizes message content to an array of ContentPart. 
*/ private normalizeContent( content: string | null | Array, ): Array { - if (content === null) { - return [] - } - if (typeof content === 'string') { - return [{ type: 'text', content: content }] - } + if (content === null) return [] + if (typeof content === 'string') return [{ type: 'text', content }] return content } @@ -831,12 +768,8 @@ export class MistralTextAdapter< private extractTextContent( content: string | null | Array, ): string { - if (content === null) { - return '' - } - if (typeof content === 'string') { - return content - } + if (content === null) return '' + if (typeof content === 'string') return content return content .filter((p) => p.type === 'text') .map((p) => p.content) @@ -844,6 +777,50 @@ export class MistralTextAdapter< } } +/** + * Snake-cases a Mistral SDK message into the wire format expected by the API. + */ +function messageToWire(msg: ChatCompletionStreamRequest['messages'][number]) { + if (msg.role === 'tool') { + return { + role: 'tool', + tool_call_id: msg.toolCallId, + content: msg.content, + ...(msg.name !== undefined ? { name: msg.name } : {}), + } + } + if (msg.role === 'assistant') { + const base: Record = { + role: 'assistant', + content: msg.content ?? null, + } + if (msg.toolCalls && msg.toolCalls.length > 0) { + base.tool_calls = msg.toolCalls.map((tc) => ({ + id: tc.id, + type: tc.type ?? 'function', + function: tc.function, + })) + } + if (msg.prefix !== undefined) base.prefix = msg.prefix + return base + } + if (msg.role === 'user' && Array.isArray(msg.content)) { + return { + role: 'user', + content: msg.content.map((part) => { + if (part.type === 'image_url') { + return { type: 'image_url', image_url: part.imageUrl } + } + if (part.type === 'document_url') { + return { type: 'document_url', document_url: part.documentUrl } + } + return part + }), + } + } + return msg +} + /** * Creates a Mistral text adapter with explicit API key. 
* diff --git a/packages/typescript/ai-mistral/src/utils/client.ts b/packages/typescript/ai-mistral/src/utils/client.ts index 1d54a39d0..7a25a40dc 100644 --- a/packages/typescript/ai-mistral/src/utils/client.ts +++ b/packages/typescript/ai-mistral/src/utils/client.ts @@ -1,4 +1,4 @@ -import { Mistral, HTTPClient } from '@mistralai/mistralai' +import { HTTPClient, Mistral } from '@mistralai/mistralai' export interface MistralClientConfig { /** Mistral API key. */ @@ -62,17 +62,9 @@ export function getMistralApiKeyFromEnv(): string { return key } -function uuidv4Fallback(): string { - return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => { - const r = (Math.random() * 16) | 0 - return (c === 'x' ? r : (r & 0x3) | 0x8).toString(16) - }) -} - /** * Generates a unique ID with a prefix. */ export function generateId(prefix: string): string { - const uuid = globalThis.crypto?.randomUUID?.() ?? uuidv4Fallback() - return `${prefix}-${uuid}` + return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}` } From 29d40dfb191e559b3d861e570f34462bda3f1b91 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Mon, 27 Apr 2026 08:32:18 +0000 Subject: [PATCH 6/9] ci: apply automated fixes --- packages/typescript/ai-mistral/src/adapters/text.ts | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/packages/typescript/ai-mistral/src/adapters/text.ts b/packages/typescript/ai-mistral/src/adapters/text.ts index ade190a46..96b007085 100644 --- a/packages/typescript/ai-mistral/src/adapters/text.ts +++ b/packages/typescript/ai-mistral/src/adapters/text.ts @@ -120,8 +120,8 @@ interface MistralRawChunk { export class MistralTextAdapter< TModel extends (typeof MISTRAL_CHAT_MODELS)[number], TProviderOptions extends Record = ResolveProviderOptions, - TInputModalities extends - ReadonlyArray = ResolveInputModalities, + TInputModalities extends ReadonlyArray = + ResolveInputModalities, > 
extends BaseTextAdapter< TModel, TProviderOptions, @@ -569,11 +569,7 @@ export class MistralTextAdapter< * array of content chunks. */ private extractDeltaText( - content: - | string - | Array<{ type: string; text?: string }> - | null - | undefined, + content: string | Array<{ type: string; text?: string }> | null | undefined, ): string { if (!content) return '' if (typeof content === 'string') return content From 04836370e82a2ecd0b586e4694107a73b072f76c Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Tue, 28 Apr 2026 19:22:09 +0200 Subject: [PATCH 7/9] =?UTF-8?q?fix(ai-mistral):=20CR=20loop=20fixes=20?= =?UTF-8?q?=E2=80=94=20data=20correctness,=20lifecycle,=20reasoning?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - transformNullsToUndefined preserves array length and object keys instead of dropping null slots/keys (data corruption fix). - Stream lifecycle: emit TEXT_MESSAGE_END / TOOL_CALL_END / RUN_FINISHED on early termination; inner generator no longer double-emits RUN_ERROR. - Tool-call args buffered before id+name arrive are now replayed as a single TOOL_CALL_ARGS event so streaming consumers see the full prefix. - Tool-call argument JSON.parse failures throw with tool name + id context instead of silently substituting {}. - transformNullsToUndefined now applied to tool-call inputs (not only structuredOutput), so user tool handlers receive undefined where the schema declared optional. - Streaming requests now opt in to stream_options.include_usage so RUN_FINISHED.usage isn't permanently undefined. - modelOptions.temperature / top_p are no longer silently dropped. - Empty SSE catch swallowing JSON.parse errors replaced with explicit warn-on-syntax-error and throw-on-error-frame paths. - response.body non-null assertion replaced with a clear empty-body error. - convertContentPartToMistral throws a descriptive error for unsupported modalities instead of silently dropping the part. 
- Reasoning streams (Magistral) plumbed through as REASONING_* AG-UI events. Adapter accepts both wire shapes: - Mistral SDK ContentChunk: delta.content[].type='thinking' - OpenAI-compat (DeepSeek, aimock): delta.reasoning_content - generateId switched to crypto.randomUUID(). - Build/config consistency: deleted duplicate vitest.config.ts; added @tanstack/ai, @tanstack/ai-client, zod to devDependencies; tsconfig now includes tests/. - README narrows multimodal claim to the vision-capable model list and advertises reasoning support. - E2E: enable mistral on the reasoning feature row with a magistral modelOverride (uses the existing modelOverrides mechanism). - Docs: new docs/adapters/mistral.md page wired into config.json. 23 unit tests pass (was 10), publint --strict clean, vite build clean. --- .changeset/add-ai-mistral.md | 2 +- docs/adapters/mistral.md | 329 ++++++++ docs/config.json | 4 + packages/typescript/ai-mistral/README.md | 7 +- packages/typescript/ai-mistral/package.json | 5 +- .../ai-mistral/src/adapters/text.ts | 411 ++++++++-- .../typescript/ai-mistral/src/model-meta.ts | 2 +- .../src/text/text-provider-options.ts | 10 - .../typescript/ai-mistral/src/utils/client.ts | 2 +- .../ai-mistral/src/utils/schema-converter.ts | 15 +- .../ai-mistral/tests/mistral-adapter.test.ts | 724 +++++++++++++++++- packages/typescript/ai-mistral/tsconfig.json | 5 +- .../typescript/ai-mistral/vitest.config.ts | 22 - pnpm-lock.yaml | 17 +- testing/e2e/src/lib/feature-support.ts | 2 +- testing/e2e/src/lib/features.ts | 1 + 16 files changed, 1420 insertions(+), 138 deletions(-) create mode 100644 docs/adapters/mistral.md delete mode 100644 packages/typescript/ai-mistral/vitest.config.ts diff --git a/.changeset/add-ai-mistral.md b/.changeset/add-ai-mistral.md index 294086f32..4e1199e98 100644 --- a/.changeset/add-ai-mistral.md +++ b/.changeset/add-ai-mistral.md @@ -2,4 +2,4 @@ '@tanstack/ai-mistral': minor --- -Add new `@tanstack/ai-mistral` adapter package for Mistral models 
using the `@mistralai/mistralai` SDK. Supports streaming chat, tool calling, vision input (Pixtral / Mistral Medium / Small), and structured output via JSON Schema. Includes model metadata for Mistral Large, Medium, Small, Ministral 3B/8B, Codestral, Pixtral, Magistral, and Open Mistral Nemo. +Add new `@tanstack/ai-mistral` adapter package for Mistral models using the `@mistralai/mistralai` SDK. Supports streaming chat, tool calling, vision input (Pixtral / Mistral Medium / Small), structured output via JSON Schema, and reasoning streams (Magistral) — emitted as AG-UI `REASONING_*` events. Includes model metadata for Mistral Large, Medium, Small, Ministral 3B/8B, Codestral, Pixtral, Magistral, and Open Mistral Nemo. diff --git a/docs/adapters/mistral.md b/docs/adapters/mistral.md new file mode 100644 index 000000000..e678b46be --- /dev/null +++ b/docs/adapters/mistral.md @@ -0,0 +1,329 @@ +--- +title: Mistral +id: mistral-adapter +order: 7 +description: "Use Mistral models with TanStack AI — Mistral Large, Mistral Medium, Pixtral vision models, Magistral reasoning models, and Codestral via @tanstack/ai-mistral." +keywords: + - tanstack ai + - mistral + - mistral large + - pixtral + - magistral + - codestral + - adapter + - llm +--- + +The Mistral adapter provides access to Mistral's chat models, including Mistral Large, the multimodal Pixtral family, the Magistral reasoning models, and the Codestral code-specialized model. + +## Installation + +```bash +npm install @tanstack/ai-mistral +``` + +## Basic Usage + +```typescript +import { chat } from "@tanstack/ai"; +import { mistralText } from "@tanstack/ai-mistral"; + +const stream = chat({ + adapter: mistralText("mistral-large-latest"), + messages: [{ role: "user", content: "Hello!" 
}], +}); +``` + +## Basic Usage - Custom API Key + +```typescript +import { chat } from "@tanstack/ai"; +import { createMistralText } from "@tanstack/ai-mistral"; + +const adapter = createMistralText( + "mistral-large-latest", + process.env.MISTRAL_API_KEY!, +); + +const stream = chat({ + adapter, + messages: [{ role: "user", content: "Hello!" }], +}); +``` + +## Configuration + +```typescript +import { + createMistralText, + type MistralTextConfig, +} from "@tanstack/ai-mistral"; + +const config: Omit = { + serverURL: "https://api.mistral.ai", // Optional, this is the default + defaultHeaders: { + "X-Custom-Header": "value", + }, +}; + +const adapter = createMistralText( + "mistral-large-latest", + process.env.MISTRAL_API_KEY!, + config, +); +``` + +## Example: Chat Completion + +```typescript +import { chat, toServerSentEventsResponse } from "@tanstack/ai"; +import { mistralText } from "@tanstack/ai-mistral"; + +export async function POST(request: Request) { + const { messages } = await request.json(); + + const stream = chat({ + adapter: mistralText("mistral-large-latest"), + messages, + }); + + return toServerSentEventsResponse(stream); +} +``` + +## Example: With Tools + +```typescript +import { chat, toolDefinition } from "@tanstack/ai"; +import { mistralText } from "@tanstack/ai-mistral"; +import { z } from "zod"; + +const getWeatherDef = toolDefinition({ + name: "get_weather", + description: "Get the current weather for a location", + inputSchema: z.object({ + location: z.string(), + }), +}); + +const getWeather = getWeatherDef.server(async ({ location }) => { + return { temperature: 72, conditions: "sunny" }; +}); + +const stream = chat({ + adapter: mistralText("mistral-large-latest"), + messages: [{ role: "user", content: "What's the weather in Paris?" 
}], + tools: [getWeather], +}); +``` + +## Example: Multimodal (Vision) + +Use a vision-capable model — `pixtral-large-latest`, `pixtral-12b-2409`, `mistral-medium-latest`, or `mistral-small-latest` — to send images alongside text: + +```typescript +import { chat } from "@tanstack/ai"; +import { mistralText } from "@tanstack/ai-mistral"; + +const stream = chat({ + adapter: mistralText("pixtral-large-latest"), + messages: [ + { + role: "user", + content: [ + { type: "text", content: "What's in this image?" }, + { + type: "image", + source: { + type: "url", + value: "https://example.com/photo.jpg", + }, + }, + ], + }, + ], +}); +``` + +For data-URL or base64 images, set `source.type` to `"data"` and provide `mimeType`: + +```typescript +{ + type: "image", + source: { + type: "data", + mimeType: "image/png", + value: base64String, + }, +} +``` + +See [Multimodal Content](../advanced/multimodal-content) for the full content-part shape. + +## Example: Reasoning (Magistral) + +Magistral models (`magistral-medium-latest`, `magistral-small-latest`) stream their reasoning as separate events before the final answer. The adapter emits AG-UI `REASONING_*` chunks for the thinking content and `TEXT_MESSAGE_*` chunks for the answer: + +```typescript +import { chat } from "@tanstack/ai"; +import { mistralText } from "@tanstack/ai-mistral"; + +const stream = chat({ + adapter: mistralText("magistral-medium-latest"), + messages: [{ role: "user", content: "Why is the sky blue?" }], +}); + +for await (const chunk of stream) { + if (chunk.type === "REASONING_MESSAGE_CONTENT") { + process.stdout.write(`[thinking] ${chunk.delta}`); + } else if (chunk.type === "TEXT_MESSAGE_CONTENT") { + process.stdout.write(chunk.delta); + } +} +``` + +Reasoning events are always closed before any text or tool output begins, so consumers see a complete `REASONING_START → REASONING_MESSAGE_START → REASONING_MESSAGE_CONTENT* → REASONING_MESSAGE_END → REASONING_END` sequence first. 
+ +See [Thinking & Reasoning](../chat/thinking-content) for the cross-provider event spec. + +## Example: Structured Output + +Generate JSON that conforms to a Zod schema using Mistral's `json_schema` response format: + +```typescript +import { generate } from "@tanstack/ai"; +import { mistralText } from "@tanstack/ai-mistral"; +import { z } from "zod"; + +const recipeSchema = z.object({ + name: z.string(), + ingredients: z.array(z.string()), + steps: z.array(z.string()), +}); + +const result = await generate({ + adapter: mistralText("mistral-large-latest"), + messages: [ + { role: "user", content: "Give me a chocolate chip cookie recipe." }, + ], + outputSchema: recipeSchema, +}); + +console.log(result.data); // typed as z.infer +``` + +See [Structured Outputs](../chat/structured-outputs) for the full guide. + +## Model Options + +Mistral exposes provider-specific options via `modelOptions`: + +```typescript +const stream = chat({ + adapter: mistralText("mistral-large-latest"), + messages, + temperature: 0.7, + topP: 0.9, + maxTokens: 1024, + modelOptions: { + random_seed: 42, + stop: ["END"], + safe_prompt: true, + frequency_penalty: 0.5, + presence_penalty: 0.5, + parallel_tool_calls: true, + tool_choice: "auto", + }, +}); +``` + +> Pass `temperature`, `topP`, and `maxTokens` at the top level — not inside `modelOptions`. + +## Environment Variables + +Set your API key in environment variables: + +```bash +MISTRAL_API_KEY=... +``` + +Get a key from the [Mistral Console](https://console.mistral.ai/). 
+ +## Supported Models + +### Chat + +- `mistral-large-latest` — Flagship general-purpose model (128k context) +- `mistral-medium-latest` — Multimodal mid-tier model with vision +- `mistral-small-latest` — Fast, affordable multimodal model with vision +- `ministral-8b-latest` — 8B edge model +- `ministral-3b-latest` — 3B edge model +- `open-mistral-nemo` — Open 12B model + +### Code + +- `codestral-latest` — Code-specialized model (256k context) + +### Vision + +- `pixtral-large-latest` — Large vision model +- `pixtral-12b-2409` — 12B vision model + +### Reasoning + +Reasoning content is streamed as `REASONING_*` events before the final answer. + +- `magistral-medium-latest` — Mid-tier reasoning model +- `magistral-small-latest` — Small reasoning model + +See [Mistral's model comparison](https://docs.mistral.ai/getting-started/models/compare) for full details. + +## API Reference + +### `mistralText(model, config?)` + +Creates a Mistral text adapter using the `MISTRAL_API_KEY` environment variable. + +**Parameters:** + +- `model` — The model name (e.g., `'mistral-large-latest'`) +- `config.serverURL?` — Custom base URL (optional) +- `config.defaultHeaders?` — Headers to attach to every request (optional) + +**Returns:** A Mistral text adapter instance. + +### `createMistralText(model, apiKey, config?)` + +Creates a Mistral text adapter with an explicit API key. + +**Parameters:** + +- `model` — The model name +- `apiKey` — Your Mistral API key +- `config.serverURL?` — Custom base URL (optional) +- `config.defaultHeaders?` — Headers to attach to every request (optional) + +**Returns:** A Mistral text adapter instance. + +## Limitations + +- **Embeddings**: Use the [Mistral SDK](https://github.com/mistralai/client-ts) directly for `mistral-embed`. +- **Image / Audio / Video Generation**: Mistral does not provide these endpoints. Use OpenAI, Gemini, or fal.ai. +- **Text-to-Speech / Transcription**: Not supported. Use OpenAI or ElevenLabs. 
+ +## Next Steps + +- [Getting Started](../getting-started/quick-start) — Learn the basics +- [Tools Guide](../tools/tools) — Define and call tools +- [Structured Outputs](../chat/structured-outputs) — Generate typed JSON +- [Multimodal Content](../advanced/multimodal-content) — Send images and other modalities +- [Other Adapters](./openai) — Explore other providers + +## Provider Tools + +Mistral does not currently expose provider-specific tool factories. +Define your own tools with `toolDefinition()` from `@tanstack/ai`. + +See [Tools](../tools/tools.md) for the general tool-definition flow, or +[Provider Tools](../tools/provider-tools.md) for other providers' +native-tool offerings. diff --git a/docs/config.json b/docs/config.json index f24a5fa0a..183ab65ae 100644 --- a/docs/config.json +++ b/docs/config.json @@ -274,6 +274,10 @@ "label": "Groq", "to": "adapters/groq" }, + { + "label": "Mistral", + "to": "adapters/mistral" + }, { "label": "ElevenLabs", "to": "adapters/elevenlabs" diff --git a/packages/typescript/ai-mistral/README.md b/packages/typescript/ai-mistral/README.md index bb2dce25f..3c9e09b3f 100644 --- a/packages/typescript/ai-mistral/README.md +++ b/packages/typescript/ai-mistral/README.md @@ -46,7 +46,7 @@ console.log(result.text) ```typescript import { createMistralText } from '@tanstack/ai-mistral' -const adapter = createMistralText('mistral-large-latest', 'api_key') +const adapter = createMistralText('mistral-large-latest', process.env.MISTRAL_API_KEY!) 
``` ## Supported Models @@ -72,8 +72,9 @@ See [Mistral model comparison](https://docs.mistral.ai/getting-started/models/co - ✅ Streaming chat completions - ✅ Structured output (JSON Schema) - ✅ Function/tool calling -- ✅ Multimodal input (text + images for vision models) -- ❌ Embeddings (use the Mistral SDK directly) +- ✅ Reasoning (magistral-* models — streamed as `REASONING_*` events) +- ✅ Multimodal input (text + images) — requires a vision-capable model (`pixtral-large-latest`, `pixtral-12b-2409`, `mistral-medium-latest`, or `mistral-small-latest`) +- ❌ Embeddings (use [@mistralai/mistralai](https://github.com/mistralai/client-ts) directly) - ❌ Image generation ## Tree-Shakeable Adapters diff --git a/packages/typescript/ai-mistral/package.json b/packages/typescript/ai-mistral/package.json index 286bcf4fd..c94dfc12e 100644 --- a/packages/typescript/ai-mistral/package.json +++ b/packages/typescript/ai-mistral/package.json @@ -43,8 +43,11 @@ "adapter" ], "devDependencies": { + "@tanstack/ai": "workspace:*", + "@tanstack/ai-client": "workspace:*", "@vitest/coverage-v8": "4.0.14", - "vite": "^7.2.7" + "vite": "^7.2.7", + "zod": "^4.2.0" }, "peerDependencies": { "@tanstack/ai": "workspace:^", diff --git a/packages/typescript/ai-mistral/src/adapters/text.ts b/packages/typescript/ai-mistral/src/adapters/text.ts index 96b007085..0fdcb0d70 100644 --- a/packages/typescript/ai-mistral/src/adapters/text.ts +++ b/packages/typescript/ai-mistral/src/adapters/text.ts @@ -1,5 +1,4 @@ import { BaseTextAdapter } from '@tanstack/ai/adapters' -import { validateTextProviderOptions } from '../text/text-provider-options' import { convertToolsToProviderFormat } from '../tools' import { createMistralClient, @@ -43,10 +42,33 @@ import type { MistralClientConfig } from '../utils' const asChunk = (chunk: Record) => chunk as unknown as StreamChunk +/** + * Parse the accumulated streaming arguments for a tool call. 
Throws a clear + * error if the JSON is malformed — silently substituting `{}` would let a + * tool fire with empty inputs, masking truncated streams or mis-shaped output. + */ +function parseToolCallInput(toolCall: { + id: string + name: string + arguments: string +}): unknown { + if (!toolCall.arguments) return {} + try { + return transformNullsToUndefined(JSON.parse(toolCall.arguments)) + } catch (cause) { + const preview = toolCall.arguments.slice(0, 200) + const ellipsis = toolCall.arguments.length > 200 ? '...' : '' + throw new Error( + `Failed to parse tool call arguments for tool '${toolCall.name}' (id: ${toolCall.id}). Arguments: ${preview}${ellipsis}`, + { cause }, + ) + } +} + /** * Configuration for Mistral text adapter. */ -export interface MistralTextConfig extends MistralClientConfig {} +export type MistralTextConfig = MistralClientConfig /** * Alias for TextProviderOptions for external use. @@ -91,7 +113,24 @@ interface MistralRawChoice { index?: number delta?: { role?: string | null - content?: string | Array<{ type: string; text?: string }> | null + content?: + | string + | Array<{ + type: string + text?: string + // Mistral magistral models stream reasoning as content parts of + // type 'thinking' whose `thinking` field is itself an array of + // text/reference chunks. See Mistral SDK ThinkChunk type. + thinking?: Array<{ type: string; text?: string }> + }> + | null + // Some OpenAI-compatible deployments (DeepSeek, Groq for reasoning + // models, and aimock-based test environments) emit reasoning via a + // separate `reasoning_content` delta field rather than as a content + // part. Accept both shapes — they cannot collide because real Mistral + // never sets the OpenAI-compat field, and aimock never sets the + // thinking content part. 
+ reasoning_content?: string | null tool_calls?: Array } finish_reason?: string | null @@ -252,7 +291,17 @@ export class MistralTextAdapter< let accumulatedContent = '' const timestamp = aguiState.timestamp let hasEmittedTextMessageStart = false + let hasEmittedTextMessageEnd = false let hasEmittedToolCall = false + let hasEmittedRunFinished = false + let lastChunkModel = options.model + + // Reasoning lifecycle (magistral-* models stream `thinking` content + // parts before any text). Mirrors the anthropic adapter's pattern: + // open REASONING_* events on the first thinking delta, close them when + // text/tool content begins or the run finishes. + let reasoningMessageId: string | null = null + let hasClosedReasoning = false const toolCallsInProgress = new Map< number, @@ -267,6 +316,7 @@ export class MistralTextAdapter< try { for await (const chunk of stream) { + lastChunkModel = chunk.model || options.model const choice = chunk.choices?.[0] if (!choice) continue @@ -284,9 +334,68 @@ export class MistralTextAdapter< } const delta = choice.delta - const deltaContent = this.extractDeltaText(delta?.content) + const { text: deltaContent, thinking: deltaThinkingFromContent } = + this.extractDeltaParts(delta?.content) + // Reasoning may also arrive as a separate top-level field + // (`delta.reasoning_content`) on OpenAI-compatible deployments. + const deltaThinking = + deltaThinkingFromContent + + (typeof delta?.reasoning_content === 'string' + ? delta.reasoning_content + : '') const deltaToolCalls = delta?.tool_calls + // Emit reasoning events FIRST so they always precede the matching + // text or tool deltas in the same chunk. 
+ if (deltaThinking) { + if (reasoningMessageId === null) { + reasoningMessageId = generateId(this.name) + yield asChunk({ + type: 'REASONING_START', + messageId: reasoningMessageId, + model: chunkModel, + timestamp, + }) + yield asChunk({ + type: 'REASONING_MESSAGE_START', + messageId: reasoningMessageId, + role: 'reasoning', + model: chunkModel, + timestamp, + }) + } + yield asChunk({ + type: 'REASONING_MESSAGE_CONTENT', + messageId: reasoningMessageId, + model: chunkModel, + timestamp, + delta: deltaThinking, + }) + } + + // Close reasoning before any text/tool output starts in this chunk. + const aboutToEmitOutput = + !!deltaContent || (!!deltaToolCalls && deltaToolCalls.length > 0) + if ( + reasoningMessageId !== null && + !hasClosedReasoning && + aboutToEmitOutput + ) { + hasClosedReasoning = true + yield asChunk({ + type: 'REASONING_MESSAGE_END', + messageId: reasoningMessageId, + model: chunkModel, + timestamp, + }) + yield asChunk({ + type: 'REASONING_END', + messageId: reasoningMessageId, + model: chunkModel, + timestamp, + }) + } + if (deltaContent) { if (!hasEmittedTextMessageStart) { hasEmittedTextMessageStart = true @@ -345,7 +454,9 @@ export class MistralTextAdapter< toolCall.arguments += argsDelta } - if (toolCall.id && toolCall.name && !toolCall.started) { + const justStarted = + !!toolCall.id && !!toolCall.name && !toolCall.started + if (justStarted) { toolCall.started = true yield asChunk({ type: 'TOOL_CALL_START', @@ -356,9 +467,18 @@ export class MistralTextAdapter< timestamp, index, }) - } - - if (argsDelta !== undefined && toolCall.started) { + // Replay any args buffered before id+name arrived (including + // this chunk's argsDelta, if any). 
+ if (toolCall.arguments.length > 0) { + yield asChunk({ + type: 'TOOL_CALL_ARGS', + toolCallId: toolCall.id, + model: chunkModel, + timestamp, + delta: toolCall.arguments, + }) + } + } else if (argsDelta !== undefined && toolCall.started) { yield asChunk({ type: 'TOOL_CALL_ARGS', toolCallId: toolCall.id, @@ -383,14 +503,7 @@ export class MistralTextAdapter< continue } - let parsedInput: unknown = {} - try { - parsedInput = toolCall.arguments - ? JSON.parse(toolCall.arguments) - : {} - } catch { - parsedInput = {} - } + const parsedInput = parseToolCallInput(toolCall) toolCall.ended = true hasEmittedToolCall = true @@ -413,7 +526,27 @@ export class MistralTextAdapter< ? 'length' : 'stop' - if (hasEmittedTextMessageStart) { + // If the run finished while reasoning was still open (no text or + // tool output ever followed), close reasoning before TEXT/RUN + // finalization events. + if (reasoningMessageId !== null && !hasClosedReasoning) { + hasClosedReasoning = true + yield asChunk({ + type: 'REASONING_MESSAGE_END', + messageId: reasoningMessageId, + model: chunkModel, + timestamp, + }) + yield asChunk({ + type: 'REASONING_END', + messageId: reasoningMessageId, + model: chunkModel, + timestamp, + }) + } + + if (hasEmittedTextMessageStart && !hasEmittedTextMessageEnd) { + hasEmittedTextMessageEnd = true yield asChunk({ type: 'TEXT_MESSAGE_END', messageId: aguiState.messageId, @@ -423,6 +556,7 @@ export class MistralTextAdapter< } const usage = chunk.usage + hasEmittedRunFinished = true yield asChunk({ type: 'RUN_FINISHED', runId: aguiState.runId, @@ -440,22 +574,115 @@ export class MistralTextAdapter< }) } } - } catch (error: unknown) { - const err = error as Error & { code?: string } - yield asChunk({ - type: 'RUN_ERROR', - runId: aguiState.runId, - model: options.model, - timestamp, - message: err.message || 'Unknown error occurred', - code: err.code, - error: { - message: err.message || 'Unknown error occurred', - code: err.code, - }, - }) - throw err + // Stream 
ended cleanly without finish_reason — flush any open + // lifecycle events so consumers don't see orphaned starts. This + // happens for abrupt `[DONE]` or upstream cuts. + if (!hasEmittedRunFinished) { + if (reasoningMessageId !== null && !hasClosedReasoning) { + hasClosedReasoning = true + yield asChunk({ + type: 'REASONING_MESSAGE_END', + messageId: reasoningMessageId, + model: lastChunkModel, + timestamp, + }) + yield asChunk({ + type: 'REASONING_END', + messageId: reasoningMessageId, + model: lastChunkModel, + timestamp, + }) + } + for (const [, toolCall] of toolCallsInProgress) { + if (toolCall.started && !toolCall.ended) { + toolCall.ended = true + hasEmittedToolCall = true + yield asChunk({ + type: 'TOOL_CALL_END', + toolCallId: toolCall.id, + toolCallName: toolCall.name, + toolName: toolCall.name, + model: lastChunkModel, + timestamp, + input: parseToolCallInput(toolCall), + }) + } + } + if (hasEmittedTextMessageStart && !hasEmittedTextMessageEnd) { + hasEmittedTextMessageEnd = true + yield asChunk({ + type: 'TEXT_MESSAGE_END', + messageId: aguiState.messageId, + model: lastChunkModel, + timestamp, + }) + } + hasEmittedRunFinished = true + yield asChunk({ + type: 'RUN_FINISHED', + runId: aguiState.runId, + threadId: aguiState.threadId, + model: lastChunkModel, + timestamp, + usage: undefined, + finishReason: hasEmittedToolCall ? 'tool_calls' : 'stop', + }) + } + } catch (error: unknown) { + // Lifecycle cleanup (TEXT_MESSAGE_END / TOOL_CALL_END / REASONING_END) + // on error path so consumers don't see orphaned starts. RUN_ERROR is + // emitted by the outer chatStream catch — emitting it here would + // duplicate the event. 
+ if (reasoningMessageId !== null && !hasClosedReasoning) { + hasClosedReasoning = true + yield asChunk({ + type: 'REASONING_MESSAGE_END', + messageId: reasoningMessageId, + model: lastChunkModel, + timestamp, + }) + yield asChunk({ + type: 'REASONING_END', + messageId: reasoningMessageId, + model: lastChunkModel, + timestamp, + }) + } + if (hasEmittedTextMessageStart && !hasEmittedTextMessageEnd) { + hasEmittedTextMessageEnd = true + yield asChunk({ + type: 'TEXT_MESSAGE_END', + messageId: aguiState.messageId, + model: lastChunkModel, + timestamp, + }) + } + for (const [, toolCall] of toolCallsInProgress) { + if (toolCall.started && !toolCall.ended) { + toolCall.ended = true + // Best-effort parse for the partial args; if invalid, surface + // empty input rather than throwing inside the cleanup path. + let partialInput: unknown = {} + try { + partialInput = toolCall.arguments + ? transformNullsToUndefined(JSON.parse(toolCall.arguments)) + : {} + } catch { + partialInput = {} + } + yield asChunk({ + type: 'TOOL_CALL_END', + toolCallId: toolCall.id, + toolCallName: toolCall.name, + toolName: toolCall.name, + model: lastChunkModel, + timestamp, + input: partialInput, + }) + } + } + throw error } } @@ -492,7 +719,13 @@ export class MistralTextAdapter< throw new Error(`Mistral API error ${response.status}: ${errorText}`) } - const reader = response.body!.getReader() + if (!response.body) { + throw new Error( + 'Mistral API returned a response with no body. 
This may indicate a proxy or runtime that does not support streaming.', + ) + } + + const reader = response.body.getReader() const decoder = new TextDecoder() let buffer = '' @@ -511,11 +744,40 @@ export class MistralTextAdapter< const data = trimmed.slice(5).trimStart() if (data === '[DONE]') return + let parsed: unknown try { - yield JSON.parse(data) as MistralRawChunk - } catch { - // skip malformed chunks + parsed = JSON.parse(data) + } catch (e) { + if (e instanceof SyntaxError) { + console.warn( + `[mistral] skipped unparseable SSE chunk: ${data.slice(0, 200)}`, + ) + continue + } + throw e + } + + // Mistral signals mid-stream errors via an `error` field. Surface + // them as RUN_ERROR rather than swallowing them as empty chunks. + if ( + parsed && + typeof parsed === 'object' && + 'error' in parsed && + !('choices' in parsed) + ) { + const errPayload = (parsed as { error: unknown }).error + const message = + typeof errPayload === 'string' + ? errPayload + : errPayload && + typeof errPayload === 'object' && + 'message' in errPayload + ? String((errPayload as { message: unknown }).message) + : JSON.stringify(errPayload) + throw new Error(`Mistral stream error: ${message}`) } + + yield parsed as MistralRawChunk } } } finally { @@ -550,6 +812,8 @@ export class MistralTextAdapter< ...rest, messages: messages.map(messageToWire), stream: true, + // Opt in to usage on the final streaming chunk. + stream_options: { include_usage: true }, ...(maxTokens != null && { max_tokens: maxTokens }), ...(topP != null && { top_p: topP }), ...(randomSeed != null && { random_seed: randomSeed }), @@ -565,18 +829,41 @@ export class MistralTextAdapter< } /** - * Extracts text from a Mistral delta content, which can be a string or an - * array of content chunks. + * Splits a Mistral delta content payload into text and reasoning deltas. + * Mistral reasoning models (magistral-*) stream reasoning content as + * `{ type: 'thinking', thinking: [{ type: 'text', text }, ...] 
}` content + * parts. A single delta may contain text only, thinking only, or — rarely — + * both (when a step transitions); both fields are returned so the caller + * can sequence REASONING and TEXT lifecycle events in order. */ - private extractDeltaText( - content: string | Array<{ type: string; text?: string }> | null | undefined, - ): string { - if (!content) return '' - if (typeof content === 'string') return content - return content - .filter((c) => c.type === 'text' && typeof c.text === 'string') - .map((c) => c.text!) - .join('') + private extractDeltaParts( + content: + | string + | Array<{ + type: string + text?: string + thinking?: Array<{ type: string; text?: string }> + }> + | null + | undefined, + ): { text: string; thinking: string } { + if (!content) return { text: '', thinking: '' } + if (typeof content === 'string') return { text: content, thinking: '' } + + let text = '' + let thinking = '' + for (const part of content) { + if (part.type === 'text' && typeof part.text === 'string') { + text += part.text + } else if (part.type === 'thinking' && Array.isArray(part.thinking)) { + for (const inner of part.thinking) { + if (inner.type === 'text' && typeof inner.text === 'string') { + thinking += inner.text + } + } + } + } + return { text, thinking } } /** @@ -586,19 +873,9 @@ export class MistralTextAdapter< options: TextOptions, ): ChatCompletionStreamRequest { const modelOptions = options.modelOptions as - | Omit< - InternalTextProviderOptions, - 'max_tokens' | 'tools' | 'temperature' | 'top_p' - > + | Omit | undefined - if (modelOptions) { - validateTextProviderOptions({ - ...modelOptions, - model: options.model, - } as InternalTextProviderOptions) - } - const tools = options.tools ? 
convertToolsToProviderFormat(options.tools) : undefined @@ -619,9 +896,9 @@ export class MistralTextAdapter< return { model: options.model, messages: messages as ChatCompletionStreamRequest['messages'], - temperature: options.temperature, + temperature: options.temperature ?? modelOptions?.temperature ?? undefined, maxTokens: options.maxTokens, - topP: options.topP ?? undefined, + topP: options.topP ?? modelOptions?.top_p ?? undefined, tools: tools as ChatCompletionStreamRequest['tools'], stream: true, ...(modelOptions && { @@ -706,11 +983,9 @@ export class MistralTextAdapter< } } - const parts: Array = [] - for (const part of contentParts) { - const converted = this.convertContentPartToMistral(part) - if (converted) parts.push(converted) - } + const parts = contentParts.map((part) => + this.convertContentPartToMistral(part), + ) return { role: 'user', @@ -724,7 +999,7 @@ export class MistralTextAdapter< */ private convertContentPartToMistral( part: ContentPart, - ): ChatCompletionContentPart | undefined { + ): ChatCompletionContentPart { if (part.type === 'text') { return { type: 'text', text: part.content } } @@ -744,7 +1019,9 @@ export class MistralTextAdapter< } } - return undefined + throw new Error( + `Mistral text adapter does not support content part of type '${(part as ContentPart).type}'. Supported types: text, image. 
Use a vision-capable model (pixtral-large-latest, pixtral-12b-2409, mistral-medium-latest, or mistral-small-latest) for images.`, + ) } /** diff --git a/packages/typescript/ai-mistral/src/model-meta.ts b/packages/typescript/ai-mistral/src/model-meta.ts index 1c78b8dfb..45ba0a56e 100644 --- a/packages/typescript/ai-mistral/src/model-meta.ts +++ b/packages/typescript/ai-mistral/src/model-meta.ts @@ -38,7 +38,7 @@ interface ModelMeta { const MISTRAL_LARGE_LATEST = { name: 'mistral-large-latest', - context_window: 256_000, + context_window: 131_072, max_completion_tokens: 8_192, pricing: { input: { normal: 0.5 }, diff --git a/packages/typescript/ai-mistral/src/text/text-provider-options.ts b/packages/typescript/ai-mistral/src/text/text-provider-options.ts index 65d2a57b0..223d32797 100644 --- a/packages/typescript/ai-mistral/src/text/text-provider-options.ts +++ b/packages/typescript/ai-mistral/src/text/text-provider-options.ts @@ -101,13 +101,3 @@ export interface InternalTextProviderOptions extends MistralTextProviderOptions * External provider options (what users pass in). */ export type ExternalTextProviderOptions = MistralTextProviderOptions - -/** - * Validates text provider options. - * Basic validation stub — Mistral API handles detailed validation. - */ -export function validateTextProviderOptions( - _options: InternalTextProviderOptions, -): void { - // Mistral API handles detailed validation -} diff --git a/packages/typescript/ai-mistral/src/utils/client.ts b/packages/typescript/ai-mistral/src/utils/client.ts index 7a25a40dc..f619571dd 100644 --- a/packages/typescript/ai-mistral/src/utils/client.ts +++ b/packages/typescript/ai-mistral/src/utils/client.ts @@ -66,5 +66,5 @@ export function getMistralApiKeyFromEnv(): string { * Generates a unique ID with a prefix. 
*/ export function generateId(prefix: string): string { - return `${prefix}-${Date.now()}-${Math.random().toString(36).substring(7)}` + return `${prefix}-${crypto.randomUUID()}` } diff --git a/packages/typescript/ai-mistral/src/utils/schema-converter.ts b/packages/typescript/ai-mistral/src/utils/schema-converter.ts index b941b1396..f5b3d442d 100644 --- a/packages/typescript/ai-mistral/src/utils/schema-converter.ts +++ b/packages/typescript/ai-mistral/src/utils/schema-converter.ts @@ -11,21 +11,22 @@ export function transformNullsToUndefined(obj: T): T { } if (Array.isArray(obj)) { - return obj - .map((item) => transformNullsToUndefined(item)) - .filter((item) => item !== undefined) as unknown as T + // Preserve array length and indices — converting null elements to + // undefined slots rather than dropping them. `Array` schemas + // depend on positional alignment. + return obj.map((item) => transformNullsToUndefined(item)) as unknown as T } if ( typeof obj === 'object' && Object.getPrototypeOf(obj) === Object.prototype ) { + // Preserve every key — `null` values become `undefined` values, but the + // key itself is not removed. Schemas distinguishing absent vs explicit + // null rely on this. 
const result: Record = {} for (const [key, value] of Object.entries(obj as Record)) { - const transformed = transformNullsToUndefined(value) - if (transformed !== undefined) { - result[key] = transformed - } + result[key] = transformNullsToUndefined(value) } return result as T } diff --git a/packages/typescript/ai-mistral/tests/mistral-adapter.test.ts b/packages/typescript/ai-mistral/tests/mistral-adapter.test.ts index 042ce0c0a..ca4652c9f 100644 --- a/packages/typescript/ai-mistral/tests/mistral-adapter.test.ts +++ b/packages/typescript/ai-mistral/tests/mistral-adapter.test.ts @@ -8,7 +8,29 @@ import { type Mock, } from 'vitest' import { createMistralText, mistralText } from '../src/adapters/text' -import type { StreamChunk, Tool } from '@tanstack/ai' +import { transformNullsToUndefined } from '../src/utils/schema-converter' +import type { StreamChunk, Tool, TextOptions } from '@tanstack/ai' +import type { MistralTextProviderOptions } from '../src/adapters/text' + +/** + * Builds chat options for tests. `chatStream`'s `TextOptions` requires fields + * (e.g. `logger`) that the adapter only consults when provided; the cast + * lets tests focus on the inputs they actually exercise without rebuilding + * a full options object on every call. 
+ */ +function chatOpts( + opts: Partial> & { + model: string + messages: Array<{ + role: 'user' | 'assistant' | 'tool' + content: unknown + toolCallId?: string + toolCalls?: Array + }> + }, +): TextOptions { + return opts as unknown as TextOptions +} // Declare mocks at module level let mockComplete: Mock<(...args: Array) => unknown> @@ -184,10 +206,10 @@ describe('Mistral AG-UI event emission', () => { const adapter = createMistralText('mistral-large-latest', 'test-api-key') const chunks: Array = [] - for await (const chunk of adapter.chatStream({ + for await (const chunk of adapter.chatStream(chatOpts({ model: 'mistral-large-latest', messages: [{ role: 'user', content: 'Hello' }], - })) { + }))) { chunks.push(chunk) } @@ -233,10 +255,10 @@ describe('Mistral AG-UI event emission', () => { const adapter = createMistralText('mistral-large-latest', 'test-api-key') const chunks: Array = [] - for await (const chunk of adapter.chatStream({ + for await (const chunk of adapter.chatStream(chatOpts({ model: 'mistral-large-latest', messages: [{ role: 'user', content: 'Hello' }], - })) { + }))) { chunks.push(chunk) } @@ -287,10 +309,10 @@ describe('Mistral AG-UI event emission', () => { const adapter = createMistralText('mistral-large-latest', 'test-api-key') const chunks: Array = [] - for await (const chunk of adapter.chatStream({ + for await (const chunk of adapter.chatStream(chatOpts({ model: 'mistral-large-latest', messages: [{ role: 'user', content: 'Hello' }], - })) { + }))) { chunks.push(chunk) } @@ -376,11 +398,11 @@ describe('Mistral AG-UI event emission', () => { const adapter = createMistralText('mistral-large-latest', 'test-api-key') const chunks: Array = [] - for await (const chunk of adapter.chatStream({ + for await (const chunk of adapter.chatStream(chatOpts({ model: 'mistral-large-latest', messages: [{ role: 'user', content: 'Weather in Berlin?' 
}], tools: [weatherTool], - })) { + }))) { chunks.push(chunk) } @@ -437,10 +459,10 @@ describe('Mistral AG-UI event emission', () => { let thrownError: Error | undefined try { - for await (const chunk of adapter.chatStream({ + for await (const chunk of adapter.chatStream(chatOpts({ model: 'mistral-large-latest', messages: [{ role: 'user', content: 'Hello' }], - })) { + }))) { chunks.push(chunk) } } catch (err) { @@ -453,7 +475,7 @@ describe('Mistral AG-UI event emission', () => { const runErrorChunk = chunks.find((c) => c.type === 'RUN_ERROR') expect(runErrorChunk).toBeDefined() if (runErrorChunk?.type === 'RUN_ERROR') { - expect(runErrorChunk.error.message).toBe('Stream interrupted') + expect(runErrorChunk.error?.message).toBe('Stream interrupted') } }) @@ -503,10 +525,10 @@ describe('Mistral AG-UI event emission', () => { const adapter = createMistralText('mistral-large-latest', 'test-api-key') const chunks: Array = [] - for await (const chunk of adapter.chatStream({ + for await (const chunk of adapter.chatStream(chatOpts({ model: 'mistral-large-latest', messages: [{ role: 'user', content: 'Say hello' }], - })) { + }))) { chunks.push(chunk) } @@ -527,4 +549,678 @@ describe('Mistral AG-UI event emission', () => { expect(secondContent.content).toBe('Hello world') } }) + + it('emits exactly one RUN_ERROR on stream error (no duplicates from inner+outer catch)', async () => { + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + status: 200, + body: new ReadableStream({ + start(controller) { + controller.enqueue( + new TextEncoder().encode( + `data: ${JSON.stringify( + toApiChunk({ + id: 'cmpl-dup', + model: 'mistral-large-latest', + choices: [ + { index: 0, delta: { content: 'x' }, finishReason: null }, + ], + }), + )}\n\n`, + ), + ) + controller.error(new Error('boom')) + }, + }), + }), + ) + + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + const chunks: Array = [] + + try { + for await (const chunk of 
adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Hi' }], + }), + )) { + chunks.push(chunk) + } + } catch { + // expected + } + + const runErrors = chunks.filter((c) => c.type === 'RUN_ERROR') + expect(runErrors).toHaveLength(1) + }) + + it('flushes TEXT_MESSAGE_END and RUN_FINISHED when stream ends without finish_reason', async () => { + // Stream emits content, then [DONE] without ever sending a finish_reason + // chunk. Consumers must still receive matched lifecycle events. + const sseBody = + `data: ${JSON.stringify( + toApiChunk({ + id: 'cmpl-cut', + model: 'mistral-large-latest', + choices: [ + { index: 0, delta: { content: 'partial' }, finishReason: null }, + ], + }), + )}\n\ndata: [DONE]\n\n` + + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + status: 200, + body: new ReadableStream({ + start(controller) { + controller.enqueue(new TextEncoder().encode(sseBody)) + controller.close() + }, + }), + }), + ) + mockComplete = vi.fn() + + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + const chunks: Array = [] + + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Go' }], + }), + )) { + chunks.push(chunk) + } + + const types: Array = chunks.map((c) => c.type) + expect(types).toContain('TEXT_MESSAGE_START') + expect(types).toContain('TEXT_MESSAGE_END') + expect(types).toContain('RUN_FINISHED') + expect(types.indexOf('TEXT_MESSAGE_END')).toBeLessThan( + types.indexOf('RUN_FINISHED'), + ) + }) + + it('replays buffered tool-call args when arguments arrive before id and name', async () => { + // First delta carries arguments fragment but no id/name; second delta + // brings id+name; third closes the call. Consumers tracking ARGS deltas + // must see the buffered prefix replayed once START is emitted. 
+ setupMockStream([ + { + id: 'cmpl-replay', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { + toolCalls: [ + { + index: 0, + function: { arguments: '{"loc' }, + }, + ], + }, + finishReason: null, + }, + ], + }, + { + id: 'cmpl-replay', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { + toolCalls: [ + { + index: 0, + id: 'tc_1', + function: { name: 'lookup_weather' }, + }, + ], + }, + finishReason: null, + }, + ], + }, + { + id: 'cmpl-replay', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { + toolCalls: [ + { index: 0, function: { arguments: 'ation":"Berlin"}' } }, + ], + }, + finishReason: 'tool_calls', + }, + ], + }, + ]) + + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + const chunks: Array = [] + + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Weather in Berlin?' }], + tools: [weatherTool], + }), + )) { + chunks.push(chunk) + } + + const argsChunks = chunks.filter((c) => c.type === 'TOOL_CALL_ARGS') + const concatenated = argsChunks + .map((c) => + c.type === 'TOOL_CALL_ARGS' ? 
(c as { delta: string }).delta : '', + ) + .join('') + expect(concatenated).toBe('{"location":"Berlin"}') + + // The TOOL_CALL_END input must reflect the full JSON + const endChunk = chunks.find((c) => c.type === 'TOOL_CALL_END') + if (endChunk?.type === 'TOOL_CALL_END') { + expect(endChunk.input).toEqual({ location: 'Berlin' }) + } + }) + + it('throws on malformed tool-call arguments rather than silently substituting {}', async () => { + setupMockStream([ + { + id: 'cmpl-bad', + model: 'mistral-large-latest', + choices: [ + { + index: 0, + delta: { + toolCalls: [ + { + index: 0, + id: 'tc_bad', + function: { name: 'lookup_weather', arguments: '{not json' }, + }, + ], + }, + finishReason: 'tool_calls', + }, + ], + }, + ]) + + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + let caught: Error | undefined + try { + for await (const _chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'x' }], + tools: [weatherTool], + }), + )) { + // drain + } + } catch (err) { + caught = err as Error + } + + expect(caught).toBeDefined() + expect(caught?.message).toMatch(/Failed to parse tool call arguments/) + expect(caught?.message).toMatch(/lookup_weather/) + }) + + it('sends stream_options.include_usage so Mistral returns usage on streaming', async () => { + let capturedBody: unknown + vi.stubGlobal( + 'fetch', + vi.fn(async (_url: string, init?: { body?: string }) => { + capturedBody = init?.body ? 
JSON.parse(init.body) : undefined + return { + ok: true, + status: 200, + body: new ReadableStream({ + start(controller) { + controller.enqueue(new TextEncoder().encode('data: [DONE]\n\n')) + controller.close() + }, + }), + } + }), + ) + mockComplete = vi.fn() + + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + for await (const _chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Hi' }], + }), + )) { + // drain + } + + expect(capturedBody).toMatchObject({ + stream: true, + stream_options: { include_usage: true }, + }) + }) + + it('reads temperature and top_p from modelOptions when not set at top level', async () => { + let capturedBody: { temperature?: number; top_p?: number } | undefined + vi.stubGlobal( + 'fetch', + vi.fn(async (_url: string, init?: { body?: string }) => { + capturedBody = init?.body ? JSON.parse(init.body) : undefined + return { + ok: true, + status: 200, + body: new ReadableStream({ + start(controller) { + controller.enqueue(new TextEncoder().encode('data: [DONE]\n\n')) + controller.close() + }, + }), + } + }), + ) + mockComplete = vi.fn() + + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + for await (const _chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Hi' }], + modelOptions: { temperature: 0.42, top_p: 0.9 }, + }), + )) { + // drain + } + + expect(capturedBody?.temperature).toBe(0.42) + expect(capturedBody?.top_p).toBe(0.9) + }) + + it('throws a clear error for unsupported content part types', async () => { + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + status: 200, + body: new ReadableStream({ + start(controller) { + controller.enqueue(new TextEncoder().encode('data: [DONE]\n\n')) + controller.close() + }, + }), + }), + ) + mockComplete = vi.fn() + + const adapter = createMistralText('mistral-large-latest', 'test-api-key') + let 
caught: Error | undefined + try { + for await (const _chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [ + { + role: 'user', + content: [ + { + type: 'audio', + source: { type: 'url', value: 'https://example.com/a.mp3' }, + }, + ], + }, + ], + }), + )) { + // drain + } + } catch (err) { + caught = err as Error + } + + expect(caught).toBeDefined() + expect(caught?.message).toMatch( + /Mistral text adapter does not support content part of type 'audio'/, + ) + }) +}) + +describe('Mistral reasoning (magistral-* models)', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + afterEach(() => { + vi.unstubAllEnvs() + vi.unstubAllGlobals() + }) + + it('emits REASONING_* events when delta.content contains thinking parts, before any TEXT_MESSAGE_*', async () => { + // Magistral streaming format: delta.content is an array containing + // `{ type: 'thinking', thinking: [{ type: 'text', text: '...' }] }`. + // We build the SSE body by hand because `toApiChunk` strips non-text parts. + const sseChunks: Array> = [ + { + id: 'cmpl-think-1', + model: 'magistral-medium-latest', + object: 'chat.completion.chunk', + created: 0, + choices: [ + { + index: 0, + delta: { + content: [ + { + type: 'thinking', + thinking: [{ type: 'text', text: 'Let me think... ' }], + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'cmpl-think-1', + model: 'magistral-medium-latest', + object: 'chat.completion.chunk', + created: 0, + choices: [ + { + index: 0, + delta: { + content: [ + { + type: 'thinking', + thinking: [{ type: 'text', text: 'the answer is 42.' }], + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'cmpl-think-1', + model: 'magistral-medium-latest', + object: 'chat.completion.chunk', + created: 0, + choices: [ + { + index: 0, + delta: { content: 'The answer is 42.' 
}, + finish_reason: null, + }, + ], + }, + { + id: 'cmpl-think-1', + model: 'magistral-medium-latest', + object: 'chat.completion.chunk', + created: 0, + choices: [{ index: 0, delta: {}, finish_reason: 'stop' }], + }, + ] + + const sseBody = + sseChunks.map((c) => `data: ${JSON.stringify(c)}`).join('\n\n') + + '\n\ndata: [DONE]\n\n' + + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + status: 200, + body: new ReadableStream({ + start(controller) { + controller.enqueue(new TextEncoder().encode(sseBody)) + controller.close() + }, + }), + }), + ) + mockComplete = vi.fn() + + const adapter = createMistralText('magistral-medium-latest', 'test-api-key') + const chunks: Array = [] + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'magistral-medium-latest', + messages: [{ role: 'user', content: 'What is the answer?' }], + }), + )) { + chunks.push(chunk) + } + + const types: Array = chunks.map((c) => c.type) + + // Reasoning lifecycle is present and ordered correctly + expect(types).toContain('REASONING_START') + expect(types).toContain('REASONING_MESSAGE_START') + expect(types).toContain('REASONING_MESSAGE_CONTENT') + expect(types).toContain('REASONING_MESSAGE_END') + expect(types).toContain('REASONING_END') + + expect(types.indexOf('REASONING_START')).toBeLessThan( + types.indexOf('REASONING_MESSAGE_CONTENT'), + ) + // REASONING_END must precede TEXT_MESSAGE_START + expect(types.indexOf('REASONING_END')).toBeLessThan( + types.indexOf('TEXT_MESSAGE_START'), + ) + + // Reasoning content reassembles correctly + const reasoningDeltas = chunks.filter( + (c) => c.type === 'REASONING_MESSAGE_CONTENT', + ) + const reasoningText = reasoningDeltas + .map((c) => + c.type === 'REASONING_MESSAGE_CONTENT' + ? (c as { delta: string }).delta + : '', + ) + .join('') + expect(reasoningText).toBe('Let me think... 
the answer is 42.') + }) + + it('emits REASONING_* events when the upstream uses delta.reasoning_content (OpenAI-compat / aimock format)', async () => { + // OpenAI-compatible deployments (DeepSeek, Groq for reasoning models, + // and the aimock test backend) stream reasoning via delta.reasoning_content + // rather than as a thinking content part. The adapter must accept both + // shapes for the e2e suite to run against aimock. + const sseChunks: Array> = [ + { + id: 'cmpl-rc', + model: 'magistral-medium-latest', + object: 'chat.completion.chunk', + created: 0, + choices: [ + { + index: 0, + delta: { reasoning_content: 'Considering options...' }, + finish_reason: null, + }, + ], + }, + { + id: 'cmpl-rc', + model: 'magistral-medium-latest', + object: 'chat.completion.chunk', + created: 0, + choices: [ + { + index: 0, + delta: { content: 'Final answer.' }, + finish_reason: null, + }, + ], + }, + { + id: 'cmpl-rc', + model: 'magistral-medium-latest', + object: 'chat.completion.chunk', + created: 0, + choices: [{ index: 0, delta: {}, finish_reason: 'stop' }], + }, + ] + const sseBody = + sseChunks.map((c) => `data: ${JSON.stringify(c)}`).join('\n\n') + + '\n\ndata: [DONE]\n\n' + + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + status: 200, + body: new ReadableStream({ + start(controller) { + controller.enqueue(new TextEncoder().encode(sseBody)) + controller.close() + }, + }), + }), + ) + mockComplete = vi.fn() + + const adapter = createMistralText('magistral-medium-latest', 'test-api-key') + const chunks: Array = [] + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'magistral-medium-latest', + messages: [{ role: 'user', content: 'Decide.' 
}], + }), + )) { + chunks.push(chunk) + } + + const types: Array = chunks.map((c) => c.type) + expect(types).toContain('REASONING_MESSAGE_CONTENT') + expect(types.indexOf('REASONING_END')).toBeLessThan( + types.indexOf('TEXT_MESSAGE_START'), + ) + + const reasoningText = chunks + .filter((c) => c.type === 'REASONING_MESSAGE_CONTENT') + .map((c) => + c.type === 'REASONING_MESSAGE_CONTENT' + ? (c as { delta: string }).delta + : '', + ) + .join('') + expect(reasoningText).toBe('Considering options...') + }) + + it('closes reasoning lifecycle if the run finishes while still in thinking', async () => { + const sseChunks: Array> = [ + { + id: 'cmpl-think-only', + model: 'magistral-medium-latest', + object: 'chat.completion.chunk', + created: 0, + choices: [ + { + index: 0, + delta: { + content: [ + { + type: 'thinking', + thinking: [{ type: 'text', text: 'pondering...' }], + }, + ], + }, + finish_reason: null, + }, + ], + }, + { + id: 'cmpl-think-only', + model: 'magistral-medium-latest', + object: 'chat.completion.chunk', + created: 0, + choices: [{ index: 0, delta: {}, finish_reason: 'stop' }], + }, + ] + const sseBody = + sseChunks.map((c) => `data: ${JSON.stringify(c)}`).join('\n\n') + + '\n\ndata: [DONE]\n\n' + + vi.stubGlobal( + 'fetch', + vi.fn().mockResolvedValue({ + ok: true, + status: 200, + body: new ReadableStream({ + start(controller) { + controller.enqueue(new TextEncoder().encode(sseBody)) + controller.close() + }, + }), + }), + ) + mockComplete = vi.fn() + + const adapter = createMistralText('magistral-medium-latest', 'test-api-key') + const chunks: Array = [] + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'magistral-medium-latest', + messages: [{ role: 'user', content: 'Just think.' 
}], + }), + )) { + chunks.push(chunk) + } + + const types: Array = chunks.map((c) => c.type) + expect(types).toContain('REASONING_END') + expect(types).toContain('RUN_FINISHED') + expect(types.indexOf('REASONING_END')).toBeLessThan( + types.indexOf('RUN_FINISHED'), + ) + // No TEXT_MESSAGE_START — the run was reasoning-only + expect(types).not.toContain('TEXT_MESSAGE_START') + }) +}) + +describe('transformNullsToUndefined (regression coverage)', () => { + it('preserves array length and indices — null elements become undefined slots', () => { + const input = ['a', null, 'b', null] + const out = transformNullsToUndefined(input) + expect(out).toHaveLength(4) + expect(out[0]).toBe('a') + expect(out[1]).toBeUndefined() + expect(out[2]).toBe('b') + expect(out[3]).toBeUndefined() + }) + + it('preserves object keys whose values were null — value becomes undefined, key remains', () => { + const input = { a: 1, b: null, c: 'x' } + const out = transformNullsToUndefined(input) as Record + expect(Object.keys(out).sort()).toEqual(['a', 'b', 'c']) + expect(out.a).toBe(1) + expect(out.b).toBeUndefined() + expect(out.c).toBe('x') + }) + + it('recurses into nested arrays and objects', () => { + const input = { items: [{ x: null, y: 1 }, null, { x: 2, y: null }] } + const out = transformNullsToUndefined(input) as { + items: Array<{ x: unknown; y: unknown } | undefined> + } + expect(out.items).toHaveLength(3) + expect(out.items[0]).toEqual({ x: undefined, y: 1 }) + expect(out.items[1]).toBeUndefined() + expect(out.items[2]).toEqual({ x: 2, y: undefined }) + }) }) diff --git a/packages/typescript/ai-mistral/tsconfig.json b/packages/typescript/ai-mistral/tsconfig.json index ea11c1096..9028fa3bd 100644 --- a/packages/typescript/ai-mistral/tsconfig.json +++ b/packages/typescript/ai-mistral/tsconfig.json @@ -1,9 +1,8 @@ { "extends": "../../../tsconfig.json", "compilerOptions": { - "outDir": "dist", - "rootDir": "src" + "outDir": "dist" }, - "include": ["src/**/*.ts", "src/**/*.tsx"], + 
"include": ["src/**/*.ts", "src/**/*.tsx", "tests/**/*.ts"], "exclude": ["node_modules", "dist", "**/*.config.ts"] } diff --git a/packages/typescript/ai-mistral/vitest.config.ts b/packages/typescript/ai-mistral/vitest.config.ts deleted file mode 100644 index fa2531743..000000000 --- a/packages/typescript/ai-mistral/vitest.config.ts +++ /dev/null @@ -1,22 +0,0 @@ -import { defineConfig } from 'vitest/config' - -export default defineConfig({ - test: { - globals: true, - environment: 'node', - include: ['tests/**/*.test.ts'], - coverage: { - provider: 'v8', - reporter: ['text', 'json', 'html', 'lcov'], - exclude: [ - 'node_modules/', - 'dist/', - 'tests/', - '**/*.test.ts', - '**/*.config.ts', - '**/types.ts', - ], - include: ['src/**/*.ts'], - }, - }, -}) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index bd777ea58..3791b4fa6 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1283,19 +1283,22 @@ importers: '@mistralai/mistralai': specifier: ^2.2.0 version: 2.2.0 + devDependencies: '@tanstack/ai': - specifier: workspace:^ + specifier: workspace:* version: link:../ai - zod: - specifier: ^4.0.0 - version: 4.3.6 - devDependencies: + '@tanstack/ai-client': + specifier: workspace:* + version: link:../ai-client '@vitest/coverage-v8': specifier: 4.0.14 version: 4.0.14(vitest@4.1.4) vite: specifier: ^7.2.7 version: 7.3.1(@types/node@25.0.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.1)(tsx@4.21.0)(yaml@2.8.2) + zod: + specifier: ^4.2.0 + version: 4.3.6 packages/typescript/ai-ollama: dependencies: @@ -19365,14 +19368,14 @@ snapshots: h3@2.0.1-rc.14(crossws@0.4.4(srvx@0.10.1)): dependencies: rou3: 0.7.12 - srvx: 0.11.2 + srvx: 0.11.15 optionalDependencies: crossws: 0.4.4(srvx@0.10.1) h3@2.0.1-rc.14(crossws@0.4.5(srvx@0.11.15)): dependencies: rou3: 0.7.12 - srvx: 0.11.2 + srvx: 0.11.15 optionalDependencies: crossws: 0.4.5(srvx@0.11.15) diff --git a/testing/e2e/src/lib/feature-support.ts b/testing/e2e/src/lib/feature-support.ts index c2e39bad0..f4ecb0cbf 100644 --- 
a/testing/e2e/src/lib/feature-support.ts +++ b/testing/e2e/src/lib/feature-support.ts @@ -28,7 +28,7 @@ export const matrix: Record> = { 'openrouter', 'mistral', ]), - reasoning: new Set(['openai', 'anthropic', 'gemini']), + reasoning: new Set(['openai', 'anthropic', 'gemini', 'mistral']), 'multi-turn': new Set([ 'openai', 'anthropic', diff --git a/testing/e2e/src/lib/features.ts b/testing/e2e/src/lib/features.ts index 15000cd7e..6e3360fc2 100644 --- a/testing/e2e/src/lib/features.ts +++ b/testing/e2e/src/lib/features.ts @@ -23,6 +23,7 @@ export const featureConfigs: Record = { modelOverrides: { openai: 'o3', anthropic: 'claude-sonnet-4-5', + mistral: 'magistral-medium-latest', }, }, 'multi-turn': { From e2e1d8c15c1a7a9198b128dbcec6e328f0fe0c36 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Tue, 28 Apr 2026 17:51:40 +0000 Subject: [PATCH 8/9] ci: apply automated fixes --- packages/typescript/ai-mistral/README.md | 7 +- .../ai-mistral/src/adapters/text.ts | 3 +- .../ai-mistral/tests/mistral-adapter.test.ts | 81 +++++++++++-------- 3 files changed, 53 insertions(+), 38 deletions(-) diff --git a/packages/typescript/ai-mistral/README.md b/packages/typescript/ai-mistral/README.md index 3c9e09b3f..71a4a82fb 100644 --- a/packages/typescript/ai-mistral/README.md +++ b/packages/typescript/ai-mistral/README.md @@ -46,7 +46,10 @@ console.log(result.text) ```typescript import { createMistralText } from '@tanstack/ai-mistral' -const adapter = createMistralText('mistral-large-latest', process.env.MISTRAL_API_KEY!) 
+const adapter = createMistralText( + 'mistral-large-latest', + process.env.MISTRAL_API_KEY!, +) ``` ## Supported Models @@ -72,7 +75,7 @@ See [Mistral model comparison](https://docs.mistral.ai/getting-started/models/co - ✅ Streaming chat completions - ✅ Structured output (JSON Schema) - ✅ Function/tool calling -- ✅ Reasoning (magistral-* models — streamed as `REASONING_*` events) +- ✅ Reasoning (magistral-_ models — streamed as `REASONING\__` events) - ✅ Multimodal input (text + images) — requires a vision-capable model (`pixtral-large-latest`, `pixtral-12b-2409`, `mistral-medium-latest`, or `mistral-small-latest`) - ❌ Embeddings (use [@mistralai/mistralai](https://github.com/mistralai/client-ts) directly) - ❌ Image generation diff --git a/packages/typescript/ai-mistral/src/adapters/text.ts b/packages/typescript/ai-mistral/src/adapters/text.ts index 0fdcb0d70..cb69b82ae 100644 --- a/packages/typescript/ai-mistral/src/adapters/text.ts +++ b/packages/typescript/ai-mistral/src/adapters/text.ts @@ -896,7 +896,8 @@ export class MistralTextAdapter< return { model: options.model, messages: messages as ChatCompletionStreamRequest['messages'], - temperature: options.temperature ?? modelOptions?.temperature ?? undefined, + temperature: + options.temperature ?? modelOptions?.temperature ?? undefined, maxTokens: options.maxTokens, topP: options.topP ?? modelOptions?.top_p ?? 
undefined, tools: tools as ChatCompletionStreamRequest['tools'], diff --git a/packages/typescript/ai-mistral/tests/mistral-adapter.test.ts b/packages/typescript/ai-mistral/tests/mistral-adapter.test.ts index ca4652c9f..9edf18923 100644 --- a/packages/typescript/ai-mistral/tests/mistral-adapter.test.ts +++ b/packages/typescript/ai-mistral/tests/mistral-adapter.test.ts @@ -206,10 +206,12 @@ describe('Mistral AG-UI event emission', () => { const adapter = createMistralText('mistral-large-latest', 'test-api-key') const chunks: Array = [] - for await (const chunk of adapter.chatStream(chatOpts({ - model: 'mistral-large-latest', - messages: [{ role: 'user', content: 'Hello' }], - }))) { + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Hello' }], + }), + )) { chunks.push(chunk) } @@ -255,10 +257,12 @@ describe('Mistral AG-UI event emission', () => { const adapter = createMistralText('mistral-large-latest', 'test-api-key') const chunks: Array = [] - for await (const chunk of adapter.chatStream(chatOpts({ - model: 'mistral-large-latest', - messages: [{ role: 'user', content: 'Hello' }], - }))) { + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Hello' }], + }), + )) { chunks.push(chunk) } @@ -309,10 +313,12 @@ describe('Mistral AG-UI event emission', () => { const adapter = createMistralText('mistral-large-latest', 'test-api-key') const chunks: Array = [] - for await (const chunk of adapter.chatStream(chatOpts({ - model: 'mistral-large-latest', - messages: [{ role: 'user', content: 'Hello' }], - }))) { + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Hello' }], + }), + )) { chunks.push(chunk) } @@ -398,11 +404,13 @@ describe('Mistral AG-UI event emission', () => { const adapter = createMistralText('mistral-large-latest', 
'test-api-key') const chunks: Array = [] - for await (const chunk of adapter.chatStream(chatOpts({ - model: 'mistral-large-latest', - messages: [{ role: 'user', content: 'Weather in Berlin?' }], - tools: [weatherTool], - }))) { + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Weather in Berlin?' }], + tools: [weatherTool], + }), + )) { chunks.push(chunk) } @@ -459,10 +467,12 @@ describe('Mistral AG-UI event emission', () => { let thrownError: Error | undefined try { - for await (const chunk of adapter.chatStream(chatOpts({ - model: 'mistral-large-latest', - messages: [{ role: 'user', content: 'Hello' }], - }))) { + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Hello' }], + }), + )) { chunks.push(chunk) } } catch (err) { @@ -525,10 +535,12 @@ describe('Mistral AG-UI event emission', () => { const adapter = createMistralText('mistral-large-latest', 'test-api-key') const chunks: Array = [] - for await (const chunk of adapter.chatStream(chatOpts({ - model: 'mistral-large-latest', - messages: [{ role: 'user', content: 'Say hello' }], - }))) { + for await (const chunk of adapter.chatStream( + chatOpts({ + model: 'mistral-large-latest', + messages: [{ role: 'user', content: 'Say hello' }], + }), + )) { chunks.push(chunk) } @@ -600,16 +612,15 @@ describe('Mistral AG-UI event emission', () => { it('flushes TEXT_MESSAGE_END and RUN_FINISHED when stream ends without finish_reason', async () => { // Stream emits content, then [DONE] without ever sending a finish_reason // chunk. Consumers must still receive matched lifecycle events. 
- const sseBody = - `data: ${JSON.stringify( - toApiChunk({ - id: 'cmpl-cut', - model: 'mistral-large-latest', - choices: [ - { index: 0, delta: { content: 'partial' }, finishReason: null }, - ], - }), - )}\n\ndata: [DONE]\n\n` + const sseBody = `data: ${JSON.stringify( + toApiChunk({ + id: 'cmpl-cut', + model: 'mistral-large-latest', + choices: [ + { index: 0, delta: { content: 'partial' }, finishReason: null }, + ], + }), + )}\n\ndata: [DONE]\n\n` vi.stubGlobal( 'fetch', From 8a157ccef3581d01dde661000e356bfa1f62d7ad Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Tue, 28 Apr 2026 17:52:48 +0000 Subject: [PATCH 9/9] ci: apply automated fixes (attempt 2/3) --- packages/typescript/ai-mistral/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/typescript/ai-mistral/README.md b/packages/typescript/ai-mistral/README.md index 71a4a82fb..977720c8d 100644 --- a/packages/typescript/ai-mistral/README.md +++ b/packages/typescript/ai-mistral/README.md @@ -75,7 +75,7 @@ See [Mistral model comparison](https://docs.mistral.ai/getting-started/models/co - ✅ Streaming chat completions - ✅ Structured output (JSON Schema) - ✅ Function/tool calling -- ✅ Reasoning (magistral-_ models — streamed as `REASONING\__` events) +- ✅ Reasoning (`magistral-*` models — streamed as `REASONING_*` events) - ✅ Multimodal input (text + images) — requires a vision-capable model (`pixtral-large-latest`, `pixtral-12b-2409`, `mistral-medium-latest`, or `mistral-small-latest`) - ❌ Embeddings (use [@mistralai/mistralai](https://github.com/mistralai/client-ts) directly) - ❌ Image generation