diff --git a/.changeset/ag-ui-client-compliance.md b/.changeset/ag-ui-client-compliance.md new file mode 100644 index 000000000..679fac06c --- /dev/null +++ b/.changeset/ag-ui-client-compliance.md @@ -0,0 +1,24 @@ +--- +'@tanstack/ai': minor +'@tanstack/ai-client': minor +'@tanstack/ai-react': minor +'@tanstack/ai-solid': minor +'@tanstack/ai-vue': minor +'@tanstack/ai-svelte': minor +'@tanstack/ai-react-ui': minor +--- + +**Breaking:** AG-UI client-to-server compliance. + +`@tanstack/ai-client` now POSTs an AG-UI `RunAgentInput` request body and `@tanstack/ai` server endpoints must use the new `chatParamsFromRequestBody` + `mergeAgentTools` helpers. Upgrade both packages together. + +Highlights: + +- **Wire format**: `{threadId, runId, state, messages, tools, context, forwardedProps}` (per AG-UI 0.0.52 `RunAgentInputSchema`) instead of `{messages, data}`. +- **New server helpers** exported from `@tanstack/ai`: `chatParamsFromRequestBody`, `mergeAgentTools`. +- **`chat()` accepts `threadId`, `runId`, `parentRunId`** as optional fields for AG-UI run correlation. +- **`ChatClient` accepts `threadId`** option; auto-generates and persists per session if omitted; fresh `runId` per send. +- **Client tools auto-advertised** to the server via `RunAgentInput.tools`. +- **Foreign AG-UI clients** can hit a TanStack server: `developer` collapses to `system`, `reasoning`/`activity` drop. + +See `docs/migration/ag-ui-compliance.md` for full migration steps. 
diff --git a/docs/config.json b/docs/config.json index f24a5fa0a..db2ce2413 100644 --- a/docs/config.json +++ b/docs/config.json @@ -211,6 +211,10 @@ { "label": "From Vercel AI SDK", "to": "migration/migration-from-vercel-ai" + }, + { + "label": "AG-UI Client Compliance", + "to": "migration/ag-ui-compliance" } ] }, diff --git a/docs/migration/ag-ui-compliance.md b/docs/migration/ag-ui-compliance.md new file mode 100644 index 000000000..67ec4fb6b --- /dev/null +++ b/docs/migration/ag-ui-compliance.md @@ -0,0 +1,180 @@ +--- +title: Migrating to AG-UI Client-to-Server Compliance +--- + +# Migrating to AG-UI Client-to-Server Compliance + +> **TL;DR:** Upgrade `@tanstack/ai` and `@tanstack/ai-client` together. The HTTP wire format changed to AG-UI `RunAgentInput`. Server endpoints must use `chatParamsFromRequestBody` and `mergeAgentTools`. Clients have nothing to do β€” `useChat` and the connection adapters handle the new format internally. + +## What changed + +`@tanstack/ai-client` now POSTs an AG-UI 0.0.52 `RunAgentInput` request body. The previous shape (`{ messages, data, ...optionsBody }`) is no longer accepted by `@tanstack/ai`'s server helpers. + +### Old wire shape + +```json +{ + "messages": [...], + "data": {...} +} +``` + +### New wire shape + +```json +{ + "threadId": "thread-...", + "runId": "run-...", + "state": {}, + "messages": [...], + "tools": [...], + "context": [], + "forwardedProps": {...} +} +``` + +The `messages` array carries TanStack `UIMessage` anchors with `parts` intact, plus AG-UI mirror fields (`content`, `toolCalls`) so strict AG-UI servers can parse it. Tool results and thinking parts are additionally emitted as separate `{role:'tool',...}` and `{role:'reasoning',...}` fan-out messages alongside the anchors. 
+ +## Server endpoint upgrade + +### Before + +```ts +import { chat, toServerSentEventsResponse } from '@tanstack/ai' +import { openaiText } from '@tanstack/ai-openai/adapters' + +export async function POST(req: Request) { + const { messages } = await req.json() + const stream = chat({ + adapter: openaiText('gpt-4o'), + messages, + tools: serverTools, + }) + return toServerSentEventsResponse(stream) +} +``` + +### After + +```ts +import { + chat, + chatParamsFromRequestBody, + mergeAgentTools, + toServerSentEventsResponse, +} from '@tanstack/ai' +import { openaiText } from '@tanstack/ai-openai/adapters' + +export async function POST(req: Request) { + let params + try { + params = await chatParamsFromRequestBody(await req.json()) + } catch (error) { + return new Response( + error instanceof Error ? error.message : 'Bad request', + { status: 400 }, + ) + } + + const stream = chat({ + adapter: openaiText('gpt-4o'), + messages: params.messages, + tools: mergeAgentTools(serverTools, params.tools), + threadId: params.threadId, + runId: params.runId, + + // Explicitly allowlist forwardedProps β€” see security note below + temperature: + typeof params.forwardedProps.temperature === 'number' + ? params.forwardedProps.temperature + : undefined, + maxTokens: + typeof params.forwardedProps.maxTokens === 'number' + ? params.forwardedProps.maxTokens + : undefined, + }) + + return toServerSentEventsResponse(stream) +} +``` + +`chatParamsFromRequestBody` validates the body against `RunAgentInputSchema` from `@ag-ui/core` and throws an `AGUIError` on a malformed shape. Surface this as HTTP 400. + +## `forwardedProps` security + +`forwardedProps` is arbitrary client-controlled JSON. 
**Do not** spread it directly into `chat({...})`:
+```ts
+// 🚫 UNSAFE — a client could override `adapter`, `model`, `tools`, system prompts, anything
+chat({
+  adapter: openaiText('gpt-4o'),
+  ...params,
+  ...params.forwardedProps,
+})
+```
+
+Always destructure the specific fields you intend to forward:
+
+```ts
+// ✅ SAFE — explicit allowlist
+chat({
+  adapter: openaiText('gpt-4o'),
+  messages: params.messages,
+  tools: mergeAgentTools(serverTools, params.tools),
+  threadId: params.threadId,
+  runId: params.runId,
+  temperature: typeof params.forwardedProps.temperature === 'number'
+    ? params.forwardedProps.temperature
+    : undefined,
+})
+```
+
+## Client-side: nothing to do
+
+`useChat` and the connection adapters (`fetchServerSentEvents`, `fetchHttpStream`) handle the new wire format internally. Existing `UIMessage` state is unchanged. `clientTools(...)` declarations are now automatically advertised to the server in the request payload.
+
+If you instantiated a `ChatClient` directly and want to control the thread identifier, pass `threadId` via the constructor options:
+
+```ts
+const client = new ChatClient({
+  threadId: 'persistent-thread-from-storage',
+  connection: fetchServerSentEvents('/api/chat'),
+  tools: [/* clientTools */],
+})
+```
+
+If you don't pass `threadId`, one is generated automatically and persists for the lifetime of the `ChatClient` instance. A fresh `runId` is generated for every send.
+
+## Tool-merge semantics
+
+- **Server tools win on name collision.** A tool registered server-side via `toolDefinition().server(...)` always executes server-side.
+- **Client-only tools become no-execute stubs** in `chat()`. The runtime emits a `ClientToolRequest` event back to the client; the client's registered handler (via `clientTools(...)`) executes locally and posts the result.
+- **Dual-handler (both have it):** server executes, then `chat-client.ts`'s `onToolCall` fires the client's handler as a UI side-effect when the streamed tool result event arrives. The server's result is authoritative for the conversation.
+
+## Talking to a foreign AG-UI server
+
+A `@tanstack/ai-client` request hitting a foreign AG-UI server:
+
+- ✅ Single-turn user messages work — content is mirrored to AG-UI's `content` field.
+- ✅ Server-emitted events stream and render correctly.
+- ✅ Multi-turn history that includes tool results from prior turns: the foreign server reads them via the AG-UI fan-out duplicates we send (separate `{role:'tool',...}` messages).
+- ⚠️ Client-only tools are sent in the AG-UI `tools` field; whether the foreign server actually invokes them depends on its tool-calling logic.
+
+## Talking to a TanStack server from a foreign AG-UI client
+
+Pure AG-UI `RunAgentInput` payloads (no TanStack `parts` field) work end-to-end:
+
+- Tool messages pass through as `ModelMessage` entries with `role: 'tool'`.
+- `reasoning` messages are dropped (no LLM-replay equivalent today).
+- `activity` messages are dropped (no TanStack equivalent).
+- `developer` messages are collapsed to `system` role.
+
+## `@ag-ui/core` bump
+
+`@tanstack/ai` now depends on `@ag-ui/core@^0.0.52`. If your code imports types from `@tanstack/ai` that re-export AG-UI types, you may need minor type adjustments — see the changeset for specifics.
+
+## Out of scope (existing behavior preserved)
+
+- **Reasoning replay to LLM providers.** TanStack still drops `ThinkingPart` at the `UIMessage`→`ModelMessage` boundary (pre-existing behavior). Providers like Anthropic that require thinking blocks to be replayed for extended thinking continuation remain a separate concern, tracked outside this migration.
+- **AG-UI `state` and `context` fields.** Surfaced on `chatParamsFromRequestBody`'s return value but not yet wired into `chat()`. 
They're available for your endpoint to inspect/forward, but the runtime ignores them. +- **PHP and Python server packages.** No `chatParamsFromRequestBody` parity yet. Their examples temporarily lag on the old shape until the matching helpers ship. diff --git a/examples/ts-react-chat/src/routes/api.tanchat.ts b/examples/ts-react-chat/src/routes/api.tanchat.ts index f571fd9c7..2e048e202 100644 --- a/examples/ts-react-chat/src/routes/api.tanchat.ts +++ b/examples/ts-react-chat/src/routes/api.tanchat.ts @@ -1,8 +1,10 @@ import { createFileRoute } from '@tanstack/react-router' import { chat, + chatParamsFromRequestBody, createChatOptions, maxIterations, + mergeAgentTools, toServerSentEventsResponse, } from '@tanstack/ai' import { openaiText } from '@tanstack/ai-openai' @@ -12,7 +14,7 @@ import { geminiText } from '@tanstack/ai-gemini' import { openRouterText } from '@tanstack/ai-openrouter' import { grokText } from '@tanstack/ai-grok' import { groqText } from '@tanstack/ai-groq' -import type { AnyTextAdapter, ChatMiddleware } from '@tanstack/ai' +import type { AnyTextAdapter, ChatMiddleware, Tool } from '@tanstack/ai' import { addToCartToolDef, addToWishListToolDef, @@ -75,6 +77,22 @@ const addToCartToolServer = addToCartToolDef.server((args, context) => { } }) +const serverToolsList = [ + getGuitars, // Server tool + recommendGuitarToolDef, // No server execute - client will handle + addToCartToolServer, + addToWishListToolDef, + getPersonalGuitarPreferenceToolDef, + // Lazy tools - discovered on demand + compareGuitars, + calculateFinancing, + searchGuitars, +] + +const serverTools: Record = Object.fromEntries( + serverToolsList.map((t) => [t.name, t]), +) + const loggingMiddleware: ChatMiddleware = { name: 'logging', onConfig(ctx, config) { @@ -122,13 +140,26 @@ export const Route = createFileRoute('/api/tanchat')({ const abortController = new AbortController() - const body = await request.json() - const { messages, data } = body + let params + try { + params = 
await chatParamsFromRequestBody(await request.json()) + } catch (error) { + return new Response( + error instanceof Error ? error.message : 'Bad request', + { status: 400 }, + ) + } - // Extract provider and model from data - const provider: Provider = data?.provider || 'openai' - const model: string = data?.model || 'gpt-4o' - const conversationId: string | undefined = data?.conversationId + // Extract provider and model from forwardedProps (sent by the client) + const provider: Provider = + typeof params.forwardedProps.provider === 'string' && + (params.forwardedProps.provider as Provider) + ? (params.forwardedProps.provider as Provider) + : 'openai' + const model: string = + typeof params.forwardedProps.model === 'string' + ? params.forwardedProps.model + : 'gpt-4o' // Pre-define typed adapter configurations with full type inference // Model is passed to the adapter factory function for type-safe autocomplete @@ -191,28 +222,18 @@ export const Route = createFileRoute('/api/tanchat')({ // Get typed adapter options using createChatOptions pattern const options = adapterConfig[provider]() - // Note: We cast to AsyncIterable because all chat adapters - // return streams, but TypeScript sees a union of all possible return types + const mergedTools = mergeAgentTools(serverTools, params.tools) + const stream = chat({ ...options, - - tools: [ - getGuitars, // Server tool - recommendGuitarToolDef, // No server execute - client will handle - addToCartToolServer, - addToWishListToolDef, - getPersonalGuitarPreferenceToolDef, - // Lazy tools - discovered on demand - compareGuitars, - calculateFinancing, - searchGuitars, - ], + tools: Object.values(mergedTools), middleware: [loggingMiddleware], systemPrompts: [SYSTEM_PROMPT], agentLoopStrategy: maxIterations(20), - messages, + messages: params.messages, + threadId: params.threadId, + runId: params.runId, abortController, - conversationId, }) return toServerSentEventsResponse(stream, { abortController }) } catch (error: any) { 
diff --git a/examples/ts-solid-chat/src/routes/api.chat.ts b/examples/ts-solid-chat/src/routes/api.chat.ts index 0b73e29be..fabdfb74a 100644 --- a/examples/ts-solid-chat/src/routes/api.chat.ts +++ b/examples/ts-solid-chat/src/routes/api.chat.ts @@ -1,8 +1,19 @@ import { createFileRoute } from '@tanstack/solid-router' -import { chat, maxIterations, toServerSentEventsResponse } from '@tanstack/ai' +import { + chat, + chatParamsFromRequestBody, + maxIterations, + mergeAgentTools, + toServerSentEventsResponse, +} from '@tanstack/ai' +import type { Tool } from '@tanstack/ai' import { anthropicText } from '@tanstack/ai-anthropic' import { serverTools } from '@/lib/guitar-tools' +const serverToolsRecord: Record = Object.fromEntries( + serverTools.map((t) => [t.name, t]), +) + const SYSTEM_PROMPT = `You are a helpful assistant for a guitar store. CRITICAL INSTRUCTIONS - YOU MUST FOLLOW THIS EXACT WORKFLOW: @@ -53,15 +64,27 @@ export const Route = createFileRoute('/api/chat')({ const abortController = new AbortController() - const { messages } = await request.json() + let params + try { + params = await chatParamsFromRequestBody(await request.json()) + } catch (error) { + return new Response( + error instanceof Error ? 
error.message : 'Bad request', + { status: 400 }, + ) + } + try { + const mergedTools = mergeAgentTools(serverToolsRecord, params.tools) // Use the stream abort signal for proper cancellation handling const stream = chat({ adapter: anthropicText('claude-sonnet-4-5'), - tools: serverTools, + tools: Object.values(mergedTools), systemPrompts: [SYSTEM_PROMPT], agentLoopStrategy: maxIterations(20), - messages, + messages: params.messages, + threadId: params.threadId, + runId: params.runId, modelOptions: { thinking: { type: 'enabled', diff --git a/examples/ts-svelte-chat/src/routes/api/chat/+server.ts b/examples/ts-svelte-chat/src/routes/api/chat/+server.ts index 6308af357..21a0de167 100644 --- a/examples/ts-svelte-chat/src/routes/api/chat/+server.ts +++ b/examples/ts-svelte-chat/src/routes/api/chat/+server.ts @@ -1,9 +1,12 @@ import { chat, + chatParamsFromRequestBody, createChatOptions, maxIterations, + mergeAgentTools, toServerSentEventsResponse, } from '@tanstack/ai' +import type { Tool } from '@tanstack/ai' import { openaiText } from '@tanstack/ai-openai' import { ollamaText } from '@tanstack/ai-ollama' import { anthropicText } from '@tanstack/ai-anthropic' @@ -68,7 +71,7 @@ IMPORTANT: Example workflow: User: "I want an acoustic guitar" Step 1: Call getGuitars() -Step 2: Call recommendGuitar(id: "6") +Step 2: Call recommendGuitar(id: "6") Step 3: Done - do NOT add any text after calling recommendGuitar ` @@ -80,6 +83,18 @@ const addToCartToolServer = addToCartToolDef.server((args) => ({ totalItems: args.quantity, })) +const serverToolsList = [ + getGuitars, // Server tool + recommendGuitarToolDef, // No server execute - client will handle + addToCartToolServer, + addToWishListToolDef, + getPersonalGuitarPreferenceToolDef, +] + +const serverTools: Record = Object.fromEntries( + serverToolsList.map((t) => [t.name, t]), +) + export const POST: RequestHandler = async ({ request }) => { // Capture request signal before reading body (it may be aborted after body is 
consumed) const requestSignal = request.signal @@ -91,28 +106,37 @@ export const POST: RequestHandler = async ({ request }) => { const abortController = new AbortController() + let params try { - const body = await request.json() - const { messages, data } = body + params = await chatParamsFromRequestBody(await request.json()) + } catch (error) { + return new Response( + error instanceof Error ? error.message : 'Bad request', + { status: 400 }, + ) + } - // Extract provider from data - const provider: Provider = data?.provider || 'openai' + // Extract provider from forwardedProps (sent by the client) + const provider: Provider = + typeof params.forwardedProps?.provider === 'string' && + params.forwardedProps.provider in adapterConfig + ? (params.forwardedProps.provider as Provider) + : 'openai' + try { // Get typed adapter options using createOptions pattern const options = adapterConfig[provider]() + const mergedTools = mergeAgentTools(serverTools, params.tools) + const stream = chat({ ...options, - tools: [ - getGuitars, // Server tool - recommendGuitarToolDef, // No server execute - client will handle - addToCartToolServer, - addToWishListToolDef, - getPersonalGuitarPreferenceToolDef, - ], + tools: Object.values(mergedTools), systemPrompts: [SYSTEM_PROMPT], agentLoopStrategy: maxIterations(20), - messages, + messages: params.messages, + threadId: params.threadId, + runId: params.runId, abortController, }) diff --git a/examples/ts-vue-chat/vite.config.ts b/examples/ts-vue-chat/vite.config.ts index 74c3563f1..42140d0a2 100644 --- a/examples/ts-vue-chat/vite.config.ts +++ b/examples/ts-vue-chat/vite.config.ts @@ -2,7 +2,13 @@ import { fileURLToPath, URL } from 'node:url' import { defineConfig } from 'vite' import vue from '@vitejs/plugin-vue' import tailwindcss from '@tailwindcss/vite' -import { chat, maxIterations, toServerSentEventsStream } from '@tanstack/ai' +import { + chat, + chatParamsFromRequestBody, + maxIterations, + mergeAgentTools, + 
toServerSentEventsStream, +} from '@tanstack/ai' import { openaiText } from '@tanstack/ai-openai' import { anthropicText } from '@tanstack/ai-anthropic' import { geminiText } from '@tanstack/ai-gemini' @@ -196,10 +202,23 @@ export default defineConfig({ body += chunk } + let params try { - const { messages, data } = JSON.parse(body) - const provider: Provider = data?.provider || 'openai' - const model: string | undefined = data?.model + params = await chatParamsFromRequestBody(JSON.parse(body)) + } catch (error) { + res.statusCode = 400 + res.end(error instanceof Error ? error.message : 'Bad request') + return + } + + try { + const fp = params.forwardedProps as Record + const provider: Provider = + typeof fp.provider === 'string' + ? (fp.provider as Provider) + : 'openai' + const model: string | undefined = + typeof fp.model === 'string' ? fp.model : undefined let adapter @@ -231,18 +250,18 @@ export default defineConfig({ const abortController = new AbortController() + const serverTools = Object.fromEntries( + [getGuitars, addToCartToolServer].map((t) => [t.name, t]), + ) + const stream = chat({ adapter, - tools: [ - getGuitars, - recommendGuitarToolDef, - addToCartToolServer, - addToWishListToolDef, - getPersonalGuitarPreferenceToolDef, - ], + tools: Object.values(mergeAgentTools(serverTools, params.tools)), systemPrompts: [SYSTEM_PROMPT], agentLoopStrategy: maxIterations(20), - messages, + messages: params.messages, + threadId: params.threadId, + runId: params.runId, abortController, }) diff --git a/packages/typescript/ai-anthropic/src/adapters/text.ts b/packages/typescript/ai-anthropic/src/adapters/text.ts index 359f2377a..165c00b58 100644 --- a/packages/typescript/ai-anthropic/src/adapters/text.ts +++ b/packages/typescript/ai-anthropic/src/adapters/text.ts @@ -602,6 +602,7 @@ export class AnthropicTextAdapter< threadId, model, timestamp, + parentRunId: options.parentRunId, }) } diff --git a/packages/typescript/ai-client/src/chat-client.ts 
b/packages/typescript/ai-client/src/chat-client.ts index ab9d07dff..b0e5c0d49 100644 --- a/packages/typescript/ai-client/src/chat-client.ts +++ b/packages/typescript/ai-client/src/chat-client.ts @@ -30,6 +30,7 @@ export class ChatClient { private processor: StreamProcessor private connection: SubscribeConnectionAdapter private uniqueId: string + private threadId: string private body: Record = {} private pendingMessageBody: Record | undefined = undefined private isLoading = false @@ -81,6 +82,7 @@ export class ChatClient { constructor(options: ChatClientOptions) { this.uniqueId = options.id || this.generateUniqueId('chat') + this.threadId = options.threadId || this.generateUniqueId('thread') this.body = options.body || {} this.connection = normalizeConnectionAdapter(options.connection) this.events = new DefaultChatClientEventEmitter(this.uniqueId) @@ -605,11 +607,28 @@ export class ChatClient { // Set up promise that resolves when onStreamEnd fires const processingComplete = this.waitForProcessing() + // Build per-send run context for AG-UI compliance + // Note: mergedBody already contains the merged this.body + pendingMessageBody + // (pendingMessageBody was cleared above, so we use mergedBody as forwardedProps) + const runContext = { + threadId: this.threadId, + runId: `run-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`, + clientTools: Array.from(this.clientToolsRef.current.values()).map( + (t) => ({ + name: t.name, + description: t.description, + parameters: t.inputSchema || { type: 'object' }, + }), + ), + forwardedProps: { ...mergedBody }, + } + // Send through normalized connection (pushes chunks to subscription queue) await this.connection.send( messages, mergedBody, this.abortController.signal, + runContext, ) // Wait for subscription loop to finish processing all chunks diff --git a/packages/typescript/ai-client/src/connection-adapters.ts b/packages/typescript/ai-client/src/connection-adapters.ts index 91d63a146..3eb7be432 100644 --- 
a/packages/typescript/ai-client/src/connection-adapters.ts +++ b/packages/typescript/ai-client/src/connection-adapters.ts @@ -1,5 +1,10 @@ +import { uiMessagesToWire } from '@tanstack/ai' import type { ModelMessage, StreamChunk, UIMessage } from '@tanstack/ai' +function generateRunId(prefix: string): string { + return `${prefix}-${Date.now()}-${Math.random().toString(36).slice(2, 8)}` +} + /** * Merge custom headers into request headers */ @@ -62,6 +67,25 @@ async function* readStreamLines( } } +/** + * Per-send context provided by the chat client to the connection adapter. + * The adapter combines this with serialized messages to build a full + * AG-UI `RunAgentInput` payload. + */ +export interface RunAgentInputContext { + threadId: string + runId: string + parentRunId?: string + /** Client-declared tools to advertise in the request payload. */ + clientTools?: Array<{ + name: string + description: string + parameters: unknown + }> + /** Arbitrary user-controlled passthrough data. */ + forwardedProps?: Record +} + export interface ConnectConnectionAdapter { /** * Connect and return an async iterable of StreamChunks. 
@@ -70,6 +94,7 @@ export interface ConnectConnectionAdapter { messages: Array | Array, data?: Record, abortSignal?: AbortSignal, + runContext?: RunAgentInputContext, ) => AsyncIterable } @@ -85,6 +110,7 @@ export interface SubscribeConnectionAdapter { messages: Array | Array, data?: Record, abortSignal?: AbortSignal, + runContext?: RunAgentInputContext, ) => Promise } @@ -173,10 +199,15 @@ export function normalizeConnectionAdapter( } })() }, - async send(messages, data, abortSignal) { + async send(messages, data, abortSignal, runContext) { let hasTerminalEvent = false try { - const stream = connection.connect(messages, data, abortSignal) + const stream = connection.connect( + messages, + data, + abortSignal, + runContext, + ) for await (const chunk of stream) { if (chunk.type === 'RUN_FINISHED' || chunk.type === 'RUN_ERROR') { hasTerminalEvent = true @@ -268,7 +299,7 @@ export function fetchServerSentEvents( | (() => FetchConnectionOptions | Promise) = {}, ): ConnectConnectionAdapter { return { - async *connect(messages, data, abortSignal) { + async *connect(messages, data, abortSignal, runContext) { // Resolve URL and options if they are functions const resolvedUrl = typeof url === 'function' ? url() : url const resolvedOptions = @@ -279,12 +310,23 @@ export function fetchServerSentEvents( ...mergeHeaders(resolvedOptions.headers), } - // Send messages as-is (UIMessages with parts preserved) - // Server-side TextEngine handles conversion to ModelMessages + // Build AG-UI RunAgentInput payload + const wireMessages = uiMessagesToWire(messages as Array) const requestBody = { - messages, - data, - ...resolvedOptions.body, + threadId: runContext?.threadId ?? generateRunId('thread'), + runId: runContext?.runId ?? generateRunId('run'), + ...(runContext?.parentRunId !== undefined && { + parentRunId: runContext.parentRunId, + }), + state: {}, + messages: wireMessages, + tools: runContext?.clientTools ?? 
[], + context: [], + forwardedProps: { + ...(runContext?.forwardedProps ?? {}), + ...data, + ...resolvedOptions.body, + }, } const fetchClient = resolvedOptions.fetchClient ?? fetch @@ -372,7 +414,7 @@ export function fetchHttpStream( | (() => FetchConnectionOptions | Promise) = {}, ): ConnectConnectionAdapter { return { - async *connect(messages, data, abortSignal) { + async *connect(messages, data, abortSignal, runContext) { // Resolve URL and options if they are functions const resolvedUrl = typeof url === 'function' ? url() : url const resolvedOptions = @@ -383,12 +425,23 @@ export function fetchHttpStream( ...mergeHeaders(resolvedOptions.headers), } - // Send messages as-is (UIMessages with parts preserved) - // Server-side TextEngine handles conversion to ModelMessages + // Build AG-UI RunAgentInput payload + const wireMessages = uiMessagesToWire(messages as Array) const requestBody = { - messages, - data, - ...resolvedOptions.body, + threadId: runContext?.threadId ?? generateRunId('thread'), + runId: runContext?.runId ?? generateRunId('run'), + ...(runContext?.parentRunId !== undefined && { + parentRunId: runContext.parentRunId, + }), + state: {}, + messages: wireMessages, + tools: runContext?.clientTools ?? [], + context: [], + forwardedProps: { + ...(runContext?.forwardedProps ?? {}), + ...data, + ...resolvedOptions.body, + }, } const fetchClient = resolvedOptions.fetchClient ?? 
fetch @@ -445,7 +498,7 @@ export function stream( ) => AsyncIterable, ): ConnectConnectionAdapter { return { - async *connect(messages, data) { + async *connect(messages, data, _abortSignal, _runContext) { // Pass messages as-is (UIMessages with parts preserved) // Server-side chat() handles conversion to ModelMessages yield* streamFactory(messages, data) @@ -476,7 +529,7 @@ export function rpcStream( ) => AsyncIterable, ): ConnectConnectionAdapter { return { - async *connect(messages, data) { + async *connect(messages, data, _abortSignal, _runContext) { // Pass messages as-is (UIMessages with parts preserved) // Server-side chat() handles conversion to ModelMessages yield* rpcCall(messages, data) diff --git a/packages/typescript/ai-client/src/types.ts b/packages/typescript/ai-client/src/types.ts index b705ebbdf..9d5bf632e 100644 --- a/packages/typescript/ai-client/src/types.ts +++ b/packages/typescript/ai-client/src/types.ts @@ -204,6 +204,12 @@ export interface ChatClientOptions< */ id?: string + /** + * Thread ID to use for this chat session. Persists across sends within + * the session. If omitted, a unique thread ID is generated. 
+ */ + threadId?: string + /** * Additional body parameters to send */ diff --git a/packages/typescript/ai-client/tests/connection-adapters.test.ts b/packages/typescript/ai-client/tests/connection-adapters.test.ts index 60c36763a..b720b230a 100644 --- a/packages/typescript/ai-client/tests/connection-adapters.test.ts +++ b/packages/typescript/ai-client/tests/connection-adapters.test.ts @@ -302,7 +302,7 @@ describe('connection-adapters', () => { expect(authValue).toBe('Bearer token') }) - it('should pass data to request body', async () => { + it('should pass data to request body forwardedProps', async () => { const mockReader = { read: vi.fn().mockResolvedValue({ done: true, value: undefined }), releaseLock: vi.fn(), @@ -329,7 +329,7 @@ describe('connection-adapters', () => { expect(fetchMock).toHaveBeenCalled() const call = fetchMock.mock.calls[0] const body = JSON.parse(call?.[1]?.body as string) - expect(body.data).toEqual({ key: 'value' }) + expect(body.forwardedProps).toMatchObject({ key: 'value' }) }) it('should use custom fetchClient when provided', async () => { @@ -436,7 +436,7 @@ describe('connection-adapters', () => { expect(call?.[1]?.headers).toMatchObject({ 'X-Async': 'token' }) }) - it('should merge options.body into request body', async () => { + it('should merge options.body into request body forwardedProps', async () => { const mockReader = { read: vi.fn().mockResolvedValue({ done: true, value: undefined }), releaseLock: vi.fn(), @@ -462,9 +462,11 @@ describe('connection-adapters', () => { const call = fetchMock.mock.calls[0] const body = JSON.parse(call?.[1]?.body as string) - expect(body.model).toBe('gpt-4o') - expect(body.provider).toBe('openai') - expect(body.data).toEqual({ key: 'value' }) + expect(body.forwardedProps).toMatchObject({ + model: 'gpt-4o', + provider: 'openai', + key: 'value', + }) }) it('should handle multiple chunks across multiple reads', async () => { @@ -688,7 +690,7 @@ describe('connection-adapters', () => { }) }) - 
it('should pass data to request body', async () => { + it('should pass data to request body forwardedProps', async () => { const mockReader = { read: vi.fn().mockResolvedValue({ done: true, value: undefined }), releaseLock: vi.fn(), @@ -712,7 +714,7 @@ describe('connection-adapters', () => { const call = fetchMock.mock.calls[0] const body = JSON.parse(call?.[1]?.body as string) - expect(body.data).toEqual({ key: 'value' }) + expect(body.forwardedProps).toMatchObject({ key: 'value' }) }) it('should resolve dynamic URL from function', async () => { diff --git a/packages/typescript/ai-gemini/src/adapters/text.ts b/packages/typescript/ai-gemini/src/adapters/text.ts index f4efcc466..93cb10c93 100644 --- a/packages/typescript/ai-gemini/src/adapters/text.ts +++ b/packages/typescript/ai-gemini/src/adapters/text.ts @@ -273,6 +273,7 @@ export class GeminiTextAdapter< threadId, model, timestamp, + parentRunId: options.parentRunId, }) } diff --git a/packages/typescript/ai-grok/src/adapters/text.ts b/packages/typescript/ai-grok/src/adapters/text.ts index e185c5ecf..e5cd97127 100644 --- a/packages/typescript/ai-grok/src/adapters/text.ts +++ b/packages/typescript/ai-grok/src/adapters/text.ts @@ -126,6 +126,7 @@ export class GrokTextAdapter< threadId: aguiState.threadId, model: options.model, timestamp, + parentRunId: options.parentRunId, }) } @@ -266,6 +267,7 @@ export class GrokTextAdapter< threadId: aguiState.threadId, model: chunk.model || options.model, timestamp, + parentRunId: options.parentRunId, }) } diff --git a/packages/typescript/ai-groq/src/adapters/text.ts b/packages/typescript/ai-groq/src/adapters/text.ts index 34f44ba81..61c51bf6f 100644 --- a/packages/typescript/ai-groq/src/adapters/text.ts +++ b/packages/typescript/ai-groq/src/adapters/text.ts @@ -129,6 +129,7 @@ export class GroqTextAdapter< threadId: aguiState.threadId, model: options.model, timestamp, + parentRunId: options.parentRunId, }) } @@ -265,6 +266,7 @@ export class GroqTextAdapter< threadId: 
aguiState.threadId, model: chunk.model || options.model, timestamp, + parentRunId: options.parentRunId, }) } diff --git a/packages/typescript/ai-ollama/src/adapters/text.ts b/packages/typescript/ai-ollama/src/adapters/text.ts index 209951569..f60220ba6 100644 --- a/packages/typescript/ai-ollama/src/adapters/text.ts +++ b/packages/typescript/ai-ollama/src/adapters/text.ts @@ -253,6 +253,7 @@ export class OllamaTextAdapter extends BaseTextAdapter< threadId, model: chunk.model, timestamp, + parentRunId: options.parentRunId, }) } diff --git a/packages/typescript/ai-openai/src/adapters/text.ts b/packages/typescript/ai-openai/src/adapters/text.ts index 139629869..fed485cd2 100644 --- a/packages/typescript/ai-openai/src/adapters/text.ts +++ b/packages/typescript/ai-openai/src/adapters/text.ts @@ -321,6 +321,7 @@ export class OpenAITextAdapter< threadId, model: model || options.model, timestamp, + parentRunId: options.parentRunId, }) } diff --git a/packages/typescript/ai-openrouter/src/adapters/text.ts b/packages/typescript/ai-openrouter/src/adapters/text.ts index 29427171c..d0881bb05 100644 --- a/packages/typescript/ai-openrouter/src/adapters/text.ts +++ b/packages/typescript/ai-openrouter/src/adapters/text.ts @@ -162,6 +162,7 @@ export class OpenRouterTextAdapter< threadId: aguiState.threadId, model: currentModel || options.model, timestamp, + parentRunId: options.parentRunId, }) } @@ -229,6 +230,7 @@ export class OpenRouterTextAdapter< threadId: aguiState.threadId, model: options.model, timestamp, + parentRunId: options.parentRunId, }) } diff --git a/packages/typescript/ai/docs/chat-architecture.md b/packages/typescript/ai/docs/chat-architecture.md index 5625a6ab5..c0693bb45 100644 --- a/packages/typescript/ai/docs/chat-architecture.md +++ b/packages/typescript/ai/docs/chat-architecture.md @@ -9,18 +9,19 @@ ## Table of Contents 1. [System Overview](#system-overview) -2. [Single-Shot Text Response](#single-shot-text-response) -3. 
[Single-Shot Tool Call Response](#single-shot-tool-call-response) -4. [Parallel Tool Calls (Single Shot)](#parallel-tool-calls-single-shot) -5. [Text-Then-Tool Interleaving (Single Shot)](#text-then-tool-interleaving-single-shot) -6. [Thinking/Reasoning Content](#thinkingreasoning-content) -7. [Tool Results and the TOOL_CALL_END Dual Role](#tool-results-and-the-tool_call_end-dual-role) -8. [Client Tools and Approval Flows](#client-tools-and-approval-flows) -9. [Multi-Iteration Agent Loop](#multi-iteration-agent-loop) -10. [Adapter Contract](#adapter-contract) -11. [StreamProcessor Internal State](#streamprocessor-internal-state) -12. [UIMessage Part Ordering Invariants](#uimessage-part-ordering-invariants) -13. [Testing Strategy](#testing-strategy) +2. [Wire format (HTTP body)](#wire-format-http-body) +3. [Single-Shot Text Response](#single-shot-text-response) +4. [Single-Shot Tool Call Response](#single-shot-tool-call-response) +5. [Parallel Tool Calls (Single Shot)](#parallel-tool-calls-single-shot) +6. [Text-Then-Tool Interleaving (Single Shot)](#text-then-tool-interleaving-single-shot) +7. [Thinking/Reasoning Content](#thinkingreasoning-content) +8. [Tool Results and the TOOL_CALL_END Dual Role](#tool-results-and-the-tool_call_end-dual-role) +9. [Client Tools and Approval Flows](#client-tools-and-approval-flows) +10. [Multi-Iteration Agent Loop](#multi-iteration-agent-loop) +11. [Adapter Contract](#adapter-contract) +12. [StreamProcessor Internal State](#streamprocessor-internal-state) +13. [UIMessage Part Ordering Invariants](#uimessage-part-ordering-invariants) +14. [Testing Strategy](#testing-strategy) --- @@ -59,6 +60,33 @@ Both trust the adapter to emit events in the correct order. 
The processor does * --- +## Wire format (HTTP body) + +The HTTP body posted by `@tanstack/ai-client` to a `chat()` endpoint is the AG-UI `RunAgentInput` shape from `@ag-ui/core`: + +```json +{ + "threadId": "thread-...", + "runId": "run-...", + "state": {}, + "messages": [...], + "tools": [...], + "context": [], + "forwardedProps": {} +} +``` + +Each entry in `messages` is either: + +- A **TanStack anchor** β€” a `UIMessage` with its canonical `parts` array, augmented with AG-UI mirror fields (`content` for system/user/assistant; `toolCalls` for assistant) so AG-UI Zod parsing succeeds. +- An **AG-UI fan-out duplicate** β€” a `{role:'tool',...}` or `{role:'reasoning',...}` entry generated from each `ToolResultPart`/`ThinkingPart` on the assistant anchor. Strict AG-UI server consumers walk these role-based messages directly. + +On the server, `chatParamsFromRequestBody` validates the body and returns the parsed fields. `convertMessagesToModelMessages` (called inside `chat()`) handles dedup: when an anchor's `parts` already contain a `tool-result`, the matching fan-out tool message is dropped from the `ModelMessage[]` fed to the LLM. `reasoning` and `activity` messages are dropped (no `ModelMessage` equivalent today); `developer` messages collapse to `system`. + +For the migration story when upgrading, see `docs/migration/ag-ui-compliance.md`. + +--- + ## Single-Shot Text Response The simplest possible flow. The model returns text with no tool calls. 
diff --git a/packages/typescript/ai/package.json b/packages/typescript/ai/package.json index 2ad62f817..ac20beae3 100644 --- a/packages/typescript/ai/package.json +++ b/packages/typescript/ai/package.json @@ -61,7 +61,7 @@ "tanstack-intent" ], "dependencies": { - "@ag-ui/core": "0.0.49", + "@ag-ui/core": "^0.0.52", "@tanstack/ai-event-client": "workspace:*", "partial-json": "^0.1.7" }, diff --git a/packages/typescript/ai/skills/ai-core/ag-ui-protocol/SKILL.md b/packages/typescript/ai/skills/ai-core/ag-ui-protocol/SKILL.md index 561843174..236ff887d 100644 --- a/packages/typescript/ai/skills/ai-core/ag-ui-protocol/SKILL.md +++ b/packages/typescript/ai/skills/ai-core/ag-ui-protocol/SKILL.md @@ -39,6 +39,49 @@ export async function POST(request: Request) { typed AG-UI event (discriminated union on `type`). The `toServerSentEventsResponse()` helper encodes that iterable into an SSE-formatted `Response` with correct headers. +## Setup β€” Receiving AG-UI RunAgentInput on the Server + +```typescript +import { + chat, + chatParamsFromRequestBody, + mergeAgentTools, + toServerSentEventsResponse, +} from '@tanstack/ai' +import { openaiText } from '@tanstack/ai-openai/adapters' +import { serverTools } from './tools' + +export async function POST(req: Request) { + let params + try { + params = await chatParamsFromRequestBody(await req.json()) + } catch (error) { + return new Response( + error instanceof Error ? error.message : 'Bad request', + { status: 400 }, + ) + } + + const stream = chat({ + adapter: openaiText('gpt-4o'), + messages: params.messages, + tools: mergeAgentTools(serverTools, params.tools), + threadId: params.threadId, + runId: params.runId, + }) + + return toServerSentEventsResponse(stream) +} +``` + +`chatParamsFromRequestBody` validates the body against `RunAgentInputSchema` from `@ag-ui/core`. 
`mergeAgentTools` merges the server's tool registry with client-declared tools (server wins on collision; client-only tools become no-execute stubs that flow through the runtime's `ClientToolRequest` path). + +`params.messages` is a mixed array of TanStack `UIMessage` anchors (with `parts`) and AG-UI fan-out duplicates (`{role:'tool',...}`, `{role:'reasoning',...}`). The existing `convertMessagesToModelMessages` (called inside `chat()`) handles dedup automatically. + +**Wire shape (POST body):** AG-UI `RunAgentInput` β€” `{threadId, runId, parentRunId?, state, messages, tools, context, forwardedProps}`. The `messages` array carries TanStack `UIMessage` anchors with their canonical `parts` plus AG-UI mirror fields (`content`, `toolCalls`) inline; tool results and thinking parts are additionally emitted as fan-out `{role:'tool',...}` and `{role:'reasoning',...}` entries. + +**`forwardedProps` security:** Don't spread it directly into `chat()` β€” clients could override `adapter`, `model`, `tools`, etc. Always allowlist specific fields. + ## Core Patterns ### 1. SSE Format β€” toServerSentEventsStream / toServerSentEventsResponse @@ -223,9 +266,11 @@ Source: docs/protocol/chunk-definitions.md ## Tension -HIGH Tension: AG-UI protocol compliance vs. internal message format -- TanStack -AI's `UIMessage` format (parts-based) diverges from AG-UI spec (content-based). -Full compliance would require a different message structure. +RESOLVED: TanStack AI is fully AG-UI compliant on both axes (serverβ†’client events +AND clientβ†’server `RunAgentInput`). The wire format carries TanStack `UIMessage` +anchors with their parts intact alongside AG-UI fan-out messages, so strict AG-UI +servers see role-based messages while TanStack-aware servers read parts directly +without transformation. See `docs/migration/ag-ui-compliance.md` for details. 
## Cross-References diff --git a/packages/typescript/ai/src/activities/chat/index.ts b/packages/typescript/ai/src/activities/chat/index.ts index e1327fdb5..e9eef21ad 100644 --- a/packages/typescript/ai/src/activities/chat/index.ts +++ b/packages/typescript/ai/src/activities/chat/index.ts @@ -45,6 +45,7 @@ import type { ToolCallArgsEvent, ToolCallEndEvent, ToolCallStartEvent, + UIMessage, } from '../../types' import type { ChatMiddleware, @@ -82,12 +83,21 @@ export interface TextActivityOptions< > { /** The text adapter to use (created by a provider function like openaiText('gpt-4o')) */ adapter: TAdapter - /** Conversation messages - content types are constrained by the adapter's input modalities and metadata */ + /** + * Conversation messages. Accepts: + * - `ConstrainedModelMessage` β€” content types constrained by the adapter's input modalities. + * - `ModelMessage` β€” unconstrained model message (e.g., forwarded from an AG-UI wire payload). + * - `UIMessage` β€” parts-based UI representation; converted internally via `convertMessagesToModelMessages`. + * + * The three shapes can be mixed in a single array (e.g., when forwarding a wire payload that includes both anchor UIMessages and AG-UI fan-out ModelMessages). + */ messages?: Array< - ConstrainedModelMessage<{ - inputModalities: TAdapter['~types']['inputModalities'] - messageMetadataByModality: TAdapter['~types']['messageMetadataByModality'] - }> + | UIMessage + | ModelMessage + | ConstrainedModelMessage<{ + inputModalities: TAdapter['~types']['inputModalities'] + messageMetadataByModality: TAdapter['~types']['messageMetadataByModality'] + }> > /** System prompts to prepend to the conversation */ systemPrompts?: TextOptions['systemPrompts'] @@ -125,6 +135,8 @@ export interface TextActivityOptions< threadId?: TextOptions['threadId'] /** Run ID override for AG-UI protocol. Auto-generated by adapter if not provided. */ runId?: TextOptions['runId'] + /** Parent run ID for AG-UI protocol nested run correlation. 
*/ + parentRunId?: TextOptions['parentRunId'] /** * Optional Standard Schema for structured output. * When provided, the activity will: @@ -294,6 +306,7 @@ class TextEngine< // AG-UI protocol IDs private threadId: string private runIdOverride?: string + private parentRunIdOverride?: string // Middleware support private readonly middlewareRunner: MiddlewareRunner @@ -347,6 +360,7 @@ class TextEngine< this.effectiveSignal = config.params.abortController?.signal this.threadId = config.params.threadId || this.createId('thread') this.runIdOverride = config.params.runId + this.parentRunIdOverride = config.params.parentRunId // Initialize middleware β€” devtools first, strip-to-spec always last. // handleStreamChunk processes raw chunks BEFORE middleware, so internal @@ -612,6 +626,7 @@ class TextEngine< logger: this.logger, threadId: this.threadId, runId: this.runIdOverride, + parentRunId: this.parentRunIdOverride, })) { if (this.isCancelled()) { break diff --git a/packages/typescript/ai/src/activities/chat/messages.ts b/packages/typescript/ai/src/activities/chat/messages.ts index b7f97b880..735887bb4 100644 --- a/packages/typescript/ai/src/activities/chat/messages.ts +++ b/packages/typescript/ai/src/activities/chat/messages.ts @@ -63,15 +63,55 @@ function getTextContent(content: string | null | Array): string { export function convertMessagesToModelMessages( messages: Array, ): Array { + // Pre-pass: collect toolCallIds already represented in anchor UIMessage parts. + // Fan-out tool messages whose toolCallId matches an anchored ToolResultPart + // are AG-UI duplicates and must be dropped to avoid double-feeding the LLM. 
+ const anchoredToolCallIds = new Set() + for (const msg of messages) { + if ('parts' in msg) { + for (const part of msg.parts) { + if (part.type === 'tool-result') { + anchoredToolCallIds.add(part.toolCallId) + } + } + } + } + const modelMessages: Array = [] for (const msg of messages) { if ('parts' in msg) { - // UIMessage - convert to ModelMessages + // UIMessage anchor β€” existing fan-out path modelMessages.push(...uiMessageToModelMessages(msg)) - } else { - // Already ModelMessage - modelMessages.push(msg) + continue + } + + const role = (msg as { role: string }).role + + // AG-UI tool fan-out duplicate β€” drop if anchor already covers it + if ( + role === 'tool' && + msg.toolCallId && + anchoredToolCallIds.has(msg.toolCallId) + ) { + continue } + + // AG-UI reasoning and activity β€” no ModelMessage equivalent today + if (role === 'reasoning' || role === 'activity') { + continue + } + + // AG-UI developer β€” collapse to system + if (role === 'developer') { + modelMessages.push({ + role: 'system' as ModelMessage['role'], + content: (msg as { content: string }).content, + } as ModelMessage) + continue + } + + // Already a ModelMessage (user, assistant, system, tool with no anchor) β€” pass through + modelMessages.push(msg) } return modelMessages } diff --git a/packages/typescript/ai/src/index.ts b/packages/typescript/ai/src/index.ts index ef45543be..e32f183e6 100644 --- a/packages/typescript/ai/src/index.ts +++ b/packages/typescript/ai/src/index.ts @@ -168,6 +168,16 @@ export type { JSONParser, } from './activities/chat/stream/index' +// Chat utilities +export { + chatParamsFromRequestBody, + mergeAgentTools, +} from './utilities/chat-params' + +// AG-UI wire serialization (used internally by @tanstack/ai-client) +export { uiMessagesToWire } from './utilities/ag-ui-wire' +export type { WireMessage } from './utilities/ag-ui-wire' + // Adapter extension utilities export { createModel, extendAdapter } from './extend-adapter' export type { ExtendedModelDef } 
from './extend-adapter' diff --git a/packages/typescript/ai/src/types.ts b/packages/typescript/ai/src/types.ts index e11e7176f..7abb1161c 100644 --- a/packages/typescript/ai/src/types.ts +++ b/packages/typescript/ai/src/types.ts @@ -758,6 +758,11 @@ export interface TextOptions< * If not provided, a unique ID will be generated. */ runId?: string + /** + * Parent run ID for AG-UI protocol nested run correlation. + * Surfaced for observability/middleware; not consumed by the LLM call. + */ + parentRunId?: string } // ============================================================================ diff --git a/packages/typescript/ai/src/utilities/ag-ui-wire.ts b/packages/typescript/ai/src/utilities/ag-ui-wire.ts new file mode 100644 index 000000000..f7abd3606 --- /dev/null +++ b/packages/typescript/ai/src/utilities/ag-ui-wire.ts @@ -0,0 +1,182 @@ +import type { ContentPart, MessagePart, TextPart, UIMessage } from '../types' + +type AGUITextInputContent = { type: 'text'; text: string } +type AGUIInputContent = + | AGUITextInputContent + | (ContentPart & { type: 'image' | 'audio' | 'video' | 'document' }) + +type AGUIToolCallMirror = { + id: string + type: 'function' + function: { name: string; arguments: string } +} + +type AGUIToolMessage = { + role: 'tool' + id: string + toolCallId: string + content: string + error?: string +} + +type AGUIReasoningMessage = { + role: 'reasoning' + id: string + content: string +} + +type WireAnchorMessage = UIMessage & { + content?: string | Array + toolCalls?: Array +} + +export type WireMessage = + | WireAnchorMessage + | AGUIToolMessage + | AGUIReasoningMessage + +/** + * Serialize TanStack `UIMessage`s into the AG-UI `RunAgentInput.messages` + * wire shape. Each anchor (system/user/assistant) carries the canonical + * `parts` array verbatim plus AG-UI mirror fields (`content`, `toolCalls`) + * so AG-UI Zod parsing succeeds. 
Tool results and thinking parts on + * assistant messages are additionally emitted as fan-out + * `{role:'tool',...}` and `{role:'reasoning',...}` entries for strict + * AG-UI server consumers. + */ +export function uiMessagesToWire( + messages: Array, +): Array { + const wire: Array = [] + + for (const msg of messages) { + // Defensive: if parts is missing (ModelMessage-shaped input), pass through as-is. + // UIMessage always has parts; ModelMessage uses content directly. + const parts: ReadonlyArray = + (msg.parts as ReadonlyArray | undefined) ?? [] + + if (msg.role === 'system') { + wire.push({ + ...msg, + content: + parts.length > 0 + ? collectText(parts) + : ((msg as unknown as { content?: string }).content ?? ''), + }) + continue + } + + if (msg.role === 'user') { + wire.push({ + ...msg, + content: + parts.length > 0 + ? collectUserContent(parts) + : ((msg as unknown as { content?: string }).content ?? ''), + }) + continue + } + + // assistant: emit reasoning fan-outs first, then anchor, then tool fan-outs + for (const part of parts) { + if (part.type === 'thinking') { + wire.push({ + role: 'reasoning', + id: deriveReasoningId(msg.id, part), + content: part.content, + }) + } + } + + const text = collectText(parts) + const toolCalls = collectToolCalls(parts) + wire.push({ + ...msg, + ...(text !== '' && { content: text }), + ...(toolCalls && { toolCalls }), + }) + + for (const part of parts) { + if (part.type === 'tool-result') { + wire.push({ + role: 'tool', + id: deriveToolMessageId(part.toolCallId), + toolCallId: part.toolCallId, + content: part.content, + ...(part.error !== undefined && { error: part.error }), + }) + } + } + } + + return wire +} + +function collectText(parts: ReadonlyArray): string { + return parts + .filter((p): p is TextPart => p.type === 'text') + .map((p) => p.content) + .join('') +} + +function collectUserContent( + parts: ReadonlyArray, +): string | Array { + const hasMultimodal = parts.some( + (p) => + p.type === 'image' || + p.type 
=== 'audio' || + p.type === 'video' || + p.type === 'document', + ) + if (!hasMultimodal) { + return collectText(parts) + } + const out: Array = [] + for (const p of parts) { + if (p.type === 'text') { + out.push({ type: 'text', text: p.content }) + } else if ( + p.type === 'image' || + p.type === 'audio' || + p.type === 'video' || + p.type === 'document' + ) { + out.push(p as AGUIInputContent) + } + } + return out +} + +function collectToolCalls( + parts: ReadonlyArray, +): Array | undefined { + const calls: Array = [] + for (const p of parts) { + if (p.type === 'tool-call') { + calls.push({ + id: p.id, + type: 'function', + function: { name: p.name, arguments: p.arguments }, + }) + } + } + return calls.length > 0 ? calls : undefined +} + +function deriveReasoningId(messageId: string, part: MessagePart): string { + return `${messageId}-reasoning-${(part as { id?: string }).id ?? hashContent((part as { content: string }).content)}` +} + +function deriveToolMessageId(toolCallId: string): string { + return `tool-${toolCallId}` +} + +function hashContent(s: string): string { + // Cheap deterministic id suffix; collisions are tolerable since + // reasoning ids only matter for AG-UI server consumers, not for our + // own server's dedup logic (which keys on toolCallId, not reasoning id). 
+ let h = 0 + for (let i = 0; i < s.length; i++) h = (h * 31 + s.charCodeAt(i)) | 0 + return Math.abs(h).toString(36) +} diff --git a/packages/typescript/ai/src/utilities/chat-params.ts b/packages/typescript/ai/src/utilities/chat-params.ts new file mode 100644 index 000000000..d5afc08c2 --- /dev/null +++ b/packages/typescript/ai/src/utilities/chat-params.ts @@ -0,0 +1,140 @@ +import { AGUIError, RunAgentInputSchema } from '@ag-ui/core' +import type { Context as AGUIContext } from '@ag-ui/core' +import type { JSONSchema, ModelMessage, Tool, UIMessage } from '../types' + +const KNOWN_PART_TYPES = new Set([ + 'text', + 'image', + 'audio', + 'video', + 'document', + 'tool-call', + 'tool-result', + 'thinking', +]) + +function isValidParts(value: unknown): value is Array<{ type: string }> { + if (!Array.isArray(value)) return false + for (const p of value) { + if (!p || typeof p !== 'object') return false + const type = (p as { type?: unknown }).type + if (typeof type !== 'string' || !KNOWN_PART_TYPES.has(type)) return false + } + return true +} + +/** + * Parse and validate an HTTP request body as an AG-UI `RunAgentInput`. + * + * Returns a spread-friendly object whose `messages` field is suitable for + * passing directly to `chat({ messages })`. The existing + * `convertMessagesToModelMessages` handles AG-UI fan-out dedup and + * reasoning/activity/developer-role normalization internally. + * + * @throws An error with a migration-pointing message when the body does + * not conform to AG-UI 0.0.52 `RunAgentInputSchema`. Surface this as a + * 400 Bad Request to the client. 
+ */ +export function chatParamsFromRequestBody(body: unknown): Promise<{ + messages: Array + threadId: string + runId: string + parentRunId?: string + tools: Array<{ name: string; description: string; parameters: JSONSchema }> + forwardedProps: Record + state: unknown + context: Array +}> { + const parseResult = RunAgentInputSchema.safeParse(body) + if (!parseResult.success) { + return Promise.reject( + new AGUIError( + `Request body is not a valid AG-UI RunAgentInput. ` + + `If you're upgrading from a previous @tanstack/ai-client release, ` + + `see docs/migration/ag-ui-compliance.md. ` + + `Validation errors: ${parseResult.error.message}`, + ), + ) + } + + const parsed = parseResult.data + + // AG-UI Zod uses `.strip()` so extra fields like `parts` on messages are + // dropped during parse. We re-attach them from the original body so the + // existing UIMessage path inside `chat()` can use them directly. + const rawMessages = + (body as { messages?: Array> }).messages ?? [] + const messages = parsed.messages.map((m, i) => { + const raw = rawMessages[i] + if ( + raw && + typeof raw === 'object' && + 'parts' in raw && + isValidParts(raw.parts) + ) { + return { ...m, parts: raw.parts } as UIMessage | ModelMessage + } + return m as ModelMessage + }) + + return Promise.resolve({ + messages, + threadId: parsed.threadId, + runId: parsed.runId, + parentRunId: parsed.parentRunId, + tools: parsed.tools as Array<{ + name: string + description: string + parameters: JSONSchema + }>, + forwardedProps: (parsed.forwardedProps ?? {}) as Record, + state: parsed.state, + context: parsed.context, + }) +} + +/** + * Merge a server-side tool registry with the AG-UI client-declared tools + * received in the request body. + * + * Rules: + * - Server tools win on name collision. The client's declaration is + * ignored if the server already has a tool with that name. 
The client's + * UI-side handler still fires when the streamed tool-result event comes + * through (see `chat-client.ts` `onToolCall`), giving the + * "after server execution the client also handles" semantic for free. + * - Client-only tools (name not in `serverTools`) become no-execute + * entries: the runtime's existing `ClientToolRequest` path handles + * them β€” server emits a tool-call request, client executes via its + * registered handler, client posts back the result. + * + * @param serverTools - The server's `toolDefinition().server(...)` registry, + * keyed by tool name. + * @param clientTools - The `tools` array received from + * `chatParamsFromRequestBody(...)`. + * @returns A merged record suitable for `chat({ tools })`. + */ +export function mergeAgentTools( + serverTools: Record, + clientTools: Array<{ + name: string + description: string + parameters: JSONSchema + }>, +): Record { + const merged: Record = { ...serverTools } + for (const ct of clientTools) { + if (ct.name in merged) { + // Server wins + continue + } + merged[ct.name] = { + name: ct.name, + description: ct.description, + inputSchema: ct.parameters, + // No `execute` β€” runtime treats this as a client-side tool and + // emits ClientToolRequest events. 
+ } as Tool + } + return merged +} diff --git a/packages/typescript/ai/tests/ag-ui-wire.test.ts b/packages/typescript/ai/tests/ag-ui-wire.test.ts new file mode 100644 index 000000000..7de0fc0a0 --- /dev/null +++ b/packages/typescript/ai/tests/ag-ui-wire.test.ts @@ -0,0 +1,162 @@ +import { describe, it, expect } from 'vitest' +import { uiMessagesToWire } from '../src/utilities/ag-ui-wire' +import type { UIMessage } from '../src/types' + +describe('uiMessagesToWire', () => { + it('mirrors a system UIMessage to a string content field', () => { + const messages: Array = [ + { + id: 's1', + role: 'system', + parts: [{ type: 'text', content: 'You are helpful' }], + }, + ] + const wire = uiMessagesToWire(messages) + expect(wire).toHaveLength(1) + expect(wire[0]!).toMatchObject({ + id: 's1', + role: 'system', + content: 'You are helpful', + }) + expect((wire[0]! as any).parts).toBeDefined() + }) + + it('mirrors a user UIMessage with a text-only parts list to a string content', () => { + const messages: Array = [ + { id: 'u1', role: 'user', parts: [{ type: 'text', content: 'hi' }] }, + ] + const wire = uiMessagesToWire(messages) + expect(wire).toHaveLength(1) + expect(wire[0]!).toMatchObject({ id: 'u1', role: 'user', content: 'hi' }) + }) + + it('mirrors a user UIMessage with mixed multimodal parts to an InputContent[] content', () => { + const messages: Array = [ + { + id: 'u1', + role: 'user', + parts: [ + { type: 'text', content: 'look at this' }, + { + type: 'image', + source: { + type: 'url', + value: 'https://example.com/cat.png', + mimeType: 'image/png', + }, + }, + ], + }, + ] + const wire = uiMessagesToWire(messages) + expect(wire).toHaveLength(1) + expect(Array.isArray((wire[0]! as any).content)).toBe(true) + expect((wire[0]! as any).content).toHaveLength(2) + expect((wire[0]! as any).content[0]).toEqual({ + type: 'text', + text: 'look at this', + }) + expect((wire[0]! 
as any).content[1]).toMatchObject({ + type: 'image', + source: { + type: 'url', + value: 'https://example.com/cat.png', + mimeType: 'image/png', + }, + }) + }) + + it('emits assistant anchor with toolCalls mirror and a separate tool fan-out per ToolResultPart', () => { + const messages: Array = [ + { + id: 'a1', + role: 'assistant', + parts: [ + { type: 'text', content: 'ok' }, + { + type: 'tool-call', + id: 'tc1', + name: 'getTodos', + arguments: '{}', + state: 'input-complete', + }, + { + type: 'tool-result', + toolCallId: 'tc1', + content: '[]', + state: 'complete', + }, + ], + }, + ] + const wire = uiMessagesToWire(messages) + expect(wire).toHaveLength(2) + // Anchor + expect(wire[0]!).toMatchObject({ + id: 'a1', + role: 'assistant', + content: 'ok', + toolCalls: [ + { + id: 'tc1', + type: 'function', + function: { name: 'getTodos', arguments: '{}' }, + }, + ], + }) + // Fan-out tool message + expect(wire[1]!).toMatchObject({ + role: 'tool', + toolCallId: 'tc1', + content: '[]', + }) + }) + + it('emits a separate reasoning fan-out before the assistant anchor for each ThinkingPart', () => { + const messages: Array = [ + { + id: 'a1', + role: 'assistant', + parts: [ + { type: 'thinking', content: 'pondering' }, + { type: 'text', content: 'answer' }, + ], + }, + ] + const wire = uiMessagesToWire(messages) + expect(wire).toHaveLength(2) + expect(wire[0]!).toMatchObject({ role: 'reasoning', content: 'pondering' }) + expect(wire[1]!).toMatchObject({ + id: 'a1', + role: 'assistant', + content: 'answer', + }) + }) + + it('preserves the original `parts` array on every anchor message', () => { + const messages: Array = [ + { id: 'u1', role: 'user', parts: [{ type: 'text', content: 'hi' }] }, + ] + const wire = uiMessagesToWire(messages) + expect((wire[0]! 
as any).parts).toEqual([{ type: 'text', content: 'hi' }]) + }) + + it('preserves per-part metadata on multimodal parts (round-trip via parts field)', () => { + const messages: Array = [ + { + id: 'u1', + role: 'user', + parts: [ + { + type: 'image', + source: { type: 'data', value: 'base64...', mimeType: 'image/png' }, + metadata: { detail: 'high' }, + }, + ], + }, + ] + const wire = uiMessagesToWire(messages) + const partOnAnchor = (wire[0]! as any).parts[0] + expect(partOnAnchor.metadata).toEqual({ detail: 'high' }) + }) +}) diff --git a/packages/typescript/ai/tests/chat-params.test.ts b/packages/typescript/ai/tests/chat-params.test.ts new file mode 100644 index 000000000..0c984288b --- /dev/null +++ b/packages/typescript/ai/tests/chat-params.test.ts @@ -0,0 +1,120 @@ +import { describe, it, expect } from 'vitest' +import { + chatParamsFromRequestBody, + mergeAgentTools, +} from '../src/utilities/chat-params' + +describe('chatParamsFromRequestBody', () => { + const validBody = { + threadId: 'thread-1', + runId: 'run-1', + state: {}, + messages: [ + { + id: 'm1', + role: 'user', + content: 'hello', + // TanStack canonical (extra) β€” should pass through untouched + parts: [{ type: 'text', content: 'hello' }], + }, + ], + tools: [], + context: [], + forwardedProps: { temperature: 0.7 }, + } + + it('returns parsed fields verbatim on a valid body', async () => { + const result = await chatParamsFromRequestBody(validBody) + expect(result.threadId).toBe('thread-1') + expect(result.runId).toBe('run-1') + expect(result.messages).toHaveLength(1) + expect(result.tools).toEqual([]) + expect(result.forwardedProps).toEqual({ temperature: 0.7 }) + }) + + it('preserves the `parts` field on messages (AG-UI strip mode tolerates extras in raw JSON)', async () => { + const result = await chatParamsFromRequestBody(validBody) + const m = result.messages[0] as { parts?: unknown } + expect(m.parts).toEqual([{ type: 'text', content: 'hello' }]) + }) + + it('throws on missing threadId', 
async () => { + const { threadId, ...rest } = validBody + await expect(chatParamsFromRequestBody(rest)).rejects.toThrow() + }) + + it('throws on missing runId', async () => { + const { runId, ...rest } = validBody + await expect(chatParamsFromRequestBody(rest)).rejects.toThrow() + }) + + it('throws on missing messages', async () => { + const { messages, ...rest } = validBody + await expect(chatParamsFromRequestBody(rest)).rejects.toThrow() + }) + + it('rejects the legacy {messages, data} shape with a migration-pointing error', async () => { + const oldBody = { + messages: [ + { id: 'm1', role: 'user', parts: [{ type: 'text', content: 'hi' }] }, + ], + data: {}, + } + await expect(chatParamsFromRequestBody(oldBody)).rejects.toThrow( + /AG-UI|RunAgentInput|migration/i, + ) + }) +}) + +describe('mergeAgentTools', () => { + const fakeServerTool = (name: string) => ({ + name, + description: `server ${name}`, + inputSchema: { type: 'object', properties: {} }, + execute: async () => ({ ok: true }), + }) + + it('returns server tools unchanged when client list is empty', () => { + const server = { greet: fakeServerTool('greet') } + const result = mergeAgentTools(server, []) + expect(Object.keys(result)).toEqual(['greet']) + expect(result['greet']!.execute).toBeDefined() + }) + + it('adds client-only tools as no-execute stubs', () => { + const server = {} + const client = [ + { + name: 'showToast', + description: 'render a toast', + parameters: { type: 'object', properties: {} }, + }, + ] + const result = mergeAgentTools(server, client) + expect(Object.keys(result)).toEqual(['showToast']) + expect(result['showToast']!.execute).toBeUndefined() + expect(result['showToast']!.inputSchema).toEqual({ + type: 'object', + properties: {}, + }) + expect(result['showToast']!.description).toBe('render a toast') + }) + + it('server wins on name collision (client declaration ignored)', () => { + const server = { greet: fakeServerTool('greet') } + const client = [ + { + name: 'greet', + 
description: 'overridden', + parameters: { type: 'object', properties: { foo: { type: 'string' } } }, + }, + ] + const result = mergeAgentTools(server, client) + expect(result['greet']!.description).toBe('server greet') + expect(result['greet']!.execute).toBeDefined() + }) + + it('handles empty server and empty client', () => { + expect(mergeAgentTools({}, [])).toEqual({}) + }) +}) diff --git a/packages/typescript/ai/tests/messages.test.ts b/packages/typescript/ai/tests/messages.test.ts new file mode 100644 index 000000000..025e33b8a --- /dev/null +++ b/packages/typescript/ai/tests/messages.test.ts @@ -0,0 +1,102 @@ +import { describe, expect, it } from 'vitest' +import { convertMessagesToModelMessages } from '../src/activities/chat/messages' +import type { ModelMessage, UIMessage } from '../src/types' + +describe('convertMessagesToModelMessages β€” AG-UI dedup pre-pass', () => { + it('drops fan-out tool message when an anchor UIMessage already represents the tool result', () => { + const messages = [ + { + id: 'a1', + role: 'assistant', + parts: [ + { type: 'text', content: 'calling' }, + { + type: 'tool-call', + id: 'tc1', + name: 'getTodos', + arguments: '{}', + state: 'input-complete', + }, + { + type: 'tool-result', + toolCallId: 'tc1', + content: '[]', + state: 'complete', + }, + ], + } as UIMessage, + // AG-UI fan-out duplicate β€” should be dropped + { + role: 'tool', + toolCallId: 'tc1', + content: '[]', + } as ModelMessage, + ] + + const result = convertMessagesToModelMessages(messages) + const toolMessages = result.filter((m) => m.role === 'tool') + expect(toolMessages).toHaveLength(1) + expect(toolMessages[0]?.toolCallId).toBe('tc1') + }) + + it('keeps tool messages from a foreign AG-UI client (no anchor parts)', () => { + const messages = [ + // No UIMessage with parts; this is what a foreign AG-UI client sends. 
+ { + role: 'assistant', + content: 'calling', + toolCalls: [ + { + id: 'tc1', + type: 'function', + function: { name: 'getTodos', arguments: '{}' }, + }, + ], + } as ModelMessage, + { role: 'tool', toolCallId: 'tc1', content: '[]' } as ModelMessage, + ] + + const result = convertMessagesToModelMessages(messages) + const toolMessages = result.filter((m) => m.role === 'tool') + expect(toolMessages).toHaveLength(1) + expect(toolMessages[0]?.toolCallId).toBe('tc1') + }) + + it('drops AG-UI reasoning messages (no ModelMessage equivalent today)', () => { + const messages = [ + { role: 'reasoning', content: 'thinking...' } as unknown as ModelMessage, + { role: 'user', content: 'hi' } as ModelMessage, + ] + + const result = convertMessagesToModelMessages(messages) + expect(result.find((m) => (m as any).role === 'reasoning')).toBeUndefined() + expect(result).toHaveLength(1) + expect(result[0]?.role).toBe('user') + }) + + it('drops AG-UI activity messages', () => { + const messages = [ + { role: 'activity', content: 'event' } as unknown as ModelMessage, + { role: 'user', content: 'hi' } as ModelMessage, + ] + + const result = convertMessagesToModelMessages(messages) + expect(result).toHaveLength(1) + expect(result[0]?.role).toBe('user') + }) + + it('collapses AG-UI developer messages to system role', () => { + const messages = [ + { + role: 'developer', + content: 'You are helpful', + } as unknown as ModelMessage, + { role: 'user', content: 'hi' } as ModelMessage, + ] + + const result = convertMessagesToModelMessages(messages) + expect(result).toHaveLength(2) + expect(result[0]?.role).toBe('system') + expect(result[0]?.content).toBe('You are helpful') + }) +}) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index eb41b0817..2615b6fa6 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -935,8 +935,8 @@ importers: packages/typescript/ai: dependencies: '@ag-ui/core': - specifier: 0.0.49 - version: 0.0.49 + specifier: ^0.0.52 + version: 0.0.52 '@tanstack/ai-event-client': 
specifier: workspace:* version: link:../ai-event-client @@ -1891,8 +1891,8 @@ packages: '@acemir/cssom@0.9.29': resolution: {integrity: sha512-G90x0VW+9nW4dFajtjCoT+NM0scAfH9Mb08IcjgFHYbfiL/lU04dTF9JuVOi3/OH+DJCQdcIseSXkdCB9Ky6JA==} - '@ag-ui/core@0.0.49': - resolution: {integrity: sha512-9ywypwjUGtIvTxJ2eKQjhPZgLnSFAfNK7vZUcT7Bz4ur4yAIB+lAFtzvu7VDYe6jsUx/6N/71Dh4R0zX5woNVw==} + '@ag-ui/core@0.0.52': + resolution: {integrity: sha512-Xo0bUaNV56EqylzcrAuhUkQX7et7+SZIrqZZtEByGwEq/I1EHny6ZMkWHLkKR7UNi0FJZwJyhKYmKJS3B2SEgA==} '@anthropic-ai/sdk@0.71.2': resolution: {integrity: sha512-TGNDEUuEstk/DKu0/TflXAEt+p+p/WhTlFzEnoosvbaDU2LTjm42igSdlL0VijrKpWejtOKxX0b8A7uc+XiSAQ==} @@ -12009,7 +12009,7 @@ snapshots: '@acemir/cssom@0.9.29': {} - '@ag-ui/core@0.0.49': + '@ag-ui/core@0.0.52': dependencies: zod: 3.25.76 diff --git a/testing/e2e/src/routes/api.chat.ts b/testing/e2e/src/routes/api.chat.ts index 30a00f8cc..8d42a21fd 100644 --- a/testing/e2e/src/routes/api.chat.ts +++ b/testing/e2e/src/routes/api.chat.ts @@ -1,5 +1,10 @@ import { createFileRoute } from '@tanstack/react-router' -import { chat, maxIterations, toServerSentEventsResponse } from '@tanstack/ai' +import { + chat, + chatParamsFromRequestBody, + maxIterations, + toServerSentEventsResponse, +} from '@tanstack/ai' import type { Feature, Provider } from '@/lib/types' import { createTextAdapter } from '@/lib/providers' import { featureConfigs } from '@/lib/features' @@ -14,14 +19,27 @@ export const Route = createFileRoute('/api/chat')({ } const abortController = new AbortController() - const body = await request.json() - const { messages, data } = body - const provider: Provider = data?.provider || 'openai' - const feature: Feature = data?.feature || 'chat' - const testId: string | undefined = - typeof data?.testId === 'string' ? data.testId : undefined - const aimockPort: number | undefined = - data?.aimockPort != null ? 
Number(data.aimockPort) : undefined + + let params + try { + params = await chatParamsFromRequestBody(await request.json()) + } catch (error) { + return new Response( + error instanceof Error ? error.message : 'Bad request', + { status: 400 }, + ) + } + + const fp = params.forwardedProps as Record + const provider: Provider = ( + typeof fp.provider === 'string' ? fp.provider : 'openai' + ) as Provider + const feature: Feature = ( + typeof fp.feature === 'string' ? fp.feature : 'chat' + ) as Feature + const testId = typeof fp.testId === 'string' ? fp.testId : undefined + const aimockPort = + fp.aimockPort != null ? Number(fp.aimockPort) : undefined const config = featureConfigs[feature] const modelOverride = config.modelOverrides?.[provider] @@ -39,7 +57,9 @@ export const Route = createFileRoute('/api/chat')({ modelOptions: config.modelOptions, systemPrompts: ['You are a helpful assistant for a guitar store.'], agentLoopStrategy: maxIterations(5), - messages, + messages: params.messages, + threadId: params.threadId, + runId: params.runId, abortController, }) diff --git a/testing/e2e/src/routes/api.middleware-test.ts b/testing/e2e/src/routes/api.middleware-test.ts index a8c0def9b..ef75c316e 100644 --- a/testing/e2e/src/routes/api.middleware-test.ts +++ b/testing/e2e/src/routes/api.middleware-test.ts @@ -1,6 +1,7 @@ import { createFileRoute } from '@tanstack/react-router' import { chat, + chatParamsFromRequestBody, maxIterations, toServerSentEventsResponse, toolDefinition, @@ -51,18 +52,27 @@ export const Route = createFileRoute('/api/middleware-test')({ if (request.signal?.aborted) return new Response(null, { status: 499 }) const abortController = new AbortController() + let params try { - const body = await request.json() - const messages = body.messages - const scenario = body.data?.scenario || 'basic-text' - const middlewareMode = body.data?.middlewareMode || 'none' - const testId: string | undefined = - typeof body.data?.testId === 'string' ? 
body.data.testId : undefined - const aimockPort: number | undefined = - body.data?.aimockPort != null - ? Number(body.data.aimockPort) - : undefined + params = await chatParamsFromRequestBody(await request.json()) + } catch (error) { + return new Response( + error instanceof Error ? error.message : 'Bad request', + { status: 400 }, + ) + } + const fp = params.forwardedProps as Record + const scenario = + typeof fp.scenario === 'string' ? fp.scenario : 'basic-text' + const middlewareMode = + typeof fp.middlewareMode === 'string' ? fp.middlewareMode : 'none' + const testId: string | undefined = + typeof fp.testId === 'string' ? fp.testId : undefined + const aimockPort: number | undefined = + fp.aimockPort != null ? Number(fp.aimockPort) : undefined + + try { const adapterOptions = createTextAdapter( 'openai', undefined, @@ -81,9 +91,11 @@ export const Route = createFileRoute('/api/middleware-test')({ const stream = chat({ ...adapterOptions, - messages, + messages: params.messages, tools, middleware, + threadId: params.threadId, + runId: params.runId, agentLoopStrategy: maxIterations(10), abortController, }) diff --git a/testing/e2e/src/routes/api.tools-test.ts b/testing/e2e/src/routes/api.tools-test.ts index 5dbd8da8a..403ad12c7 100644 --- a/testing/e2e/src/routes/api.tools-test.ts +++ b/testing/e2e/src/routes/api.tools-test.ts @@ -1,5 +1,10 @@ import { createFileRoute } from '@tanstack/react-router' -import { chat, maxIterations, toServerSentEventsResponse } from '@tanstack/ai' +import { + chat, + chatParamsFromRequestBody, + maxIterations, + toServerSentEventsResponse, +} from '@tanstack/ai' import { createTextAdapter } from '@/lib/providers' import { getToolsForScenario } from '@/lib/tools-test-tools' @@ -15,17 +20,25 @@ export const Route = createFileRoute('/api/tools-test')({ const abortController = new AbortController() + let params try { - const body = await request.json() - const messages = body.messages - const scenario = body.data?.scenario || body.scenario 
|| 'text-only' - const testId: string | undefined = - typeof body.data?.testId === 'string' ? body.data.testId : undefined - const aimockPort: number | undefined = - body.data?.aimockPort != null - ? Number(body.data.aimockPort) - : undefined + params = await chatParamsFromRequestBody(await request.json()) + } catch (error) { + return new Response( + error instanceof Error ? error.message : 'Bad request', + { status: 400 }, + ) + } + const fp = params.forwardedProps as Record + const scenario = + typeof fp.scenario === 'string' ? fp.scenario : 'text-only' + const testId: string | undefined = + typeof fp.testId === 'string' ? fp.testId : undefined + const aimockPort: number | undefined = + fp.aimockPort != null ? Number(fp.aimockPort) : undefined + + try { // Special error scenario: return a stream that immediately errors if (scenario === 'error') { const errorStream = (async function* () { @@ -57,8 +70,10 @@ export const Route = createFileRoute('/api/tools-test')({ const stream = chat({ ...adapterOptions, - messages, + messages: params.messages, tools, + threadId: params.threadId, + runId: params.runId, agentLoopStrategy: maxIterations(20), abortController, }) diff --git a/testing/e2e/tests/ag-ui-compliance.spec.ts b/testing/e2e/tests/ag-ui-compliance.spec.ts new file mode 100644 index 000000000..31ded5dae --- /dev/null +++ b/testing/e2e/tests/ag-ui-compliance.spec.ts @@ -0,0 +1,63 @@ +import { test, expect } from './fixtures' +import { sendMessage, waitForResponse, featureUrl } from './helpers' + +test.describe('AG-UI client-to-server compliance', () => { + test('POST body has RunAgentInput shape and persists threadId across sends', async ({ + page, + testId, + aimockPort, + }) => { + const requestBodies: Array = [] + page.on('request', (request) => { + if (request.url().includes('/api/chat') && request.method() === 'POST') { + const body = request.postDataJSON() + if (body) requestBodies.push(body) + } + }) + + await page.goto(featureUrl('openai', 'chat', testId, 
aimockPort)) + + // Send first message + await sendMessage(page, '[chat] hello') + await waitForResponse(page) + + // Send second message in the same session + await sendMessage(page, '[chat] another message') + await waitForResponse(page) + + expect(requestBodies.length).toBeGreaterThanOrEqual(2) + + const first = requestBodies[0]! + const second = requestBodies[1]! + + // Wire shape: every field required by RunAgentInput must be present + for (const body of [first, second]) { + expect(body).toHaveProperty('threadId') + expect(body).toHaveProperty('runId') + expect(body).toHaveProperty('state') + expect(body).toHaveProperty('messages') + expect(body).toHaveProperty('tools') + expect(body).toHaveProperty('context') + expect(body).toHaveProperty('forwardedProps') + expect(Array.isArray(body.messages)).toBe(true) + expect(Array.isArray(body.tools)).toBe(true) + } + + // threadId continuity: same session β†’ same threadId + expect(first.threadId).toBe(second.threadId) + + // runId freshness: each send generates a new runId + expect(first.runId).not.toBe(second.runId) + + // Anchor messages carry `parts` (re-attached by chatParamsFromRequestBody) + const anchors = second.messages.filter( + (m: any) => + m.role === 'user' || m.role === 'system' || m.role === 'assistant', + ) + expect(anchors.length).toBeGreaterThan(0) + for (const a of anchors) { + expect(a).toHaveProperty('parts') + expect(Array.isArray(a.parts)).toBe(true) + } + }) +}) diff --git a/testing/e2e/tests/ag-ui-foreign-client.spec.ts b/testing/e2e/tests/ag-ui-foreign-client.spec.ts new file mode 100644 index 000000000..b7a19b252 --- /dev/null +++ b/testing/e2e/tests/ag-ui-foreign-client.spec.ts @@ -0,0 +1,65 @@ +import { test, expect } from './fixtures' + +test.describe('AG-UI foreign client compatibility', () => { + test('TanStack server accepts pure RunAgentInput with fan-out tool messages', async ({ + request, + testId, + aimockPort, + }) => { + const body = { + threadId: 'thread-foreign-1', + runId: 
'run-foreign-1', + state: {}, + messages: [ + { id: 'u1', role: 'user', content: '[chat] recommend a guitar' }, + ], + tools: [], + context: [], + forwardedProps: { + provider: 'openai', + feature: 'chat', + testId, + aimockPort, + }, + } + const response = await request.post('/api/chat', { + data: body, + headers: { 'Content-Type': 'application/json' }, + }) + expect( + response.ok(), + `expected 200, got ${response.status()}: ${await response.text()}`, + ).toBe(true) + const text = await response.text() + expect(text).toContain('RUN_FINISHED') + }) + + test('developer role is collapsed to system without breaking the run', async ({ + request, + testId, + aimockPort, + }) => { + const body = { + threadId: 'thread-foreign-2', + runId: 'run-foreign-2', + state: {}, + messages: [ + { id: 'd1', role: 'developer', content: 'You only speak in haiku.' }, + { id: 'u1', role: 'user', content: '[chat] recommend a guitar' }, + ], + tools: [], + context: [], + forwardedProps: { + provider: 'openai', + feature: 'chat', + testId, + aimockPort, + }, + } + const response = await request.post('/api/chat', { + data: body, + headers: { 'Content-Type': 'application/json' }, + }) + expect(response.ok()).toBe(true) + }) +}) diff --git a/testing/e2e/tests/ag-ui-old-client-rejection.spec.ts b/testing/e2e/tests/ag-ui-old-client-rejection.spec.ts new file mode 100644 index 000000000..694a373fc --- /dev/null +++ b/testing/e2e/tests/ag-ui-old-client-rejection.spec.ts @@ -0,0 +1,19 @@ +import { test, expect } from './fixtures' + +test('legacy {messages, data} wire shape is rejected with a migration-pointing error', async ({ + request, +}) => { + const oldBody = { + messages: [ + { id: 'u1', role: 'user', parts: [{ type: 'text', content: 'hi' }] }, + ], + data: {}, + } + const response = await request.post('/api/chat', { + data: oldBody, + headers: { 'Content-Type': 'application/json' }, + }) + expect(response.status()).toBe(400) + const body = await response.text() + 
expect(body).toMatch(/AG-UI|RunAgentInput|migration/i) +})