From e11829586a3103cc114246507c275915e92ffd18 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Mon, 20 Apr 2026 15:00:56 +0200 Subject: [PATCH 1/5] feat: dual ESM+CJS builds and toJSONResponse / fetchJSON utilities MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes #308 and #309. - @tanstack/ai, @tanstack/ai-client, @tanstack/ai-event-client now emit both dist/esm/*.js and dist/cjs/*.cjs with matching .d.cts files. package.json exports gained nested import/require conditions plus a `main` field so Metro / Expo / other CJS-only resolvers can find the subpath exports (`./adapters`, `./middlewares`, etc.). - New toJSONResponse(stream, init?) on @tanstack/ai: drains the stream and returns a JSON-array Response. For runtimes that can't stream ReadableStream bodies (Expo's @expo/server, edge proxies). - New fetchJSON(url, options?) connection adapter on @tanstack/ai-client: the client-side counterpart — fetches the JSON array and replays each chunk into the normal ChatClient pipeline. - Trade-off documented in both: you lose incremental rendering; use SSE / HTTP-stream responses when the runtime supports them. 
--- .changeset/cjs-output-and-json-response.md | 9 +++ packages/typescript/ai-client/package.json | 11 ++- .../ai-client/src/connection-adapters.ts | 75 +++++++++++++++++++ packages/typescript/ai-client/src/index.ts | 1 + packages/typescript/ai-client/vite.config.ts | 2 +- .../typescript/ai-event-client/package.json | 11 ++- .../typescript/ai-event-client/vite.config.ts | 2 +- packages/typescript/ai/package.json | 31 ++++++-- packages/typescript/ai/src/index.ts | 1 + .../typescript/ai/src/stream-to-response.ts | 48 ++++++++++++ .../ai/tests/stream-to-response.test.ts | 73 ++++++++++++++++++ packages/typescript/ai/vite.config.ts | 2 +- 12 files changed, 253 insertions(+), 13 deletions(-) create mode 100644 .changeset/cjs-output-and-json-response.md diff --git a/.changeset/cjs-output-and-json-response.md b/.changeset/cjs-output-and-json-response.md new file mode 100644 index 000000000..389033ca4 --- /dev/null +++ b/.changeset/cjs-output-and-json-response.md @@ -0,0 +1,9 @@ +--- +'@tanstack/ai': minor +'@tanstack/ai-client': minor +'@tanstack/ai-event-client': patch +--- + +**Dual ESM + CJS output.** `@tanstack/ai`, `@tanstack/ai-client`, and `@tanstack/ai-event-client` now ship both ESM and CJS builds with type-aware dual `exports` maps (`import` → `./dist/esm/*.js`, `require` → `./dist/cjs/*.cjs`), plus a `main` field pointing at CJS. Fixes Metro / Expo / CJS-only resolvers that previously couldn't find `@tanstack/ai/adapters` or `@tanstack/ai-client` because the packages were ESM-only (#308). + +**New `toJSONResponse(stream, init?)` on `@tanstack/ai`.** Drains the chat stream fully and returns a JSON-array `Response` with `Content-Type: application/json`. Use on server runtimes that can't emit `ReadableStream` responses (Expo's `@expo/server`, some edge proxies). Pair with the new `fetchJSON(url, options?)` connection adapter on `@tanstack/ai-client` — it fetches the array and replays each chunk into the normal `ChatClient` pipeline. 
Trade-off: no incremental rendering (every chunk arrives at once when the request resolves). Closes #309. diff --git a/packages/typescript/ai-client/package.json b/packages/typescript/ai-client/package.json index 38330616e..939d6e453 100644 --- a/packages/typescript/ai-client/package.json +++ b/packages/typescript/ai-client/package.json @@ -18,12 +18,19 @@ "streaming" ], "type": "module", + "main": "./dist/cjs/index.cjs", "module": "./dist/esm/index.js", "types": "./dist/esm/index.d.ts", "exports": { ".": { - "types": "./dist/esm/index.d.ts", - "import": "./dist/esm/index.js" + "import": { + "types": "./dist/esm/index.d.ts", + "default": "./dist/esm/index.js" + }, + "require": { + "types": "./dist/cjs/index.d.cts", + "default": "./dist/cjs/index.cjs" + } } }, "files": [ diff --git a/packages/typescript/ai-client/src/connection-adapters.ts b/packages/typescript/ai-client/src/connection-adapters.ts index 91d63a146..a7bd32673 100644 --- a/packages/typescript/ai-client/src/connection-adapters.ts +++ b/packages/typescript/ai-client/src/connection-adapters.ts @@ -424,6 +424,81 @@ export function fetchHttpStream( } } +/** + * Create a JSON-array connection adapter for server runtimes that cannot + * stream `ReadableStream` responses (e.g. Expo's `@expo/server`, certain + * edge proxies). Pair with `toJSONResponse(stream)` on the server: the + * server drains the chat stream fully, JSON-serialises the collected + * chunks into an array, and this adapter fetches the array and replays + * each chunk one-by-one into the normal client pipeline. + * + * Trade-off: you lose incremental rendering — the UI sees every chunk + * only after the request resolves. Use SSE/HTTP-stream adapters when the + * runtime supports them. + * + * @param url - The API endpoint URL (or a function that returns the URL) + * @param options - Fetch options (headers, credentials, body, etc.) 
or a function that returns options (can be async) + * @returns A connection adapter for JSON-array responses + * + * @example + * ```typescript + * // Expo / RN client that hits an Expo API route returning toJSONResponse(stream) + * const connection = fetchJSON('/api/chat') + * + * const client = new ChatClient({ connection }) + * ``` + */ +export function fetchJSON( + url: string | (() => string), + options: + | FetchConnectionOptions + | (() => FetchConnectionOptions | Promise) = {}, +): ConnectConnectionAdapter { + return { + async *connect(messages, data, abortSignal) { + const resolvedUrl = typeof url === 'function' ? url() : url + const resolvedOptions = + typeof options === 'function' ? await options() : options + + const requestHeaders: Record = { + 'Content-Type': 'application/json', + ...mergeHeaders(resolvedOptions.headers), + } + + const requestBody = { + messages, + data, + ...resolvedOptions.body, + } + + const fetchClient = resolvedOptions.fetchClient ?? fetch + const response = await fetchClient(resolvedUrl, { + method: 'POST', + headers: requestHeaders, + body: JSON.stringify(requestBody), + credentials: resolvedOptions.credentials || 'same-origin', + signal: abortSignal || resolvedOptions.signal, + }) + + if (!response.ok) { + throw new Error( + `HTTP error! status: ${response.status} ${response.statusText}`, + ) + } + + const payload = (await response.json()) as unknown + if (!Array.isArray(payload)) { + throw new Error( + 'fetchJSON: expected response body to be a JSON array of StreamChunks. 
Did you forget to use `toJSONResponse(stream)` on the server?', + ) + } + for (const chunk of payload) { + yield chunk as StreamChunk + } + }, + } +} + /** * Create a direct stream connection adapter (for server functions or direct streams) * diff --git a/packages/typescript/ai-client/src/index.ts b/packages/typescript/ai-client/src/index.ts index 93654ba66..6f3e90028 100644 --- a/packages/typescript/ai-client/src/index.ts +++ b/packages/typescript/ai-client/src/index.ts @@ -54,6 +54,7 @@ export type { export { fetchServerSentEvents, fetchHttpStream, + fetchJSON, stream, rpcStream, type ConnectConnectionAdapter, diff --git a/packages/typescript/ai-client/vite.config.ts b/packages/typescript/ai-client/vite.config.ts index 77bcc2e60..c87275f3e 100644 --- a/packages/typescript/ai-client/vite.config.ts +++ b/packages/typescript/ai-client/vite.config.ts @@ -31,6 +31,6 @@ export default mergeConfig( tanstackViteConfig({ entry: ['./src/index.ts'], srcDir: './src', - cjs: false, + cjs: true, }), ) diff --git a/packages/typescript/ai-event-client/package.json b/packages/typescript/ai-event-client/package.json index ca860bfb3..48711de78 100644 --- a/packages/typescript/ai-event-client/package.json +++ b/packages/typescript/ai-event-client/package.json @@ -10,12 +10,19 @@ "directory": "packages/typescript/ai-event-client" }, "type": "module", + "main": "./dist/cjs/index.cjs", "module": "./dist/esm/index.js", "types": "./dist/esm/index.d.ts", "exports": { ".": { - "types": "./dist/esm/index.d.ts", - "import": "./dist/esm/index.js" + "import": { + "types": "./dist/esm/index.d.ts", + "default": "./dist/esm/index.js" + }, + "require": { + "types": "./dist/cjs/index.d.cts", + "default": "./dist/cjs/index.cjs" + } } }, "sideEffects": false, diff --git a/packages/typescript/ai-event-client/vite.config.ts b/packages/typescript/ai-event-client/vite.config.ts index 77bcc2e60..c87275f3e 100644 --- a/packages/typescript/ai-event-client/vite.config.ts +++ 
b/packages/typescript/ai-event-client/vite.config.ts @@ -31,6 +31,6 @@ export default mergeConfig( tanstackViteConfig({ entry: ['./src/index.ts'], srcDir: './src', - cjs: false, + cjs: true, }), ) diff --git a/packages/typescript/ai/package.json b/packages/typescript/ai/package.json index 1eea2b8b7..f7495342c 100644 --- a/packages/typescript/ai/package.json +++ b/packages/typescript/ai/package.json @@ -10,20 +10,39 @@ "directory": "packages/typescript/ai" }, "type": "module", + "main": "./dist/cjs/index.cjs", "module": "./dist/esm/index.js", "types": "./dist/esm/index.d.ts", "exports": { ".": { - "types": "./dist/esm/index.d.ts", - "import": "./dist/esm/index.js" + "import": { + "types": "./dist/esm/index.d.ts", + "default": "./dist/esm/index.js" + }, + "require": { + "types": "./dist/cjs/index.d.cts", + "default": "./dist/cjs/index.cjs" + } }, "./adapters": { - "types": "./dist/esm/activities/index.d.ts", - "import": "./dist/esm/activities/index.js" + "import": { + "types": "./dist/esm/activities/index.d.ts", + "default": "./dist/esm/activities/index.js" + }, + "require": { + "types": "./dist/cjs/activities/index.d.cts", + "default": "./dist/cjs/activities/index.cjs" + } }, "./middlewares": { - "types": "./dist/esm/middlewares/index.d.ts", - "import": "./dist/esm/middlewares/index.js" + "import": { + "types": "./dist/esm/middlewares/index.d.ts", + "default": "./dist/esm/middlewares/index.js" + }, + "require": { + "types": "./dist/cjs/middlewares/index.d.cts", + "default": "./dist/cjs/middlewares/index.cjs" + } } }, "sideEffects": false, diff --git a/packages/typescript/ai/src/index.ts b/packages/typescript/ai/src/index.ts index 34e1b5922..3f40b1422 100644 --- a/packages/typescript/ai/src/index.ts +++ b/packages/typescript/ai/src/index.ts @@ -58,6 +58,7 @@ export { toServerSentEventsResponse, toHttpStream, toHttpResponse, + toJSONResponse, } from './stream-to-response' // Tool call management diff --git a/packages/typescript/ai/src/stream-to-response.ts 
b/packages/typescript/ai/src/stream-to-response.ts index 2f83bc017..8da077d00 100644 --- a/packages/typescript/ai/src/stream-to-response.ts +++ b/packages/typescript/ai/src/stream-to-response.ts @@ -250,3 +250,51 @@ export function toHttpResponse( ...init, }) } + +/** + * Drain a StreamChunk async iterable fully, then return the collected chunks + * as a single JSON-array `Response`. + * + * Use this when the target runtime does not support streaming + * `ReadableStream` responses — for example Expo's `@expo/server` runtime, + * Vercel Edge/Node hybrids behind certain proxies, or Cloudflare setups + * without streaming enabled. The consumer pairs with + * `fetchJSON` on the client, which decodes the array and yields each + * chunk back into the normal streaming pipeline — so the on-screen UX + * becomes "render everything at once when the request resolves" instead + * of incremental streaming, but the rest of the chat pipeline is unchanged. + * + * Trade-off: you lose the incremental rendering. Use only when you can't + * ship SSE / HTTP-stream responses. + * + * @param stream - AsyncIterable of StreamChunks from chat() + * @param init - Optional Response initialization options (including `abortController`) + * @returns Response with `Content-Type: application/json` containing an array of StreamChunks + * + * @example + * ```typescript + * // Expo API route where streaming responses aren't supported + * export async function POST(request: Request) { + * const stream = chat({ adapter: openaiText(), messages: [...] }) + * return toJSONResponse(stream) + * } + * ``` + */ +export async function toJSONResponse( + stream: AsyncIterable, + init?: ResponseInit & { abortController?: AbortController }, +): Promise { + const { abortController, headers, ...rest } = init ?? 
{} + const chunks: Array = [] + try { + for await (const chunk of stream) { + chunks.push(chunk) + } + } catch (error) { + abortController?.abort() + throw error + } + const merged = new Headers(headers) + if (!merged.has('Content-Type')) merged.set('Content-Type', 'application/json') + return new Response(JSON.stringify(chunks), { ...rest, headers: merged }) +} diff --git a/packages/typescript/ai/tests/stream-to-response.test.ts b/packages/typescript/ai/tests/stream-to-response.test.ts index f57425fb0..d454ddbbc 100644 --- a/packages/typescript/ai/tests/stream-to-response.test.ts +++ b/packages/typescript/ai/tests/stream-to-response.test.ts @@ -2,6 +2,7 @@ import { describe, it, expect, vi } from 'vitest' import { toServerSentEventsStream, toServerSentEventsResponse, + toJSONResponse, } from '../src/stream-to-response' import type { StreamChunk } from '../src/types' @@ -870,3 +871,75 @@ describe('SSE Round-Trip (Encode → Decode)', () => { ) }) }) + +describe('toJSONResponse', () => { + it('drains the stream and returns a JSON-array Response', async () => { + const chunks: Array> = [ + { + type: 'RUN_STARTED', + runId: 'r1', + model: 'test', + timestamp: 1, + }, + { + type: 'TEXT_MESSAGE_CONTENT', + messageId: 'm1', + model: 'test', + timestamp: 2, + delta: 'Hello', + content: 'Hello', + }, + { + type: 'RUN_FINISHED', + runId: 'r1', + model: 'test', + timestamp: 3, + }, + ] + const response = await toJSONResponse(createMockStream(chunks)) + + expect(response.status).toBe(200) + expect(response.headers.get('Content-Type')).toBe('application/json') + expect(await response.json()).toEqual(chunks) + }) + + it('defers to caller-provided headers and preserves extra init', async () => { + const response = await toJSONResponse(createMockStream([]), { + status: 201, + headers: { 'X-Custom': '1' }, + }) + + expect(response.status).toBe(201) + expect(response.headers.get('X-Custom')).toBe('1') + expect(response.headers.get('Content-Type')).toBe('application/json') + }) + + 
it('does not override an explicit Content-Type', async () => { + const response = await toJSONResponse(createMockStream([]), { + headers: { 'Content-Type': 'application/vnd.tanstack-ai+json' }, + }) + + expect(response.headers.get('Content-Type')).toBe( + 'application/vnd.tanstack-ai+json', + ) + }) + + it('aborts the supplied controller and rethrows if the upstream errors', async () => { + const abortController = new AbortController() + const abortSpy = vi.spyOn(abortController, 'abort') + async function* failing(): AsyncGenerator { + yield { + type: 'RUN_STARTED', + runId: 'r1', + model: 'test', + timestamp: 1, + } as StreamChunk + throw new Error('upstream failure') + } + + await expect(toJSONResponse(failing(), { abortController })).rejects.toThrow( + 'upstream failure', + ) + expect(abortSpy).toHaveBeenCalledOnce() + }) +}) diff --git a/packages/typescript/ai/vite.config.ts b/packages/typescript/ai/vite.config.ts index 2a28101da..3ea666614 100644 --- a/packages/typescript/ai/vite.config.ts +++ b/packages/typescript/ai/vite.config.ts @@ -35,6 +35,6 @@ export default mergeConfig( './src/middlewares/index.ts', ], srcDir: './src', - cjs: false, + cjs: true, }), ) From 1bbc932081ab497eecc95ae9f86063d89d4fc56f Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Mon, 20 Apr 2026 13:02:14 +0000 Subject: [PATCH 2/5] ci: apply automated fixes --- packages/typescript/ai/src/stream-to-response.ts | 3 ++- packages/typescript/ai/tests/stream-to-response.test.ts | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/packages/typescript/ai/src/stream-to-response.ts b/packages/typescript/ai/src/stream-to-response.ts index 8da077d00..38a280324 100644 --- a/packages/typescript/ai/src/stream-to-response.ts +++ b/packages/typescript/ai/src/stream-to-response.ts @@ -295,6 +295,7 @@ export async function toJSONResponse( throw error } const merged = new Headers(headers) - if (!merged.has('Content-Type')) 
merged.set('Content-Type', 'application/json') + if (!merged.has('Content-Type')) + merged.set('Content-Type', 'application/json') return new Response(JSON.stringify(chunks), { ...rest, headers: merged }) } diff --git a/packages/typescript/ai/tests/stream-to-response.test.ts b/packages/typescript/ai/tests/stream-to-response.test.ts index d454ddbbc..d08c677ea 100644 --- a/packages/typescript/ai/tests/stream-to-response.test.ts +++ b/packages/typescript/ai/tests/stream-to-response.test.ts @@ -937,9 +937,9 @@ describe('toJSONResponse', () => { throw new Error('upstream failure') } - await expect(toJSONResponse(failing(), { abortController })).rejects.toThrow( - 'upstream failure', - ) + await expect( + toJSONResponse(failing(), { abortController }), + ).rejects.toThrow('upstream failure') expect(abortSpy).toHaveBeenCalledOnce() }) }) From ee3f393cdd95756f5e7bc889e8adf49fb601cff0 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Mon, 20 Apr 2026 16:12:36 +0200 Subject: [PATCH 3/5] docs: add React Native & Expo guide + toJSONResponse / fetchJSON references MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Serves three personas: Expo/RN builders hitting streaming-response crashes, builders on other non-streaming runtimes (edge proxies, legacy serverless), and evaluators checking whether TanStack AI supports RN/Expo. - New journey page at docs/chat/non-streaming-runtimes.md titled 'React Native & Expo'. A → B: Expo API route crashing on streaming response → working chat via toJSONResponse + fetchJSON. - Cross-linked from chat/streaming.md (callout near toServerSentEventsResponse) and chat/connection-adapters.md (new 'JSON Array (non-streaming runtimes)' subsection). - Added the new entries to the API references: toJSONResponse in docs/api/ai.md and fetchJSON in docs/api/ai-client.md, each pointing back to the walkthrough. 
- Registered the new page in docs/config.json under 'Chat & Streaming', sequenced right after Connection Adapters. --- docs/api/ai-client.md | 16 +++++ docs/api/ai.md | 26 ++++++++ docs/chat/connection-adapters.md | 15 +++++ docs/chat/non-streaming-runtimes.md | 95 +++++++++++++++++++++++++++++ docs/chat/streaming.md | 2 + docs/config.json | 4 ++ 6 files changed, 158 insertions(+) create mode 100644 docs/chat/non-streaming-runtimes.md diff --git a/docs/api/ai-client.md b/docs/api/ai-client.md index 379e58589..87ed954ae 100644 --- a/docs/api/ai-client.md +++ b/docs/api/ai-client.md @@ -166,6 +166,22 @@ import { fetchHttpStream } from "@tanstack/ai-client"; const adapter = fetchHttpStream("/api/chat"); ``` +### `fetchJSON(url, options?)` + +Creates a connection adapter for non-streaming runtimes — pair with [`toJSONResponse`](./ai#tojsonresponsestream-init) on the server. The adapter POSTs `{ messages, data }`, expects a `StreamChunk[]` JSON body, and replays each chunk into the normal `ChatClient` pipeline. + +```typescript +import { fetchJSON } from "@tanstack/ai-client"; + +const adapter = fetchJSON("/api/chat", { + headers: { + Authorization: "Bearer token", + }, +}); +``` + +Use this on Expo / React Native / edge proxies that can't emit `ReadableStream` responses. Trade-off: no incremental rendering — the UI sees every chunk at once when the request resolves. Full walkthrough: [React Native & Expo](../chat/non-streaming-runtimes). + ### `stream(connectFn)` Creates a custom connection adapter. diff --git a/docs/api/ai.md b/docs/api/ai.md index da0970d14..b76184107 100644 --- a/docs/api/ai.md +++ b/docs/api/ai.md @@ -191,6 +191,32 @@ return toServerSentEventsResponse(stream); A `Response` object suitable for HTTP endpoints with SSE headers (`Content-Type: text/event-stream`, `Cache-Control: no-cache`, `Connection: keep-alive`). +## `toJSONResponse(stream, init?)` + +Drains the whole stream, then returns a JSON-array `Response` containing every `StreamChunk`. 
For runtimes that can't emit `ReadableStream` bodies (Expo's `@expo/server`, some edge proxies). Pair with [`fetchJSON`](./ai-client#fetchjsonurl-options) on the client. + +```typescript +import { chat, toJSONResponse } from "@tanstack/ai"; +import { openaiText } from "@tanstack/ai-openai"; + +const stream = chat({ + adapter: openaiText("gpt-5.2"), + messages: [...], +}); +return toJSONResponse(stream); +``` + +### Parameters + +- `stream` - Async iterable of `StreamChunk` +- `init?` - Optional ResponseInit options (including `abortController`). Caller-provided headers are preserved; `Content-Type` defaults to `application/json`. + +### Returns + +A `Promise` with the stringified `StreamChunk[]` as the body. If the upstream stream throws mid-drain, a provided `abortController` is aborted and the error propagates. + +> **Trade-off:** no incremental rendering — the UI sees every chunk at once when the request resolves. Use SSE / HTTP-stream responses when the runtime supports them. See [React Native & Expo](../chat/non-streaming-runtimes) for the full walkthrough. + ## `maxIterations(count)` Creates an agent loop strategy that limits iterations. 
diff --git a/docs/chat/connection-adapters.md b/docs/chat/connection-adapters.md index 0c4460b2b..cfdf22a32 100644 --- a/docs/chat/connection-adapters.md +++ b/docs/chat/connection-adapters.md @@ -81,6 +81,21 @@ const { messages } = useChat({ }); ``` +### JSON Array (non-streaming runtimes) + +For runtimes that can't emit `ReadableStream` responses — Expo / React Native, some edge proxies, certain legacy serverless runtimes — pair `fetchJSON` on the client with [`toJSONResponse`](../api/ai#tojsonresponsestream-init) on the server: + +```typescript +import { useChat } from "@tanstack/ai-react"; +import { fetchJSON } from "@tanstack/ai-client"; + +const { messages } = useChat({ + connection: fetchJSON("/api/chat"), +}); +``` + +The server drains the whole chat stream before responding, and this adapter replays each chunk into the normal `ChatClient` pipeline. Trade-off: no incremental rendering — the UI sees every chunk at once when the request resolves. See [React Native & Expo](./non-streaming-runtimes) for the full walkthrough. + ## Custom Adapters For specialized use cases, you can create custom adapters to meet specific protocols or requirements: diff --git a/docs/chat/non-streaming-runtimes.md b/docs/chat/non-streaming-runtimes.md new file mode 100644 index 000000000..32bb5c7f7 --- /dev/null +++ b/docs/chat/non-streaming-runtimes.md @@ -0,0 +1,95 @@ +--- +title: React Native & Expo +id: non-streaming-runtimes +order: 4 +description: "Run TanStack AI on React Native, Expo, and other runtimes that can't emit ReadableStream responses — using toJSONResponse on the server and fetchJSON on the client." 
+keywords: + - tanstack ai + - react native + - expo + - expo router + - metro bundler + - non-streaming + - toJSONResponse + - fetchJSON + - edge runtime +--- + +You have a React Native or Expo app and you want to add AI chat, but the usual `toServerSentEventsResponse()` helper crashes on Expo's server runtime with: + +``` +TypeError: Cannot read properties of undefined (reading 'statusText') +``` + +…and Metro refuses to resolve `@tanstack/ai/adapters` at all. By the end of this guide, you'll have a working chat flow on Expo/React Native using a JSON-array fallback path. The same approach works for any deployment target that can't stream `ReadableStream` responses (some edge proxies, legacy serverless runtimes, etc.). + +## What's actually going wrong + +Two separate problems show up on React Native / Expo: + +1. **Module resolution.** `@tanstack/ai` and `@tanstack/ai-client` ship dual ESM + CJS builds with `main`/`module`/`exports` all wired up. If your version is new enough, Metro resolves them out of the box. If you're stuck on an older version, upgrade — older releases were ESM-only and Metro can't consume them. + +2. **Response shape.** Expo's `@expo/server` runtime (and a few edge proxies) can't emit a `ReadableStream` body, which is what `toServerSentEventsResponse` and `toHttpResponse` return. The request silently fails on the client side and `isLoading` flips back to `false` immediately. + +The fix for (2) is to drain the chat stream on the server, send the collected chunks as a single JSON array, and replay them on the client. You lose incremental rendering — the UI sees every chunk at once when the request resolves — but every other piece of the chat pipeline keeps working as-is. + +## Step 1: Return a JSON-array response on the server + +Swap `toServerSentEventsResponse` for `toJSONResponse` in your API route. 
On Expo Router: + +```typescript +// app/api/chat+api.ts +import { chat, toJSONResponse } from "@tanstack/ai"; +import { openaiText } from "@tanstack/ai-openai"; + +export async function POST(request: Request) { + const { messages } = await request.json(); + + const stream = chat({ + adapter: openaiText("gpt-5.2"), + messages, + }); + + return toJSONResponse(stream); +} +``` + +`toJSONResponse` iterates the whole stream, collects each `StreamChunk` into an array, and returns a plain `Response` with `Content-Type: application/json`. It accepts the same `init` options as `toServerSentEventsResponse` (including `abortController`) and honours any `Content-Type` you pass in `headers`. + +## Step 2: Use `fetchJSON` as the connection adapter on the client + +Swap `fetchServerSentEvents` for `fetchJSON` in your `useChat` call: + +```typescript +import { useChat } from "@tanstack/ai-react"; +import { fetchJSON } from "@tanstack/ai-client"; + +export function ChatScreen() { + const { messages, sendMessage, isLoading } = useChat({ + connection: fetchJSON("/api/chat"), + }); + + // messages and isLoading behave identically to the streaming path — + // they just update all at once when the request resolves. + return ; +} +``` + +`fetchJSON` accepts the same `url` + `options` signature as the other connection adapters (static string or function, headers, credentials, custom `fetchClient`, extra body, abort signal). It POSTs the usual `{ messages, data }` body, decodes the response as a `StreamChunk[]`, and replays each chunk into the normal `ChatClient` pipeline — tool calls, approvals, thinking content, errors all behave the same way they do with SSE. + +## Step 3: Expect no incremental rendering + +The one thing you give up: the UI won't update character-by-character. The request hangs until the server finishes the whole run, then the full message — including tool calls, results, and the final assistant turn — appears at once. 
+ +If this becomes a problem, the answer is to move to a runtime that supports streaming responses (Hono on Node, Next.js, TanStack Start, a real SSE endpoint proxied through a CDN that doesn't buffer) rather than to work around the limitation further. The JSON-array path is a pragmatic escape hatch, not the intended happy path. + +## Going back to streaming when you can + +If you later deploy your server code to a runtime that *does* support streaming, you only need to change two call sites — `toJSONResponse` → `toServerSentEventsResponse` and `fetchJSON` → `fetchServerSentEvents`. Everything downstream (messages, tool calls, approvals, `useChat` state, error handling) is identical between the two paths, so there's no cleanup to chase through the app. + +## Next Steps + +- [Streaming](./streaming) — the normal incremental-rendering path +- [Connection Adapters](./connection-adapters) — full list of client-side adapters, including `fetchJSON` +- [API Reference: `toJSONResponse`](../api/ai#tojsonresponsestream-init) — server-side helper reference +- [API Reference: `fetchJSON`](../api/ai-client#fetchjsonurl-options) — client-side adapter reference diff --git a/docs/chat/streaming.md b/docs/chat/streaming.md index a11bd2ca2..5a9a2afad 100644 --- a/docs/chat/streaming.md +++ b/docs/chat/streaming.md @@ -55,6 +55,8 @@ export async function POST(request: Request) { } ``` +> **Running on Expo, React Native, or another runtime that can't emit `ReadableStream` responses?** See [React Native & Expo](./non-streaming-runtimes) for the `toJSONResponse` + `fetchJSON` fallback pair. 
+ ## Client-Side Streaming The `useChat` hook automatically handles streaming: diff --git a/docs/config.json b/docs/config.json index 698d18545..fb1357da6 100644 --- a/docs/config.json +++ b/docs/config.json @@ -92,6 +92,10 @@ "label": "Connection Adapters", "to": "chat/connection-adapters" }, + { + "label": "React Native & Expo", + "to": "chat/non-streaming-runtimes" + }, { "label": "Structured Outputs", "to": "chat/structured-outputs" From 3ab68713a6f9c33602f65737cd9d4a9c70b92054 Mon Sep 17 00:00:00 2001 From: Alem Tuzlak Date: Fri, 24 Apr 2026 11:49:33 +0200 Subject: [PATCH 4/5] fix(ai, ai-client): honor abort signal in toJSONResponse + test fetchJSON MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Address CR findings: - toJSONResponse now checks `abortController.signal.aborted` on entry (throws the signal's reason without draining the upstream) and inside the drain loop (breaks early if aborted mid-stream), matching the semantics of toServerSentEventsStream and toHttpStream. Previously the signal was only consulted from the error-path catch handler, so a pre-aborted controller drained the full stream anyway and a mid-drain abort was silently ignored. - Add two new tests covering pre-abort (infinite stream never pulled) and mid-drain abort (bounded pulls after abort fires). - Add 8 fetchJSON tests covering happy path, non-2xx, non-array body with descriptive error, url-as-function, options-as-async-function, options.body merging, custom fetchClient override, and AbortSignal propagation — the adapter previously had zero direct test coverage. 
--- .../tests/connection-adapters.test.ts | 172 ++++++++++++++++++ .../typescript/ai/src/stream-to-response.ts | 19 ++ .../ai/tests/stream-to-response.test.ts | 48 +++++ 3 files changed, 239 insertions(+) diff --git a/packages/typescript/ai-client/tests/connection-adapters.test.ts b/packages/typescript/ai-client/tests/connection-adapters.test.ts index 60c36763a..7817299ed 100644 --- a/packages/typescript/ai-client/tests/connection-adapters.test.ts +++ b/packages/typescript/ai-client/tests/connection-adapters.test.ts @@ -1,6 +1,7 @@ import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest' import { fetchHttpStream, + fetchJSON, fetchServerSentEvents, normalizeConnectionAdapter, rpcStream, @@ -1026,4 +1027,175 @@ describe('connection-adapters', () => { ) }) }) + + describe('fetchJSON', () => { + const jsonOk = (payload: unknown, init: ResponseInit = { status: 200 }) => + ({ + ok: (init.status ?? 200) >= 200 && (init.status ?? 200) < 300, + status: init.status ?? 200, + statusText: init.statusText ?? 
'OK', + json: async () => payload, + }) as unknown as Response + + it('drains a JSON array body into chunks', async () => { + const payload = [ + asChunk({ + type: 'RUN_STARTED', + runId: 'r1', + model: 'test', + timestamp: 1, + }), + asChunk({ + type: 'TEXT_MESSAGE_CONTENT', + messageId: 'm1', + model: 'test', + timestamp: 2, + delta: 'Hi', + content: 'Hi', + }), + asChunk({ + type: 'RUN_FINISHED', + runId: 'r1', + model: 'test', + timestamp: 3, + }), + ] + fetchMock.mockResolvedValue(jsonOk(payload)) + + const adapter = fetchJSON('/api/chat') + const chunks: Array = [] + for await (const chunk of adapter.connect([ + { role: 'user', content: 'Hi' }, + ])) { + chunks.push(chunk) + } + + expect(chunks).toEqual(payload) + }) + + it('throws on a non-2xx response', async () => { + fetchMock.mockResolvedValue( + jsonOk(null, { status: 500, statusText: 'Internal Server Error' }), + ) + + const adapter = fetchJSON('/api/chat') + + await expect(async () => { + for await (const _ of adapter.connect([ + { role: 'user', content: 'x' }, + ])) { + // drain + } + }).rejects.toThrow(/500/) + }) + + it('throws a descriptive error when response body is not an array', async () => { + fetchMock.mockResolvedValue(jsonOk({ message: 'not an array' })) + + const adapter = fetchJSON('/api/chat') + + await expect(async () => { + for await (const _ of adapter.connect([ + { role: 'user', content: 'x' }, + ])) { + // drain + } + }).rejects.toThrow(/toJSONResponse/) + }) + + it('resolves url-as-function at call time', async () => { + fetchMock.mockResolvedValue(jsonOk([])) + + const getUrl = vi.fn(() => '/api/dynamic') + const adapter = fetchJSON(getUrl) + for await (const _ of adapter.connect([ + { role: 'user', content: 'x' }, + ])) { + // drain + } + + expect(getUrl).toHaveBeenCalledOnce() + expect(fetchMock).toHaveBeenCalledWith( + '/api/dynamic', + expect.any(Object), + ) + }) + + it('resolves options-as-async-function at call time', async () => { + fetchMock.mockResolvedValue(jsonOk([])) 
+
+      const getOptions = vi.fn(
+        async () =>
+          ({
+            headers: { 'X-Custom': 'yes' },
+            body: { runId: 'abc' },
+          }) as const,
+      )
+      const adapter = fetchJSON('/api/chat', getOptions)
+      for await (const _ of adapter.connect([
+        { role: 'user', content: 'x' },
+      ])) {
+        // drain
+      }
+
+      expect(getOptions).toHaveBeenCalledOnce()
+      const [, init] = fetchMock.mock.calls[0]!
+      expect(init.headers).toMatchObject({ 'X-Custom': 'yes' })
+      const parsed = JSON.parse(init.body as string) as {
+        runId?: string
+      }
+      expect(parsed.runId).toBe('abc')
+    })
+
+    it('merges options.body into the POST body', async () => {
+      fetchMock.mockResolvedValue(jsonOk([]))
+
+      const adapter = fetchJSON('/api/chat', { body: { extra: 42 } })
+      for await (const _ of adapter.connect(
+        [{ role: 'user', content: 'x' }],
+        { sessionId: 'sess' },
+      )) {
+        // drain
+      }
+
+      const [, init] = fetchMock.mock.calls[0]!
+      const body = JSON.parse(init.body as string) as Record<string, unknown>
+      expect(body).toMatchObject({
+        messages: expect.any(Array),
+        data: { sessionId: 'sess' },
+        extra: 42,
+      })
+    })
+
+    it('honors a custom fetchClient override', async () => {
+      const customFetch = vi.fn().mockResolvedValue(jsonOk([]))
+
+      const adapter = fetchJSON('/api/chat', { fetchClient: customFetch })
+      for await (const _ of adapter.connect([
+        { role: 'user', content: 'x' },
+      ])) {
+        // drain
+      }
+
+      expect(customFetch).toHaveBeenCalledOnce()
+      expect(fetchMock).not.toHaveBeenCalled()
+    })
+
+    it('propagates the abortSignal to fetch', async () => {
+      fetchMock.mockResolvedValue(jsonOk([]))
+      const controller = new AbortController()
+
+      const adapter = fetchJSON('/api/chat')
+      for await (const _ of adapter.connect(
+        [{ role: 'user', content: 'x' }],
+        undefined,
+        controller.signal,
+      )) {
+        // drain
+      }
+
+      const [, init] = fetchMock.mock.calls[0]!
+      expect(init.signal).toBe(controller.signal)
+    })
+  })
 })
diff --git a/packages/typescript/ai/src/stream-to-response.ts b/packages/typescript/ai/src/stream-to-response.ts
index 38a280324..7076a59ee 100644
--- a/packages/typescript/ai/src/stream-to-response.ts
+++ b/packages/typescript/ai/src/stream-to-response.ts
@@ -285,9 +285,28 @@ export async function toJSONResponse(
   init?: ResponseInit & { abortController?: AbortController },
 ): Promise<Response> {
   const { abortController, headers, ...rest } = init ?? {}
+
+  // Honor a pre-aborted signal: don't drain the stream at all, throw the
+  // caller's abort reason (or a synthesized AbortError) so behavior matches
+  // the SSE / HTTP-stream variants, which both short-circuit on aborted.
+  if (abortController?.signal.aborted) {
+    throw (
+      abortController.signal.reason ??
+      new DOMException('The operation was aborted', 'AbortError')
+    )
+  }
+
   const chunks: Array<StreamChunk> = []
   try {
     for await (const chunk of stream) {
+      // Honor mid-drain abort: break out early rather than over-draining an
+      // upstream that may not itself honor the signal.
+      if (abortController?.signal.aborted) {
+        throw (
+          abortController.signal.reason ??
+          new DOMException('The operation was aborted', 'AbortError')
+        )
+      }
       chunks.push(chunk)
     }
   } catch (error) {
diff --git a/packages/typescript/ai/tests/stream-to-response.test.ts b/packages/typescript/ai/tests/stream-to-response.test.ts
index d08c677ea..a6fe0995c 100644
--- a/packages/typescript/ai/tests/stream-to-response.test.ts
+++ b/packages/typescript/ai/tests/stream-to-response.test.ts
@@ -942,4 +942,52 @@ describe('toJSONResponse', () => {
     ).rejects.toThrow('upstream failure')
     expect(abortSpy).toHaveBeenCalledOnce()
   })
+
+  it('throws immediately without draining when abortController is pre-aborted', async () => {
+    const abortController = new AbortController()
+    abortController.abort()
+
+    let pulled = 0
+    async function* infinite(): AsyncGenerator<StreamChunk> {
+      while (true) {
+        pulled++
+        yield {
+          type: 'RUN_STARTED',
+          runId: 'r1',
+          model: 'test',
+          timestamp: 1,
+        } as StreamChunk
+      }
+    }
+
+    await expect(
+      toJSONResponse(infinite(), { abortController }),
+    ).rejects.toThrow()
+    expect(pulled).toBe(0)
+  })
+
+  it('stops draining and throws when aborted mid-stream', async () => {
+    const abortController = new AbortController()
+    let pulled = 0
+    async function* slow(): AsyncGenerator<StreamChunk> {
+      while (true) {
+        pulled++
+        yield {
+          type: 'RUN_STARTED',
+          runId: `r${pulled}`,
+          model: 'test',
+          timestamp: pulled,
+        } as StreamChunk
+        if (pulled === 2) abortController.abort()
+        // Let the microtask queue flush so the signal is observed next iter.
+        await Promise.resolve()
+      }
+    }
+
+    await expect(
+      toJSONResponse(slow(), { abortController }),
+    ).rejects.toThrow()
+    // Bounded: should not have pulled an unbounded number of items after abort.
+ expect(pulled).toBeLessThan(10) + }) }) From 43e59bba49803bb85c211edaecc96cf5fe98afe4 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Fri, 24 Apr 2026 09:50:38 +0000 Subject: [PATCH 5/5] ci: apply automated fixes --- .../tests/connection-adapters.test.ts | 24 ++++++------------- .../ai/tests/stream-to-response.test.ts | 4 +--- 2 files changed, 8 insertions(+), 20 deletions(-) diff --git a/packages/typescript/ai-client/tests/connection-adapters.test.ts b/packages/typescript/ai-client/tests/connection-adapters.test.ts index 7817299ed..6ee9edb18 100644 --- a/packages/typescript/ai-client/tests/connection-adapters.test.ts +++ b/packages/typescript/ai-client/tests/connection-adapters.test.ts @@ -1108,17 +1108,12 @@ describe('connection-adapters', () => { const getUrl = vi.fn(() => '/api/dynamic') const adapter = fetchJSON(getUrl) - for await (const _ of adapter.connect([ - { role: 'user', content: 'x' }, - ])) { + for await (const _ of adapter.connect([{ role: 'user', content: 'x' }])) { // drain } expect(getUrl).toHaveBeenCalledOnce() - expect(fetchMock).toHaveBeenCalledWith( - '/api/dynamic', - expect.any(Object), - ) + expect(fetchMock).toHaveBeenCalledWith('/api/dynamic', expect.any(Object)) }) it('resolves options-as-async-function at call time', async () => { @@ -1132,9 +1127,7 @@ describe('connection-adapters', () => { }) as const, ) const adapter = fetchJSON('/api/chat', getOptions) - for await (const _ of adapter.connect([ - { role: 'user', content: 'x' }, - ])) { + for await (const _ of adapter.connect([{ role: 'user', content: 'x' }])) { // drain } @@ -1151,10 +1144,9 @@ describe('connection-adapters', () => { fetchMock.mockResolvedValue(jsonOk([])) const adapter = fetchJSON('/api/chat', { body: { extra: 42 } }) - for await (const _ of adapter.connect( - [{ role: 'user', content: 'x' }], - { sessionId: 'sess' }, - )) { + for await (const _ of adapter.connect([{ role: 'user', content: 'x' }], { + 
sessionId: 'sess', + })) { // drain } @@ -1171,9 +1163,7 @@ describe('connection-adapters', () => { const customFetch = vi.fn().mockResolvedValue(jsonOk([])) const adapter = fetchJSON('/api/chat', { fetchClient: customFetch }) - for await (const _ of adapter.connect([ - { role: 'user', content: 'x' }, - ])) { + for await (const _ of adapter.connect([{ role: 'user', content: 'x' }])) { // drain } diff --git a/packages/typescript/ai/tests/stream-to-response.test.ts b/packages/typescript/ai/tests/stream-to-response.test.ts index a6fe0995c..8c304aebf 100644 --- a/packages/typescript/ai/tests/stream-to-response.test.ts +++ b/packages/typescript/ai/tests/stream-to-response.test.ts @@ -984,9 +984,7 @@ describe('toJSONResponse', () => { } } - await expect( - toJSONResponse(slow(), { abortController }), - ).rejects.toThrow() + await expect(toJSONResponse(slow(), { abortController })).rejects.toThrow() // Bounded: should not have pulled an unbounded number of items after abort. expect(pulled).toBeLessThan(10) })