diff --git a/.changeset/cjs-output-and-json-response.md b/.changeset/cjs-output-and-json-response.md new file mode 100644 index 000000000..389033ca4 --- /dev/null +++ b/.changeset/cjs-output-and-json-response.md @@ -0,0 +1,9 @@ +--- +'@tanstack/ai': minor +'@tanstack/ai-client': minor +'@tanstack/ai-event-client': patch +--- + +**Dual ESM + CJS output.** `@tanstack/ai`, `@tanstack/ai-client`, and `@tanstack/ai-event-client` now ship both ESM and CJS builds with type-aware dual `exports` maps (`import` → `./dist/esm/*.js`, `require` → `./dist/cjs/*.cjs`), plus a `main` field pointing at CJS. Fixes Metro / Expo / CJS-only resolvers that previously couldn't find `@tanstack/ai/adapters` or `@tanstack/ai-client` because the packages were ESM-only (#308). + +**New `toJSONResponse(stream, init?)` on `@tanstack/ai`.** Drains the chat stream fully and returns a JSON-array `Response` with `Content-Type: application/json`. Use on server runtimes that can't emit `ReadableStream` responses (Expo's `@expo/server`, some edge proxies). Pair with the new `fetchJSON(url, options?)` connection adapter on `@tanstack/ai-client` — it fetches the array and replays each chunk into the normal `ChatClient` pipeline. Trade-off: no incremental rendering (every chunk arrives at once when the request resolves). Closes #309. diff --git a/docs/api/ai-client.md b/docs/api/ai-client.md index 379e58589..87ed954ae 100644 --- a/docs/api/ai-client.md +++ b/docs/api/ai-client.md @@ -166,6 +166,22 @@ import { fetchHttpStream } from "@tanstack/ai-client"; const adapter = fetchHttpStream("/api/chat"); ``` +### `fetchJSON(url, options?)` + +Creates a connection adapter for non-streaming runtimes — pair with [`toJSONResponse`](./ai#tojsonresponsestream-init) on the server. The adapter POSTs `{ messages, data }`, expects a `StreamChunk[]` JSON body, and replays each chunk into the normal `ChatClient` pipeline. + +```typescript +import { fetchJSON } from "@tanstack/ai-client"; + +const adapter = fetchJSON("/api/chat", { + headers: { + Authorization: "Bearer token", + }, +}); +``` + +Use this on Expo / React Native / edge proxies that can't emit `ReadableStream` responses. Trade-off: no incremental rendering — the UI sees every chunk at once when the request resolves. Full walkthrough: [React Native & Expo](../chat/non-streaming-runtimes). + ### `stream(connectFn)` Creates a custom connection adapter. diff --git a/docs/api/ai.md b/docs/api/ai.md index da0970d14..b76184107 100644 --- a/docs/api/ai.md +++ b/docs/api/ai.md @@ -191,6 +191,32 @@ return toServerSentEventsResponse(stream); A `Response` object suitable for HTTP endpoints with SSE headers (`Content-Type: text/event-stream`, `Cache-Control: no-cache`, `Connection: keep-alive`). +## `toJSONResponse(stream, init?)` + +Drains the whole stream, then returns a JSON-array `Response` containing every `StreamChunk`. For runtimes that can't emit `ReadableStream` bodies (Expo's `@expo/server`, some edge proxies). Pair with [`fetchJSON`](./ai-client#fetchjsonurl-options) on the client. + +```typescript +import { chat, toJSONResponse } from "@tanstack/ai"; +import { openaiText } from "@tanstack/ai-openai"; + +const stream = chat({ + adapter: openaiText("gpt-5.2"), + messages: [...], +}); +return toJSONResponse(stream); +``` + +### Parameters + +- `stream` - Async iterable of `StreamChunk` +- `init?` - Optional ResponseInit options (including `abortController`). Caller-provided headers are preserved; `Content-Type` defaults to `application/json`. 
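+
+For example, to pass extra headers and wire up an abort controller (a minimal sketch; the header shown is illustrative, not required by the API):
+
+```typescript
+const abortController = new AbortController();
+
+// Headers you pass are kept; Content-Type is only added when missing.
+return toJSONResponse(stream, {
+  abortController,
+  headers: { "Cache-Control": "no-store" },
+});
+```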
+
+### Returns
+
+A `Promise<Response>` with the stringified `StreamChunk[]` as the body. If the upstream stream throws mid-drain, a provided `abortController` is aborted and the error propagates.
+
+> **Trade-off:** no incremental rendering — the UI sees every chunk at once when the request resolves. Use SSE / HTTP-stream responses when the runtime supports them. See [React Native & Expo](../chat/non-streaming-runtimes) for the full walkthrough.
+
 ## `maxIterations(count)`
 
 Creates an agent loop strategy that limits iterations.
diff --git a/docs/chat/connection-adapters.md b/docs/chat/connection-adapters.md
index 0c4460b2b..cfdf22a32 100644
--- a/docs/chat/connection-adapters.md
+++ b/docs/chat/connection-adapters.md
@@ -81,6 +81,21 @@ const { messages } = useChat({
 });
 ```
 
+### JSON Array (non-streaming runtimes)
+
+For runtimes that can't emit `ReadableStream` responses — Expo / React Native, some edge proxies, certain legacy serverless runtimes — pair `fetchJSON` on the client with [`toJSONResponse`](../api/ai#tojsonresponsestream-init) on the server:
+
+```typescript
+import { useChat } from "@tanstack/ai-react";
+import { fetchJSON } from "@tanstack/ai-client";
+
+const { messages } = useChat({
+  connection: fetchJSON("/api/chat"),
+});
+```
+
+The server drains the whole chat stream before responding, and this adapter replays each chunk into the normal `ChatClient` pipeline. Trade-off: no incremental rendering — the UI sees every chunk at once when the request resolves. See [React Native & Expo](./non-streaming-runtimes) for the full walkthrough.
+
 ## Custom Adapters
 
 For specialized use cases, you can create custom adapters to meet specific protocols or requirements:
diff --git a/docs/chat/non-streaming-runtimes.md b/docs/chat/non-streaming-runtimes.md
new file mode 100644
index 000000000..32bb5c7f7
--- /dev/null
+++ b/docs/chat/non-streaming-runtimes.md
@@ -0,0 +1,95 @@
+---
+title: React Native & Expo
+id: non-streaming-runtimes
+order: 4
+description: "Run TanStack AI on React Native, Expo, and other runtimes that can't emit ReadableStream responses — using toJSONResponse on the server and fetchJSON on the client."
+keywords:
+  - tanstack ai
+  - react native
+  - expo
+  - expo router
+  - metro bundler
+  - non-streaming
+  - toJSONResponse
+  - fetchJSON
+  - edge runtime
+---
+
+You have a React Native or Expo app and you want to add AI chat, but the usual `toServerSentEventsResponse()` helper crashes on Expo's server runtime with:
+
+```
+TypeError: Cannot read properties of undefined (reading 'statusText')
+```
+
+…and Metro refuses to resolve `@tanstack/ai/adapters` at all. By the end of this guide, you'll have a working chat flow on Expo/React Native using a JSON-array fallback path. The same approach works for any deployment target that can't stream `ReadableStream` responses (some edge proxies, legacy serverless runtimes, etc.).
+
+## What's actually going wrong
+
+Two separate problems show up on React Native / Expo:
+
+1. **Module resolution.** `@tanstack/ai` and `@tanstack/ai-client` ship dual ESM + CJS builds with `main`/`module`/`exports` all wired up. If your version is new enough, Metro resolves them out of the box. If you're stuck on an older version, upgrade — older releases were ESM-only and Metro can't consume them.
+
+2. **Response shape.** Expo's `@expo/server` runtime (and a few edge proxies) can't emit a `ReadableStream` body, which is what `toServerSentEventsResponse` and `toHttpResponse` return. The request silently fails on the client side and `isLoading` flips back to `false` immediately.
+
+The fix for (2) is to drain the chat stream on the server, send the collected chunks as a single JSON array, and replay them on the client. You lose incremental rendering — the UI sees every chunk at once when the request resolves — but every other piece of the chat pipeline keeps working as-is.
+
+## Step 1: Return a JSON-array response on the server
+
+Swap `toServerSentEventsResponse` for `toJSONResponse` in your API route. On Expo Router:
+
+```typescript
+// app/api/chat+api.ts
+import { chat, toJSONResponse } from "@tanstack/ai";
+import { openaiText } from "@tanstack/ai-openai";
+
+export async function POST(request: Request) {
+  const { messages } = await request.json();
+
+  const stream = chat({
+    adapter: openaiText("gpt-5.2"),
+    messages,
+  });
+
+  return toJSONResponse(stream);
+}
+```
+
+`toJSONResponse` iterates the whole stream, collects each `StreamChunk` into an array, and returns a plain `Response` with `Content-Type: application/json`. It accepts the same `init` options as `toServerSentEventsResponse` (including `abortController`) and honours any `Content-Type` you pass in `headers`.
+
+## Step 2: Use `fetchJSON` as the connection adapter on the client
+
+Swap `fetchServerSentEvents` for `fetchJSON` in your `useChat` call:
+
+```typescript
+import { useChat } from "@tanstack/ai-react";
+import { fetchJSON } from "@tanstack/ai-client";
+
+export function ChatScreen() {
+  const { messages, sendMessage, isLoading } = useChat({
+    connection: fetchJSON("/api/chat"),
+  });
+
+  // messages and isLoading behave identically to the streaming path —
+  // they just update all at once when the request resolves.
+  // Render with whatever chat UI you already use; <ChatList> is just a
+  // placeholder component name here.
+  return <ChatList messages={messages} isLoading={isLoading} onSend={sendMessage} />;
+}
+```
+
+`fetchJSON` accepts the same `url` + `options` signature as the other connection adapters (static string or function, headers, credentials, custom `fetchClient`, extra body, abort signal). It POSTs the usual `{ messages, data }` body, decodes the response as a `StreamChunk[]`, and replays each chunk into the normal `ChatClient` pipeline — tool calls, approvals, thinking content, errors all behave the same way they do with SSE.
+
+## Step 3: Expect no incremental rendering
+
+The one thing you give up: the UI won't update character-by-character. The request hangs until the server finishes the whole run, then the full message — including tool calls, results, and the final assistant turn — appears at once.
+
+If this becomes a problem, the answer is to move to a runtime that supports streaming responses (Hono on Node, Next.js, TanStack Start, a real SSE endpoint proxied through a CDN that doesn't buffer) rather than to work around the limitation further. The JSON-array path is a pragmatic escape hatch, not the intended happy path.
+
+## Going back to streaming when you can
+
+If you later deploy your server code to a runtime that *does* support streaming, you only need to change two call sites — `toJSONResponse` → `toServerSentEventsResponse` and `fetchJSON` → `fetchServerSentEvents` (sketched below). Everything downstream (messages, tool calls, approvals, `useChat` state, error handling) is identical between the two paths, so there's no cleanup to chase through the app.
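+
+For reference, the streaming versions of the two call sites from Steps 1 and 2 look like this (a sketch; same route and hook as above, only the helpers change):
+
+```typescript
+// Server: swap the response helper in app/api/chat+api.ts
+import { chat, toServerSentEventsResponse } from "@tanstack/ai";
+// ...build `stream` with chat() exactly as in Step 1...
+return toServerSentEventsResponse(stream);
+
+// Client: swap the connection adapter passed to useChat
+import { fetchServerSentEvents } from "@tanstack/ai-client";
+
+const { messages, sendMessage, isLoading } = useChat({
+  connection: fetchServerSentEvents("/api/chat"),
+});
+```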
+ +## Next Steps + +- [Streaming](./streaming) — the normal incremental-rendering path +- [Connection Adapters](./connection-adapters) — full list of client-side adapters, including `fetchJSON` +- [API Reference: `toJSONResponse`](../api/ai#tojsonresponsestream-init) — server-side helper reference +- [API Reference: `fetchJSON`](../api/ai-client#fetchjsonurl-options) — client-side adapter reference diff --git a/docs/chat/streaming.md b/docs/chat/streaming.md index a11bd2ca2..5a9a2afad 100644 --- a/docs/chat/streaming.md +++ b/docs/chat/streaming.md @@ -55,6 +55,8 @@ export async function POST(request: Request) { } ``` +> **Running on Expo, React Native, or another runtime that can't emit `ReadableStream` responses?** See [React Native & Expo](./non-streaming-runtimes) for the `toJSONResponse` + `fetchJSON` fallback pair. + ## Client-Side Streaming The `useChat` hook automatically handles streaming: diff --git a/docs/config.json b/docs/config.json index f24a5fa0a..882f760d0 100644 --- a/docs/config.json +++ b/docs/config.json @@ -96,6 +96,10 @@ "label": "Connection Adapters", "to": "chat/connection-adapters" }, + { + "label": "React Native & Expo", + "to": "chat/non-streaming-runtimes" + }, { "label": "Structured Outputs", "to": "chat/structured-outputs" diff --git a/packages/typescript/ai-client/package.json b/packages/typescript/ai-client/package.json index 1a87be7ae..bfffa0ee6 100644 --- a/packages/typescript/ai-client/package.json +++ b/packages/typescript/ai-client/package.json @@ -18,12 +18,19 @@ "streaming" ], "type": "module", + "main": "./dist/cjs/index.cjs", "module": "./dist/esm/index.js", "types": "./dist/esm/index.d.ts", "exports": { ".": { - "types": "./dist/esm/index.d.ts", - "import": "./dist/esm/index.js" + "import": { + "types": "./dist/esm/index.d.ts", + "default": "./dist/esm/index.js" + }, + "require": { + "types": "./dist/cjs/index.d.cts", + "default": "./dist/cjs/index.cjs" + } } }, "files": [ diff --git a/packages/typescript/ai-client/src/connection-adapters.ts b/packages/typescript/ai-client/src/connection-adapters.ts index 91d63a146..a7bd32673 100644 --- a/packages/typescript/ai-client/src/connection-adapters.ts +++ b/packages/typescript/ai-client/src/connection-adapters.ts @@ -424,6 +424,81 @@ export function fetchHttpStream( } } +/** + * Create a JSON-array connection adapter for server runtimes that cannot + * stream `ReadableStream` responses (e.g. Expo's `@expo/server`, certain + * edge proxies). Pair with `toJSONResponse(stream)` on the server: the + * server drains the chat stream fully, JSON-serialises the collected + * chunks into an array, and this adapter fetches the array and replays + * each chunk one-by-one into the normal client pipeline. + * + * Trade-off: you lose incremental rendering — the UI sees every chunk + * only after the request resolves. Use SSE/HTTP-stream adapters when the + * runtime supports them. + * + * @param url - The API endpoint URL (or a function that returns the URL) + * @param options - Fetch options (headers, credentials, body, etc.) 
or a function that returns options (can be async)
+ * @returns A connection adapter for JSON-array responses
+ *
+ * @example
+ * ```typescript
+ * // Expo / RN client that hits an Expo API route returning toJSONResponse(stream)
+ * const connection = fetchJSON('/api/chat')
+ *
+ * const client = new ChatClient({ connection })
+ * ```
+ */
+export function fetchJSON(
+  url: string | (() => string),
+  options:
+    | FetchConnectionOptions
+    | (() => FetchConnectionOptions | Promise<FetchConnectionOptions>) = {},
+): ConnectConnectionAdapter {
+  return {
+    async *connect(messages, data, abortSignal) {
+      const resolvedUrl = typeof url === 'function' ? url() : url
+      const resolvedOptions =
+        typeof options === 'function' ? await options() : options
+
+      const requestHeaders: Record<string, string> = {
+        'Content-Type': 'application/json',
+        ...mergeHeaders(resolvedOptions.headers),
+      }
+
+      const requestBody = {
+        messages,
+        data,
+        ...resolvedOptions.body,
+      }
+
+      const fetchClient = resolvedOptions.fetchClient ?? fetch
+      const response = await fetchClient(resolvedUrl, {
+        method: 'POST',
+        headers: requestHeaders,
+        body: JSON.stringify(requestBody),
+        credentials: resolvedOptions.credentials || 'same-origin',
+        signal: abortSignal || resolvedOptions.signal,
+      })
+
+      if (!response.ok) {
+        throw new Error(
+          `HTTP error! status: ${response.status} ${response.statusText}`,
+        )
+      }
+
+      const payload = (await response.json()) as unknown
+      if (!Array.isArray(payload)) {
+        throw new Error(
+          'fetchJSON: expected response body to be a JSON array of StreamChunks. Did you forget to use `toJSONResponse(stream)` on the server?',
+        )
+      }
+      for (const chunk of payload) {
+        yield chunk as StreamChunk
+      }
+    },
+  }
+}
+
 /**
  * Create a direct stream connection adapter (for server functions or direct streams)
  *
diff --git a/packages/typescript/ai-client/src/index.ts b/packages/typescript/ai-client/src/index.ts
index 0f86ae891..b76e9b1c1 100644
--- a/packages/typescript/ai-client/src/index.ts
+++ b/packages/typescript/ai-client/src/index.ts
@@ -55,6 +55,7 @@ export type {
 export {
   fetchServerSentEvents,
   fetchHttpStream,
+  fetchJSON,
   stream,
   rpcStream,
   type ConnectConnectionAdapter,
diff --git a/packages/typescript/ai-client/tests/connection-adapters.test.ts b/packages/typescript/ai-client/tests/connection-adapters.test.ts
index 60c36763a..6ee9edb18 100644
--- a/packages/typescript/ai-client/tests/connection-adapters.test.ts
+++ b/packages/typescript/ai-client/tests/connection-adapters.test.ts
@@ -1,6 +1,7 @@
 import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
 import {
   fetchHttpStream,
+  fetchJSON,
   fetchServerSentEvents,
   normalizeConnectionAdapter,
   rpcStream,
@@ -1026,4 +1027,165 @@ describe('connection-adapters', () => {
       )
     })
   })
+
+  describe('fetchJSON', () => {
+    const jsonOk = (payload: unknown, init: ResponseInit = { status: 200 }) =>
+      ({
+        ok: (init.status ?? 200) >= 200 && (init.status ?? 200) < 300,
+        status: init.status ?? 200,
+        statusText: init.statusText ??
'OK', + json: async () => payload, + }) as unknown as Response + + it('drains a JSON array body into chunks', async () => { + const payload = [ + asChunk({ + type: 'RUN_STARTED', + runId: 'r1', + model: 'test', + timestamp: 1, + }), + asChunk({ + type: 'TEXT_MESSAGE_CONTENT', + messageId: 'm1', + model: 'test', + timestamp: 2, + delta: 'Hi', + content: 'Hi', + }), + asChunk({ + type: 'RUN_FINISHED', + runId: 'r1', + model: 'test', + timestamp: 3, + }), + ] + fetchMock.mockResolvedValue(jsonOk(payload)) + + const adapter = fetchJSON('/api/chat') + const chunks: Array = [] + for await (const chunk of adapter.connect([ + { role: 'user', content: 'Hi' }, + ])) { + chunks.push(chunk) + } + + expect(chunks).toEqual(payload) + }) + + it('throws on a non-2xx response', async () => { + fetchMock.mockResolvedValue( + jsonOk(null, { status: 500, statusText: 'Internal Server Error' }), + ) + + const adapter = fetchJSON('/api/chat') + + await expect(async () => { + for await (const _ of adapter.connect([ + { role: 'user', content: 'x' }, + ])) { + // drain + } + }).rejects.toThrow(/500/) + }) + + it('throws a descriptive error when response body is not an array', async () => { + fetchMock.mockResolvedValue(jsonOk({ message: 'not an array' })) + + const adapter = fetchJSON('/api/chat') + + await expect(async () => { + for await (const _ of adapter.connect([ + { role: 'user', content: 'x' }, + ])) { + // drain + } + }).rejects.toThrow(/toJSONResponse/) + }) + + it('resolves url-as-function at call time', async () => { + fetchMock.mockResolvedValue(jsonOk([])) + + const getUrl = vi.fn(() => '/api/dynamic') + const adapter = fetchJSON(getUrl) + for await (const _ of adapter.connect([{ role: 'user', content: 'x' }])) { + // drain + } + + expect(getUrl).toHaveBeenCalledOnce() + expect(fetchMock).toHaveBeenCalledWith('/api/dynamic', expect.any(Object)) + }) + + it('resolves options-as-async-function at call time', async () => { + fetchMock.mockResolvedValue(jsonOk([])) + + const getOptions = vi.fn( + async () => + ({ + headers: { 'X-Custom': 'yes' }, + body: { runId: 'abc' }, + }) as const, + ) + const adapter = fetchJSON('/api/chat', getOptions) + for await (const _ of adapter.connect([{ role: 'user', content: 'x' }])) { + // drain + } + + expect(getOptions).toHaveBeenCalledOnce() + const [, init] = fetchMock.mock.calls[0]! + expect(init.headers).toMatchObject({ 'X-Custom': 'yes' }) + const parsed = JSON.parse(init.body as string) as { + runId?: string + } + expect(parsed.runId).toBe('abc') + }) + + it('merges options.body into the POST body', async () => { + fetchMock.mockResolvedValue(jsonOk([])) + + const adapter = fetchJSON('/api/chat', { body: { extra: 42 } }) + for await (const _ of adapter.connect([{ role: 'user', content: 'x' }], { + sessionId: 'sess', + })) { + // drain + } + + const [, init] = fetchMock.mock.calls[0]! 
+ const body = JSON.parse(init.body as string) as Record + expect(body).toMatchObject({ + messages: expect.any(Array), + data: { sessionId: 'sess' }, + extra: 42, + }) + }) + + it('honors a custom fetchClient override', async () => { + const customFetch = vi.fn().mockResolvedValue(jsonOk([])) + + const adapter = fetchJSON('/api/chat', { fetchClient: customFetch }) + for await (const _ of adapter.connect([{ role: 'user', content: 'x' }])) { + // drain + } + + expect(customFetch).toHaveBeenCalledOnce() + expect(fetchMock).not.toHaveBeenCalled() + }) + + it('propagates the abortSignal to fetch', async () => { + fetchMock.mockResolvedValue(jsonOk([])) + const controller = new AbortController() + + const adapter = fetchJSON('/api/chat') + for await (const _ of adapter.connect( + [{ role: 'user', content: 'x' }], + undefined, + controller.signal, + )) { + // drain + } + + const [, init] = fetchMock.mock.calls[0]! + expect(init.signal).toBe(controller.signal) + }) + }) }) diff --git a/packages/typescript/ai-client/vite.config.ts b/packages/typescript/ai-client/vite.config.ts index 77bcc2e60..c87275f3e 100644 --- a/packages/typescript/ai-client/vite.config.ts +++ b/packages/typescript/ai-client/vite.config.ts @@ -31,6 +31,6 @@ export default mergeConfig( tanstackViteConfig({ entry: ['./src/index.ts'], srcDir: './src', - cjs: false, + cjs: true, }), ) diff --git a/packages/typescript/ai-event-client/package.json b/packages/typescript/ai-event-client/package.json index 4042db793..676c1d256 100644 --- a/packages/typescript/ai-event-client/package.json +++ b/packages/typescript/ai-event-client/package.json @@ -10,12 +10,19 @@ "directory": "packages/typescript/ai-event-client" }, "type": "module", + "main": "./dist/cjs/index.cjs", "module": "./dist/esm/index.js", "types": "./dist/esm/index.d.ts", "exports": { ".": { - "types": "./dist/esm/index.d.ts", - "import": "./dist/esm/index.js" + "import": { + "types": "./dist/esm/index.d.ts", + "default": "./dist/esm/index.js" + }, + "require": { + "types": "./dist/cjs/index.d.cts", + "default": "./dist/cjs/index.cjs" + } } }, "sideEffects": false, diff --git a/packages/typescript/ai-event-client/vite.config.ts b/packages/typescript/ai-event-client/vite.config.ts index 77bcc2e60..c87275f3e 100644 --- a/packages/typescript/ai-event-client/vite.config.ts +++ b/packages/typescript/ai-event-client/vite.config.ts @@ -31,6 +31,6 @@ export default mergeConfig( tanstackViteConfig({ entry: ['./src/index.ts'], srcDir: './src', - cjs: false, + cjs: true, }), ) diff --git a/packages/typescript/ai/package.json b/packages/typescript/ai/package.json index 68b209cf8..a70295c4a 100644 --- a/packages/typescript/ai/package.json +++ b/packages/typescript/ai/package.json @@ -10,24 +10,49 @@ "directory": "packages/typescript/ai" }, "type": "module", + "main": "./dist/cjs/index.cjs", "module": "./dist/esm/index.js", "types": "./dist/esm/index.d.ts", "exports": { ".": { - "types": "./dist/esm/index.d.ts", - "import": "./dist/esm/index.js" + "import": { + "types": "./dist/esm/index.d.ts", + "default": "./dist/esm/index.js" + }, + "require": { + "types": "./dist/cjs/index.d.cts", + "default": "./dist/cjs/index.cjs" + } }, "./adapters": { - "types": "./dist/esm/activities/index.d.ts", - "import": "./dist/esm/activities/index.js" + "import": { + "types": "./dist/esm/activities/index.d.ts", + "default": "./dist/esm/activities/index.js" + }, + "require": { + "types": "./dist/cjs/activities/index.d.cts", + "default": "./dist/cjs/activities/index.cjs" + } }, "./middlewares": { - "types": 
"./dist/esm/middlewares/index.d.ts", - "import": "./dist/esm/middlewares/index.js" + "import": { + "types": "./dist/esm/middlewares/index.d.ts", + "default": "./dist/esm/middlewares/index.js" + }, + "require": { + "types": "./dist/cjs/middlewares/index.d.cts", + "default": "./dist/cjs/middlewares/index.cjs" + } }, "./adapter-internals": { - "types": "./dist/esm/adapter-internals.d.ts", - "import": "./dist/esm/adapter-internals.js" + "import": { + "types": "./dist/esm/adapter-internals.d.ts", + "default": "./dist/esm/adapter-internals.js" + }, + "require": { + "types": "./dist/cjs/adapter-internals.d.cts", + "default": "./dist/cjs/adapter-internals.cjs" + } } }, "sideEffects": false, diff --git a/packages/typescript/ai/src/index.ts b/packages/typescript/ai/src/index.ts index ef45543be..ab32a14c0 100644 --- a/packages/typescript/ai/src/index.ts +++ b/packages/typescript/ai/src/index.ts @@ -62,6 +62,7 @@ export { toServerSentEventsResponse, toHttpStream, toHttpResponse, + toJSONResponse, } from './stream-to-response' // Tool call management diff --git a/packages/typescript/ai/src/stream-to-response.ts b/packages/typescript/ai/src/stream-to-response.ts index 2f83bc017..7076a59ee 100644 --- a/packages/typescript/ai/src/stream-to-response.ts +++ b/packages/typescript/ai/src/stream-to-response.ts @@ -250,3 +250,71 @@ export function toHttpResponse( ...init, }) } + +/** + * Drain a StreamChunk async iterable fully, then return the collected chunks + * as a single JSON-array `Response`. + * + * Use this when the target runtime does not support streaming + * `ReadableStream` responses — for example Expo's `@expo/server` runtime, + * Vercel Edge/Node hybrids behind certain proxies, or Cloudflare setups + * without streaming enabled. The consumer pairs with + * `fetchJSON` on the client, which decodes the array and yields each + * chunk back into the normal streaming pipeline — so the on-screen UX + * becomes "render everything at once when the request resolves" instead + * of incremental streaming, but the rest of the chat pipeline is unchanged. + * + * Trade-off: you lose the incremental rendering. Use only when you can't + * ship SSE / HTTP-stream responses. + * + * @param stream - AsyncIterable of StreamChunks from chat() + * @param init - Optional Response initialization options (including `abortController`) + * @returns Response with `Content-Type: application/json` containing an array of StreamChunks + * + * @example + * ```typescript + * // Expo API route where streaming responses aren't supported + * export async function POST(request: Request) { + * const stream = chat({ adapter: openaiText(), messages: [...] }) + * return toJSONResponse(stream) + * } + * ``` + */ +export async function toJSONResponse( + stream: AsyncIterable, + init?: ResponseInit & { abortController?: AbortController }, +): Promise { + const { abortController, headers, ...rest } = init ?? {} + + // Honor a pre-aborted signal: don't drain the stream at all, throw the + // caller's abort reason (or a synthesized AbortError) so behavior matches + // the SSE / HTTP-stream variants, which both short-circuit on aborted. + if (abortController?.signal.aborted) { + throw ( + abortController.signal.reason ?? + new DOMException('The operation was aborted', 'AbortError') + ) + } + + const chunks: Array = [] + try { + for await (const chunk of stream) { + // Honor mid-drain abort: break out early rather than over-draining an + // upstream that may not itself honor the signal. 
+ if (abortController?.signal.aborted) { + throw ( + abortController.signal.reason ?? + new DOMException('The operation was aborted', 'AbortError') + ) + } + chunks.push(chunk) + } + } catch (error) { + abortController?.abort() + throw error + } + const merged = new Headers(headers) + if (!merged.has('Content-Type')) + merged.set('Content-Type', 'application/json') + return new Response(JSON.stringify(chunks), { ...rest, headers: merged }) +} diff --git a/packages/typescript/ai/tests/stream-to-response.test.ts b/packages/typescript/ai/tests/stream-to-response.test.ts index f57425fb0..8c304aebf 100644 --- a/packages/typescript/ai/tests/stream-to-response.test.ts +++ b/packages/typescript/ai/tests/stream-to-response.test.ts @@ -2,6 +2,7 @@ import { describe, it, expect, vi } from 'vitest' import { toServerSentEventsStream, toServerSentEventsResponse, + toJSONResponse, } from '../src/stream-to-response' import type { StreamChunk } from '../src/types' @@ -870,3 +871,121 @@ describe('SSE Round-Trip (Encode → Decode)', () => { ) }) }) + +describe('toJSONResponse', () => { + it('drains the stream and returns a JSON-array Response', async () => { + const chunks: Array> = [ + { + type: 'RUN_STARTED', + runId: 'r1', + model: 'test', + timestamp: 1, + }, + { + type: 'TEXT_MESSAGE_CONTENT', + messageId: 'm1', + model: 'test', + timestamp: 2, + delta: 'Hello', + content: 'Hello', + }, + { + type: 'RUN_FINISHED', + runId: 'r1', + model: 'test', + timestamp: 3, + }, + ] + const response = await toJSONResponse(createMockStream(chunks)) + + expect(response.status).toBe(200) + expect(response.headers.get('Content-Type')).toBe('application/json') + expect(await response.json()).toEqual(chunks) + }) + + it('defers to caller-provided headers and preserves extra init', async () => { + const response = await toJSONResponse(createMockStream([]), { + status: 201, + headers: { 'X-Custom': '1' }, + }) + + expect(response.status).toBe(201) + expect(response.headers.get('X-Custom')).toBe('1') + expect(response.headers.get('Content-Type')).toBe('application/json') + }) + + it('does not override an explicit Content-Type', async () => { + const response = await toJSONResponse(createMockStream([]), { + headers: { 'Content-Type': 'application/vnd.tanstack-ai+json' }, + }) + + expect(response.headers.get('Content-Type')).toBe( + 'application/vnd.tanstack-ai+json', + ) + }) + + it('aborts the supplied controller and rethrows if the upstream errors', async () => { + const abortController = new AbortController() + const abortSpy = vi.spyOn(abortController, 'abort') + async function* failing(): AsyncGenerator { + yield { + type: 'RUN_STARTED', + runId: 'r1', + model: 'test', + timestamp: 1, + } as StreamChunk + throw new Error('upstream failure') + } + + await expect( + toJSONResponse(failing(), { abortController }), + ).rejects.toThrow('upstream failure') + expect(abortSpy).toHaveBeenCalledOnce() + }) + + it('throws immediately without draining when abortController is pre-aborted', async () => { + const abortController = new AbortController() + abortController.abort() + + let pulled = 0 + async function* infinite(): AsyncGenerator { + while (true) { + pulled++ + yield { + type: 'RUN_STARTED', + runId: 'r1', + model: 'test', + timestamp: 1, + } as StreamChunk + } + } + + await expect( + toJSONResponse(infinite(), { abortController }), + ).rejects.toThrow() + expect(pulled).toBe(0) + }) + + it('stops draining and throws when aborted mid-stream', async () => { + const abortController = new AbortController() + let pulled = 0 + 
async function* slow(): AsyncGenerator<StreamChunk> {
+      while (true) {
+        pulled++
+        yield {
+          type: 'RUN_STARTED',
+          runId: `r${pulled}`,
+          model: 'test',
+          timestamp: pulled,
+        } as StreamChunk
+        if (pulled === 2) abortController.abort()
+        // Let the microtask queue flush so the signal is observed next iter.
+        await Promise.resolve()
+      }
+    }
+
+    await expect(toJSONResponse(slow(), { abortController })).rejects.toThrow()
+    // Bounded: should not have pulled an unbounded number of items after abort.
+    expect(pulled).toBeLessThan(10)
+  })
+})
diff --git a/packages/typescript/ai/vite.config.ts b/packages/typescript/ai/vite.config.ts
index cb2d342e3..dd8fd3eb9 100644
--- a/packages/typescript/ai/vite.config.ts
+++ b/packages/typescript/ai/vite.config.ts
@@ -36,6 +36,6 @@ export default mergeConfig(
       './src/adapter-internals.ts',
     ],
     srcDir: './src',
-    cjs: false,
+    cjs: true,
   }),
 )