diff --git a/.changeset/c3-frameworks-update-12713.md b/.changeset/c3-frameworks-update-12713.md new file mode 100644 index 000000000000..f17bc0bb4c45 --- /dev/null +++ b/.changeset/c3-frameworks-update-12713.md @@ -0,0 +1,11 @@ +--- +"create-cloudflare": patch +--- + +Update dependencies of "create-cloudflare" + +The following dependency versions have been updated: + +| Dependency | From | To | +| --------------- | ------ | ------ | +| @angular/create | 21.1.4 | 21.2.0 | diff --git a/.changeset/c3-frameworks-update-12715.md b/.changeset/c3-frameworks-update-12715.md new file mode 100644 index 000000000000..15d96bffe004 --- /dev/null +++ b/.changeset/c3-frameworks-update-12715.md @@ -0,0 +1,11 @@ +--- +"create-cloudflare": patch +--- + +Update dependencies of "create-cloudflare" + +The following dependency versions have been updated: + +| Dependency | From | To | +| ---------- | ------ | ------ | +| create-vue | 3.21.1 | 3.22.0 | diff --git a/.changeset/c3-frameworks-update-12716.md b/.changeset/c3-frameworks-update-12716.md new file mode 100644 index 000000000000..ecad27d67736 --- /dev/null +++ b/.changeset/c3-frameworks-update-12716.md @@ -0,0 +1,11 @@ +--- +"create-cloudflare": patch +--- + +Update dependencies of "create-cloudflare" + +The following dependency versions have been updated: + +| Dependency | From | To | +| ------------------- | ------ | ------ | +| create-react-router | 7.13.0 | 7.13.1 | diff --git a/.changeset/c3-frameworks-update-12719.md b/.changeset/c3-frameworks-update-12719.md new file mode 100644 index 000000000000..39d006f8bfb7 --- /dev/null +++ b/.changeset/c3-frameworks-update-12719.md @@ -0,0 +1,11 @@ +--- +"create-cloudflare": patch +--- + +Update dependencies of "create-cloudflare" + +The following dependency versions have been updated: + +| Dependency | From | To | +| ----------- | ------- | ------- | +| create-vike | 0.0.581 | 0.0.591 | diff --git a/.changeset/images-binding-crud-operations.md 
b/.changeset/images-binding-crud-operations.md new file mode 100644 index 000000000000..9488dcf061c4 --- /dev/null +++ b/.changeset/images-binding-crud-operations.md @@ -0,0 +1,7 @@ +--- +"miniflare": minor +--- + +Add Hosted Images CRUD operations to Images binding. + +This is an experimental API that only works locally for the moment. diff --git a/.changeset/wicked-wombats-decide.md b/.changeset/wicked-wombats-decide.md new file mode 100644 index 000000000000..64ae580ef618 --- /dev/null +++ b/.changeset/wicked-wombats-decide.md @@ -0,0 +1,5 @@ +--- +"@cloudflare/vitest-pool-workers": patch +--- + +Fix resource leak where remote proxy sessions were not disposed during pool shutdown, causing vitest processes to hang. diff --git a/.codeowners b/.codeowners new file mode 100644 index 000000000000..4af6bb802b93 --- /dev/null +++ b/.codeowners @@ -0,0 +1,68 @@ +# ============================================================ +# Codeowners Plus - Code Ownership Rules +# ============================================================ +# This file defines code ownership for the workers-sdk monorepo, +# enforced by Codeowners Plus (https://github.com/multimediallc/codeowners-plus). +# +# See CODEOWNERS.md for full documentation on how ownership works. +# +# Syntax: +# (no prefix) = primary owner (highest-priority match wins) +# & prefix = AND rule (additional required reviewer) +# ? prefix = optional/CC reviewer (non-blocking) +# Multiple teams on one line = OR (either can satisfy) +# +# Rules are relative to this file's directory (repo root). +# Unlike GitHub CODEOWNERS, `*.js` only matches in this directory; +# use `**/*.js` for recursive matching. +# +# Paths not matching any specific rule fall through to the default +# primary owner (currently @cloudflare/wrangler). 
+# ============================================================ + +# Default owner - ANT/Wrangler team owns everything +* @cloudflare/wrangler + +# ---------------------------------------------------------- +# D&C ownership (AND: requires wrangler + deploy-config) +# ---------------------------------------------------------- +& packages/workers-shared/** @cloudflare/deploy-config + +# ---------------------------------------------------------- +# D1 ownership (AND: requires wrangler + d1) +# ---------------------------------------------------------- +& packages/wrangler/src/api/d1/** @cloudflare/d1 +& packages/wrangler/src/d1/** @cloudflare/d1 +& packages/wrangler/src/__tests__/d1/** @cloudflare/d1 + +# ---------------------------------------------------------- +# Cloudchamber ownership (AND: requires wrangler + cloudchamber) +# ---------------------------------------------------------- +& packages/wrangler/src/cloudchamber/** @cloudflare/cloudchamber +& packages/wrangler/src/containers/** @cloudflare/cloudchamber +& packages/containers-shared/** @cloudflare/cloudchamber + +# ---------------------------------------------------------- +# Workers KV ownership (AND: requires wrangler + workers-kv) +# ---------------------------------------------------------- +& packages/wrangler/src/kv/** @cloudflare/workers-kv +& packages/wrangler/src/__tests__/kv/** @cloudflare/workers-kv +& packages/miniflare/src/workers/kv/** @cloudflare/workers-kv +& packages/miniflare/test/plugins/kv/** @cloudflare/workers-kv + +# ---------------------------------------------------------- +# Workflows ownership (AND: requires wrangler + workflows) +# ---------------------------------------------------------- +& packages/workflows-shared/** @cloudflare/workflows + +# ---------------------------------------------------------- +# Adding a new product team +# ---------------------------------------------------------- +# Copy this template and fill in <product> and <team>: +# +# # Product: <product> (AND: requires wrangler + <team>)
+# & packages/wrangler/src/<product>/** @cloudflare/<team> +# & packages/wrangler/src/__tests__/<product>/** @cloudflare/<team> +# & packages/miniflare/src/plugins/<product>/** @cloudflare/<team> +# & packages/miniflare/src/workers/<product>/** @cloudflare/<team> +# & packages/miniflare/test/plugins/<product>/** @cloudflare/<team> diff --git a/.github/workflows/codeowners.yml b/.github/workflows/codeowners.yml new file mode 100644 index 000000000000..5e4a24ae5518 --- /dev/null +++ b/.github/workflows/codeowners.yml @@ -0,0 +1,45 @@ +name: "Code Owners" + +# Re-evaluate when PRs are opened/updated, and when reviews are submitted/dismissed. +# Using pull_request_target (not pull_request) so the workflow has access to secrets +# for fork PRs. This is safe because: +# - The checkout is the BASE branch (ownership rules come from the protected branch) +# - PR head commits are fetched as git objects only (never checked out or executed) +# - The action only reads config files and calls the GitHub API +on: + pull_request_target: + types: [opened, reopened, synchronize, ready_for_review, labeled, unlabeled] + pull_request_review: + types: [submitted, dismissed] + +concurrency: + group: codeowners-${{ github.event.pull_request.number }} + cancel-in-progress: true + +permissions: + contents: read + issues: write + pull-requests: write + +jobs: + codeowners: + name: "Run Codeowners Plus" + runs-on: ubuntu-latest + steps: + - name: "Checkout Base Branch" + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: "Fetch PR Head (for diff computation)" + run: git fetch origin +refs/pull/${{ github.event.pull_request.number }}/head + env: + GITHUB_TOKEN: "${{ secrets.CODEOWNERS_GITHUB_PAT }}" + + - name: "Codeowners Plus" + uses: multimediallc/codeowners-plus@ff02aa993a92e8efe01642916d0877beb9439e9f # v1.9.0 + with: + github-token: "${{ secrets.CODEOWNERS_GITHUB_PAT }}" + pr: "${{ github.event.pull_request.number }}" + verbose: true + quiet: ${{ github.event.pull_request.draft }} diff --git a/CODEOWNERS.md b/CODEOWNERS.md new file mode 100644 index
000000000000..8ecf6bfdd9f5 --- /dev/null +++ b/CODEOWNERS.md @@ -0,0 +1,139 @@ +# Code Ownership + +This repository uses [Codeowners Plus](https://github.com/multimediallc/codeowners-plus) to enforce code ownership and review requirements. This replaces GitHub's native CODEOWNERS with more fine-grained control — specifically, the ability to require approval from **multiple teams** (AND rules) before a PR can merge. + +## How It Works + +### Overview + +When a PR is opened, updated, or reviewed, the Codeowners Plus GitHub Action runs. It reads `.codeowners` and `codeowners.toml` from the **base branch** (not the PR), evaluates the ownership rules, and: + +- Posts a PR comment listing which teams need to approve +- Requests reviews from those teams +- Sets a **required status check** ("Run Codeowners Plus") that passes only when all ownership rules are satisfied + +The native GitHub `CODEOWNERS` file is not involved in enforcement. + +### Key Difference from Native CODEOWNERS + +| Feature | Native GitHub CODEOWNERS | Codeowners Plus | +| ------------------------ | --------------------------------- | ------------------------------------------------------------------------------- | +| Multiple teams on a path | **OR** — any one team can approve | **AND** via `&` prefix — all listed teams must approve | +| Path matching for `*.js` | Matches anywhere in repo | Matches only in the `.codeowners` file's directory; use `**/*.js` for recursive | +| Per-directory config | Single file only | `.codeowners` file in any directory (rules are relative to that directory) | +| Stale review dismissal | All-or-nothing | Smart — only dismisses when reviewer's owned files change | +| Optional reviewers | Not supported | `?` prefix — CC without blocking | + +## Configuration Files + +### `.codeowners` — Ownership Rules + +Located at the repo root. Defines who owns what using path patterns and team handles. 
See the comments in the file itself for syntax details and a template for adding new product teams. + +### `codeowners.toml` — Advanced Configuration + +Located at the repo root. Controls enforcement behavior, ignored paths, and admin bypass. + +Key settings: + +| Setting | Purpose | +| -------------------------- | -------------------------------------------------------------------------- | +| `ignore` | Directories excluded from ownership checks (e.g. `.changeset`, `fixtures`) | +| `detailed_reviewers` | Show per-file owner breakdown in PR comments | +| `suppress_unowned_warning` | Don't warn about files with no owner | +| `enforcement.fail_check` | When `true`, the GHA check fails if rules aren't satisfied | +| `admin_bypass.enabled` | Allow admins to bypass by approving with "Codeowners Bypass" text | + +### `CODEOWNERS` — Native GitHub File + +The native GitHub `CODEOWNERS` file is kept for reference but is **not the enforcement mechanism**. Enforcement is handled by the Codeowners Plus required status check. All ownership logic lives in `.codeowners`. + +### `.github/workflows/codeowners.yml` — GitHub Actions Workflow + +A single workflow handles all events: + +- `pull_request_target` — PR opened, updated, marked ready, labeled +- `pull_request_review` — review submitted or dismissed + +Using `pull_request_target` (not `pull_request`) ensures the workflow has access to secrets for **fork PRs**. The checkout is always the base branch, so PR authors cannot modify ownership rules. + +## Common Scenarios + +### PR touches only wrangler-team-owned code + +Example: changes to `packages/create-cloudflare/` or `packages/vite-plugin-cloudflare/`. + +Only `@cloudflare/wrangler` approval is required. + +### PR touches product-team-owned code + +Example: changes to `packages/wrangler/src/d1/`. + +**Both** `@cloudflare/wrangler` AND `@cloudflare/d1` must approve. Codeowners Plus will post a comment listing who still needs to approve and request reviews from both teams. 
+ +### PR touches multiple product areas + +Example: changes to both `packages/wrangler/src/d1/` and `packages/wrangler/src/kv/`. + +All three teams must approve: `@cloudflare/wrangler` + `@cloudflare/d1` + `@cloudflare/workers-kv`. + +### PR touches ignored paths only + +Example: changes only in `.changeset/` or `fixtures/`. + +No ownership checks apply. The codeowners-plus check passes automatically. + +### Draft PRs + +The workflow runs in **quiet mode** for draft PRs: + +- No PR comments posted +- No review requests sent +- The status check still runs for visibility + +### Emergency bypass + +Repository admins can bypass all requirements by submitting an **approval review** with the text "Codeowners Bypass" (case-insensitive). This creates an audit trail. + +### Fork PRs + +Fork PRs are fully supported. The workflow uses `pull_request_target` to run in the base repo context with access to secrets. The base branch is checked out (so ownership rules come from the protected branch), and the PR head is fetched as git objects only for diff computation. No fork code is executed. 
+ +## Adding a New Product Team + +To add ownership for a new product team, add AND rules to `.codeowners`: + +```bash +# Product: <product> (AND: requires wrangler + <team>) +& packages/wrangler/src/<product>/** @cloudflare/<team> +& packages/wrangler/src/__tests__/<product>/** @cloudflare/<team> +& packages/miniflare/src/plugins/<product>/** @cloudflare/<team> +& packages/miniflare/src/workers/<product>/** @cloudflare/<team> +& packages/miniflare/test/plugins/<product>/** @cloudflare/<team> +``` + +For example, to add R2 ownership: + +```bash +# Product: R2 (AND: requires wrangler + r2) +& packages/wrangler/src/r2/** @cloudflare/r2 +& packages/wrangler/src/__tests__/r2/** @cloudflare/r2 +& packages/miniflare/src/plugins/r2/** @cloudflare/r2 +& packages/miniflare/src/workers/r2/** @cloudflare/r2 +& packages/miniflare/test/plugins/r2/** @cloudflare/r2 +``` + +**Teams ready to add** (have source paths but no ownership entries yet): +R2, Queues, AI, Hyperdrive, Vectorize, Pipelines, SSL/Secrets Store, WVPC. + +## Stale Review Handling + +Codeowners Plus uses **smart dismissal**: when new commits are pushed to a PR, it only dismisses an approval if the files owned by that reviewer were changed. This avoids the frustration of GitHub's all-or-nothing stale review dismissal. + +For this to work, the branch protection setting **"Dismiss stale pull request approvals when new commits are pushed"** must be **disabled**. Codeowners Plus handles dismissal itself.
+ +## References + +- [Codeowners Plus documentation](https://github.com/multimediallc/codeowners-plus) +- [Codeowners Plus action on GitHub Marketplace](https://github.com/marketplace/actions/codeowners-plus) +- [GitHub branch protection docs](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/managing-protected-branches/about-protected-branches) diff --git a/codeowners.toml b/codeowners.toml new file mode 100644 index 000000000000..563b292e658c --- /dev/null +++ b/codeowners.toml @@ -0,0 +1,23 @@ +# Codeowners Plus - Advanced Configuration +# See: https://github.com/multimediallc/codeowners-plus +# See: CODEOWNERS.md for full documentation + +# Directories to ignore (no ownership checks, reduces review noise) +ignore = [".changeset", "fixtures"] + +# Show detailed file-to-owner mapping in PR comments +detailed_reviewers = true + +# Suppress warnings for intentionally unowned files (e.g. pnpm-lock.yaml) +suppress_unowned_warning = true + +[enforcement] +# Enforcement is via a required GitHub status check ("Run Codeowners Plus"). +# The check fails when ownership rules are not satisfied, blocking merge. +# No bot approval is used — the status check is the sole enforcement mechanism. +fail_check = true + +[admin_bypass] +# Allow repo admins to bypass codeowner requirements in emergencies +# by submitting an approval review containing "Codeowners Bypass" text +enabled = true diff --git a/fixtures/vitest-pool-workers-examples/images/README.md b/fixtures/vitest-pool-workers-examples/images/README.md index 8b2ea80fa742..9410bf75498a 100644 --- a/fixtures/vitest-pool-workers-examples/images/README.md +++ b/fixtures/vitest-pool-workers-examples/images/README.md @@ -1,3 +1,3 @@ -# 🖼️ images +# images -This Worker returns information about an image that is POSTed to it. +This Worker returns information about an image that is POSTed to it and can perform Hosted Images CRUD operations. 
diff --git a/packages/create-cloudflare/src/frameworks/package.json b/packages/create-cloudflare/src/frameworks/package.json index d0e16054b408..13da7b3cfb76 100644 --- a/packages/create-cloudflare/src/frameworks/package.json +++ b/packages/create-cloudflare/src/frameworks/package.json @@ -7,17 +7,17 @@ "dependencies": { "create-astro": "4.13.2", "create-analog": "2.3.1", - "@angular/create": "21.1.4", + "@angular/create": "21.2.0", "create-docusaurus": "3.9.2", "create-hono": "0.19.4", "create-next-app": "15.5.6", "create-qwik": "1.19.0", "create-vite": "8.3.0", "create-rwsdk": "3.1.3", - "create-react-router": "7.13.0", + "create-react-router": "7.13.1", "create-solid": "0.6.13", - "create-vike": "0.0.581", - "create-vue": "3.21.1", + "create-vike": "0.0.591", + "create-vue": "3.22.0", "create-waku": "0.12.5-1.0.0-alpha.5-0", "@tanstack/create-start": "0.59.8", "gatsby": "5.16.1", diff --git a/packages/create-cloudflare/templates/vue/workers/c3.ts b/packages/create-cloudflare/templates/vue/workers/c3.ts index 3bc21306224b..b99fd4ed7847 100644 --- a/packages/create-cloudflare/templates/vue/workers/c3.ts +++ b/packages/create-cloudflare/templates/vue/workers/c3.ts @@ -25,7 +25,7 @@ const generate = async (ctx: C3Context) => { await runFrameworkGenerator(ctx, [ ctx.project.name, "--router", - lang === "ts" ? "--ts" : "", + lang === "ts" ? 
"--ts" : "--no-ts", ]); logRaw(""); }; diff --git a/packages/miniflare/src/index.ts b/packages/miniflare/src/index.ts index 42e9794a0608..b24c49d50407 100644 --- a/packages/miniflare/src/index.ts +++ b/packages/miniflare/src/index.ts @@ -48,6 +48,7 @@ import { getPersistPath, HELLO_WORLD_PLUGIN_NAME, HOST_CAPNP_CONNECT, + IMAGES_PLUGIN_NAME, KV_PLUGIN_NAME, launchBrowser, loadExternalPlugins, @@ -143,6 +144,7 @@ import type { D1Database, DurableObjectNamespace, Fetcher, + ImagesBinding, KVNamespace, KVNamespaceListKey, Queue, @@ -2735,6 +2737,12 @@ export class Miniflare { ): Promise> { return this.#getProxy(R2_PLUGIN_NAME, bindingName, workerName); } + getImagesBinding( + bindingName: string, + workerName?: string + ): Promise> { + return this.#getProxy(IMAGES_PLUGIN_NAME, bindingName, workerName); + } getHelloWorldBinding( bindingName: string, workerName?: string diff --git a/packages/miniflare/src/plugins/core/proxy/client.ts b/packages/miniflare/src/plugins/core/proxy/client.ts index b20df90e6b6a..c116b449175b 100644 --- a/packages/miniflare/src/plugins/core/proxy/client.ts +++ b/packages/miniflare/src/plugins/core/proxy/client.ts @@ -342,13 +342,16 @@ class ProxyStubHandler }, }; }; + const proxy = this.bridge.getProxy(target) as any; const binding = { info: (stream: ReadableStream) => { - // @ts-expect-error The stream types are mismatched - return (this.bridge.getProxy(target) as ImagesBinding)["info"](stream); + return proxy["info"](stream); }, input: (stream: ReadableStream) => { - return transformer(this.bridge.getProxy(target), stream, []); + return transformer(proxy, stream, []); + }, + get hosted(): ImagesBinding["hosted"] { + return proxy["hosted"]; }, }; return binding; diff --git a/packages/miniflare/src/plugins/images/index.ts b/packages/miniflare/src/plugins/images/index.ts index 93fb0a71cf07..470d8d188186 100644 --- a/packages/miniflare/src/plugins/images/index.ts +++ b/packages/miniflare/src/plugins/images/index.ts @@ -1,25 +1,24 @@ +import fs 
from "node:fs/promises"; +import SCRIPT_IMAGES_SERVICE from "worker:images/images"; +import SCRIPT_KV_NAMESPACE_OBJECT from "worker:kv/namespace"; import { z } from "zod"; -import { CoreBindings, CoreHeaders } from "../../workers"; +import { Service } from "../../runtime"; +import { SharedBindings } from "../../workers"; +import { KV_NAMESPACE_OBJECT_CLASS_NAME } from "../kv"; import { + getMiniflareObjectBindings, + getPersistPath, getUserBindingServiceName, + objectEntryWorker, + PersistenceSchema, Plugin, ProxyNodeBinding, remoteProxyClientWorker, RemoteProxyConnectionString, + SERVICE_LOOPBACK, WORKER_BINDING_SERVICE_LOOPBACK, } from "../shared"; -const IMAGES_LOCAL_FETCHER = /* javascript */ ` - export default { - fetch(req, env) { - const request = new Request(req); - request.headers.set("${CoreHeaders.CUSTOM_FETCH_SERVICE}", "${CoreBindings.IMAGES_SERVICE}"); - request.headers.set("${CoreHeaders.ORIGINAL_URL}", request.url); - return env.${CoreBindings.SERVICE_LOOPBACK}.fetch(request) - } - } -`; - const ImagesSchema = z.object({ binding: z.string(), remoteProxyConnectionString: z @@ -31,10 +30,18 @@ export const ImagesOptionsSchema = z.object({ images: ImagesSchema.optional(), }); +export const ImagesSharedOptionsSchema = z.object({ + imagesPersist: PersistenceSchema, +}); + export const IMAGES_PLUGIN_NAME = "images"; -export const IMAGES_PLUGIN: Plugin = { +export const IMAGES_PLUGIN: Plugin< + typeof ImagesOptionsSchema, + typeof ImagesSharedOptionsSchema +> = { options: ImagesOptionsSchema, + sharedOptions: ImagesSharedOptionsSchema, async getBindings(options) { if (!options.images) { return []; @@ -69,34 +76,120 @@ export const IMAGES_PLUGIN: Plugin = { [options.images.binding]: new ProxyNodeBinding(), }; }, - async getServices({ options }) { + async getServices({ + options, + sharedOptions, + tmpPath, + defaultPersistRoot, + unsafeStickyBlobs, + }) { if (!options.images) { return []; } - return [ - { - name: getUserBindingServiceName( - 
IMAGES_PLUGIN_NAME, - options.images.binding, - options.images.remoteProxyConnectionString - ), - worker: options.images.remoteProxyConnectionString - ? remoteProxyClientWorker( - options.images.remoteProxyConnectionString, - options.images.binding - ) - : { - modules: [ - { - name: "index.worker.js", - esModule: IMAGES_LOCAL_FETCHER, - }, - ], - compatibilityDate: "2025-04-01", - bindings: [WORKER_BINDING_SERVICE_LOOPBACK], - }, + const serviceName = getUserBindingServiceName( + IMAGES_PLUGIN_NAME, + options.images.binding, + options.images.remoteProxyConnectionString + ); + + if (options.images.remoteProxyConnectionString) { + return [ + { + name: serviceName, + worker: remoteProxyClientWorker( + options.images.remoteProxyConnectionString, + options.images.binding + ), + }, + ]; + } + + const persistPath = getPersistPath( + IMAGES_PLUGIN_NAME, + tmpPath, + defaultPersistRoot, + sharedOptions.imagesPersist + ); + + await fs.mkdir(persistPath, { recursive: true }); + + const storageService = { + name: `${IMAGES_PLUGIN_NAME}:storage`, + disk: { path: persistPath, writable: true }, + } satisfies Service; + + const objectService = { + name: `${IMAGES_PLUGIN_NAME}:ns`, + worker: { + compatibilityDate: "2023-07-24", + compatibilityFlags: ["nodejs_compat", "experimental"], + modules: [ + { + name: "namespace.worker.js", + esModule: SCRIPT_KV_NAMESPACE_OBJECT(), + }, + ], + durableObjectNamespaces: [ + { + className: KV_NAMESPACE_OBJECT_CLASS_NAME, + uniqueKey: `miniflare-images-${KV_NAMESPACE_OBJECT_CLASS_NAME}`, + }, + ], + durableObjectStorage: { localDisk: storageService.name }, + bindings: [ + { + name: SharedBindings.MAYBE_SERVICE_BLOBS, + service: { name: storageService.name }, + }, + { + name: SharedBindings.MAYBE_SERVICE_LOOPBACK, + service: { name: SERVICE_LOOPBACK }, + }, + ...getMiniflareObjectBindings(unsafeStickyBlobs), + ], }, - ]; + } satisfies Service; + + const kvNamespaceService = { + name: `${IMAGES_PLUGIN_NAME}:ns:data`, + worker: objectEntryWorker( + 
{ + serviceName: objectService.name, + className: KV_NAMESPACE_OBJECT_CLASS_NAME, + }, + "images-data" + ), + } satisfies Service; + + const imagesService = { + name: serviceName, + worker: { + compatibilityDate: "2025-04-01", + modules: [ + { + name: "images.worker.js", + esModule: SCRIPT_IMAGES_SERVICE(), + }, + ], + bindings: [ + { + name: "IMAGES_STORE", + kvNamespace: { name: kvNamespaceService.name }, + }, + WORKER_BINDING_SERVICE_LOOPBACK, + ], + }, + } satisfies Service; + + return [storageService, objectService, kvNamespaceService, imagesService]; + }, + getPersistPath({ imagesPersist }, tmpPath) { + return getPersistPath( + IMAGES_PLUGIN_NAME, + tmpPath, + undefined, + imagesPersist + ); }, }; diff --git a/packages/miniflare/src/plugins/index.ts b/packages/miniflare/src/plugins/index.ts index 3bfeb5a69a28..449f88d05108 100644 --- a/packages/miniflare/src/plugins/index.ts +++ b/packages/miniflare/src/plugins/index.ts @@ -144,6 +144,7 @@ export type SharedOptions = z.input & z.input & z.input & z.input & + z.input & z.input; export const PLUGIN_ENTRIES = Object.entries(PLUGINS) as [ diff --git a/packages/miniflare/src/workers/images/images.worker.ts b/packages/miniflare/src/workers/images/images.worker.ts new file mode 100644 index 000000000000..f322d3b7a51d --- /dev/null +++ b/packages/miniflare/src/workers/images/images.worker.ts @@ -0,0 +1,182 @@ +// KV-backed mock for Images binding CRUD operations +// Image data is stored as KV values, metadata as KV metadata +// Transforms and info operations are handled via HTTP loopback to Node.js Sharp + +import { WorkerEntrypoint } from "cloudflare:workers"; +import { CoreBindings, CoreHeaders } from "../core/constants"; + +interface Env { + IMAGES_STORE: KVNamespace; + [CoreBindings.SERVICE_LOOPBACK]: Fetcher; +} + +function base64DecodeArrayBuffer(buffer: ArrayBuffer): ArrayBuffer { + const decoder = new TextDecoder(); + const base64String = decoder.decode(buffer); + const binaryString = 
atob(base64String.trim()); + const bytes = new Uint8Array(binaryString.length); + for (let i = 0; i < binaryString.length; i++) { + bytes[i] = binaryString.charCodeAt(i); + } + return bytes.buffer; +} + +async function base64DecodeStream( + stream: ReadableStream +): Promise { + const response = new Response(stream); + const buffer = await response.arrayBuffer(); + return base64DecodeArrayBuffer(buffer); +} + +export default class ImagesService extends WorkerEntrypoint { + async details(imageId: string): Promise { + const result = await this.env.IMAGES_STORE.getWithMetadata( + imageId, + "arrayBuffer" + ); + return result.metadata ?? null; + } + + async image(imageId: string): Promise | null> { + const data = await this.env.IMAGES_STORE.get(imageId, "arrayBuffer"); + if (data === null) { + return null; + } + return new Blob([data]).stream(); + } + + async upload( + image: ReadableStream | ArrayBuffer, + options?: ImageUploadOptions + ): Promise { + let imageData: ReadableStream | ArrayBuffer = image; + if (options?.encoding === "base64") { + imageData = + image instanceof ArrayBuffer + ? base64DecodeArrayBuffer(image) + : await base64DecodeStream(image); + } + + const buffer = + imageData instanceof ArrayBuffer + ? imageData + : await new Response(imageData).arrayBuffer(); + + const id = options?.id ?? crypto.randomUUID(); + + const metadata: ImageMetadata = { + id, + filename: options?.filename ?? "uploaded.jpg", + uploaded: new Date().toISOString(), + requireSignedURLs: options?.requireSignedURLs ?? false, + meta: options?.metadata ?? 
{}, + variants: ["public"], + draft: false, + creator: options?.creator, + }; + + await this.env.IMAGES_STORE.put(id, buffer, { metadata }); + return metadata; + } + + async update( + imageId: string, + options: ImageUpdateOptions + ): Promise { + const existing = await this.env.IMAGES_STORE.getWithMetadata( + imageId, + "arrayBuffer" + ); + if (existing.value === null || existing.metadata === null) { + throw new Error(`Image not found: ${imageId}`); + } + + const updatedMetadata: ImageMetadata = { + ...existing.metadata, + requireSignedURLs: + options.requireSignedURLs ?? existing.metadata.requireSignedURLs, + meta: options.metadata ?? existing.metadata.meta, + creator: options.creator ?? existing.metadata.creator, + }; + + await this.env.IMAGES_STORE.put(imageId, existing.value, { + metadata: updatedMetadata, + }); + return updatedMetadata; + } + + async delete(imageId: string): Promise { + const existing = await this.env.IMAGES_STORE.get(imageId, "arrayBuffer"); + if (existing === null) { + return false; + } + await this.env.IMAGES_STORE.delete(imageId); + return true; + } + + async list(options?: ImageListOptions): Promise { + const limit = options?.limit ?? 50; + + // Fetch all keys so we can filter and sort accurately + const allImages: ImageMetadata[] = []; + let kvCursor: string | undefined; + do { + const kvResult = await this.env.IMAGES_STORE.list({ + cursor: kvCursor, + }); + for (const key of kvResult.keys) { + if (key.metadata) { + allImages.push(key.metadata); + } + } + kvCursor = kvResult.list_complete ? undefined : kvResult.cursor; + } while (kvCursor); + + if (options?.creator) { + allImages.splice( + 0, + allImages.length, + ...allImages.filter((i) => i.creator === options.creator) + ); + } + + allImages.sort((a, b) => { + const dateA = a.uploaded ?? ""; + const dateB = b.uploaded ?? ""; + const cmp = dateA.localeCompare(dateB) || a.id.localeCompare(b.id); + return options?.sortOrder === "desc" ? 
-cmp : cmp; + }); + + // Handle cursor-based pagination over the sorted/filtered results + let startIndex = 0; + if (options?.cursor) { + const cursorIndex = allImages.findIndex((i) => i.id === options.cursor); + if (cursorIndex >= 0) { + startIndex = cursorIndex + 1; + } + } + + const page = allImages.slice(startIndex, startIndex + limit); + const hasMore = startIndex + limit < allImages.length; + const lastImage = page[page.length - 1]; + + return { + images: page, + cursor: hasMore && lastImage ? lastImage.id : undefined, + listComplete: !hasMore, + }; + } + + // Handle HTTP requests for info and transform operations + // These are forwarded to Node.js via the loopback service where Sharp runs + async fetch(request: Request): Promise { + const forwardRequest = new Request(request); + forwardRequest.headers.set( + CoreHeaders.CUSTOM_FETCH_SERVICE, + CoreBindings.IMAGES_SERVICE + ); + forwardRequest.headers.set(CoreHeaders.ORIGINAL_URL, request.url); + return this.env[CoreBindings.SERVICE_LOOPBACK].fetch(forwardRequest); + } +} diff --git a/packages/miniflare/test/plugins/images/index.spec.ts b/packages/miniflare/test/plugins/images/index.spec.ts new file mode 100644 index 000000000000..ece312a50113 --- /dev/null +++ b/packages/miniflare/test/plugins/images/index.spec.ts @@ -0,0 +1,199 @@ +import assert from "node:assert"; +import { Miniflare } from "miniflare"; +import { describe, test } from "vitest"; +import { useDispose } from "../../test-shared"; + +// The worker stores and retrieves bytes without validation, so we don't need a real image. 
+const TEST_IMAGE_BYTES = new Uint8Array([1, 2, 3, 4, 5]); + +function imageBuffer(): ArrayBuffer { + return new Uint8Array(TEST_IMAGE_BYTES).buffer as ArrayBuffer; +} + +function createMiniflare(): Miniflare { + return new Miniflare({ + compatibilityDate: "2025-04-01", + images: { binding: "IMAGES" }, + imagesPersist: false, + modules: true, + script: `export default { fetch() { return new Response(null, { status: 404 }); } }`, + }); +} + +describe("Images hosted CRUD", () => { + test("upload and retrieve metadata", async ({ expect }) => { + const mf = createMiniflare(); + useDispose(mf); + const images = await mf.getImagesBinding("IMAGES"); + + const metadata = await images.hosted.upload(imageBuffer(), { + id: "test-123", + }); + expect(metadata.id).toBe("test-123"); + expect(metadata.filename).toBe("uploaded.jpg"); + expect(metadata.requireSignedURLs).toBe(false); + }); + + test("upload and retrieve image data", async ({ expect }) => { + const mf = createMiniflare(); + useDispose(mf); + const images = await mf.getImagesBinding("IMAGES"); + + await images.hosted.upload(imageBuffer(), { id: "blob-test" }); + + const stream = await images.hosted.image("blob-test"); + assert(stream !== null); + const data = new Uint8Array(await new Response(stream).arrayBuffer()); + expect(data).toEqual(TEST_IMAGE_BYTES); + }); + + test("upload with base64 encoding", async ({ expect }) => { + const mf = createMiniflare(); + useDispose(mf); + const images = await mf.getImagesBinding("IMAGES"); + + const base64String = btoa(String.fromCharCode(...TEST_IMAGE_BYTES)); + const base64Bytes = new TextEncoder().encode(base64String); + + const metadata = await images.hosted.upload( + base64Bytes.buffer as ArrayBuffer, + { + id: "base64-test", + encoding: "base64", + } + ); + expect(metadata.id).toBe("base64-test"); + + const stream = await images.hosted.image("base64-test"); + assert(stream !== null); + const data = new Uint8Array(await new Response(stream).arrayBuffer()); + 
expect(data).toEqual(TEST_IMAGE_BYTES); + }); + + test("get details for non-existent image returns null", async ({ + expect, + }) => { + const mf = createMiniflare(); + useDispose(mf); + const images = await mf.getImagesBinding("IMAGES"); + + const metadata = await images.hosted.details("does-not-exist"); + expect(metadata).toBe(null); + }); + + test("get image data for non-existent image returns null", async ({ + expect, + }) => { + const mf = createMiniflare(); + useDispose(mf); + const images = await mf.getImagesBinding("IMAGES"); + + const stream = await images.hosted.image("does-not-exist"); + expect(stream).toBe(null); + }); + + test("update image metadata", async ({ expect }) => { + const mf = createMiniflare(); + useDispose(mf); + const images = await mf.getImagesBinding("IMAGES"); + + await images.hosted.upload(imageBuffer(), { id: "update-test" }); + + const metadata = await images.hosted.update("update-test", { + requireSignedURLs: true, + }); + expect(metadata.requireSignedURLs).toBe(true); + }); + + test("delete image", async ({ expect }) => { + const mf = createMiniflare(); + useDispose(mf); + const images = await mf.getImagesBinding("IMAGES"); + + await images.hosted.upload(imageBuffer(), { id: "delete-test" }); + + const deleted = await images.hosted.delete("delete-test"); + expect(deleted).toBe(true); + + const metadata = await images.hosted.details("delete-test"); + expect(metadata).toBe(null); + }); + + test("delete non-existent image returns false", async ({ expect }) => { + const mf = createMiniflare(); + useDispose(mf); + const images = await mf.getImagesBinding("IMAGES"); + + const deleted = await images.hosted.delete("does-not-exist"); + expect(deleted).toBe(false); + }); + + test("list images", async ({ expect }) => { + const mf = createMiniflare(); + useDispose(mf); + const images = await mf.getImagesBinding("IMAGES"); + + await images.hosted.upload(imageBuffer(), { id: "list-1" }); + + const list = await images.hosted.list(); + 
expect(list.listComplete).toBe(true); + expect(list.images).toHaveLength(1); + expect(list.images[0].id).toBe("list-1"); + }); + + test("list images filtered by creator", async ({ expect }) => { + const mf = createMiniflare(); + useDispose(mf); + const images = await mf.getImagesBinding("IMAGES"); + + await images.hosted.upload(imageBuffer(), { + id: "img1", + creator: "socrates", + }); + await images.hosted.upload(imageBuffer(), { + id: "img2", + creator: "plato", + }); + + const list = await images.hosted.list({ creator: "plato" }); + expect(list.images).toHaveLength(1); + expect(list.images[0].id).toBe("img2"); + }); + + test("list images with cursor pagination", async ({ expect }) => { + const mf = createMiniflare(); + useDispose(mf); + const images = await mf.getImagesBinding("IMAGES"); + + for (const id of ["img1", "img2", "img3", "img4", "img5"]) { + await images.hosted.upload(imageBuffer(), { id }); + } + + const page1 = await images.hosted.list({ limit: 2 }); + expect(page1.images).toHaveLength(2); + expect(page1.listComplete).toBe(false); + expect(page1.cursor).toBeDefined(); + + const page2 = await images.hosted.list({ + limit: 2, + cursor: page1.cursor, + }); + expect(page2.images).toHaveLength(2); + expect(page2.listComplete).toBe(false); + expect(page2.cursor).toBeDefined(); + + const page3 = await images.hosted.list({ + limit: 2, + cursor: page2.cursor, + }); + expect(page3.images).toHaveLength(1); + expect(page3.listComplete).toBe(true); + + const allIds = [ + ...page1.images.map((i) => i.id), + ...page2.images.map((i) => i.id), + ...page3.images.map((i) => i.id), + ]; + expect(new Set(allIds).size).toBe(5); + }); +}); diff --git a/packages/vitest-pool-workers/src/pool/config.ts b/packages/vitest-pool-workers/src/pool/config.ts index ca42a70ff8fb..81ad38710aef 100644 --- a/packages/vitest-pool-workers/src/pool/config.ts +++ b/packages/vitest-pool-workers/src/pool/config.ts @@ -199,7 +199,7 @@ function filterTails( } /** Map that maps worker 
configPaths to their existing remote proxy session data (if any) */ -const remoteProxySessionsDataMap = new Map< +export const remoteProxySessionsDataMap = new Map< string, { session: RemoteProxySession; diff --git a/packages/vitest-pool-workers/src/pool/index.ts b/packages/vitest-pool-workers/src/pool/index.ts index e4ecf1ac68d2..0b85632b6df2 100644 --- a/packages/vitest-pool-workers/src/pool/index.ts +++ b/packages/vitest-pool-workers/src/pool/index.ts @@ -29,7 +29,11 @@ import { experimental_readRawConfig } from "wrangler"; import { workerdBuiltinModules } from "../shared/builtin-modules"; import { createChunkingSocket } from "../shared/chunking-socket"; import { CompatibilityFlagAssertions } from "./compatibility-flag-assertions"; -import { OPTIONS_PATH, parseProjectOptions } from "./config"; +import { + OPTIONS_PATH, + parseProjectOptions, + remoteProxySessionsDataMap, +} from "./config"; import { guessWorkerExports } from "./guess-exports"; import { getProjectPath, @@ -1284,6 +1288,14 @@ export default function (ctx: Vitest): ProcessPool { } } allProjects.clear(); + // Dispose remote proxy sessions to prevent handle leaks + log.debug("Disposing remote proxy sessions..."); + for (const sessionData of remoteProxySessionsDataMap.values()) { + if (sessionData?.session?.dispose) { + promises.push(sessionData.session.dispose()); + } + } + remoteProxySessionsDataMap.clear(); await Promise.all(promises); }, }; diff --git a/packages/vitest-pool-workers/test/remote-proxy-cleanup.test.ts b/packages/vitest-pool-workers/test/remote-proxy-cleanup.test.ts new file mode 100644 index 000000000000..508ef7c147d2 --- /dev/null +++ b/packages/vitest-pool-workers/test/remote-proxy-cleanup.test.ts @@ -0,0 +1,62 @@ +import dedent from "ts-dedent"; +import { test } from "./helpers"; + +// This test requires CLOUDFLARE_ACCOUNT_ID and CLOUDFLARE_API_TOKEN +// environment variables to be set, as it exercises remote proxy sessions +// that connect to the Cloudflare API. 
+test.skipIf( + !process.env.CLOUDFLARE_ACCOUNT_ID || !process.env.CLOUDFLARE_API_TOKEN +)( + "disposes remote proxy sessions on pool close", + async ({ expect, seed, vitestRun }) => { + await seed({ + "vitest.config.mts": dedent` + import { defineWorkersConfig } from "@cloudflare/vitest-pool-workers/config"; + export default defineWorkersConfig({ + test: { + reporters: ["hanging-process", "verbose"], + poolOptions: { + workers: { + wrangler: { configPath: "./wrangler.jsonc" }, + }, + }, + } + }); + `, + "wrangler.jsonc": dedent` + { + "name": "test-worker", + "main": "src/index.ts", + "compatibility_date": "2025-06-01", + "compatibility_flags": ["nodejs_compat"], + "ai": { "binding": "AI" }, + "account_id": "${process.env.CLOUDFLARE_ACCOUNT_ID}" + } + `, + "src/index.ts": dedent` + export default { + async fetch(request, env, ctx) { + return new Response("Hello"); + } + } + `, + "src/index.test.ts": dedent` + import { SELF } from "cloudflare:test"; + import { it, expect } from "vitest"; + it("responds with Hello", async () => { + const response = await SELF.fetch("http://localhost/"); + expect(await response.text()).toBe("Hello"); + }); + `, + }); + + const result = await vitestRun({ + flags: ["--reporter=hanging-process", "--reporter=verbose"], + }); + + expect(result.stderr).not.toContain( + "something prevents Vite server from exiting" + ); + }, + 20_000 +); diff --git a/packages/vitest-pool-workers/turbo.json b/packages/vitest-pool-workers/turbo.json index 6556dcf3e5e5..adaf4792e614 100644 --- a/packages/vitest-pool-workers/turbo.json +++ b/packages/vitest-pool-workers/turbo.json @@ -4,6 +4,9 @@ "tasks": { "build": { "outputs": ["dist/**"] + }, + "test": { + "env": ["CLOUDFLARE_ACCOUNT_ID", "CLOUDFLARE_API_TOKEN"] } } } diff --git a/packages/wrangler/src/__tests__/config-cache.test.ts b/packages/wrangler/src/__tests__/config-cache.test.ts index 45c0b67c287f..4bc16416d4b9 100644 --- a/packages/wrangler/src/__tests__/config-cache.test.ts +++ 
b/packages/wrangler/src/__tests__/config-cache.test.ts @@ -70,12 +70,12 @@ describe("config cache", () => { expect, }) => { // Don't create node_modules - this forces .wrangler/cache - // Note: findUpSync may find a parent node_modules, but we're testing - // the case where there's no existing cache in any found node_modules + // Note: wrangler, when looking up the filesystem, may find a parent node_modules, + // but we're testing the case where there's no existing cache in any found node_modules const cacheFolder = getCacheFolder(); // In a clean temp directory with no node_modules, should use .wrangler/cache - // However, findUpSync may find a parent node_modules, so we just verify - // that getCacheFolder returns a valid path + // However, wrangler, when looking up the filesystem, may find a parent node_modules, + // so we just verify that getCacheFolder returns a valid path expect(cacheFolder).toBeTruthy(); expect(typeof cacheFolder).toBe("string"); }); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 3398e1657349..e00d07c28a8a 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -16437,7 +16437,7 @@ snapshots: devalue: 5.3.2 miniflare: 4.20251210.0 semver: 7.7.3 - vitest: 3.2.3(@types/debug@4.1.12)(@types/node@20.19.9)(@vitest/ui@3.2.3)(jiti@2.6.1)(lightningcss@1.30.2)(msw@2.12.0(@types/node@20.19.9)(typescript@5.9.3))(tsx@4.21.0)(yaml@2.8.1) + vitest: 3.2.3(@types/debug@4.1.12)(@types/node@20.19.9)(@vitest/ui@3.2.3)(jiti@2.6.1)(lightningcss@1.30.2)(msw@2.12.0(@types/node@20.19.9)(typescript@5.8.3))(supports-color@9.2.2)(tsx@4.21.0)(yaml@2.8.1) wrangler: 4.54.0(@cloudflare/workers-types@4.20260305.0) zod: 3.25.76 transitivePeerDependencies: