diff --git a/src/do.ts b/src/do.ts
index b6bb2b6..e37f73f 100644
--- a/src/do.ts
+++ b/src/do.ts
@@ -1,4 +1,10 @@
 import { DurableObject } from 'cloudflare:workers'
+import {
+    initiateDump,
+    processDumpChunk,
+    getDumpStatus,
+    DumpStatus,
+} from './export/dump-async'
 
 export class StarbaseDBDurableObject extends DurableObject {
     // Durable storage for the SQL database
@@ -72,6 +78,9 @@ export class StarbaseDBDurableObject extends DurableObject {
             deleteAlarm: this.deleteAlarm.bind(this),
             getStatistics: this.getStatistics.bind(this),
             executeQuery: this.executeQuery.bind(this),
+            startAsyncDump: this.startAsyncDump.bind(this),
+            getAsyncDumpStatus: this.getAsyncDumpStatus.bind(this),
+            streamDumpDownload: this.streamDumpDownload.bind(this),
         }
     }
 
@@ -104,8 +113,72 @@ export class StarbaseDBDurableObject extends DurableObject {
         return this.storage.deleteAlarm(options)
     }
 
+    /** Initiates an asynchronous database dump stored in R2. */
+    public async startAsyncDump(params: {
+        callbackUrl?: string
+    }): Promise<{ dumpId: string }> {
+        if (!this.env.DATABASE_DUMPS) {
+            throw new Error(
+                'DATABASE_DUMPS R2 binding is not configured. ' +
+                    'Add an R2 bucket binding named DATABASE_DUMPS to your wrangler.toml.'
+            )
+        }
+        const result = await initiateDump(
+            this.sql,
+            this.env.DATABASE_DUMPS,
+            this.storage,
+            params.callbackUrl
+        )
+        // Schedule the first processing alarm immediately
+        await this.setAlarm(Date.now() + 100)
+        return result
+    }
+
+    /** Returns the status of an ongoing or completed dump. */
+    public async getAsyncDumpStatus(
+        dumpId: string
+    ): Promise<DumpStatus | null> {
+        return getDumpStatus(this.storage, dumpId)
+    }
+
+    /** Streams a completed dump file from R2. Returns null if not found/complete. */
+    public async streamDumpDownload(
+        dumpId: string
+    ): Promise<{ body: ReadableStream; key: string } | null> {
+        if (!this.env.DATABASE_DUMPS) return null
+        const status = await getDumpStatus(this.storage, dumpId)
+        if (!status || status.status !== 'complete') return null
+
+        // Retrieve the upload key from dump state
+        const state = await this.storage.get<{ uploadKey: string }>(
+            `dump:${dumpId}`
+        )
+        if (!state) return null
+
+        const object = await this.env.DATABASE_DUMPS.get(state.uploadKey)
+        if (!object) return null
+
+        return { body: object.body, key: state.uploadKey }
+    }
+
     async alarm() {
         try {
+            // Handle in-progress database dump continuation
+            const activeDumpId =
+                await this.storage.get<string>('activeDumpId')
+            if (activeDumpId && this.env.DATABASE_DUMPS) {
+                const isDone = await processDumpChunk(
+                    this.sql,
+                    this.env.DATABASE_DUMPS,
+                    this.storage,
+                    activeDumpId
+                )
+                if (!isDone) {
+                    // More work to do — reschedule immediately
+                    await this.setAlarm(Date.now() + 100)
+                    return
+                }
+            }
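+            // Once processDumpChunk reports completion, execution falls
+            // through to the cron task handling below, so a finishing dump
+            // and scheduled tasks share this alarm without an extra reschedule.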
             // Fetch all the tasks that are marked to emit an event for this cycle.
             const task = (await this.executeQuery({
                 sql: 'SELECT * FROM tmp_cron_tasks WHERE is_active = 1;',
@@ -153,8 +226,8 @@ export class StarbaseDBDurableObject extends DurableObject {
         activeConnections: number
         recentQueries: number
     }> {
-        const sql = `SELECT COUNT(*) as count 
-            FROM tmp_query_log 
+        const sql = `SELECT COUNT(*) as count
+            FROM tmp_query_log
             WHERE created_at >= datetime('now', '-24 hours')`
         const result = (await this.executeQuery({
             sql,
diff --git a/src/export/dump-async.test.ts b/src/export/dump-async.test.ts
new file mode 100644
index 0000000..2a1f5d3
--- /dev/null
+++ b/src/export/dump-async.test.ts
@@ -0,0 +1,250 @@
+import { describe, it, expect, vi } from 'vitest'
+import { initiateDump, processDumpChunk, getDumpStatus } from './dump-async'
+
+// ── Helpers ──────────────────────────────────────────────────────────────────
+
+function makeSql(
+    tables: Record<string, { ddl: string; rows: Record<string, unknown>[] }>
+): SqlStorage {
+    const mock = {
+        exec: vi.fn((sql: string, ...params: unknown[]) => {
+            // sqlite_master DDL query for a specific table
+            const tableNameMatch = sql.match(/name=\?/) && (params[0] as string)
+            if (tableNameMatch && tables[tableNameMatch]) {
+                return { toArray: () => [{ sql: tables[tableNameMatch].ddl }] }
+            }
+
+            // sqlite_master list all tables
+            if (sql.includes("type='table'") && sql.includes('ORDER BY name')) {
+                return {
+                    toArray: () =>
+                        Object.keys(tables).map((name) => ({ name })),
+                }
+            }
+
+            // SELECT * FROM table LIMIT ? OFFSET ?
+            const dataMatch = sql.match(/FROM "([^"]+)" LIMIT \? OFFSET \?/)
+            if (dataMatch) {
+                const tableName = dataMatch[1]
+                const limit = params[0] as number
+                const offset = params[1] as number
+                const rows = tables[tableName]?.rows ?? []
+                return { toArray: () => rows.slice(offset, offset + limit) }
+            }
+
+            return { toArray: () => [] }
+        }),
+        databaseSize: 1024,
+    } as unknown as SqlStorage
+    return mock
+}
+
+function makeR2Bucket() {
+    const uploadedParts: Record<
+        string,
+        { partNumber: number; data: string }[]
+    > = {}
+    const completedObjects: Record<string, string> = {}
+
+    return {
+        createMultipartUpload: vi.fn(async (key: string) => ({
+            uploadId: `upload-${key}`,
+            uploadPart: vi.fn(async (partNumber: number, data: string) => {
+                if (!uploadedParts[key]) uploadedParts[key] = []
+                uploadedParts[key].push({ partNumber, data })
+                return { partNumber, etag: `etag-${partNumber}` }
+            }),
+            complete: vi.fn(async () => {
+                completedObjects[key] = (uploadedParts[key] ?? [])
+                    .sort((a, b) => a.partNumber - b.partNumber)
+                    .map((p) => p.data)
+                    .join('')
+            }),
+            abort: vi.fn(),
+        })),
+        resumeMultipartUpload: vi.fn((key: string, _uploadId: string) => ({
+            uploadPart: vi.fn(async (partNumber: number, data: string) => {
+                if (!uploadedParts[key]) uploadedParts[key] = []
+                uploadedParts[key].push({ partNumber, data })
+                return { partNumber, etag: `etag-${partNumber}` }
+            }),
+            complete: vi.fn(async () => {
+                completedObjects[key] = (uploadedParts[key] ?? [])
+                    .sort((a, b) => a.partNumber - b.partNumber)
+                    .map((p) => p.data)
+                    .join('')
+            }),
+            abort: vi.fn(),
+        })),
+        get: vi.fn(async (key: string) => {
+            if (!(key in completedObjects)) return null
+            const body = completedObjects[key]
+            return {
+                body: new ReadableStream({
+                    start(controller) {
+                        controller.enqueue(new TextEncoder().encode(body))
+                        controller.close()
+                    },
+                }),
+            }
+        }),
+        _completedObjects: completedObjects,
+    } as unknown as R2Bucket & { _completedObjects: Record<string, string> }
}
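+
+// Minimal in-memory stand-in for DurableObjectStorage: only the surface the
+// dump code touches (get/put/delete) is mocked; alarms and transactions are
+// deliberately omitted.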
+function makeStorage() {
+    const store = new Map<string, unknown>()
+    return {
+        get: vi.fn(async (key: string) => store.get(key) ?? null),
+        put: vi.fn(async (key: string, value: unknown) => {
+            store.set(key, value)
+        }),
+        delete: vi.fn(async (key: string) => {
+            store.delete(key)
+        }),
+        _store: store,
+    } as unknown as DurableObjectStorage & { _store: Map<string, unknown> }
+}
+
+// ── Tests ─────────────────────────────────────────────────────────────────────
+
+describe('initiateDump', () => {
+    it('creates a dump state and returns a dumpId', async () => {
+        const sql = makeSql({
+            users: {
+                ddl: 'CREATE TABLE users (id INTEGER, name TEXT)',
+                rows: [],
+            },
+        })
+        const r2 = makeR2Bucket()
+        const storage = makeStorage()
+
+        const result = await initiateDump(sql, r2 as any, storage as any)
+
+        expect(result.dumpId).toBeTruthy()
+        expect(r2.createMultipartUpload).toHaveBeenCalledOnce()
+        const state = storage._store.get(`dump:${result.dumpId}`) as any
+        expect(state.status).toBe('running')
+        expect(state.tables).toEqual(['users'])
+    })
+
+    it('rejects a second dump when one is already running', async () => {
+        const sql = makeSql({})
+        const r2 = makeR2Bucket()
+        const storage = makeStorage()
+
+        await initiateDump(sql, r2 as any, storage as any)
+        // The storage now has activeDumpId pointing to a running dump
+        await expect(
+            initiateDump(sql, r2 as any, storage as any)
+        ).rejects.toThrow(/already in progress/)
+    })
+})
+
+describe('processDumpChunk + getDumpStatus', () => {
+    it('processes a small database in a single chunk and marks it complete', async () => {
+        const sql = makeSql({
+            users: {
+                ddl: 'CREATE TABLE users (id INTEGER, name TEXT)',
+                rows: [
+                    { id: 1, name: 'Alice' },
+                    { id: 2, name: 'Bob' },
+                ],
+            },
+        })
+        const r2 = makeR2Bucket()
+        const storage = makeStorage()
+
+        const { dumpId } = await initiateDump(sql, r2 as any, storage as any)
+        const isDone = await processDumpChunk(
+            sql,
+            r2 as any,
+            storage as any,
+            dumpId
+        )
+
+        expect(isDone).toBe(true)
+
+        const status = await getDumpStatus(storage as any, dumpId)
+        expect(status?.status).toBe('complete')
+        expect(status?.downloadPath).toBe(`/export/dump/${dumpId}/download`)
+    })
+
+    it('produces valid SQL INSERT statements in the dump', async () => {
+        const sql = makeSql({
+            products: {
+                ddl: 'CREATE TABLE products (id INTEGER, title TEXT, price REAL)',
+                rows: [
+                    { id: 1, title: "O'Brien's Ale", price: 4.99 },
+                    { id: 2, title: null, price: 0 },
+                ],
+            },
+        })
+        const r2 = makeR2Bucket() as any
+        const storage = makeStorage()
+
+        const { dumpId } = await initiateDump(sql, r2, storage as any)
+        await processDumpChunk(sql, r2, storage as any, dumpId)
+
+        const state = storage._store.get(`dump:${dumpId}`) as any
+        // Retrieve the upload key
+        const key = state.uploadKey
+        // The completed object should contain INSERT statements
+        const content = r2._completedObjects[key]
+        expect(content).toContain('CREATE TABLE products')
+        expect(content).toContain('INSERT INTO "products"')
+        expect(content).toContain("'O''Brien''s Ale'") // escaped single quote
+        expect(content).toContain('NULL') // null value
+    })
+
+    it('returns null status for an unknown dumpId', async () => {
+        const storage = makeStorage()
+        const status = await getDumpStatus(storage as any, 'nonexistent-id')
+        expect(status).toBeNull()
+    })
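+
+    // Edge case: with no user tables the dump consists solely of the header
+    // comment, uploaded as the single final part (R2 permits the last part
+    // of a multipart upload to be under the 5 MiB minimum).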
+    it('handles an empty database gracefully', async () => {
+        const sql = makeSql({})
+        const r2 = makeR2Bucket()
+        const storage = makeStorage()
+
+        const { dumpId } = await initiateDump(sql, r2 as any, storage as any)
+        const isDone = await processDumpChunk(
+            sql,
+            r2 as any,
+            storage as any,
+            dumpId
+        )
+
+        expect(isDone).toBe(true)
+        const status = await getDumpStatus(storage as any, dumpId)
+        expect(status?.status).toBe('complete')
+    })
+
+    it('calls callback URL on completion', async () => {
+        const fetchMock = vi
+            .spyOn(globalThis, 'fetch')
+            .mockResolvedValueOnce(new Response('ok', { status: 200 }))
+
+        const sql = makeSql({
+            t: { ddl: 'CREATE TABLE t (id INTEGER)', rows: [{ id: 1 }] },
+        })
+        const r2 = makeR2Bucket()
+        const storage = makeStorage()
+
+        const { dumpId } = await initiateDump(
+            sql,
+            r2 as any,
+            storage as any,
+            'https://my-app.example.com/dump-done'
+        )
+        await processDumpChunk(sql, r2 as any, storage as any, dumpId)
+
+        expect(fetchMock).toHaveBeenCalledWith(
+            'https://my-app.example.com/dump-done',
+            expect.objectContaining({ method: 'POST' })
+        )
+
+        fetchMock.mockRestore()
+    })
+})
diff --git a/src/export/dump-async.ts b/src/export/dump-async.ts
new file mode 100644
index 0000000..351ba29
--- /dev/null
+++ b/src/export/dump-async.ts
@@ -0,0 +1,369 @@
+const ROWS_PER_BATCH = 1_000
+/** R2 requires each multipart part (except the last) to be at least 5 MiB. */
+const MIN_MULTIPART_PART_SIZE = 5 * 1024 * 1024
+/** Process for at most ~20 s per alarm cycle to stay well within the 30 s limit. */
+const MAX_ALARM_DURATION_MS = 20_000
+/** DO storage max value size is 128 KiB; use 64 KiB chunks for the pending buffer. */
+const STORAGE_CHUNK_SIZE = 64 * 1024
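+
+// Back-of-envelope sizing (an illustrative assumption, not a measured
+// figure): at roughly 100 bytes per INSERT statement, a 1,000-row batch
+// adds ~100 KiB to the pending buffer, so a 5 MiB part flushes to R2
+// about every 50 batches, i.e. every ~50,000 rows.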
+
+// ─── Types ───────────────────────────────────────────────────────────────────
+
+export interface DumpState {
+    dumpId: string
+    status: 'running' | 'complete' | 'failed'
+    tables: string[]
+    /** Index of the table currently being processed. */
+    currentTableIndex: number
+    /** Row OFFSET into the current table that has been READ (not necessarily flushed to R2). */
+    currentOffset: number
+    /** Row OFFSET into the current table that has been COMMITTED to R2. */
+    committedOffset: number
+    /** Table index that has been fully committed to R2. */
+    committedTableIndex: number
+    uploadKey: string
+    uploadId: string
+    parts: Array<{ partNumber: number; etag: string }>
+    nextPartNumber: number
+    /** Number of 64 KiB chunks used to store the pending buffer. */
+    pendingBufferChunks: number
+    callbackUrl?: string
+    startedAt: number
+    completedAt?: number
+    error?: string
+}
+
+export interface DumpStatus {
+    dumpId: string
+    status: DumpState['status']
+    progress?: {
+        processedTables: number
+        totalTables: number
+        currentTable?: string
+    }
+    downloadPath?: string
+    error?: string
+}
+
+// ─── Buffer storage helpers ──────────────────────────────────────────────────
+
+async function savePendingBuffer(
+    storage: DurableObjectStorage,
+    dumpId: string,
+    buffer: string
+): Promise<number> {
+    if (buffer.length === 0) {
+        await storage.delete(`dump:${dumpId}:buf:count`)
+        return 0
+    }
+    const numChunks = Math.ceil(buffer.length / STORAGE_CHUNK_SIZE)
+    const puts: Promise<void>[] = []
+    for (let i = 0; i < numChunks; i++) {
+        const chunk = buffer.slice(
+            i * STORAGE_CHUNK_SIZE,
+            (i + 1) * STORAGE_CHUNK_SIZE
+        )
+        puts.push(storage.put(`dump:${dumpId}:buf:${i}`, chunk))
+    }
+    puts.push(storage.put(`dump:${dumpId}:buf:count`, numChunks))
+    await Promise.all(puts)
+    return numChunks
+}
+
+async function loadPendingBuffer(
+    storage: DurableObjectStorage,
+    dumpId: string,
+    numChunks: number
+): Promise<string> {
+    if (numChunks === 0) return ''
+    const chunks = await Promise.all(
+        Array.from({ length: numChunks }, (_, i) =>
+            storage.get<string>(`dump:${dumpId}:buf:${i}`)
+        )
+    )
+    return chunks.filter((c): c is string => typeof c === 'string').join('')
+}
+
+// ─── Public API ──────────────────────────────────────────────────────────────
+
+/**
+ * Initiates an asynchronous database dump.
+ * The DO alarm is NOT set here — the caller must schedule one after this returns.
+ */
+export async function initiateDump(
+    sql: SqlStorage,
+    r2Bucket: R2Bucket,
+    storage: DurableObjectStorage,
+    callbackUrl?: string
+): Promise<{ dumpId: string }> {
+    // Reject concurrent dumps
+    const existingId = await storage.get<string>('activeDumpId')
+    if (existingId) {
+        const existing = await storage.get<DumpState>(`dump:${existingId}`)
+        if (existing?.status === 'running') {
+            throw new Error(
+                `A dump is already in progress (id: ${existingId}). ` +
+                    `Check GET /export/dump/${existingId} for status.`
+            )
+        }
+    }
+
+    // List all user tables (exclude internal tmp_ tables)
+    const tablesCursor = sql.exec<{ name: string }>(
+        "SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'tmp_%' ORDER BY name;"
+    )
+    const tables = tablesCursor.toArray().map((r) => r.name)
+
+    const dumpId = crypto.randomUUID()
+    const timestamp = new Date()
+        .toISOString()
+        .replace(/[:.]/g, '-')
+        .slice(0, 19)
+    const uploadKey = `dump_${timestamp}_${dumpId.slice(0, 8)}.sql`
+
+    const mpu = await r2Bucket.createMultipartUpload(uploadKey)
+
+    const header =
+        `-- StarbaseDB database dump\n` +
+        `-- Generated: ${new Date().toISOString()}\n\n`
+
+    const numChunks = await savePendingBuffer(storage, dumpId, header)
+
+    const state: DumpState = {
+        dumpId,
+        status: 'running',
+        tables,
+        currentTableIndex: 0,
+        currentOffset: 0,
+        committedOffset: 0,
+        committedTableIndex: 0,
+        uploadKey,
+        uploadId: mpu.uploadId,
+        parts: [],
+        nextPartNumber: 1,
+        pendingBufferChunks: numChunks,
+        callbackUrl,
+        startedAt: Date.now(),
+    }
+
+    await storage.put(`dump:${dumpId}`, state)
+    await storage.put('activeDumpId', dumpId)
+    return { dumpId }
+}
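+
+// Driving a dump from a Durable Object alarm looks roughly like this
+// (sketch mirroring src/do.ts; `sql`, `env`, and `storage` are the DO's own):
+//
+//   const { dumpId } = await initiateDump(sql, env.DATABASE_DUMPS, storage)
+//   await storage.setAlarm(Date.now() + 100)
+//   // ...then, inside alarm():
+//   const done = await processDumpChunk(sql, env.DATABASE_DUMPS, storage, dumpId)
+//   if (!done) await storage.setAlarm(Date.now() + 100)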
+
+/**
+ * Processes one chunk of dump work in the current alarm invocation.
+ *
+ * @returns `true` when the dump is fully complete, `false` if more work remains.
+ */
+export async function processDumpChunk(
+    sql: SqlStorage,
+    r2Bucket: R2Bucket,
+    storage: DurableObjectStorage,
+    dumpId: string
+): Promise<boolean> {
+    const state = await storage.get<DumpState>(`dump:${dumpId}`)
+    if (!state || state.status !== 'running') return true
+
+    const alarmStart = Date.now()
+    const mpu = r2Bucket.resumeMultipartUpload(state.uploadKey, state.uploadId)
+
+    // Restore mutable state from storage
+    let {
+        currentTableIndex,
+        currentOffset,
+        committedOffset,
+        committedTableIndex,
+        parts,
+        nextPartNumber,
+    } = state
+    let pendingBuffer = await loadPendingBuffer(
+        storage,
+        dumpId,
+        state.pendingBufferChunks
+    )
+
+    const flushPart = async () => {
+        const part = await mpu.uploadPart(nextPartNumber, pendingBuffer)
+        parts = [...parts, { partNumber: nextPartNumber, etag: part.etag }]
+        nextPartNumber++
+        committedTableIndex = currentTableIndex
+        committedOffset = currentOffset
+        pendingBuffer = ''
+    }
+
+    try {
+        while (currentTableIndex < state.tables.length) {
+            const table = state.tables[currentTableIndex]
+
+            // Emit DDL on the first row of each table
+            if (currentOffset === 0) {
+                const ddlCursor = sql.exec<{ sql: string | null }>(
+                    `SELECT sql FROM sqlite_master WHERE type='table' AND name=?;`,
+                    table
+                )
+                const ddlRows = ddlCursor.toArray()
+                if (ddlRows.length && ddlRows[0].sql) {
+                    pendingBuffer += `-- Table: ${table}\n${ddlRows[0].sql};\n`
+                }
+            }
+
+            // Read rows in batches
+            const dataCursor = sql.exec<Record<string, unknown>>(
+                `SELECT * FROM "${table}" LIMIT ? OFFSET ?;`,
+                ROWS_PER_BATCH,
+                currentOffset
+            )
+            const rows = dataCursor.toArray()
+
+            for (const row of rows) {
+                const values = Object.values(row)
+                    .map((v) => {
+                        if (v === null || v === undefined) return 'NULL'
+                        if (typeof v === 'number' || typeof v === 'bigint')
+                            return String(v)
+                        if (typeof v === 'string')
+                            return `'${v.replace(/'/g, "''")}'`
+                        if (v instanceof ArrayBuffer) {
+                            const hex = Array.from(new Uint8Array(v))
+                                .map((b) => b.toString(16).padStart(2, '0'))
+                                .join('')
+                            return `X'${hex}'`
+                        }
+                        return `'${String(v).replace(/'/g, "''")}'`
+                    })
+                    .join(', ')
+                pendingBuffer += `INSERT INTO "${table}" VALUES (${values});\n`
+            }
+
+            if (rows.length < ROWS_PER_BATCH) {
+                // Table complete
+                pendingBuffer += '\n'
+                currentTableIndex++
+                currentOffset = 0
+            } else {
+                currentOffset += ROWS_PER_BATCH
+            }
+
+            // Flush to R2 when buffer is large enough for a multipart part
+            if (pendingBuffer.length >= MIN_MULTIPART_PART_SIZE) {
+                await flushPart()
+            }
+
+            // Yield if running low on alarm time
+            if (Date.now() - alarmStart >= MAX_ALARM_DURATION_MS) {
+                const numChunks = await savePendingBuffer(
+                    storage,
+                    dumpId,
+                    pendingBuffer
+                )
+                await storage.put(`dump:${dumpId}`, {
+                    ...state,
+                    currentTableIndex,
+                    currentOffset,
+                    committedOffset,
+                    committedTableIndex,
+                    parts,
+                    nextPartNumber,
+                    pendingBufferChunks: numChunks,
+                })
+                return false
+            }
+        }
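+
+        // R2 permits only the final part of a multipart upload to be under
+        // the 5 MiB minimum, so the leftover buffer can be uploaded here
+        // without padding.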
+        // All tables processed — flush the remaining buffer as the final part.
+        // initiateDump always writes a header to pendingBuffer, so parts will
+        // have at least one entry after this flush.
+        if (pendingBuffer.length > 0) {
+            await flushPart()
+        }
+
+        await mpu.complete(parts)
+
+        // Clean up buffer chunks from storage
+        const deleteOps = Array.from(
+            { length: state.pendingBufferChunks },
+            (_, i) => storage.delete(`dump:${dumpId}:buf:${i}`)
+        )
+        await Promise.all([
+            ...deleteOps,
+            storage.delete(`dump:${dumpId}:buf:count`),
+        ])
+
+        const completedState: DumpState = {
+            ...state,
+            status: 'complete',
+            currentTableIndex: state.tables.length,
+            parts,
+            pendingBufferChunks: 0,
+            completedAt: Date.now(),
+        }
+        await storage.put(`dump:${dumpId}`, completedState)
+        await storage.delete('activeDumpId')
+
+        if (state.callbackUrl) {
+            try {
+                await fetch(state.callbackUrl, {
+                    method: 'POST',
+                    headers: { 'Content-Type': 'application/json' },
+                    body: JSON.stringify({
+                        event: 'dump_complete',
+                        dumpId,
+                        downloadPath: `/export/dump/${dumpId}/download`,
+                        completedAt: new Date(
+                            completedState.completedAt!
+                        ).toISOString(),
+                    }),
+                })
+            } catch {
+                // Non-fatal: a callback failure does not invalidate the dump
+            }
+        }
+
+        return true
+    } catch (error: any) {
+        try {
+            await mpu.abort()
+        } catch {}
+        await storage.put(`dump:${dumpId}`, {
+            ...state,
+            status: 'failed',
+            error: error?.message ?? 'Unknown error during dump processing',
+            completedAt: Date.now(),
+        })
+        await storage.delete('activeDumpId')
+        return true
+    }
+}
+
+/**
+ * Returns the current status of a dump job, or `null` if the dumpId is unknown.
+ */
+export async function getDumpStatus(
+    storage: DurableObjectStorage,
+    dumpId: string
+): Promise<DumpStatus | null> {
+    const state = await storage.get<DumpState>(`dump:${dumpId}`)
+    if (!state) return null
+
+    return {
+        dumpId: state.dumpId,
+        status: state.status,
+        progress: {
+            processedTables: state.currentTableIndex,
+            totalTables: state.tables.length,
+            currentTable:
+                state.currentTableIndex < state.tables.length
+                    ? state.tables[state.currentTableIndex]
+                    : undefined,
+        },
+        downloadPath:
+            state.status === 'complete'
+                ? `/export/dump/${dumpId}/download`
+                : undefined,
+        error: state.error,
+    }
+}
diff --git a/src/handler.ts b/src/handler.ts
index 3fa0085..8be2171 100644
--- a/src/handler.ts
+++ b/src/handler.ts
@@ -124,6 +124,101 @@
             return dumpDatabaseRoute(this.dataSource, this.config)
         })
 
+        // Async dump for large databases (requires DATABASE_DUMPS R2 binding)
+        this.app.post('/export/dump', this.isInternalSource, async (c) => {
+            const body = await c.req.json().catch(() => ({}))
+            const callbackUrl: string | undefined =
+                typeof body?.callbackUrl === 'string'
+                    ? body.callbackUrl
+                    : undefined
+
+            try {
+                const result = await this.dataSource.rpc.startAsyncDump({
+                    callbackUrl,
+                })
+                const statusUrl = `/export/dump/${result.dumpId}`
+                return createResponse(
+                    {
+                        dumpId: result.dumpId,
+                        status: 'started',
+                        statusUrl,
+                        message:
+                            'Database dump initiated. Poll the statusUrl for progress.',
+                    },
+                    undefined,
+                    202
+                )
+            } catch (error: any) {
+                return createResponse(
+                    undefined,
+                    error?.message ?? 'Failed to start database dump',
+                    error?.message?.includes('not configured') ? 501 : 500
+                )
+            }
+        })
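+
+        // Illustrative client flow (host and dumpId are placeholders, not
+        // part of this change):
+        //   curl -X POST https://<worker-host>/export/dump \
+        //        -d '{"callbackUrl": "https://example.com/hook"}'
+        //   curl https://<worker-host>/export/dump/<dumpId>
+        //   curl -OJ https://<worker-host>/export/dump/<dumpId>/download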
+
+        // Status + completion check for async dumps
+        this.app.get(
+            '/export/dump/:dumpId',
+            this.isInternalSource,
+            async (c) => {
+                const { dumpId } = c.req.param()
+                const status =
+                    await this.dataSource.rpc.getAsyncDumpStatus(dumpId)
+
+                if (!status) {
+                    return createResponse(
+                        undefined,
+                        `Dump '${dumpId}' not found`,
+                        404
+                    )
+                }
+
+                return createResponse(
+                    status,
+                    undefined,
+                    status.status === 'failed' ? 500 : 200
+                )
+            }
+        )
+
+        // Download a completed dump
+        this.app.get(
+            '/export/dump/:dumpId/download',
+            this.isInternalSource,
+            async (c) => {
+                const { dumpId } = c.req.param()
+                const download =
+                    await this.dataSource.rpc.streamDumpDownload(dumpId)
+
+                if (!download) {
+                    const status =
+                        await this.dataSource.rpc.getAsyncDumpStatus(dumpId)
+                    if (!status) {
+                        return createResponse(
+                            undefined,
+                            `Dump '${dumpId}' not found`,
+                            404
+                        )
+                    }
+                    return createResponse(
+                        undefined,
+                        status.status === 'running'
+                            ? 'Dump is still in progress. Try again later.'
+                            : `Dump is not available for download (status: ${status.status})`,
+                        409
+                    )
+                }
+
+                // The dump is a plain-text SQL script, not a SQLite binary
+                return new Response(download.body, {
+                    headers: {
+                        'Content-Type': 'application/sql',
+                        'Content-Disposition': `attachment; filename="${download.key}"`,
+                    },
+                })
+            }
+        )
+
         this.app.get(
             '/export/json/:tableName',
             this.isInternalSource,
diff --git a/src/index.ts b/src/index.ts
index 4d08932..8f9a737 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -56,6 +56,10 @@ export interface Env {
     HYPERDRIVE: Hyperdrive
 
+    // Optional R2 bucket for async database dumps (required for large databases)
+    // Add to wrangler.toml: [[r2_buckets]] binding = "DATABASE_DUMPS" bucket_name = "your-bucket"
+    DATABASE_DUMPS?: R2Bucket
+
     // ## DO NOT REMOVE: TEMPLATE INTERFACE ##
 }
diff --git a/wrangler.toml b/wrangler.toml
index 395c4ac..a0ae38b 100644
--- a/wrangler.toml
+++ b/wrangler.toml
@@ -27,6 +27,12 @@ enabled = true
 name = "DATABASE_DURABLE_OBJECT"
 class_name = "StarbaseDBDurableObject"
 
+# Optional: R2 bucket for large database dumps (required for POST /export/dump)
+# Uncomment and configure to enable async dumps for databases that take longer
+# than 30 seconds to export.
+# [[r2_buckets]]
+# binding = "DATABASE_DUMPS"
+# bucket_name = "your-database-dumps-bucket"
+
 # Durable Object migrations.
 # Docs: https://developers.cloudflare.com/workers/wrangler/configuration/#migrations
 [[migrations]]
@@ -48,7 +54,7 @@ REGION = "auto"
 # STUDIO_USER = "admin"
 # STUDIO_PASS = "123456"
 
-# Toggle to enable default features 
+# Toggle to enable default features
 ENABLE_ALLOWLIST = 0
 ENABLE_RLS = 0