From 8b35a4b803b71d434d37ed5686ca2d74f700f2f7 Mon Sep 17 00:00:00 2001 From: Taylor McKinnon Date: Thu, 12 Mar 2026 13:32:26 +0100 Subject: [PATCH 01/16] impr(CLDSRV-853): Refactor rate limit configuration to use joi schema --- lib/Config.js | 3 +- lib/api/apiUtils/rateLimit/config.js | 296 ++++++++------------ tests/unit/api/apiUtils/rateLimit/config.js | 18 ++ 3 files changed, 133 insertions(+), 184 deletions(-) diff --git a/lib/Config.js b/lib/Config.js index 9177b0a6a7..213003a4ff 100644 --- a/lib/Config.js +++ b/lib/Config.js @@ -1872,8 +1872,7 @@ class Config extends EventEmitter { if (config.rateLimiting?.enabled) { // rate limiting uses the same localCache config defined for S3 to avoid // config duplication. - assert(this.localCache, 'missing required property of rateLimit ' + - 'configuration: localCache'); + assert(this.localCache, 'localCache must be defined when rate limiting is enabled'); // Parse and validate all rate limiting configuration this.rateLimiting = parseRateLimitConfig(config.rateLimiting, this.clusters = this.clusters || 1); diff --git a/lib/api/apiUtils/rateLimit/config.js b/lib/api/apiUtils/rateLimit/config.js index 7a4576c45d..760da245d8 100644 --- a/lib/api/apiUtils/rateLimit/config.js +++ b/lib/api/apiUtils/rateLimit/config.js @@ -1,4 +1,4 @@ -const assert = require('assert'); +const Joi = require('@hapi/joi'); const { errors, ArsenalError } = require('arsenal'); const { rateLimitDefaultConfigCacheTTL, rateLimitDefaultBurstCapacity } = require('../../../../constants'); @@ -126,204 +126,136 @@ const { calculateInterval } = require('./gcra'); */ /** - * Parse and validate the complete rate limiting configuration - * - * @param {Object} rateLimitingConfig - config.rateLimiting object from config.json - * @param {number} clusters - Number of worker clusters (must be numeric) - * @returns {Object} Fully parsed and validated rate limiting configuration - * @throws {Error} If configuration is invalid - */ -function 
parseRateLimitConfig(rateLimitingConfig, clusters) { - const parsed = { - enabled: true, - }; + * RateLimitClassConfig + * @typedef {Object} RateLimitClassConfig + * @property {object} defaultConfig - Default config applied if no resource specific configuration is found. + * @property {number} configCacheTTL - Number of milliseconds to cache per resource configs + * @property {number} defaultBurstCapacity - Default used if resource does not specify a burst capacity. +*/ - // Validate and set serviceUserArn - assert.strictEqual( - typeof rateLimitingConfig.serviceUserArn, 'string', - 'rateLimiting.serviceUserArn must be a string' - ); - parsed.serviceUserArn = rateLimitingConfig.serviceUserArn; +const rateLimitClassConfigSchema = Joi.object({ + defaultConfig: Joi.object({ + requestsPerSecond: Joi.object({ + limit: Joi.number().integer().min(0).required(), + burstCapacity: Joi.number().positive(), + }), + }), + configCacheTTL: Joi.number().integer().positive().default(rateLimitDefaultConfigCacheTTL), + defaultBurstCapacity: Joi.number().positive().default(rateLimitDefaultBurstCapacity), +}).default({ + defaultConfig: undefined, + configCacheTTL: rateLimitDefaultConfigCacheTTL, + defaultBurstCapacity: rateLimitDefaultBurstCapacity, +}); - // Parse and validate node count - if (rateLimitingConfig.nodes !== undefined) { - assert( - typeof rateLimitingConfig.nodes === 'number' && - Number.isInteger(rateLimitingConfig.nodes) && - rateLimitingConfig.nodes > 0, - 'rateLimiting.nodes must be a positive integer' - ); - parsed.nodes = rateLimitingConfig.nodes; - } else { - parsed.nodes = 1; // Default to 1 node - } +const rateLimitConfigSchema = Joi.object({ + enabled: Joi.boolean().default(false), + serviceUserArn: Joi.string().required(), + nodes: Joi.number().integer().positive().default(1), + tokenBucketBufferSize: Joi.number().integer().positive().default(50), + tokenBucketRefillThreshold: Joi.number().integer().positive().default(20), + bucket: 
rateLimitClassConfigSchema, + error: Joi.object({ + statusCode: Joi.number().integer().min(400).max(599).default(503), + code: Joi.string().default(errors.SlowDown.message), + message: Joi.string().default(errors.SlowDown.description), + }).default({ + statusCode: 503, + code: errors.SlowDown.message, + message: errors.SlowDown.description, + }), +}); - if (rateLimitingConfig.tokenBucketBufferSize !== undefined) { - assert( - typeof rateLimitingConfig.tokenBucketBufferSize === 'number' && - Number.isInteger(rateLimitingConfig.tokenBucketBufferSize) && - rateLimitingConfig.tokenBucketBufferSize > 0, - 'rateLimiting.tokenBucketBufferSize must be a positive integer' - ); - parsed.tokenBucketBufferSize = rateLimitingConfig.tokenBucketBufferSize; - } else { - parsed.tokenBucketBufferSize = 50; // (5 seconds @ 10 req/s) - } - - if (rateLimitingConfig.tokenBucketRefillThreshold !== undefined) { - assert( - typeof rateLimitingConfig.tokenBucketRefillThreshold === 'number' && - Number.isInteger(rateLimitingConfig.tokenBucketRefillThreshold) && - rateLimitingConfig.tokenBucketRefillThreshold > 0, - 'rateLimiting.tokenBucketRefillThreshold must be a positive integer' - ); - parsed.tokenBucketRefillThreshold = rateLimitingConfig.tokenBucketRefillThreshold; - } else { - parsed.tokenBucketRefillThreshold = 20; - } - - // Parse bucket configuration - // Always initialize bucket config to support per-bucket rate limits set via API - // This ensures caching and default values work even when no global default is configured - parsed.bucket = { - defaultConfig: undefined, // No global default unless specified - configCacheTTL: rateLimitDefaultConfigCacheTTL, // Default cache TTL - defaultBurstCapacity: rateLimitDefaultBurstCapacity, // Default burst capacity for per-bucket configs +/** + * Transform validated class config (bucket or account) by calculating intervals + * and applying business logic + * + * @param {string} resourceClass - Rate limit class name ('bucket' or 'account') + 
* @param {object} validatedCfg - Already validated config from Joi + * @param {number} clusters - Number of worker processes spawned per instance + * @param {number} nodes - Number of instances that requests will be load balanced across + * @returns {RateLimitClassConfig} Transformed rate limit config + */ +function transformClassConfig(resourceClass, validatedCfg, clusters, nodes) { + const transformed = { + defaultConfig: undefined, + configCacheTTL: validatedCfg.configCacheTTL, + defaultBurstCapacity: validatedCfg.defaultBurstCapacity, }; - // Override with user-provided bucket configuration - if (rateLimitingConfig.bucket) { - assert.strictEqual( - typeof rateLimitingConfig.bucket, 'object', - 'rateLimiting.bucket must be an object' - ); + if (validatedCfg.defaultConfig?.requestsPerSecond) { + const { limit, burstCapacity } = validatedCfg.defaultConfig.requestsPerSecond; - // Parse default config for buckets (global default applied to all buckets) - // If defaultConfig is specified: Parse and validate the bucket rate limit settings - if (rateLimitingConfig.bucket.defaultConfig) { - const bucketConfig = rateLimitingConfig.bucket.defaultConfig; - - // Validate config structure - assert.strictEqual( - typeof bucketConfig, 'object', - 'rate limit config must be an object' + // Validate limit against nodes AND workers (business rule) + const minLimit = nodes * clusters; + if (limit > 0 && limit < minLimit) { + throw new Error( + `rateLimiting.${resourceClass}.defaultConfig.` + + `requestsPerSecond.limit (${limit}) must be >= ` + + `(nodes x workers = ${nodes} x ${clusters} = ${minLimit}) ` + + 'or 0 (unlimited). Each worker enforces limit/nodes/workers locally. 
' + + `With limit < ${minLimit}, per-worker rate would be < 1 req/s, effectively blocking traffic.` ); + } - const limitConfig = {}; - - if (bucketConfig.requestsPerSecond) { - assert.strictEqual( - typeof bucketConfig.requestsPerSecond, 'object', - 'requestsPerSecond must be an object' - ); - - const { limit } = bucketConfig.requestsPerSecond; - - // Validate limit - assert( - typeof limit === 'number' && Number.isInteger(limit) && limit >= 0, - 'requestsPerSecond.limit must be a non-negative integer' - ); - - // Validate limit against nodes AND workers - const minLimit = parsed.nodes * clusters; - if (limit > 0 && limit < minLimit) { - throw new Error( - `requestsPerSecond.limit (${limit}) must be >= ` + - `(nodes × workers = ${parsed.nodes} × ${clusters} = ${minLimit}) ` + - 'or 0 (unlimited). Each worker enforces limit/nodes/workers locally. ' + - `With limit < ${minLimit}, per-worker rate would be < 1 req/s, effectively blocking traffic.` - ); - } - - // Default to global default burst capacity - let burstCapacity = parsed.bucket.defaultBurstCapacity; + // Use provided burstCapacity or fall back to default + const effectiveBurstCapacity = burstCapacity || transformed.defaultBurstCapacity; - // Override if provided in config - if (bucketConfig.requestsPerSecond.burstCapacity !== undefined) { - burstCapacity = bucketConfig.requestsPerSecond.burstCapacity; - assert( - typeof burstCapacity === 'number' && Number.isInteger(burstCapacity) && burstCapacity > 0, - 'requestsPerSecond.burstCapacity must be a positive integer' - ); - } + // Calculate per-worker interval using distributed architecture + const interval = calculateInterval(limit, nodes, clusters); - // Calculate per-worker interval using distributed architecture - const interval = calculateInterval(limit, parsed.nodes, clusters); + // Store both the original limit and the calculated values + transformed.defaultConfig = { + limit, + requestsPerSecond: { + interval, + bucketSize: effectiveBurstCapacity * 
1000, + }, + }; + } - // Store both the original limit and the calculated values - limitConfig.limit = limit; - limitConfig.requestsPerSecond = { - interval, - bucketSize: burstCapacity * 1000, - }; - } + return transformed; +} - parsed.bucket.defaultConfig = limitConfig; +/** + * Parse and validate the complete rate limiting configuration + * + * @param {Object} rateLimitingConfig - config.rateLimiting object from config.json + * @param {number} clusters - Number of worker clusters (must be numeric) + * @returns {Object} Fully parsed and validated rate limiting configuration + * @throws {Error} If configuration is invalid + */ +function parseRateLimitConfig(rateLimitingConfig, clusters) { + // Validate configuration using Joi schema + const { error: validationError, value: validated } = rateLimitConfigSchema.validate( + rateLimitingConfig, + { + abortEarly: false, // Return all validation errors at once + allowUnknown: false, // Don't allow key not present in schema + convert: false, // Don't do type coercion (e.g. 
"1" -> 1) } + ); - // Parse config cache TTL - // If configCacheTTL is specified: Override the default cache TTL - if (rateLimitingConfig.bucket.configCacheTTL !== undefined) { - const configCacheTTL = rateLimitingConfig.bucket.configCacheTTL; - assert( - typeof configCacheTTL === 'number' && - Number.isInteger(configCacheTTL) && - configCacheTTL > 0, - 'rateLimiting.bucket.configCacheTTL must be a positive integer' - ); - parsed.bucket.configCacheTTL = configCacheTTL; - } + if (validationError) { + const details = validationError.details.map(d => d.message).join('; '); + throw new Error(`rateLimiting configuration is invalid: ${details}`); } - // Parse error configuration (supports any HTTP 4xx/5xx status code) - // Default to SlowDown error - parsed.error = errors.SlowDown; - - // Override with custom error if specified - if (rateLimitingConfig.error !== undefined) { - // Validate error is an object - assert.strictEqual( - typeof rateLimitingConfig.error, 'object', - 'rateLimiting.error must be an object' - ); - - // If statusCode is specified, validate and create custom error - if (rateLimitingConfig.error.statusCode !== undefined) { - assert( - typeof rateLimitingConfig.error.statusCode === 'number' && - Number.isInteger(rateLimitingConfig.error.statusCode) && - rateLimitingConfig.error.statusCode >= 400 && - rateLimitingConfig.error.statusCode < 600, - 'rateLimiting.error.statusCode must be a valid HTTP status code (400-599)' - ); - - // Validate error code if provided - const errorCode = rateLimitingConfig.error.code || 'SlowDown'; - if (rateLimitingConfig.error.code !== undefined) { - assert.strictEqual( - typeof rateLimitingConfig.error.code, 'string', - 'rateLimiting.error.code must be a string' - ); - } - - // Validate error message if provided - const errorMessage = rateLimitingConfig.error.message || errors.SlowDown.description; - if (rateLimitingConfig.error.message !== undefined) { - assert.strictEqual( - typeof rateLimitingConfig.error.message, 
'string', - 'rateLimiting.error.message must be a string' - ); - } + // Initialize parsed config with validated values (including defaults) + const parsed = { + enabled: validated.enabled, + serviceUserArn: validated.serviceUserArn, + nodes: validated.nodes, + tokenBucketBufferSize: validated.tokenBucketBufferSize, + tokenBucketRefillThreshold: validated.tokenBucketRefillThreshold, + error: new ArsenalError( + validated.error.code, + validated.error.statusCode, + validated.error.message, + ), + }; - // Override default with custom Arsenal error - parsed.error = new ArsenalError( - errorCode, - rateLimitingConfig.error.statusCode, - errorMessage - ); - } - } + parsed.bucket = transformClassConfig('bucket', validated.bucket, clusters, parsed.nodes); return parsed; } diff --git a/tests/unit/api/apiUtils/rateLimit/config.js b/tests/unit/api/apiUtils/rateLimit/config.js index 8e2ed7493a..ae7d532eae 100644 --- a/tests/unit/api/apiUtils/rateLimit/config.js +++ b/tests/unit/api/apiUtils/rateLimit/config.js @@ -510,6 +510,24 @@ describe('parseRateLimitConfig', () => { /requestsPerSecond.burstCapacity must be a positive integer/ ); }); + + it('should accept float burstCapacity', () => { + const config = { + serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', + bucket: { + defaultConfig: { + requestsPerSecond: { + limit: 100, + burstCapacity: 1.5, + }, + }, + }, + }; + + const result = parseRateLimitConfig(config, 1); + const bucketSize = result.bucket.defaultConfig.requestsPerSecond.bucketSize; + assert.strictEqual(bucketSize, 1.5 * 1000); + }); }); describe('bucket.configCacheTTL validation', () => { From fbbd2fc085d442adc173f05d533868d9a203aaab Mon Sep 17 00:00:00 2001 From: Taylor McKinnon Date: Thu, 12 Mar 2026 13:32:54 +0100 Subject: [PATCH 02/16] impr(CLDSRV-853): Add account level limit config --- lib/api/apiUtils/rateLimit/config.js | 30 +++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git 
a/lib/api/apiUtils/rateLimit/config.js b/lib/api/apiUtils/rateLimit/config.js index 760da245d8..6b7c3969e7 100644 --- a/lib/api/apiUtils/rateLimit/config.js +++ b/lib/api/apiUtils/rateLimit/config.js @@ -25,7 +25,7 @@ const { calculateInterval } = require('./gcra'); * // Optional: Bucket-level rate limiting configuration * "bucket": { * // Optional: Global default rate limit for all buckets - * // Can be overridden per-bucket via PutBucketRateLimit API + * // Limit can be overridden per-bucket via PutBucketRateLimit API * "defaultConfig": { * "requestsPerSecond": { * // Required: Requests per second limit (0 = unlimited) @@ -48,6 +48,32 @@ const { calculateInterval } = require('./gcra'); * "defaultBurstCapacity": 1 * }, * + * // Optional: Account-level rate limiting configuration + * "account": { + * // Optional: Global default rate limit for all accounts + * // Limit can be overridden per-account via UpdateAccountLimits API + * "defaultConfig": { + * "requestsPerSecond": { + * // Required: Requests per second limit (0 = unlimited) + * "limit": 100, + * + * // Optional: Burst capacity (allows temporary spikes) + * // Default: 1 (no burst allowed) + * // Higher values allow more requests in quick succession + * "burstCapacity": 2 + * } + * }, + * + * // Optional: How long to cache rate limit configs (milliseconds) + * // Default: 30000 (30 seconds) + * // Higher values = better performance, slower config updates + * "configCacheTTL": 30000, + * + * // Optional: Default burst capacity for per-account configs + * // Default: 1 + * "defaultBurstCapacity": 1 + * }, + * * // Optional: Custom error response for rate limited requests * "error": { * // Optional: HTTP status code (400-599) @@ -155,6 +181,7 @@ const rateLimitConfigSchema = Joi.object({ tokenBucketBufferSize: Joi.number().integer().positive().default(50), tokenBucketRefillThreshold: Joi.number().integer().positive().default(20), bucket: rateLimitClassConfigSchema, + account: rateLimitClassConfigSchema, error: 
Joi.object({ statusCode: Joi.number().integer().min(400).max(599).default(503), code: Joi.string().default(errors.SlowDown.message), @@ -256,6 +283,7 @@ function parseRateLimitConfig(rateLimitingConfig, clusters) { }; parsed.bucket = transformClassConfig('bucket', validated.bucket, clusters, parsed.nodes); + parsed.account = transformClassConfig('account', validated.account, clusters, parsed.nodes); return parsed; } From e62e716a57edc957029218999deb6cea1657ae90 Mon Sep 17 00:00:00 2001 From: Taylor McKinnon Date: Mon, 9 Mar 2026 14:18:05 +0100 Subject: [PATCH 03/16] impr(CLDSRV-853): Extend unit tests for rate limit config parsing --- tests/unit/api/apiUtils/rateLimit/config.js | 438 +++++++++++++++++--- 1 file changed, 385 insertions(+), 53 deletions(-) diff --git a/tests/unit/api/apiUtils/rateLimit/config.js b/tests/unit/api/apiUtils/rateLimit/config.js index ae7d532eae..8d6d5ae95b 100644 --- a/tests/unit/api/apiUtils/rateLimit/config.js +++ b/tests/unit/api/apiUtils/rateLimit/config.js @@ -28,7 +28,7 @@ describe('parseRateLimitConfig', () => { it('should parse complete valid configuration', () => { const result = parseRateLimitConfig(validConfig, 10); - assert.strictEqual(result.enabled, true); + assert.strictEqual(result.enabled, false); // Default when not specified assert.strictEqual(result.serviceUserArn, validConfig.serviceUserArn); assert.strictEqual(result.nodes, 2); assert.strictEqual(result.bucket.configCacheTTL, 300); @@ -46,9 +46,11 @@ describe('parseRateLimitConfig', () => { const result = parseRateLimitConfig(minimalConfig, 5); - assert.strictEqual(result.enabled, true); + assert.strictEqual(result.enabled, false); // Default assert.strictEqual(result.serviceUserArn, minimalConfig.serviceUserArn); assert.strictEqual(result.nodes, 1); // Default + assert.strictEqual(result.tokenBucketBufferSize, 50); // Default + assert.strictEqual(result.tokenBucketRefillThreshold, 20); // Default // Bucket config is always initialized for per-bucket rate limiting 
via API assert(result.bucket); assert.strictEqual(result.bucket.defaultConfig, undefined); // No global default @@ -96,13 +98,56 @@ describe('parseRateLimitConfig', () => { }); }); + describe('enabled field validation', () => { + it('should default enabled to false when not specified', () => { + const config = { + serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', + }; + + const result = parseRateLimitConfig(config, 1); + assert.strictEqual(result.enabled, false); + }); + + it('should accept enabled: true', () => { + const config = { + enabled: true, + serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', + }; + + const result = parseRateLimitConfig(config, 1); + assert.strictEqual(result.enabled, true); + }); + + it('should accept enabled: false', () => { + const config = { + enabled: false, + serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', + }; + + const result = parseRateLimitConfig(config, 1); + assert.strictEqual(result.enabled, false); + }); + + it('should throw if enabled is not a boolean', () => { + const config = { + enabled: 'yes', + serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', + }; + + assert.throws( + () => parseRateLimitConfig(config, 1), + /rateLimiting configuration is invalid/ + ); + }); + }); + describe('serviceUserArn validation', () => { it('should throw if serviceUserArn is missing', () => { const config = { nodes: 1 }; assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.serviceUserArn must be a string/ + /rateLimiting configuration is invalid.*"serviceUserArn" is required/ ); }); @@ -113,7 +158,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.serviceUserArn must be a string/ + /rateLimiting configuration is invalid.*"serviceUserArn" must be a string/ ); }); }); @@ -146,7 +191,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - 
/rateLimiting.nodes must be a positive integer/ + /rateLimiting configuration is invalid.*"nodes" must be a positive number/ ); }); @@ -158,7 +203,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.nodes must be a positive integer/ + /rateLimiting configuration is invalid.*"nodes" must be a positive number/ ); }); @@ -170,7 +215,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.nodes must be a positive integer/ + /rateLimiting configuration is invalid.*"nodes" must be an integer/ ); }); @@ -182,7 +227,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.nodes must be a positive integer/ + /rateLimiting configuration is invalid.*"nodes" must be a number/ ); }); }); @@ -215,7 +260,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.tokenBucketBufferSize must be a positive integer/ + /rateLimiting configuration is invalid.*"tokenBucketBufferSize" must be a positive number/ ); }); @@ -227,7 +272,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.tokenBucketBufferSize must be a positive integer/ + /rateLimiting configuration is invalid.*"tokenBucketBufferSize" must be a positive number/ ); }); @@ -239,7 +284,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.tokenBucketBufferSize must be a positive integer/ + /rateLimiting configuration is invalid.*"tokenBucketBufferSize" must be an integer/ ); }); }); @@ -272,7 +317,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.tokenBucketRefillThreshold must be a positive integer/ + /rateLimiting configuration is invalid.*"tokenBucketRefillThreshold" must be a positive number/ ); }); 
@@ -284,7 +329,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.tokenBucketRefillThreshold must be a positive integer/ + /rateLimiting configuration is invalid.*"tokenBucketRefillThreshold" must be a positive number/ ); }); @@ -296,7 +341,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.tokenBucketRefillThreshold must be a positive integer/ + /rateLimiting configuration is invalid.*"tokenBucketRefillThreshold" must be an integer/ ); }); }); @@ -310,7 +355,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.bucket must be an object/ + /rateLimiting configuration is invalid.*"bucket" must be of type object/ ); }); @@ -342,7 +387,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rate limit config must be an object/ + /rateLimiting configuration is invalid.*"bucket.defaultConfig" must be of type object/ ); }); @@ -358,7 +403,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /requestsPerSecond must be an object/ + /rateLimiting configuration is invalid.*"bucket.defaultConfig.requestsPerSecond" must be of type object/ ); }); @@ -393,7 +438,26 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /requestsPerSecond.limit must be a non-negative integer/ + // eslint-disable-next-line max-len + /rateLimiting configuration is invalid.*"bucket.defaultConfig.requestsPerSecond.limit" must be larger than or equal to 0/ + ); + }); + + it('should throw if limit is missing when requestsPerSecond is provided', () => { + const config = { + serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', + bucket: { + defaultConfig: { + requestsPerSecond: { + burstCapacity: 10, + }, + }, + }, + }; + + assert.throws( + () => 
parseRateLimitConfig(config, 1), + /rateLimiting configuration is invalid.*"bucket.defaultConfig.requestsPerSecond.limit" is required/ ); }); }); @@ -450,7 +514,8 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /requestsPerSecond.burstCapacity must be a positive integer/ + // eslint-disable-next-line max-len + /rateLimiting configuration is invalid.*"bucket.defaultConfig.requestsPerSecond.burstCapacity" must be a positive number/ ); }); @@ -469,26 +534,8 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /requestsPerSecond.burstCapacity must be a positive integer/ - ); - }); - - it('should throw if burstCapacity is not an integer', () => { - const config = { - serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', - bucket: { - defaultConfig: { - requestsPerSecond: { - limit: 100, - burstCapacity: 5.5, - }, - }, - }, - }; - - assert.throws( - () => parseRateLimitConfig(config, 1), - /requestsPerSecond.burstCapacity must be a positive integer/ + // eslint-disable-next-line max-len + /rateLimiting configuration is invalid.*"bucket.defaultConfig.requestsPerSecond.burstCapacity" must be a positive number/ ); }); @@ -507,7 +554,8 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /requestsPerSecond.burstCapacity must be a positive integer/ + // eslint-disable-next-line max-len + /rateLimiting configuration is invalid.*"bucket.defaultConfig.requestsPerSecond.burstCapacity" must be a number/ ); }); @@ -553,7 +601,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.bucket.configCacheTTL must be a positive integer/ + /rateLimiting configuration is invalid.*"bucket.configCacheTTL" must be a positive number/ ); }); @@ -567,7 +615,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - 
/rateLimiting.bucket.configCacheTTL must be a positive integer/ + /rateLimiting configuration is invalid.*"bucket.configCacheTTL" must be a positive number/ ); }); @@ -581,7 +629,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.bucket.configCacheTTL must be a positive integer/ + /rateLimiting configuration is invalid.*"bucket.configCacheTTL" must be an integer/ ); }); @@ -595,7 +643,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.bucket.configCacheTTL must be a positive integer/ + /rateLimiting configuration is invalid.*"bucket.configCacheTTL" must be a number/ ); }); }); @@ -609,7 +657,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.error must be an object/ + /rateLimiting configuration is invalid.*"error" must be of type object/ ); }); @@ -652,7 +700,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.error.statusCode must be a valid HTTP status code \(400-599\)/ + /rateLimiting configuration is invalid.*"error.statusCode" must be larger than or equal to 400/ ); }); @@ -667,7 +715,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.error.statusCode must be a valid HTTP status code \(400-599\)/ + /rateLimiting configuration is invalid.*"error.statusCode" must be less than or equal to 599/ ); }); @@ -682,7 +730,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.error.statusCode must be a valid HTTP status code \(400-599\)/ + /rateLimiting configuration is invalid.*"error.statusCode" must be an integer/ ); }); @@ -697,7 +745,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.error.statusCode must be a valid HTTP 
status code \(400-599\)/ + /rateLimiting configuration is invalid.*"error.statusCode" must be a number/ ); }); @@ -726,7 +774,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.error.message must be a string/ + /rateLimiting configuration is invalid.*"error.message" must be a string/ ); }); @@ -783,7 +831,7 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 1), - /rateLimiting.error.code must be a string/ + /rateLimiting configuration is invalid.*"error.code" must be a string/ ); }); }); @@ -796,7 +844,7 @@ describe('parseRateLimitConfig', () => { bucket: { defaultConfig: { requestsPerSecond: { - limit: 10, // Less than 5 nodes × 10 workers = 50 + limit: 10, // Less than 5 nodes x 10 workers = 50 }, }, }, @@ -804,9 +852,293 @@ describe('parseRateLimitConfig', () => { assert.throws( () => parseRateLimitConfig(config, 10), - /requestsPerSecond\.limit \(10\) must be >= \(nodes × workers = 5 × 10 = 50\)/ + /requestsPerSecond\.limit \(10\) must be >= \(nodes x workers = 5 x 10 = 50\)/ + ); + }); + }); + + describe('account-level rate limiting', () => { + it('should parse account configuration similar to bucket', () => { + const config = { + serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', + account: { + defaultConfig: { + requestsPerSecond: { + limit: 500, + burstCapacity: 5, + }, + }, + configCacheTTL: 60000, + defaultBurstCapacity: 2, + }, + }; + + const result = parseRateLimitConfig(config, 5); + + assert(result.account); + assert(result.account.defaultConfig); + assert.strictEqual(result.account.defaultConfig.limit, 500); + assert.strictEqual(result.account.configCacheTTL, 60000); + assert.strictEqual(result.account.defaultBurstCapacity, 2); + }); + + it('should support both bucket and account rate limiting simultaneously', () => { + const config = { + serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', + bucket: { + 
defaultConfig: { + requestsPerSecond: { + limit: 1000, + }, + }, + }, + account: { + defaultConfig: { + requestsPerSecond: { + limit: 500, + }, + }, + }, + }; + + const result = parseRateLimitConfig(config, 5); + + assert(result.bucket.defaultConfig); + assert.strictEqual(result.bucket.defaultConfig.limit, 1000); + assert(result.account.defaultConfig); + assert.strictEqual(result.account.defaultConfig.limit, 500); + }); + + it('should validate account limit against nodes and workers', () => { + const config = { + serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', + nodes: 10, + account: { + defaultConfig: { + requestsPerSecond: { + limit: 20, // Less than 10 nodes x 5 workers = 50 + }, + }, + }, + }; + + assert.throws( + () => parseRateLimitConfig(config, 5), + /requestsPerSecond\.limit \(20\) must be >= \(nodes x workers = 10 x 5 = 50\)/ + ); + }); + + it('should throw if account is not an object', () => { + const config = { + serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', + account: 'invalid', + }; + + assert.throws( + () => parseRateLimitConfig(config, 1), + /rateLimiting configuration is invalid.*"account" must be of type object/ + ); + }); + + it('should apply defaults to account config when fields are omitted', () => { + const config = { + serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', + account: { + defaultConfig: { + requestsPerSecond: { + limit: 100, + }, + }, + }, + }; + + const result = parseRateLimitConfig(config, 1); + + assert.strictEqual(result.account.configCacheTTL, constants.rateLimitDefaultConfigCacheTTL); + assert.strictEqual(result.account.defaultBurstCapacity, constants.rateLimitDefaultBurstCapacity); + }); + }); + + describe('account burstCapacity validation', () => { + it('should use default burstCapacity when not provided', () => { + const config = { + serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', + account: { + defaultConfig: { + requestsPerSecond: { + 
limit: 100, + }, + }, + }, + }; + + const result = parseRateLimitConfig(config, 1); + const bucketSize = result.account.defaultConfig.requestsPerSecond.bucketSize; + assert.strictEqual(bucketSize, constants.rateLimitDefaultBurstCapacity * 1000); + }); + + it('should use custom burstCapacity when provided', () => { + const config = { + serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', + account: { + defaultConfig: { + requestsPerSecond: { + limit: 100, + burstCapacity: 20, + }, + }, + }, + }; + + const result = parseRateLimitConfig(config, 1); + const bucketSize = result.account.defaultConfig.requestsPerSecond.bucketSize; + assert.strictEqual(bucketSize, 20 * 1000); + }); + + it('should accept float burstCapacity', () => { + const config = { + serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', + account: { + defaultConfig: { + requestsPerSecond: { + limit: 100, + burstCapacity: 1.5, + }, + }, + }, + }; + + const result = parseRateLimitConfig(config, 1); + const bucketSize = result.account.defaultConfig.requestsPerSecond.bucketSize; + assert.strictEqual(bucketSize, 1.5 * 1000); + }); + + it('should throw if burstCapacity is negative', () => { + const config = { + serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', + account: { + defaultConfig: { + requestsPerSecond: { + limit: 100, + burstCapacity: -5, + }, + }, + }, + }; + + assert.throws( + () => parseRateLimitConfig(config, 1), + // eslint-disable-next-line max-len + /rateLimiting configuration is invalid.*"account.defaultConfig.requestsPerSecond.burstCapacity" must be a positive number/ + ); + }); + + it('should throw if burstCapacity is zero', () => { + const config = { + serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', + account: { + defaultConfig: { + requestsPerSecond: { + limit: 100, + burstCapacity: 0, + }, + }, + }, + }; + + assert.throws( + () => parseRateLimitConfig(config, 1), + // eslint-disable-next-line max-len + /rateLimiting 
configuration is invalid.*"account.defaultConfig.requestsPerSecond.burstCapacity" must be a positive number/ ); }); + + it('should throw if burstCapacity is not a number', () => { + const config = { + serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', + account: { + defaultConfig: { + requestsPerSecond: { + limit: 100, + burstCapacity: 'ten', + }, + }, + }, + }; + + assert.throws( + () => parseRateLimitConfig(config, 1), + // eslint-disable-next-line max-len + /rateLimiting configuration is invalid.*"account.defaultConfig.requestsPerSecond.burstCapacity" must be a number/ + ); + }); + }); + + describe('schema validation - unknown fields', () => { + it('should reject unknown top-level fields', () => { + const config = { + serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', + unknownField: 'invalid', + }; + + assert.throws( + () => parseRateLimitConfig(config, 1), + /rateLimiting configuration is invalid.*"unknownField" is not allowed/ + ); + }); + + it('should reject unknown fields in bucket config', () => { + const config = { + serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', + bucket: { + unknownField: 'invalid', + }, + }; + + assert.throws( + () => parseRateLimitConfig(config, 1), + /rateLimiting configuration is invalid.*"bucket.unknownField" is not allowed/ + ); + }); + + it('should reject unknown fields in error config', () => { + const config = { + serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', + error: { + statusCode: 503, + unknownField: 'invalid', + }, + }; + + assert.throws( + () => parseRateLimitConfig(config, 1), + /rateLimiting configuration is invalid.*"error.unknownField" is not allowed/ + ); + }); + }); + + describe('schema validation - multiple errors', () => { + it('should report all validation errors at once', () => { + const config = { + // Missing serviceUserArn (required) + nodes: -5, // Invalid (must be positive) + tokenBucketBufferSize: 0, // Invalid (must be 
positive) + bucket: 'not an object', // Invalid type + }; + + assert.throws(() => { + try { + parseRateLimitConfig(config, 1); + } catch (error) { + // Should contain multiple errors + assert(error.message.includes('serviceUserArn')); + assert(error.message.includes('nodes')); + assert(error.message.includes('tokenBucketBufferSize')); + assert(error.message.includes('bucket')); + throw error; + } + }, /rateLimiting configuration is invalid/); + }); }); describe('calculation verification', () => { From 219733eb1c4f32c9c20e7c83582acd76b673a75d Mon Sep 17 00:00:00 2001 From: Taylor McKinnon Date: Fri, 13 Mar 2026 09:11:17 +0100 Subject: [PATCH 04/16] impr(CLDSRV-852): Refator cache helpers to allow for namespaced keys --- lib/api/apiUtils/rateLimit/cache.js | 55 +++++++++++---------- lib/api/bucketDeleteRateLimit.js | 4 +- lib/api/bucketPutRateLimit.js | 4 +- tests/unit/api/apiUtils/rateLimit/cache.js | 57 ++++++++++++---------- 4 files changed, 64 insertions(+), 56 deletions(-) diff --git a/lib/api/apiUtils/rateLimit/cache.js b/lib/api/apiUtils/rateLimit/cache.js index a9bef0c1cd..a73147b580 100644 --- a/lib/api/apiUtils/rateLimit/cache.js +++ b/lib/api/apiUtils/rateLimit/cache.js @@ -1,63 +1,66 @@ const configCache = new Map(); -// Load tracking for adaptive burst capacity -// Map> - rolling 1-second window -const requestTimestamps = new Map(); +const namespace = { + bucket: 'bkt', +}; -function setCachedConfig(key, limitConfig, ttl) { +function cacheSet(cache, key, value, ttl) { const expiry = Date.now() + ttl; - configCache.set(key, { expiry, config: limitConfig }); + cache.set(key, { expiry, value }); } -function getCachedConfig(key) { - const value = configCache.get(key); - if (value === undefined) { +function cacheGet(cache, key) { + const cachedValue = cache.get(key); + if (cachedValue === undefined) { return undefined; } - const { expiry, config } = value; + const { expiry, value } = cachedValue; if (expiry <= Date.now()) { - configCache.delete(key); + 
cache.delete(key); return undefined; } - return config; + return value; +} + +function cacheDelete(cache, key) { + cache.delete(key); } -function expireCachedConfigs(now) { +function cacheExpire(cache) { + const now = Date.now(); + const toRemove = []; - for (const [key, { expiry }] of configCache.entries()) { + for (const [key, { expiry }] of cache.entries()) { if (expiry <= now) { toRemove.push(key); } } for (const key of toRemove) { - configCache.delete(key); + cache.delete(key); } return toRemove.length; } -/** - * Invalidate cached config for a specific bucket - * - * @param {string} bucketName - Bucket name - * @returns {boolean} True if entry was found and removed - */ -function invalidateCachedConfig(bucketName) { - const cacheKey = `bucket:${bucketName}`; - return configCache.delete(cacheKey); +function formatKeyDecorator(fn) { + return (resourceClass, resourceId, ...args) => fn(`${resourceClass}:${resourceId}`, ...args); } +const getCachedConfig = formatKeyDecorator(cacheGet.bind(null, configCache)); +const setCachedConfig = formatKeyDecorator(cacheSet.bind(null, configCache)); +const deleteCachedConfig = formatKeyDecorator(cacheDelete.bind(null, configCache)); +const expireCachedConfigs = cacheExpire.bind(null, configCache); + module.exports = { + namespace, setCachedConfig, getCachedConfig, expireCachedConfigs, - invalidateCachedConfig, - + deleteCachedConfig, // Do not access directly // Used only for tests configCache, - requestTimestamps, }; diff --git a/lib/api/bucketDeleteRateLimit.js b/lib/api/bucketDeleteRateLimit.js index 81bfde32eb..f500463746 100644 --- a/lib/api/bucketDeleteRateLimit.js +++ b/lib/api/bucketDeleteRateLimit.js @@ -4,7 +4,7 @@ const metadata = require('../metadata/wrapper'); const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const collectCorsHeaders = require('../utilities/collectCorsHeaders'); const { isRateLimitServiceUser } = require('./apiUtils/authorization/serviceUser'); -const { 
invalidateCachedConfig } = require('./apiUtils/rateLimit/cache'); +const cache = require('./apiUtils/rateLimit/cache'); const { removeTokenBucket } = require('./apiUtils/rateLimit/tokenBucket'); /** @@ -52,7 +52,7 @@ function bucketDeleteRateLimit(authInfo, request, log, callback) { return callback(err, corsHeaders); } // Invalidate cache and remove token bucket - invalidateCachedConfig(bucketName); + cache.deleteCachedConfig(cache.namespace.bucket, bucketName); removeTokenBucket(bucketName); log.debug('invalidated rate limit cache and token bucket for bucket', { bucketName }); // TODO: implement Utapi metric support diff --git a/lib/api/bucketPutRateLimit.js b/lib/api/bucketPutRateLimit.js index 116c8940d9..d895678775 100644 --- a/lib/api/bucketPutRateLimit.js +++ b/lib/api/bucketPutRateLimit.js @@ -6,7 +6,7 @@ const { config } = require('../Config'); const metadata = require('../metadata/wrapper'); const { standardMetadataValidateBucket } = require('../metadata/metadataUtils'); const { isRateLimitServiceUser } = require('./apiUtils/authorization/serviceUser'); -const { invalidateCachedConfig } = require('./apiUtils/rateLimit/cache'); +const cache = require('./apiUtils/rateLimit/cache'); function parseRequestBody(requestBody, callback) { try { @@ -94,7 +94,7 @@ function bucketPutRateLimit(authInfo, request, log, callback) { return callback(err, corsHeaders); } // Invalidate cache so new limit takes effect immediately - invalidateCachedConfig(bucketName); + cache.deleteCachedConfig(cache.namespace.bucket, bucketName); log.debug('invalidated rate limit cache for bucket', { bucketName }); // TODO: implement Utapi metric support return callback(null, corsHeaders); diff --git a/tests/unit/api/apiUtils/rateLimit/cache.js b/tests/unit/api/apiUtils/rateLimit/cache.js index dc8b15552e..cb3e705fa9 100644 --- a/tests/unit/api/apiUtils/rateLimit/cache.js +++ b/tests/unit/api/apiUtils/rateLimit/cache.js @@ -3,11 +3,12 @@ const sinon = require('sinon'); const constants = 
require('../../../../../constants'); const { + namespace, configCache, getCachedConfig, setCachedConfig, expireCachedConfigs, - invalidateCachedConfig, + deleteCachedConfig, } = require('../../../../../lib/api/apiUtils/rateLimit/cache'); describe('test limit config cache storage', () => { @@ -22,66 +23,70 @@ describe('test limit config cache storage', () => { clock.restore(); }); + beforeEach(() => { + configCache.clear(); + }); + it('should add config to cache', () => { - setCachedConfig('foo', 10, constants.rateLimitDefaultConfigCacheTTL); + setCachedConfig(namespace.bucket, 'foo', 10, constants.rateLimitDefaultConfigCacheTTL); assert.deepStrictEqual( - configCache.get('foo'), + configCache.get(`${namespace.bucket}:foo`), { expiry: now + constants.rateLimitDefaultConfigCacheTTL, - config: 10, + value: 10, } ); }); it('should get a non expired config', () => { - setCachedConfig('foo', 10, constants.rateLimitDefaultConfigCacheTTL); - assert.strictEqual(getCachedConfig('foo'), 10); + setCachedConfig(namespace.bucket, 'foo', 10, constants.rateLimitDefaultConfigCacheTTL); + assert.strictEqual(getCachedConfig(namespace.bucket, 'foo'), 10); }); it('should return undefined and delete the key for an expired config', () => { - configCache.set('foo', { + configCache.set(`${namespace.bucket}:foo`, { expiry: now - 10000, - config: 10, + value: 10, }); - assert.strictEqual(getCachedConfig('foo'), undefined); + assert.strictEqual(getCachedConfig(namespace.bucket, 'foo'), undefined); }); - it('should expire configs less than or equal to the given timestamp', () => { + it('should expire configs less than or equal to current time', () => { configCache.set('past', { expiry: now - 10000, - config: 10, + value: 10, }); configCache.set('present', { expiry: now, - config: 10, + value: 10, }); configCache.set('future', { expiry: now + 10000, - config: 10, + value: 10, }); - expireCachedConfigs(now); + // expireCachedConfigs uses Date.now() internally; fake clock is set to `now` + 
expireCachedConfigs(); assert.strictEqual(configCache.get('past'), undefined); assert.strictEqual(configCache.get('present'), undefined); assert.deepStrictEqual(configCache.get('future'), { expiry: now + 10000, - config: 10, + value: 10, }); }); - it('should invalidate cached config for a specific bucket', () => { - setCachedConfig('bucket:my-bucket', { limit: 100 }, constants.rateLimitDefaultConfigCacheTTL); - setCachedConfig('bucket:other-bucket', { limit: 200 }, constants.rateLimitDefaultConfigCacheTTL); + it('should delete cached config for a specific resource', () => { + setCachedConfig(namespace.bucket, 'my-bucket', { limit: 100 }, constants.rateLimitDefaultConfigCacheTTL); + setCachedConfig(namespace.bucket, 'other-bucket', { limit: 200 }, constants.rateLimitDefaultConfigCacheTTL); - const result = invalidateCachedConfig('my-bucket'); + deleteCachedConfig(namespace.bucket, 'my-bucket'); - assert.strictEqual(result, true); - assert.strictEqual(getCachedConfig('bucket:my-bucket'), undefined); - assert.deepStrictEqual(getCachedConfig('bucket:other-bucket'), { limit: 200 }); + assert.strictEqual(getCachedConfig(namespace.bucket, 'my-bucket'), undefined); + assert.deepStrictEqual(getCachedConfig(namespace.bucket, 'other-bucket'), { limit: 200 }); }); - it('should return false when invalidating non-existent bucket', () => { - const result = invalidateCachedConfig('non-existent-bucket'); - - assert.strictEqual(result, false); + it('should be a no-op when deleting a non-existent key', () => { + assert.doesNotThrow(() => { + deleteCachedConfig(namespace.bucket, 'non-existent-bucket'); + }); }); }); From 5fd1c111a6f2b50d069de8afc38fb34dbd4006b1 Mon Sep 17 00:00:00 2001 From: Taylor McKinnon Date: Fri, 13 Mar 2026 14:05:45 +0100 Subject: [PATCH 05/16] impr(CLDSRV-852): Refactor rate limit helpers --- lib/api/apiUtils/rateLimit/helpers.js | 129 ++++--- tests/unit/api/apiUtils/rateLimit/helpers.js | 362 ++++++++----------- 2 files changed, 212 insertions(+), 279 
deletions(-) diff --git a/lib/api/apiUtils/rateLimit/helpers.js b/lib/api/apiUtils/rateLimit/helpers.js index 80c039b097..8bc8d7b83c 100644 --- a/lib/api/apiUtils/rateLimit/helpers.js +++ b/lib/api/apiUtils/rateLimit/helpers.js @@ -1,24 +1,12 @@ const { config } = require('../../../Config'); const cache = require('./cache'); -const constants = require('../../../../constants'); const { getTokenBucket } = require('./tokenBucket'); const { policies: { actionMaps: { actionMapBucketRateLimit } } } = require('arsenal'); const rateLimitApiActions = Object.keys(actionMapBucketRateLimit); /** - * Get rate limit configuration from cache only (no metadata fetch) - * - * @param {string} bucketName - Bucket name - * @returns {object|null|undefined} Rate limit config, null (no limit), or undefined (not cached) - */ -function getRateLimitFromCache(bucketName) { - const cacheKey = `bucket:${bucketName}`; - return cache.getCachedConfig(cacheKey); -} - -/** - * Extract rate limit configuration from bucket metadata and cache it + * Extract rate limit configuration from bucket metadata or global rate limit configuration. * * Resolves in priority order: * 1. 
Per-bucket configuration (from bucket metadata) @@ -30,24 +18,21 @@ function getRateLimitFromCache(bucketName) { * @param {object} log - Logger instance * @returns {object|null} Rate limit config or null if no limit */ -function extractAndCacheRateLimitConfig(bucket, bucketName, log) { - const cacheKey = `bucket:${bucketName}`; - const cacheTTL = config.rateLimiting.bucket?.configCacheTTL || - constants.rateLimitDefaultConfigCacheTTL; - +function extractBucketRateLimitConfig(bucket, bucketName, log) { // Try per-bucket config first const bucketConfig = bucket.getRateLimitConfiguration(); if (bucketConfig) { const data = bucketConfig.getData(); const limitConfig = { limit: data.RequestsPerSecond.Limit, + burstCapacity: config.rateLimiting.bucket.defaultBurstCapacity * 1000, source: 'bucket', }; - cache.setCachedConfig(cacheKey, limitConfig, cacheTTL); log.debug('Extracted per-bucket rate limit config', { bucketName, limit: limitConfig.limit, + burstCapacity: config.rateLimiting.bucket.defaultBurstCapacity * 1000, }); return limitConfig; @@ -58,10 +43,10 @@ function extractAndCacheRateLimitConfig(bucket, bucketName, log) { if (globalLimit !== undefined && globalLimit > 0) { const limitConfig = { limit: globalLimit, + burstCapacity: config.rateLimiting.bucket.defaultBurstCapacity * 1000, source: 'global', }; - cache.setCachedConfig(cacheKey, limitConfig, cacheTTL); log.debug('Using global default rate limit config', { bucketName, limit: limitConfig.limit, @@ -71,58 +56,86 @@ function extractAndCacheRateLimitConfig(bucket, bucketName, log) { } // No rate limiting configured - cache null to avoid repeated lookups - cache.setCachedConfig(cacheKey, null, cacheTTL); log.trace('No rate limit configured for bucket', { bucketName }); - return null; } -/** - * Check rate limit with pre-resolved configuration using token reservation - * - * Uses token bucket: Workers maintain local tokens granted by Redis. - * Token consumption is pure in-memory (fast). 
Refills happen async in background. - * - * @param {string} bucketName - Bucket name - * @param {object|null} limitConfig - Pre-resolved rate limit config - * @param {object} log - Logger instance - * @param {function} callback - Callback(err, rateLimited boolean) - * @returns {undefined} - */ -function checkRateLimitWithConfig(bucketName, limitConfig, log, callback) { - // No rate limiting configured - if (!limitConfig || limitConfig.limit === 0) { - return callback(null, false); +function extractRateLimitConfigFromRequest(request) { + const applyRateLimit = config.rateLimiting?.enabled + && !rateLimitApiActions.includes(request.apiMethod) // Don't limit any rate limit admin actions + && !request.isInternalServiceRequest; // Don't limit any calls from internal services + + if (!applyRateLimit) { + return { needsCheck: false }; + } + + const limitConfigs = {}; + let needsCheck = false; + + if (request.accountLimits) { + limitConfigs.account = { + ...request.accountLimits, + source: 'account', + }; + needsCheck = true; + } + + if (request.bucketName) { + const cachedConfig = cache.getCachedConfig(cache.namespace.bucket, request.bucketName); + if (cachedConfig) { + limitConfigs.bucket = cachedConfig; + needsCheck = true; + } + + if (!request.accountLimits) { + const cachedOwner = cache.getCachedBucketOwner(request.bucketName); + if (cachedOwner !== undefined) { + const cachedConfig = cache.getCachedConfig(cache.namespace.account, cachedOwner); + if (cachedConfig) { + limitConfigs.account = cachedConfig; + limitConfigs.bucketOwner = cachedOwner; + needsCheck = true; + } + } + } } - // Get or create token bucket for this bucket - const tokenBucket = getTokenBucket(bucketName, limitConfig, log); + return { needsCheck, limitConfigs }; +} + +function checkRateLimitsForRequest(checks, log) { + const buckets = []; + for (const check of checks) { + const bucket = getTokenBucket(check.resourceClass, check.resourceId, check.measure, check.config, log); + if 
(!bucket.hasCapacity()) { + log.debug('Rate limit check: denied (no tokens available)', { + resourceClass: bucket.resourceClass, + resourceId: bucket.resourceId, + measure: bucket.measure, + limit: bucket.limitConfig.limit, + source: bucket.limitConfig.source, + }); - // Try to consume a token (in-memory, no Redis) - const allowed = tokenBucket.tryConsume(); + return { allowed: false, rateLimitSource: `${check.resourceClass}:${check.source}`}; + } + + buckets.push(bucket); - if (allowed) { log.trace('Rate limit check: allowed (token consumed)', { - bucketName, - tokensRemaining: tokenBucket.tokens, - }); - } else { - log.debug('Rate limit check: denied (no tokens available)', { - bucketName, - limit: limitConfig.limit, - source: limitConfig.source, + resourceClass: bucket.resourceClass, + resourceId: bucket.resourceId, + measure: bucket.measure, + source: bucket.limitConfig.source, }); } - // Return inverse: callback expects "rateLimited" boolean - // allowed=true → rateLimited=false - // allowed=false → rateLimited=true - return callback(null, !allowed); + buckets.forEach(bucket => bucket.tryConsume()); + return { allowed: true }; } module.exports = { - getRateLimitFromCache, - extractAndCacheRateLimitConfig, - checkRateLimitWithConfig, rateLimitApiActions, + extractBucketRateLimitConfig, + extractRateLimitConfigFromRequest, + checkRateLimitsForRequest, }; diff --git a/tests/unit/api/apiUtils/rateLimit/helpers.js b/tests/unit/api/apiUtils/rateLimit/helpers.js index b1b14d4e24..39c49060b3 100644 --- a/tests/unit/api/apiUtils/rateLimit/helpers.js +++ b/tests/unit/api/apiUtils/rateLimit/helpers.js @@ -28,36 +28,7 @@ describe('Rate limit helpers', () => { sandbox.restore(); }); - describe('getRateLimitFromCache', () => { - it('should return cached config on cache hit', () => { - const bucketName = 'test-bucket'; - const limitConfig = { limit: 100, source: 'bucket' }; - cache.setCachedConfig(`bucket:${bucketName}`, limitConfig, 60000); - - const result = 
helpers.getRateLimitFromCache(bucketName); - - assert.strictEqual(result, limitConfig); - }); - - it('should return undefined on cache miss', () => { - const bucketName = 'test-bucket'; - - const result = helpers.getRateLimitFromCache(bucketName); - - assert.strictEqual(result, undefined); - }); - - it('should return null when null is cached', () => { - const bucketName = 'test-bucket'; - cache.setCachedConfig(`bucket:${bucketName}`, null, 60000); - - const result = helpers.getRateLimitFromCache(bucketName); - - assert.strictEqual(result, null); - }); - }); - - describe('extractAndCacheRateLimitConfig', () => { + describe('extractBucketRateLimitConfig', () => { let configStub; beforeEach(() => { @@ -65,11 +36,12 @@ describe('Rate limit helpers', () => { enabled: true, bucket: { configCacheTTL: 30000, + defaultBurstCapacity: 1, }, }); }); - it('should extract and cache per-bucket config', () => { + it('should extract per-bucket config', () => { const bucketName = 'test-bucket'; const mockBucket = { getRateLimitConfiguration: () => ({ @@ -79,15 +51,12 @@ describe('Rate limit helpers', () => { }), }; - const result = helpers.extractAndCacheRateLimitConfig(mockBucket, bucketName, mockLog); + const result = helpers.extractBucketRateLimitConfig(mockBucket, bucketName, mockLog); - assert.deepStrictEqual(result, { limit: 200, source: 'bucket' }); - // Verify it was cached - const cached = cache.getCachedConfig(`bucket:${bucketName}`); - assert.deepStrictEqual(cached, { limit: 200, source: 'bucket' }); + assert.deepStrictEqual(result, { limit: 200, burstCapacity: 1000, source: 'bucket' }); }); - it('should fall back to global config when no bucket config', () => { + it('should fall back to global default config when no bucket config', () => { const bucketName = 'test-bucket'; const mockBucket = { getRateLimitConfiguration: () => null, @@ -97,16 +66,14 @@ describe('Rate limit helpers', () => { enabled: true, bucket: { defaultConfig: { limit: 100 }, + defaultBurstCapacity: 1, 
configCacheTTL: 30000, }, }); - const result = helpers.extractAndCacheRateLimitConfig(mockBucket, bucketName, mockLog); + const result = helpers.extractBucketRateLimitConfig(mockBucket, bucketName, mockLog); - assert.deepStrictEqual(result, { limit: 100, source: 'global' }); - // Verify it was cached - const cached = cache.getCachedConfig(`bucket:${bucketName}`); - assert.deepStrictEqual(cached, { limit: 100, source: 'global' }); + assert.deepStrictEqual(result, { limit: 100, burstCapacity: 1000, source: 'global' }); }); it('should return null when no config exists', () => { @@ -118,19 +85,17 @@ describe('Rate limit helpers', () => { configStub.value({ enabled: true, bucket: { + defaultBurstCapacity: 1, configCacheTTL: 30000, }, }); - const result = helpers.extractAndCacheRateLimitConfig(mockBucket, bucketName, mockLog); + const result = helpers.extractBucketRateLimitConfig(mockBucket, bucketName, mockLog); assert.strictEqual(result, null); - // Verify null was cached - const cached = cache.getCachedConfig(`bucket:${bucketName}`); - assert.strictEqual(cached, null); }); - it('should skip global default if limit is 0', () => { + it('should return null when global default limit is 0', () => { const bucketName = 'test-bucket'; const mockBucket = { getRateLimitConfiguration: () => null, @@ -140,16 +105,17 @@ describe('Rate limit helpers', () => { enabled: true, bucket: { defaultConfig: { limit: 0 }, + defaultBurstCapacity: 1, configCacheTTL: 30000, }, }); - const result = helpers.extractAndCacheRateLimitConfig(mockBucket, bucketName, mockLog); + const result = helpers.extractBucketRateLimitConfig(mockBucket, bucketName, mockLog); assert.strictEqual(result, null); }); - it('should use default TTL when not configured', () => { + it('should use default TTL when configCacheTTL is not set', () => { const bucketName = 'test-bucket'; const mockBucket = { getRateLimitConfiguration: () => ({ @@ -161,240 +127,194 @@ describe('Rate limit helpers', () => { configStub.value({ 
enabled: true, - bucket: {}, + bucket: { + defaultBurstCapacity: 1, + }, }); sandbox.stub(constants, 'rateLimitDefaultConfigCacheTTL').value(60000); - helpers.extractAndCacheRateLimitConfig(mockBucket, bucketName, mockLog); + const result = helpers.extractBucketRateLimitConfig(mockBucket, bucketName, mockLog); - // Verify it was cached with default TTL - const cached = cache.getCachedConfig(`bucket:${bucketName}`); - assert.deepStrictEqual(cached, { limit: 200, source: 'bucket' }); + assert.deepStrictEqual(result, { limit: 200, burstCapacity: 1000, source: 'bucket' }); }); }); - describe('checkRateLimitWithConfig', () => { - let configStub; - - beforeEach(() => { - configStub = sandbox.stub(config, 'rateLimiting').value({ + describe('checkRateLimitsForRequest', () => { + beforeEach(() => + sandbox.stub(config, 'rateLimiting').value({ enabled: true, nodes: 1, + tokenBucketBufferSize: 50, + tokenBucketRefillThreshold: 20, bucket: { defaultBurstCapacity: 1, }, - }); - sandbox.stub(config, 'clusters').value(1); - }); + }) + ); - it('should allow request when no limit configured', done => { - const bucketName = 'test-bucket'; - const limitConfig = null; - - helpers.checkRateLimitWithConfig(bucketName, limitConfig, mockLog, (err, rateLimited) => { - assert.strictEqual(err, null); - assert.strictEqual(rateLimited, false); - done(); - }); - }); + afterEach(() => sinon.restore()); - it('should allow request when limit is 0', done => { - const bucketName = 'test-bucket'; - const limitConfig = { limit: 0, source: 'global' }; + it('should allow request when checks array is empty', () => { + const result = helpers.checkRateLimitsForRequest([], mockLog); - helpers.checkRateLimitWithConfig(bucketName, limitConfig, mockLog, (err, rateLimited) => { - assert.strictEqual(err, null); - assert.strictEqual(rateLimited, false); - done(); - }); + assert.deepStrictEqual(result, { allowed: true }); }); - it('should allow request when bucket has capacity', done => { - const bucketName = 
'test-bucket'; - const limitConfig = { limit: 100, source: 'bucket' }; + it('should allow request when bucket has capacity', () => { + const check = { + resourceClass: 'bkt', resourceId: 'test-bucket', measure: 'rps', + config: { limit: 100, burstCapacity: 1000, source: 'bucket' }, source: 'bucket', + }; // Pre-populate token bucket with tokens - const bucket = tokenBucket.getTokenBucket(bucketName, limitConfig, mockLog); + const bucket = tokenBucket.getTokenBucket('bkt', 'test-bucket', 'rps', check.config, mockLog); bucket.tokens = 50; - helpers.checkRateLimitWithConfig(bucketName, limitConfig, mockLog, (err, rateLimited) => { - assert.strictEqual(err, null); - assert.strictEqual(rateLimited, false); - // Verify token was consumed - assert.strictEqual(bucket.tokens, 49); - done(); - }); + const result = helpers.checkRateLimitsForRequest([check], mockLog); + + assert.deepStrictEqual(result, { allowed: true }); + // Verify token was consumed + assert.strictEqual(bucket.tokens, 49); }); - it('should deny request when bucket is full', done => { - const bucketName = 'test-bucket'; - const limitConfig = { limit: 100, source: 'bucket' }; + it('should deny request when bucket has no tokens', () => { + const check = { + resourceClass: 'bkt', resourceId: 'test-bucket', measure: 'rps', + config: { limit: 100, burstCapacity: 1000, source: 'bucket' }, source: 'bucket', + }; - const bucket = tokenBucket.getTokenBucket(bucketName, limitConfig, mockLog); + const bucket = tokenBucket.getTokenBucket('bkt', 'test-bucket', 'rps', check.config, mockLog); // Explicitly set tokens to 0 to simulate exhausted quota bucket.tokens = 0; - helpers.checkRateLimitWithConfig(bucketName, limitConfig, mockLog, (err, rateLimited) => { - assert.strictEqual(err, null); - assert.strictEqual(rateLimited, true); - done(); - }); + const result = helpers.checkRateLimitsForRequest([check], mockLog); + + assert.strictEqual(result.allowed, false); + assert.strictEqual(result.rateLimitSource, 'bkt:bucket'); 
}); - it('should use configured burst capacity', done => { - const bucketName = 'test-bucket'; - const limitConfig = { limit: 100, source: 'bucket' }; + it('should not consume tokens when denied', () => { + const check = { + resourceClass: 'bkt', resourceId: 'test-bucket', measure: 'rps', + config: { limit: 100, burstCapacity: 1000, source: 'bucket' }, source: 'bucket', + }; - configStub.value({ - enabled: true, - nodes: 1, - bucket: { - defaultBurstCapacity: 2, - }, - }); + const bucket = tokenBucket.getTokenBucket('bkt', 'test-bucket', 'rps', check.config, mockLog); + bucket.tokens = 0; - // Pre-populate token bucket - const bucket = tokenBucket.getTokenBucket(bucketName, limitConfig, mockLog); - bucket.tokens = 50; + helpers.checkRateLimitsForRequest([check], mockLog); - helpers.checkRateLimitWithConfig(bucketName, limitConfig, mockLog, (err, rateLimited) => { - assert.strictEqual(err, null); - assert.strictEqual(rateLimited, false); - done(); - }); + assert.strictEqual(bucket.tokens, 0); }); - it('should use default burst capacity when not configured', done => { - const bucketName = 'test-bucket'; - const limitConfig = { limit: 100, source: 'bucket' }; - - configStub.value({ - enabled: true, - nodes: 1, - bucket: {}, - }); + it('should consume tokens from all buckets when all have capacity', () => { + const check1 = { + resourceClass: 'bkt', resourceId: 'bucket-1', measure: 'rps', + config: { limit: 100, burstCapacity: 1000, source: 'bucket' }, source: 'bucket', + }; + const check2 = { + resourceClass: 'acc', resourceId: 'account-1', measure: 'rps', + config: { limit: 200, burstCapacity: 1000, source: 'account' }, source: 'account', + }; - sandbox.stub(constants, 'rateLimitDefaultBurstCapacity').value(1); + const bucket1 = tokenBucket.getTokenBucket('bkt', 'bucket-1', 'rps', check1.config, mockLog); + const bucket2 = tokenBucket.getTokenBucket('acc', 'account-1', 'rps', check2.config, mockLog); + bucket1.tokens = 50; + bucket2.tokens = 50; - // Pre-populate 
token bucket - const bucket = tokenBucket.getTokenBucket(bucketName, limitConfig, mockLog); - bucket.tokens = 50; + const result = helpers.checkRateLimitsForRequest([check1, check2], mockLog); - helpers.checkRateLimitWithConfig(bucketName, limitConfig, mockLog, (err, rateLimited) => { - assert.strictEqual(err, null); - assert.strictEqual(rateLimited, false); - done(); - }); + assert.deepStrictEqual(result, { allowed: true }); + assert.strictEqual(bucket1.tokens, 49); + assert.strictEqual(bucket2.tokens, 49); }); - it('should calculate interval for distributed setup', done => { - const bucketName = 'test-bucket'; - const limitConfig = { limit: 1000, source: 'global' }; + it('should deny on first exhausted bucket and not consume other buckets', () => { + const check1 = { + resourceClass: 'bkt', resourceId: 'bucket-1', measure: 'rps', + config: { limit: 100, burstCapacity: 1000, source: 'bucket' }, source: 'bucket', + }; + const check2 = { + resourceClass: 'acc', resourceId: 'account-1', measure: 'rps', + config: { limit: 200, burstCapacity: 1000, source: 'account' }, source: 'account', + }; - configStub.value({ - enabled: true, - nodes: 10, - bucket: { - defaultBurstCapacity: 1, - }, - }); - sandbox.stub(config, 'clusters').value(5); + const bucket1 = tokenBucket.getTokenBucket('bkt', 'bucket-1', 'rps', check1.config, mockLog); + const bucket2 = tokenBucket.getTokenBucket('acc', 'account-1', 'rps', check2.config, mockLog); + bucket1.tokens = 0; // exhausted + bucket2.tokens = 50; - helpers.checkRateLimitWithConfig(bucketName, limitConfig, mockLog, err => { - assert.strictEqual(err, null); - // Should work correctly with distributed calculation - done(); - }); + const result = helpers.checkRateLimitsForRequest([check1, check2], mockLog); + + assert.strictEqual(result.allowed, false); + // bucket2 tokens should be unchanged (not consumed when an earlier check fails) + assert.strictEqual(bucket2.tokens, 50); }); - it('should log debug info when using token bucket', done 
=> { - const bucketName = 'test-bucket'; - const limitConfig = { limit: 100, source: 'bucket' }; + it('should log debug info when request is denied', () => { + const check = { + resourceClass: 'bkt', resourceId: 'test-bucket', measure: 'rps', + config: { limit: 100, burstCapacity: 1000, source: 'bucket' }, source: 'bucket', + }; - const bucket = tokenBucket.getTokenBucket(bucketName, limitConfig, mockLog); + const bucket = tokenBucket.getTokenBucket('bkt', 'test-bucket', 'rps', check.config, mockLog); // Explicitly set tokens to 0 to trigger denial log bucket.tokens = 0; - helpers.checkRateLimitWithConfig(bucketName, limitConfig, mockLog, () => { - // Check for token bucket denial log - const deniedCall = mockLog.debug.getCalls().find( - call => call.args[0] === 'Rate limit check: denied (no tokens available)' - ); - assert(deniedCall, 'Should have logged denied message'); - const logArgs = deniedCall.args[1]; - assert.strictEqual(logArgs.bucketName, bucketName); - assert.strictEqual(logArgs.limit, 100); - assert.strictEqual(logArgs.source, 'bucket'); - done(); - }); + helpers.checkRateLimitsForRequest([check], mockLog); + + const deniedCall = mockLog.debug.getCalls().find( + call => call.args[0] === 'Rate limit check: denied (no tokens available)' + ); + assert(deniedCall, 'Should have logged denied message'); + const logArgs = deniedCall.args[1]; + assert.strictEqual(logArgs.resourceClass, 'bkt'); + assert.strictEqual(logArgs.resourceId, 'test-bucket'); + assert.strictEqual(logArgs.limit, 100); + assert.strictEqual(logArgs.source, 'bucket'); }); - it('should log trace info when request allowed', done => { - const bucketName = 'test-bucket'; - const limitConfig = { limit: 100, source: 'bucket' }; + it('should log trace info when request is allowed', () => { + const check = { + resourceClass: 'bkt', resourceId: 'test-bucket', measure: 'rps', + config: { limit: 100, burstCapacity: 1000, source: 'bucket' }, source: 'bucket', + }; // Pre-populate token bucket - const 
bucket = tokenBucket.getTokenBucket(bucketName, limitConfig, mockLog); + const bucket = tokenBucket.getTokenBucket('bkt', 'test-bucket', 'rps', check.config, mockLog); bucket.tokens = 50; - helpers.checkRateLimitWithConfig(bucketName, limitConfig, mockLog, (err, rateLimited) => { - assert.strictEqual(rateLimited, false); - // Check for trace log (token consumed) - const allowedCall = mockLog.trace.getCalls().find( - call => call.args[0] === 'Rate limit check: allowed (token consumed)' - ); - assert(allowedCall, 'Should have logged allowed message'); - assert.strictEqual(allowedCall.args[1].bucketName, bucketName); - assert.strictEqual(allowedCall.args[1].tokensRemaining, 49); - done(); - }); - }); - - it('should log debug info when request denied (no tokens)', done => { - const bucketName = 'test-bucket'; - const limitConfig = { limit: 100, source: 'bucket' }; + const result = helpers.checkRateLimitsForRequest([check], mockLog); - const bucket = tokenBucket.getTokenBucket(bucketName, limitConfig, mockLog); - // Explicitly set tokens to 0 to simulate exhausted quota - bucket.tokens = 0; - - helpers.checkRateLimitWithConfig(bucketName, limitConfig, mockLog, (err, rateLimited) => { - assert.strictEqual(rateLimited, true); - // Find the "denied" log call - const deniedCall = mockLog.debug.getCalls().find( - call => call.args[0] === 'Rate limit check: denied (no tokens available)' - ); - assert(deniedCall, 'Should have logged denied message'); - assert.strictEqual(deniedCall.args[1].bucketName, bucketName); - assert.strictEqual(deniedCall.args[1].limit, 100); - assert.strictEqual(deniedCall.args[1].source, 'bucket'); - done(); - }); + assert.strictEqual(result.allowed, true); + const allowedCall = mockLog.trace.getCalls().find( + call => call.args[0] === 'Rate limit check: allowed (token consumed)' + ); + assert(allowedCall, 'Should have logged allowed message'); + assert.strictEqual(allowedCall.args[1].resourceClass, 'bkt'); + 
assert.strictEqual(allowedCall.args[1].resourceId, 'test-bucket'); }); - it('should handle multiple sequential requests correctly', done => { - const bucketName = 'test-bucket'; - const limitConfig = { limit: 100, source: 'bucket' }; + it('should handle multiple sequential requests correctly', () => { + const check = { + resourceClass: 'bkt', resourceId: 'test-bucket', measure: 'rps', + config: { limit: 100, burstCapacity: 1000, source: 'bucket' }, source: 'bucket', + }; // Pre-populate token bucket with multiple tokens - const bucket = tokenBucket.getTokenBucket(bucketName, limitConfig, mockLog); + const bucket = tokenBucket.getTokenBucket('bkt', 'test-bucket', 'rps', check.config, mockLog); bucket.tokens = 50; // First request should be allowed - helpers.checkRateLimitWithConfig(bucketName, limitConfig, mockLog, (err, rateLimited) => { - assert.strictEqual(err, null); - assert.strictEqual(rateLimited, false); - assert.strictEqual(bucket.tokens, 49); - - // Second request should also be allowed (still has tokens) - helpers.checkRateLimitWithConfig(bucketName, limitConfig, mockLog, (err2, rateLimited2) => { - assert.strictEqual(err2, null); - assert.strictEqual(rateLimited2, false); - assert.strictEqual(bucket.tokens, 48); - done(); - }); - }); + const result1 = helpers.checkRateLimitsForRequest([check], mockLog); + assert.strictEqual(result1.allowed, true); + assert.strictEqual(bucket.tokens, 49); + + // Second request should also be allowed (still has tokens) + const result2 = helpers.checkRateLimitsForRequest([check], mockLog); + assert.strictEqual(result2.allowed, true); + assert.strictEqual(bucket.tokens, 48); }); }); }); From b7c90a5c82dfea3acd87562cec7ad387e589ce36 Mon Sep 17 00:00:00 2001 From: Taylor McKinnon Date: Fri, 13 Mar 2026 14:21:20 +0100 Subject: [PATCH 06/16] impr(CLDSRV-852): Change RateLimitClient.grantTokens() to accept resourceClass, resourceId, and measure --- lib/api/apiUtils/rateLimit/client.js | 8 +++++--- 
.../aws-node-sdk/test/rateLimit/client.js | 14 +++++++------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/lib/api/apiUtils/rateLimit/client.js b/lib/api/apiUtils/rateLimit/client.js index 183ae032b5..203643fc0f 100644 --- a/lib/api/apiUtils/rateLimit/client.js +++ b/lib/api/apiUtils/rateLimit/client.js @@ -38,14 +38,16 @@ class RateLimitClient { * * Used by token reservation system to request capacity in advance. * - * @param {string} bucketName - Bucket name + * @param {string} resourceClass - Resource class name e.g. 'account' or 'bucket' + * @param {string} resourceId - Unique resource ID e.g. bucket name or account ID + * @param {string} measure - ID of rate limit measure e.g. 'rps' * @param {number} requested - Number of tokens requested * @param {number} interval - Interval per request in ms * @param {number} burstCapacity - Burst capacity in ms * @param {RateLimitClient~grantTokens} cb - Callback */ - grantTokens(bucketName, requested, interval, burstCapacity, cb) { - const key = `ratelimit:bucket:${bucketName}:rps:emptyAt`; + grantTokens(resourceClass, resourceId, measure, requested, interval, burstCapacity, cb) { + const key = `ratelimit:${resourceClass}:${resourceId}:${measure}:emptyAt`; const now = Date.now(); this.redis.grantTokens( diff --git a/tests/functional/aws-node-sdk/test/rateLimit/client.js b/tests/functional/aws-node-sdk/test/rateLimit/client.js index d82dbf636a..083eb6271b 100644 --- a/tests/functional/aws-node-sdk/test/rateLimit/client.js +++ b/tests/functional/aws-node-sdk/test/rateLimit/client.js @@ -19,7 +19,7 @@ skipIfRateLimitDisabled('RateLimitClient', () => { after(async () => client.redis.quit().catch(() => {})); beforeEach(async () => { - const keys = await client.redis.keys('ratelimit:bucket:*'); + const keys = await client.redis.keys('ratelimit:*'); if (keys.length > 0) { await client.redis.del(...keys); } @@ -42,7 +42,7 @@ skipIfRateLimitDisabled('RateLimitClient', () => { const interval = 100; // 100ms per 
request = 10 req/s const burstCapacity = 1000; // 1000ms burst capacity - client.grantTokens(testBucket, requested, interval, burstCapacity, (err, granted) => { + client.grantTokens('bucket', testBucket, 'rps', requested, interval, burstCapacity, (err, granted) => { assert.ifError(err); assert.strictEqual(granted, requested); done(); @@ -55,12 +55,12 @@ skipIfRateLimitDisabled('RateLimitClient', () => { const burstCapacity = 1000; // 1000ms burst capacity // First request - client.grantTokens(testBucket, requested, interval, burstCapacity, (err, granted1) => { + client.grantTokens('bucket', testBucket, 'rps', requested, interval, burstCapacity, (err, granted1) => { assert.ifError(err); assert.strictEqual(granted1, requested); // Second request immediately after - client.grantTokens(testBucket, requested, interval, burstCapacity, (err, granted2) => { + client.grantTokens('bucket', testBucket, 'rps', requested, interval, burstCapacity, (err, granted2) => { assert.ifError(err); assert.strictEqual(granted2, requested); done(); @@ -73,7 +73,7 @@ skipIfRateLimitDisabled('RateLimitClient', () => { const burstCapacity = 500; // 500ms burst capacity = max 5 tokens // Request more tokens than available in burst - client.grantTokens(testBucket, 10, interval, burstCapacity, (err, granted) => { + client.grantTokens('bucket', testBucket, 'rps', 10, interval, burstCapacity, (err, granted) => { assert.ifError(err); // Should grant partial tokens (5 tokens max with 500ms burst) assert(granted > 0, 'Should grant at least some tokens'); @@ -87,12 +87,12 @@ skipIfRateLimitDisabled('RateLimitClient', () => { const burstCapacity = 100; // 100ms burst capacity = max 1 token // First request consumes the burst capacity - client.grantTokens(testBucket, 1, interval, burstCapacity, (err, granted1) => { + client.grantTokens('bucket', testBucket, 'rps', 1, interval, burstCapacity, (err, granted1) => { assert.ifError(err); assert.strictEqual(granted1, 1); // Second request immediately after 
should be denied - client.grantTokens(testBucket, 1, interval, burstCapacity, (err, granted2) => { + client.grantTokens('bucket', testBucket, 'rps', 1, interval, burstCapacity, (err, granted2) => { assert.ifError(err); assert.strictEqual(granted2, 0, 'Should deny tokens when quota exhausted'); done(); From b105f02a96a3c4273ca352cba6273152c8068bc5 Mon Sep 17 00:00:00 2001 From: Taylor McKinnon Date: Fri, 13 Mar 2026 14:21:59 +0100 Subject: [PATCH 07/16] impr(CLDSRV-852): Remove unused workers params from interval calculation --- lib/Config.js | 2 +- lib/api/apiUtils/rateLimit/config.js | 25 ++- lib/api/apiUtils/rateLimit/gcra.js | 29 ++-- tests/unit/api/apiUtils/rateLimit/config.js | 176 ++++++++++---------- tests/unit/api/apiUtils/rateLimit/gcra.js | 31 ++-- 5 files changed, 125 insertions(+), 138 deletions(-) diff --git a/lib/Config.js b/lib/Config.js index 213003a4ff..887d5572bb 100644 --- a/lib/Config.js +++ b/lib/Config.js @@ -1875,7 +1875,7 @@ class Config extends EventEmitter { assert(this.localCache, 'localCache must be defined when rate limiting is enabled'); // Parse and validate all rate limiting configuration - this.rateLimiting = parseRateLimitConfig(config.rateLimiting, this.clusters = this.clusters || 1); + this.rateLimiting = parseRateLimitConfig(config.rateLimiting); } diff --git a/lib/api/apiUtils/rateLimit/config.js b/lib/api/apiUtils/rateLimit/config.js index 6b7c3969e7..1b6928b94f 100644 --- a/lib/api/apiUtils/rateLimit/config.js +++ b/lib/api/apiUtils/rateLimit/config.js @@ -199,11 +199,10 @@ const rateLimitConfigSchema = Joi.object({ * * @param {string} resourceClass - Rate limit class name ('bucket' or 'account') * @param {object} validatedCfg - Already validated config from Joi - * @param {number} clusters - Number of worker processes spawned per instance * @param {number} nodes - Number of instances that requests will be load balanced across * @returns {RateLimitClassConfig} Transformed rate limit config */ -function 
transformClassConfig(resourceClass, validatedCfg, clusters, nodes) { +function transformClassConfig(resourceClass, validatedCfg, nodes) { const transformed = { defaultConfig: undefined, configCacheTTL: validatedCfg.configCacheTTL, @@ -213,23 +212,22 @@ function transformClassConfig(resourceClass, validatedCfg, clusters, nodes) { if (validatedCfg.defaultConfig?.requestsPerSecond) { const { limit, burstCapacity } = validatedCfg.defaultConfig.requestsPerSecond; - // Validate limit against nodes AND workers (business rule) - const minLimit = nodes * clusters; - if (limit > 0 && limit < minLimit) { + // Validate limit against nodes (business rule) + if (limit > 0 && limit < nodes) { throw new Error( `rateLimiting.${resourceClass}.defaultConfig.` + `requestsPerSecond.limit (${limit}) must be >= ` + - `(nodes x workers = ${nodes} x ${clusters} = ${minLimit}) ` + - 'or 0 (unlimited). Each worker enforces limit/nodes/workers locally. ' + - `With limit < ${minLimit}, per-worker rate would be < 1 req/s, effectively blocking traffic.` + `nodes (${nodes}) ` + + 'or 0 (unlimited). Each node enforces limit/nodes locally. 
' + + `With limit < ${nodes}, per-node rate would be < 1 req/s, effectively blocking traffic.` ); } // Use provided burstCapacity or fall back to default const effectiveBurstCapacity = burstCapacity || transformed.defaultBurstCapacity; - // Calculate per-worker interval using distributed architecture - const interval = calculateInterval(limit, nodes, clusters); + // Calculate per-node interval using distributed architecture + const interval = calculateInterval(limit, nodes); // Store both the original limit and the calculated values transformed.defaultConfig = { @@ -248,11 +246,10 @@ function transformClassConfig(resourceClass, validatedCfg, clusters, nodes) { * Parse and validate the complete rate limiting configuration * * @param {Object} rateLimitingConfig - config.rateLimiting object from config.json - * @param {number} clusters - Number of worker clusters (must be numeric) * @returns {Object} Fully parsed and validated rate limiting configuration * @throws {Error} If configuration is invalid */ -function parseRateLimitConfig(rateLimitingConfig, clusters) { +function parseRateLimitConfig(rateLimitingConfig) { // Validate configuration using Joi schema const { error: validationError, value: validated } = rateLimitConfigSchema.validate( rateLimitingConfig, @@ -282,8 +279,8 @@ function parseRateLimitConfig(rateLimitingConfig, clusters) { ), }; - parsed.bucket = transformClassConfig('bucket', validated.bucket, clusters, parsed.nodes); - parsed.account = transformClassConfig('account', validated.account, clusters, parsed.nodes); + parsed.bucket = transformClassConfig('bucket', validated.bucket, parsed.nodes); + parsed.account = transformClassConfig('account', validated.account, parsed.nodes); return parsed; } diff --git a/lib/api/apiUtils/rateLimit/gcra.js b/lib/api/apiUtils/rateLimit/gcra.js index 93264aee2e..e14c2e9fc5 100644 --- a/lib/api/apiUtils/rateLimit/gcra.js +++ b/lib/api/apiUtils/rateLimit/gcra.js @@ -1,38 +1,35 @@ /** - * Calculate per-worker interval 
based on distributed architecture
+ * Calculate per-node interval based on distributed architecture
- *
- * In a distributed setup with N nodes and W workers per node:
+ * In a distributed setup with N nodes:
  * - Global limit: R requests per second
- * - Per-worker limit: R / N / W
- * - Interval = 1000ms / (R / N / W)
+ * - Per-node limit: R / N
+ * - Interval = 1000ms / (R / N)
  *
  * The interval represents milliseconds between requests. We divide 1000 (milliseconds
  * in a second) by the rate to convert "requests per second" to "milliseconds per request".
  *
  * Examples:
- * - 100 req/s ÷ 1 node ÷ 10 workers = 10 req/s per worker → interval = 100ms
- * - 600 req/s ÷ 6 nodes ÷ 10 workers = 10 req/s per worker → interval = 100ms
+ * - 100 req/s ÷ 1 node = 100 req/s per node → interval = 10ms
+ * - 600 req/s ÷ 6 nodes = 100 req/s per node → interval = 10ms
  *
  * Dynamic work-stealing is achieved through Redis sync reconciliation:
- * - Each worker evaluates locally at its fixed per-worker quota
- * - Workers report consumed / workers to Redis
- * - Redis sums all workers' shares
- * - Workers overwrite local counters with Redis values
+ * - Each worker evaluates locally using preallocated tokens
+ * - Workers report processed requests to Redis
+ * - Redis sums all workers' requests
  * - Idle workers' unused capacity accumulates in Redis
  * - Busy workers pull back higher emptyAt values and throttle proportionally
  *
- * IMPORTANT: Limit must be >= N * W, otherwise per-worker rate < 1 req/s
+ * IMPORTANT: Limit must be >= N, otherwise per-node rate < 1 req/s
  * which results in intervals > 1000ms and effectively blocks traffic.
* * @param {number} limit - Global requests per second * @param {number} nodes - Total number of nodes - * @param {number} _workers - Number of workers per node (unused in token reservation) * @returns {number} Interval in milliseconds between requests */ -// eslint-disable-next-line no-unused-vars -function calculateInterval(limit, nodes, _workers) { - // Per-node rate = limit / nodes (workers NOT divided) - // This allows dynamic work-stealing - workers evaluate at node quota + +function calculateInterval(limit, nodes) { + // Per-node rate = limit / nodes const perNodeRate = limit / nodes; // Interval = 1000ms / rate diff --git a/tests/unit/api/apiUtils/rateLimit/config.js b/tests/unit/api/apiUtils/rateLimit/config.js index 8d6d5ae95b..118032603a 100644 --- a/tests/unit/api/apiUtils/rateLimit/config.js +++ b/tests/unit/api/apiUtils/rateLimit/config.js @@ -26,7 +26,7 @@ describe('parseRateLimitConfig', () => { describe('valid configurations', () => { it('should parse complete valid configuration', () => { - const result = parseRateLimitConfig(validConfig, 10); + const result = parseRateLimitConfig(validConfig); assert.strictEqual(result.enabled, false); // Default when not specified assert.strictEqual(result.serviceUserArn, validConfig.serviceUserArn); @@ -44,7 +44,7 @@ describe('parseRateLimitConfig', () => { serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', }; - const result = parseRateLimitConfig(minimalConfig, 5); + const result = parseRateLimitConfig(minimalConfig); assert.strictEqual(result.enabled, false); // Default assert.strictEqual(result.serviceUserArn, minimalConfig.serviceUserArn); @@ -68,7 +68,7 @@ describe('parseRateLimitConfig', () => { }, }; - const result = parseRateLimitConfig(config, 3); + const result = parseRateLimitConfig(config); assert.strictEqual(result.bucket.configCacheTTL, 600); assert.strictEqual(result.bucket.defaultConfig, undefined); @@ -80,7 +80,7 @@ describe('parseRateLimitConfig', () => { bucket: {}, }; - 
const result = parseRateLimitConfig(config, 2); + const result = parseRateLimitConfig(config); assert.strictEqual(result.bucket.configCacheTTL, constants.rateLimitDefaultConfigCacheTTL); }); @@ -91,7 +91,7 @@ describe('parseRateLimitConfig', () => { error: {}, }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); assert.strictEqual(result.error.code, errors.SlowDown.code); assert.strictEqual(result.error.description, errors.SlowDown.description); @@ -104,7 +104,7 @@ describe('parseRateLimitConfig', () => { serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); assert.strictEqual(result.enabled, false); }); @@ -114,7 +114,7 @@ describe('parseRateLimitConfig', () => { serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); assert.strictEqual(result.enabled, true); }); @@ -124,7 +124,7 @@ describe('parseRateLimitConfig', () => { serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); assert.strictEqual(result.enabled, false); }); @@ -135,7 +135,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid/ ); }); @@ -146,7 +146,7 @@ describe('parseRateLimitConfig', () => { const config = { nodes: 1 }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"serviceUserArn" is required/ ); }); @@ -157,7 +157,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is 
invalid.*"serviceUserArn" must be a string/ ); }); @@ -170,7 +170,7 @@ describe('parseRateLimitConfig', () => { nodes: 5, }; - const result = parseRateLimitConfig(config, 10); + const result = parseRateLimitConfig(config); assert.strictEqual(result.nodes, 5); }); @@ -179,7 +179,7 @@ describe('parseRateLimitConfig', () => { serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', }; - const result = parseRateLimitConfig(config, 2); + const result = parseRateLimitConfig(config); assert.strictEqual(result.nodes, 1); }); @@ -190,7 +190,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"nodes" must be a positive number/ ); }); @@ -202,7 +202,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"nodes" must be a positive number/ ); }); @@ -214,7 +214,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"nodes" must be an integer/ ); }); @@ -226,7 +226,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"nodes" must be a number/ ); }); @@ -238,7 +238,7 @@ describe('parseRateLimitConfig', () => { serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); assert.strictEqual(result.tokenBucketBufferSize, 50); }); @@ -248,7 +248,7 @@ describe('parseRateLimitConfig', () => { tokenBucketBufferSize: 100, }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); 
assert.strictEqual(result.tokenBucketBufferSize, 100); }); @@ -259,7 +259,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"tokenBucketBufferSize" must be a positive number/ ); }); @@ -271,7 +271,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"tokenBucketBufferSize" must be a positive number/ ); }); @@ -283,7 +283,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"tokenBucketBufferSize" must be an integer/ ); }); @@ -295,7 +295,7 @@ describe('parseRateLimitConfig', () => { serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); assert.strictEqual(result.tokenBucketRefillThreshold, 20); }); @@ -305,7 +305,7 @@ describe('parseRateLimitConfig', () => { tokenBucketRefillThreshold: 30, }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); assert.strictEqual(result.tokenBucketRefillThreshold, 30); }); @@ -316,7 +316,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"tokenBucketRefillThreshold" must be a positive number/ ); }); @@ -328,7 +328,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"tokenBucketRefillThreshold" must be a positive number/ ); }); @@ -340,7 +340,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => 
parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"tokenBucketRefillThreshold" must be an integer/ ); }); @@ -354,7 +354,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"bucket" must be of type object/ ); }); @@ -371,7 +371,7 @@ describe('parseRateLimitConfig', () => { }, }; - const result = parseRateLimitConfig(config, 5); + const result = parseRateLimitConfig(config); assert(result.bucket.defaultConfig); assert(result.bucket.defaultConfig.requestsPerSecond); @@ -386,7 +386,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"bucket.defaultConfig" must be of type object/ ); }); @@ -402,7 +402,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"bucket.defaultConfig.requestsPerSecond" must be of type object/ ); }); @@ -419,7 +419,7 @@ describe('parseRateLimitConfig', () => { }, }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); // limit = 0 means unlimited, should be accepted assert(result.bucket.defaultConfig.requestsPerSecond); }); @@ -437,7 +437,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), // eslint-disable-next-line max-len /rateLimiting configuration is invalid.*"bucket.defaultConfig.requestsPerSecond.limit" must be larger than or equal to 0/ ); @@ -456,7 +456,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is 
invalid.*"bucket.defaultConfig.requestsPerSecond.limit" is required/ ); }); @@ -475,7 +475,7 @@ describe('parseRateLimitConfig', () => { }, }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); const bucketSize = result.bucket.defaultConfig.requestsPerSecond.bucketSize; // bucketSize = burstCapacity * 1000 assert.strictEqual(bucketSize, constants.rateLimitDefaultBurstCapacity * 1000); @@ -494,7 +494,7 @@ describe('parseRateLimitConfig', () => { }, }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); const bucketSize = result.bucket.defaultConfig.requestsPerSecond.bucketSize; assert.strictEqual(bucketSize, 20 * 1000); }); @@ -513,7 +513,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), // eslint-disable-next-line max-len /rateLimiting configuration is invalid.*"bucket.defaultConfig.requestsPerSecond.burstCapacity" must be a positive number/ ); @@ -533,7 +533,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), // eslint-disable-next-line max-len /rateLimiting configuration is invalid.*"bucket.defaultConfig.requestsPerSecond.burstCapacity" must be a positive number/ ); @@ -553,7 +553,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), // eslint-disable-next-line max-len /rateLimiting configuration is invalid.*"bucket.defaultConfig.requestsPerSecond.burstCapacity" must be a number/ ); @@ -572,7 +572,7 @@ describe('parseRateLimitConfig', () => { }, }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); const bucketSize = result.bucket.defaultConfig.requestsPerSecond.bucketSize; assert.strictEqual(bucketSize, 1.5 * 1000); }); @@ -587,7 +587,7 @@ 
describe('parseRateLimitConfig', () => { }, }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); assert.strictEqual(result.bucket.configCacheTTL, 450); }); @@ -600,7 +600,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"bucket.configCacheTTL" must be a positive number/ ); }); @@ -614,7 +614,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"bucket.configCacheTTL" must be a positive number/ ); }); @@ -628,7 +628,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"bucket.configCacheTTL" must be an integer/ ); }); @@ -642,7 +642,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"bucket.configCacheTTL" must be a number/ ); }); @@ -656,7 +656,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"error" must be of type object/ ); }); @@ -671,7 +671,7 @@ describe('parseRateLimitConfig', () => { }, }; - const result4xx = parseRateLimitConfig(config4xx, 1); + const result4xx = parseRateLimitConfig(config4xx); assert.strictEqual(result4xx.error.code, 429); assert.strictEqual(result4xx.error.description, 'Too Many Requests'); @@ -684,7 +684,7 @@ describe('parseRateLimitConfig', () => { }, }; - const result5xx = parseRateLimitConfig(config5xx, 1); + const result5xx = parseRateLimitConfig(config5xx); assert.strictEqual(result5xx.error.code, 503); 
assert.strictEqual(result5xx.error.description, 'Service Unavailable'); }); @@ -699,7 +699,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"error.statusCode" must be larger than or equal to 400/ ); }); @@ -714,7 +714,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"error.statusCode" must be less than or equal to 599/ ); }); @@ -729,7 +729,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"error.statusCode" must be an integer/ ); }); @@ -744,7 +744,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"error.statusCode" must be a number/ ); }); @@ -757,7 +757,7 @@ describe('parseRateLimitConfig', () => { }, }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); assert.strictEqual(result.error.code, 503); assert.strictEqual(result.error.message, 'SlowDown'); assert.strictEqual(result.error.description, errors.SlowDown.description); @@ -773,7 +773,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"error.message" must be a string/ ); }); @@ -783,7 +783,7 @@ describe('parseRateLimitConfig', () => { serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); assert.strictEqual(result.error.code, errors.SlowDown.code); assert.strictEqual(result.error.description, 
errors.SlowDown.description); }); @@ -798,7 +798,7 @@ describe('parseRateLimitConfig', () => { }, }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); assert.strictEqual(result.error.code, 429); assert.strictEqual(result.error.message, 'TooManyRequests'); assert.strictEqual(result.error.description, 'Please slow down'); @@ -813,7 +813,7 @@ describe('parseRateLimitConfig', () => { }, }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); assert.strictEqual(result.error.code, 429); assert.strictEqual(result.error.message, 'SlowDown'); assert.strictEqual(result.error.description, 'Please slow down'); @@ -830,29 +830,29 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"error.code" must be a string/ ); }); }); describe('distributed rate limiting validation', () => { - it('should validate limit against nodes and workers', () => { + it('should validate limit against nodes', () => { const config = { serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', nodes: 5, bucket: { defaultConfig: { requestsPerSecond: { - limit: 10, // Less than 5 nodes x 10 workers = 50 + limit: 3, // Less than 5 nodes }, }, }, }; assert.throws( - () => parseRateLimitConfig(config, 10), - /requestsPerSecond\.limit \(10\) must be >= \(nodes x workers = 5 x 10 = 50\)/ + () => parseRateLimitConfig(config), + /requestsPerSecond\.limit \(3\) must be >= nodes \(5\)/ ); }); }); @@ -873,7 +873,7 @@ describe('parseRateLimitConfig', () => { }, }; - const result = parseRateLimitConfig(config, 5); + const result = parseRateLimitConfig(config); assert(result.account); assert(result.account.defaultConfig); @@ -901,7 +901,7 @@ describe('parseRateLimitConfig', () => { }, }; - const result = parseRateLimitConfig(config, 5); + const result = parseRateLimitConfig(config); 
assert(result.bucket.defaultConfig); assert.strictEqual(result.bucket.defaultConfig.limit, 1000); @@ -909,22 +909,22 @@ describe('parseRateLimitConfig', () => { assert.strictEqual(result.account.defaultConfig.limit, 500); }); - it('should validate account limit against nodes and workers', () => { + it('should validate account limit against nodes', () => { const config = { serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', nodes: 10, account: { defaultConfig: { requestsPerSecond: { - limit: 20, // Less than 10 nodes x 5 workers = 50 + limit: 7, // Less than 10 nodes }, }, }, }; assert.throws( - () => parseRateLimitConfig(config, 5), - /requestsPerSecond\.limit \(20\) must be >= \(nodes x workers = 10 x 5 = 50\)/ + () => parseRateLimitConfig(config), + /requestsPerSecond\.limit \(7\) must be >= nodes \(10\)/ ); }); @@ -935,7 +935,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"account" must be of type object/ ); }); @@ -952,7 +952,7 @@ describe('parseRateLimitConfig', () => { }, }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); assert.strictEqual(result.account.configCacheTTL, constants.rateLimitDefaultConfigCacheTTL); assert.strictEqual(result.account.defaultBurstCapacity, constants.rateLimitDefaultBurstCapacity); @@ -972,7 +972,7 @@ describe('parseRateLimitConfig', () => { }, }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); const bucketSize = result.account.defaultConfig.requestsPerSecond.bucketSize; assert.strictEqual(bucketSize, constants.rateLimitDefaultBurstCapacity * 1000); }); @@ -990,7 +990,7 @@ describe('parseRateLimitConfig', () => { }, }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); const bucketSize = 
result.account.defaultConfig.requestsPerSecond.bucketSize; assert.strictEqual(bucketSize, 20 * 1000); }); @@ -1008,7 +1008,7 @@ describe('parseRateLimitConfig', () => { }, }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); const bucketSize = result.account.defaultConfig.requestsPerSecond.bucketSize; assert.strictEqual(bucketSize, 1.5 * 1000); }); @@ -1027,7 +1027,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), // eslint-disable-next-line max-len /rateLimiting configuration is invalid.*"account.defaultConfig.requestsPerSecond.burstCapacity" must be a positive number/ ); @@ -1047,7 +1047,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), // eslint-disable-next-line max-len /rateLimiting configuration is invalid.*"account.defaultConfig.requestsPerSecond.burstCapacity" must be a positive number/ ); @@ -1067,7 +1067,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), // eslint-disable-next-line max-len /rateLimiting configuration is invalid.*"account.defaultConfig.requestsPerSecond.burstCapacity" must be a number/ ); @@ -1082,7 +1082,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"unknownField" is not allowed/ ); }); @@ -1096,7 +1096,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), /rateLimiting configuration is invalid.*"bucket.unknownField" is not allowed/ ); }); @@ -1111,7 +1111,7 @@ describe('parseRateLimitConfig', () => { }; assert.throws( - () => parseRateLimitConfig(config, 1), + () => parseRateLimitConfig(config), 
/rateLimiting configuration is invalid.*"error.unknownField" is not allowed/ ); }); @@ -1128,7 +1128,7 @@ describe('parseRateLimitConfig', () => { assert.throws(() => { try { - parseRateLimitConfig(config, 1); + parseRateLimitConfig(config); } catch (error) { // Should contain multiple errors assert(error.message.includes('serviceUserArn')); @@ -1155,11 +1155,10 @@ describe('parseRateLimitConfig', () => { }, }; - const result = parseRateLimitConfig(config, 5); // 5 workers (ignored) + const result = parseRateLimitConfig(config); - // NEW BEHAVIOR: Per-NODE rate = 100 / 2 nodes = 50 req/s (workers NOT divided) + // Per-NODE rate = 100 / 2 nodes = 50 req/s // Interval = 1000ms / 50 = 20ms - // Workers can dynamically share node quota via Redis reconciliation const interval = result.bucket.defaultConfig.requestsPerSecond.interval; assert.strictEqual(interval, 20); }); @@ -1177,7 +1176,7 @@ describe('parseRateLimitConfig', () => { }, }; - const result = parseRateLimitConfig(config, 1); + const result = parseRateLimitConfig(config); // bucketSize = burstCapacity * 1000 const bucketSize = result.bucket.defaultConfig.requestsPerSecond.bucketSize; @@ -1197,9 +1196,9 @@ describe('parseRateLimitConfig', () => { }, }; - const result = parseRateLimitConfig(config, 1); // 1 worker + const result = parseRateLimitConfig(config); - // Per-worker rate = 50 / 1 / 1 = 50 req/s + // Per-node rate = 50 / 1 = 50 req/s // Interval = 1000ms / 50 = 20ms const interval = result.bucket.defaultConfig.requestsPerSecond.interval; assert.strictEqual(interval, 20); @@ -1218,11 +1217,10 @@ describe('parseRateLimitConfig', () => { }, }; - const result = parseRateLimitConfig(config, 20); // 20 workers per node (ignored) + const result = parseRateLimitConfig(config); - // NEW BEHAVIOR: Per-NODE rate = 10000 / 10 nodes = 1000 req/s (workers NOT divided) + // Per-NODE rate = 10000 / 10 nodes = 1000 req/s // Interval = 1000ms / 1000 = 1ms - // Workers dynamically share the 1000 req/s node quota via 
Redis const interval = result.bucket.defaultConfig.requestsPerSecond.interval; assert.strictEqual(interval, 1); }); diff --git a/tests/unit/api/apiUtils/rateLimit/gcra.js b/tests/unit/api/apiUtils/rateLimit/gcra.js index 0684cb365d..484b47fa76 100644 --- a/tests/unit/api/apiUtils/rateLimit/gcra.js +++ b/tests/unit/api/apiUtils/rateLimit/gcra.js @@ -3,15 +3,14 @@ const assert = require('assert'); const { calculateInterval } = require('../../../../../lib/api/apiUtils/rateLimit/gcra'); describe('GCRA calculateInterval function', () => { - it('should calculate interval for 1770 req/s across 177 nodes (ignoring workers)', () => { + it('should calculate interval for 1770 req/s across 177 nodes', () => { const limit = 1770; const nodes = 177; - const workers = 10; // Ignored in new implementation - // Per-NODE rate = 1770 / 177 = 10 req/s (workers NOT divided) + // Per-NODE rate = 1770 / 177 = 10 req/s // Interval = 1000 / 10 = 100ms - // This allows dynamic work-stealing: busy workers can use full node quota - const interval = calculateInterval(limit, nodes, workers); + // Workers on the same node share node quota dynamically via Redis reconciliation + const interval = calculateInterval(limit, nodes); assert.strictEqual(interval, 100); }); @@ -19,24 +18,22 @@ describe('GCRA calculateInterval function', () => { it('should calculate interval for 1000 req/s with single node', () => { const limit = 1000; const nodes = 1; - const workers = 1; // Ignored - // Per-NODE rate = 1000 / 1 = 1000 req/s (workers NOT divided) + // Per-NODE rate = 1000 / 1 = 1000 req/s // Interval = 1000 / 1000 = 1ms - const interval = calculateInterval(limit, nodes, workers); + const interval = calculateInterval(limit, nodes); assert.strictEqual(interval, 1); }); - it('should calculate interval for 100 req/s with 10 workers on single node', () => { + it('should calculate interval for 100 req/s on a single node', () => { const limit = 100; const nodes = 1; - const workers = 10; // Ignored in new 
implementation - // Per-NODE rate = 100 / 1 = 100 req/s (workers NOT divided) + // Per-NODE rate = 100 / 1 = 100 req/s // Interval = 1000 / 100 = 10ms - // Each worker evaluates at node quota, Redis reconciliation shares capacity - const interval = calculateInterval(limit, nodes, workers); + // Workers share the node quota; Redis reconciliation distributes capacity + const interval = calculateInterval(limit, nodes); assert.strictEqual(interval, 10); }); @@ -44,11 +41,10 @@ describe('GCRA calculateInterval function', () => { it('should handle fractional intervals', () => { const limit = 3000; const nodes = 177; - const workers = 10; // Ignored - // Per-NODE rate = 3000 / 177 = 16.95 req/s (workers NOT divided) + // Per-NODE rate = 3000 / 177 = 16.95 req/s // Interval = 1000 / 16.95 = 58.99ms - const interval = calculateInterval(limit, nodes, workers); + const interval = calculateInterval(limit, nodes); assert.strictEqual(Math.floor(interval), 58); }); @@ -56,7 +52,6 @@ describe('GCRA calculateInterval function', () => { it('should demonstrate dynamic work-stealing behavior', () => { const limit = 600; const nodes = 6; - const workers = 10; // Per-NODE rate = 600 / 6 = 100 req/s // Interval = 1000 / 100 = 10ms per request @@ -65,7 +60,7 @@ describe('GCRA calculateInterval function', () => { // - If 1 worker is busy, 9 idle: busy worker can use ~100 req/s // - If all 10 workers are busy: they share the 100 req/s (~10 req/s each) // - Redis reconciliation dynamically balances across active workers - const interval = calculateInterval(limit, nodes, workers); + const interval = calculateInterval(limit, nodes); assert.strictEqual(interval, 10); // Worker quota is NOT pre-divided: 100 req/s node quota available From 3a91c786ff417bc3a06cadcb5b1dd97aad177e35 Mon Sep 17 00:00:00 2001 From: Taylor McKinnon Date: Fri, 13 Mar 2026 14:23:18 +0100 Subject: [PATCH 08/16] impr(CLDSRV-852): Refactor TokenBucket class - Change constructor to accept resourceClass, resourceId, measure - 
Precalculate interval for reuse in constructor - Add hasCapacity() method - Add updateLimit() method to encasulate limit change logic - Remove unused mechanism to record request durations - Simplify refill logic --- lib/api/apiUtils/rateLimit/tokenBucket.js | 201 ++++------ lib/api/bucketDeleteRateLimit.js | 2 +- .../api/apiUtils/rateLimit/tokenBucket.js | 355 +++++++----------- 3 files changed, 207 insertions(+), 351 deletions(-) diff --git a/lib/api/apiUtils/rateLimit/tokenBucket.js b/lib/api/apiUtils/rateLimit/tokenBucket.js index 26b7850187..f20f284607 100644 --- a/lib/api/apiUtils/rateLimit/tokenBucket.js +++ b/lib/api/apiUtils/rateLimit/tokenBucket.js @@ -16,32 +16,44 @@ const { instance: redisClient } = require('./client'); const { config } = require('../../../Config'); const { calculateInterval } = require('./gcra'); -// Map of bucket name -> WorkerTokenBucket instance +// Map of `${resourceClass}:${resourceId}:${measure}` -> WorkerTokenBucket instance const tokenBuckets = new Map(); /** - * Per-bucket token bucket for a single worker + * Per-resourceClass+resourceID+measure token bucket for a single worker */ class WorkerTokenBucket { - constructor(bucketName, limitConfig, log) { - this.bucketName = bucketName; + constructor(resourceClass, resourceId, measure, limitConfig, log) { + this.resourceClass = resourceClass; + this.resourceId = resourceId; + this.measure = measure; this.limitConfig = limitConfig; this.log = log; - // Token buffer configuration - this.bufferSize = config.rateLimiting?.tokenBucketBufferSize || 50; // Max tokens to hold - this.refillThreshold = config.rateLimiting?.tokenBucketRefillThreshold || 20; // Trigger refill when below this + this.bufferSize = config.rateLimiting?.tokenBucketBufferSize; // Max tokens to hold + this.refillThreshold = config.rateLimiting?.tokenBucketRefillThreshold; // Trigger refill when below this this.tokens = this.bufferSize; // Start with full buffer for fail-open at startup - - // Refill state - 
this.refillInProgress = false; + this.interval = calculateInterval(this.limitConfig.limit, config.rateLimiting.nodes); this.lastRefillTime = Date.now(); this.refillCount = 0; - this.requestCounter = 0; - this.lastRequestTime = 0; + this.refillInProgress = false; + } - // Track consumption rate for adaptive refill - this.requestTimestamps = new Set(); + hasCapacity() { + return this.tokens > 0; + } + + updateLimit(updatedConfig) { + if (this.limitConfig.limit !== updatedConfig.limit || + this.limitConfig.burstCapacity !== updatedConfig.burstCapacity + ) { + const oldConfig = this.limitConfig; + this.limitConfig = updatedConfig; + this.interval = calculateInterval(updatedConfig.limit, config.rateLimiting.nodes); + return { updated: true, oldConfig }; + } + + return { updated: false }; } /** @@ -50,11 +62,8 @@ class WorkerTokenBucket { * @returns {boolean} True if request allowed, false if throttled */ tryConsume() { - // Record request for rate calculation - this.recordRequest(); - if (this.tokens > 0) { - this.tokens--; + this.tokens -= 1; return true; // ALLOWED } @@ -78,16 +87,6 @@ class WorkerTokenBucket { return; } - // Trigger async refill - await this.refill(); - } - - /** - * Request tokens from Redis using GCRA enforcement - * - * @returns {Promise} - */ - async refill() { this.refillInProgress = true; const startTime = Date.now(); @@ -100,21 +99,16 @@ class WorkerTokenBucket { } // Calculate GCRA parameters - const nodes = config.rateLimiting.nodes || 1; - const workers = config.clusters || 1; - const interval = calculateInterval(this.limitConfig.limit, nodes, workers); - const burstCapacitySeconds = - config.rateLimiting.bucket?.defaultConfig?.requestsPerSecond?.burstCapacity || 1; - const burstCapacity = burstCapacitySeconds * 1000; - let granted = requested; if (redisClient.isReady()) { // Request tokens from Redis (atomic GCRA enforcement) granted = await util.promisify(redisClient.grantTokens.bind(redisClient))( - this.bucketName, + this.resourceClass, 
+ this.resourceId, + this.measure, requested, - interval, - burstCapacity, + this.interval, + this.limitConfig.burstCapacity, ); } else { // Connection to redis has failed in some way. @@ -122,7 +116,11 @@ class WorkerTokenBucket { // We grant the requested amount of tokens anyway to avoid degrading service availability. this.log.warn( 'rate limit redis client not connected. granting tokens anyway to avoid service degradation', - { bucketName: this.bucketName }, + { + resourceClass: this.resourceClass, + resourceId: this.resourceId, + measure: this.measure, + }, ); } @@ -134,7 +132,9 @@ class WorkerTokenBucket { const duration = this.lastRefillTime - startTime; this.log.debug('Token refill completed', { - bucketName: this.bucketName, + resourceClass: this.resourceClass, + resourceId: this.resourceId, + measure: this.measure, requested, granted, newBalance: this.tokens, @@ -145,21 +145,27 @@ class WorkerTokenBucket { // Warn if refill took too long or granted too few if (duration > 100) { this.log.warn('Slow token refill detected', { - bucketName: this.bucketName, + resourceClass: this.resourceClass, + resourceId: this.resourceId, + measure: this.measure, durationMs: duration, }); } if (granted === 0 && requested > 0) { this.log.trace('Token refill denied - quota exhausted', { - bucketName: this.bucketName, + resourceClass: this.resourceClass, + resourceId: this.resourceId, + measure: this.measure, requested, }); } } catch (err) { this.log.error('Token refill failed', { - bucketName: this.bucketName, + resourceClass: this.resourceClass, + resourceId: this.resourceId, + measure: this.measure, error: err.message, stack: err.stack, }); @@ -167,96 +173,39 @@ class WorkerTokenBucket { this.refillInProgress = false; } } - - /** - * Record request timestamp for rate calculation - */ - recordRequest() { - const now = Date.now(); - if (now == this.lastRequestTime) { - this.requestCounter++; - } else { - this.lastRequestTime = now; - this.requestCounter = 0; - } - - 
this.requestTimestamps.add(now * 1000 + this.requestCounter); - - // Keep only last 1 second of timestamps - const cutoff = (now - 1000) * 1000; - for (const timestamp of this.requestTimestamps.values()) { - if (timestamp < cutoff) { - this.requestTimestamps.delete(timestamp); - } - } - } - - /** - * Get current request rate (requests per second) - * - * @returns {number} - */ - getCurrentRate() { - const now = Date.now(); - const cutoff = (now - 1000) * 1000; - - let count = 0; - for (const timestamp of this.requestTimestamps.values()) { - if (timestamp >= cutoff) { - count++; - } - } - return count; - } - - /** - * Get token bucket stats for monitoring - * - * @returns {object} - */ - getStats() { - return { - tokens: this.tokens, - bufferSize: this.bufferSize, - refillThreshold: this.refillThreshold, - refillInProgress: this.refillInProgress, - lastRefillTime: this.lastRefillTime, - refillCount: this.refillCount, - currentRate: this.getCurrentRate(), - }; - } } /** * Get or create token bucket for a bucket * - * @param {string} bucketName - Bucket name + * @param {string} resourceClass - "bucket" or "account" + * @param {string} resourceId - bucket name or account canonicalId + * @param {string} measure - measure id e.g. 
"rps" * @param {object} limitConfig - Rate limit configuration * @param {object} log - Logger instance * @returns {WorkerTokenBucket} */ -function getTokenBucket(bucketName, limitConfig, log) { - let bucket = tokenBuckets.get(bucketName); - +function getTokenBucket(resourceClass, resourceId, measure, limitConfig, log) { + const cacheKey = `${resourceClass}:${resourceId}:${measure}`; + let bucket = tokenBuckets.get(cacheKey); if (!bucket) { - bucket = new WorkerTokenBucket(bucketName, limitConfig, log); - tokenBuckets.set(bucketName, bucket); + bucket = new WorkerTokenBucket(resourceClass, resourceId, measure, limitConfig, log); + tokenBuckets.set(cacheKey, bucket); log.debug('Created token bucket', { - bucketName, + cacheKey, bufferSize: bucket.bufferSize, refillThreshold: bucket.refillThreshold, }); - } else if (bucket.limitConfig.limit !== limitConfig.limit) { - // Update limit config when it changes dynamically - const oldLimit = bucket.limitConfig.limit; - bucket.limitConfig = limitConfig; - - log.info('Updated token bucket limit config', { - bucketName, - oldLimit, - newLimit: limitConfig.limit, - }); + } else { + const { updated, oldConfig } = bucket.updateLimit(limitConfig); + if (updated) { + log.info('Updated token bucket limit config', { + cacheKey, + old: oldConfig, + new: limitConfig, + }); + } } return bucket; @@ -282,15 +231,15 @@ function cleanupTokenBuckets(maxIdleMs = 60000) { const now = Date.now(); const toRemove = []; - for (const [bucketName, bucket] of tokenBuckets.entries()) { + for (const [key, bucket] of tokenBuckets.entries()) { const idleTime = now - bucket.lastRefillTime; if (idleTime > maxIdleMs && bucket.tokens === 0) { - toRemove.push(bucketName); + toRemove.push(key); } } - for (const bucketName of toRemove) { - tokenBuckets.delete(bucketName); + for (const key of toRemove) { + tokenBuckets.delete(key); } return toRemove.length; @@ -299,11 +248,13 @@ function cleanupTokenBuckets(maxIdleMs = 60000) { /** * Remove a specific token 
bucket (used when rate limit config is deleted) * - * @param {string} bucketName - Bucket name + * @param {string} resourceClass - "bucket" or "account" + * @param {string} resourceId - bucket name or account canonicalId + * @param {string} measure - measure id e.g. "rps" * @returns {boolean} True if bucket was found and removed */ -function removeTokenBucket(bucketName) { - return tokenBuckets.delete(bucketName); +function removeTokenBucket(resourceClass, resourceId, measure) { + return tokenBuckets.delete(`${resourceClass}:${resourceId}:${measure}`); } module.exports = { diff --git a/lib/api/bucketDeleteRateLimit.js b/lib/api/bucketDeleteRateLimit.js index f500463746..445a6dc2ed 100644 --- a/lib/api/bucketDeleteRateLimit.js +++ b/lib/api/bucketDeleteRateLimit.js @@ -53,7 +53,7 @@ function bucketDeleteRateLimit(authInfo, request, log, callback) { } // Invalidate cache and remove token bucket cache.deleteCachedConfig(cache.namespace.bucket, bucketName); - removeTokenBucket(bucketName); + removeTokenBucket('bucket', bucketName, 'rps'); log.debug('invalidated rate limit cache and token bucket for bucket', { bucketName }); // TODO: implement Utapi metric support return callback(null, corsHeaders); diff --git a/tests/unit/api/apiUtils/rateLimit/tokenBucket.js b/tests/unit/api/apiUtils/rateLimit/tokenBucket.js index fcb635416b..fdd490b294 100644 --- a/tests/unit/api/apiUtils/rateLimit/tokenBucket.js +++ b/tests/unit/api/apiUtils/rateLimit/tokenBucket.js @@ -18,20 +18,12 @@ describe('WorkerTokenBucket', () => { error: sinon.stub(), }; - // Stub config sandbox.stub(config, 'rateLimiting').value({ nodes: 1, - bucket: { - defaultConfig: { - requestsPerSecond: { - burstCapacity: 2, - }, - }, - }, + tokenBucketBufferSize: 50, + tokenBucketRefillThreshold: 20, }); - sandbox.stub(config, 'clusters').value(1); - // Clear token buckets map tokenBucket.getAllTokenBuckets().clear(); }); @@ -40,16 +32,16 @@ describe('WorkerTokenBucket', () => { }); describe('constructor', () => { - 
it('should initialize with default values', () => { - const bucket = new tokenBucket.WorkerTokenBucket('test-bucket', { limit: 100 }, mockLog); + it('should initialize with correct values from config', () => { + const bucket = new tokenBucket.WorkerTokenBucket('bucket', 'test-bucket', 'rps', { limit: 100 }, mockLog); - assert.strictEqual(bucket.bucketName, 'test-bucket'); + assert.strictEqual(bucket.resourceClass, 'bucket'); + assert.strictEqual(bucket.resourceId, 'test-bucket'); + assert.strictEqual(bucket.measure, 'rps'); assert.deepStrictEqual(bucket.limitConfig, { limit: 100 }); assert.strictEqual(bucket.bufferSize, 50); assert.strictEqual(bucket.refillThreshold, 20); assert.strictEqual(bucket.tokens, 50); // Starts with full buffer for fail-open - assert.strictEqual(bucket.refillInProgress, false); - assert.strictEqual(bucket.refillCount, 0); }); it('should use custom bufferSize from config.rateLimiting', () => { @@ -57,32 +49,24 @@ describe('WorkerTokenBucket', () => { sandbox.stub(config, 'rateLimiting').value({ nodes: 1, tokenBucketBufferSize: 100, - bucket: { - defaultConfig: { - requestsPerSecond: { burstCapacity: 2 }, - }, - }, + tokenBucketRefillThreshold: 20, }); - const bucket = new tokenBucket.WorkerTokenBucket('test-bucket', { limit: 100 }, mockLog); + const bucket = new tokenBucket.WorkerTokenBucket('bucket', 'test-bucket', 'rps', { limit: 100 }, mockLog); assert.strictEqual(bucket.bufferSize, 100); - assert.strictEqual(bucket.tokens, 100); // tokens = bufferSize + assert.strictEqual(bucket.tokens, 100); }); it('should use custom refillThreshold from config.rateLimiting', () => { sandbox.restore(); sandbox.stub(config, 'rateLimiting').value({ nodes: 1, + tokenBucketBufferSize: 50, tokenBucketRefillThreshold: 30, - bucket: { - defaultConfig: { - requestsPerSecond: { burstCapacity: 2 }, - }, - }, }); - const bucket = new tokenBucket.WorkerTokenBucket('test-bucket', { limit: 100 }, mockLog); + const bucket = new tokenBucket.WorkerTokenBucket('bucket', 
'test-bucket', 'rps', { limit: 100 }, mockLog); assert.strictEqual(bucket.refillThreshold, 30); }); @@ -93,35 +77,19 @@ describe('WorkerTokenBucket', () => { nodes: 1, tokenBucketBufferSize: 75, tokenBucketRefillThreshold: 25, - bucket: { - defaultConfig: { - requestsPerSecond: { burstCapacity: 2 }, - }, - }, }); - const bucket = new tokenBucket.WorkerTokenBucket('test-bucket', { limit: 100 }, mockLog); + const bucket = new tokenBucket.WorkerTokenBucket('bucket', 'test-bucket', 'rps', { limit: 100 }, mockLog); assert.strictEqual(bucket.bufferSize, 75); assert.strictEqual(bucket.refillThreshold, 25); - assert.strictEqual(bucket.tokens, 75); // tokens = bufferSize - }); - - it('should fallback to defaults when rateLimiting is undefined', () => { - sandbox.restore(); - sandbox.stub(config, 'rateLimiting').value(undefined); - - const bucket = new tokenBucket.WorkerTokenBucket('test-bucket', { limit: 100 }, mockLog); - - assert.strictEqual(bucket.bufferSize, 50); - assert.strictEqual(bucket.refillThreshold, 20); - assert.strictEqual(bucket.tokens, 50); + assert.strictEqual(bucket.tokens, 75); }); }); describe('tryConsume', () => { it('should consume token when available', () => { - const bucket = new tokenBucket.WorkerTokenBucket('test-bucket', { limit: 100 }, mockLog); + const bucket = new tokenBucket.WorkerTokenBucket('bucket', 'test-bucket', 'rps', { limit: 100 }, mockLog); bucket.tokens = 10; const result = bucket.tryConsume(); @@ -131,7 +99,7 @@ describe('WorkerTokenBucket', () => { }); it('should return false when no tokens available', () => { - const bucket = new tokenBucket.WorkerTokenBucket('test-bucket', { limit: 100 }, mockLog); + const bucket = new tokenBucket.WorkerTokenBucket('bucket', 'test-bucket', 'rps', { limit: 100 }, mockLog); bucket.tokens = 0; const result = bucket.tryConsume(); @@ -140,19 +108,8 @@ describe('WorkerTokenBucket', () => { assert.strictEqual(bucket.tokens, 0); }); - it('should record request timestamp', () => { - const bucket = new 
tokenBucket.WorkerTokenBucket('test-bucket', { limit: 100 }, mockLog); - bucket.tokens = 5; - - bucket.tryConsume(); - - assert.strictEqual(bucket.requestTimestamps.size, 1); - const firstValue = bucket.requestTimestamps.values().next().value; - assert(firstValue <= Date.now() * 1000); - }); - it('should handle multiple sequential consumptions', () => { - const bucket = new tokenBucket.WorkerTokenBucket('test-bucket', { limit: 100 }, mockLog); + const bucket = new tokenBucket.WorkerTokenBucket('bucket', 'test-bucket', 'rps', { limit: 100 }, mockLog); bucket.tokens = 3; assert.strictEqual(bucket.tryConsume(), true); @@ -169,159 +126,89 @@ describe('WorkerTokenBucket', () => { }); }); - describe('recordRequest', () => { - it('should add timestamp to array', () => { - const bucket = new tokenBucket.WorkerTokenBucket('test-bucket', { limit: 100 }, mockLog); - const before = Date.now() * 1000; - - bucket.recordRequest(); + describe('hasCapacity', () => { + it('should return true when tokens are available', () => { + const bucket = new tokenBucket.WorkerTokenBucket('bucket', 'test-bucket', 'rps', { limit: 100 }, mockLog); + bucket.tokens = 1; - assert.strictEqual(bucket.requestTimestamps.size, 1); - const firstValue = bucket.requestTimestamps.values().next().value; - assert(firstValue >= before); - assert(firstValue <= Date.now() * 1000); + assert.strictEqual(bucket.hasCapacity(), true); }); - it('should expire old timestamps beyond 1 second', () => { - const bucket = new tokenBucket.WorkerTokenBucket('test-bucket', { limit: 100 }, mockLog); - const now = Date.now() * 1000; - - // Add old timestamp (2 seconds ago) - bucket.requestTimestamps.add(now - 2000000); - - // Add recent timestamp - bucket.requestTimestamps.add(now - 500000); - - // Record new request - bucket.recordRequest(); + it('should return false when no tokens available', () => { + const bucket = new tokenBucket.WorkerTokenBucket('bucket', 'test-bucket', 'rps', { limit: 100 }, mockLog); + bucket.tokens = 0; 
- // Old timestamp should be removed - assert.strictEqual(bucket.requestTimestamps.size, 2); - const firstValue = bucket.requestTimestamps.values().next().value; - assert(firstValue >= now - 1000000); + assert.strictEqual(bucket.hasCapacity(), false); }); }); - describe('getCurrentRate', () => { - it('should return 0 when no requests', () => { - const bucket = new tokenBucket.WorkerTokenBucket('test-bucket', { limit: 100 }, mockLog); + describe('updateLimit', () => { + it('should update limitConfig and interval when limit changes', () => { + const bucket = new tokenBucket.WorkerTokenBucket( + 'bucket', 'test-bucket', 'rps', { limit: 100, burstCapacity: 1000 }, mockLog); + const oldInterval = bucket.interval; - assert.strictEqual(bucket.getCurrentRate(), 0); + const result = bucket.updateLimit({ limit: 200, burstCapacity: 1000 }); + + assert.strictEqual(result.updated, true); + assert.deepStrictEqual(result.oldConfig, { limit: 100, burstCapacity: 1000 }); + assert.strictEqual(bucket.limitConfig.limit, 200); + assert.notStrictEqual(bucket.interval, oldInterval); }); - it('should count requests in last second', () => { - const bucket = new tokenBucket.WorkerTokenBucket('test-bucket', { limit: 100 }, mockLog); - const now = Date.now() * 1000; + it('should update limitConfig when burstCapacity changes', () => { + const bucket = new tokenBucket.WorkerTokenBucket( + 'bucket', 'test-bucket', 'rps', { limit: 100, burstCapacity: 1000 }, mockLog); - // Add 5 requests in last second - bucket.requestTimestamps.add(now - 900000); - bucket.requestTimestamps.add(now - 700000); - bucket.requestTimestamps.add(now - 500000); - bucket.requestTimestamps.add(now - 300000); - bucket.requestTimestamps.add(now - 100000); + const result = bucket.updateLimit({ limit: 100, burstCapacity: 2000 }); - assert.strictEqual(bucket.getCurrentRate(), 5); + assert.strictEqual(result.updated, true); + assert.strictEqual(bucket.limitConfig.burstCapacity, 2000); }); - it('should exclude requests older 
than 1 second', () => { - const bucket = new tokenBucket.WorkerTokenBucket('test-bucket', { limit: 100 }, mockLog); - const now = Date.now() * 1000; - - // Add old requests (beyond 1 second) - bucket.requestTimestamps.add(now - 1500000); - bucket.requestTimestamps.add(now - 1200000); + it('should return updated: false when config is unchanged', () => { + const bucket = new tokenBucket.WorkerTokenBucket( + 'bucket', 'test-bucket', 'rps', { limit: 100, burstCapacity: 1000 }, mockLog); - // Add recent requests - bucket.requestTimestamps.add(now - 800000); - bucket.requestTimestamps.add(now - 400000); + const result = bucket.updateLimit({ limit: 100, burstCapacity: 1000 }); - assert.strictEqual(bucket.getCurrentRate(), 2); + assert.strictEqual(result.updated, false); }); }); describe('refillIfNeeded', () => { it('should skip refill when above threshold', async () => { - const bucket = new tokenBucket.WorkerTokenBucket('test-bucket', { limit: 100 }, mockLog); + const bucket = new tokenBucket.WorkerTokenBucket( + 'bucket', 'test-bucket', 'rps', { limit: 100 }, mockLog); bucket.tokens = 30; // Above threshold of 20 - const refillSpy = sandbox.spy(bucket, 'refill'); await bucket.refillIfNeeded(); - assert.strictEqual(refillSpy.called, false); + // refillInProgress was never set (no refill attempted) + assert.ok(!bucket.refillInProgress); }); it('should skip refill when already in progress', async () => { - const bucket = new tokenBucket.WorkerTokenBucket('test-bucket', { limit: 100 }, mockLog); + const bucket = new tokenBucket.WorkerTokenBucket( + 'bucket', 'test-bucket', 'rps', { limit: 100 }, mockLog); bucket.tokens = 10; // Below threshold bucket.refillInProgress = true; - const refillSpy = sandbox.spy(bucket, 'refill'); await bucket.refillIfNeeded(); - assert.strictEqual(refillSpy.called, false); + // Still true — function returned early without clearing it + assert.strictEqual(bucket.refillInProgress, true); }); it('should trigger refill when below threshold', async 
() => { - const bucket = new tokenBucket.WorkerTokenBucket('test-bucket', { limit: 100 }, mockLog); + const bucket = new tokenBucket.WorkerTokenBucket( + 'bucket', 'test-bucket', 'rps', { limit: 100, burstCapacity: 1000 }, mockLog); bucket.tokens = 10; // Below threshold of 20 - // Stub refill to prevent actual Redis call - sandbox.stub(bucket, 'refill').resolves(); - await bucket.refillIfNeeded(); - assert.strictEqual(bucket.refill.calledOnce, true); - }); - }); - - describe('refill', () => { - it('should skip refill when buffer is already full', async () => { - const bucket = new tokenBucket.WorkerTokenBucket('test-bucket', { limit: 100 }, mockLog); - bucket.tokens = 50; // Buffer at maximum (bufferSize = 50) - - // Mock refill to verify early return - const refillSpy = sandbox.spy(bucket, 'refill'); - - await bucket.refill(); - - // Verify refill was called but returned early (no Redis call) - assert.strictEqual(refillSpy.calledOnce, true); - - // Tokens should remain unchanged - assert.strictEqual(bucket.tokens, 50); - }); - - it('should not call Redis when requested amount is zero', async () => { - const bucket = new tokenBucket.WorkerTokenBucket('test-bucket', { limit: 100 }, mockLog); - bucket.tokens = 51; // More than buffer size (edge case) - - await bucket.refill(); - - // Should complete without errors even though requested < 0 - assert(true); - }); - - // NOTE: Full refill() tests with actual Redis calls and token granting logic - // are better suited for functional/integration tests. These unit tests verify - // the early-return logic and buffer boundary conditions without requiring Redis. 
- }); - - describe('getStats', () => { - it('should return current bucket stats', () => { - const bucket = new tokenBucket.WorkerTokenBucket('test-bucket', { limit: 100 }, mockLog); - bucket.tokens = 25; - bucket.refillCount = 5; - bucket.requestTimestamps.add(Date.now() * 1000); - - const stats = bucket.getStats(); - - assert.strictEqual(stats.tokens, 25); - assert.strictEqual(stats.bufferSize, 50); - assert.strictEqual(stats.refillThreshold, 20); - assert.strictEqual(stats.refillInProgress, false); - assert.strictEqual(stats.refillCount, 5); - assert.strictEqual(stats.currentRate, 1); - assert(typeof stats.lastRefillTime === 'number'); + // refillInProgress is cleared in finally block regardless of outcome + assert.strictEqual(bucket.refillInProgress, false); }); }); }); @@ -340,10 +227,14 @@ describe('Token bucket management functions', () => { error: sinon.stub(), }; - // Load tokenBucket module (no need for Redis client in these tests) - tokenBucket = require('../../../../../lib/api/apiUtils/rateLimit/tokenBucket'); + const { config } = require('../../../../../lib/Config'); + sandbox.stub(config, 'rateLimiting').value({ + nodes: 1, + tokenBucketBufferSize: 50, + tokenBucketRefillThreshold: 20, + }); - // Clear token buckets + tokenBucket = require('../../../../../lib/api/apiUtils/rateLimit/tokenBucket'); tokenBucket.getAllTokenBuckets().clear(); }); @@ -353,49 +244,70 @@ describe('Token bucket management functions', () => { describe('getTokenBucket', () => { it('should create new bucket on first call', () => { - const bucket = tokenBucket.getTokenBucket('test-bucket', { limit: 100 }, mockLog); + const bucket = tokenBucket.getTokenBucket('bucket', 'test-bucket', 'rps', { limit: 100 }, mockLog); assert(bucket instanceof tokenBucket.WorkerTokenBucket); - assert.strictEqual(bucket.bucketName, 'test-bucket'); + assert.strictEqual(bucket.resourceClass, 'bucket'); + assert.strictEqual(bucket.resourceId, 'test-bucket'); + assert.strictEqual(bucket.measure, 'rps'); 
assert(mockLog.debug.calledOnce); assert(mockLog.debug.firstCall.args[0].includes('Created token bucket')); }); it('should return existing bucket on subsequent calls', () => { - const bucket1 = tokenBucket.getTokenBucket('test-bucket', { limit: 100 }, mockLog); - const bucket2 = tokenBucket.getTokenBucket('test-bucket', { limit: 100 }, mockLog); + const bucket1 = tokenBucket.getTokenBucket('bucket', 'test-bucket', 'rps', { limit: 100 }, mockLog); + const bucket2 = tokenBucket.getTokenBucket('bucket', 'test-bucket', 'rps', { limit: 100 }, mockLog); assert.strictEqual(bucket1, bucket2); - assert.strictEqual(mockLog.debug.callCount, 1); // Only called once + assert.strictEqual(mockLog.debug.callCount, 1); + }); + + it('should create separate buckets for different resource IDs', () => { + const bucket1 = tokenBucket.getTokenBucket('bucket', 'bucket-1', 'rps', { limit: 100 }, mockLog); + const bucket2 = tokenBucket.getTokenBucket('bucket', 'bucket-2', 'rps', { limit: 100 }, mockLog); + + assert.notStrictEqual(bucket1, bucket2); + assert.strictEqual(bucket1.resourceId, 'bucket-1'); + assert.strictEqual(bucket2.resourceId, 'bucket-2'); + }); + + it('should create separate buckets for different measures', () => { + const bucket1 = tokenBucket.getTokenBucket('bucket', 'test-bucket', 'rps', { limit: 100 }, mockLog); + const bucket2 = tokenBucket.getTokenBucket('bucket', 'test-bucket', 'bps', { limit: 100 }, mockLog); + + assert.notStrictEqual(bucket1, bucket2); + assert.strictEqual(bucket1.measure, 'rps'); + assert.strictEqual(bucket2.measure, 'bps'); }); - it('should create separate buckets for different bucket names', () => { - const bucket1 = tokenBucket.getTokenBucket('bucket-1', { limit: 100 }, mockLog); - const bucket2 = tokenBucket.getTokenBucket('bucket-2', { limit: 100 }, mockLog); + it('should create separate buckets for different resource classes', () => { + const bucket1 = tokenBucket.getTokenBucket('bucket', 'test', 'rps', { limit: 100 }, mockLog); + const 
bucket2 = tokenBucket.getTokenBucket('account', 'test', 'rps', { limit: 100 }, mockLog); assert.notStrictEqual(bucket1, bucket2); - assert.strictEqual(bucket1.bucketName, 'bucket-1'); - assert.strictEqual(bucket2.bucketName, 'bucket-2'); + assert.strictEqual(bucket1.resourceClass, 'bucket'); + assert.strictEqual(bucket2.resourceClass, 'account'); }); it('should update limitConfig when limit changes', () => { - const bucket1 = tokenBucket.getTokenBucket('test-bucket', { limit: 100, source: 'bucket' }, mockLog); + const bucket1 = tokenBucket.getTokenBucket( + 'bucket', 'test-bucket', 'rps', { limit: 100, source: 'bucket' }, mockLog); assert.strictEqual(bucket1.limitConfig.limit, 100); - // Simulate rate limit change - const bucket2 = tokenBucket.getTokenBucket('test-bucket', { limit: 200, source: 'bucket' }, mockLog); + const bucket2 = tokenBucket.getTokenBucket( + 'bucket', 'test-bucket', 'rps', { limit: 200, source: 'bucket' }, mockLog); - assert.strictEqual(bucket1, bucket2); // Same bucket instance - assert.strictEqual(bucket2.limitConfig.limit, 200); // Limit updated + assert.strictEqual(bucket1, bucket2); + assert.strictEqual(bucket2.limitConfig.limit, 200); assert(mockLog.info.calledOnce); assert(mockLog.info.firstCall.args[0].includes('Updated token bucket limit config')); }); it('should not log update when limit is unchanged', () => { - tokenBucket.getTokenBucket('test-bucket', { limit: 100 }, mockLog); + tokenBucket.getTokenBucket('bucket', 'test-bucket', 'rps', { limit: 100 }, mockLog); mockLog.info.resetHistory(); - tokenBucket.getTokenBucket('test-bucket', { limit: 100 }, mockLog); + tokenBucket.getTokenBucket('bucket', 'test-bucket', 'rps', { limit: 100 }, mockLog); assert.strictEqual(mockLog.info.called, false); }); @@ -403,17 +315,17 @@ describe('Token bucket management functions', () => { describe('removeTokenBucket', () => { it('should remove existing bucket and return true', () => { - tokenBucket.getTokenBucket('test-bucket', { limit: 100 }, 
mockLog); + tokenBucket.getTokenBucket('bucket', 'test-bucket', 'rps', { limit: 100 }, mockLog); assert.strictEqual(tokenBucket.getAllTokenBuckets().size, 1); - const result = tokenBucket.removeTokenBucket('test-bucket'); + const result = tokenBucket.removeTokenBucket('bucket', 'test-bucket', 'rps'); assert.strictEqual(result, true); assert.strictEqual(tokenBucket.getAllTokenBuckets().size, 0); }); it('should return false when bucket does not exist', () => { - const result = tokenBucket.removeTokenBucket('non-existent-bucket'); + const result = tokenBucket.removeTokenBucket('bucket', 'non-existent-bucket', 'rps'); assert.strictEqual(result, false); }); @@ -428,46 +340,43 @@ describe('Token bucket management functions', () => { }); it('should return all created buckets', () => { - tokenBucket.getTokenBucket('bucket-1', { limit: 100 }, mockLog); - tokenBucket.getTokenBucket('bucket-2', { limit: 200 }, mockLog); - tokenBucket.getTokenBucket('bucket-3', { limit: 300 }, mockLog); + tokenBucket.getTokenBucket('bucket', 'bucket-1', 'rps', { limit: 100 }, mockLog); + tokenBucket.getTokenBucket('bucket', 'bucket-2', 'rps', { limit: 200 }, mockLog); + tokenBucket.getTokenBucket('bucket', 'bucket-3', 'rps', { limit: 300 }, mockLog); const buckets = tokenBucket.getAllTokenBuckets(); assert.strictEqual(buckets.size, 3); - assert(buckets.has('bucket-1')); - assert(buckets.has('bucket-2')); - assert(buckets.has('bucket-3')); + assert(buckets.has('bucket:bucket-1:rps')); + assert(buckets.has('bucket:bucket-2:rps')); + assert(buckets.has('bucket:bucket-3:rps')); }); }); describe('cleanupTokenBuckets', () => { it('should remove idle buckets with no tokens', () => { - const bucket1 = tokenBucket.getTokenBucket('bucket-1', { limit: 100 }, mockLog); - const bucket2 = tokenBucket.getTokenBucket('bucket-2', { limit: 200 }, mockLog); + const bucket1 = tokenBucket.getTokenBucket('bucket', 'bucket-1', 'rps', { limit: 100 }, mockLog); + const bucket2 = tokenBucket.getTokenBucket('bucket', 
'bucket-2', 'rps', { limit: 200 }, mockLog); - // Make bucket-1 idle (old lastRefillTime, no tokens) - bucket1.lastRefillTime = Date.now() - 120000; // 2 minutes ago + bucket1.lastRefillTime = Date.now() - 120000; bucket1.tokens = 0; - // Make bucket-2 active bucket2.lastRefillTime = Date.now(); bucket2.tokens = 10; - const removed = tokenBucket.cleanupTokenBuckets(60000); // 60s threshold + const removed = tokenBucket.cleanupTokenBuckets(60000); assert.strictEqual(removed, 1); assert.strictEqual(tokenBucket.getAllTokenBuckets().size, 1); - assert(tokenBucket.getAllTokenBuckets().has('bucket-2')); - assert(!tokenBucket.getAllTokenBuckets().has('bucket-1')); + assert(tokenBucket.getAllTokenBuckets().has('bucket:bucket-2:rps')); + assert(!tokenBucket.getAllTokenBuckets().has('bucket:bucket-1:rps')); }); it('should not remove idle buckets with tokens', () => { - const bucket = tokenBucket.getTokenBucket('test-bucket', { limit: 100 }, mockLog); + const bucket = tokenBucket.getTokenBucket('bucket', 'test-bucket', 'rps', { limit: 100 }, mockLog); - // Make bucket idle but with tokens bucket.lastRefillTime = Date.now() - 120000; - bucket.tokens = 10; // Has tokens + bucket.tokens = 10; const removed = tokenBucket.cleanupTokenBuckets(60000); @@ -476,10 +385,9 @@ describe('Token bucket management functions', () => { }); it('should not remove recently active buckets', () => { - const bucket = tokenBucket.getTokenBucket('test-bucket', { limit: 100 }, mockLog); + const bucket = tokenBucket.getTokenBucket('bucket', 'test-bucket', 'rps', { limit: 100 }, mockLog); - // Recently active - bucket.lastRefillTime = Date.now() - 30000; // 30s ago + bucket.lastRefillTime = Date.now() - 30000; bucket.tokens = 0; const removed = tokenBucket.cleanupTokenBuckets(60000); @@ -489,13 +397,12 @@ describe('Token bucket management functions', () => { }); it('should use default maxIdleMs if not provided', () => { - const bucket = tokenBucket.getTokenBucket('test-bucket', { limit: 100 }, mockLog); 
+ const bucket = tokenBucket.getTokenBucket('bucket', 'test-bucket', 'rps', { limit: 100 }, mockLog); - // Idle beyond default (60s) bucket.lastRefillTime = Date.now() - 70000; bucket.tokens = 0; - const removed = tokenBucket.cleanupTokenBuckets(); // No argument + const removed = tokenBucket.cleanupTokenBuckets(); assert.strictEqual(removed, 1); }); @@ -507,24 +414,22 @@ describe('Token bucket management functions', () => { }); it('should remove multiple expired buckets', () => { - const bucket1 = tokenBucket.getTokenBucket('bucket-1', { limit: 100 }, mockLog); - const bucket2 = tokenBucket.getTokenBucket('bucket-2', { limit: 200 }, mockLog); - const bucket3 = tokenBucket.getTokenBucket('bucket-3', { limit: 300 }, mockLog); + const bucket1 = tokenBucket.getTokenBucket('bucket', 'bucket-1', 'rps', { limit: 100 }, mockLog); + const bucket2 = tokenBucket.getTokenBucket('bucket', 'bucket-2', 'rps', { limit: 200 }, mockLog); + const bucket3 = tokenBucket.getTokenBucket('bucket', 'bucket-3', 'rps', { limit: 300 }, mockLog); - // Make bucket-1 and bucket-2 idle bucket1.lastRefillTime = Date.now() - 120000; bucket1.tokens = 0; bucket2.lastRefillTime = Date.now() - 120000; bucket2.tokens = 0; - // Keep bucket-3 active bucket3.lastRefillTime = Date.now(); const removed = tokenBucket.cleanupTokenBuckets(60000); assert.strictEqual(removed, 2); assert.strictEqual(tokenBucket.getAllTokenBuckets().size, 1); - assert(tokenBucket.getAllTokenBuckets().has('bucket-3')); + assert(tokenBucket.getAllTokenBuckets().has('bucket:bucket-3:rps')); }); }); }); From b7d18d9037a0a9a4bdea187af8cb1a43cae91f6f Mon Sep 17 00:00:00 2001 From: Taylor McKinnon Date: Wed, 1 Apr 2026 12:29:30 -0700 Subject: [PATCH 09/16] impr(CLDSRV-855): Simplify config parsing --- lib/Config.js | 18 ++--- lib/api/apiUtils/rateLimit/config.js | 48 ++++++----- tests/unit/api/apiUtils/rateLimit/config.js | 90 +++++++++++---------- 3 files changed, 83 insertions(+), 73 deletions(-) diff --git a/lib/Config.js 
b/lib/Config.js index 887d5572bb..9faea9e96a 100644 --- a/lib/Config.js +++ b/lib/Config.js @@ -1862,23 +1862,15 @@ class Config extends EventEmitter { this.enableVeeamRoute = config.enableVeeamRoute; } - this.rateLimiting = { - enabled: false, - bucket: { - configCacheTTL: constants.rateLimitDefaultConfigCacheTTL, - }, - }; + // Parse and validate all rate limiting configuration + this.rateLimiting = parseRateLimitConfig(config.rateLimiting); - if (config.rateLimiting?.enabled) { - // rate limiting uses the same localCache config defined for S3 to avoid - // config duplication. + // Rate limiting uses the same localCache config defined for S3 to avoid config duplication. + // If rate limiting is enabled check to make sure it is also configured. + if (this.rateLimiting.enabled) { assert(this.localCache, 'localCache must be defined when rate limiting is enabled'); - - // Parse and validate all rate limiting configuration - this.rateLimiting = parseRateLimitConfig(config.rateLimiting); } - if (config.capabilities) { if (config.capabilities.locationTypes) { this.supportedLocationTypes = new Set(config.capabilities.locationTypes); diff --git a/lib/api/apiUtils/rateLimit/config.js b/lib/api/apiUtils/rateLimit/config.js index 1b6928b94f..84c4e49a35 100644 --- a/lib/api/apiUtils/rateLimit/config.js +++ b/lib/api/apiUtils/rateLimit/config.js @@ -2,7 +2,6 @@ const Joi = require('@hapi/joi'); const { errors, ArsenalError } = require('arsenal'); const { rateLimitDefaultConfigCacheTTL, rateLimitDefaultBurstCapacity } = require('../../../../constants'); -const { calculateInterval } = require('./gcra'); /** * Full Rate Limiting Configuration Example @@ -191,6 +190,24 @@ const rateLimitConfigSchema = Joi.object({ code: errors.SlowDown.message, message: errors.SlowDown.description, }), +}).default({ + enabled: false, + nodes: 1, + tokenBucketBufferSize: 50, + tokenBucketRefillThreshold: 20, + bucket: { + configCacheTTL: rateLimitDefaultConfigCacheTTL, + defaultBurstCapacity: 
rateLimitDefaultBurstCapacity, + }, + account: { + configCacheTTL: rateLimitDefaultConfigCacheTTL, + defaultBurstCapacity: rateLimitDefaultBurstCapacity, + }, + error: { + statusCode: 503, + code: errors.SlowDown.message, + message: errors.SlowDown.description, + }, }); /** @@ -203,10 +220,10 @@ const rateLimitConfigSchema = Joi.object({ * @returns {RateLimitClassConfig} Transformed rate limit config */ function transformClassConfig(resourceClass, validatedCfg, nodes) { - const transformed = { - defaultConfig: undefined, - configCacheTTL: validatedCfg.configCacheTTL, - defaultBurstCapacity: validatedCfg.defaultBurstCapacity, + const defaultConfig = { + RequestsPerSecond: { + BurstCapacity: validatedCfg.defaultBurstCapacity, + }, }; if (validatedCfg.defaultConfig?.requestsPerSecond) { @@ -223,23 +240,18 @@ function transformClassConfig(resourceClass, validatedCfg, nodes) { ); } - // Use provided burstCapacity or fall back to default - const effectiveBurstCapacity = burstCapacity || transformed.defaultBurstCapacity; - - // Calculate per-node interval using distributed architecture - const interval = calculateInterval(limit, nodes); - // Store both the original limit and the calculated values - transformed.defaultConfig = { - limit, - requestsPerSecond: { - interval, - bucketSize: effectiveBurstCapacity * 1000, - }, + defaultConfig.RequestsPerSecond = { + Limit: limit, + BurstCapacity: burstCapacity || validatedCfg.defaultBurstCapacity, }; } - return transformed; + return { + defaultConfig, + configCacheTTL: validatedCfg.configCacheTTL, + defaultBurstCapacity: validatedCfg.defaultBurstCapacity, + }; } /** diff --git a/tests/unit/api/apiUtils/rateLimit/config.js b/tests/unit/api/apiUtils/rateLimit/config.js index 118032603a..e6fbd35e74 100644 --- a/tests/unit/api/apiUtils/rateLimit/config.js +++ b/tests/unit/api/apiUtils/rateLimit/config.js @@ -36,7 +36,7 @@ describe('parseRateLimitConfig', () => { assert.strictEqual(result.error.message, 'ServiceUnavailable'); 
assert.strictEqual(result.error.description, 'Service Unavailable'); assert(result.bucket.defaultConfig); - assert(result.bucket.defaultConfig.requestsPerSecond); + assert(result.bucket.defaultConfig.RequestsPerSecond); }); it('should use default values when optional fields are omitted', () => { @@ -53,7 +53,9 @@ describe('parseRateLimitConfig', () => { assert.strictEqual(result.tokenBucketRefillThreshold, 20); // Default // Bucket config is always initialized for per-bucket rate limiting via API assert(result.bucket); - assert.strictEqual(result.bucket.defaultConfig, undefined); // No global default + assert.deepStrictEqual(result.bucket.defaultConfig, { + RequestsPerSecond: { BurstCapacity: constants.rateLimitDefaultBurstCapacity }, + }); assert.strictEqual(result.bucket.configCacheTTL, constants.rateLimitDefaultConfigCacheTTL); // Default assert.strictEqual(result.bucket.defaultBurstCapacity, constants.rateLimitDefaultBurstCapacity); // Default assert.strictEqual(result.error.code, errors.SlowDown.code); // Default @@ -71,7 +73,9 @@ describe('parseRateLimitConfig', () => { const result = parseRateLimitConfig(config); assert.strictEqual(result.bucket.configCacheTTL, 600); - assert.strictEqual(result.bucket.defaultConfig, undefined); + assert.deepStrictEqual(result.bucket.defaultConfig, { + RequestsPerSecond: { BurstCapacity: constants.rateLimitDefaultBurstCapacity }, + }); }); it('should use default configCacheTTL when not specified', () => { @@ -374,7 +378,7 @@ describe('parseRateLimitConfig', () => { const result = parseRateLimitConfig(config); assert(result.bucket.defaultConfig); - assert(result.bucket.defaultConfig.requestsPerSecond); + assert(result.bucket.defaultConfig.RequestsPerSecond); }); it('should throw if defaultConfig is not an object', () => { @@ -421,7 +425,7 @@ describe('parseRateLimitConfig', () => { const result = parseRateLimitConfig(config); // limit = 0 means unlimited, should be accepted - 
assert(result.bucket.defaultConfig.requestsPerSecond); + assert(result.bucket.defaultConfig.RequestsPerSecond); }); it('should propagate validation errors for negative limit', () => { @@ -476,9 +480,10 @@ describe('parseRateLimitConfig', () => { }; const result = parseRateLimitConfig(config); - const bucketSize = result.bucket.defaultConfig.requestsPerSecond.bucketSize; - // bucketSize = burstCapacity * 1000 - assert.strictEqual(bucketSize, constants.rateLimitDefaultBurstCapacity * 1000); + assert.strictEqual( + result.bucket.defaultConfig.RequestsPerSecond.BurstCapacity, + constants.rateLimitDefaultBurstCapacity + ); }); it('should use custom burstCapacity when provided', () => { @@ -495,8 +500,9 @@ describe('parseRateLimitConfig', () => { }; const result = parseRateLimitConfig(config); - const bucketSize = result.bucket.defaultConfig.requestsPerSecond.bucketSize; - assert.strictEqual(bucketSize, 20 * 1000); + assert.strictEqual( + result.bucket.defaultConfig.RequestsPerSecond.BurstCapacity, 20 + ); }); it('should throw if burstCapacity is negative', () => { @@ -573,8 +579,9 @@ describe('parseRateLimitConfig', () => { }; const result = parseRateLimitConfig(config); - const bucketSize = result.bucket.defaultConfig.requestsPerSecond.bucketSize; - assert.strictEqual(bucketSize, 1.5 * 1000); + assert.strictEqual( + result.bucket.defaultConfig.RequestsPerSecond.BurstCapacity, 1.5 + ); }); }); @@ -877,7 +884,7 @@ describe('parseRateLimitConfig', () => { assert(result.account); assert(result.account.defaultConfig); - assert.strictEqual(result.account.defaultConfig.limit, 500); + assert.strictEqual(result.account.defaultConfig.RequestsPerSecond.Limit, 500); assert.strictEqual(result.account.configCacheTTL, 60000); assert.strictEqual(result.account.defaultBurstCapacity, 2); }); @@ -904,9 +911,9 @@ describe('parseRateLimitConfig', () => { const result = parseRateLimitConfig(config); assert(result.bucket.defaultConfig); - assert.strictEqual(result.bucket.defaultConfig.limit, 
1000); + assert.strictEqual(result.bucket.defaultConfig.RequestsPerSecond.Limit, 1000); assert(result.account.defaultConfig); - assert.strictEqual(result.account.defaultConfig.limit, 500); + assert.strictEqual(result.account.defaultConfig.RequestsPerSecond.Limit, 500); }); it('should validate account limit against nodes', () => { @@ -973,8 +980,10 @@ describe('parseRateLimitConfig', () => { }; const result = parseRateLimitConfig(config); - const bucketSize = result.account.defaultConfig.requestsPerSecond.bucketSize; - assert.strictEqual(bucketSize, constants.rateLimitDefaultBurstCapacity * 1000); + assert.strictEqual( + result.account.defaultConfig.RequestsPerSecond.BurstCapacity, + constants.rateLimitDefaultBurstCapacity + ); }); it('should use custom burstCapacity when provided', () => { @@ -991,8 +1000,9 @@ describe('parseRateLimitConfig', () => { }; const result = parseRateLimitConfig(config); - const bucketSize = result.account.defaultConfig.requestsPerSecond.bucketSize; - assert.strictEqual(bucketSize, 20 * 1000); + assert.strictEqual( + result.account.defaultConfig.RequestsPerSecond.BurstCapacity, 20 + ); }); it('should accept float burstCapacity', () => { @@ -1009,8 +1019,9 @@ describe('parseRateLimitConfig', () => { }; const result = parseRateLimitConfig(config); - const bucketSize = result.account.defaultConfig.requestsPerSecond.bucketSize; - assert.strictEqual(bucketSize, 1.5 * 1000); + assert.strictEqual( + result.account.defaultConfig.RequestsPerSecond.BurstCapacity, 1.5 + ); }); it('should throw if burstCapacity is negative', () => { @@ -1142,14 +1153,14 @@ describe('parseRateLimitConfig', () => { }); describe('calculation verification', () => { - it('should calculate correct interval for distributed setup', () => { + it('should store Limit in defaultConfig for distributed setup', () => { const config = { serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', nodes: 2, bucket: { defaultConfig: { requestsPerSecond: { - limit: 100, // 100 
req/s global + limit: 100, }, }, }, @@ -1157,13 +1168,14 @@ describe('parseRateLimitConfig', () => { const result = parseRateLimitConfig(config); - // Per-NODE rate = 100 / 2 nodes = 50 req/s - // Interval = 1000ms / 50 = 20ms - const interval = result.bucket.defaultConfig.requestsPerSecond.interval; - assert.strictEqual(interval, 20); + assert.strictEqual(result.bucket.defaultConfig.RequestsPerSecond.Limit, 100); + assert.strictEqual( + result.bucket.defaultConfig.RequestsPerSecond.BurstCapacity, + constants.rateLimitDefaultBurstCapacity + ); }); - it('should calculate correct bucketSize from burstCapacity', () => { + it('should store BurstCapacity from burstCapacity', () => { const config = { serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', bucket: { @@ -1178,19 +1190,17 @@ describe('parseRateLimitConfig', () => { const result = parseRateLimitConfig(config); - // bucketSize = burstCapacity * 1000 - const bucketSize = result.bucket.defaultConfig.requestsPerSecond.bucketSize; - assert.strictEqual(bucketSize, 15 * 1000); + assert.strictEqual(result.bucket.defaultConfig.RequestsPerSecond.BurstCapacity, 15); }); - it('should handle single node and single worker', () => { + it('should handle single node setup', () => { const config = { serviceUserArn: 'arn:aws:iam::123456789012:user/rate-limit-service', nodes: 1, bucket: { defaultConfig: { requestsPerSecond: { - limit: 50, // 50 req/s + limit: 50, }, }, }, @@ -1198,10 +1208,7 @@ describe('parseRateLimitConfig', () => { const result = parseRateLimitConfig(config); - // Per-node rate = 50 / 1 = 50 req/s - // Interval = 1000ms / 50 = 20ms - const interval = result.bucket.defaultConfig.requestsPerSecond.interval; - assert.strictEqual(interval, 20); + assert.strictEqual(result.bucket.defaultConfig.RequestsPerSecond.Limit, 50); }); it('should handle high-scale distributed setup', () => { @@ -1211,7 +1218,8 @@ describe('parseRateLimitConfig', () => { bucket: { defaultConfig: { requestsPerSecond: { - limit: 
10000, // 10,000 req/s global + limit: 10000, + burstCapacity: 5, }, }, }, @@ -1219,10 +1227,8 @@ describe('parseRateLimitConfig', () => { const result = parseRateLimitConfig(config); - // Per-NODE rate = 10000 / 10 nodes = 1000 req/s - // Interval = 1000ms / 1000 = 1ms - const interval = result.bucket.defaultConfig.requestsPerSecond.interval; - assert.strictEqual(interval, 1); + assert.strictEqual(result.bucket.defaultConfig.RequestsPerSecond.Limit, 10000); + assert.strictEqual(result.bucket.defaultConfig.RequestsPerSecond.BurstCapacity, 5); }); }); }); From b362b33d6a767ab61a3d22124599b0ab8255d385 Mon Sep 17 00:00:00 2001 From: Taylor McKinnon Date: Wed, 8 Apr 2026 11:25:57 -0700 Subject: [PATCH 10/16] impr(CLDSRV-855): Add bucket owner cache --- lib/api/apiUtils/rateLimit/cache.js | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/lib/api/apiUtils/rateLimit/cache.js b/lib/api/apiUtils/rateLimit/cache.js index a73147b580..2d9a3b2b67 100644 --- a/lib/api/apiUtils/rateLimit/cache.js +++ b/lib/api/apiUtils/rateLimit/cache.js @@ -1,7 +1,9 @@ const configCache = new Map(); +const bucketOwnerCache = new Map(); const namespace = { bucket: 'bkt', + account: 'acc', }; function cacheSet(cache, key, value, ttl) { @@ -54,13 +56,23 @@ const setCachedConfig = formatKeyDecorator(cacheSet.bind(null, configCache)); const deleteCachedConfig = formatKeyDecorator(cacheDelete.bind(null, configCache)); const expireCachedConfigs = cacheExpire.bind(null, configCache); +const getCachedBucketOwner = cacheGet.bind(null, bucketOwnerCache); +const setCachedBucketOwner = cacheSet.bind(null, bucketOwnerCache); +const deleteCachedBucketOwner = cacheDelete.bind(null, bucketOwnerCache); +const expireCachedBucketOwners = cacheExpire.bind(null, bucketOwnerCache); + module.exports = { namespace, setCachedConfig, getCachedConfig, expireCachedConfigs, deleteCachedConfig, + setCachedBucketOwner, + getCachedBucketOwner, + deleteCachedBucketOwner, + expireCachedBucketOwners, // Do not access 
directly // Used only for tests configCache, + bucketOwnerCache, }; From 345ec4ede539d3f8971f59359a72b49e8b8ec7bc Mon Sep 17 00:00:00 2001 From: Taylor McKinnon Date: Wed, 8 Apr 2026 11:26:13 -0700 Subject: [PATCH 11/16] impr(CLDSRV-855): Refactor cleanup job --- lib/api/apiUtils/rateLimit/cleanup.js | 37 ++++++++++------ tests/unit/api/apiUtils/rateLimit/cleanup.js | 46 ++++++++++---------- 2 files changed, 46 insertions(+), 37 deletions(-) diff --git a/lib/api/apiUtils/rateLimit/cleanup.js b/lib/api/apiUtils/rateLimit/cleanup.js index 40628cc97c..3a54fbcfaa 100644 --- a/lib/api/apiUtils/rateLimit/cleanup.js +++ b/lib/api/apiUtils/rateLimit/cleanup.js @@ -1,7 +1,21 @@ -const { expireCachedConfigs } = require('./cache'); +const { expireCachedConfigs, expireCachedBucketOwners } = require('./cache'); const { rateLimitCleanupInterval } = require('../../../../constants'); -let cleanupInterval = null; +let cleanupTimer = null; + +function cleanupJob(log, options = {}) { + const expiredConfigs = expireCachedConfigs(); + const expiredBucketOwners = expireCachedBucketOwners(); + if (expiredConfigs > 0 || expiredBucketOwners > 0) { + log.debug('Rate limit cleanup completed', { expiredConfigs, expiredBucketOwners }); + } + + cleanupTimer = setTimeout(() => cleanupJob(log, options), rateLimitCleanupInterval); + if (!options.skipUnref) { + cleanupTimer.unref(); + } +} + /** * Start periodic cleanup of expired rate limit counters and cached configs @@ -9,11 +23,12 @@ let cleanupInterval = null; * Runs every 10 seconds to: * - Remove expired GCRA counters (where emptyAt <= now) * - Remove expired cached rate limit configs (where TTL expired) + * - Remove expired cached bucket owners (where TTL expired) * * This prevents memory leaks from accumulating expired entries. 
*/ function startCleanupJob(log, options = {}) { - if (cleanupInterval) { + if (cleanupTimer) { log.warn('Rate limit cleanup job already running'); return; } @@ -22,18 +37,12 @@ function startCleanupJob(log, options = {}) { interval: rateLimitCleanupInterval, }); - cleanupInterval = setInterval(() => { - const now = Date.now(); - const expiredConfigs = expireCachedConfigs(now); - if (expiredConfigs > 0) { - log.debug('Rate limit cleanup completed', { expiredConfigs }); - } - }, rateLimitCleanupInterval); + cleanupTimer = setTimeout(() => cleanupJob(log, options), rateLimitCleanupInterval); // Prevent cleanup job from keeping process alive // Skip unref() in test environment to work with sinon fake timers if (!options.skipUnref) { - cleanupInterval.unref(); + cleanupTimer.unref(); } } @@ -42,9 +51,9 @@ function startCleanupJob(log, options = {}) { * Used for graceful shutdown or testing */ function stopCleanupJob(log) { - if (cleanupInterval) { - clearInterval(cleanupInterval); - cleanupInterval = null; + if (cleanupTimer !== null) { + clearTimeout(cleanupTimer); + cleanupTimer = null; if (log) { log.info('Stopped rate limit cleanup job'); } diff --git a/tests/unit/api/apiUtils/rateLimit/cleanup.js b/tests/unit/api/apiUtils/rateLimit/cleanup.js index fccf6cc048..eab3e5901b 100644 --- a/tests/unit/api/apiUtils/rateLimit/cleanup.js +++ b/tests/unit/api/apiUtils/rateLimit/cleanup.js @@ -9,8 +9,8 @@ const constants = require('../../../../../constants'); describe('Rate limit cleanup job', () => { let mockLog; - let setIntervalSpy; - let clearIntervalSpy; + let setTimeoutSpy; + let clearTimeoutSpy; beforeEach(() => { mockLog = { @@ -18,14 +18,14 @@ describe('Rate limit cleanup job', () => { warn: sinon.stub(), debug: sinon.stub(), }; - setIntervalSpy = sinon.spy(global, 'setInterval'); - clearIntervalSpy = sinon.spy(global, 'clearInterval'); + setTimeoutSpy = sinon.spy(global, 'setTimeout'); + clearTimeoutSpy = sinon.spy(global, 'clearTimeout'); }); afterEach(() => { 
stopCleanupJob(); - setIntervalSpy.restore(); - clearIntervalSpy.restore(); + setTimeoutSpy.restore(); + clearTimeoutSpy.restore(); }); it('should start cleanup job successfully', () => { @@ -35,22 +35,22 @@ describe('Rate limit cleanup job', () => { assert(mockLog.info.calledWith('Starting rate limit cleanup job', { interval: constants.rateLimitCleanupInterval, })); - assert(setIntervalSpy.calledOnce); - assert.strictEqual(setIntervalSpy.firstCall.args[1], constants.rateLimitCleanupInterval); + assert(setTimeoutSpy.calledOnce); + assert.strictEqual(setTimeoutSpy.firstCall.args[1], constants.rateLimitCleanupInterval); }); it('should not start cleanup job if already running', () => { startCleanupJob(mockLog, { skipUnref: true }); mockLog.info.resetHistory(); mockLog.warn.resetHistory(); - setIntervalSpy.resetHistory(); + setTimeoutSpy.resetHistory(); startCleanupJob(mockLog, { skipUnref: true }); assert(mockLog.warn.calledOnce); assert(mockLog.warn.calledWith('Rate limit cleanup job already running')); assert(mockLog.info.notCalled); - assert(setIntervalSpy.notCalled); + assert(setTimeoutSpy.notCalled); }); it('should stop cleanup job successfully', () => { @@ -61,36 +61,36 @@ describe('Rate limit cleanup job', () => { assert(mockLog.info.calledOnce); assert(mockLog.info.calledWith('Stopped rate limit cleanup job')); - assert(clearIntervalSpy.calledOnce); + assert(clearTimeoutSpy.calledOnce); }); it('should not error when stopping cleanup job that is not running', () => { assert.doesNotThrow(() => { stopCleanupJob(mockLog); }); - assert(clearIntervalSpy.notCalled); + assert(clearTimeoutSpy.notCalled); }); it('should allow restarting cleanup job after stopping', () => { startCleanupJob(mockLog, { skipUnref: true }); - assert(setIntervalSpy.calledOnce); + assert(setTimeoutSpy.calledOnce); stopCleanupJob(mockLog); - assert(clearIntervalSpy.calledOnce); + assert(clearTimeoutSpy.calledOnce); - setIntervalSpy.resetHistory(); - clearIntervalSpy.resetHistory(); + 
setTimeoutSpy.resetHistory(); + clearTimeoutSpy.resetHistory(); mockLog.info.resetHistory(); startCleanupJob(mockLog, { skipUnref: true }); - assert(setIntervalSpy.calledOnce); + assert(setTimeoutSpy.calledOnce); assert(mockLog.info.calledOnce); }); it('should call unref() on interval by default', () => { const mockUnref = sinon.stub(); - setIntervalSpy.restore(); - setIntervalSpy = sinon.stub(global, 'setInterval').returns({ + setTimeoutSpy.restore(); + setTimeoutSpy = sinon.stub(global, 'setTimeout').returns({ unref: mockUnref, }); @@ -99,13 +99,13 @@ describe('Rate limit cleanup job', () => { assert(mockUnref.calledOnce); stopCleanupJob(mockLog); - setIntervalSpy.restore(); + setTimeoutSpy.restore(); }); it('should not call unref() when skipUnref is true', () => { const mockUnref = sinon.stub(); - setIntervalSpy.restore(); - setIntervalSpy = sinon.stub(global, 'setInterval').returns({ + setTimeoutSpy.restore(); + setTimeoutSpy = sinon.stub(global, 'setTimeout').returns({ unref: mockUnref, }); @@ -114,6 +114,6 @@ describe('Rate limit cleanup job', () => { assert(mockUnref.notCalled); stopCleanupJob(mockLog); - setIntervalSpy.restore(); + setTimeoutSpy.restore(); }); }); From d3c6d02fb039c2a091197bea881ea8cd5dd28ef0 Mon Sep 17 00:00:00 2001 From: Taylor McKinnon Date: Wed, 1 Apr 2026 12:32:34 -0700 Subject: [PATCH 12/16] impr(CLDSRV-855): Refactor helpers to extract bucket and account configs --- lib/api/apiUtils/rateLimit/helpers.js | 200 +++++--- lib/api/apiUtils/rateLimit/tokenBucket.js | 2 +- tests/unit/api/apiUtils/rateLimit/helpers.js | 461 +++++++++++++++--- .../unit/api/apiUtils/rateLimit/refillJob.js | 101 ++-- .../api/apiUtils/rateLimit/tokenBucket.js | 2 - 5 files changed, 574 insertions(+), 192 deletions(-) diff --git a/lib/api/apiUtils/rateLimit/helpers.js b/lib/api/apiUtils/rateLimit/helpers.js index 8bc8d7b83c..07811b021c 100644 --- a/lib/api/apiUtils/rateLimit/helpers.js +++ b/lib/api/apiUtils/rateLimit/helpers.js @@ -5,102 +5,159 @@ const { 
policies: { actionMaps: { actionMapBucketRateLimit } } } = require('arse const rateLimitApiActions = Object.keys(actionMapBucketRateLimit); +function requestNeedsRateCheck(request) { + // Is the feature enabled? + if (!config.rateLimiting.enabled) { + return false; + } + + // Is the request an internal or rate limit admin operation? + if (rateLimitApiActions.includes(request.apiMethod) || request.isInternalServiceRequest) { + return false; + } + + // Have we already checked both bucket and account configs? + return !(request.rateLimitAccountAlreadyChecked && request.rateLimitBucketAlreadyChecked); +} + /** - * Extract rate limit configuration from bucket metadata or global rate limit configuration. + * Extract bucket rate limit configuration from bucket metadata or global rate limit configuration. * * Resolves in priority order: * 1. Per-bucket configuration (from bucket metadata) * 2. Global default configuration - * 3. No rate limiting (null) * * @param {object} bucket - Bucket metadata object - * @param {string} bucketName - Bucket name * @param {object} log - Logger instance - * @returns {object|null} Rate limit config or null if no limit + * @returns {object} Rate limit config */ -function extractBucketRateLimitConfig(bucket, bucketName, log) { +function extractBucketRateLimitConfig(bucketMD, log) { // Try per-bucket config first - const bucketConfig = bucket.getRateLimitConfiguration(); + const bucketConfig = bucketMD.getRateLimitConfiguration(); if (bucketConfig) { const data = bucketConfig.getData(); - const limitConfig = { - limit: data.RequestsPerSecond.Limit, - burstCapacity: config.rateLimiting.bucket.defaultBurstCapacity * 1000, - source: 'bucket', + + const merged = { + RequestsPerSecond: { + ...config.rateLimiting.bucket.defaultConfig.RequestsPerSecond, + ...(data.RequestsPerSecond || {}), + source: data.RequestsPerSecond !== undefined ? 
'resource' : 'global', + }, }; log.debug('Extracted per-bucket rate limit config', { - bucketName, - limit: limitConfig.limit, - burstCapacity: config.rateLimiting.bucket.defaultBurstCapacity * 1000, + bucketName: bucketMD.getName(), + cfg: merged, }); - return limitConfig; + return merged; } - // Fall back to global default config - const globalLimit = config.rateLimiting.bucket?.defaultConfig?.limit; - if (globalLimit !== undefined && globalLimit > 0) { - const limitConfig = { - limit: globalLimit, - burstCapacity: config.rateLimiting.bucket.defaultBurstCapacity * 1000, + log.debug('Using global default rate limit config', { + bucketName: bucketMD.getName(), + cfg: { + ...config.rateLimiting.bucket.defaultConfig, + source: 'global', + } + }); + + return { + RequestsPerSecond: { + ...config.rateLimiting.bucket.defaultConfig.RequestsPerSecond, source: 'global', + }, + }; +} + +/** + * Extract account rate limit configuration from request or global rate limit configuration. + * + * Resolves in priority order: + * 1. Per-account configuration (from request) + * 2. Global default configuration + * + * @param {object} authInfo - Instance of AuthInfo class with requester's info + * @param {object} request - request object given by router + * @param {object} log - Logger instance + * @returns {object} Rate limit config + */ +function extractAccountRateLimitConfig(authInfo, request, log) { + // Try per-account config first + if (request.accountLimits) { + const merged = { + RequestsPerSecond: { + ...config.rateLimiting.account.defaultConfig.RequestsPerSecond, + ...(request.accountLimits.RequestsPerSecond || {}), + source: request.accountLimits.RequestsPerSecond !== undefined ? 
'resource' : 'global', + }, }; - log.debug('Using global default rate limit config', { - bucketName, - limit: limitConfig.limit, + log.debug('Extracted per-account rate limit config', { + accountId: authInfo.getCanonicalID(), + cfg: merged, }); - return limitConfig; + return merged; } - // No rate limiting configured - cache null to avoid repeated lookups - log.trace('No rate limit configured for bucket', { bucketName }); - return null; -} + const cfg = { + RequestsPerSecond: { + ...config.rateLimiting.account.defaultConfig.RequestsPerSecond, + source: 'global', + }, + }; -function extractRateLimitConfigFromRequest(request) { - const applyRateLimit = config.rateLimiting?.enabled - && !rateLimitApiActions.includes(request.apiMethod) // Don't limit any rate limit admin actions - && !request.isInternalServiceRequest; // Don't limit any calls from internal services + log.debug('Using global default rate limit config', { + accountId: authInfo.getCanonicalID(), + cfg, + }); - if (!applyRateLimit) { - return { needsCheck: false }; - } + return cfg; +} - const limitConfigs = {}; - let needsCheck = false; +function extractRateLimitConfigFromRequest(request, authInfo, bucketMD, log) { + const limitConfig = { + bucket: extractBucketRateLimitConfig(bucketMD, log), + account: extractAccountRateLimitConfig(authInfo, request, log), + }; + return limitConfig; +} - if (request.accountLimits) { - limitConfigs.account = { - ...request.accountLimits, - source: 'account', - }; - needsCheck = true; +function getCachedRateLimitConfig(request) { + const cachedConfig = {}; + const cachedBucketConfig = cache.getCachedConfig(cache.namespace.bucket, request.bucketName); + if (cachedBucketConfig !== undefined) { + cachedConfig.bucket = cachedBucketConfig; } - if (request.bucketName) { - const cachedConfig = cache.getCachedConfig(cache.namespace.bucket, request.bucketName); - if (cachedConfig) { - limitConfigs.bucket = cachedConfig; - needsCheck = true; + const cachedOwner = 
cache.getCachedBucketOwner(request.bucketName); + if (cachedOwner !== undefined) { + const cachedAccountConfig = cache.getCachedConfig(cache.namespace.account, cachedOwner); + if (cachedAccountConfig !== undefined) { + cachedConfig.account = cachedAccountConfig; + cachedConfig.bucketOwner = cachedOwner; } + } - if (!request.accountLimits) { - const cachedOwner = cache.getCachedBucketOwner(request.bucketName); - if (cachedOwner !== undefined) { - const cachedConfig = cache.getCachedConfig(cache.namespace.account, cachedOwner); - if (cachedConfig) { - limitConfigs.account = cachedConfig; - limitConfigs.bucketOwner = cachedOwner; - needsCheck = true; - } - } - } + return cachedConfig; +} + +function buildRateChecksFromConfig(resourceClass, resourceId, limitConfig) { + const checks = []; + if (limitConfig?.RequestsPerSecond?.Limit > 0) { + checks.push({ + resourceClass, + resourceId, + measure: 'rps', + source: limitConfig.RequestsPerSecond.source, + config: { + limit: limitConfig.RequestsPerSecond.Limit, + burstCapacity: limitConfig.RequestsPerSecond.BurstCapacity * 1000, + }, + }); } - return { needsCheck, limitConfigs }; + return checks; } function checkRateLimitsForRequest(checks, log) { @@ -109,11 +166,11 @@ function checkRateLimitsForRequest(checks, log) { const bucket = getTokenBucket(check.resourceClass, check.resourceId, check.measure, check.config, log); if (!bucket.hasCapacity()) { log.debug('Rate limit check: denied (no tokens available)', { - resourceClass: bucket.resourceClass, - resourceId: bucket.resourceId, - measure: bucket.measure, - limit: bucket.limitConfig.limit, - source: bucket.limitConfig.source, + resourceClass: check.resourceClass, + resourceId: check.resourceId, + measure: check.measure, + limit: check.config.limit, + source: check.source, }); return { allowed: false, rateLimitSource: `${check.resourceClass}:${check.source}`}; @@ -122,13 +179,15 @@ function checkRateLimitsForRequest(checks, log) { buckets.push(bucket); log.trace('Rate limit 
check: allowed (token consumed)', { - resourceClass: bucket.resourceClass, - resourceId: bucket.resourceId, - measure: bucket.measure, - source: bucket.limitConfig.source, + resourceClass: check.resourceClass, + resourceId: check.resourceId, + measure: check.measure, + source: check.source, }); } + // buckets have already been checked for available capacity + // no need to check return values buckets.forEach(bucket => bucket.tryConsume()); return { allowed: true }; } @@ -137,5 +196,8 @@ module.exports = { rateLimitApiActions, extractBucketRateLimitConfig, extractRateLimitConfigFromRequest, + buildRateChecksFromConfig, checkRateLimitsForRequest, + getCachedRateLimitConfig, + requestNeedsRateCheck, }; diff --git a/lib/api/apiUtils/rateLimit/tokenBucket.js b/lib/api/apiUtils/rateLimit/tokenBucket.js index f20f284607..e372efd4eb 100644 --- a/lib/api/apiUtils/rateLimit/tokenBucket.js +++ b/lib/api/apiUtils/rateLimit/tokenBucket.js @@ -200,7 +200,7 @@ function getTokenBucket(resourceClass, resourceId, measure, limitConfig, log) { } else { const { updated, oldConfig } = bucket.updateLimit(limitConfig); if (updated) { - log.info('Updated token bucket limit config', { + log.debug('Updated token bucket limit config', { cacheKey, old: oldConfig, new: limitConfig, diff --git a/tests/unit/api/apiUtils/rateLimit/helpers.js b/tests/unit/api/apiUtils/rateLimit/helpers.js index 39c49060b3..953f335ce0 100644 --- a/tests/unit/api/apiUtils/rateLimit/helpers.js +++ b/tests/unit/api/apiUtils/rateLimit/helpers.js @@ -3,7 +3,6 @@ const sinon = require('sinon'); const { config } = require('../../../../../lib/Config'); const cache = require('../../../../../lib/api/apiUtils/rateLimit/cache'); const helpers = require('../../../../../lib/api/apiUtils/rateLimit/helpers'); -const constants = require('../../../../../constants'); const tokenBucket = require('../../../../../lib/api/apiUtils/rateLimit/tokenBucket'); describe('Rate limit helpers', () => { @@ -20,6 +19,7 @@ describe('Rate limit 
helpers', () => { }; // Clear cache before each test cache.configCache.clear(); + cache.bucketOwnerCache.clear(); // Clear token buckets tokenBucket.getAllTokenBuckets().clear(); }); @@ -28,22 +28,111 @@ describe('Rate limit helpers', () => { sandbox.restore(); }); - describe('extractBucketRateLimitConfig', () => { - let configStub; + describe('requestNeedsRateCheck', () => { + it('should return false when rate limiting is disabled', () => { + sandbox.stub(config, 'rateLimiting').value({ + enabled: false, + }); + + const request = { apiMethod: 'objectGet' }; + assert.strictEqual(helpers.requestNeedsRateCheck(request), false); + }); + + it('should return false for rate limit admin API actions', () => { + sandbox.stub(config, 'rateLimiting').value({ + enabled: true, + }); + + for (const action of helpers.rateLimitApiActions) { + const request = { apiMethod: action }; + assert.strictEqual(helpers.requestNeedsRateCheck(request), false, + `Expected false for rate limit action: ${action}`); + } + }); + it('should return false for internal service requests', () => { + sandbox.stub(config, 'rateLimiting').value({ + enabled: true, + }); + + const request = { + apiMethod: 'objectGet', + isInternalServiceRequest: true, + }; + assert.strictEqual(helpers.requestNeedsRateCheck(request), false); + }); + + it('should return false when both bucket and account are already checked', () => { + sandbox.stub(config, 'rateLimiting').value({ + enabled: true, + }); + + const request = { + apiMethod: 'objectGet', + rateLimitAccountAlreadyChecked: true, + rateLimitBucketAlreadyChecked: true, + }; + assert.strictEqual(helpers.requestNeedsRateCheck(request), false); + }); + + it('should return true when only bucket is already checked', () => { + sandbox.stub(config, 'rateLimiting').value({ + enabled: true, + }); + + const request = { + apiMethod: 'objectGet', + rateLimitAccountAlreadyChecked: false, + rateLimitBucketAlreadyChecked: true, + }; + 
assert.strictEqual(helpers.requestNeedsRateCheck(request), true); + }); + + it('should return true when only account is already checked', () => { + sandbox.stub(config, 'rateLimiting').value({ + enabled: true, + }); + + const request = { + apiMethod: 'objectGet', + rateLimitAccountAlreadyChecked: true, + rateLimitBucketAlreadyChecked: false, + }; + assert.strictEqual(helpers.requestNeedsRateCheck(request), true); + }); + + it('should return true for a normal request needing rate check', () => { + sandbox.stub(config, 'rateLimiting').value({ + enabled: true, + }); + + const request = { + apiMethod: 'objectGet', + isInternalServiceRequest: false, + rateLimitAccountAlreadyChecked: false, + rateLimitBucketAlreadyChecked: false, + }; + assert.strictEqual(helpers.requestNeedsRateCheck(request), true); + }); + }); + + describe('extractBucketRateLimitConfig', () => { beforeEach(() => { - configStub = sandbox.stub(config, 'rateLimiting').value({ + sandbox.stub(config, 'rateLimiting').value({ enabled: true, + serviceUserArn: 'foo', bucket: { configCacheTTL: 30000, - defaultBurstCapacity: 1, + defaultConfig: { + RequestsPerSecond: { BurstCapacity: 1 }, + }, }, }); }); it('should extract per-bucket config', () => { - const bucketName = 'test-bucket'; const mockBucket = { + getName: () => 'test-bucket', getRateLimitConfiguration: () => ({ getData: () => ({ RequestsPerSecond: { Limit: 200 }, @@ -51,92 +140,81 @@ describe('Rate limit helpers', () => { }), }; - const result = helpers.extractBucketRateLimitConfig(mockBucket, bucketName, mockLog); + const result = helpers.extractBucketRateLimitConfig(mockBucket, mockLog); - assert.deepStrictEqual(result, { limit: 200, burstCapacity: 1000, source: 'bucket' }); + assert.deepStrictEqual(result, { + RequestsPerSecond: { BurstCapacity: 1, Limit: 200, source: 'resource' }, + }); }); - it('should fall back to global default config when no bucket config', () => { - const bucketName = 'test-bucket'; + it('should use global defaults when 
bucket has no per-resource config', () => { const mockBucket = { - getRateLimitConfiguration: () => null, + getName: () => 'test-bucket', + getRateLimitConfiguration: () => ({ + getData: () => ({ + RequestsPerSecond: undefined, + }), + }), }; - configStub.value({ - enabled: true, - bucket: { - defaultConfig: { limit: 100 }, - defaultBurstCapacity: 1, - configCacheTTL: 30000, - }, - }); - - const result = helpers.extractBucketRateLimitConfig(mockBucket, bucketName, mockLog); + const result = helpers.extractBucketRateLimitConfig(mockBucket, mockLog); - assert.deepStrictEqual(result, { limit: 100, burstCapacity: 1000, source: 'global' }); + assert.deepStrictEqual(result, { + RequestsPerSecond: { BurstCapacity: 1, source: 'global' }, + }); }); - it('should return null when no config exists', () => { - const bucketName = 'test-bucket'; + it('should fall back to global default config when no bucket config', () => { const mockBucket = { + getName: () => 'test-bucket', getRateLimitConfiguration: () => null, }; - configStub.value({ + sandbox.stub(config, 'rateLimiting').value({ enabled: true, bucket: { - defaultBurstCapacity: 1, + defaultConfig: { + RequestsPerSecond: { Limit: 100, BurstCapacity: 1 }, + }, configCacheTTL: 30000, }, }); - const result = helpers.extractBucketRateLimitConfig(mockBucket, bucketName, mockLog); + const result = helpers.extractBucketRateLimitConfig(mockBucket, mockLog); - assert.strictEqual(result, null); + assert.deepStrictEqual(result, { + RequestsPerSecond: { Limit: 100, BurstCapacity: 1, source: 'global' }, + }); }); - it('should return null when global default limit is 0', () => { - const bucketName = 'test-bucket'; + it('should return global defaults with no Limit when defaultConfig has no Limit', () => { const mockBucket = { + getName: () => 'test-bucket', getRateLimitConfiguration: () => null, }; - configStub.value({ - enabled: true, - bucket: { - defaultConfig: { limit: 0 }, - defaultBurstCapacity: 1, - configCacheTTL: 30000, - }, - }); - 
- const result = helpers.extractBucketRateLimitConfig(mockBucket, bucketName, mockLog); + const result = helpers.extractBucketRateLimitConfig(mockBucket, mockLog); - assert.strictEqual(result, null); + assert.deepStrictEqual(result, { + RequestsPerSecond: { BurstCapacity: 1, source: 'global' }, + }); }); - it('should use default TTL when configCacheTTL is not set', () => { - const bucketName = 'test-bucket'; + it('should merge per-bucket config over global defaults', () => { const mockBucket = { + getName: () => 'test-bucket', getRateLimitConfiguration: () => ({ getData: () => ({ - RequestsPerSecond: { Limit: 200 }, + RequestsPerSecond: { Limit: 500, BurstCapacity: 10 }, }), }), }; - configStub.value({ - enabled: true, - bucket: { - defaultBurstCapacity: 1, - }, - }); - - sandbox.stub(constants, 'rateLimitDefaultConfigCacheTTL').value(60000); + const result = helpers.extractBucketRateLimitConfig(mockBucket, mockLog); - const result = helpers.extractBucketRateLimitConfig(mockBucket, bucketName, mockLog); - - assert.deepStrictEqual(result, { limit: 200, burstCapacity: 1000, source: 'bucket' }); + assert.deepStrictEqual(result, { + RequestsPerSecond: { BurstCapacity: 10, Limit: 500, source: 'resource' }, + }); }); }); @@ -317,4 +395,277 @@ describe('Rate limit helpers', () => { assert.strictEqual(bucket.tokens, 48); }); }); + + describe('extractRateLimitConfigFromRequest', () => { + beforeEach(() => { + sandbox.stub(config, 'rateLimiting').value({ + enabled: true, + serviceUserArn: 'foo', + bucket: { + configCacheTTL: 30000, + defaultConfig: { + RequestsPerSecond: { BurstCapacity: 1 }, + }, + }, + account: { + configCacheTTL: 30000, + defaultConfig: { + RequestsPerSecond: { BurstCapacity: 2 }, + }, + }, + }); + }); + + it('should return both bucket and account configs', () => { + const mockBucket = { + getName: () => 'test-bucket', + getRateLimitConfiguration: () => ({ + getData: () => ({ + RequestsPerSecond: { Limit: 200 }, + }), + }), + }; + const mockAuthInfo = { + 
getCanonicalID: () => 'account-123', + }; + const request = { + accountLimits: { + RequestsPerSecond: { Limit: 500 }, + }, + }; + + const result = helpers.extractRateLimitConfigFromRequest( + request, mockAuthInfo, mockBucket, mockLog); + + assert.deepStrictEqual(result.bucket, { + RequestsPerSecond: { BurstCapacity: 1, Limit: 200, source: 'resource' }, + }); + assert.deepStrictEqual(result.account, { + RequestsPerSecond: { BurstCapacity: 2, Limit: 500, source: 'resource' }, + }); + }); + + it('should use global defaults when no per-resource configs exist', () => { + const mockBucket = { + getName: () => 'test-bucket', + getRateLimitConfiguration: () => null, + }; + const mockAuthInfo = { + getCanonicalID: () => 'account-123', + }; + const request = {}; + + const result = helpers.extractRateLimitConfigFromRequest( + request, mockAuthInfo, mockBucket, mockLog); + + assert.deepStrictEqual(result.bucket, { + RequestsPerSecond: { BurstCapacity: 1, source: 'global' }, + }); + assert.deepStrictEqual(result.account, { + RequestsPerSecond: { BurstCapacity: 2, source: 'global' }, + }); + }); + + it('should extract per-account config with resource source', () => { + const mockBucket = { + getName: () => 'test-bucket', + getRateLimitConfiguration: () => null, + }; + const mockAuthInfo = { + getCanonicalID: () => 'account-123', + }; + const request = { + accountLimits: { + RequestsPerSecond: { Limit: 300, BurstCapacity: 5 }, + }, + }; + + const result = helpers.extractRateLimitConfigFromRequest( + request, mockAuthInfo, mockBucket, mockLog); + + assert.deepStrictEqual(result.account, { + RequestsPerSecond: { BurstCapacity: 5, Limit: 300, source: 'resource' }, + }); + }); + + it('should use global source when accountLimits has no RequestsPerSecond', () => { + const mockBucket = { + getName: () => 'test-bucket', + getRateLimitConfiguration: () => null, + }; + const mockAuthInfo = { + getCanonicalID: () => 'account-123', + }; + const request = { + accountLimits: { + 
RequestsPerSecond: undefined, + }, + }; + + const result = helpers.extractRateLimitConfigFromRequest( + request, mockAuthInfo, mockBucket, mockLog); + + assert.deepStrictEqual(result.account, { + RequestsPerSecond: { BurstCapacity: 2, source: 'global' }, + }); + }); + }); + + describe('buildRateChecksFromConfig', () => { + it('should build a check when Limit is set and positive', () => { + const limitConfig = { + RequestsPerSecond: { Limit: 100, BurstCapacity: 2, source: 'resource' }, + }; + + const checks = helpers.buildRateChecksFromConfig('bkt', 'test-bucket', limitConfig); + + assert.strictEqual(checks.length, 1); + assert.deepStrictEqual(checks[0], { + resourceClass: 'bkt', + resourceId: 'test-bucket', + measure: 'rps', + source: 'resource', + config: { + limit: 100, + burstCapacity: 2000, + }, + }); + }); + + it('should return empty array when Limit is 0', () => { + const limitConfig = { + RequestsPerSecond: { Limit: 0, BurstCapacity: 2, source: 'global' }, + }; + + const checks = helpers.buildRateChecksFromConfig('bkt', 'test-bucket', limitConfig); + + assert.strictEqual(checks.length, 0); + }); + + it('should return empty array when Limit is undefined', () => { + const limitConfig = { + RequestsPerSecond: { BurstCapacity: 2, source: 'global' }, + }; + + const checks = helpers.buildRateChecksFromConfig('bkt', 'test-bucket', limitConfig); + + assert.strictEqual(checks.length, 0); + }); + + it('should return empty array when limitConfig is null', () => { + const checks = helpers.buildRateChecksFromConfig('bkt', 'test-bucket', null); + + assert.strictEqual(checks.length, 0); + }); + + it('should return empty array when limitConfig is undefined', () => { + const checks = helpers.buildRateChecksFromConfig('bkt', 'test-bucket', undefined); + + assert.strictEqual(checks.length, 0); + }); + + it('should return empty array when RequestsPerSecond is missing', () => { + const checks = helpers.buildRateChecksFromConfig('bkt', 'test-bucket', {}); + + 
assert.strictEqual(checks.length, 0); + }); + + it('should multiply BurstCapacity by 1000', () => { + const limitConfig = { + RequestsPerSecond: { Limit: 50, BurstCapacity: 5, source: 'global' }, + }; + + const checks = helpers.buildRateChecksFromConfig('acc', 'account-1', limitConfig); + + assert.strictEqual(checks[0].config.burstCapacity, 5000); + }); + + it('should return empty array when Limit is negative', () => { + const limitConfig = { + RequestsPerSecond: { Limit: -1, BurstCapacity: 2, source: 'global' }, + }; + + const checks = helpers.buildRateChecksFromConfig('bkt', 'test-bucket', limitConfig); + + assert.strictEqual(checks.length, 0); + }); + }); + + describe('getCachedRateLimitConfig', () => { + it('should return empty object when nothing is cached', () => { + const request = { bucketName: 'test-bucket' }; + + const result = helpers.getCachedRateLimitConfig(request); + + assert.deepStrictEqual(result, {}); + }); + + it('should return cached bucket config when available', () => { + const bucketConfig = { + RequestsPerSecond: { Limit: 100, source: 'resource' }, + }; + cache.setCachedConfig(cache.namespace.bucket, 'test-bucket', bucketConfig, 30000); + + const request = { bucketName: 'test-bucket' }; + const result = helpers.getCachedRateLimitConfig(request); + + assert.deepStrictEqual(result.bucket, bucketConfig); + assert.strictEqual(result.account, undefined); + }); + + it('should return cached account config when bucket owner and account config are cached', () => { + const accountConfig = { + RequestsPerSecond: { Limit: 500, source: 'global' }, + }; + cache.setCachedBucketOwner('test-bucket', 'owner-123', 30000); + cache.setCachedConfig(cache.namespace.account, 'owner-123', accountConfig, 30000); + + const request = { bucketName: 'test-bucket' }; + const result = helpers.getCachedRateLimitConfig(request); + + assert.deepStrictEqual(result.account, accountConfig); + assert.strictEqual(result.bucketOwner, 'owner-123'); + }); + + it('should return both 
bucket and account configs when all are cached', () => { + const bucketConfig = { + RequestsPerSecond: { Limit: 100, source: 'resource' }, + }; + const accountConfig = { + RequestsPerSecond: { Limit: 500, source: 'global' }, + }; + cache.setCachedConfig(cache.namespace.bucket, 'test-bucket', bucketConfig, 30000); + cache.setCachedBucketOwner('test-bucket', 'owner-123', 30000); + cache.setCachedConfig(cache.namespace.account, 'owner-123', accountConfig, 30000); + + const request = { bucketName: 'test-bucket' }; + const result = helpers.getCachedRateLimitConfig(request); + + assert.deepStrictEqual(result.bucket, bucketConfig); + assert.deepStrictEqual(result.account, accountConfig); + assert.strictEqual(result.bucketOwner, 'owner-123'); + }); + + it('should not return account config when bucket owner is cached but account config is not', () => { + cache.setCachedBucketOwner('test-bucket', 'owner-123', 30000); + + const request = { bucketName: 'test-bucket' }; + const result = helpers.getCachedRateLimitConfig(request); + + assert.strictEqual(result.account, undefined); + assert.strictEqual(result.bucketOwner, undefined); + }); + + it('should not return account config when bucket owner is not cached', () => { + const accountConfig = { + RequestsPerSecond: { Limit: 500, source: 'global' }, + }; + cache.setCachedConfig(cache.namespace.account, 'owner-123', accountConfig, 30000); + + const request = { bucketName: 'test-bucket' }; + const result = helpers.getCachedRateLimitConfig(request); + + assert.strictEqual(result.account, undefined); + }); + }); }); diff --git a/tests/unit/api/apiUtils/rateLimit/refillJob.js b/tests/unit/api/apiUtils/rateLimit/refillJob.js index b6e9d90595..0e870f33a9 100644 --- a/tests/unit/api/apiUtils/rateLimit/refillJob.js +++ b/tests/unit/api/apiUtils/rateLimit/refillJob.js @@ -1,15 +1,30 @@ const assert = require('assert'); const sinon = require('sinon'); +const { config } = require('../../../../../lib/Config'); const refillJob = 
require('../../../../../lib/api/apiUtils/rateLimit/refillJob'); const tokenBucket = require('../../../../../lib/api/apiUtils/rateLimit/tokenBucket'); const logger = require('../../../../../lib/utilities/logger'); describe('Token refill job', () => { let sandbox; + let mockLog; beforeEach(() => { sandbox = sinon.createSandbox(); + sandbox.stub(config, 'rateLimiting').value({ + enabled: true, + nodes: 1, + tokenBucketBufferSize: 50, + tokenBucketRefillThreshold: 20, + }); + mockLog = { + trace: sinon.stub(), + debug: sinon.stub(), + info: sinon.stub(), + warn: sinon.stub(), + error: sinon.stub(), + }; // Clear token buckets tokenBucket.getAllTokenBuckets().clear(); @@ -34,16 +49,10 @@ describe('Token refill job', () => { }); it('should check all active token buckets', async () => { - const mockLog = { - trace: sinon.stub(), - debug: sinon.stub(), - error: sinon.stub(), - }; - // Create 3 buckets - const bucket1 = tokenBucket.getTokenBucket('bucket-1', { limit: 100 }, mockLog); - const bucket2 = tokenBucket.getTokenBucket('bucket-2', { limit: 200 }, mockLog); - const bucket3 = tokenBucket.getTokenBucket('bucket-3', { limit: 300 }, mockLog); + const bucket1 = tokenBucket.getTokenBucket('bkt', 'bucket-1', 'rps', { limit: 100 }, mockLog); + const bucket2 = tokenBucket.getTokenBucket('bkt', 'bucket-2', 'rps', { limit: 200 }, mockLog); + const bucket3 = tokenBucket.getTokenBucket('bkt', 'bucket-3', 'rps', { limit: 300 }, mockLog); // Stub refillIfNeeded to prevent actual refill sandbox.stub(bucket1, 'refillIfNeeded').resolves(); @@ -59,13 +68,7 @@ describe('Token refill job', () => { }); it('should call refillIfNeeded on each bucket', async () => { - const mockLog = { - trace: sinon.stub(), - debug: sinon.stub(), - error: sinon.stub(), - }; - - const bucket = tokenBucket.getTokenBucket('test-bucket', { limit: 100 }, mockLog); + const bucket = tokenBucket.getTokenBucket('bkt', 'test-bucket', 'rps', { limit: 100 }, mockLog); const refillSpy = sandbox.stub(bucket, 
'refillIfNeeded').resolves(); await refillJob.refillTokenBuckets(logger); @@ -74,13 +77,7 @@ describe('Token refill job', () => { }); it('should handle refill errors gracefully', async () => { - const mockLog = { - trace: sinon.stub(), - debug: sinon.stub(), - error: sinon.stub(), - }; - - const bucket = tokenBucket.getTokenBucket('test-bucket', { limit: 100 }, mockLog); + const bucket = tokenBucket.getTokenBucket('bkt', 'test-bucket', 'rps', { limit: 100 }, mockLog); sandbox.stub(bucket, 'refillIfNeeded').rejects(new Error('Refill failed')); // Should not throw @@ -90,14 +87,8 @@ describe('Token refill job', () => { }); it('should process multiple buckets in parallel', async () => { - const mockLog = { - trace: sinon.stub(), - debug: sinon.stub(), - error: sinon.stub(), - }; - - const bucket1 = tokenBucket.getTokenBucket('bucket-1', { limit: 100 }, mockLog); - const bucket2 = tokenBucket.getTokenBucket('bucket-2', { limit: 200 }, mockLog); + const bucket1 = tokenBucket.getTokenBucket('bkt', 'bucket-1', 'rps', { limit: 100 }, mockLog); + const bucket2 = tokenBucket.getTokenBucket('bkt', 'bucket-2', 'rps', { limit: 200 }, mockLog); let refill1Called = false; let refill2Called = false; @@ -117,14 +108,8 @@ describe('Token refill job', () => { }); it('should wait for all refills to complete', async () => { - const mockLog = { - trace: sinon.stub(), - debug: sinon.stub(), - error: sinon.stub(), - }; - - const bucket1 = tokenBucket.getTokenBucket('bucket-1', { limit: 100 }, mockLog); - const bucket2 = tokenBucket.getTokenBucket('bucket-2', { limit: 200 }, mockLog); + const bucket1 = tokenBucket.getTokenBucket('bkt', 'bucket-1', 'rps', { limit: 100 }, mockLog); + const bucket2 = tokenBucket.getTokenBucket('bkt', 'bucket-2', 'rps', { limit: 200 }, mockLog); let call1 = false; let call2 = false; @@ -197,13 +182,7 @@ describe('Token refill job', () => { describe('Integration scenarios', () => { it('should call refillIfNeeded on buckets below threshold', async () => { - 
const mockLog = { - trace: sinon.stub(), - debug: sinon.stub(), - error: sinon.stub(), - }; - - const bucket = tokenBucket.getTokenBucket('test-bucket', { limit: 100 }, mockLog); + const bucket = tokenBucket.getTokenBucket('bkt', 'test-bucket', 'rps', { limit: 100 }, mockLog); bucket.tokens = 10; // Below threshold (20) // Stub refillIfNeeded @@ -215,33 +194,25 @@ describe('Token refill job', () => { }); it('should skip refill for buckets above threshold', async () => { - const mockLog = { - trace: sinon.stub(), - debug: sinon.stub(), - error: sinon.stub(), - }; - - const bucket = tokenBucket.getTokenBucket('test-bucket', { limit: 100 }, mockLog); + const bucket = tokenBucket.getTokenBucket('bkt', 'test-bucket', 'rps', { limit: 100 }, mockLog); bucket.tokens = 40; // Above threshold (20) - const refillSpy = sandbox.spy(bucket, 'refill'); + // refillIfNeeded is always called by the job, but it checks + // the threshold internally and returns early without refilling + const refillSpy = sandbox.spy(bucket, 'refillIfNeeded'); await refillJob.refillTokenBuckets(logger); - // Refill should not have been called - assert.strictEqual(refillSpy.called, false); + // refillIfNeeded was called, but tokens should be unchanged + // (no actual refill happened since above threshold) + assert.strictEqual(refillSpy.calledOnce, true); + assert.strictEqual(bucket.tokens, 40); }); it('should handle concurrent refills for multiple buckets', async () => { - const mockLog = { - trace: sinon.stub(), - debug: sinon.stub(), - error: sinon.stub(), - }; - - const bucket1 = tokenBucket.getTokenBucket('bucket-1', { limit: 100 }, mockLog); - const bucket2 = tokenBucket.getTokenBucket('bucket-2', { limit: 200 }, mockLog); - const bucket3 = tokenBucket.getTokenBucket('bucket-3', { limit: 300 }, mockLog); + const bucket1 = tokenBucket.getTokenBucket('bkt', 'bucket-1', 'rps', { limit: 100 }, mockLog); + const bucket2 = tokenBucket.getTokenBucket('bkt', 'bucket-2', 'rps', { limit: 200 }, mockLog); + 
const bucket3 = tokenBucket.getTokenBucket('bkt', 'bucket-3', 'rps', { limit: 300 }, mockLog); // All below threshold bucket1.tokens = 5; diff --git a/tests/unit/api/apiUtils/rateLimit/tokenBucket.js b/tests/unit/api/apiUtils/rateLimit/tokenBucket.js index fdd490b294..7959c01adc 100644 --- a/tests/unit/api/apiUtils/rateLimit/tokenBucket.js +++ b/tests/unit/api/apiUtils/rateLimit/tokenBucket.js @@ -299,8 +299,6 @@ describe('Token bucket management functions', () => { assert.strictEqual(bucket1, bucket2); assert.strictEqual(bucket2.limitConfig.limit, 200); - assert(mockLog.info.calledOnce); - assert(mockLog.info.firstCall.args[0].includes('Updated token bucket limit config')); }); it('should not log update when limit is unchanged', () => { From 57730bfae05f6476a1851cfe1c3611a52a53975d Mon Sep 17 00:00:00 2001 From: Taylor McKinnon Date: Wed, 1 Apr 2026 12:32:58 -0700 Subject: [PATCH 13/16] impr(CLDSRV-855): Update post check --- lib/metadata/metadataUtils.js | 100 ++++++++++++++++++---------------- 1 file changed, 52 insertions(+), 48 deletions(-) diff --git a/lib/metadata/metadataUtils.js b/lib/metadata/metadataUtils.js index c8588f9859..7c55ed1513 100644 --- a/lib/metadata/metadataUtils.js +++ b/lib/metadata/metadataUtils.js @@ -11,10 +11,13 @@ const { onlyOwnerAllowed } = require('../../constants'); const { actionNeedQuotaCheck, actionWithDataDeletion } = require('arsenal/build/lib/policyEvaluator/RequestContext'); const { processBytesToWrite, validateQuotas } = require('../api/apiUtils/quotas/quotaUtils'); const { config } = require('../Config'); +const cache = require('../api/apiUtils/rateLimit/cache'); const { - extractAndCacheRateLimitConfig, - checkRateLimitWithConfig, rateLimitApiActions, + requestNeedsRateCheck, + extractRateLimitConfigFromRequest, + buildRateChecksFromConfig, + checkRateLimitsForRequest, } = require('../api/apiUtils/rateLimit/helpers'); /** @@ -247,67 +250,68 @@ function validateBucket(bucket, params, log, actionImplicitDenies = {}) { * 
Extracts rate limit config from bucket metadata, caches it, and enforces limit. * Calls callback with error if rate limited, null if allowed or no rate limiting. * - * @param {object} bucket - Bucket metadata object - * @param {string} bucketName - Bucket name - * @param {object} request - Request object with rateLimitAlreadyChecked tracker + * @param {object} request - Request object + * @param {AuthInfo} authInfo - AuthInfo class instance, requester's info + * @param {BucketInfo} bucketMD - Bucket metadata * @param {object} log - Logger instance * @param {function} callback - Callback(err) - err if rate limited, null if allowed * @returns {undefined} */ -function checkRateLimitIfNeeded(bucket, bucketName, request, log, callback) { +function checkRateLimitIfNeeded(request, authInfo, bucketMD, log, callback) { // Skip if already checked or not enabled - if (request.rateLimitAlreadyChecked - || !config.rateLimiting?.enabled - || rateLimitApiActions.includes(request.apiMethod) - || request.isInternalServiceRequest) { + if (!requestNeedsRateCheck(request)) { return process.nextTick(callback, null); } // Extract rate limit config from bucket metadata and cache it - const rateLimitConfig = extractAndCacheRateLimitConfig(bucket, bucketName, log); + const checks = []; + const rateLimitConfig = extractRateLimitConfigFromRequest(request, authInfo, bucketMD, log); - // No rate limiting configured - if (!rateLimitConfig) { + if (!request.rateLimitBucketAlreadyChecked && rateLimitConfig.bucket !== undefined) { + cache.setCachedConfig( + cache.namespace.bucket, + bucketMD.getName(), + rateLimitConfig.bucket, + config.rateLimiting.bucket.configCacheTTL + ); + cache.setCachedBucketOwner(bucketMD.getName(), bucketMD.getOwner(), config.rateLimiting.bucket.configCacheTTL); + checks.push(...buildRateChecksFromConfig('bucket', bucketMD.getName(), rateLimitConfig.bucket)); // eslint-disable-next-line no-param-reassign - request.rateLimitAlreadyChecked = true; - return 
process.nextTick(callback, null); + request.rateLimitBucketAlreadyChecked = true; } - // Check rate limit with GCRA - return checkRateLimitWithConfig( - bucketName, - rateLimitConfig, - log, - (rateLimitErr, rateLimited) => { - if (rateLimitErr) { - log.error('Rate limit check error in metadata validation', { - error: rateLimitErr, - }); - } + if (!request.rateLimitAccountAlreadyChecked && rateLimitConfig.account !== undefined) { + cache.setCachedConfig( + cache.namespace.account, + authInfo.getCanonicalID(), + rateLimitConfig.account, + config.rateLimiting.account.configCacheTTL + ); + checks.push(...buildRateChecksFromConfig('account', authInfo.getCanonicalID(), rateLimitConfig.account)); + // eslint-disable-next-line no-param-reassign + request.rateLimitAccountAlreadyChecked = true; + } - if (rateLimited) { - log.addDefaultFields({ - rateLimited: true, - rateLimitSource: rateLimitConfig.source, - }); - // Add to server access log - /* eslint-disable no-param-reassign */ - if (request.serverAccessLog) { - request.serverAccessLog.rateLimited = true; - request.serverAccessLog.rateLimitSource = rateLimitConfig.source; - } - request.rateLimitAlreadyChecked = true; - /* eslint-enable no-param-reassign */ - return callback(config.rateLimiting.error); - } + const { allowed, rateLimitSource } = checkRateLimitsForRequest(checks, log); + if (!allowed) { + log.addDefaultFields({ + rateLimited: true, + rateLimitSource, + }); - // Allowed - set tracker and continue - // eslint-disable-next-line no-param-reassign - request.rateLimitAlreadyChecked = true; - return callback(null); + if (request.serverAccessLog) { + /* eslint-disable no-param-reassign */ + request.serverAccessLog.rateLimited = true; + request.serverAccessLog.rateLimitSource = rateLimitSource; + /* eslint-enable no-param-reassign */ } - ); + + return process.nextTick(callback, config.rateLimiting.error); + } + + return process.nextTick(callback, null); } + /** standardMetadataValidateBucketAndObj - retrieve bucket 
and object md from metadata * and check if user is authorized to access them. * @param {object} params - function parameters @@ -371,7 +375,7 @@ function standardMetadataValidateBucketAndObj(params, actionImplicitDenies, log, } // Rate limiting check if not already done in api.js - return checkRateLimitIfNeeded(bucket, bucketName, request, log, err => { + return checkRateLimitIfNeeded(request, authInfo, bucket, log, err => { if (err) { return next(err, bucket); } @@ -465,7 +469,7 @@ function standardMetadataValidateBucket(params, actionImplicitDenies, log, callb } // Rate limiting check if not already done in api.js - return checkRateLimitIfNeeded(bucket, bucketName, request, log, err => { + return checkRateLimitIfNeeded(request, params.authInfo, bucket, log, err => { if (err) { return callback(err); } From edf419b95f51ec020a64dde63c93d1616418b091 Mon Sep 17 00:00:00 2001 From: Taylor McKinnon Date: Wed, 1 Apr 2026 12:33:10 -0700 Subject: [PATCH 14/16] impr(CLDSRV-854): Update pre check --- lib/api/api.js | 449 ++++++++++++++++++++++++------------------------- 1 file changed, 220 insertions(+), 229 deletions(-) diff --git a/lib/api/api.js b/lib/api/api.js index 349c29fc97..d2ecd426bb 100644 --- a/lib/api/api.js +++ b/lib/api/api.js @@ -83,9 +83,10 @@ const constants = require('../../constants'); const { config } = require('../Config.js'); const { validateMethodChecksumNoChunking } = require('./apiUtils/integrity/validateChecksums'); const { - getRateLimitFromCache, - checkRateLimitWithConfig, - rateLimitApiActions, + requestNeedsRateCheck, + getCachedRateLimitConfig, + buildRateChecksFromConfig, + checkRateLimitsForRequest, } = require('./apiUtils/rateLimit/helpers'); const monitoringMap = policies.actionMaps.actionMonitoringMapS3; @@ -154,6 +155,193 @@ function handleAuthorizationResults(request, authorizationResults, apiMethod, re return callback(null, { returnTagCount }); } +function callApiHandler(apiMethod, apiHandler, request, response, log, callback) { + let 
returnTagCount = true; + + const validationRes = validateQueryAndHeaders(request, log); + if (validationRes.error) { + log.debug('request query / header validation failed', { + error: validationRes.error, + method: 'api.callApiMethod', + }); + return process.nextTick(callback, validationRes.error); + } + + // no need to check auth on website or cors preflight requests + if (apiMethod === 'websiteGet' || apiMethod === 'websiteHead' || + apiMethod === 'corsPreflight') { + request.actionImplicitDenies = false; + return apiHandler(request, log, callback); + } + + const { sourceBucket, sourceObject, sourceVersionId, parsingError } = + parseCopySource(apiMethod, request.headers['x-amz-copy-source']); + if (parsingError) { + log.debug('error parsing copy source', { + error: parsingError, + }); + return process.nextTick(callback, parsingError); + } + + const { httpHeadersSizeError } = checkHttpHeadersSize(request.headers); + if (httpHeadersSizeError) { + log.debug('http header size limit exceeded', { + error: httpHeadersSizeError, + }); + return process.nextTick(callback, httpHeadersSizeError); + } + + const requestContexts = prepareRequestContexts(apiMethod, request, + sourceBucket, sourceObject, sourceVersionId); + + // Extract all the _apiMethods and store them in an array + const apiMethods = requestContexts ? requestContexts.map(context => context._apiMethod) : []; + // Attach the names to the current request + request.apiMethods = apiMethods; + + return async.waterfall([ + next => auth.server.doAuth( + request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => { + if (request.serverAccessLog) { + request.serverAccessLog.authInfo = userInfo; + } + if (err) { + // VaultClient returns standard errors, but the route requires + // Arsenal errors + const arsenalError = err.metadata ? 
err : errors[err.code] || errors.InternalError; + log.trace('authentication error', { error: err }); + return next(arsenalError); + } + return next(null, userInfo, authorizationResults, streamingV4Params, infos); + }, 's3', requestContexts), + (userInfo, authorizationResults, streamingV4Params, infos, next) => { + const authNames = { accountName: userInfo.getAccountDisplayName() }; + if (userInfo.isRequesterAnIAMUser()) { + authNames.userName = userInfo.getIAMdisplayName(); + } + if (isRequesterASessionUser(userInfo)) { + authNames.sessionName = userInfo.getShortid().split(':')[1]; + } + log.addDefaultFields(authNames); + if (request.serverAccessLog) { + request.serverAccessLog.analyticsAccountName = authNames.accountName; + request.serverAccessLog.analyticsUserName = authNames.userName; + } + if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') { + const setStartTurnAroundTime = () => { + if (request.serverAccessLog) { + request.serverAccessLog.startTurnAroundTime = process.hrtime.bigint(); + } + }; + // For 0-byte uploads, downstream handlers do not consume + // the request stream, so 'end' never fires. Set + // startTurnAroundTime synchronously in that case. + if (request.headers['content-length'] === '0') { + setStartTurnAroundTime(); + } else { + request.on('end', setStartTurnAroundTime); + } + return next(null, userInfo, authorizationResults, streamingV4Params, infos); + } + // issue 100 Continue to the client + writeContinue(request, response); + + const defaultMaxBodyLength = request.method === 'POST' ? 
+ constants.oneMegaBytes : constants.halfMegaBytes; + const MAX_BODY_LENGTH = config.apiBodySizeLimits[apiMethod] || defaultMaxBodyLength; + const post = []; + let bodyLength = 0; + request.on('data', chunk => { + bodyLength += chunk.length; + // Sanity check on post length + if (bodyLength <= MAX_BODY_LENGTH) { + post.push(chunk); + } + }); + + request.on('error', err => { + log.trace('error receiving request', { + error: err, + }); + return next(errors.InternalError); + }); + + request.on('end', () => { + if (request.serverAccessLog) { + request.serverAccessLog.startTurnAroundTime = process.hrtime.bigint(); + } + + if (bodyLength > MAX_BODY_LENGTH) { + log.error('body length is too long for request type', + { bodyLength }); + return next(errors.InvalidRequest); + } + + const buff = Buffer.concat(post, bodyLength); + + const err = validateMethodChecksumNoChunking(request, buff, log); + if (err) { + return next(err); + } + + // Convert array of post buffers into one string + request.post = buff.toString(); + return next(null, userInfo, authorizationResults, streamingV4Params, infos); + }); + return undefined; + }, + // Tag condition keys require information from CloudServer for evaluation + (userInfo, authorizationResults, streamingV4Params, infos, next) => tagConditionKeyAuth( + authorizationResults, + request, + requestContexts, + apiMethod, + log, + (err, authResultsWithTags) => { + if (err) { + log.trace('tag authentication error', { error: err }); + return next(err); + } + return next(null, userInfo, authResultsWithTags, streamingV4Params, infos); + }, + ), + (userInfo, authorizationResults, streamingV4Params, infos, next) => handleAuthorizationResults( + request, authorizationResults, apiMethod, returnTagCount, log, (err, res) => { + request.accountQuotas = infos?.accountQuota; + request.accountLimits = infos?.limits; + if (err) { + return next(err); + } + returnTagCount = res.returnTagCount; + return next(null, userInfo, authorizationResults, 
streamingV4Params); + }), + ], (err, userInfo, authorizationResults, streamingV4Params) => { + if (err) { + return callback(err); + } + const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5, + (hook, done) => hook(err, done), + () => callback(err, ...results)); + + if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') { + request._response = response; + return apiHandler(userInfo, request, streamingV4Params, + log, methodCallback, authorizationResults); + } + if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') { + return apiHandler(userInfo, request, sourceBucket, + sourceObject, sourceVersionId, log, methodCallback); + } + if (apiMethod === 'objectGet') { + // remove objectGetTagging/objectGetTaggingVersion from apiMethods, these were added by + // prepareRequestContexts to determine the value of returnTagCount. + request.apiMethods = request.apiMethods.filter(methodName => !methodName.includes('Tagging')); + return apiHandler(userInfo, request, returnTagCount, log, callback); + } + return apiHandler(userInfo, request, log, methodCallback); + }); +} + const api = { callApiMethod(apiMethod, request, response, log, callback) { // Attach the apiMethod method to the request, so it can used by monitoring in the server @@ -186,249 +374,52 @@ const api = { request.serverAccessLog.objectKey = request.objectKey; request.serverAccessLog.analyticsAction = actionLog; } - let returnTagCount = true; // Initialize rate limit tracker flag - request.rateLimitAlreadyChecked = false; + request.rateLimitBucketAlreadyChecked = false; + request.rateLimitAccountAlreadyChecked = false; + + const apiHandler = this[apiMethod]; // Process the request with validation, authentication, and execution - const processRequest = () => { - const validationRes = validateQueryAndHeaders(request, log); - if (validationRes.error) { - log.debug('request query / header validation failed', { - error: validationRes.error, - method: 
'api.callApiMethod', - }); - return process.nextTick(callback, validationRes.error); - } - // no need to check auth on website or cors preflight requests - if (apiMethod === 'websiteGet' || apiMethod === 'websiteHead' || - apiMethod === 'corsPreflight') { - request.actionImplicitDenies = false; - return this[apiMethod](request, log, callback); + if (request.bucketName === undefined || !requestNeedsRateCheck(request)) { + return process.nextTick(callApiHandler, apiMethod, apiHandler, request, response, log, callback); } - const { sourceBucket, sourceObject, sourceVersionId, parsingError } = - parseCopySource(apiMethod, request.headers['x-amz-copy-source']); - if (parsingError) { - log.debug('error parsing copy source', { - error: parsingError, - }); - return process.nextTick(callback, parsingError); - } + const checks = []; + const rateLimitConfig = getCachedRateLimitConfig(request); - const { httpHeadersSizeError } = checkHttpHeadersSize(request.headers); - if (httpHeadersSizeError) { - log.debug('http header size limit exceeded', { - error: httpHeadersSizeError, - }); - return process.nextTick(callback, httpHeadersSizeError); + if (rateLimitConfig.bucket !== undefined) { + request.rateLimitBucketAlreadyChecked = true; + checks.push(...buildRateChecksFromConfig('bucket', request.bucketName, rateLimitConfig.bucket)); } - const requestContexts = prepareRequestContexts(apiMethod, request, - sourceBucket, sourceObject, sourceVersionId); - - // Extract all the _apiMethods and store them in an array - const apiMethods = requestContexts ? 
requestContexts.map(context => context._apiMethod) : []; - // Attach the names to the current request - request.apiMethods = apiMethods; + if (rateLimitConfig.account !== undefined) { + request.rateLimitAccountAlreadyChecked = true; + checks.push(...buildRateChecksFromConfig('account', rateLimitConfig.bucketOwner, rateLimitConfig.account)); + } - return async.waterfall([ - next => auth.server.doAuth( - request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => { - if (request.serverAccessLog) { - request.serverAccessLog.authInfo = userInfo; - } - if (err) { - // VaultClient returns standard errors, but the route requires - // Arsenal errors - const arsenalError = err.metadata ? err : errors[err.code] || errors.InternalError; - log.trace('authentication error', { error: err }); - return next(arsenalError); - } - return next(null, userInfo, authorizationResults, streamingV4Params, infos); - }, 's3', requestContexts), - (userInfo, authorizationResults, streamingV4Params, infos, next) => { - const authNames = { accountName: userInfo.getAccountDisplayName() }; - if (userInfo.isRequesterAnIAMUser()) { - authNames.userName = userInfo.getIAMdisplayName(); - } - if (isRequesterASessionUser(userInfo)) { - authNames.sessionName = userInfo.getShortid().split(':')[1]; - } - log.addDefaultFields(authNames); + if (checks.length > 0) { + const { allowed, rateLimitSource } = checkRateLimitsForRequest(checks, log); + if (!allowed) { + log.addDefaultFields({ + rateLimited: true, + rateLimitSource, + }); + // Add to server access log if (request.serverAccessLog) { - request.serverAccessLog.analyticsAccountName = authNames.accountName; - request.serverAccessLog.analyticsUserName = authNames.userName; - } - if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') { - const setStartTurnAroundTime = () => { - if (request.serverAccessLog) { - request.serverAccessLog.startTurnAroundTime = process.hrtime.bigint(); - } - }; - // For 0-byte uploads, downstream 
handlers do not consume - // the request stream, so 'end' never fires. Set - // startTurnAroundTime synchronously in that case. - if (request.headers['content-length'] === '0') { - setStartTurnAroundTime(); - } else { - request.on('end', setStartTurnAroundTime); - } - return next(null, userInfo, authorizationResults, streamingV4Params, infos); + /* eslint-disable no-param-reassign */ + request.serverAccessLog.rateLimited = true; + request.serverAccessLog.rateLimitSource = rateLimitSource; + /* eslint-enable no-param-reassign */ } - // issue 100 Continue to the client - writeContinue(request, response); - - const defaultMaxBodyLength = request.method === 'POST' ? - constants.oneMegaBytes : constants.halfMegaBytes; - const MAX_BODY_LENGTH = config.apiBodySizeLimits[apiMethod] || defaultMaxBodyLength; - const post = []; - let bodyLength = 0; - request.on('data', chunk => { - bodyLength += chunk.length; - // Sanity check on post length - if (bodyLength <= MAX_BODY_LENGTH) { - post.push(chunk); - } - }); - request.on('error', err => { - log.trace('error receiving request', { - error: err, - }); - return next(errors.InternalError); - }); - - request.on('end', () => { - if (request.serverAccessLog) { - request.serverAccessLog.startTurnAroundTime = process.hrtime.bigint(); - } - - if (bodyLength > MAX_BODY_LENGTH) { - log.error('body length is too long for request type', - { bodyLength }); - return next(errors.InvalidRequest); - } - - const buff = Buffer.concat(post, bodyLength); - - const err = validateMethodChecksumNoChunking(request, buff, log); - if (err) { - return next(err); - } - - // Convert array of post buffers into one string - request.post = buff.toString(); - return next(null, userInfo, authorizationResults, streamingV4Params, infos); - }); - return undefined; - }, - // Tag condition keys require information from CloudServer for evaluation - (userInfo, authorizationResults, streamingV4Params, infos, next) => tagConditionKeyAuth( - authorizationResults, - 
request, - requestContexts, - apiMethod, - log, - (err, authResultsWithTags) => { - if (err) { - log.trace('tag authentication error', { error: err }); - return next(err); - } - return next(null, userInfo, authResultsWithTags, streamingV4Params, infos); - }, - ), - (userInfo, authorizationResults, streamingV4Params, infos, next) => handleAuthorizationResults( - request, authorizationResults, apiMethod, returnTagCount, log, (err, res) => { - request.accountQuotas = infos?.accountQuota; - if (err) { - return next(err); - } - returnTagCount = res.returnTagCount; - return next(null, userInfo, authorizationResults, streamingV4Params); - }), - ], (err, userInfo, authorizationResults, streamingV4Params) => { - if (err) { - return callback(err); - } - const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5, - (hook, done) => hook(err, done), - () => callback(err, ...results)); - - if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') { - request._response = response; - return this[apiMethod](userInfo, request, streamingV4Params, - log, methodCallback, authorizationResults); - } - if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') { - return this[apiMethod](userInfo, request, sourceBucket, - sourceObject, sourceVersionId, log, methodCallback); - } - if (apiMethod === 'objectGet') { - // remove objectGetTagging/objectGetTaggingVersion from apiMethods, these were added by - // prepareRequestContexts to determine the value of returnTagCount. 
- request.apiMethods = request.apiMethods.filter(methodName => !methodName.includes('Tagging')); - return this[apiMethod](userInfo, request, returnTagCount, log, callback); - } - return this[apiMethod](userInfo, request, log, methodCallback); - }); - }; // End of processRequest helper function - - const applyRateLimit = request.bucketName - && config.rateLimiting?.enabled - && !rateLimitApiActions.includes(apiMethod) // Don't limit any rate limit admin actions - && !request.isInternalServiceRequest; // Don't limit any calls from internal services - - if (applyRateLimit) { - // Cache-only rate limit check (fast path, no metadata fetch) - // If config is cached, apply rate limiting now - // If not cached, metadata validation functions will handle it - const cachedConfig = getRateLimitFromCache(request.bucketName); - - if (cachedConfig !== undefined) { - // Cache hit - apply rate limiting NOW (fast path) - return checkRateLimitWithConfig( - request.bucketName, - cachedConfig, - log, - (rateLimitErr, rateLimited) => { - if (rateLimitErr) { - log.error('Rate limit check error in api.js', { - error: rateLimitErr, - }); - } - - if (rateLimited) { - log.addDefaultFields({ - rateLimited: true, - rateLimitSource: cachedConfig.source, - }); - // Add to server access log - if (request.serverAccessLog) { - /* eslint-disable no-param-reassign */ - request.serverAccessLog.rateLimited = true; - request.serverAccessLog.rateLimitSource = cachedConfig.source; - /* eslint-enable no-param-reassign */ - } - return callback(config.rateLimiting.error); - } - - // Set tracker - rate limiting already applied - // eslint-disable-next-line no-param-reassign - request.rateLimitAlreadyChecked = true; - - // Continue with normal flow - return processRequest(); - } - ); + return process.nextTick(callback, config.rateLimiting.error); } } - // Cache miss or no bucket name - continue to metadata validation - // Rate limiting will happen there after metadata fetch - return processRequest(); + return 
process.nextTick(callApiHandler, apiMethod, apiHandler, request, response, log, callback); }, bucketDelete, bucketDeleteCors, From 57f02cb6457a89d4d313396d9278ec516a5a5343 Mon Sep 17 00:00:00 2001 From: Taylor McKinnon Date: Mon, 27 Apr 2026 09:47:30 -0700 Subject: [PATCH 15/16] impr(CLDSRV-869): Fix buckets refilled calculation --- lib/api/apiUtils/rateLimit/refillJob.js | 4 ++-- lib/api/apiUtils/rateLimit/tokenBucket.js | 15 ++++++++------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/lib/api/apiUtils/rateLimit/refillJob.js b/lib/api/apiUtils/rateLimit/refillJob.js index a638ac364e..3303d3c30a 100644 --- a/lib/api/apiUtils/rateLimit/refillJob.js +++ b/lib/api/apiUtils/rateLimit/refillJob.js @@ -38,9 +38,9 @@ async function refillTokenBuckets(logger) { checked++; // Trigger async refill if needed (non-blocking) - const promise = bucket.refillIfNeeded().then(() => { + const promise = bucket.refillIfNeeded().then(bucketRefilled => { // Check if refill actually happened - if (bucket.refillCount > 0) { + if (bucketRefilled) { refilled++; } }).catch(err => { diff --git a/lib/api/apiUtils/rateLimit/tokenBucket.js b/lib/api/apiUtils/rateLimit/tokenBucket.js index e372efd4eb..cdff9b969d 100644 --- a/lib/api/apiUtils/rateLimit/tokenBucket.js +++ b/lib/api/apiUtils/rateLimit/tokenBucket.js @@ -35,7 +35,6 @@ class WorkerTokenBucket { this.tokens = this.bufferSize; // Start with full buffer for fail-open at startup this.interval = calculateInterval(this.limitConfig.limit, config.rateLimiting.nodes); this.lastRefillTime = Date.now(); - this.refillCount = 0; this.refillInProgress = false; } @@ -74,17 +73,17 @@ class WorkerTokenBucket { * Check if refill is needed and trigger async refill * Called by background job every 100ms * - * @returns {Promise} + * @returns {Promise} */ async refillIfNeeded() { // Already refilling, skip if (this.refillInProgress) { - return; + return false; } // Above threshold, no need to refill yet if (this.tokens >= 
this.refillThreshold) { - return; + return false; } this.refillInProgress = true; @@ -95,7 +94,7 @@ class WorkerTokenBucket { const requested = this.bufferSize - this.tokens; if (requested <= 0) { - return; // Buffer already full + return false; // Buffer already full } // Calculate GCRA parameters @@ -126,7 +125,6 @@ class WorkerTokenBucket { // Add granted tokens to buffer this.tokens += granted; - this.refillCount++; this.lastRefillTime = Date.now(); const duration = this.lastRefillTime - startTime; @@ -139,7 +137,6 @@ class WorkerTokenBucket { granted, newBalance: this.tokens, durationMs: duration, - refillCount: this.refillCount, }); // Warn if refill took too long or granted too few @@ -159,8 +156,11 @@ class WorkerTokenBucket { measure: this.measure, requested, }); + + return false; } + return true; } catch (err) { this.log.error('Token refill failed', { resourceClass: this.resourceClass, @@ -169,6 +169,7 @@ class WorkerTokenBucket { error: err.message, stack: err.stack, }); + return false; } finally { this.refillInProgress = false; } From 28e587a9295779dddcfa4be63b90081fb9211407 Mon Sep 17 00:00:00 2001 From: Taylor McKinnon Date: Tue, 28 Apr 2026 11:12:45 -0700 Subject: [PATCH 16/16] Bump project version --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index b4ae39b20d..7dca506866 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@zenko/cloudserver", - "version": "9.2.37", + "version": "9.2.38", "description": "Zenko CloudServer, an open-source Node.js implementation of a server handling the Amazon S3 protocol", "main": "index.js", "engines": {