Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 6 additions & 15 deletions lib/Config.js
Original file line number Diff line number Diff line change
Expand Up @@ -1862,24 +1862,15 @@ class Config extends EventEmitter {
this.enableVeeamRoute = config.enableVeeamRoute;
}

this.rateLimiting = {
enabled: false,
bucket: {
configCacheTTL: constants.rateLimitDefaultConfigCacheTTL,
},
};
// Parse and validate all rate limiting configuration
this.rateLimiting = parseRateLimitConfig(config.rateLimiting);

if (config.rateLimiting?.enabled) {
// rate limiting uses the same localCache config defined for S3 to avoid
// config duplication.
assert(this.localCache, 'missing required property of rateLimit ' +
'configuration: localCache');

// Parse and validate all rate limiting configuration
this.rateLimiting = parseRateLimitConfig(config.rateLimiting, this.clusters = this.clusters || 1);
// Rate limiting uses the same localCache config defined for S3 to avoid config duplication.
// If rate limiting is enabled check to make sure it is also configured.
if (this.rateLimiting.enabled) {
assert(this.localCache, 'localCache must be defined when rate limiting is enabled');
}


if (config.capabilities) {
if (config.capabilities.locationTypes) {
this.supportedLocationTypes = new Set(config.capabilities.locationTypes);
Expand Down
449 changes: 220 additions & 229 deletions lib/api/api.js

Large diffs are not rendered by default.

67 changes: 41 additions & 26 deletions lib/api/apiUtils/rateLimit/cache.js
Original file line number Diff line number Diff line change
@@ -1,63 +1,78 @@
const configCache = new Map();
const bucketOwnerCache = new Map();

// Load tracking for adaptive burst capacity
// Map<bucketKey, Array<timestamp>> - rolling 1-second window
const requestTimestamps = new Map();
// Short namespace prefixes used when composing cache/Redis keys for each
// rate-limited resource class.
const namespace = {
    bucket: 'bkt',
    account: 'acc',
};

/**
 * Store a value in a TTL cache.
 *
 * @param {Map} cache - Backing cache (Map of key -> { expiry, value })
 * @param {string} key - Cache key
 * @param {*} value - Value to cache
 * @param {number} ttl - Time to live in milliseconds
 * @returns {undefined}
 */
function cacheSet(cache, key, value, ttl) {
    // Absolute expiry timestamp; checked lazily on read and by cacheExpire().
    const expiry = Date.now() + ttl;
    cache.set(key, { expiry, value });
}

/**
 * Retrieve a value from a TTL cache.
 *
 * Expired entries are evicted lazily on read.
 *
 * @param {Map} cache - Backing cache (Map of key -> { expiry, value })
 * @param {string} key - Cache key
 * @returns {*} Cached value, or undefined when missing or expired
 */
function cacheGet(cache, key) {
    const cachedValue = cache.get(key);
    if (cachedValue === undefined) {
        return undefined;
    }

    const { expiry, value } = cachedValue;
    if (expiry <= Date.now()) {
        // Entry outlived its TTL: evict it and report a miss.
        cache.delete(key);
        return undefined;
    }

    return value;
}

/**
 * Remove a single entry from the given cache; a no-op when the key is absent.
 *
 * @param {Map} store - Backing cache
 * @param {string} cacheKey - Key to remove
 * @returns {undefined}
 */
function cacheDelete(store, cacheKey) {
    store.delete(cacheKey);
}

/**
 * Evict every expired entry from a TTL cache.
 *
 * @param {Map} cache - Backing cache (Map of key -> { expiry, value })
 * @returns {number} Number of entries removed
 */
function cacheExpire(cache) {
    const now = Date.now();

    // Collect keys first, then delete, so the Map is not mutated mid-iteration.
    const toRemove = [];
    for (const [key, { expiry }] of cache.entries()) {
        if (expiry <= now) {
            toRemove.push(key);
        }
    }

    for (const key of toRemove) {
        cache.delete(key);
    }

    return toRemove.length;
}

/**
* Invalidate cached config for a specific bucket
*
* @param {string} bucketName - Bucket name
* @returns {boolean} True if entry was found and removed
*/
function invalidateCachedConfig(bucketName) {
const cacheKey = `bucket:${bucketName}`;
return configCache.delete(cacheKey);
/**
 * Wrap `fn` so callers pass (resourceClass, resourceId, ...) while the
 * wrapped function receives a single composite "<class>:<id>" cache key.
 *
 * @param {Function} fn - Function taking (key, ...args)
 * @returns {Function} Function taking (resourceClass, resourceId, ...args)
 */
function formatKeyDecorator(fn) {
    return function withCompositeKey(resourceClass, resourceId, ...rest) {
        const compositeKey = `${resourceClass}:${resourceId}`;
        return fn(compositeKey, ...rest);
    };
}

// Public accessors: each is the generic cache helper bound to its dedicated
// Map. The config accessors additionally build the
// "<resourceClass>:<resourceId>" composite key from their first two arguments.
const getCachedConfig = formatKeyDecorator(cacheGet.bind(null, configCache));
const setCachedConfig = formatKeyDecorator(cacheSet.bind(null, configCache));
const deleteCachedConfig = formatKeyDecorator(cacheDelete.bind(null, configCache));
const expireCachedConfigs = cacheExpire.bind(null, configCache);

const getCachedBucketOwner = cacheGet.bind(null, bucketOwnerCache);
const setCachedBucketOwner = cacheSet.bind(null, bucketOwnerCache);
const deleteCachedBucketOwner = cacheDelete.bind(null, bucketOwnerCache);
const expireCachedBucketOwners = cacheExpire.bind(null, bucketOwnerCache);

module.exports = {
namespace,
setCachedConfig,
getCachedConfig,
expireCachedConfigs,
invalidateCachedConfig,

deleteCachedConfig,
setCachedBucketOwner,
getCachedBucketOwner,
deleteCachedBucketOwner,
expireCachedBucketOwners,
// Do not access directly
// Used only for tests
configCache,
requestTimestamps,
bucketOwnerCache,
};
37 changes: 23 additions & 14 deletions lib/api/apiUtils/rateLimit/cleanup.js
Original file line number Diff line number Diff line change
@@ -1,19 +1,34 @@
const { expireCachedConfigs } = require('./cache');
const { expireCachedConfigs, expireCachedBucketOwners } = require('./cache');
const { rateLimitCleanupInterval } = require('../../../../constants');

let cleanupInterval = null;
let cleanupTimer = null;

/**
 * Run one cleanup pass over both rate-limit caches, then re-arm itself.
 *
 * A self-rescheduling one-shot timer is used (rather than setInterval) so
 * each run is spaced from the end of the previous one.
 *
 * @param {object} log - Logger
 * @param {object} [options] - Options
 * @param {boolean} [options.skipUnref] - Keep the timer referenced
 *   (test environments using fake timers)
 * @returns {undefined}
 */
function cleanupJob(log, options = {}) {
    const removedConfigs = expireCachedConfigs();
    const removedOwners = expireCachedBucketOwners();
    const removedAnything = removedConfigs > 0 || removedOwners > 0;
    if (removedAnything) {
        log.debug('Rate limit cleanup completed', {
            expiredConfigs: removedConfigs,
            expiredBucketOwners: removedOwners,
        });
    }

    cleanupTimer = setTimeout(() => cleanupJob(log, options), rateLimitCleanupInterval);
    if (!options.skipUnref) {
        // Do not let the pending timer keep the process alive.
        cleanupTimer.unref();
    }
}


/**
* Start periodic cleanup of expired rate limit counters and cached configs
*
* Runs every 10 seconds to:
* - Remove expired GCRA counters (where emptyAt <= now)
* - Remove expired cached rate limit configs (where TTL expired)
* - Remove expired cached bucket owners (where TTL expired)
*
* This prevents memory leaks from accumulating expired entries.
*/
function startCleanupJob(log, options = {}) {
if (cleanupInterval) {
if (cleanupTimer) {
log.warn('Rate limit cleanup job already running');
return;
}
Expand All @@ -22,18 +37,12 @@ function startCleanupJob(log, options = {}) {
interval: rateLimitCleanupInterval,
});

cleanupInterval = setInterval(() => {
const now = Date.now();
const expiredConfigs = expireCachedConfigs(now);
if (expiredConfigs > 0) {
log.debug('Rate limit cleanup completed', { expiredConfigs });
}
}, rateLimitCleanupInterval);
cleanupTimer = setTimeout(() => cleanupJob(log, options), rateLimitCleanupInterval);

// Prevent cleanup job from keeping process alive
// Skip unref() in test environment to work with sinon fake timers
if (!options.skipUnref) {
cleanupInterval.unref();
cleanupTimer.unref();
}
}

Expand All @@ -42,9 +51,9 @@ function startCleanupJob(log, options = {}) {
* Used for graceful shutdown or testing
*/
function stopCleanupJob(log) {
if (cleanupInterval) {
clearInterval(cleanupInterval);
cleanupInterval = null;
if (cleanupTimer !== null) {
clearTimeout(cleanupTimer);
cleanupTimer = null;
if (log) {
log.info('Stopped rate limit cleanup job');
}
Expand Down
8 changes: 5 additions & 3 deletions lib/api/apiUtils/rateLimit/client.js
Original file line number Diff line number Diff line change
Expand Up @@ -38,14 +38,16 @@ class RateLimitClient {
*
* Used by token reservation system to request capacity in advance.
*
* @param {string} bucketName - Bucket name
* @param {string} resourceClass - Resource class name e.g. 'account' or 'bucket'
* @param {string} resourceId - Unique resource ID e.g. bucket name or account ID
* @param {string} measure - ID of rate limit measure e.g. 'rps'
* @param {number} requested - Number of tokens requested
* @param {number} interval - Interval per request in ms
* @param {number} burstCapacity - Burst capacity in ms
* @param {RateLimitClient~grantTokens} cb - Callback
*/
grantTokens(bucketName, requested, interval, burstCapacity, cb) {
const key = `ratelimit:bucket:${bucketName}:rps:emptyAt`;
grantTokens(resourceClass, resourceId, measure, requested, interval, burstCapacity, cb) {
const key = `ratelimit:${resourceClass}:${resourceId}:${measure}:emptyAt`;
const now = Date.now();

this.redis.grantTokens(
Expand Down
Loading
Loading