Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion data/dataStore.js
Original file line number Diff line number Diff line change
Expand Up @@ -46,5 +46,6 @@ module.exports = {
sb5EstDate: 0,

// Masternodes array (will be populated)
masternodesArr: []
masternodesArr: [],
masternodesUpdatedAt: 0
};
3 changes: 3 additions & 0 deletions lib/appFactory.js
Original file line number Diff line number Diff line change
Expand Up @@ -178,12 +178,14 @@ function mountAuthAndVault(
register: rateLimiters.disabled(),
verifyEmail: rateLimiters.disabled(),
vote: rateLimiters.disabled(),
reconcile: rateLimiters.disabled(),
}
: {
login: rateLimiters.loginLimiter(),
register: rateLimiters.registerLimiter(),
verifyEmail: rateLimiters.verifyEmailLimiter(),
vote: rateLimiters.voteLimiter(),
reconcile: rateLimiters.reconcileLimiter(),
};

app.use(
Expand Down Expand Up @@ -233,6 +235,7 @@ function mountAuthAndVault(
? invalidateCurrentVotes
: null,
voteLimiter: limiters.vote,
reconcileLimiter: limiters.reconcile,
nowMs: now,
})
);
Expand Down
8 changes: 5 additions & 3 deletions lib/vaults.js
Original file line number Diff line number Diff line change
Expand Up @@ -70,9 +70,11 @@ function createVaultsRepo(db, opts = {}) {
const existing = selectByUser.get(userId);

if (!existing) {
// Accept either '*' (explicit "no existing") or absent. Any concrete
// etag here is a client bug / stale read.
if (ifMatch && ifMatch !== '*') throw mkErr('etag_mismatch');
// First writes must be explicit too. The frontend already sends
// If-Match: *, and requiring it keeps blind creates from silently
// bypassing the same precondition contract as updates.
if (!ifMatch) throw mkErr('etag_required');
if (ifMatch !== '*') throw mkErr('etag_mismatch');
const etag = etagFor(blob);
try {
insertFirst.run(userId, blob, etag, now());
Expand Down
15 changes: 8 additions & 7 deletions lib/vaults.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ describe('vaults repo', () => {

test('first put creates a vault and returns etag (no saltV — it lives on users now)', () => {
const uid = seedUser(db);
const result = vaults.put(uid, { blob: 'ciphertextA' });
const result = vaults.put(uid, { blob: 'ciphertextA', ifMatch: '*' });
expect(result).toEqual({ etag: etagFor('ciphertextA') });
// Migration 004 removed the salt_v column from vaults; make sure the
// repo's response shape matches and no stray saltV leaks back.
Expand All @@ -46,8 +46,9 @@ describe('vaults repo', () => {
expect(got.etag).toBe(etagFor('ciphertextA'));
});

test('first put accepts ifMatch "*" but rejects any concrete etag', () => {
test('first put requires ifMatch "*" and rejects any concrete etag', () => {
const uid = seedUser(db);
expect(() => vaults.put(uid, { blob: 'x' })).toThrow(/etag_required/);
expect(() => vaults.put(uid, { blob: 'x', ifMatch: 'abc' })).toThrow(
/etag_mismatch/
);
Expand All @@ -56,7 +57,7 @@ describe('vaults repo', () => {

test('subsequent put requires ifMatch to equal current etag', () => {
const uid = seedUser(db);
const first = vaults.put(uid, { blob: 'A' });
const first = vaults.put(uid, { blob: 'A', ifMatch: '*' });

expect(() => vaults.put(uid, { blob: 'B' })).toThrow(/etag_required/);
expect(() => vaults.put(uid, { blob: 'B', ifMatch: 'wrong' })).toThrow(
Expand All @@ -76,7 +77,7 @@ describe('vaults repo', () => {
// observed etag. Recovery from a truly corrupted local state goes
// through an explicit "reset vault" flow instead.
const uid = seedUser(db);
vaults.put(uid, { blob: 'A' });
vaults.put(uid, { blob: 'A', ifMatch: '*' });
expect(() => vaults.put(uid, { blob: 'C', ifMatch: '*' })).toThrow(
/etag_mismatch/
);
Expand Down Expand Up @@ -108,7 +109,7 @@ describe('vaults repo', () => {
// statement shape directly so the guarantee survives even a repo
// refactor that someday skipped the pre-read.
const uid = seedUser(db);
const first = vaults.put(uid, { blob: 'A' });
const first = vaults.put(uid, { blob: 'A', ifMatch: '*' });

const stmt = db.prepare(
`UPDATE vaults
Expand All @@ -129,7 +130,7 @@ describe('vaults repo', () => {

test('simulated-race: stale ifMatch against a newer row is rejected', () => {
const uid = seedUser(db);
const first = vaults.put(uid, { blob: 'A' });
const first = vaults.put(uid, { blob: 'A', ifMatch: '*' });
// A concurrent writer advances the state.
vaults.put(uid, { blob: 'B', ifMatch: first.etag });

Expand Down Expand Up @@ -174,7 +175,7 @@ describe('vaults repo', () => {
// a "no such column" at query-prepare time and every vault GET
// turns into a 500.
const uid = seedUser(db);
vaults.put(uid, { blob: 'hello' });
vaults.put(uid, { blob: 'hello', ifMatch: '*' });
const row = vaults.get(uid);
expect(Object.keys(row).sort()).toEqual(['blob', 'etag', 'updatedAt']);
});
Expand Down
23 changes: 23 additions & 0 deletions middleware/rateLimit.js
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,15 @@ function voteKey(req) {
return `vote|ip|${ipBucket(req)}`;
}

// Per-user bucket for /gov/receipts/reconcile. This endpoint may issue
// gobject_getcurrentvotes RPCs, so it needs its own budget instead of
// sharing /gov/vote's relay budget.
function reconcileKey(req) {
  const user = req.user;
  // `!= null` (not truthiness) so a legitimate id of 0 still buckets per-user.
  if (user && user.id != null) {
    return `reconcile|u${String(user.id)}`;
  }
  // Unauthenticated (shouldn't normally happen behind requireAuth): fall
  // back to the shared per-IP bucket.
  return `reconcile|ip|${ipBucket(req)}`;
}

function loginLimiter() {
return rateLimit({
windowMs: 15 * MINUTE,
Expand Down Expand Up @@ -135,6 +144,18 @@ function voteLimiter() {
});
}

// Limiter for POST /gov/receipts/reconcile: 30 requests per rolling hour,
// keyed per-user (per-IP fallback) via reconcileKey. Kept separate from the
// vote limiter so reconcile traffic cannot exhaust the relay budget.
function reconcileLimiter() {
  const windowMs = 60 * MINUTE;
  const maxPerWindow = 30;
  return rateLimit({
    windowMs,
    max: maxPerWindow,
    standardHeaders: true,
    legacyHeaders: false,
    keyGenerator: reconcileKey,
    message: { error: 'too_many_reconcile_requests' },
    handler: trippedHandler('reconcile'),
  });
}

// No-op limiter used when rate limiting is switched off (e.g. in tests):
// every request is handed straight to the next middleware.
function disabled() {
  return function passThrough(_req, _res, next) {
    return next();
  };
}
Expand All @@ -144,10 +165,12 @@ module.exports = {
registerLimiter,
verifyEmailLimiter,
voteLimiter,
reconcileLimiter,
disabled,
// Exported for direct unit testing.
loginKey,
registerKey,
verifyEmailKey,
voteKey,
reconcileKey,
};
15 changes: 15 additions & 0 deletions middleware/rateLimit.test.js
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
const {
loginKey,
registerKey,
reconcileKey,
voteKey,
} = require('./rateLimit');

Expand Down Expand Up @@ -83,4 +84,18 @@ describe('rate-limit key generators', () => {
expect(a).toMatch(/^vote\|ip\|1\.2\.3\.4$/);
});
});

describe('reconcileKey', () => {
test('buckets by authenticated user.id when present', () => {
const a = reconcileKey(mk({}, '1.2.3.4', { id: 42 }));
const b = reconcileKey(mk({}, '9.9.9.9', { id: 42 }));
expect(a).toBe(b);
expect(a).toBe('reconcile|u42');
});

test('falls back to IP bucket when the user is missing', () => {
const a = reconcileKey(mk({}, '1.2.3.4'));
expect(a).toMatch(/^reconcile\|ip\|1\.2\.3\.4$/);
});
});
});
95 changes: 90 additions & 5 deletions routes/gov.js
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,41 @@ const HEX64 = /^[0-9a-f]{64}$/i;
// say stale" race. Callers that want stricter freshness can force
// a reconcile with `?refresh=1`.
const DEFAULT_RECEIPTS_FRESHNESS_MS = 2 * 60 * 1000;
const DEFAULT_MASTERNODE_CACHE_MAX_AGE_MS = 30 * 1000;

// Normalize a masternodes provider value into { masternodes, fresh }.
// Providers may return either a bare array (legacy shape — no timestamp, so
// it is treated as always fresh) or a { masternodes, updatedAt } snapshot,
// where `fresh` means updatedAt is a positive integer no older than
// maxAgeMs relative to nowMs (a future timestamp is treated as stale).
function readMasternodeSnapshot(value, nowMs, maxAgeMs) {
  if (Array.isArray(value)) {
    return { masternodes: value, fresh: true };
  }
  const masternodes =
    value && Array.isArray(value.masternodes) ? value.masternodes : [];
  let updatedAt = 0;
  if (value && Number.isInteger(value.updatedAt)) {
    updatedAt = value.updatedAt;
  }
  const ageMs = nowMs - updatedAt;
  const fresh = updatedAt > 0 && ageMs >= 0 && ageMs <= maxAgeMs;
  return { masternodes, fresh };
}

// Build the set of known collateral outpoints ("hash:index", hash
// lowercased) from a masternode list. Entries missing a valid 64-hex
// collateralHash or an integer collateralIndex are skipped; a non-array
// input yields an empty set.
function knownOutpointSet(masternodes) {
  const known = new Set();
  const list = Array.isArray(masternodes) ? masternodes : [];
  for (const mn of list) {
    if (!mn) continue;
    if (typeof mn.collateralHash !== 'string') continue;
    if (!HEX64.test(mn.collateralHash)) continue;
    if (!Number.isInteger(mn.collateralIndex)) continue;
    known.add(`${mn.collateralHash.toLowerCase()}:${mn.collateralIndex}`);
  }
  return known;
}

// Governance HTTP surface.
//
Expand Down Expand Up @@ -66,6 +101,8 @@ const DEFAULT_RECEIPTS_FRESHNESS_MS = 2 * 60 * 1000;
// - nowMs: injectable clock for deterministic time-window tests.
// - voteLimiter: an express-rate-limit middleware (or a no-op in
// tests). Mounted only on POST /gov/vote.
// - reconcileLimiter: same shape, mounted only on POST
// /gov/receipts/reconcile.

function createGovRouter({
masternodesProvider,
Expand All @@ -76,7 +113,9 @@ function createGovRouter({
getCurrentVotes = null,
invalidateCurrentVotes = null,
receiptsFreshnessMs = DEFAULT_RECEIPTS_FRESHNESS_MS,
masternodeCacheMaxAgeMs = DEFAULT_MASTERNODE_CACHE_MAX_AGE_MS,
voteLimiter = (_req, _res, next) => next(),
reconcileLimiter = (_req, _res, next) => next(),
nowMs = () => Date.now(),
}) {
if (typeof masternodesProvider !== 'function') {
Expand Down Expand Up @@ -113,8 +152,12 @@ function createGovRouter({
(req, res) => {
const parsed = validateLookupBody(req.body);
if (!parsed.ok) return res.status(400).json({ error: parsed.error });
const mnArr = masternodesProvider() || [];
const matches = lookupMatches(mnArr, parsed.votingAddresses);
const snapshot = readMasternodeSnapshot(
masternodesProvider(),
nowMs(),
masternodeCacheMaxAgeMs
);
const matches = lookupMatches(snapshot.masternodes, parsed.votingAddresses);
return res.json({ matches });
}
);
Expand Down Expand Up @@ -154,10 +197,51 @@ function createGovRouter({
const parsed = validateVoteBody(req.body, { nowMs: nowMs() });
if (!parsed.ok) return res.status(400).json({ error: parsed.error });
try {
const out = await relayVotes(voteRaw, parsed, {
receipts,
userId: req.user && req.user.id,
const snapshot = readMasternodeSnapshot(
masternodesProvider(),
nowMs(),
masternodeCacheMaxAgeMs
);
const knownOutpoints = snapshot.fresh
? knownOutpointSet(snapshot.masternodes)
: new Set();
const relayEntries = [];
const relayIndexes = [];
const results = new Array(parsed.entries.length);
parsed.entries.forEach((entry, index) => {
const key = `${entry.collateralHash}:${entry.collateralIndex}`;
if (knownOutpoints.size > 0 && !knownOutpoints.has(key)) {
Comment thread
sidhujag marked this conversation as resolved.
results[index] = {
collateralHash: entry.collateralHash,
collateralIndex: entry.collateralIndex,
ok: false,
error: 'mn_not_found',
};
return;
}
relayIndexes.push(index);
relayEntries.push(entry);
});

if (relayEntries.length > 0) {
const relayed = await relayVotes(
voteRaw,
{ ...parsed, entries: relayEntries },
{
receipts,
userId: req.user && req.user.id,
}
);
relayed.results.forEach((result, index) => {
results[relayIndexes[index]] = result;
});
}

const out = {
accepted: results.filter((r) => r && r.ok).length,
rejected: results.filter((r) => r && !r.ok).length,
results,
};
// Invalidate the cached gobject_getcurrentvotes snapshot for
// this proposal so the next /gov/receipts read observes the
// votes we just relayed (or the chain state that followed
Expand Down Expand Up @@ -284,6 +368,7 @@ function createGovRouter({
'/receipts/reconcile',
sessionMw.requireAuth,
csrfMw.require,
reconcileLimiter,
async (req, res) => {
if (!receipts) {
return res.json({ receipts: [], reconciled: false });
Expand Down
11 changes: 7 additions & 4 deletions server.js
Original file line number Diff line number Diff line change
Expand Up @@ -327,12 +327,15 @@ mountAuthAndVault(app, {
mailer,
baseUrl: process.env.BASE_URL || 'http://localhost:3001',
frontendUrl: PUBLIC_BASE_URL,
// Read the live tracker array fresh on every call rather than
// Read the live tracker snapshot fresh on every call rather than
// snapshotting it here — the tracker REASSIGNS `masternodesArr`
// every 10s (`data.masternodesArr = []`), so a captured reference
// would go stale after the first refresh. `dataStore.masternodesArr`
// is a property access and therefore always returns the current value.
masternodesProvider: () => dataStore.masternodesArr,
// would go stale after the first refresh. `masternodesUpdatedAt`
// lets /gov/vote avoid hard-rejecting outpoints from a stale cache.
masternodesProvider: () => ({
masternodes: dataStore.masternodesArr,
updatedAt: dataStore.masternodesUpdatedAt,
}),
voteRaw: (collateralHash, collateralIndex, governanceHash, signal, outcome, time, voteSig) =>
rpcServices(client.callRpc)
.voteRaw(
Expand Down
1 change: 1 addition & 0 deletions services/masternodeTracker.js
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,7 @@ setInterval(() => {
}

data.masternodesArr.sort((a, b) => b.lastpaidtime - a.lastpaidtime);
data.masternodesUpdatedAt = Date.now();

data.highestMN = Math.max(...Object.values(data.mapData).map(e => e.masternodes || 0));

Expand Down
Loading
Loading