diff --git a/.github/workflows/ci-repeat.yml b/.github/workflows/ci-repeat.yml
new file mode 100644
index 00000000000..49e6d764cbf
--- /dev/null
+++ b/.github/workflows/ci-repeat.yml
@@ -0,0 +1,182 @@
+name: CI Repeat
+
+on:
+  # Listen for the workflow named "CI"; trigger this workflow each time it fully completes
+ workflow_run:
+ workflows: ["CI"]
+ types: [completed]
+
+permissions:
+  # "actions: write" is required to request reruns of the swap job
+ actions: write
+ contents: read
+  # "pull-requests: write" is required to record each swap/test round in a PR comment
+ pull-requests: write
+
+jobs:
+ repeat-swap:
+    # Only handle the pull-request scenario; do not continue when cancelled
+ if: >
+ github.event.workflow_run.event == 'pull_request' &&
+ github.event.workflow_run.conclusion != 'cancelled'
+ runs-on: ubuntu-24.04
+ env:
+      # Max automatic rerun rounds; after the limit, only record results without triggering again
+ MAX_ATTEMPTS: "10"
+ steps:
+ - name: Record swap result and rerun if needed
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const run = context.payload.workflow_run;
+ const owner = context.repo.owner;
+ const repo = context.repo.repo;
+ const runId = Number(run.id);
+ const runAttempt = Number(run.run_attempt || "1");
+ const maxAttempts = Number(process.env.MAX_ATTEMPTS || "10");
+            const marker = '<!-- ci-repeat-status -->';
+
+ if (!run.pull_requests || run.pull_requests.length === 0) {
+ core.info('No pull request is associated with this run.');
+ return;
+ }
+
+ const pr = run.pull_requests[0];
+ const jobs = await github.paginate(
+ github.rest.actions.listJobsForWorkflowRun,
+ {
+ owner,
+ repo,
+ run_id: runId,
+ per_page: 100,
+ }
+ );
+
+ const sanitizeLog = text => text.replace(/```/g, '``\u200b`');
+ const pickLogSnippet = text => {
+ const lines = text.replace(/\r\n/g, '\n').split('\n');
+ const needle = "couldn't open socket: connection refused";
+ const hit = lines.findIndex(line => line.includes(needle));
+
+ if (hit !== -1) {
+ const start = Math.max(0, hit - 200);
+ return lines.slice(start, hit + 1).join('\n').trimEnd();
+ }
+
+ return lines.slice(-200).join('\n').trimEnd();
+ };
+
+ const getJobLog = async jobId => {
+ try {
+ const response = await github.request(
+ 'GET /repos/{owner}/{repo}/actions/jobs/{job_id}/logs',
+ {
+ owner,
+ repo,
+ job_id: jobId,
+ request: { redirect: 'manual' },
+ }
+ );
+
+ const logUrl = response.headers.location;
+ if (!logUrl) {
+ return '(failed to get log download url)';
+ }
+
+ const logResponse = await fetch(logUrl, {
+ headers: {
+ 'User-Agent': 'actions/github-script',
+ },
+ });
+
+ if (!logResponse.ok) {
+ return `(failed to fetch logs: ${logResponse.status})`;
+ }
+
+ return await logResponse.text();
+ } catch (error) {
+ return `(failed to fetch logs: ${error.message})`;
+ }
+ };
+
+ const swapJob = jobs.find(job => /^swap(?:\s|\(|$)/.test(job.name));
+ const testStep = swapJob?.steps?.find(step => step.name?.toLowerCase() === 'test');
+
+ let swapLogSection = '_swap job not found_';
+ if (swapJob) {
+ const swapJobLog = await getJobLog(swapJob.id);
+ const snippet = sanitizeLog(pickLogSnippet(swapJobLog || ''));
+ swapLogSection =
+              `<details><summary>${swapJob.name} / test</summary>` +
+              '\n\n' +
+ '```text\n' +
+ `${snippet || '(empty log)'}\n` +
+ '```\n' +
+              '</details>';
+ }
+
+ const body = [
+ marker,
+ '## CI repeat status',
+ '',
+ `- PR: #${pr.number}`,
+ `- Commit: \`${run.head_sha.slice(0, 7)}\``,
+ `- Attempt: ${runAttempt}/${maxAttempts}`,
+ `- Workflow run: [CI #${run.run_number} / attempt ${runAttempt}](${run.html_url})`,
+ '',
+ `- swap job: **${swapJob ? (swapJob.conclusion || swapJob.status) : 'not found'}**`,
+ `- swap / test step: **${testStep ? (testStep.conclusion || testStep.status) : 'not found'}**`,
+ '',
+ '## swap / test log snippet',
+ '',
+ swapLogSection,
+ ].join('\n');
+
+ const comments = await github.paginate(
+ github.rest.issues.listComments,
+ {
+ owner,
+ repo,
+ issue_number: pr.number,
+ per_page: 100,
+ }
+ );
+
+ const existing = comments.find(comment =>
+ comment.user?.type === 'Bot' && comment.body?.includes(marker)
+ );
+
+ if (existing) {
+ await github.rest.issues.updateComment({
+ owner,
+ repo,
+ comment_id: existing.id,
+ body,
+ });
+ } else {
+ await github.rest.issues.createComment({
+ owner,
+ repo,
+ issue_number: pr.number,
+ body,
+ });
+ }
+
+ if (!swapJob) {
+ core.info('swap job not found; no rerun requested.');
+ return;
+ }
+
+ if (runAttempt >= maxAttempts) {
+ core.info(`Reached max attempts (${maxAttempts}); no rerun requested.`);
+ return;
+ }
+
+ await github.request(
+ 'POST /repos/{owner}/{repo}/actions/jobs/{job_id}/rerun',
+ {
+ owner,
+ repo,
+ job_id: swapJob.id,
+ }
+ );
+ core.info(`Requested rerun for swap job ${swapJob.id}.`);
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index ec2fe2a7f96..004599961f7 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -138,3 +138,175 @@ jobs:
run: make SWAP=1 SANITIZER=address -j8
- name: make test-asan
run: LD_LIBRARY_PATH=/home/runner/work/Redis-On-Rocks/Redis-On-Rocks/deps/rocksdb/_build_folly/libs/glog/lib:/home/runner/work/Redis-On-Rocks/Redis-On-Rocks/deps/rocksdb/_build_folly/libs/libevent/lib make SWAP=1 test-asan -j8
+
+ repeat-swap:
+ if: ${{ always() && github.event_name == 'pull_request' }}
+ needs: [unit, mem, mem-asan, swap, swap-asan]
+ runs-on: ubuntu-24.04
+ permissions:
+ actions: write
+ contents: read
+ pull-requests: write
+ env:
+ MAX_ATTEMPTS: "10"
+ steps:
+ - name: Record swap result and rerun if needed
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const owner = context.repo.owner;
+ const repo = context.repo.repo;
+ const runId = Number(process.env.GITHUB_RUN_ID);
+ const runAttempt = Number(process.env.GITHUB_RUN_ATTEMPT || "1");
+ const maxAttempts = Number(process.env.MAX_ATTEMPTS || "10");
+ const pr = context.payload.pull_request;
+            const marker = '<!-- ci-repeat-status -->';
+
+ if (!pr) {
+ core.info('No pull request is associated with this run.');
+ return;
+ }
+
+ const jobs = await github.paginate(
+ github.rest.actions.listJobsForWorkflowRun,
+ {
+ owner,
+ repo,
+ run_id: runId,
+ per_page: 100,
+ }
+ );
+
+ const sanitizeLog = text => text.replace(/```/g, '``\u200b`');
+ const pickLogSnippet = text => {
+ const lines = text.replace(/\r\n/g, '\n').split('\n');
+ const needle = "couldn't open socket: connection refused";
+ const hit = lines.findIndex(line => line.includes(needle));
+
+ if (hit !== -1) {
+ const start = Math.max(0, hit - 200);
+ return lines.slice(start, hit + 1).join('\n').trimEnd();
+ }
+
+ return lines.slice(-200).join('\n').trimEnd();
+ };
+
+ const getJobLog = async jobId => {
+ try {
+ const response = await github.request(
+ 'GET /repos/{owner}/{repo}/actions/jobs/{job_id}/logs',
+ {
+ owner,
+ repo,
+ job_id: jobId,
+ request: { redirect: 'manual' },
+ }
+ );
+
+ const logUrl = response.headers.location;
+ if (!logUrl) {
+ return '(failed to get log download url)';
+ }
+
+ const logResponse = await fetch(logUrl, {
+ headers: {
+ 'User-Agent': 'actions/github-script',
+ },
+ });
+
+ if (!logResponse.ok) {
+ return `(failed to fetch logs: ${logResponse.status})`;
+ }
+
+ return await logResponse.text();
+ } catch (error) {
+ return `(failed to fetch logs: ${error.message})`;
+ }
+ };
+
+ const swapJob = jobs.find(job => /^swap(?:\s|\(|$)/.test(job.name));
+ const testStep = swapJob?.steps?.find(step => step.name?.toLowerCase() === 'test');
+
+ let swapLogSection = '_swap job not found_';
+ if (swapJob) {
+ const swapJobLog = await getJobLog(swapJob.id);
+ const snippet = sanitizeLog(pickLogSnippet(swapJobLog || ''));
+ swapLogSection =
+              `<details><summary>${swapJob.name} / test</summary>` +
+              '\n\n' +
+ '```text\n' +
+ `${snippet || '(empty log)'}\n` +
+ '```\n' +
+              '</details>';
+ }
+
+ const body = [
+ marker,
+ '## CI repeat status',
+ '',
+ `- PR: #${pr.number}`,
+ `- Commit: \`${context.sha.slice(0, 7)}\``,
+ `- Attempt: ${runAttempt}/${maxAttempts}`,
+            `- Workflow run: [CI #${context.runNumber} / attempt ${runAttempt}](${context.serverUrl}/${owner}/${repo}/actions/runs/${runId})`,
+ '',
+ `- swap job: **${swapJob ? (swapJob.conclusion || swapJob.status) : 'not found'}**`,
+ `- swap / test step: **${testStep ? (testStep.conclusion || testStep.status) : 'not found'}**`,
+ '',
+ '## swap / test log snippet',
+ '',
+ swapLogSection,
+ ].join('\n');
+
+ const comments = await github.paginate(
+ github.rest.issues.listComments,
+ {
+ owner,
+ repo,
+ issue_number: pr.number,
+ per_page: 100,
+ }
+ );
+
+ const existing = comments.find(comment =>
+ comment.user?.type === 'Bot' && comment.body?.includes(marker)
+ );
+
+ if (existing) {
+ await github.rest.issues.updateComment({
+ owner,
+ repo,
+ comment_id: existing.id,
+ body,
+ });
+ } else {
+ await github.rest.issues.createComment({
+ owner,
+ repo,
+ issue_number: pr.number,
+ body,
+ });
+ }
+
+ if (!swapJob) {
+ core.info('swap job not found; no rerun requested.');
+ return;
+ }
+
+ if (swapJob.conclusion !== 'failure') {
+ core.info(`swap job conclusion is ${swapJob.conclusion || swapJob.status}; no rerun requested.`);
+ return;
+ }
+
+ if (runAttempt >= maxAttempts) {
+ core.info(`Reached max attempts (${maxAttempts}); no rerun requested.`);
+ return;
+ }
+
+ await github.request(
+ 'POST /repos/{owner}/{repo}/actions/jobs/{job_id}/rerun',
+ {
+ owner,
+ repo,
+ job_id: swapJob.id,
+ }
+ );
+ core.info(`Requested rerun for swap job ${swapJob.id}.`);
diff --git a/tests/gtid/master_restart.tcl b/tests/gtid/master_restart.tcl
index 29abc7a0db3..f96295547e2 100644
--- a/tests/gtid/master_restart.tcl
+++ b/tests/gtid/master_restart.tcl
@@ -163,11 +163,15 @@ proc restart_test {master_gtid_enabled slave_gtid_enabled restat_master_gtid_ena
}
wait_for_condition 1000 30 {
- [$master dbsize] eq [$slave dbsize]
- && [$slave dbsize] eq 0
+ [dbsize_loadsafe $master master_dbsize] &&
+ [dbsize_loadsafe $slave slave_dbsize] &&
+ $master_dbsize eq $slave_dbsize &&
+ $slave_dbsize eq 0
} else {
- puts [$master dbsize]
- puts [$slave dbsize]
+ puts [dbsize_loadsafe $master master_dbsize]
+ puts $master_dbsize
+ puts [dbsize_loadsafe $slave slave_dbsize]
+ puts $slave_dbsize
fail "slave dbszie != 0"
}
diff --git a/tests/gtid/sync.tcl b/tests/gtid/sync.tcl
index 40b1c2dac9f..2d047752a52 100644
--- a/tests/gtid/sync.tcl
+++ b/tests/gtid/sync.tcl
@@ -21,7 +21,8 @@ start_server {overrides {gtid-enabled yes}} {
$R(1) slaveof $R_host(0) $R_port(0)
wait_for_condition 50 1000 {
[status $R(1) master_link_status] == "up" &&
- [$R(1) dbsize] == 1
+ [dbsize_loadsafe $R(1) replica_dbsize] &&
+ $replica_dbsize == 1
} else {
fail "Replicas not replicating from master"
}
@@ -155,7 +156,8 @@ start_server {overrides {gtid-enabled yes}} {
$R(1) slaveof $R_host(0) $R_port(0)
wait_for_condition 50 1000 {
[status $R(1) master_link_status] == "up" &&
- [$R(1) dbsize] == 1
+ [dbsize_loadsafe $R(1) replica_dbsize] &&
+ $replica_dbsize == 1
} else {
fail "Replicas not replicating from master"
}
diff --git a/tests/gtid/xsync.tcl b/tests/gtid/xsync.tcl
index 34d07fff4db..77d72a323c5 100644
--- a/tests/gtid/xsync.tcl
+++ b/tests/gtid/xsync.tcl
@@ -166,6 +166,18 @@ start_server {tags {"xsync"} overrides {gtid-enabled yes}} {
# after fullresync, S SS is consistent with M
assert_equal [$M hmget hello f1 f2] {v2 v2}
+
+ # In swap mode servercron (which triggers forced full resync on
+ # WRONGTYPE) may be delayed. wait_for_sync only
+ # checks master_link_status=="up", which can still be true before
+ # the resync fires. Wait explicitly for S to have the correct type.
+ if {$::swap} {
+ wait_for_condition 500 100 {
+ [catch {$S hmget hello f1 f2} _sr] == 0 && $_sr eq {v2 v2}
+ } else {
+ fail "S not fixed by forced full resync in swap mode"
+ }
+ }
assert_equal [$S hmget hello f1 f2] {v2 v2}
catch {$SS hmget hello f1 f2} result
@@ -1638,4 +1650,4 @@ start_server {tags {"xsync"} overrides {gtid-enabled yes}} {
}
}
-}
\ No newline at end of file
+}
diff --git a/tests/integration/psync2-pingoff.tcl b/tests/integration/psync2-pingoff.tcl
index 3589d07e75f..ec179635af8 100644
--- a/tests/integration/psync2-pingoff.tcl
+++ b/tests/integration/psync2-pingoff.tcl
@@ -24,7 +24,10 @@ start_server {} {
$R(0) set foo bar
wait_for_condition 50 1000 {
[status $R(1) master_link_status] == "up" &&
- [$R(0) dbsize] == 1 && [$R(1) dbsize] == 1
+ [dbsize_loadsafe $R(0) master_dbsize] &&
+ [dbsize_loadsafe $R(1) replica_dbsize] &&
+ $master_dbsize == 1 &&
+ $replica_dbsize == 1
} else {
fail "Replicas not replicating from master"
}
diff --git a/tests/integration/psync2-reg.tcl b/tests/integration/psync2-reg.tcl
index e20f152babd..4d29b7ba6c3 100644
--- a/tests/integration/psync2-reg.tcl
+++ b/tests/integration/psync2-reg.tcl
@@ -30,8 +30,10 @@ start_server {} {
wait_for_condition 50 1000 {
[status $R(1) master_link_status] == "up" &&
[status $R(2) master_link_status] == "up" &&
- [$R(1) dbsize] == 1 &&
- [$R(2) dbsize] == 1
+ [dbsize_loadsafe $R(1) replica1_dbsize] &&
+ [dbsize_loadsafe $R(2) replica2_dbsize] &&
+ $replica1_dbsize == 1 &&
+ $replica2_dbsize == 1
} else {
fail "Replicas not replicating from master"
}
diff --git a/tests/integration/replication.tcl b/tests/integration/replication.tcl
index 5a0d0ea69d2..7e29944e558 100644
--- a/tests/integration/replication.tcl
+++ b/tests/integration/replication.tcl
@@ -1037,7 +1037,9 @@ start_server {tags {"repl external:skip tsan:skip"} overrides {save ""}} {
# Make sure that replicas and master have same
# number of keys
wait_for_condition 50 100 {
- [$master dbsize] == [$replica dbsize]
+ [dbsize_loadsafe $master master_dbsize] &&
+ [dbsize_loadsafe $replica replica_dbsize] &&
+ $master_dbsize == $replica_dbsize
} else {
fail "Different number of keys between master and replicas after too long time."
}
diff --git a/tests/support/util.tcl b/tests/support/util.tcl
index 3090b54bc19..a11bb23cefe 100644
--- a/tests/support/util.tcl
+++ b/tests/support/util.tcl
@@ -100,6 +100,15 @@ proc status {r property} {
set _ [getInfoProperty [{*}$r info] $property]
}
+proc dbsize_loadsafe {r varname} {
+ upvar 1 $varname dbsize
+ if {$::swap} {
+ return [expr {[catch {{*}$r dbsize} dbsize] == 0}]
+ }
+ set dbsize [{*}$r dbsize]
+ return 1
+}
+
proc waitForBgsave r {
while 1 {
if {[status $r rdb_bgsave_in_progress] eq 1} {
diff --git a/tests/swap/integration/repro-swapdb-inconsistency.tcl b/tests/swap/integration/repro-swapdb-inconsistency.tcl
index 87d48e4f1ee..d58d9ab5395 100644
--- a/tests/swap/integration/repro-swapdb-inconsistency.tcl
+++ b/tests/swap/integration/repro-swapdb-inconsistency.tcl
@@ -100,7 +100,9 @@ start_server {tags {"repl"}} {
}
wait_for_condition 200 100 {
- [$master dbsize] == [$slave dbsize]
+ [dbsize_loadsafe $master master_dbsize] &&
+ [dbsize_loadsafe $slave slave_dbsize] &&
+ $master_dbsize == $slave_dbsize
} else {
# dump key lists (scan-based, works for cold keys too)
dump_keylist $master /tmp/replkeys_master.txt
@@ -120,4 +122,3 @@ start_server {tags {"repl"}} {
}
}
-
diff --git a/tests/swap/ported/integration/block-repl.tcl b/tests/swap/ported/integration/block-repl.tcl
index 7aa2758bf82..f42489008e9 100644
--- a/tests/swap/ported/integration/block-repl.tcl
+++ b/tests/swap/ported/integration/block-repl.tcl
@@ -34,7 +34,9 @@ start_server {tags {"repl"}} {
stop_bg_block_op $load_handle1
stop_bg_block_op $load_handle2
wait_for_condition 100 100 {
- [$master dbsize] == [$slave dbsize]
+ [dbsize_loadsafe $master master_dbsize] &&
+ [dbsize_loadsafe $slave slave_dbsize] &&
+ $master_dbsize == $slave_dbsize
} else {
set csv1 [csvdump r]
set csv2 [csvdump {r -1}]
diff --git a/tests/swap/ported/integration/psync2-reg.tcl b/tests/swap/ported/integration/psync2-reg.tcl
index 2d1043faa21..e5a94484ef4 100644
--- a/tests/swap/ported/integration/psync2-reg.tcl
+++ b/tests/swap/ported/integration/psync2-reg.tcl
@@ -30,8 +30,10 @@ start_server {} {
wait_for_condition 50 1000 {
[status $R(1) master_link_status] == "up" &&
[status $R(2) master_link_status] == "up" &&
- [$R(1) dbsize] == 1 &&
- [$R(2) dbsize] == 1
+ [dbsize_loadsafe $R(1) replica1_dbsize] &&
+ [dbsize_loadsafe $R(2) replica2_dbsize] &&
+ $replica1_dbsize == 1 &&
+ $replica2_dbsize == 1
} else {
fail "Replicas not replicating from master"
}
@@ -73,8 +75,11 @@ start_server {} {
test "PSYNC2 #3899 regression: verify consistency" {
wait_for_condition 50 1000 {
- ([$R(0) dbsize] eq [$R(1) dbsize]) &&
- ([$R(1) dbsize] eq [$R(2) dbsize])
+ [dbsize_loadsafe $R(0) master_dbsize] &&
+ [dbsize_loadsafe $R(1) replica1_dbsize] &&
+ [dbsize_loadsafe $R(2) replica2_dbsize] &&
+ ($master_dbsize eq $replica1_dbsize) &&
+ ($replica1_dbsize eq $replica2_dbsize)
} else {
fail "The three instances have different data sets"
}
diff --git a/tests/swap/ported/integration/replication-3.tcl b/tests/swap/ported/integration/replication-3.tcl
index 9233c12db01..a1e92ad0411 100644
--- a/tests/swap/ported/integration/replication-3.tcl
+++ b/tests/swap/ported/integration/replication-3.tcl
@@ -17,7 +17,9 @@ start_server {tags {"repl"}} {
r keys * ;# Force DEL syntesizing to slave
after 1000 ;# Wait another second. Now everything should be fine.
wait_for_condition 100 50 {
- [r -1 dbsize] == [r dbsize]
+ [dbsize_loadsafe {r -1} replica_dbsize] &&
+ [dbsize_loadsafe r master_dbsize] &&
+ $replica_dbsize == $master_dbsize
} else {
fail "wait sync"
}
@@ -44,7 +46,9 @@ start_server {tags {"repl"}} {
test {Slave is able to evict keys created in writable slaves} {
# wait createComplexDataset
wait_for_condition 500 100 {
- [r dbsize] == [r -1 dbsize]
+ [dbsize_loadsafe r master_dbsize] &&
+ [dbsize_loadsafe {r -1} replica_dbsize] &&
+ $master_dbsize == $replica_dbsize
} else {
fail "Replicas and master offsets were unable to match *exactly*."
}
@@ -64,4 +68,3 @@ start_server {tags {"repl"}} {
} {0}
}
}
-
diff --git a/tests/swap/ported/integration/replication-psync.tcl b/tests/swap/ported/integration/replication-psync.tcl
index 717853fbc8e..43355da99b7 100644
--- a/tests/swap/ported/integration/replication-psync.tcl
+++ b/tests/swap/ported/integration/replication-psync.tcl
@@ -89,7 +89,9 @@ proc test_psync {descr duration backlog_size backlog_ttl delay cond mdl sdl bgsa
}
wait_for_condition 100 100 {
- [$master dbsize] == [$slave dbsize]
+ [dbsize_loadsafe $master master_dbsize] &&
+ [dbsize_loadsafe $slave slave_dbsize] &&
+ $master_dbsize == $slave_dbsize
} else {
set csv1 [csvdump r]
set csv2 [csvdump {r -1}]
diff --git a/tests/swap/ported/integration/replication.tcl b/tests/swap/ported/integration/replication.tcl
index bbeab9d5e9b..25f040025ed 100644
--- a/tests/swap/ported/integration/replication.tcl
+++ b/tests/swap/ported/integration/replication.tcl
@@ -205,9 +205,12 @@ start_server {tags {"repl"} overrides {repl-backlog-size 10mb}} {
# Stop the write load
stop_write_load $load_handle0
- # number of keys
+ # number of keys
wait_for_condition 500 100 {
- [$master dbsize] eq [$slave dbsize] && [$master dbsize] > 0
+ [dbsize_loadsafe $master master_dbsize] &&
+ [dbsize_loadsafe $slave slave_dbsize] &&
+ $master_dbsize eq $slave_dbsize &&
+ $master_dbsize > 0
} else {
fail "Different datasets between replica and master"
}
@@ -450,7 +453,9 @@ start_server {tags {"repl" "nosanitizer"} overrides {swap-repl-rordb-sync no}} {
# Make sure that replicas and master have same
# number of keys
wait_for_condition 50 100 {
- [$master dbsize] == [$replica dbsize]
+ [dbsize_loadsafe $master master_dbsize] &&
+ [dbsize_loadsafe $replica replica_dbsize] &&
+ $master_dbsize == $replica_dbsize
} else {
fail "Different number of keys between master and replicas after too long time."
}
diff --git a/tests/swap/support/util.tcl b/tests/swap/support/util.tcl
index 3a544fe4d18..c617b965ad7 100644
--- a/tests/swap/support/util.tcl
+++ b/tests/swap/support/util.tcl
@@ -530,9 +530,9 @@ proc swap_data_comp {r1 r2} {
if {$len != $len2} {
data_conflict $t $key '' 'SLEN:$len' 'SLEN:$len2'
}
- set skeys [r smembers k1]
+ set skeys [$r1 smembers $key]
foreach skey $skeys {
- if {0 == [$r2 sismember $skey]} {
+ if {0 == [$r2 sismember $key $skey]} {
data_conflict $t $key $skey "1" "0"
}
}
diff --git a/tests/swap/unit/select.tcl b/tests/swap/unit/select.tcl
index cac26164443..72cda8c79fe 100644
--- a/tests/swap/unit/select.tcl
+++ b/tests/swap/unit/select.tcl
@@ -345,7 +345,9 @@ start_server {overrides {save ""} tags {"swap" "select"}} {
$master select $db
$slave select $db
wait_for_condition 500 10 {
- [$master dbsize] eq [$slave dbsize]
+ [dbsize_loadsafe $master master_dbsize] &&
+ [dbsize_loadsafe $slave slave_dbsize] &&
+ $master_dbsize eq $slave_dbsize
} else {
fail "db$db dbsize not match"
}