4 changes: 4 additions & 0 deletions byconity/benchmark.sh
@@ -36,6 +36,10 @@ byconity --database bench --query "INSERT INTO hits FORMAT TSV" < hits.tsv
END=$(date +%s)
echo "Load time: $(echo "$END - $START" | bc)"

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.tsv hits.tsv.gz

# NOTE: this sometimes may hang due to docker-compose; using docker directly may help
./run.sh

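Context for the comment added across these scripts: each engine's run.sh flushes and drops the OS page cache before timing cold runs, so a freshly written multi-gigabyte hits.tsv still dirty in the page cache would be written back during that step and counted against the benchmark's prep time. A minimal sketch of the idiom the comments refer to (the exact run.sh differs per engine; this is the common ClickBench pattern, assumed here):

    sync                                         # write back dirty pages -- slow if hits.tsv was just written
    echo 3 | sudo tee /proc/sys/vm/drop_caches   # evict the page cache so the first run is truly cold

Removing the source files first leaves sync nothing to write back for them, so only the database's own data is flushed.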
4 changes: 4 additions & 0 deletions cedardb/benchmark.sh
@@ -37,6 +37,10 @@ PGPASSWORD=test command time -f '%e' psql -h localhost -U postgres -q -t -c "COP
echo -n "Data size: "
PGPASSWORD=test psql -h localhost -U postgres -q -t -c "SELECT pg_total_relation_size('hits');"

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f data/hits.tsv

# run benchmark
echo "running benchmark..."
./run.sh 2>&1 | tee log.txt
4 changes: 4 additions & 0 deletions chdb/benchmark.sh
@@ -16,6 +16,10 @@ pigz -d -f hits.csv.gz
echo -n "Load time: "
command time -f '%e' ./load.py

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.csv

# Run the queries
./run.sh 2>&1 | tee log.txt

4 changes: 4 additions & 0 deletions citus/benchmark.sh
@@ -26,6 +26,10 @@ command time -f '%e' psql -U postgres -h localhost -d postgres test -q -t -c "\\
# COPY 99997497
# Time: 1579203.482 ms (26:19.203)

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.tsv

./run.sh 2>&1 | tee log.txt

echo -n "Data size: "
4 changes: 4 additions & 0 deletions clickhouse/benchmark.sh
@@ -44,6 +44,10 @@ sudo chown clickhouse:clickhouse /var/lib/clickhouse/user_files/hits_*.parquet
echo -n "Load time: "
clickhouse-client --time --query "INSERT INTO hits SELECT * FROM file('hits_*.parquet')" --max-insert-threads $(( $(nproc) / 4 ))

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
sudo rm -f /var/lib/clickhouse/user_files/hits_*.parquet

# Run the queries

./run.sh "$1"
3 changes: 3 additions & 0 deletions cloudberry/benchmark.sh
@@ -120,6 +120,9 @@ elif [[ $1 == 'test' ]]; then
if [[ $2 != 'no_dl' ]]; then echo -n "Load time: "
command time -f '%e' sudo -iu gpadmin psql -d postgres -t -c "ANALYZE hits;"; fi
du -sh /data0*
# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
sudo -u gpadmin rm -f ~gpadmin/hits.tsv ~gpadmin/hits.tsv.gz
sudo -iu gpadmin /home/gpadmin/run.sh 2>&1 | tee log.txt
cat log.txt | grep -oP 'Time: \d+\.\d+ ms|psql: error' | sed -r -e 's/Time: ([0-9]+\.[0-9]+) ms/\1/; s/^.*psql: error.*$/null/' |awk '{ if (i % 3 == 0) { printf "[" }; if ($1 == "null") { printf $1 } else { printf $1 / 1000 }; if (i % 3 != 2) { printf "," } else { print "]," }; ++i; }'

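The grep/sed/awk pipeline above (greenplum uses the same one) converts psql's timing output into the benchmark's result rows: it pulls out each "Time: N ms" value, maps errors to null, converts milliseconds to seconds, and brackets every three tries into one row. A commented equivalent of the awk stage (illustrative only):

    awk '{
        if (i % 3 == 0) printf "[";                   # open a row every three timings (one row per query)
        if ($1 == "null") printf "null";              # a failed query becomes null
        else printf "%g", $1 / 1000;                  # convert ms to s
        if (i % 3 != 2) printf ","; else print "],";  # comma between tries, close the row after the third
        ++i;
    }'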
4 changes: 4 additions & 0 deletions cockroachdb/benchmark.sh
@@ -33,6 +33,10 @@ cockroach sql --insecure --host=localhost --database=test --execute="IMPORT INTO
END=$(date +%s)
echo "Load time: $(echo "$END - $START" | bc)"

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
sudo rm -f /tmp/hits.csv.gz $CRDBDATADIR/extern/hits.csv

./run.sh 2>&1 | tee log.txt

echo -n "Data size: "
4 changes: 4 additions & 0 deletions cratedb/benchmark.sh
@@ -68,6 +68,10 @@ if [[ $MODE == "tuned" ]]; then
psql -U crate -h localhost --no-password -t -c "REFRESH TABLE hits; OPTIMIZE TABLE hits;"
fi;

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
sudo rm -f /tmp/hits.tsv

# Some queries don't fit into the available heap space and raise a CircuitBreakingException
./run.sh "$MODE" 2>&1 | tee log.txt

4 changes: 4 additions & 0 deletions databend/benchmark.sh
@@ -73,4 +73,8 @@ du -bcs _data | grep total
# curl 'http://default@localhost:8124/' --data-binary "select humanize_size(bytes_compressed) from fuse_snapshot('default', 'hits') order by timestamp desc limit 1"
# 18.48 GiB

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.tsv

./run.sh 2>&1 | tee log.txt
4 changes: 4 additions & 0 deletions doris/benchmark.sh
@@ -228,6 +228,10 @@ echo "$LOADTIME" > loadtime
du -bs "$DORIS_HOME"/be/storage/ | cut -f1 | tee storage_size
echo "Data size: $(cat storage_size)"

# Drop the downloaded source files so the per-query sync below
# doesn't flush their pages and inflate cold-run prep time.
rm -f "$BE_DATA_DIR/user_files_secure"/hits_*.parquet

mysql -h 127.0.0.1 -P9030 -uroot hits -e "set global enable_sql_cache = false"
# Dataset contains 99997497 rows, storage size is about 13319588503 bytes
mysql -h 127.0.0.1 -P9030 -uroot hits -e "SELECT count(*) FROM hits"
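Doris's comment says "per-query sync below" rather than "at the top of run.sh" because its runner re-syncs and drops caches before every query's cold try; leftover source-file pages would be written back during the first of those syncs. A sketch of that per-query pattern (assumed; the real script differs in details):

    while read -r query; do
        sync                                         # per-query cold-run prep
        echo 3 | sudo tee /proc/sys/vm/drop_caches
        for i in 1 2 3; do
            mysql -h 127.0.0.1 -P9030 -uroot hits -e "$query"
        done
    done < queries.sql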
4 changes: 4 additions & 0 deletions druid/benchmark.sh
@@ -36,6 +36,10 @@ command time -f '%e' ./apache-druid-${VERSION}/bin/post-index-task --file ingest
# The command above will fail due to a timeout but continues to run in the background.
# The loading time should be checked from the logs.

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.tsv

# Run the queries
./run.sh

4 changes: 4 additions & 0 deletions duckdb-vortex/benchmark.sh
@@ -22,6 +22,10 @@ command time -f '%e' duckdb hits-single.db -f create.sql

echo 'single'

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.parquet

./run.sh 'hits-single.db' 2>&1 | tee log-s.txt
cat log-s.txt |
grep -P '^\d|Killed|Segmentation|^Run Time \(s\): real' |
4 changes: 4 additions & 0 deletions duckdb/benchmark.sh
@@ -11,6 +11,10 @@ wget --continue --progress=dot:giga 'https://datasets.clickhouse.com/hits_compat
echo -n "Load time: "
command time -f '%e' duckdb hits.db -storage_version latest -f create.sql -f load.sql

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.parquet

# Run the queries

./run.sh 2>&1 | tee log.txt
4 changes: 4 additions & 0 deletions elasticsearch/benchmark.sh
@@ -76,5 +76,9 @@ echo "Data size: $(jq -r '._all.total.store.total_data_set_size_in_bytes' stats.
END=$(date +%s)
echo "Load time: $(echo "$END - $START" | bc)"

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.json.gz

###### Run the queries
./run.sh
3 changes: 3 additions & 0 deletions greenplum/benchmark.sh
@@ -78,5 +78,8 @@ command time -f '%e' psql -d postgres -t -c "insert into hits select * from hits
echo -n "Load time: "
command time -f '%e' psql -d postgres -t -c "ANALYZE hits;"
du -sh /gpdata*
# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.tsv
./run.sh 2>&1 | tee log.txt
cat log.txt | grep -oP 'Time: \d+\.\d+ ms|psql: error' | sed -r -e 's/Time: ([0-9]+\.[0-9]+) ms/\1/; s/^.*psql: error.*$/null/' |awk '{ if (i % 3 == 0) { printf "[" }; if ($1 == "null") { printf $1 } else { printf $1 / 1000 }; if (i % 3 != 2) { printf "," } else { print "]," }; ++i; }'
4 changes: 4 additions & 0 deletions heavyai/benchmark.sh
@@ -48,6 +48,10 @@ command time -f '%e' /opt/heavyai/bin/heavysql -q -t -p HyperInteractive <<< "CO

# Loaded: 99997497 recs, Rejected: 0 recs in 572.633 secs

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.csv

./run.sh 2>&1 | tee log.txt

echo -n "Data size: "
4 changes: 4 additions & 0 deletions hologres/benchmark.sh
@@ -56,6 +56,10 @@ for file in hits_part_*; do
PGUSER=$PG_USER PGPASSWORD=$PG_PASSWORD command time -f '%e' psql -h $HOST_NAME -p $PORT -d $DATABASE -t -c "\\copy hits FROM '$file'"
done

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.tsv hits_part_*

# run clickbench test with queries
echo "Starting to run queries..."

4 changes: 4 additions & 0 deletions hyper/benchmark.sh
@@ -13,6 +13,10 @@ pigz -d -f hits.csv.gz
echo -n "Load time: "
command time -f '%e' ./load.py

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.csv

./run.sh | tee log.txt

cat log.txt |
4 changes: 4 additions & 0 deletions infobright/benchmark.sh
@@ -32,6 +32,10 @@ sudo docker exec mysql_ib du -bcs /mnt/mysql_data/ /usr/local/infobright-4.0.7-x

# 13 760 341 294

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.tsv hits90m.tsv

./run.sh 2>&1 | tee log.txt

cat log.txt |
4 changes: 4 additions & 0 deletions kinetica/benchmark.sh
@@ -31,5 +31,9 @@ LOADTIME=$(echo "$END - $START" | bc)
echo "Load time: $LOADTIME"
echo "Data size: $(du -bcs ./kinetica-persist/gpudb | grep total)"

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
sudo rm -f ./kinetica-persist/hits.tsv.gz

# run the queries
./run.sh
4 changes: 4 additions & 0 deletions mariadb-columnstore/benchmark.sh
@@ -32,6 +32,10 @@ command time -f '%e' mysql --password="${PASSWORD}" --host 127.0.0.1 clickbench

# 41m47.856s

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.tsv

./run.sh 2>&1 | tee log.txt

echo -n "Data size: "
4 changes: 4 additions & 0 deletions mariadb/benchmark.sh
@@ -24,6 +24,10 @@ sudo mariadb test < create.sql
echo -n "Load time: "
command time -f '%e' split -l 10000 --filter="sudo mariadb test -e \"SET sql_log_bin = 0; LOAD DATA LOCAL INFILE '/dev/stdin' INTO TABLE hits;\"" hits.tsv

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.tsv

./run.sh 2>&1 | tee log.txt

echo -n "Data size: "
4 changes: 4 additions & 0 deletions monetdb/benchmark.sh
@@ -33,6 +33,10 @@ command time -f '%e' ./query.expect "COPY INTO hits FROM '$(pwd)/hits.tsv' USING
# 99997497 affected rows
# clk: 15:39 min

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.tsv

./run.sh 2>&1 | tee log.txt

echo -n "Data size: "
4 changes: 4 additions & 0 deletions mysql-myisam/benchmark.sh
@@ -20,6 +20,10 @@ command time -f '%e' sudo mysql test -e "SET sql_log_bin = 0; LOAD DATA LOCAL IN

# 41m8.979s

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.tsv

./run.sh 2>&1 | tee log.txt

echo -n "Data size: "
4 changes: 4 additions & 0 deletions mysql/benchmark.sh
@@ -20,6 +20,10 @@ command time -f '%e' sudo mysql test -e "SET sql_log_bin = 0; LOAD DATA LOCAL IN

# 2:37:52 elapsed

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.tsv

./run.sh 2>&1 | tee log.txt

echo -n "Data size: "
4 changes: 4 additions & 0 deletions oxla/benchmark.sh
@@ -38,6 +38,10 @@ PGPASSWORD=oxla command time -f '%e' psql -h localhost -U oxla -q -t -c "COPY hi
echo -n "Data size: "
PGPASSWORD=oxla psql -h localhost -U oxla -q -t -c "SELECT pg_total_relation_size('hits');"

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
sudo rm -f data/hits.csv

# run benchmark
echo "running benchmark..."
./run.sh
4 changes: 4 additions & 0 deletions pg_duckdb-indexed/benchmark.sh
@@ -74,6 +74,10 @@ fi
echo -n "Load time: "
command time -f '%e' ./load.sh

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.tsv

psql $CONNECTION -c "ALTER DATABASE postgres SET duckdb.force_execution = true;"
./run.sh 2>&1 | tee log.txt

4 changes: 4 additions & 0 deletions pg_duckdb/benchmark.sh
@@ -74,6 +74,10 @@ fi
echo -n "Load time: "
command time -f '%e' ./load.sh

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.tsv

psql $CONNECTION -c "ALTER DATABASE postgres SET duckdb.force_execution = true;"
./run.sh 2>&1 | tee log.txt

4 changes: 4 additions & 0 deletions pgpro_tam/benchmark.sh
@@ -60,6 +60,10 @@ else
command time -f '%e' psql -h 127.0.0.1 -U postgres -t -c "COPY hits FROM '/tmp/hits.tsv'"
fi

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
sudo docker exec pgpro_tam rm -f /tmp/hits.tsv

#run benchmark
./run.sh 2>&1 | tee log.txt

4 changes: 4 additions & 0 deletions pinot/benchmark.sh
@@ -38,6 +38,10 @@ command time -f '%e' ./apache-pinot-$PINOT_VERSION-bin/bin/pinot-admin.sh Launch

# After upload it shows 94465149 rows instead of 99997497 in the dataset

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.tsv parts*.tsv

# Run the queries
./run.sh

4 changes: 4 additions & 0 deletions selectdb/benchmark.sh
@@ -119,6 +119,10 @@ du -bs "$DORIS_HOME"/be/storage/ | cut -f1 | tee storage_size

echo "Data size: $(cat storage_size)"

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.tsv

# Run queries
./run.sh 2>&1 | tee -a log.txt

4 changes: 4 additions & 0 deletions starrocks/benchmark.sh
@@ -69,6 +69,10 @@ du -bcs StarRocks-${VERSION}/storage/ | grep total
# Dataset contains 99997497 rows
mysql -h 127.0.0.1 -P9030 -uroot hits -e "SELECT count(*) FROM hits"

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f hits.tsv

./run.sh 2>&1 | tee -a log.txt

cat log.txt |
4 changes: 4 additions & 0 deletions umbra/benchmark.sh
@@ -34,6 +34,10 @@ then
fi
echo "Load time: $(( (end - start) / 1000 ))"

# Drop the downloaded source files so the sync at the top of run.sh
# doesn't flush their pages and inflate cold-run prep time.
rm -f data/hits.tsv

./run.sh 2>&1 | tee log.txt

# Calculate persistence size