bench: Cloud benchmark
This commit is contained in:
1192
scripts/cloud_bench_orchestrate.mjs
Executable file
1192
scripts/cloud_bench_orchestrate.mjs
Executable file
File diff suppressed because it is too large
Load Diff
205
scripts/run_bench_cloud.sh
Executable file
205
scripts/run_bench_cloud.sh
Executable file
@@ -0,0 +1,205 @@
|
||||
#!/usr/bin/env bash
# run_bench_cloud.sh — friendly wrapper around scripts/cloud_bench_orchestrate.mjs.
# Picks sane defaults for a cloud benchmark run; everything is overridable via
# environment variables or flags (flags win).
set -euo pipefail

# Resolve the repository root (parent of this script's directory) and run from
# there, so the orchestrator path below resolves regardless of the caller's cwd.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$ROOT_DIR"

# Print help text verbatim. The quoted 'EOF' delimiter disables all expansion
# inside the here-doc, so the examples and $-free text are emitted as written.
usage() {
cat <<'EOF'
usage:
./scripts/run_bench_cloud.sh [options] [-- extra args for cloud_bench_orchestrate.mjs]

Friendly wrapper around scripts/cloud_bench_orchestrate.mjs.

Defaults (override via env or flags):
datacenter: fsn1-dc14
server/client type: cx23
clients: 3
runs: 3
targets: parrhesia-pg,parrhesia-memory,strfry,nostr-rs-relay

Flags:
--quick Quick smoke profile (1 run, 1 client, lower load)
--clients N Override client count
--runs N Override run count
--targets CSV Override targets
--datacenter NAME Override datacenter
--server-type NAME Override server type
--client-type NAME Override client type
--image IMAGE Use remote Parrhesia image (e.g. ghcr.io/...)
--git-ref REF Build Parrhesia image from git ref (default: HEAD)
--keep Keep cloud resources after run
-h, --help

Environment overrides:
PARRHESIA_CLOUD_DATACENTER (default: fsn1-dc14)
PARRHESIA_CLOUD_SERVER_TYPE (default: cx23)
PARRHESIA_CLOUD_CLIENT_TYPE (default: cx23)
PARRHESIA_CLOUD_CLIENTS (default: 3)
PARRHESIA_BENCH_RUNS (default: 3)
PARRHESIA_CLOUD_TARGETS (default: all 4)
PARRHESIA_CLOUD_PARRHESIA_IMAGE (optional)
PARRHESIA_CLOUD_GIT_REF (default: HEAD)

Bench knobs (forwarded):
PARRHESIA_BENCH_CONNECT_COUNT
PARRHESIA_BENCH_CONNECT_RATE
PARRHESIA_BENCH_ECHO_COUNT
PARRHESIA_BENCH_ECHO_RATE
PARRHESIA_BENCH_ECHO_SIZE
PARRHESIA_BENCH_EVENT_COUNT
PARRHESIA_BENCH_EVENT_RATE
PARRHESIA_BENCH_REQ_COUNT
PARRHESIA_BENCH_REQ_RATE
PARRHESIA_BENCH_REQ_LIMIT
PARRHESIA_BENCH_KEEPALIVE_SECONDS

Examples:
# Default full cloud run
./scripts/run_bench_cloud.sh

# Quick smoke
./scripts/run_bench_cloud.sh --quick

# Use a GHCR image
./scripts/run_bench_cloud.sh --image ghcr.io/owner/parrhesia:latest
EOF
}
|
||||
|
||||
# --- Defaults (env-overridable; CLI flags below take precedence) -------------
DATACENTER="${PARRHESIA_CLOUD_DATACENTER:-fsn1-dc14}"
SERVER_TYPE="${PARRHESIA_CLOUD_SERVER_TYPE:-cx23}"
CLIENT_TYPE="${PARRHESIA_CLOUD_CLIENT_TYPE:-cx23}"
CLIENTS="${PARRHESIA_CLOUD_CLIENTS:-3}"
RUNS="${PARRHESIA_BENCH_RUNS:-3}"
TARGETS="${PARRHESIA_CLOUD_TARGETS:-parrhesia-pg,parrhesia-memory,strfry,nostr-rs-relay}"
# Empty image means the orchestrator builds Parrhesia from GIT_REF instead.
PARRHESIA_IMAGE="${PARRHESIA_CLOUD_PARRHESIA_IMAGE:-}"
GIT_REF="${PARRHESIA_CLOUD_GIT_REF:-HEAD}"
KEEP=0   # 1 => forward --keep (retain cloud resources after the run)
QUICK=0  # 1 => quick smoke profile (see the $QUICK block below)

# Everything after `--` is forwarded verbatim to cloud_bench_orchestrate.mjs.
EXTRA_ARGS=()
|
||||
|
||||
# --- CLI parsing -------------------------------------------------------------
# Flags override the env-derived defaults above; `--` stops parsing and the
# remainder is forwarded verbatim to the orchestrator.

# Guard for value-taking flags: without this, a trailing `--clients` would hit
# an unguarded "$2" and abort under `set -u` with an opaque "unbound variable"
# message instead of a helpful usage error.
need_value() {
  if [[ $# -lt 2 ]]; then
    echo "Missing value for $1" >&2
    usage
    exit 1
  fi
}

while [[ $# -gt 0 ]]; do
  case "$1" in
    -h|--help)
      usage
      exit 0
      ;;
    --quick)
      QUICK=1
      shift
      ;;
    --clients)
      need_value "$@"
      CLIENTS="$2"
      shift 2
      ;;
    --runs)
      need_value "$@"
      RUNS="$2"
      shift 2
      ;;
    --targets)
      need_value "$@"
      TARGETS="$2"
      shift 2
      ;;
    --datacenter)
      need_value "$@"
      DATACENTER="$2"
      shift 2
      ;;
    --server-type)
      need_value "$@"
      SERVER_TYPE="$2"
      shift 2
      ;;
    --client-type)
      need_value "$@"
      CLIENT_TYPE="$2"
      shift 2
      ;;
    --image)
      need_value "$@"
      PARRHESIA_IMAGE="$2"
      shift 2
      ;;
    --git-ref)
      need_value "$@"
      GIT_REF="$2"
      shift 2
      ;;
    --keep)
      KEEP=1
      shift
      ;;
    --)
      # Pass-through marker: everything after it goes to the orchestrator as-is.
      shift
      EXTRA_ARGS+=("$@")
      break
      ;;
    *)
      echo "Unknown argument: $1" >&2
      usage
      exit 1
      ;;
  esac
done
|
||||
|
||||
# --- Quick smoke profile -----------------------------------------------------
# One run from one client with low load. Each `: "${VAR:=default}"` line is a
# no-op command whose expansion assigns the default only when the variable is
# unset/empty — so explicit caller-provided bench knobs still win.
if [[ "$QUICK" == "1" ]]; then
RUNS=1
CLIENTS=1
: "${PARRHESIA_BENCH_CONNECT_COUNT:=20}"
: "${PARRHESIA_BENCH_CONNECT_RATE:=20}"
: "${PARRHESIA_BENCH_ECHO_COUNT:=20}"
: "${PARRHESIA_BENCH_ECHO_RATE:=20}"
: "${PARRHESIA_BENCH_ECHO_SIZE:=512}"
: "${PARRHESIA_BENCH_EVENT_COUNT:=20}"
: "${PARRHESIA_BENCH_EVENT_RATE:=20}"
: "${PARRHESIA_BENCH_REQ_COUNT:=20}"
: "${PARRHESIA_BENCH_REQ_RATE:=20}"
: "${PARRHESIA_BENCH_REQ_LIMIT:=10}"
: "${PARRHESIA_BENCH_KEEPALIVE_SECONDS:=2}"
fi
|
||||
|
||||
# --- Build the orchestrator command as an argv array -------------------------
# Using an array (rather than a string) keeps values with spaces intact when
# the command is executed below.
CMD=(
node scripts/cloud_bench_orchestrate.mjs
--datacenter "$DATACENTER"
--server-type "$SERVER_TYPE"
--client-type "$CLIENT_TYPE"
--clients "$CLIENTS"
--runs "$RUNS"
--targets "$TARGETS"
)

# A prebuilt image takes precedence; otherwise the orchestrator builds the
# Parrhesia image from the requested git ref.
if [[ -n "$PARRHESIA_IMAGE" ]]; then
CMD+=(--parrhesia-image "$PARRHESIA_IMAGE")
else
CMD+=(--git-ref "$GIT_REF")
fi

# Retain cloud resources after the run (useful for post-mortem debugging).
if [[ "$KEEP" == "1" ]]; then
CMD+=(--keep)
fi
|
||||
|
||||
# Forward each bench-knob environment variable the caller has set, turning
# PARRHESIA_BENCH_FOO_BAR=value into a `--foo-bar value` orchestrator flag.
BENCH_KNOBS=(
  PARRHESIA_BENCH_CONNECT_COUNT
  PARRHESIA_BENCH_CONNECT_RATE
  PARRHESIA_BENCH_ECHO_COUNT
  PARRHESIA_BENCH_ECHO_RATE
  PARRHESIA_BENCH_ECHO_SIZE
  PARRHESIA_BENCH_EVENT_COUNT
  PARRHESIA_BENCH_EVENT_RATE
  PARRHESIA_BENCH_REQ_COUNT
  PARRHESIA_BENCH_REQ_RATE
  PARRHESIA_BENCH_REQ_LIMIT
  PARRHESIA_BENCH_KEEPALIVE_SECONDS
)
for knob in "${BENCH_KNOBS[@]}"; do
  value="${!knob:-}"  # indirect expansion; empty when the knob is unset
  if [[ -n "$value" ]]; then
    # Strip the PARRHESIA_BENCH_ prefix, dash-ify, lowercase, and prepend --.
    flag="--$(printf '%s' "$knob" | sed -E 's/^PARRHESIA_BENCH_//; s/_/-/g' | tr '[:upper:]' '[:lower:]')"
    CMD+=("$flag" "$value")
  fi
done
|
||||
|
||||
# Append pass-through args. Guard the expansion: under `set -u`, expanding an
# EMPTY array via "${EXTRA_ARGS[@]}" aborts with "unbound variable" on
# bash < 4.4 (e.g. macOS's /bin/bash 3.2), which would kill every run that
# does not use `--`.
if [[ ${#EXTRA_ARGS[@]} -gt 0 ]]; then
  CMD+=("${EXTRA_ARGS[@]}")
fi

# Echo the exact command, shell-quoted (%q), so a failed run can be
# reproduced by copy-pasting it.
printf 'Running cloud bench:\n %q' "${CMD[0]}"
for ((i=1; i<${#CMD[@]}; i++)); do
  printf ' %q' "${CMD[$i]}"
done
printf '\n\n'

# Run the orchestrator; `set -e` propagates its exit status.
"${CMD[@]}"
|
||||
@@ -78,11 +78,23 @@ const [, , jsonOut, timestamp, machineId, gitTag, gitCommit, runsStr, historyFil
|
||||
const { versions, ...servers } = JSON.parse(fs.readFileSync(jsonOut, "utf8"));
|
||||
|
||||
const entry = {
|
||||
schema_version: 2,
|
||||
timestamp,
|
||||
run_id: `local-${timestamp}-${machineId}-${gitCommit}`,
|
||||
machine_id: machineId,
|
||||
git_tag: gitTag,
|
||||
git_commit: gitCommit,
|
||||
runs: Number(runsStr),
|
||||
source: {
|
||||
kind: "local",
|
||||
mode: "run_bench_collect",
|
||||
git_ref: gitTag,
|
||||
git_tag: gitTag,
|
||||
git_commit: gitCommit,
|
||||
},
|
||||
infra: {
|
||||
provider: "local",
|
||||
},
|
||||
versions: versions || {},
|
||||
servers,
|
||||
};
|
||||
|
||||
@@ -7,100 +7,178 @@ cd "$ROOT_DIR"
|
||||
usage() {
|
||||
cat <<'EOF'
|
||||
usage:
|
||||
./scripts/run_bench_update.sh [machine_id]
|
||||
./scripts/run_bench_update.sh [machine_id|all]
|
||||
./scripts/run_bench_update.sh --machine <machine_id|all> [--run-id <run_id>]
|
||||
./scripts/run_bench_update.sh --list
|
||||
|
||||
Regenerates bench/chart.svg and updates the benchmark table in README.md
|
||||
from collected data in bench/history.jsonl.
|
||||
|
||||
Arguments:
|
||||
machine_id Optional. Filter to a specific machine's data.
|
||||
Default: current machine (hostname -s)
|
||||
Use "all" to include all machines (will use latest entry per tag)
|
||||
|
||||
Examples:
|
||||
# Update chart for current machine
|
||||
./scripts/run_bench_update.sh
|
||||
|
||||
# Update chart for specific machine
|
||||
./scripts/run_bench_update.sh my-server
|
||||
|
||||
# Update chart using all machines (latest entry per tag wins)
|
||||
./scripts/run_bench_update.sh all
|
||||
Options:
|
||||
--machine <id|all> Filter by machine_id (default: hostname -s)
|
||||
--run-id <id> Filter to an exact run_id
|
||||
--history-file <path> History JSONL file (default: bench/history.jsonl)
|
||||
--list List available machines and runs, then exit
|
||||
-h, --help
|
||||
EOF
|
||||
}
|
||||
|
||||
if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then
|
||||
usage
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# --- Configuration -----------------------------------------------------------
|
||||
|
||||
BENCH_DIR="$ROOT_DIR/bench"
|
||||
HISTORY_FILE="$BENCH_DIR/history.jsonl"
|
||||
CHART_FILE="$BENCH_DIR/chart.svg"
|
||||
GNUPLOT_TEMPLATE="$BENCH_DIR/chart.gnuplot"
|
||||
README_FILE="$ROOT_DIR/README.md"
|
||||
|
||||
MACHINE_ID="${1:-$(hostname -s)}"
|
||||
MACHINE_ID="$(hostname -s)"
|
||||
RUN_ID=""
|
||||
LIST_ONLY=0
|
||||
POSITIONAL_MACHINE=""
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
-h|--help)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
--machine)
|
||||
MACHINE_ID="$2"
|
||||
shift 2
|
||||
;;
|
||||
--run-id)
|
||||
RUN_ID="$2"
|
||||
shift 2
|
||||
;;
|
||||
--history-file)
|
||||
HISTORY_FILE="$2"
|
||||
shift 2
|
||||
;;
|
||||
--list)
|
||||
LIST_ONLY=1
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
if [[ -z "$POSITIONAL_MACHINE" ]]; then
|
||||
POSITIONAL_MACHINE="$1"
|
||||
shift
|
||||
else
|
||||
echo "Unexpected argument: $1" >&2
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ -n "$POSITIONAL_MACHINE" ]]; then
|
||||
MACHINE_ID="$POSITIONAL_MACHINE"
|
||||
fi
|
||||
|
||||
if [[ ! -f "$HISTORY_FILE" ]]; then
|
||||
echo "Error: No history file found at $HISTORY_FILE" >&2
|
||||
echo "Run ./scripts/run_bench_collect.sh first to collect benchmark data" >&2
|
||||
echo "Run ./scripts/run_bench_collect.sh or ./scripts/run_bench_cloud.sh first" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "$LIST_ONLY" == "1" ]]; then
|
||||
node - "$HISTORY_FILE" <<'NODE'
|
||||
const fs = require("node:fs");
|
||||
|
||||
const [, , historyFile] = process.argv;
|
||||
|
||||
const entries = fs.readFileSync(historyFile, "utf8")
|
||||
.split("\n")
|
||||
.filter((l) => l.trim().length > 0)
|
||||
.map((l) => JSON.parse(l));
|
||||
|
||||
if (entries.length === 0) {
|
||||
console.log("No entries in history file.");
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
entries.sort((a, b) => b.timestamp.localeCompare(a.timestamp));
|
||||
|
||||
const machines = new Map();
|
||||
for (const e of entries) {
|
||||
const machineId = e.machine_id || "unknown";
|
||||
const prev = machines.get(machineId);
|
||||
if (!prev) {
|
||||
machines.set(machineId, { count: 1, latest: e });
|
||||
} else {
|
||||
prev.count += 1;
|
||||
if ((e.timestamp || "") > (prev.latest.timestamp || "")) prev.latest = e;
|
||||
}
|
||||
}
|
||||
|
||||
console.log("Machines:");
|
||||
console.log(" machine_id entries latest_timestamp latest_tag");
|
||||
for (const [machineId, info] of [...machines.entries()].sort((a, b) => a[0].localeCompare(b[0]))) {
|
||||
const id = machineId.padEnd(34, " ");
|
||||
const count = String(info.count).padStart(7, " ");
|
||||
const ts = (info.latest.timestamp || "").padEnd(24, " ");
|
||||
const tag = info.latest.git_tag || "";
|
||||
console.log(` ${id} ${count} ${ts} ${tag}`);
|
||||
}
|
||||
|
||||
console.log("\nRuns (newest first):");
|
||||
console.log(" timestamp run_id machine_id source git_tag targets");
|
||||
for (const e of entries) {
|
||||
const ts = (e.timestamp || "").slice(0, 19).padEnd(24, " ");
|
||||
const runId = (e.run_id || "").slice(0, 36).padEnd(36, " ");
|
||||
const machineId = (e.machine_id || "").slice(0, 24).padEnd(24, " ");
|
||||
const source = (e.source?.kind || "").padEnd(6, " ");
|
||||
const tag = (e.git_tag || "").slice(0, 16).padEnd(16, " ");
|
||||
const targets = (e.bench?.targets || Object.keys(e.servers || {})).join(",");
|
||||
console.log(` ${ts} ${runId} ${machineId} ${source} ${tag} ${targets}`);
|
||||
}
|
||||
NODE
|
||||
exit 0
|
||||
fi
|
||||
|
||||
WORK_DIR="$(mktemp -d)"
|
||||
trap 'rm -rf "$WORK_DIR"' EXIT
|
||||
|
||||
# --- Generate chart ----------------------------------------------------------
|
||||
echo "Generating chart (machine=$MACHINE_ID${RUN_ID:+, run_id=$RUN_ID})"
|
||||
|
||||
echo "Generating chart for machine: $MACHINE_ID"
|
||||
|
||||
node - "$HISTORY_FILE" "$MACHINE_ID" "$WORK_DIR" <<'NODE'
|
||||
if ! node - "$HISTORY_FILE" "$MACHINE_ID" "$RUN_ID" "$WORK_DIR" <<'NODE'
|
||||
const fs = require("node:fs");
|
||||
const path = require("node:path");
|
||||
|
||||
const [, , historyFile, machineId, workDir] = process.argv;
|
||||
|
||||
if (!fs.existsSync(historyFile)) {
|
||||
console.log(" no history file, skipping chart generation");
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
const lines = fs.readFileSync(historyFile, "utf8")
|
||||
.split("\n")
|
||||
.filter(l => l.trim().length > 0)
|
||||
.map(l => JSON.parse(l));
|
||||
|
||||
// Filter to selected machine(s)
|
||||
let entries;
|
||||
if (machineId === "all") {
|
||||
entries = lines;
|
||||
console.log(" using all machines");
|
||||
} else {
|
||||
entries = lines.filter(e => e.machine_id === machineId);
|
||||
console.log(" filtered to machine: " + machineId);
|
||||
}
|
||||
|
||||
if (entries.length === 0) {
|
||||
console.log(" no history entries for machine '" + machineId + "', skipping chart");
|
||||
process.exit(0);
|
||||
}
|
||||
|
||||
// Sort chronologically, deduplicate by tag (latest wins),
|
||||
// then order the resulting series by git tag.
|
||||
entries.sort((a, b) => a.timestamp.localeCompare(b.timestamp));
|
||||
const byTag = new Map();
|
||||
for (const e of entries) {
|
||||
byTag.set(e.git_tag, e);
|
||||
}
|
||||
const deduped = [...byTag.values()];
|
||||
const [, , historyFile, machineId, runId, workDir] = process.argv;
|
||||
|
||||
function parseSemverTag(tag) {
|
||||
const match = /^v?(\d+)\.(\d+)\.(\d+)$/.exec(tag);
|
||||
const match = /^v?(\d+)\.(\d+)\.(\d+)$/.exec(tag || "");
|
||||
return match ? match.slice(1).map(Number) : null;
|
||||
}
|
||||
|
||||
const all = fs.readFileSync(historyFile, "utf8")
|
||||
.split("\n")
|
||||
.filter((l) => l.trim().length > 0)
|
||||
.map((l) => JSON.parse(l));
|
||||
|
||||
let selected = all;
|
||||
if (runId && runId.length > 0) {
|
||||
selected = all.filter((e) => e.run_id === runId);
|
||||
console.log(` filtered by run_id: ${runId}`);
|
||||
} else if (machineId !== "all") {
|
||||
selected = all.filter((e) => e.machine_id === machineId);
|
||||
console.log(` filtered to machine: ${machineId}`);
|
||||
} else {
|
||||
console.log(" using all machines");
|
||||
}
|
||||
|
||||
if (selected.length === 0) {
|
||||
console.error(" no matching history entries");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
selected.sort((a, b) => (a.timestamp || "").localeCompare(b.timestamp || ""));
|
||||
|
||||
const byTag = new Map();
|
||||
for (const e of selected) {
|
||||
byTag.set(e.git_tag || "untagged", e);
|
||||
}
|
||||
const deduped = [...byTag.values()];
|
||||
|
||||
deduped.sort((a, b) => {
|
||||
const aTag = parseSemverTag(a.git_tag);
|
||||
const bTag = parseSemverTag(b.git_tag);
|
||||
@@ -109,24 +187,19 @@ deduped.sort((a, b) => {
|
||||
return aTag[0] - bTag[0] || aTag[1] - bTag[1] || aTag[2] - bTag[2];
|
||||
}
|
||||
|
||||
return a.git_tag.localeCompare(b.git_tag, undefined, { numeric: true });
|
||||
return (a.git_tag || "").localeCompare(b.git_tag || "", undefined, { numeric: true });
|
||||
});
|
||||
|
||||
// Determine which non-parrhesia servers are present
|
||||
const baselineServerNames = ["strfry", "nostr-rs-relay"];
|
||||
const presentBaselines = baselineServerNames.filter(srv =>
|
||||
deduped.some(e => e.servers[srv])
|
||||
);
|
||||
const presentBaselines = baselineServerNames.filter((srv) => deduped.some((e) => e.servers?.[srv]));
|
||||
|
||||
// Metrics to chart
|
||||
const chartMetrics = [
|
||||
{ key: "event_tps", label: "Event Throughput (TPS) — higher is better", file: "event_tps.tsv", ylabel: "TPS" },
|
||||
{ key: "req_tps", label: "Req Throughput (TPS) — higher is better", file: "req_tps.tsv", ylabel: "TPS" },
|
||||
{ key: "echo_tps", label: "Echo Throughput (TPS) — higher is better", file: "echo_tps.tsv", ylabel: "TPS" },
|
||||
{ key: "connect_avg_ms", label: "Connect Avg Latency (ms) — lower is better", file: "connect_avg_ms.tsv", ylabel: "ms" },
|
||||
{ key: "event_tps", label: "Event Throughput (TPS) — higher is better", file: "event_tps.tsv", ylabel: "TPS" },
|
||||
{ key: "req_tps", label: "Req Throughput (TPS) — higher is better", file: "req_tps.tsv", ylabel: "TPS" },
|
||||
{ key: "echo_tps", label: "Echo Throughput (TPS) — higher is better", file: "echo_tps.tsv", ylabel: "TPS" },
|
||||
{ key: "connect_avg_ms", label: "Connect Avg Latency (ms) — lower is better", file: "connect_avg_ms.tsv", ylabel: "ms" },
|
||||
];
|
||||
|
||||
// Write per-metric TSV files
|
||||
for (const cm of chartMetrics) {
|
||||
const header = ["tag", "parrhesia-pg", "parrhesia-memory"];
|
||||
for (const srv of presentBaselines) header.push(srv);
|
||||
@@ -134,12 +207,12 @@ for (const cm of chartMetrics) {
|
||||
const rows = [header.join("\t")];
|
||||
for (const e of deduped) {
|
||||
const row = [
|
||||
e.git_tag,
|
||||
e.servers["parrhesia-pg"]?.[cm.key] ?? "NaN",
|
||||
e.servers["parrhesia-memory"]?.[cm.key] ?? "NaN",
|
||||
e.git_tag || "untagged",
|
||||
e.servers?.["parrhesia-pg"]?.[cm.key] ?? "NaN",
|
||||
e.servers?.["parrhesia-memory"]?.[cm.key] ?? "NaN",
|
||||
];
|
||||
for (const srv of presentBaselines) {
|
||||
row.push(e.servers[srv]?.[cm.key] ?? "NaN");
|
||||
row.push(e.servers?.[srv]?.[cm.key] ?? "NaN");
|
||||
}
|
||||
rows.push(row.join("\t"));
|
||||
}
|
||||
@@ -147,7 +220,6 @@ for (const cm of chartMetrics) {
|
||||
fs.writeFileSync(path.join(workDir, cm.file), rows.join("\n") + "\n", "utf8");
|
||||
}
|
||||
|
||||
// Generate gnuplot plot commands (handles variable column counts)
|
||||
const serverLabels = ["parrhesia-pg", "parrhesia-memory"];
|
||||
for (const srv of presentBaselines) serverLabels.push(srv);
|
||||
|
||||
@@ -158,10 +230,9 @@ for (const cm of chartMetrics) {
|
||||
plotLines.push(`set ylabel "${cm.ylabel}"`);
|
||||
|
||||
const plotParts = [];
|
||||
// Column 2 = parrhesia-pg, 3 = parrhesia-memory, 4+ = baselines
|
||||
plotParts.push(`${dataFile} using 0:2:xtic(1) lt 1 title "${serverLabels[0]}"`);
|
||||
plotParts.push(`'' using 0:3 lt 2 title "${serverLabels[1]}"`);
|
||||
for (let i = 0; i < presentBaselines.length; i++) {
|
||||
for (let i = 0; i < presentBaselines.length; i += 1) {
|
||||
plotParts.push(`'' using 0:${4 + i} lt ${3 + i} title "${serverLabels[2 + i]}"`);
|
||||
}
|
||||
|
||||
@@ -169,14 +240,22 @@ for (const cm of chartMetrics) {
|
||||
plotLines.push("");
|
||||
}
|
||||
|
||||
fs.writeFileSync(
|
||||
path.join(workDir, "plot_commands.gnuplot"),
|
||||
plotLines.join("\n") + "\n",
|
||||
"utf8"
|
||||
);
|
||||
fs.writeFileSync(path.join(workDir, "plot_commands.gnuplot"), plotLines.join("\n") + "\n", "utf8");
|
||||
|
||||
console.log(" " + deduped.length + " tag(s), " + presentBaselines.length + " baseline server(s)");
|
||||
const latestForReadme = [...selected]
|
||||
.sort((a, b) => (b.timestamp || "").localeCompare(a.timestamp || ""))
|
||||
.find((e) => e.servers?.["parrhesia-pg"] && e.servers?.["parrhesia-memory"]);
|
||||
|
||||
if (latestForReadme) {
|
||||
fs.writeFileSync(path.join(workDir, "latest_entry.json"), JSON.stringify(latestForReadme), "utf8");
|
||||
}
|
||||
|
||||
console.log(` selected=${selected.length}, series_tags=${deduped.length}, baselines=${presentBaselines.length}`);
|
||||
NODE
|
||||
then
|
||||
echo "No matching data for chart/update" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -f "$WORK_DIR/plot_commands.gnuplot" ]]; then
|
||||
gnuplot \
|
||||
@@ -185,52 +264,26 @@ if [[ -f "$WORK_DIR/plot_commands.gnuplot" ]]; then
|
||||
"$GNUPLOT_TEMPLATE"
|
||||
echo " chart written to $CHART_FILE"
|
||||
else
|
||||
echo " chart generation skipped (no data for this machine)"
|
||||
exit 0
|
||||
echo " chart generation skipped"
|
||||
fi
|
||||
|
||||
# --- Update README.md -------------------------------------------------------
|
||||
|
||||
echo "Updating README.md with latest benchmark..."
|
||||
|
||||
# Find the most recent entry for this machine
|
||||
LATEST_ENTRY=$(node - "$HISTORY_FILE" "$MACHINE_ID" <<'NODE'
|
||||
const fs = require("node:fs");
|
||||
const [, , historyFile, machineId] = process.argv;
|
||||
|
||||
const lines = fs.readFileSync(historyFile, "utf8")
|
||||
.split("\n")
|
||||
.filter(l => l.trim().length > 0)
|
||||
.map(l => JSON.parse(l));
|
||||
|
||||
let entries;
|
||||
if (machineId === "all") {
|
||||
entries = lines;
|
||||
} else {
|
||||
entries = lines.filter(e => e.machine_id === machineId);
|
||||
}
|
||||
|
||||
if (entries.length === 0) {
|
||||
console.error("No entries found for machine: " + machineId);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Get latest entry
|
||||
entries.sort((a, b) => b.timestamp.localeCompare(a.timestamp));
|
||||
console.log(JSON.stringify(entries[0]));
|
||||
NODE
|
||||
)
|
||||
|
||||
if [[ -z "$LATEST_ENTRY" ]]; then
|
||||
echo "Warning: Could not find latest entry, skipping README update" >&2
|
||||
if [[ ! -f "$WORK_DIR/latest_entry.json" ]]; then
|
||||
echo "Warning: no selected entry contains both parrhesia-pg and parrhesia-memory; skipping README table update" >&2
|
||||
echo
|
||||
echo "Benchmark rendering complete. Files updated:"
|
||||
echo " $CHART_FILE"
|
||||
echo
|
||||
exit 0
|
||||
fi
|
||||
|
||||
node - "$LATEST_ENTRY" "$ROOT_DIR/README.md" <<'NODE'
|
||||
LATEST_ENTRY="$(cat "$WORK_DIR/latest_entry.json")"
|
||||
|
||||
node - "$LATEST_ENTRY" "$README_FILE" <<'NODE'
|
||||
const fs = require("node:fs");
|
||||
|
||||
const [, , entryJson, readmePath] = process.argv;
|
||||
|
||||
const entry = JSON.parse(entryJson);
|
||||
const servers = entry.servers || {};
|
||||
|
||||
@@ -240,11 +293,7 @@ const strfry = servers["strfry"];
|
||||
const nostrRs = servers["nostr-rs-relay"];
|
||||
|
||||
if (!pg || !mem) {
|
||||
const present = Object.keys(servers).sort().join(", ") || "(none)";
|
||||
console.error(
|
||||
"Latest benchmark entry must include parrhesia-pg and parrhesia-memory. Present servers: " +
|
||||
present
|
||||
);
|
||||
console.error("Selected entry is missing parrhesia-pg or parrhesia-memory");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
@@ -259,26 +308,26 @@ function ratio(base, other) {
|
||||
|
||||
function boldIf(ratioStr, lowerIsBetter) {
|
||||
if (ratioStr === "n/a") return ratioStr;
|
||||
const num = parseFloat(ratioStr);
|
||||
const num = Number.parseFloat(ratioStr);
|
||||
if (!Number.isFinite(num)) return ratioStr;
|
||||
const better = lowerIsBetter ? num < 1 : num > 1;
|
||||
return better ? "**" + ratioStr + "**" : ratioStr;
|
||||
return better ? `**${ratioStr}**` : ratioStr;
|
||||
}
|
||||
|
||||
const metricRows = [
|
||||
["connect avg latency (ms) ↓", "connect_avg_ms", true],
|
||||
["connect max latency (ms) ↓", "connect_max_ms", true],
|
||||
["echo throughput (TPS) ↑", "echo_tps", false],
|
||||
["echo throughput (MiB/s) ↑", "echo_mibs", false],
|
||||
["event throughput (TPS) ↑", "event_tps", false],
|
||||
["event throughput (MiB/s) ↑", "event_mibs", false],
|
||||
["req throughput (TPS) ↑", "req_tps", false],
|
||||
["req throughput (MiB/s) ↑", "req_mibs", false],
|
||||
["echo throughput (TPS) ↑", "echo_tps", false],
|
||||
["echo throughput (MiB/s) ↑", "echo_mibs", false],
|
||||
["event throughput (TPS) ↑", "event_tps", false],
|
||||
["event throughput (MiB/s) ↑", "event_mibs", false],
|
||||
["req throughput (TPS) ↑", "req_tps", false],
|
||||
["req throughput (MiB/s) ↑", "req_mibs", false],
|
||||
];
|
||||
|
||||
const hasStrfry = !!strfry;
|
||||
const hasNostrRs = !!nostrRs;
|
||||
|
||||
// Build header
|
||||
const header = ["metric", "parrhesia-pg", "parrhesia-mem"];
|
||||
if (hasStrfry) header.push("strfry");
|
||||
if (hasNostrRs) header.push("nostr-rs-relay");
|
||||
@@ -287,7 +336,7 @@ if (hasStrfry) header.push("strfry/pg");
|
||||
if (hasNostrRs) header.push("nostr-rs/pg");
|
||||
|
||||
const alignRow = ["---"];
|
||||
for (let i = 1; i < header.length; i++) alignRow.push("---:");
|
||||
for (let i = 1; i < header.length; i += 1) alignRow.push("---:");
|
||||
|
||||
const rows = metricRows.map(([label, key, lowerIsBetter]) => {
|
||||
const row = [label, toFixed(pg[key]), toFixed(mem[key])];
|
||||
@@ -304,13 +353,12 @@ const rows = metricRows.map(([label, key, lowerIsBetter]) => {
|
||||
const tableLines = [
|
||||
"| " + header.join(" | ") + " |",
|
||||
"| " + alignRow.join(" | ") + " |",
|
||||
...rows.map(r => "| " + r.join(" | ") + " |"),
|
||||
...rows.map((r) => "| " + r.join(" | ") + " |"),
|
||||
];
|
||||
|
||||
// Replace the first markdown table in the ## Benchmark section
|
||||
const readme = fs.readFileSync(readmePath, "utf8");
|
||||
const readmeLines = readme.split("\n");
|
||||
const benchIdx = readmeLines.findIndex(l => /^## Benchmark/.test(l));
|
||||
const lines = readme.split("\n");
|
||||
const benchIdx = lines.findIndex((l) => /^## Benchmark/.test(l));
|
||||
if (benchIdx === -1) {
|
||||
console.error("Could not find '## Benchmark' section in README.md");
|
||||
process.exit(1);
|
||||
@@ -318,8 +366,8 @@ if (benchIdx === -1) {
|
||||
|
||||
let tableStart = -1;
|
||||
let tableEnd = -1;
|
||||
for (let i = benchIdx + 1; i < readmeLines.length; i++) {
|
||||
if (readmeLines[i].startsWith("|")) {
|
||||
for (let i = benchIdx + 1; i < lines.length; i += 1) {
|
||||
if (lines[i].startsWith("|")) {
|
||||
if (tableStart === -1) tableStart = i;
|
||||
tableEnd = i;
|
||||
} else if (tableStart !== -1) {
|
||||
@@ -332,19 +380,19 @@ if (tableStart === -1) {
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const before = readmeLines.slice(0, tableStart);
|
||||
const after = readmeLines.slice(tableEnd + 1);
|
||||
const updated = [...before, ...tableLines, ...after].join("\n");
|
||||
const updated = [
|
||||
...lines.slice(0, tableStart),
|
||||
...tableLines,
|
||||
...lines.slice(tableEnd + 1),
|
||||
].join("\n");
|
||||
|
||||
fs.writeFileSync(readmePath, updated, "utf8");
|
||||
console.log(" table updated (" + tableLines.length + " rows)");
|
||||
console.log(` table updated (${tableLines.length} rows)`);
|
||||
NODE
|
||||
|
||||
# --- Done ---------------------------------------------------------------------
|
||||
|
||||
echo
|
||||
echo "Benchmark rendering complete. Files updated:"
|
||||
echo " $CHART_FILE"
|
||||
echo " $ROOT_DIR/README.md"
|
||||
echo " $README_FILE"
|
||||
echo
|
||||
echo "Review with: git diff"
|
||||
|
||||
Reference in New Issue
Block a user