#!/usr/bin/env node import fs from "node:fs"; import os from "node:os"; import path from "node:path"; import { spawn } from "node:child_process"; import readline from "node:readline"; import { fileURLToPath } from "node:url"; const __filename = fileURLToPath(import.meta.url); const __dirname = path.dirname(__filename); const ROOT_DIR = path.resolve(__dirname, ".."); const DEFAULT_TARGETS = ["parrhesia-pg", "parrhesia-memory", "strfry", "nostr-rs-relay", "nostream", "haven"]; const ESTIMATE_WINDOW_MINUTES = 30; const ESTIMATE_WINDOW_HOURS = ESTIMATE_WINDOW_MINUTES / 60; const ESTIMATE_WINDOW_LABEL = `${ESTIMATE_WINDOW_MINUTES}m`; const BENCH_BUILD_DIR = path.join(ROOT_DIR, "_build", "bench"); const DEFAULTS = { datacenter: "fsn1-dc14", serverType: "cx23", clientType: "cx23", imageBase: "ubuntu-24.04", clients: 3, runs: 3, targets: DEFAULT_TARGETS, historyFile: "bench/history.jsonl", artifactsDir: "bench/cloud_artifacts", gitRef: "HEAD", parrhesiaImage: null, postgresImage: "postgres:17", strfryImage: "ghcr.io/hoytech/strfry:latest", nostrRsImage: "scsibug/nostr-rs-relay:latest", nostreamRepo: "https://github.com/Cameri/nostream.git", nostreamRef: "main", havenImage: "holgerhatgarkeinenode/haven-docker:latest", keep: false, bench: { connectCount: 200, connectRate: 100, echoCount: 100, echoRate: 50, echoSize: 512, eventCount: 100, eventRate: 50, reqCount: 100, reqRate: 50, reqLimit: 10, keepaliveSeconds: 5, }, }; function usage() { console.log(`usage: node scripts/cloud_bench_orchestrate.mjs [options] Creates one server node + N client nodes on Hetzner Cloud, runs nostr-bench in parallel from clients against selected relay targets, stores raw client logs in bench/cloud_artifacts//, and appends metadata + pointers to bench/history.jsonl. 
Options: --datacenter Initial datacenter selection (default: ${DEFAULTS.datacenter}) --server-type (default: ${DEFAULTS.serverType}) --client-type (default: ${DEFAULTS.clientType}) --image-base (default: ${DEFAULTS.imageBase}) --clients (default: ${DEFAULTS.clients}) --runs (default: ${DEFAULTS.runs}) --targets (default: ${DEFAULT_TARGETS.join(",")}) Source selection (choose one style): --parrhesia-image Use remote image tag directly (e.g. ghcr.io/...) --git-ref Build local nix docker archive from git ref (default: HEAD) Images for comparison targets: --postgres-image (default: ${DEFAULTS.postgresImage}) --strfry-image (default: ${DEFAULTS.strfryImage}) --nostr-rs-image (default: ${DEFAULTS.nostrRsImage}) --nostream-repo (default: ${DEFAULTS.nostreamRepo}) --nostream-ref (default: ${DEFAULTS.nostreamRef}) --haven-image (default: ${DEFAULTS.havenImage}) Benchmark knobs: --connect-count (default: ${DEFAULTS.bench.connectCount}) --connect-rate (default: ${DEFAULTS.bench.connectRate}) --echo-count (default: ${DEFAULTS.bench.echoCount}) --echo-rate (default: ${DEFAULTS.bench.echoRate}) --echo-size (default: ${DEFAULTS.bench.echoSize}) --event-count (default: ${DEFAULTS.bench.eventCount}) --event-rate (default: ${DEFAULTS.bench.eventRate}) --req-count (default: ${DEFAULTS.bench.reqCount}) --req-rate (default: ${DEFAULTS.bench.reqRate}) --req-limit (default: ${DEFAULTS.bench.reqLimit}) --keepalive-seconds (default: ${DEFAULTS.bench.keepaliveSeconds}) Output + lifecycle: --history-file (default: ${DEFAULTS.historyFile}) --artifacts-dir (default: ${DEFAULTS.artifactsDir}) --keep Keep cloud resources (no cleanup) -h, --help Notes: - Requires hcloud, ssh, scp, ssh-keygen, git. - Before provisioning, checks all datacenters for type availability and estimates ${ESTIMATE_WINDOW_LABEL} cost. - In interactive terminals, prompts you to pick + confirm the datacenter. - Caches built nostr-bench at _build/bench/nostr-bench and reuses it when valid. 
- Auto-tunes Postgres/Redis/app pool sizing from server RAM + CPU for DB-backed targets. - Randomizes target order per run and wipes persisted target data directories on each start. - Tries nix .#nostrBenchStaticX86_64Musl first; falls back to docker-built portable nostr-bench. - If --parrhesia-image is omitted, requires nix locally. `); } function parseArgs(argv) { const opts = JSON.parse(JSON.stringify(DEFAULTS)); const intOpt = (name, value) => { const n = Number(value); if (!Number.isInteger(n) || n < 1) { throw new Error(`${name} must be a positive integer, got: ${value}`); } return n; }; for (let i = 0; i < argv.length; i += 1) { const arg = argv[i]; switch (arg) { case "-h": case "--help": usage(); process.exit(0); break; case "--datacenter": opts.datacenter = argv[++i]; break; case "--server-type": opts.serverType = argv[++i]; break; case "--client-type": opts.clientType = argv[++i]; break; case "--image-base": opts.imageBase = argv[++i]; break; case "--clients": opts.clients = intOpt(arg, argv[++i]); break; case "--runs": opts.runs = intOpt(arg, argv[++i]); break; case "--targets": opts.targets = argv[++i] .split(",") .map((s) => s.trim()) .filter(Boolean); break; case "--parrhesia-image": opts.parrhesiaImage = argv[++i]; break; case "--git-ref": opts.gitRef = argv[++i]; break; case "--postgres-image": opts.postgresImage = argv[++i]; break; case "--strfry-image": opts.strfryImage = argv[++i]; break; case "--nostr-rs-image": opts.nostrRsImage = argv[++i]; break; case "--nostream-repo": opts.nostreamRepo = argv[++i]; break; case "--nostream-ref": opts.nostreamRef = argv[++i]; break; case "--haven-image": opts.havenImage = argv[++i]; break; case "--connect-count": opts.bench.connectCount = intOpt(arg, argv[++i]); break; case "--connect-rate": opts.bench.connectRate = intOpt(arg, argv[++i]); break; case "--echo-count": opts.bench.echoCount = intOpt(arg, argv[++i]); break; case "--echo-rate": opts.bench.echoRate = intOpt(arg, argv[++i]); break; case 
"--echo-size": opts.bench.echoSize = intOpt(arg, argv[++i]); break; case "--event-count": opts.bench.eventCount = intOpt(arg, argv[++i]); break; case "--event-rate": opts.bench.eventRate = intOpt(arg, argv[++i]); break; case "--req-count": opts.bench.reqCount = intOpt(arg, argv[++i]); break; case "--req-rate": opts.bench.reqRate = intOpt(arg, argv[++i]); break; case "--req-limit": opts.bench.reqLimit = intOpt(arg, argv[++i]); break; case "--keepalive-seconds": opts.bench.keepaliveSeconds = intOpt(arg, argv[++i]); break; case "--history-file": opts.historyFile = argv[++i]; break; case "--artifacts-dir": opts.artifactsDir = argv[++i]; break; case "--keep": opts.keep = true; break; default: throw new Error(`Unknown argument: ${arg}`); } } if (!opts.targets.length) { throw new Error("--targets must include at least one target"); } for (const t of opts.targets) { if (!DEFAULT_TARGETS.includes(t)) { throw new Error(`invalid target: ${t} (valid: ${DEFAULT_TARGETS.join(", ")})`); } } return opts; } function shellEscape(value) { return `'${String(value).replace(/'/g, `'"'"'`)}'`; } function shuffled(values) { const out = [...values]; for (let i = out.length - 1; i > 0; i -= 1) { const j = Math.floor(Math.random() * (i + 1)); [out[i], out[j]] = [out[j], out[i]]; } return out; } function commandExists(cmd) { const pathEnv = process.env.PATH || ""; for (const dir of pathEnv.split(":")) { if (!dir) continue; const full = path.join(dir, cmd); try { fs.accessSync(full, fs.constants.X_OK); return true; } catch { // ignore } } return false; } function runCommand(command, args = [], options = {}) { const { cwd = ROOT_DIR, env = process.env, stdio = "pipe" } = options; return new Promise((resolve, reject) => { const child = spawn(command, args, { cwd, env, stdio }); let stdout = ""; let stderr = ""; if (child.stdout) { child.stdout.on("data", (chunk) => { stdout += chunk.toString(); }); } if (child.stderr) { child.stderr.on("data", (chunk) => { stderr += chunk.toString(); }); } 
child.on("error", (error) => { reject(error); }); child.on("close", (code) => { if (code === 0) { resolve({ code, stdout, stderr }); } else { const error = new Error( `Command failed (${code}): ${command} ${args.map((a) => shellEscape(a)).join(" ")}`, ); error.code = code; error.stdout = stdout; error.stderr = stderr; reject(error); } }); }); } async function sshExec(hostIp, keyPath, remoteCommand, options = {}) { return runCommand( "ssh", [ "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null", "-o", "LogLevel=ERROR", "-o", "BatchMode=yes", "-o", "ConnectTimeout=8", "-i", keyPath, `root@${hostIp}`, remoteCommand, ], options, ); } async function scpToHost(hostIp, keyPath, localPath, remotePath) { await runCommand("scp", [ "-o", "StrictHostKeyChecking=no", "-o", "UserKnownHostsFile=/dev/null", "-o", "LogLevel=ERROR", "-i", keyPath, localPath, `root@${hostIp}:${remotePath}`, ]); } async function waitForSsh(hostIp, keyPath, attempts = 60) { for (let i = 1; i <= attempts; i += 1) { try { await sshExec(hostIp, keyPath, "echo ready >/dev/null"); return; } catch { await new Promise((r) => setTimeout(r, 2000)); } } throw new Error(`SSH not ready after ${attempts} attempts: ${hostIp}`); } async function ensureLocalPrereqs(opts) { const required = ["hcloud", "ssh", "scp", "ssh-keygen", "git", "docker", "file"]; const needsParrhesia = opts.targets.includes("parrhesia-pg") || opts.targets.includes("parrhesia-memory"); if (needsParrhesia && !opts.parrhesiaImage) { required.push("nix"); } for (const cmd of required) { if (!commandExists(cmd)) { throw new Error(`Required command not found in PATH: ${cmd}`); } } } function priceForLocation(serverType, locationName, key) { const price = serverType.prices?.find((entry) => entry.location === locationName); const value = Number(price?.price_hourly?.[key]); if (!Number.isFinite(value)) { return null; } return value; } function formatEuro(value) { if (!Number.isFinite(value)) { return "n/a"; } return 
`€${value.toFixed(4)}`; } function compatibleDatacenterChoices(datacenters, serverType, clientType, clientCount) { const compatible = []; for (const dc of datacenters) { const availableIds = dc?.server_types?.available || dc?.server_types?.supported || []; if (!availableIds.includes(serverType.id) || !availableIds.includes(clientType.id)) { continue; } const locationName = dc.location?.name; const serverGrossHourly = priceForLocation(serverType, locationName, "gross"); const clientGrossHourly = priceForLocation(clientType, locationName, "gross"); const serverNetHourly = priceForLocation(serverType, locationName, "net"); const clientNetHourly = priceForLocation(clientType, locationName, "net"); const totalGrossHourly = Number.isFinite(serverGrossHourly) && Number.isFinite(clientGrossHourly) ? serverGrossHourly + clientGrossHourly * clientCount : null; const totalNetHourly = Number.isFinite(serverNetHourly) && Number.isFinite(clientNetHourly) ? serverNetHourly + clientNetHourly * clientCount : null; compatible.push({ name: dc.name, description: dc.description, location: { name: locationName, city: dc.location?.city, country: dc.location?.country, }, totalHourly: { gross: totalGrossHourly, net: totalNetHourly, }, estimatedTotal: { gross: Number.isFinite(totalGrossHourly) ? totalGrossHourly * ESTIMATE_WINDOW_HOURS : null, net: Number.isFinite(totalNetHourly) ? totalNetHourly * ESTIMATE_WINDOW_HOURS : null, }, }); } compatible.sort((a, b) => { const aPrice = Number.isFinite(a.estimatedTotal.gross) ? a.estimatedTotal.gross : Number.POSITIVE_INFINITY; const bPrice = Number.isFinite(b.estimatedTotal.gross) ? 
b.estimatedTotal.gross : Number.POSITIVE_INFINITY; if (aPrice !== bPrice) { return aPrice - bPrice; } return a.name.localeCompare(b.name); }); return compatible; } function printDatacenterChoices(choices, opts) { console.log("[plan] datacenter availability for requested server/client types"); console.log( `[plan] requested: server=${opts.serverType}, client=${opts.clientType}, clients=${opts.clients}, estimate window=${ESTIMATE_WINDOW_LABEL}`, ); choices.forEach((choice, index) => { const where = `${choice.location.name} (${choice.location.city}, ${choice.location.country})`; console.log( ` [${index + 1}] ${choice.name.padEnd(10)} ${where} ${ESTIMATE_WINDOW_LABEL} est gross=${formatEuro(choice.estimatedTotal.gross)} net=${formatEuro(choice.estimatedTotal.net)}`, ); }); } function askLine(prompt) { return new Promise((resolve) => { const rl = readline.createInterface({ input: process.stdin, output: process.stdout }); rl.question(prompt, (answer) => { rl.close(); resolve(answer.trim()); }); }); } async function chooseDatacenter(opts) { const [dcRes, serverTypeRes] = await Promise.all([ runCommand("hcloud", ["datacenter", "list", "-o", "json"]), runCommand("hcloud", ["server-type", "list", "-o", "json"]), ]); const datacenters = JSON.parse(dcRes.stdout); const serverTypes = JSON.parse(serverTypeRes.stdout); const serverType = serverTypes.find((type) => type.name === opts.serverType); if (!serverType) { throw new Error(`Unknown server type: ${opts.serverType}`); } const clientType = serverTypes.find((type) => type.name === opts.clientType); if (!clientType) { throw new Error(`Unknown client type: ${opts.clientType}`); } const choices = compatibleDatacenterChoices(datacenters, serverType, clientType, opts.clients); if (choices.length === 0) { throw new Error( `No datacenter has both server type ${opts.serverType} and client type ${opts.clientType} available right now`, ); } printDatacenterChoices(choices, opts); const defaultChoice = choices.find((choice) => choice.name 
=== opts.datacenter) || choices[0]; if (!process.stdin.isTTY || !process.stdout.isTTY) { if (!choices.some((choice) => choice.name === opts.datacenter)) { throw new Error( `Requested datacenter ${opts.datacenter} is not currently compatible. Compatible: ${choices .map((choice) => choice.name) .join(", ")}`, ); } console.log( `[plan] non-interactive mode: using datacenter ${opts.datacenter} (${ESTIMATE_WINDOW_LABEL} est gross=${formatEuro(defaultChoice.estimatedTotal.gross)} net=${formatEuro(defaultChoice.estimatedTotal.net)})`, ); return defaultChoice; } const defaultIndex = choices.findIndex((choice) => choice.name === defaultChoice.name) + 1; let selected = defaultChoice; while (true) { const response = await askLine( `Select datacenter by number or name [default: ${defaultIndex}/${defaultChoice.name}] (or 'abort'): `, ); if (response === "") { selected = defaultChoice; break; } const normalized = response.trim().toLowerCase(); if (["a", "abort", "q", "quit", "n"].includes(normalized)) { throw new Error("Aborted by user before provisioning"); } if (/^\d+$/.test(response)) { const idx = Number(response); if (idx >= 1 && idx <= choices.length) { selected = choices[idx - 1]; break; } } const byName = choices.find((choice) => choice.name.toLowerCase() === normalized); if (byName) { selected = byName; break; } console.log(`Invalid selection: ${response}`); } const confirm = await askLine( `Provision in ${selected.name} (${ESTIMATE_WINDOW_LABEL} est gross=${formatEuro(selected.estimatedTotal.gross)} net=${formatEuro(selected.estimatedTotal.net)})? 
[y/N]: `, ); if (!["y", "yes"].includes(confirm.trim().toLowerCase())) { throw new Error("Aborted by user before provisioning"); } return selected; } async function buildNostrBenchBinary(tmpDir) { const cacheDir = BENCH_BUILD_DIR; const cachedBinaryPath = path.join(cacheDir, "nostr-bench"); const cacheMetadataPath = path.join(cacheDir, "nostr-bench.json"); fs.mkdirSync(cacheDir, { recursive: true }); const staticLinked = (fileOutput) => fileOutput.includes("statically linked") || fileOutput.includes("static-pie linked"); const binaryLooksPortable = (fileOutput) => fileOutput.includes("/lib64/ld-linux-x86-64.so.2") || staticLinked(fileOutput); const validatePortableBinary = async (binaryPath) => { const fileOut = await runCommand("file", [binaryPath]); if (!binaryLooksPortable(fileOut.stdout)) { throw new Error(`Built nostr-bench binary does not look portable: ${fileOut.stdout.trim()}`); } return fileOut.stdout.trim(); }; const readCacheMetadata = () => { if (!fs.existsSync(cacheMetadataPath)) { return null; } try { return JSON.parse(fs.readFileSync(cacheMetadataPath, "utf8")); } catch { return null; } }; const writeCacheMetadata = (metadata) => { fs.writeFileSync(cacheMetadataPath, `${JSON.stringify(metadata, null, 2)}\n`, "utf8"); }; const readVersionIfRunnable = async (binaryPath, fileSummary, phase) => { const binaryIsX86_64 = /x86-64|x86_64/i.test(fileSummary); if (binaryIsX86_64 && process.arch !== "x64") { console.log( `[local] skipping nostr-bench --version check (${phase}): host arch ${process.arch} cannot execute x86_64 binary`, ); return ""; } try { return (await runCommand(binaryPath, ["--version"])).stdout.trim(); } catch (error) { console.warn(`[local] unable to run nostr-bench --version (${phase}), continuing: ${error.message}`); return ""; } }; const tryReuseCachedBinary = async () => { if (!fs.existsSync(cachedBinaryPath)) { return null; } try { const fileSummary = await validatePortableBinary(cachedBinaryPath); fs.chmodSync(cachedBinaryPath, 
0o755); const version = await readVersionIfRunnable(cachedBinaryPath, fileSummary, "cache-reuse"); const metadata = readCacheMetadata(); console.log(`[local] reusing cached nostr-bench: ${cachedBinaryPath}`); if (metadata?.build_mode) { console.log(`[local] cache metadata: build_mode=${metadata.build_mode}, built_at=${metadata.built_at || "unknown"}`); } if (version) { console.log(`[local] ${version}`); } console.log(`[local] ${fileSummary}`); return { path: cachedBinaryPath, buildMode: "cache-reuse" }; } catch (error) { console.warn(`[local] cached nostr-bench invalid, rebuilding: ${error.message}`); return null; } }; const cacheAndValidateBinary = async (binaryPath, buildMode) => { await validatePortableBinary(binaryPath); fs.copyFileSync(binaryPath, cachedBinaryPath); fs.chmodSync(cachedBinaryPath, 0o755); const copiedFileOut = await runCommand("file", [cachedBinaryPath]); const version = await readVersionIfRunnable(cachedBinaryPath, copiedFileOut.stdout.trim(), "post-build"); writeCacheMetadata({ build_mode: buildMode, built_at: new Date().toISOString(), binary_path: cachedBinaryPath, file_summary: copiedFileOut.stdout.trim(), version, }); console.log(`[local] nostr-bench ready (${buildMode}): ${cachedBinaryPath}`); if (version) { console.log(`[local] ${version}`); } console.log(`[local] ${copiedFileOut.stdout.trim()}`); return { path: cachedBinaryPath, buildMode }; }; const cachedBinary = await tryReuseCachedBinary(); if (cachedBinary) { return cachedBinary; } if (commandExists("nix")) { try { console.log("[local] building nostr-bench static binary via nix flake output .#nostrBenchStaticX86_64Musl..."); const nixOut = ( await runCommand("nix", ["build", ".#nostrBenchStaticX86_64Musl", "--print-out-paths", "--no-link"], { cwd: ROOT_DIR, }) ).stdout.trim(); if (!nixOut) { throw new Error("nix build did not return an output path"); } const binaryPath = path.join(nixOut, "bin", "nostr-bench"); return await cacheAndValidateBinary(binaryPath, 
"nix-flake-musl-static"); } catch (error) { console.warn(`[local] nix static build unavailable, falling back to docker build: ${error.message}`); } } const srcDir = path.join(tmpDir, "nostr-bench-src"); console.log("[local] cloning nostr-bench source for docker fallback..."); await runCommand("git", ["clone", "--depth", "1", "https://github.com/rnostr/nostr-bench.git", srcDir], { stdio: "inherit", }); const binaryPath = path.join(srcDir, "target", "release", "nostr-bench"); console.log("[local] building portable glibc binary in rust:1-bookworm..."); await runCommand( "docker", [ "run", "--rm", "-v", `${srcDir}:/src`, "-w", "/src", "rust:1-bookworm", "bash", "-lc", "export PATH=/usr/local/cargo/bin:$PATH; apt-get update -qq >/dev/null; apt-get install -y -qq pkg-config build-essential >/dev/null; cargo build --release", ], { stdio: "inherit" }, ); return await cacheAndValidateBinary(binaryPath, "docker-glibc-portable"); } async function buildParrhesiaArchiveIfNeeded(opts, tmpDir) { if (opts.parrhesiaImage) { return { mode: "remote-image", image: opts.parrhesiaImage, archivePath: null, gitRef: null, gitCommit: null, }; } const resolved = (await runCommand("git", ["rev-parse", "--verify", opts.gitRef], { cwd: ROOT_DIR })).stdout.trim(); let buildDir = ROOT_DIR; let worktreeDir = null; if (opts.gitRef !== "HEAD") { worktreeDir = path.join(tmpDir, "parrhesia-worktree"); console.log(`[local] creating temporary worktree for ${opts.gitRef}...`); await runCommand("git", ["worktree", "add", "--detach", worktreeDir, opts.gitRef], { cwd: ROOT_DIR, stdio: "inherit", }); buildDir = worktreeDir; } try { console.log(`[local] building parrhesia docker archive via nix at ${opts.gitRef}...`); const archivePath = ( await runCommand("nix", ["build", ".#dockerImage", "--print-out-paths", "--no-link"], { cwd: buildDir, }) ).stdout.trim(); if (!archivePath) { throw new Error("nix build did not return an archive path"); } return { mode: "local-git-ref", image: "parrhesia:latest", 
archivePath, gitRef: opts.gitRef, gitCommit: resolved, }; } finally { if (worktreeDir) { await runCommand("git", ["worktree", "remove", "--force", worktreeDir], { cwd: ROOT_DIR, }).catch(() => { // ignore
}); } } }
/* NOTE(review): the line break after "// ignore" above was restored — the
   extracted text had collapsed it, which would have commented out the
   worktree-cleanup code. Confirm exact formatting against version control. */
/* makeServerScript: returns the bash provisioning script uploaded to the
   server node. It starts/stops the selected relay target in docker
   (parrhesia-pg, parrhesia-memory, strfry, nostr-rs-relay, nostream, haven),
   waits for readiness via HTTP/port/pg/redis probes, and derives
   Postgres/Redis/app pool tuning from host RAM + CPU (derive_resource_tuning).
   NOTE(review): this template literal's content is visibly corrupted by an
   earlier extraction — internal line breaks are collapsed, and spans that
   looked like markup were stripped (the haven index.html heredoc body, the
   haven.env heredoc + the "docker run ... haven" command near the end, and
   "<...>" placeholders such as the usage string's argument). Left byte-for-byte
   as found; restore this function from version control before relying on it. */
function makeServerScript() { return `#!/usr/bin/env bash set -euo pipefail PARRHESIA_IMAGE="\${PARRHESIA_IMAGE:-parrhesia:latest}" POSTGRES_IMAGE="\${POSTGRES_IMAGE:-postgres:17}" STRFRY_IMAGE="\${STRFRY_IMAGE:-ghcr.io/hoytech/strfry:latest}" NOSTR_RS_IMAGE="\${NOSTR_RS_IMAGE:-scsibug/nostr-rs-relay:latest}" NOSTREAM_REPO="\${NOSTREAM_REPO:-https://github.com/Cameri/nostream.git}" NOSTREAM_REF="\${NOSTREAM_REF:-main}" HAVEN_IMAGE="\${HAVEN_IMAGE:-holgerhatgarkeinenode/haven-docker:latest}" HAVEN_RELAY_URL="\${HAVEN_RELAY_URL:-127.0.0.1:3355}" NOSTREAM_SECRET="0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" HAVEN_OWNER_NPUB="npub1utx00neqgqln72j22kej3ux7803c2k986henvvha4thuwfkper4s7r50e8" cleanup_containers() { docker rm -f parrhesia pg strfry nostr-rs nostream nostream-db nostream-cache haven >/dev/null 2>&1 || true } ensure_benchnet() { docker network create benchnet >/dev/null 2>&1 || true } wait_http() { local url="\$1" local timeout="\${2:-60}" local log_container="\${3:-}" for _ in \$(seq 1 "\$timeout"); do if curl -fsS "\$url" >/dev/null 2>&1; then return 0 fi sleep 1 done if [[ -n "\$log_container" ]]; then docker logs --tail 200 "\$log_container" >&2 || true fi echo "Timed out waiting for HTTP endpoint: \$url" >&2 return 1 } wait_pg() { local timeout="\${1:-90}" for _ in \$(seq 1 "\$timeout"); do if docker exec pg pg_isready -U parrhesia -d parrhesia >/dev/null 2>&1; then return 0 fi sleep 1 done docker logs --tail 200 pg >&2 || true echo "Timed out waiting for Postgres" >&2 return 1 } wait_nostream_pg() { local timeout="\${1:-90}" for _ in \$(seq 1 "\$timeout"); do if docker exec nostream-db pg_isready -U nostr_ts_relay -d nostr_ts_relay >/dev/null 2>&1; then 
return 0 fi sleep 1 done docker logs --tail 200 nostream-db >&2 || true echo "Timed out waiting for nostream Postgres" >&2 return 1 } wait_nostream_redis() { local timeout="\${1:-60}" for _ in \$(seq 1 "\$timeout"); do if docker exec nostream-cache redis-cli -a nostr_ts_relay ping >/dev/null 2>&1; then return 0 fi sleep 1 done docker logs --tail 200 nostream-cache >&2 || true echo "Timed out waiting for nostream Redis" >&2 return 1 } wait_port() { local port="\$1" local timeout="\${2:-60}" local log_container="\${3:-}" for _ in \$(seq 1 "\$timeout"); do if ss -ltn | grep -q ":\${port} "; then return 0 fi sleep 1 done if [[ -n "\$log_container" ]]; then docker logs --tail 200 "\$log_container" >&2 || true fi echo "Timed out waiting for port: \$port" >&2 return 1 } clamp() { local value="\$1" local min="\$2" local max="\$3" if (( value < min )); then echo "\$min" elif (( value > max )); then echo "\$max" else echo "\$value" fi } derive_resource_tuning() { local mem_kb mem_kb="$(awk '/MemTotal:/ {print $2}' /proc/meminfo 2>/dev/null || true)" if [[ -z "\$mem_kb" || ! 
"\$mem_kb" =~ ^[0-9]+$ ]]; then mem_kb=4194304 fi HOST_MEM_MB=$((mem_kb / 1024)) HOST_CPU_CORES=$(nproc 2>/dev/null || echo 2) local computed_pg_max_connections=$((HOST_CPU_CORES * 50)) local computed_pg_shared_buffers_mb=$((HOST_MEM_MB / 4)) local computed_pg_effective_cache_size_mb=$((HOST_MEM_MB * 3 / 4)) local computed_pg_maintenance_work_mem_mb=$((HOST_MEM_MB / 16)) local computed_pg_max_wal_size_gb=$((HOST_MEM_MB / 8192)) computed_pg_max_connections=$(clamp "\$computed_pg_max_connections" 200 1000) computed_pg_shared_buffers_mb=$(clamp "\$computed_pg_shared_buffers_mb" 512 32768) computed_pg_effective_cache_size_mb=$(clamp "\$computed_pg_effective_cache_size_mb" 1024 98304) computed_pg_maintenance_work_mem_mb=$(clamp "\$computed_pg_maintenance_work_mem_mb" 256 2048) computed_pg_max_wal_size_gb=$(clamp "\$computed_pg_max_wal_size_gb" 4 64) local computed_pg_min_wal_size_gb=$((computed_pg_max_wal_size_gb / 4)) computed_pg_min_wal_size_gb=$(clamp "\$computed_pg_min_wal_size_gb" 1 16) local computed_pg_work_mem_mb=$(((HOST_MEM_MB - computed_pg_shared_buffers_mb) / (computed_pg_max_connections * 3))) computed_pg_work_mem_mb=$(clamp "\$computed_pg_work_mem_mb" 4 128) local computed_parrhesia_pool_size=$((HOST_CPU_CORES * 8)) computed_parrhesia_pool_size=$(clamp "\$computed_parrhesia_pool_size" 20 200) local computed_nostream_db_min_pool_size=$((HOST_CPU_CORES * 4)) computed_nostream_db_min_pool_size=$(clamp "\$computed_nostream_db_min_pool_size" 16 128) local computed_nostream_db_max_pool_size=$((HOST_CPU_CORES * 16)) computed_nostream_db_max_pool_size=$(clamp "\$computed_nostream_db_max_pool_size" 64 512) if (( computed_nostream_db_max_pool_size < computed_nostream_db_min_pool_size )); then computed_nostream_db_max_pool_size="\$computed_nostream_db_min_pool_size" fi local computed_redis_maxmemory_mb=$((HOST_MEM_MB / 3)) computed_redis_maxmemory_mb=$(clamp "\$computed_redis_maxmemory_mb" 256 65536) 
PG_MAX_CONNECTIONS="\${PG_MAX_CONNECTIONS:-\$computed_pg_max_connections}" PG_SHARED_BUFFERS_MB="\${PG_SHARED_BUFFERS_MB:-\$computed_pg_shared_buffers_mb}" PG_EFFECTIVE_CACHE_SIZE_MB="\${PG_EFFECTIVE_CACHE_SIZE_MB:-\$computed_pg_effective_cache_size_mb}" PG_MAINTENANCE_WORK_MEM_MB="\${PG_MAINTENANCE_WORK_MEM_MB:-\$computed_pg_maintenance_work_mem_mb}" PG_WORK_MEM_MB="\${PG_WORK_MEM_MB:-\$computed_pg_work_mem_mb}" PG_MIN_WAL_SIZE_GB="\${PG_MIN_WAL_SIZE_GB:-\$computed_pg_min_wal_size_gb}" PG_MAX_WAL_SIZE_GB="\${PG_MAX_WAL_SIZE_GB:-\$computed_pg_max_wal_size_gb}" PARRHESIA_POOL_SIZE="\${PARRHESIA_POOL_SIZE:-\$computed_parrhesia_pool_size}" NOSTREAM_DB_MIN_POOL_SIZE="\${NOSTREAM_DB_MIN_POOL_SIZE:-\$computed_nostream_db_min_pool_size}" NOSTREAM_DB_MAX_POOL_SIZE="\${NOSTREAM_DB_MAX_POOL_SIZE:-\$computed_nostream_db_max_pool_size}" REDIS_MAXMEMORY_MB="\${REDIS_MAXMEMORY_MB:-\$computed_redis_maxmemory_mb}" PG_TUNING_ARGS=( -c max_connections="\$PG_MAX_CONNECTIONS" -c shared_buffers="\${PG_SHARED_BUFFERS_MB}MB" -c effective_cache_size="\${PG_EFFECTIVE_CACHE_SIZE_MB}MB" -c maintenance_work_mem="\${PG_MAINTENANCE_WORK_MEM_MB}MB" -c work_mem="\${PG_WORK_MEM_MB}MB" -c min_wal_size="\${PG_MIN_WAL_SIZE_GB}GB" -c max_wal_size="\${PG_MAX_WAL_SIZE_GB}GB" -c checkpoint_completion_target=0.9 -c wal_compression=on ) echo "[server] resource profile: mem_mb=\$HOST_MEM_MB cpu_cores=\$HOST_CPU_CORES" echo "[server] postgres tuning: max_connections=\$PG_MAX_CONNECTIONS shared_buffers=\${PG_SHARED_BUFFERS_MB}MB effective_cache_size=\${PG_EFFECTIVE_CACHE_SIZE_MB}MB work_mem=\${PG_WORK_MEM_MB}MB" echo "[server] app tuning: parrhesia_pool=\$PARRHESIA_POOL_SIZE nostream_db_pool=\${NOSTREAM_DB_MIN_POOL_SIZE}-\${NOSTREAM_DB_MAX_POOL_SIZE} redis_maxmemory=\${REDIS_MAXMEMORY_MB}MB" } common_parrhesia_env=() common_parrhesia_env+=( -e PARRHESIA_ENABLE_EXPIRATION_WORKER=0 ) common_parrhesia_env+=( -e PARRHESIA_ENABLE_PARTITION_RETENTION_WORKER=0 ) common_parrhesia_env+=( -e 
PARRHESIA_PUBLIC_MAX_CONNECTIONS=infinity ) common_parrhesia_env+=( -e PARRHESIA_LIMITS_MAX_FRAME_BYTES=16777216 ) common_parrhesia_env+=( -e PARRHESIA_LIMITS_MAX_EVENT_BYTES=4194304 ) common_parrhesia_env+=( -e PARRHESIA_LIMITS_MAX_FILTERS_PER_REQ=1024 ) common_parrhesia_env+=( -e PARRHESIA_LIMITS_MAX_FILTER_LIMIT=100000 ) common_parrhesia_env+=( -e PARRHESIA_LIMITS_MAX_TAGS_PER_EVENT=4096 ) common_parrhesia_env+=( -e PARRHESIA_LIMITS_MAX_TAG_VALUES_PER_FILTER=4096 ) common_parrhesia_env+=( -e PARRHESIA_LIMITS_IP_MAX_EVENT_INGEST_PER_WINDOW=1000000 ) common_parrhesia_env+=( -e PARRHESIA_LIMITS_RELAY_MAX_EVENT_INGEST_PER_WINDOW=1000000 ) common_parrhesia_env+=( -e PARRHESIA_LIMITS_MAX_SUBSCRIPTIONS_PER_CONNECTION=4096 ) common_parrhesia_env+=( -e PARRHESIA_LIMITS_MAX_EVENT_FUTURE_SKEW_SECONDS=31536000 ) common_parrhesia_env+=( -e PARRHESIA_LIMITS_MAX_EVENT_INGEST_PER_WINDOW=1000000 ) common_parrhesia_env+=( -e PARRHESIA_LIMITS_AUTH_MAX_AGE_SECONDS=31536000 ) common_parrhesia_env+=( -e PARRHESIA_LIMITS_MAX_OUTBOUND_QUEUE=65536 ) common_parrhesia_env+=( -e PARRHESIA_LIMITS_OUTBOUND_DRAIN_BATCH_SIZE=4096 ) common_parrhesia_env+=( -e PARRHESIA_LIMITS_MAX_NEGENTROPY_PAYLOAD_BYTES=1048576 ) common_parrhesia_env+=( -e PARRHESIA_LIMITS_MAX_NEGENTROPY_SESSIONS_PER_CONNECTION=256 ) common_parrhesia_env+=( -e PARRHESIA_LIMITS_MAX_NEGENTROPY_TOTAL_SESSIONS=100000 ) common_parrhesia_env+=( -e PARRHESIA_LIMITS_MAX_NEGENTROPY_ITEMS_PER_SESSION=1000000 ) cmd="\${1:-}" if [[ -z "\$cmd" ]]; then echo "usage: cloud-bench-server.sh " >&2 exit 1 fi derive_resource_tuning case "\$cmd" in start-parrhesia-pg) cleanup_containers docker network create benchnet >/dev/null 2>&1 || true docker run -d --name pg --network benchnet \ -e POSTGRES_DB=parrhesia \ -e POSTGRES_USER=parrhesia \ -e POSTGRES_PASSWORD=parrhesia \ "\$POSTGRES_IMAGE" \ "\${PG_TUNING_ARGS[@]}" >/dev/null wait_pg 90 docker run --rm --network benchnet \ -e DATABASE_URL=ecto://parrhesia:parrhesia@pg:5432/parrhesia \ 
"\$PARRHESIA_IMAGE" \ eval "Parrhesia.Release.migrate()" docker run -d --name parrhesia --network benchnet \ -p 4413:4413 \ -e DATABASE_URL=ecto://parrhesia:parrhesia@pg:5432/parrhesia \ -e POOL_SIZE="\$PARRHESIA_POOL_SIZE" \ "\${common_parrhesia_env[@]}" \ "\$PARRHESIA_IMAGE" >/dev/null wait_http "http://127.0.0.1:4413/health" 120 parrhesia ;; start-parrhesia-memory) cleanup_containers docker run -d --name parrhesia \ -p 4413:4413 \ -e PARRHESIA_STORAGE_BACKEND=memory \ -e PARRHESIA_MODERATION_CACHE_ENABLED=0 \ "\${common_parrhesia_env[@]}" \ "\$PARRHESIA_IMAGE" >/dev/null wait_http "http://127.0.0.1:4413/health" 120 parrhesia ;; start-strfry) cleanup_containers rm -rf /root/strfry-data mkdir -p /root/strfry-data/strfry cat > /root/strfry.conf <<'EOF' # generated by cloud bench script db = "/data/strfry" relay { bind = "0.0.0.0" port = 7777 nofiles = 131072 } EOF docker run -d --name strfry \ -p 7777:7777 \ -v /root/strfry.conf:/etc/strfry.conf:ro \ -v /root/strfry-data:/data \ "\$STRFRY_IMAGE" \ --config /etc/strfry.conf relay >/dev/null wait_port 7777 60 strfry ;; start-nostr-rs-relay) cleanup_containers cat > /root/nostr-rs.toml <<'EOF' [database] engine = "sqlite" [network] ip = "0.0.0.0" port = 8080 EOF docker run -d --name nostr-rs \ -p 8080:8080 \ -v /root/nostr-rs.toml:/usr/src/app/config.toml:ro \ "\$NOSTR_RS_IMAGE" >/dev/null wait_http "http://127.0.0.1:8080/" 60 nostr-rs ;; start-nostream) cleanup_containers ensure_benchnet if [[ ! -d /root/nostream-src/.git ]]; then git clone --depth 1 "\$NOSTREAM_REPO" /root/nostream-src >/dev/null fi git -C /root/nostream-src fetch --depth 1 origin "\$NOSTREAM_REF" >/dev/null 2>&1 || true if git -C /root/nostream-src rev-parse --verify FETCH_HEAD >/dev/null 2>&1; then git -C /root/nostream-src checkout --force FETCH_HEAD >/dev/null else git -C /root/nostream-src checkout --force "\$NOSTREAM_REF" >/dev/null fi nostream_ref_marker=/root/nostream-src/.bench_ref should_build_nostream=0 if ! 
docker image inspect nostream:bench >/dev/null 2>&1; then should_build_nostream=1 elif [[ ! -f "\$nostream_ref_marker" ]] || [[ "$(cat "\$nostream_ref_marker")" != "\$NOSTREAM_REF" ]]; then should_build_nostream=1 fi if [[ "\$should_build_nostream" == "1" ]]; then docker build -t nostream:bench /root/nostream-src >/dev/null printf '%s\n' "\$NOSTREAM_REF" > "\$nostream_ref_marker" fi mkdir -p /root/nostream-config if [[ ! -f /root/nostream-config/settings.yaml ]]; then cp /root/nostream-src/resources/default-settings.yaml /root/nostream-config/settings.yaml fi docker run -d --name nostream-db --network benchnet \ -e POSTGRES_DB=nostr_ts_relay \ -e POSTGRES_USER=nostr_ts_relay \ -e POSTGRES_PASSWORD=nostr_ts_relay \ "\$POSTGRES_IMAGE" \ "\${PG_TUNING_ARGS[@]}" >/dev/null wait_nostream_pg 90 docker run -d --name nostream-cache --network benchnet \ redis:7.0.5-alpine3.16 \ redis-server \ --loglevel warning \ --requirepass nostr_ts_relay \ --maxmemory "\${REDIS_MAXMEMORY_MB}mb" \ --maxmemory-policy noeviction >/dev/null wait_nostream_redis 60 docker run --rm --network benchnet \ -e DB_HOST=nostream-db \ -e DB_PORT=5432 \ -e DB_USER=nostr_ts_relay \ -e DB_PASSWORD=nostr_ts_relay \ -e DB_NAME=nostr_ts_relay \ -v /root/nostream-src/migrations:/code/migrations:ro \ -v /root/nostream-src/knexfile.js:/code/knexfile.js:ro \ node:18-alpine3.16 \ sh -lc 'cd /code && npm install --no-save --quiet knex@2.4.0 pg@8.8.0 && npx knex migrate:latest' docker run -d --name nostream --network benchnet \ -p 8008:8008 \ -e SECRET="\$NOSTREAM_SECRET" \ -e RELAY_PORT=8008 \ -e NOSTR_CONFIG_DIR=/home/node/.nostr \ -e DB_HOST=nostream-db \ -e DB_PORT=5432 \ -e DB_USER=nostr_ts_relay \ -e DB_PASSWORD=nostr_ts_relay \ -e DB_NAME=nostr_ts_relay \ -e DB_MIN_POOL_SIZE="\$NOSTREAM_DB_MIN_POOL_SIZE" \ -e DB_MAX_POOL_SIZE="\$NOSTREAM_DB_MAX_POOL_SIZE" \ -e DB_ACQUIRE_CONNECTION_TIMEOUT=60000 \ -e REDIS_HOST=nostream-cache \ -e REDIS_PORT=6379 \ -e REDIS_USER=default \ -e REDIS_PASSWORD=nostr_ts_relay \ 
-v /root/nostream-config:/home/node/.nostr:ro \ nostream:bench >/dev/null wait_port 8008 180 nostream ;; start-haven) cleanup_containers rm -rf /root/haven-bench mkdir -p /root/haven-bench/db mkdir -p /root/haven-bench/blossom mkdir -p /root/haven-bench/templates/static if [[ ! -f /root/haven-bench/templates/index.html ]]; then cat > /root/haven-bench/templates/index.html <<'EOF' Haven
 
 Haven
 
 EOF fi printf '[]\n' > /root/haven-bench/relays_import.json printf '[]\n' > /root/haven-bench/relays_blastr.json printf '[]\n' > /root/haven-bench/blacklisted_npubs.json printf '[]\n' > /root/haven-bench/whitelisted_npubs.json cat > /root/haven-bench/haven.env </dev/null wait_port 3355 120 haven ;; cleanup) cleanup_containers ;; *) echo "unknown command: \$cmd" >&2 exit 1 ;; esac `; }
/* makeClientScript: returns the bash script run on each client node; it runs
   the four nostr-bench scenarios (connect, echo, event, req) with --json
   output against a single relay URL, with counts/rates/sizes supplied via
   PARRHESIA_BENCH_* environment variables (defaults inline).
   NOTE(review): the usage string below lost its relay-URL placeholder in
   extraction — confirm against version control. */
function makeClientScript() { return `#!/usr/bin/env bash set -euo pipefail relay_url="\${1:-}" if [[ -z "\$relay_url" ]]; then echo "usage: cloud-bench-client.sh " >&2 exit 1 fi bench_bin="\${NOSTR_BENCH_BIN:-/usr/local/bin/nostr-bench}" echo "==> nostr-bench connect \${relay_url}" "\$bench_bin" connect --json \ -c "\${PARRHESIA_BENCH_CONNECT_COUNT:-200}" \ -r "\${PARRHESIA_BENCH_CONNECT_RATE:-100}" \ -k "\${PARRHESIA_BENCH_KEEPALIVE_SECONDS:-5}" \ "\${relay_url}" echo echo "==> nostr-bench echo \${relay_url}" "\$bench_bin" echo --json \ -c "\${PARRHESIA_BENCH_ECHO_COUNT:-100}" \ -r "\${PARRHESIA_BENCH_ECHO_RATE:-50}" \ -k "\${PARRHESIA_BENCH_KEEPALIVE_SECONDS:-5}" \ --size "\${PARRHESIA_BENCH_ECHO_SIZE:-512}" \ "\${relay_url}" echo echo "==> nostr-bench event \${relay_url}" "\$bench_bin" event --json \ -c "\${PARRHESIA_BENCH_EVENT_COUNT:-100}" \ -r "\${PARRHESIA_BENCH_EVENT_RATE:-50}" \ -k "\${PARRHESIA_BENCH_KEEPALIVE_SECONDS:-5}" \ "\${relay_url}" echo echo "==> nostr-bench req \${relay_url}" "\$bench_bin" req --json \ -c "\${PARRHESIA_BENCH_REQ_COUNT:-100}" \ -r "\${PARRHESIA_BENCH_REQ_RATE:-50}" \ -k "\${PARRHESIA_BENCH_KEEPALIVE_SECONDS:-5}" \ --limit "\${PARRHESIA_BENCH_REQ_LIMIT:-10}" \ "\${relay_url}" `; }
/* parseNostrBenchSections: scans combined client output, tracking the current
   "==> nostr-bench <scenario>" header and collecting JSON-looking lines —
   presumably into `parsed` keyed by scenario; the definition continues past
   this chunk and is left untouched. */
function parseNostrBenchSections(output) { const lines = output.split(/\r?\n/); let section = null; const parsed = {}; for (const lineRaw of lines) { const line = lineRaw.trim(); const header = line.match(/^==>\s+nostr-bench\s+(connect|echo|event|req)\s+/); if (header) { section = header[1]; continue; } if (!line.startsWith("{")) continue; try { const json = 
JSON.parse(line); if (section) { parsed[section] = json; } } catch { // ignore noisy non-json lines } } return parsed; } function mean(values) { const valid = values.filter((v) => Number.isFinite(v)); if (valid.length === 0) return NaN; return valid.reduce((a, b) => a + b, 0) / valid.length; } function metricFromSections(sections) { const connect = sections?.connect?.connect_stats?.success_time || {}; const echo = sections?.echo || {}; const event = sections?.event || {}; const req = sections?.req || {}; return { connect_avg_ms: Number(connect.avg ?? NaN), connect_max_ms: Number(connect.max ?? NaN), echo_tps: Number(echo.tps ?? NaN), echo_mibs: Number(echo.size ?? NaN), event_tps: Number(event.tps ?? NaN), event_mibs: Number(event.size ?? NaN), req_tps: Number(req.tps ?? NaN), req_mibs: Number(req.size ?? NaN), }; } function summariseServersFromResults(results) { const byServer = new Map(); for (const runEntry of results) { const serverName = runEntry.target; if (!byServer.has(serverName)) { byServer.set(serverName, []); } const samples = byServer.get(serverName); for (const clientResult of runEntry.clients || []) { if (clientResult.status !== "ok") continue; samples.push(metricFromSections(clientResult.sections || {})); } } const metricKeys = [ "connect_avg_ms", "connect_max_ms", "echo_tps", "echo_mibs", "event_tps", "event_mibs", "req_tps", "req_mibs", ]; const out = {}; for (const [serverName, samples] of byServer.entries()) { const summary = {}; for (const key of metricKeys) { summary[key] = mean(samples.map((s) => s[key])); } out[serverName] = summary; } return out; } async function tryCommandStdout(command, args = [], options = {}) { try { const res = await runCommand(command, args, options); return res.stdout.trim(); } catch { return ""; } } async function main() { const opts = parseArgs(process.argv.slice(2)); await ensureLocalPrereqs(opts); const datacenterChoice = await chooseDatacenter(opts); opts.datacenter = datacenterChoice.name; console.log( `[plan] 
selected datacenter=${opts.datacenter} (${ESTIMATE_WINDOW_LABEL} est gross=${formatEuro(datacenterChoice.estimatedTotal.gross)} net=${formatEuro(datacenterChoice.estimatedTotal.net)})`, ); const timestamp = new Date().toISOString(); const runId = `cloudbench-${timestamp.replace(/[:.]/g, "-")}-${Math.floor(Math.random() * 100000)}`; const detectedGitTag = (await tryCommandStdout("git", ["describe", "--tags", "--abbrev=0"], { cwd: ROOT_DIR, })) || "untagged"; const detectedGitCommit = await tryCommandStdout("git", ["rev-parse", "--short=7", "HEAD"], { cwd: ROOT_DIR, }); const tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), "parrhesia-cloud-bench-")); const localServerScriptPath = path.join(tmpDir, "cloud-bench-server.sh"); const localClientScriptPath = path.join(tmpDir, "cloud-bench-client.sh"); fs.writeFileSync(localServerScriptPath, makeServerScript(), "utf8"); fs.writeFileSync(localClientScriptPath, makeClientScript(), "utf8"); fs.chmodSync(localServerScriptPath, 0o755); fs.chmodSync(localClientScriptPath, 0o755); const artifactsRoot = path.resolve(ROOT_DIR, opts.artifactsDir); const artifactsDir = path.join(artifactsRoot, runId); fs.mkdirSync(artifactsDir, { recursive: true }); const historyFile = path.resolve(ROOT_DIR, opts.historyFile); fs.mkdirSync(path.dirname(historyFile), { recursive: true }); console.log(`[run] ${runId}`); console.log("[phase] local preparation"); const nostrBench = await buildNostrBenchBinary(tmpDir); const needsParrhesia = opts.targets.includes("parrhesia-pg") || opts.targets.includes("parrhesia-memory"); const parrhesiaSource = needsParrhesia ? 
await buildParrhesiaArchiveIfNeeded(opts, tmpDir) : { mode: "not-needed", image: opts.parrhesiaImage, archivePath: null, gitRef: null, gitCommit: null, }; const keyName = `${runId}-ssh`; const keyPath = path.join(tmpDir, "id_ed25519"); const keyPubPath = `${keyPath}.pub`; const createdServers = []; let sshKeyCreated = false; const cleanup = async () => { if (opts.keep) { console.log("[cleanup] --keep set, skipping cloud cleanup"); return; } if (createdServers.length > 0) { console.log("[cleanup] deleting servers..."); await Promise.all( createdServers.map((name) => runCommand("hcloud", ["server", "delete", name]) .then(() => { console.log(`[cleanup] deleted server: ${name}`); }) .catch((error) => { console.warn(`[cleanup] failed to delete server ${name}: ${error.message || error}`); }), ), ); } if (sshKeyCreated) { console.log("[cleanup] deleting ssh key..."); await runCommand("hcloud", ["ssh-key", "delete", keyName]) .then(() => { console.log(`[cleanup] deleted ssh key: ${keyName}`); }) .catch((error) => { console.warn(`[cleanup] failed to delete ssh key ${keyName}: ${error.message || error}`); }); } }; try { console.log("[phase] create ssh credentials"); await runCommand("ssh-keygen", ["-t", "ed25519", "-N", "", "-f", keyPath, "-C", keyName], { stdio: "inherit", }); await runCommand("hcloud", ["ssh-key", "create", "--name", keyName, "--public-key-from-file", keyPubPath], { stdio: "inherit", }); sshKeyCreated = true; console.log("[phase] create cloud servers in parallel"); const serverName = `${runId}-server`; const clientNames = Array.from({ length: opts.clients }, (_, i) => `${runId}-client-${i + 1}`); const createOne = (name, role, type) => runCommand( "hcloud", [ "server", "create", "--name", name, "--type", type, "--datacenter", opts.datacenter, "--image", opts.imageBase, "--ssh-key", keyName, "--label", `bench_run=${runId}`, "--label", `bench_role=${role}`, "-o", "json", ], { stdio: "pipe" }, ).then((res) => JSON.parse(res.stdout)); const [serverCreate, 
...clientCreates] = await Promise.all([ createOne(serverName, "server", opts.serverType), ...clientNames.map((name) => createOne(name, "client", opts.clientType)), ]); createdServers.push(serverName, ...clientNames); const serverIp = serverCreate.server.public_net.ipv4.ip; const clientInfos = clientCreates.map((c) => ({ name: c.server.name, id: c.server.id, ip: c.server.public_net.ipv4.ip, })); console.log("[phase] wait for SSH"); await Promise.all([ waitForSsh(serverIp, keyPath), ...clientInfos.map((client) => waitForSsh(client.ip, keyPath)), ]); console.log("[phase] install runtime dependencies on nodes"); const installCmd = [ "set -euo pipefail", "export DEBIAN_FRONTEND=noninteractive", "apt-get update -y >/dev/null", "apt-get install -y docker.io curl jq git >/dev/null", "systemctl enable --now docker >/dev/null", "docker --version", ].join("; "); await Promise.all([ sshExec(serverIp, keyPath, installCmd, { stdio: "inherit" }), ...clientInfos.map((client) => sshExec(client.ip, keyPath, installCmd, { stdio: "inherit" })), ]); console.log("[phase] upload control scripts + nostr-bench binary"); await scpToHost(serverIp, keyPath, localServerScriptPath, "/root/cloud-bench-server.sh"); await sshExec(serverIp, keyPath, "chmod +x /root/cloud-bench-server.sh"); for (const client of clientInfos) { await scpToHost(client.ip, keyPath, localClientScriptPath, "/root/cloud-bench-client.sh"); await scpToHost(client.ip, keyPath, nostrBench.path, "/usr/local/bin/nostr-bench"); await sshExec(client.ip, keyPath, "chmod +x /root/cloud-bench-client.sh /usr/local/bin/nostr-bench"); } console.log("[phase] server image setup"); let parrhesiaImageOnServer = parrhesiaSource.image; if (needsParrhesia) { if (parrhesiaSource.archivePath) { console.log("[server] uploading parrhesia docker archive..."); await scpToHost(serverIp, keyPath, parrhesiaSource.archivePath, "/root/parrhesia.tar.gz"); await sshExec(serverIp, keyPath, "docker load -i /root/parrhesia.tar.gz", { stdio: "inherit" }); 
parrhesiaImageOnServer = "parrhesia:latest"; } else { console.log(`[server] pulling parrhesia image ${parrhesiaImageOnServer}...`); await sshExec(serverIp, keyPath, `docker pull ${shellEscape(parrhesiaImageOnServer)}`, { stdio: "inherit", }); } } console.log("[server] pre-pulling comparison images..."); const comparisonImages = new Set(); if (opts.targets.includes("parrhesia-pg") || opts.targets.includes("nostream")) { comparisonImages.add(opts.postgresImage); } if (opts.targets.includes("strfry")) { comparisonImages.add(opts.strfryImage); } if (opts.targets.includes("nostr-rs-relay")) { comparisonImages.add(opts.nostrRsImage); } if (opts.targets.includes("nostream")) { comparisonImages.add("redis:7.0.5-alpine3.16"); comparisonImages.add("node:18-alpine3.16"); } if (opts.targets.includes("haven")) { comparisonImages.add(opts.havenImage); } for (const image of comparisonImages) { await sshExec(serverIp, keyPath, `docker pull ${shellEscape(image)}`, { stdio: "inherit" }); } const serverDescribe = JSON.parse( (await runCommand("hcloud", ["server", "describe", serverName, "-o", "json"])).stdout, ); const clientDescribes = await Promise.all( clientInfos.map(async (c) => JSON.parse((await runCommand("hcloud", ["server", "describe", c.name, "-o", "json"])).stdout), ), ); const versions = { nostr_bench: ( await sshExec(clientInfos[0].ip, keyPath, "/usr/local/bin/nostr-bench --version") ).stdout.trim(), }; const startCommands = { "parrhesia-pg": "start-parrhesia-pg", "parrhesia-memory": "start-parrhesia-memory", strfry: "start-strfry", "nostr-rs-relay": "start-nostr-rs-relay", nostream: "start-nostream", haven: "start-haven", }; const relayUrls = { "parrhesia-pg": `ws://${serverIp}:4413/relay`, "parrhesia-memory": `ws://${serverIp}:4413/relay`, strfry: `ws://${serverIp}:7777`, "nostr-rs-relay": `ws://${serverIp}:8080`, nostream: `ws://${serverIp}:8008`, haven: `ws://${serverIp}:3355`, }; const results = []; const targetOrderPerRun = []; console.log("[phase] benchmark 
execution"); for (let runIndex = 1; runIndex <= opts.runs; runIndex += 1) { const runTargets = shuffled(opts.targets); targetOrderPerRun.push({ run: runIndex, targets: runTargets }); console.log(`[bench] run ${runIndex}/${opts.runs} target-order=${runTargets.join(",")}`); for (const target of runTargets) { console.log(`[bench] run ${runIndex}/${opts.runs} target=${target}`); const serverEnvPrefix = [ `PARRHESIA_IMAGE=${shellEscape(parrhesiaImageOnServer || "parrhesia:latest")}`, `POSTGRES_IMAGE=${shellEscape(opts.postgresImage)}`, `STRFRY_IMAGE=${shellEscape(opts.strfryImage)}`, `NOSTR_RS_IMAGE=${shellEscape(opts.nostrRsImage)}`, `NOSTREAM_REPO=${shellEscape(opts.nostreamRepo)}`, `NOSTREAM_REF=${shellEscape(opts.nostreamRef)}`, `HAVEN_IMAGE=${shellEscape(opts.havenImage)}`, `HAVEN_RELAY_URL=${shellEscape(`${serverIp}:3355`)}`, ].join(" "); try { await sshExec(serverIp, keyPath, `${serverEnvPrefix} /root/cloud-bench-server.sh ${shellEscape(startCommands[target])}`); } catch (error) { console.error(`[bench] target startup failed target=${target} run=${runIndex}`); if (error?.stdout?.trim()) { console.error(`[bench] server startup stdout:\n${error.stdout.trim()}`); } if (error?.stderr?.trim()) { console.error(`[bench] server startup stderr:\n${error.stderr.trim()}`); } throw error; } const relayUrl = relayUrls[target]; const runTargetDir = path.join(artifactsDir, target, `run-${runIndex}`); fs.mkdirSync(runTargetDir, { recursive: true }); const benchEnvPrefix = [ `PARRHESIA_BENCH_CONNECT_COUNT=${opts.bench.connectCount}`, `PARRHESIA_BENCH_CONNECT_RATE=${opts.bench.connectRate}`, `PARRHESIA_BENCH_ECHO_COUNT=${opts.bench.echoCount}`, `PARRHESIA_BENCH_ECHO_RATE=${opts.bench.echoRate}`, `PARRHESIA_BENCH_ECHO_SIZE=${opts.bench.echoSize}`, `PARRHESIA_BENCH_EVENT_COUNT=${opts.bench.eventCount}`, `PARRHESIA_BENCH_EVENT_RATE=${opts.bench.eventRate}`, `PARRHESIA_BENCH_REQ_COUNT=${opts.bench.reqCount}`, `PARRHESIA_BENCH_REQ_RATE=${opts.bench.reqRate}`, 
`PARRHESIA_BENCH_REQ_LIMIT=${opts.bench.reqLimit}`, `PARRHESIA_BENCH_KEEPALIVE_SECONDS=${opts.bench.keepaliveSeconds}`, ].join(" "); const clientRunResults = await Promise.all( clientInfos.map(async (client) => { const startedAt = new Date().toISOString(); const startMs = Date.now(); const stdoutPath = path.join(runTargetDir, `${client.name}.stdout.log`); const stderrPath = path.join(runTargetDir, `${client.name}.stderr.log`); try { const benchRes = await sshExec( client.ip, keyPath, `${benchEnvPrefix} /root/cloud-bench-client.sh ${shellEscape(relayUrl)}`, ); fs.writeFileSync(stdoutPath, benchRes.stdout, "utf8"); fs.writeFileSync(stderrPath, benchRes.stderr, "utf8"); return { client_name: client.name, client_ip: client.ip, status: "ok", started_at: startedAt, finished_at: new Date().toISOString(), duration_ms: Date.now() - startMs, stdout_path: path.relative(ROOT_DIR, stdoutPath), stderr_path: path.relative(ROOT_DIR, stderrPath), sections: parseNostrBenchSections(benchRes.stdout), }; } catch (error) { const out = error.stdout || ""; const err = error.stderr || String(error); fs.writeFileSync(stdoutPath, out, "utf8"); fs.writeFileSync(stderrPath, err, "utf8"); return { client_name: client.name, client_ip: client.ip, status: "error", started_at: startedAt, finished_at: new Date().toISOString(), duration_ms: Date.now() - startMs, stdout_path: path.relative(ROOT_DIR, stdoutPath), stderr_path: path.relative(ROOT_DIR, stderrPath), error: String(error.message || error), sections: parseNostrBenchSections(out), }; } }), ); results.push({ run: runIndex, target, relay_url: relayUrl, clients: clientRunResults, }); const failed = clientRunResults.filter((r) => r.status !== "ok"); if (failed.length > 0) { throw new Error( `Client benchmark failed for target=${target}, run=${runIndex}: ${failed .map((f) => f.client_name) .join(", ")}`, ); } } } console.log("[phase] final server cleanup (containers)"); await sshExec(serverIp, keyPath, "/root/cloud-bench-server.sh cleanup"); const 
gitTag = detectedGitTag || "untagged"; const gitCommit = parrhesiaSource.gitCommit || detectedGitCommit || "unknown"; const servers = summariseServersFromResults(results); const entry = { schema_version: 2, timestamp, run_id: runId, machine_id: os.hostname(), git_tag: gitTag, git_commit: gitCommit, runs: opts.runs, source: { kind: "cloud", mode: parrhesiaSource.mode, parrhesia_image: parrhesiaImageOnServer, git_ref: parrhesiaSource.gitRef, git_tag: gitTag, git_commit: gitCommit, }, infra: { provider: "hcloud", datacenter: opts.datacenter, datacenter_location: datacenterChoice.location, server_type: opts.serverType, client_type: opts.clientType, image_base: opts.imageBase, clients: opts.clients, estimated_price_window_eur: { minutes: ESTIMATE_WINDOW_MINUTES, gross: datacenterChoice.estimatedTotal.gross, net: datacenterChoice.estimatedTotal.net, }, }, bench: { runs: opts.runs, targets: opts.targets, target_order_per_run: targetOrderPerRun, ...opts.bench, }, versions, servers, artifacts_dir: path.relative(ROOT_DIR, artifactsDir), hcloud: { server: serverDescribe, clients: clientDescribes, }, results, }; fs.appendFileSync(historyFile, `${JSON.stringify(entry)}\n`, "utf8"); console.log("[done] benchmark complete"); console.log(`[done] history appended: ${path.relative(ROOT_DIR, historyFile)}`); console.log(`[done] artifacts: ${path.relative(ROOT_DIR, artifactsDir)}`); if (opts.keep) { console.log(`[done] resources kept. server=${serverName} clients=${clientNames.join(",")}`); console.log(`[done] ssh key kept: ${keyName}`); } } finally { await cleanup(); } } main().catch((error) => { console.error("[error]", error?.message || error); if (error?.stderr) { console.error(error.stderr); } process.exit(1); });