92 Commits

Author SHA1 Message Date
a74106d665 chore: Bump version to 0.8.0
2026-03-26 01:23:23 +01:00
d34b398eed Merge remote-tracking branch 'public/master' (GH actions, test stability) 2026-03-26 00:49:25 +01:00
b402d95e47 feat: add sync relay guard fanout gating and env config
2026-03-26 00:36:00 +01:00
8309a89ba7 perf: tune cloud seeding and lower hot fill target
2026-03-20 21:33:17 +01:00
9ed1d80b7f bench: simplify cloud bench flow and align phased naming
2026-03-20 20:56:32 +01:00
4bd8663126 bench/fix: prefix bracketed cloud bench logs with T+ timestamps
2026-03-20 18:10:29 +01:00
f7ff3a4bd7 bench: use nostr-bench seed mode and expose relay json counters 2026-03-20 18:00:14 +01:00
8f22eb2097 build: pin nostr-bench submodule in nix and cloud bench pipeline 2026-03-20 17:43:31 +01:00
6b59fa6328 build: nostr-bench submodule
2026-03-20 14:33:51 +01:00
070464f2eb bench: Cloud seeding
2026-03-20 14:19:58 +01:00
bbcaa00f0b chore: Bump version to 0.7.0, 1st beta
2026-03-20 04:21:51 +01:00
28c47ab435 test/build: Stability, compatibility 2026-03-20 04:15:50 +01:00
6bd0143de4 chore: Bump version to 0.7.0, 1st beta
2026-03-20 03:44:24 +01:00
8b5231fa0d test: expand protocol property-based coverage 2026-03-20 02:32:41 +01:00
a15856bdac websocket: add configurable ping/pong keepalive 2026-03-20 02:32:34 +01:00
b22fe98ab0 auth: use constant-time NIP-42 challenge comparison 2026-03-20 02:32:30 +01:00
a4ded3c008 docs: Alpha review
2026-03-20 02:19:44 +01:00
c446b8596a feat: Official plug API
2026-03-20 01:31:57 +01:00
be9d348660 bench: Cloud firewall
2026-03-20 01:12:26 +01:00
046f80591b bench: Cloud monitoring 2026-03-19 23:45:57 +01:00
57c2c0b822 bench: Cloud tuning
2026-03-19 23:14:46 +01:00
e02bd99a43 bench: Multi-temperature cloud bench
2026-03-19 22:14:35 +01:00
c45dbadd78 bench: Server/client tuning
2026-03-19 20:53:41 +01:00
f86b1deff8 bench: Smart datacenter selection 2026-03-19 16:04:14 +01:00
64d03f0b2d bench: Add Nostream and HAVEN to cloud test 2026-03-19 15:00:56 +01:00
a410e07425 bench: Nix build static linux/amd64 nostr-bench 2026-03-19 14:36:07 +01:00
07953a7608 test: Fix openssl exec path
2026-03-19 13:52:17 +01:00
e7a7460191 dev: E2E and bench runners via justfile 2026-03-19 13:49:12 +01:00
833c85f4ac bench: Cloud benchmark 2026-03-19 13:29:55 +01:00
f0ef42fe3f docs: Bench chart in README 2026-03-19 11:46:12 +01:00
9947635855 More E2E tests 2026-03-19 11:46:12 +01:00
f70d50933d dev: nips submodule, skill 2026-03-19 10:44:15 +01:00
edf139d488 bench: Split scripts 2026-03-19 10:44:15 +01:00
101a506eda Run bench at Git ref helper script 2026-03-19 10:44:15 +01:00
7c0ad28f6e bench: Capture versions 2026-03-19 10:44:15 +01:00
3e5bf462e9 chore: Bump version to 0.6.0, fix tests
2026-03-18 21:58:08 +01:00
fc3d121599 Benchmark capture and plot
2026-03-18 21:23:23 +01:00
970cee2c0e Document embedded API surface
2026-03-18 20:22:12 +01:00
7a43ebd395 Expand in-memory storage indexes 2026-03-18 19:43:11 +01:00
4c40edfd83 Optimize memory-backed benchmark path 2026-03-18 18:56:47 +01:00
f60b8ba02a Add memory-backed benchmark profile 2026-03-18 18:39:53 +01:00
2225dfdc9e Improve public API documentation 2026-03-18 18:08:47 +01:00
9014912e9d Unify HTTP metadata handling 2026-03-18 18:00:07 +01:00
c30449b318 Expand relay metrics and observability 2026-03-18 17:39:13 +01:00
c377ed4b62 Separate read pool and harden fanout state handling 2026-03-18 17:21:58 +01:00
dce473662f Lock signature verification and add per-IP ingest limits 2026-03-18 16:46:32 +01:00
a2bdf11139 Add DB constraints for binary identifier lengths 2026-03-18 16:00:07 +01:00
bc66dfcbbe Upgrade NIP-50 search to ranked Postgres FTS 2026-03-18 15:56:45 +01:00
f732d9cf24 Implement full NIP-43 relay access flow 2026-03-18 15:28:15 +01:00
f2856d000e Implement NIP-66 relay discovery publishing 2026-03-18 14:50:25 +01:00
dc5f0c1e5d Add first-class listener connection caps 2026-03-18 14:21:43 +01:00
b56925f413 Decouple publish fanout and use ETS ingest counters
2026-03-18 14:10:32 +01:00
05718d4b91 Prevent NIP-98 token replay 2026-03-18 14:05:38 +01:00
1fef184f50 Add relay-wide event ingest limiter 2026-03-18 14:05:27 +01:00
57fdb4ed85 Add configurable tag guardrails 2026-03-18 14:05:09 +01:00
8dbf05b7fe docs: Opus review 2026-03-18 13:23:06 +01:00
7b2d92b714 fix: Sandbox owner checks in DB connection before exiting
The shared sandbox owner process exited without releasing its Postgrex
connection, causing intermittent "client exited" error logs on CI. The
owner now calls Sandbox.checkin before exiting, and on_exit waits for
the owner to finish before switching to manual mode.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-17 20:11:31 +01:00
a19b7d97f0 fix: Subscription workers restart strategy, sandbox ownership race condition
Clear OTP SSL PEM cache between listener terminate/restart so reloaded
certs are read from disk instead of serving stale cached data. Make
reconcile_worker idempotent to prevent unnecessary worker churn when
put_server is followed by start_server. Add request timeouts to
RelayInfoClient to prevent hanging connections.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-03-17 19:42:18 +01:00
65b47ec191 fix: Subscription workers restart strategy, sandbox ownership race condition
2026-03-17 18:49:50 +01:00
e13c08fd5a Strengthening the TLS reload test 2026-03-17 12:42:08 +01:00
101ded43cb Stabilize TLS and sync worker tests
2026-03-17 12:17:29 +01:00
f4d94c9fcb Refactor test runtime ownership
2026-03-17 12:06:32 +01:00
35c8d50db0 Stabilize TLS listener reload e2e
2026-03-17 04:12:42 +01:00
4d169c23ae Harden CI-sensitive integration tests
2026-03-17 03:55:49 +01:00
a1a8b30d12 Stabilize test harness and node sync e2e
2026-03-17 03:46:58 +01:00
e557eba437 Fix connection test harness startup assumptions
2026-03-17 03:13:45 +01:00
a652bf3448 Add sync e2e aliases and release checks
2026-03-17 03:00:50 +01:00
f518a25cab Add node sync e2e harness and bump 0.5.0
2026-03-17 02:47:42 +01:00
02f2584757 Align websocket and admin APIs with shared surfaces 2026-03-17 01:08:41 +01:00
e8fd6c7328 Add listener TLS support and pinning tests 2026-03-17 00:48:48 +01:00
1f608ee2bd Refactor ingress to listener-based configuration 2026-03-16 23:47:17 +01:00
5f4f086d28 Add outbound sync worker runtime 2026-03-16 21:55:03 +01:00
9be3b6ca52 Add sync control plane API 2026-03-16 21:23:39 +01:00
769177a63e Add shared auth and identity APIs 2026-03-16 21:07:26 +01:00
987415d80c docs: README intro
2026-03-16 20:31:21 +01:00
d119d21d99 Extract API events and stream layers
2026-03-16 20:21:58 +01:00
5d4d181d00 Add trusted proxy IP enforcement tests 2026-03-16 19:09:27 +01:00
fd17026c32 Implement ACL runtime enforcement and management API 2026-03-16 17:49:16 +01:00
14fb0f7ffb docs: Study Khatru 2026-03-16 16:53:55 +01:00
186d0f98ee improve: NIF-77 single-filter fast path 2026-03-16 16:30:07 +01:00
39dbc069a7 feat: NIF-77 negentropy sync 2026-03-16 16:00:15 +01:00
4c2c93deb3 docs: Sketch NIF-77 sync and ACLs 2026-03-16 14:57:08 +01:00
b628770517 docs: Narrow NIP-DBSYNC protocol 2026-03-16 12:58:21 +01:00
705971cbc4 docs: Nostr DB sync proposal 2026-03-16 12:57:22 +01:00
b86b5db78c ci: GitHub release action
2026-03-14 20:02:05 +01:00
5577445e80 ci: GitHub action 2026-03-14 19:49:07 +01:00
1a4572013d chore: Bump version to 0.4.0 2026-03-14 19:15:31 +01:00
5c2fadc28e Rename archiver to partitions and drop archive SQL helper 2026-03-14 18:31:12 +01:00
7faf8c84c8 Align event_tags partition lifecycle with events 2026-03-14 18:23:21 +01:00
889d630c12 Add monthly partition maintenance and retention pruning 2026-03-14 18:11:40 +01:00
19664ac56c build: Default port to 4413 2026-03-14 17:37:37 +01:00
708e26e4f4 buid/test/docs: Docker image, Darwin fix, logo 2026-03-14 17:08:07 +01:00
223 changed files with 33673 additions and 2891 deletions


@@ -0,0 +1,123 @@
---
name: nostr-nip-sync
description: Check upstream NIP changes in ./docs/nips and assess required updates to our Elixir Nostr server implementation.
scope: project
disable-model-invocation: false
tags:
- elixir
- nostr
- protocol
- maintenance
triggers:
- "nostr nips"
- "sync with upstream nips"
- "check for protocol changes"
- "review nip updates"
---
You are an assistant responsible for keeping this Elixir-based Nostr server aligned with the upstream Nostr Implementation Possibilities (NIPs) specification.
## Goal
When invoked, you will:
1. Detect upstream changes to the NIPs repository mirrored as a git submodule at `./docs/nips/`.
2. Understand what those changes mean for a Nostr relay implementation.
3. Decide whether they require updates to our Elixir server code, configuration, or documentation.
4. Propose concrete implementation tasks (modules to touch, new tests, migrations, etc.) or explicitly state that no changes are required.
The user may optionally pass additional arguments describing context or constraints. Treat all trailing arguments as a free-form description of extra requirements or focus areas.
## Repository assumptions
- This project is an Elixir Nostr relay / server.
- The upstream NIPs repository is included as a git submodule at `./docs/nips/`, tracking `nostr-protocol/nips` on GitHub.
- Our implementation aims to conform to all NIPs listed in our project documentation and `CLAUDE.md`, not necessarily every NIP in existence.
- Our project uses standard Elixir project structure (`mix.exs`, `lib/`, `test/`, etc.).
If any of these assumptions appear false based on the actual repository layout, first correct your mental model and explicitly call that out to the user before proceeding.
## High-level workflow
When this skill is invoked, follow this procedure:
1. **Gather local context**
- Read `CLAUDE.md` for an overview of the server's purpose, supported NIPs, architecture, and key modules.
- Inspect `mix.exs` and the `lib/` directory to understand the main supervision tree, key contexts, and modules related to Nostr protocol handling (parsing, persistence, filters, subscriptions, etc.).
- Look for any existing documentation about supported NIPs (e.g. `docs/`, `README.md`, `SUPPORT.md`, or `NIPS.md`).
2. **Inspect the NIPs submodule**
- Open `./docs/nips/` and identify:
- The current git commit (`HEAD`) of the submodule.
- The previous commit referenced by the parent repo (if accessible via diff or git history); a sketch follows this workflow list.
- If you cannot run `git` commands directly, approximate by:
- Listing recently modified NIP files in `./docs/nips/`.
- Comparing file contents where possible (old vs new) if the repository history is available locally.
- Summarize which NIPs have changed:
- New NIPs added.
- Existing NIPs significantly modified.
- NIPs deprecated, withdrawn, or marked as superseded.
3. **Map NIP changes to implementation impact**
For each changed NIP:
- Identify the NIP's purpose (e.g. basic protocol flow, new event kinds, tags, message types, relay behaviours).
- Determine which aspects of our server may be affected:
- Event validation and data model (schemas, changesets, database schema and migrations).
- Message types and subscription protocol (WebSocket handlers, filters, back-pressure logic).
- Authentication, rate limiting, and relay policy configuration.
- New or changed tags, fields, or event kinds that must be supported or rejected.
- Operational behaviours like deletions, expirations, and command results.
- Check our codebase for references to the relevant NIP ID (e.g. `NIP-01`, `NIP-11`, etc.), related constants, or modules named after the feature (e.g. `Deletion`, `Expiration`, `RelayInfo`, `DMs`).
4. **Decide if changes are required**
For each NIP change, decide between:
- **No action required**
- The change is editorial, clarificatory, or fully backward compatible.
- Our current behaviour already matches or exceeds the new requirements.
- **Implementation update recommended**
- The NIP introduces a new mandatory requirement for compliant relays.
- The NIP deprecates or changes behaviour we currently rely on.
- The NIP adds new event kinds, tags, fields, or message flows that we intend to support.
- **Design decision required**
- The NIP is optional or experimental and may not align with our goals.
- The change requires non-trivial architectural decisions or policy updates.
Be explicit about your reasoning, citing concrete NIP sections and relevant code locations when possible.
5. **Produce an actionable report**
Output a structured report with these sections:
1. `Summary of upstream NIP changes`
- Bullet list of changed NIPs with short descriptions.
2. `Impact on our server`
- For each NIP: whether it is **No action**, **Update recommended**, or **Design decision required**, with a brief justification.
3. `Proposed implementation tasks`
- Concrete tasks formatted as a checklist with suggested modules/files, e.g.:
- `[ ] Add support for new event kind XYZ from NIP-XX in \`lib/nostr/events/\`.`
- `[ ] Extend validation for tag ABC per NIP-YY in \`lib/nostr/validation/\` and tests in \`test/nostr/\`.`
4. `Open questions / assumptions`
- Items where you are not confident and need human confirmation.
When relevant, include short Elixir code skeletons or diff-style snippets to illustrate changes, but keep them focused and idiomatic.
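As a concrete illustration of step 2, here is a minimal sketch of the submodule diff detection, assuming the submodule path `./docs/nips/` and that shelling out to `git` via `System.cmd/2` is acceptable; it is illustrative, not part of this skill's contract:
```elixir
# Compare the docs/nips commit recorded by the parent repo with the
# submodule's current HEAD, then list which NIP documents changed.
{recorded, 0} = System.cmd("git", ["rev-parse", "HEAD:docs/nips"])
{current, 0} = System.cmd("git", ["-C", "docs/nips", "rev-parse", "HEAD"])

{changed, 0} =
  System.cmd("git", [
    "-C",
    "docs/nips",
    "diff",
    "--name-status",
    String.trim(recorded),
    String.trim(current),
    "--",
    "*.md"
  ])

IO.puts(changed)
```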
## Behavioural guidelines
- **Be conservative with breaking changes.** If a NIP change might break existing clients or stored data, call that out clearly and recommend a migration strategy.
- **Prefer incremental steps.** Propose small, well-scoped PR-sized tasks rather than a monolithic refactor.
- **Respect project conventions.** Match the existing style, naming, and architectural patterns described in `CLAUDE.md` and evident in `lib/` and `test/`.
- **Keep humans in the loop.** When unsure about how strictly to adhere to a new or optional NIP, surface the tradeoffs and suggest options instead of silently choosing one.
## Invocation examples
The skill should handle invocations like:
- `/nostr-nip-sync`
- `/nostr-nip-sync check for new NIPs that affect DMs or deletion semantics`
- `/nostr-nip-sync review changes since last submodule update and propose concrete tasks`
When the user provides extra text after the command, treat it as guidance for what to prioritize in your analysis (e.g. performance, privacy, specific NIPs, or components).

.env.example (new file, +21 lines)

@@ -0,0 +1,21 @@
PARRHESIA_IMAGE=parrhesia:latest
PARRHESIA_HOST_PORT=4000
POSTGRES_DB=parrhesia
POSTGRES_USER=parrhesia
POSTGRES_PASSWORD=parrhesia
DATABASE_URL=ecto://parrhesia:parrhesia@db:5432/parrhesia
POOL_SIZE=20
# Optional runtime overrides:
# PARRHESIA_RELAY_URL=ws://localhost:4000/relay
# PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES=false
# PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_READS=false
# PARRHESIA_POLICIES_MIN_POW_DIFFICULTY=0
# PARRHESIA_SYNC_RELAY_GUARD=false
# PARRHESIA_FEATURES_VERIFY_EVENT_SIGNATURES=true
# PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT=true
# PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY=true
# PARRHESIA_METRICS_AUTH_TOKEN=
# PARRHESIA_EXTRA_CONFIG=/config/parrhesia.runtime.exs

.github/workflows/ci.yaml (vendored, new file, +130 lines)

@@ -0,0 +1,130 @@
name: CI

on:
  push:
    branches: ["**"]
  pull_request:
    branches: ["**"]

env:
  MIX_ENV: test
  MIX_OS_DEPS_COMPILE_PARTITION_COUNT: 8

permissions:
  contents: read

jobs:
  test:
    name: ${{ matrix.name }}
    runs-on: ubuntu-24.04
    strategy:
      fail-fast: false
      matrix:
        include:
          - name: Test (OTP 27.2 / Elixir 1.18.2)
            otp: "27.2"
            elixir: "1.18.2"
            main: false
          - name: Test (OTP 28.4 / Elixir 1.19.4 + E2E)
            otp: "28.4"
            elixir: "1.19.4"
            main: true
    services:
      postgres:
        image: postgres:16-alpine
        env:
          POSTGRES_USER: postgres
          POSTGRES_PASSWORD: postgres
          POSTGRES_DB: app_test
        ports:
          - 5432:5432
        options: >-
          --health-cmd "pg_isready -U postgres"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    env:
      PGHOST: localhost
      PGPORT: 5432
      PGUSER: postgres
      PGPASSWORD: postgres
      PGDATABASE: app_test
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Init submodules
        run: |
          git submodule init marmot-ts
          git submodule update --recursive
      - name: Set up Elixir + OTP
        uses: erlef/setup-beam@v1
        with:
          otp-version: ${{ matrix.otp }}
          elixir-version: ${{ matrix.elixir }}
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 24
      - name: Install just
        run: |
          sudo apt-get update
          sudo apt-get install -y just
      # Cache deps/ directory — keyed on mix.lock
      - name: Cache Mix deps
        uses: actions/cache@v4
        id: deps-cache
        with:
          path: deps
          key: ${{ runner.os }}-mix-deps-${{ hashFiles('mix.lock') }}
          restore-keys: |
            ${{ runner.os }}-mix-deps-
      # Cache _build/ — keyed on mix.lock + OTP/Elixir versions
      - name: Cache _build
        uses: actions/cache@v4
        with:
          path: _build
          key: ${{ runner.os }}-mix-build-${{ matrix.otp }}-${{ matrix.elixir }}-${{ hashFiles('mix.lock') }}
          restore-keys: |
            ${{ runner.os }}-mix-build-${{ matrix.otp }}-${{ matrix.elixir }}-
      - name: Install Mix dependencies
        if: steps.deps-cache.outputs.cache-hit != 'true'
        run: mix deps.get
      - name: Compile (warnings as errors)
        if: ${{ matrix.main }}
        run: mix compile --warnings-as-errors
      - name: Check formatting
        if: ${{ matrix.main }}
        run: mix format --check-formatted
      - name: Credo
        if: ${{ matrix.main }}
        run: mix credo --strict --all
      - name: Check for unused locked deps
        if: ${{ matrix.main }}
        run: |
          mix deps.unlock --unused
          git diff --exit-code -- mix.lock
      - name: Run tests
        run: mix test --color
      - name: Run Node Sync E2E tests
        if: ${{ matrix.main }}
        run: just e2e node-sync
      - name: Run Marmot E2E tests
        if: ${{ matrix.main }}
        run: just e2e marmot

.github/workflows/release.yaml (vendored, new file, +193 lines)

@@ -0,0 +1,193 @@
name: Release

on:
  push:
    tags:
      - "v*.*.*"
  workflow_dispatch:
    inputs:
      push:
        description: "Push image to GHCR?"
        required: false
        default: "true"
        type: choice
        options: ["true", "false"]

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}
  FLAKE_OUTPUT: packages.x86_64-linux.dockerImage

permissions:
  contents: read
  packages: write
  id-token: write

jobs:
  test:
    name: Release Gate
    runs-on: ubuntu-24.04
    services:
      postgres:
        image: postgres:16-alpine
        env:
          POSTGRES_USER: postgres
          POSTGRES_PASSWORD: postgres
          POSTGRES_DB: app_test
        ports:
          - 5432:5432
        options: >-
          --health-cmd "pg_isready -U postgres"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    env:
      MIX_ENV: test
      PGHOST: localhost
      PGPORT: 5432
      PGUSER: postgres
      PGPASSWORD: postgres
      PGDATABASE: app_test
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Init submodules
        run: |
          git submodule init marmot-ts
          git submodule update --recursive
      - name: Set up Elixir + OTP
        uses: erlef/setup-beam@v1
        with:
          otp-version: "28.4"
          elixir-version: "1.19.4"
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 24
      - name: Install just
        run: |
          sudo apt-get update
          sudo apt-get install -y just
      - name: Cache Mix deps
        uses: actions/cache@v4
        id: deps-cache
        with:
          path: deps
          key: ${{ runner.os }}-mix-deps-${{ hashFiles('mix.lock') }}
          restore-keys: |
            ${{ runner.os }}-mix-deps-
      - name: Cache _build
        uses: actions/cache@v4
        with:
          path: _build
          key: ${{ runner.os }}-mix-build-28.4-1.19.4-${{ hashFiles('mix.lock') }}
          restore-keys: |
            ${{ runner.os }}-mix-build-28.4-1.19.4-
      - name: Install Mix dependencies
        if: steps.deps-cache.outputs.cache-hit != 'true'
        run: mix deps.get
      - name: Check tag matches Mix version
        if: ${{ startsWith(github.ref, 'refs/tags/v') }}
        run: |
          TAG_VERSION="${GITHUB_REF_NAME#v}"
          MIX_VERSION="$(mix run --no-start -e 'IO.puts(Mix.Project.config()[:version])' | tail -n 1)"
          if [ "$TAG_VERSION" != "$MIX_VERSION" ]; then
            echo "Tag version $TAG_VERSION does not match mix.exs version $MIX_VERSION"
            exit 1
          fi
      - name: Compile
        run: mix compile --warnings-as-errors
      - name: Check formatting
        run: mix format --check-formatted
      - name: Credo
        run: mix credo --strict --all
      - name: Run tests
        run: mix test --color
      - name: Run Node Sync E2E
        run: just e2e node-sync
      - name: Run Marmot E2E
        run: just e2e marmot
      - name: Check for unused locked deps
        run: |
          mix deps.unlock --unused
          git diff --exit-code -- mix.lock

  build-and-push:
    name: Build and publish image
    runs-on: ubuntu-24.04
    needs: test
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Install Nix
        uses: DeterminateSystems/nix-installer-action@main
        with:
          extra-conf: |
            experimental-features = nix-command flakes
            substituters = https://cache.nixos.org
            trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=
      - name: Magic Nix Cache
        uses: DeterminateSystems/magic-nix-cache-action@main
      - name: Extract image metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=latest,enable={{is_default_branch}}
            type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/v') }}
            type=sha,prefix=sha-,format=short
      - name: Build Docker image with Nix
        id: build
        run: |
          nix build .#${{ env.FLAKE_OUTPUT }} --out-link ./docker-image-result
          echo "archive_path=$(readlink -f ./docker-image-result)" >> "$GITHUB_OUTPUT"
      - name: Push image to GHCR
        env:
          TAGS: ${{ steps.meta.outputs.tags }}
          SHOULD_PUSH: ${{ github.event.inputs.push != 'false' }}
          ARCHIVE_PATH: ${{ steps.build.outputs.archive_path }}
        run: |
          if [ "$SHOULD_PUSH" != "true" ]; then
            echo "Skipping push"
            exit 0
          fi
          IMAGE_ARCHIVE="docker-archive:${ARCHIVE_PATH}"
          while IFS= read -r TAG; do
            if [ -n "$TAG" ]; then
              echo "Pushing $TAG"
              nix run nixpkgs#skopeo -- copy \
                --dest-creds "${{ github.actor }}:${{ secrets.GITHUB_TOKEN }}" \
                "$IMAGE_ARCHIVE" \
                "docker://$TAG"
            fi
          done <<< "$TAGS"

.gitmodules (vendored, +6 lines)

@@ -1,3 +1,9 @@
[submodule "marmot-ts"]
	path = marmot-ts
	url = https://github.com/marmot-protocol/marmot-ts.git
[submodule "docs/nips"]
	path = docs/nips
	url = https://github.com/nostr-protocol/nips.git
[submodule "nix/nostr-bench"]
	path = nix/nostr-bench
	url = ssh://gitea@git.teralink.net:10322/self/nostr-bench.git


@@ -1,11 +1,12 @@
This is a Nostr server written using Elixir and PostgreSQL.
-NOTE: Nostr and NIP specs are available in `~/nostr/` and `~/nips/`.
NOTE: NIP specs are available in `./docs/nips/`.
## Project guidelines
- Use `mix precommit` alias when you are done with all changes and fix any pending issues
- Use the already included and available `:req` (`Req`) library for HTTP requests, **avoid** `:httpoison`, `:tesla`, and `:httpc`.
- Use semantic prefixes in commit messages (feat:, fix:, docs:, chore:, test:, build:, ci:, bench:, dev:)
<!-- usage-rules-start -->
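As an editorial aside, a minimal sketch of the `:req` guideline above (the URL and header values are placeholders):
```elixir
# Fetch NIP-11 relay info with Req instead of :httpoison/:tesla/:httpc.
Req.get!("http://localhost:4413/relay", headers: [accept: "application/nostr+json"]).body
```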


@@ -1,33 +0,0 @@
Running 2 comparison run(s)...
Versions:
parrhesia 0.3.0
strfry 1.0.4 (nixpkgs)
nostr-rs-relay 0.9.0
nostr-bench 0.4.0
[run 1/2] Parrhesia
[run 1/2] strfry
[run 1/2] nostr-rs-relay
[run 2/2] Parrhesia
[run 2/2] strfry
[run 2/2] nostr-rs-relay
=== Bench comparison (averages) ===
metric parrhesia strfry nostr-rs-relay strfry/parrhesia nostr-rs/parrhesia
-------------------------- --------- -------- -------------- ---------------- ------------------
connect avg latency (ms) ↓ 13.50 3.00 2.00 0.22x 0.15x
connect max latency (ms) ↓ 22.50 5.50 3.00 0.24x 0.13x
echo throughput (TPS) ↑ 80385.00 61673.00 164516.00 0.77x 2.05x
echo throughput (MiB/s) ↑ 44.00 34.45 90.10 0.78x 2.05x
event throughput (TPS) ↑ 2000.00 3404.50 788.00 1.70x 0.39x
event throughput (MiB/s) ↑ 1.30 2.20 0.50 1.69x 0.38x
req throughput (TPS) ↑ 3664.00 1808.50 877.50 0.49x 0.24x
req throughput (MiB/s) ↑ 20.75 11.75 2.45 0.57x 0.12x
Legend: ↑ higher is better, ↓ lower is better.
Ratio columns are server/parrhesia (for ↓ metrics, <1.00x means that server is faster).
Run details:
run 1: parrhesia(echo_tps=81402, event_tps=1979, req_tps=3639, connect_avg_ms=14) | strfry(echo_tps=61745, event_tps=3457, req_tps=1818, connect_avg_ms=3) | nostr-rs-relay(echo_tps=159974, event_tps=784, req_tps=905, connect_avg_ms=2)
run 2: parrhesia(echo_tps=79368, event_tps=2021, req_tps=3689, connect_avg_ms=13) | strfry(echo_tps=61601, event_tps=3352, req_tps=1799, connect_avg_ms=3) | nostr-rs-relay(echo_tps=169058, event_tps=792, req_tps=850, connect_avg_ms=2)

CHANGELOG.md (new file, +18 lines)

@@ -0,0 +1,18 @@
# Changelog
All notable changes to this project will be documented in this file.
## [0.7.0] - 2026-03-20
First beta release!
### Added
- Configurable WebSocket keepalive support in `Parrhesia.Web.Connection`:
- server-initiated `PING` frames
- `PONG` timeout handling with connection close on timeout
- New runtime limit settings:
- `:websocket_ping_interval_seconds` (`PARRHESIA_LIMITS_WEBSOCKET_PING_INTERVAL_SECONDS`)
- `:websocket_pong_timeout_seconds` (`PARRHESIA_LIMITS_WEBSOCKET_PONG_TIMEOUT_SECONDS`)
### Changed
- NIP-42 challenge validation now uses constant-time comparison via `Plug.Crypto.secure_compare/2`.
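A sketch of the comparison pattern noted above; the variable names are illustrative, and the real call site lives in Parrhesia's NIP-42 validation:
```elixir
# Constant-time equality avoids leaking how many leading bytes matched.
expected = "relay-issued-challenge"
presented = "client-echoed-challenge"
valid? = Plug.Crypto.secure_compare(expected, presented)
```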

README.md (+635 lines)

@@ -1,13 +1,37 @@
# Parrhesia
-Parrhesia is a Nostr relay server written in Elixir/OTP with PostgreSQL storage.
<img alt="Parrhesia Logo" src="./docs/logo.svg" width="150" align="right">
Parrhesia is a Nostr relay server written in Elixir/OTP.
**BETA CONDITION BREAKING CHANGES MAY STILL HAPPEN!**
Supported storage backends:
- PostgreSQL, which is the primary and production-oriented backend
- in-memory storage, which is useful for tests, local experiments, and benchmarks
Advanced Nostr features:
- Advanced Querying: Full-text search (NIP-50) and COUNT queries (NIP-45).
- Secure Messaging: First-class support for Marmot MLS-encrypted groups and NIP-17/44/59 gift-wrapped DMs.
- Identity & Auth: NIP-42 authentication flows and NIP-86 management API with NIP-98 HTTP auth.
- Data Integrity: Negentropy-based synchronization and NIP-62 vanish flows.
It exposes:
-- a WebSocket relay endpoint at `/relay`
- listener-configurable WS/HTTP ingress, with a default `public` listener on port `4413`
- a WebSocket relay endpoint at `/relay` on listeners that enable the `nostr` feature
- NIP-11 relay info on `GET /relay` with `Accept: application/nostr+json`
-- operational HTTP endpoints (`/health`, `/ready`, `/metrics`)
-- `/metrics` is restricted by default to private/loopback source IPs
-- a NIP-86-style management API at `POST /management` (NIP-98 auth)
- operational HTTP endpoints such as `/health`, `/ready`, and `/metrics` on listeners that enable them
- a NIP-86-style management API at `POST /management` on listeners that enable the `admin` feature
Listeners can run in plain HTTP, HTTPS, mutual TLS, or proxy-terminated TLS modes. The current TLS implementation supports:
- server TLS on listener sockets
- optional client certificate admission with listener-side client pin checks
- proxy-asserted client TLS identity on trusted proxy hops
- admin-triggered certificate reload by restarting an individual listener from disk
## Supported NIPs
@@ -15,11 +39,31 @@ Current `supported_nips` list:
`1, 9, 11, 13, 17, 40, 42, 43, 44, 45, 50, 59, 62, 66, 70, 77, 86, 98`
`43` is advertised when the built-in NIP-43 relay access flow is enabled. Parrhesia generates relay-signed `28935` invite responses on `REQ`, validates join and leave requests locally, and publishes the resulting signed `8000`, `8001`, and `13534` relay membership events into its own local event store.
`50` uses ranked PostgreSQL full-text search over event `content` by default. Parrhesia applies the filter `limit` after ordering by match quality, and falls back to trigram-backed substring matching for short or symbol-heavy queries such as search-as-you-type prefixes, domains, and punctuation-rich tokens.
`66` is advertised when the built-in NIP-66 publisher is enabled and has at least one relay target. The default config enables it for the `public` relay URL. Parrhesia probes those target relays, collects the resulting NIP-11 / websocket liveness data, and then publishes the signed `10166` and `30166` events locally on this relay.
## Requirements
-- Elixir `~> 1.19`
- Elixir `~> 1.18`
- Erlang/OTP 28
- PostgreSQL (18 used in the dev environment; 16+ recommended)
- [`just`](https://github.com/casey/just) for the command runner used in this repo
- Docker or Podman plus Docker Compose support if you want to run the published container image
---
## Command runner (`just`)
This repo includes a `justfile` that provides a grouped command/subcommand CLI over common mix tasks and scripts.
```bash
just
just help bench
just help e2e
```
---
@@ -45,12 +89,12 @@ mix setup
mix run --no-halt
```
-Server listens on `http://localhost:4000` by default.
The default `public` listener binds to `http://localhost:4413`.
WebSocket clients should connect to:
```text
-ws://localhost:4000/relay
ws://localhost:4413/relay
```
### Useful endpoints
@@ -63,80 +107,360 @@ ws://localhost:4000/relay
---
## Test suites
Primary test entrypoints:
- `mix test` for the ExUnit suite
- `just e2e marmot` for the Marmot client end-to-end suite
- `just e2e node-sync` for the two-node relay sync end-to-end suite
- `just e2e node-sync-docker` for the release-image Docker two-node relay sync suite
The node-sync harnesses are driven by:
- [`scripts/run_node_sync_e2e.sh`](./scripts/run_node_sync_e2e.sh)
- [`scripts/run_node_sync_docker_e2e.sh`](./scripts/run_node_sync_docker_e2e.sh)
- [`scripts/node_sync_e2e.exs`](./scripts/node_sync_e2e.exs)
- [`compose.node-sync-e2e.yaml`](./compose.node-sync-e2e.yaml)
`just e2e node-sync` runs two real Parrhesia nodes against separate PostgreSQL databases, verifies catch-up and live sync, restarts one node, and verifies persisted resume behavior. `just e2e node-sync-docker` runs the same scenario against the release Docker image.
GitHub CI currently runs the non-Docker node-sync e2e on the main Linux matrix job. The Docker node-sync e2e remains an explicit/manual check because it depends on release-image build/runtime fidelity and a working Docker host.
---
## Embedding in another Elixir app
Parrhesia is usable as an embedded OTP dependency, not just as a standalone relay process.
The intended in-process surface is `Parrhesia.API.*`, especially:
- `Parrhesia.API.Events` for publish, query, and count
- `Parrhesia.API.Stream` for local REQ-like subscriptions
- `Parrhesia.API.Admin` for management operations
- `Parrhesia.API.Identity`, `Parrhesia.API.ACL`, and `Parrhesia.API.Sync` for relay identity, protected sync ACLs, and outbound relay sync
For host-managed HTTP/WebSocket ingress mounting, use `Parrhesia.Plug`.
Start with:
- [`docs/LOCAL_API.md`](./docs/LOCAL_API.md) for the embedding model and a minimal host setup
- generated ExDoc for the `Embedded API` module group when running `mix docs`
Important caveats for host applications:
- Parrhesia is beta software; expect some API and config churn as the runtime stabilizes.
- Parrhesia currently assumes a single runtime per BEAM node and uses globally registered process names.
- The defaults in this repo's `config/*.exs` are not imported automatically when Parrhesia is used as a dependency. A host app must set `config :parrhesia, ...` explicitly.
- The host app is responsible for migrating Parrhesia's schema, for example with `Parrhesia.Release.migrate()` or `mix ecto.migrate -r Parrhesia.Repo`.
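A minimal embedded-usage sketch under those caveats; the function names and return shapes shown here are assumptions, so check `docs/LOCAL_API.md` and the generated ExDoc for the real signatures:
```elixir
# Hypothetical host-app calls against the Parrhesia.API.* surface.
signed_event = MyNostrClient.sign(%{kind: 1, content: "hello"})  # hypothetical signer
{:ok, _accepted} = Parrhesia.API.Events.publish(signed_event)
{:ok, _events} = Parrhesia.API.Events.query([%{"kinds" => [1], "limit" => 10}])
```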
### Official embedding boundary
For embedded use, the stable boundaries are:
- `Parrhesia.API.*` for in-process publish/query/admin/sync operations
- `Parrhesia.Plug` for host-managed HTTP/WebSocket ingress mounting
If your host app owns the public HTTPS endpoint, keep this as the baseline runtime config:
```elixir
config :parrhesia, :listeners, %{}
```
Notes:
- `listeners: %{}` disables Parrhesia-managed HTTP/WebSocket ingress (`/relay`, `/management`, `/metrics`, etc.).
- Mount `Parrhesia.Plug` in your host endpoint/router when you still want Parrhesia ingress under the host's single HTTPS surface.
- `Parrhesia.Web.*` modules remain internal runtime wiring. Use `Parrhesia.Plug` as the documented mount API.
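A host-side mounting sketch under those assumptions (a Phoenix router is shown; the `/nostr` mount path is arbitrary):
```elixir
defmodule MyAppWeb.Router do
  use Phoenix.Router

  # Parrhesia ingress lives under the host's single HTTPS surface.
  forward "/nostr", Parrhesia.Plug
end
```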
The config reference below still applies when embedded. That is the primary place to document basic setup and runtime configuration changes.
---
## Production configuration
### Minimal setup
Before a Nostr client can publish its first event successfully, make sure these pieces are in place:
1. PostgreSQL is reachable from Parrhesia.
Set `DATABASE_URL` and create/migrate the database with `Parrhesia.Release.migrate()` or `mix ecto.migrate`.
PostgreSQL is the supported production datastore. The in-memory backend is intended for
non-persistent runs such as tests and benchmarks.
2. Parrhesia listeners are configured for your deployment.
The default config exposes a `public` listener on plain HTTP port `4413`, and a reverse proxy can terminate TLS and forward WebSocket traffic to `/relay`. Additional listeners can be defined in `config/*.exs`.
3. `:relay_url` matches the public relay URL clients should use.
Set `PARRHESIA_RELAY_URL` to the public relay URL exposed by the reverse proxy.
In the normal deployment model, this should be your public `wss://.../relay` URL.
4. The database schema is migrated before starting normal traffic.
The app image does not auto-run migrations on boot.
That is the actual minimum. With default policy settings, writes do not require auth, event signatures are verified, and no extra Nostr-specific bootstrap step is needed before posting ordinary events.
In `prod`, these environment variables are used:
- `DATABASE_URL` (**required**), e.g. `ecto://USER:PASS@HOST/parrhesia_prod`
-- `POOL_SIZE` (optional, default `10`)
-- `PORT` (optional, default `4000`)
- `POOL_SIZE` (optional, default `32`)
- `PORT` (optional, default `4413`)
- `PARRHESIA_*` runtime overrides for relay config, metadata, identity, sync, ACL, limits, policies, listeners, retention, and features
- `PARRHESIA_EXTRA_CONFIG` (optional path to an extra runtime config file)
`config/runtime.exs` reads these values at runtime in production releases.
-### Typical relay config
-Add/override in config files (for example in `config/prod.exs` or a `config/runtime.exs`):
-```elixir
-config :parrhesia, Parrhesia.Web.Endpoint,
-  ip: {0, 0, 0, 0},
-  port: 4000
-# Optional dedicated metrics listener (keep this internal)
-config :parrhesia, Parrhesia.Web.MetricsEndpoint,
-  enabled: true,
-  ip: {127, 0, 0, 1},
-  port: 9568
-config :parrhesia,
-  metrics: [
-    enabled_on_main_endpoint: false,
-    public: false,
-    private_networks_only: true,
-    allowed_cidrs: [],
-    auth_token: nil
-  ],
-  limits: [
-    max_frame_bytes: 1_048_576,
-    max_event_bytes: 262_144,
-    max_filters_per_req: 16,
-    max_filter_limit: 500,
-    max_subscriptions_per_connection: 32,
-    max_event_future_skew_seconds: 900,
-    max_outbound_queue: 256,
-    outbound_drain_batch_size: 64,
-    outbound_overflow_strategy: :close
-  ],
-  policies: [
-    auth_required_for_writes: false,
-    auth_required_for_reads: false,
-    min_pow_difficulty: 0,
-    accept_ephemeral_events: true,
-    mls_group_event_ttl_seconds: 300,
-    marmot_require_h_for_group_queries: true,
-    marmot_group_max_h_values_per_filter: 32,
-    marmot_group_max_query_window_seconds: 2_592_000,
-    marmot_media_max_imeta_tags_per_event: 8,
-    marmot_media_max_field_value_bytes: 1024,
-    marmot_media_max_url_bytes: 2048,
-    marmot_media_allowed_mime_prefixes: [],
-    marmot_media_reject_mip04_v1: true,
-    marmot_push_server_pubkeys: [],
-    marmot_push_max_relay_tags: 16,
-    marmot_push_max_payload_bytes: 65_536,
-    marmot_push_max_trigger_age_seconds: 120,
-    marmot_push_require_expiration: true,
-    marmot_push_max_expiration_window_seconds: 120,
-    marmot_push_max_server_recipients: 1
-  ],
-  features: [
-    nip_45_count: true,
-    nip_50_search: true,
-    nip_77_negentropy: true,
-    marmot_push_notifications: false
-  ]
-```
### Runtime env naming
For runtime overrides, use the `PARRHESIA_...` prefix:
- `PARRHESIA_RELAY_URL`
- `PARRHESIA_METADATA_HIDE_VERSION`
- `PARRHESIA_IDENTITY_*`
- `PARRHESIA_SYNC_*`
- `PARRHESIA_ACL_*`
- `PARRHESIA_TRUSTED_PROXIES`
- `PARRHESIA_PUBLIC_MAX_CONNECTIONS`
- `PARRHESIA_MODERATION_CACHE_ENABLED`
- `PARRHESIA_ENABLE_EXPIRATION_WORKER`
- `PARRHESIA_ENABLE_PARTITION_RETENTION_WORKER`
- `PARRHESIA_STORAGE_BACKEND`
- `PARRHESIA_LIMITS_*`
- `PARRHESIA_POLICIES_*`
- `PARRHESIA_METRICS_*`
- `PARRHESIA_METRICS_ENDPOINT_MAX_CONNECTIONS`
- `PARRHESIA_RETENTION_*`
- `PARRHESIA_FEATURES_*`
- `PARRHESIA_METRICS_ENDPOINT_*`
Examples:
```bash
export PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES=true
export PARRHESIA_METRICS_ALLOWED_CIDRS="10.0.0.0/8,192.168.0.0/16"
export PARRHESIA_LIMITS_OUTBOUND_OVERFLOW_STRATEGY=drop_oldest
```
Listeners themselves are primarily configured under `config :parrhesia, :listeners, ...`. The current runtime env helpers tune the default public listener and the optional dedicated metrics listener, including their connection ceilings.
For settings that are awkward to express as env vars, mount an extra config file and set `PARRHESIA_EXTRA_CONFIG` to its path inside the container.
### Config reference
CSV env vars use comma-separated values. Boolean env vars accept `1/0`, `true/false`, `yes/no`, or `on/off`.
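For example, the documented boolean forms can be normalized as below; this is an editorial sketch, not Parrhesia's actual parser:
```elixir
defmodule EnvBool do
  # Accepts the documented forms: 1/0, true/false, yes/no, on/off.
  def parse(value), do: String.downcase(String.trim(value)) in ~w(1 true yes on)
end

EnvBool.parse("YES") #=> true
```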
#### Top-level `:parrhesia`
| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:relay_url` | `PARRHESIA_RELAY_URL` | `ws://localhost:4413/relay` | Advertised relay URL and auth relay tag target |
| `:metadata.hide_version?` | `PARRHESIA_METADATA_HIDE_VERSION` | `true` | Hides the relay version from outbound `User-Agent` and NIP-11 when enabled |
| `:acl.protected_filters` | `PARRHESIA_ACL_PROTECTED_FILTERS` | `[]` | JSON-encoded protected filter list for sync ACL checks |
| `:identity.path` | `PARRHESIA_IDENTITY_PATH` | `nil` | Optional path for persisted relay identity material |
| `:identity.private_key` | `PARRHESIA_IDENTITY_PRIVATE_KEY` | `nil` | Optional inline relay private key |
| `:moderation_cache_enabled` | `PARRHESIA_MODERATION_CACHE_ENABLED` | `true` | Toggle moderation cache |
| `:enable_expiration_worker` | `PARRHESIA_ENABLE_EXPIRATION_WORKER` | `true` | Toggle background expiration worker |
| `:nip43` | config-file driven | see table below | Built-in NIP-43 relay access invite / membership flow |
| `:nip66` | config-file driven | see table below | Built-in NIP-66 discovery / monitor publisher |
| `:sync.path` | `PARRHESIA_SYNC_PATH` | `nil` | Optional path to sync peer config |
| `:sync.start_workers?` | `PARRHESIA_SYNC_START_WORKERS` | `true` | Start outbound sync workers on boot |
| `:sync.relay_guard` | `PARRHESIA_SYNC_RELAY_GUARD` | `false` | Suppress multi-node re-fanout for sync-originated events |
| `:limits` | `PARRHESIA_LIMITS_*` | see table below | Runtime override group |
| `:policies` | `PARRHESIA_POLICIES_*` | see table below | Runtime override group |
| `:listeners` | config-file driven | see notes below | Ingress listeners with bind, transport, feature, auth, network, and baseline ACL settings |
| `:retention` | `PARRHESIA_RETENTION_*` | see table below | Partition lifecycle and pruning policy |
| `:features` | `PARRHESIA_FEATURES_*` | see table below | Runtime override group |
| `:storage.events` | `-` | `Parrhesia.Storage.Adapters.Postgres.Events` | Config-file override only |
| `:storage.moderation` | `-` | `Parrhesia.Storage.Adapters.Postgres.Moderation` | Config-file override only |
| `:storage.groups` | `-` | `Parrhesia.Storage.Adapters.Postgres.Groups` | Config-file override only |
| `:storage.admin` | `-` | `Parrhesia.Storage.Adapters.Postgres.Admin` | Config-file override only |
#### `Parrhesia.Repo`
| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:url` | `DATABASE_URL` | required | Example: `ecto://USER:PASS@HOST/DATABASE` |
| `:pool_size` | `POOL_SIZE` | `32` | DB connection pool size |
| `:queue_target` | `DB_QUEUE_TARGET_MS` | `1000` | Ecto queue target in ms |
| `:queue_interval` | `DB_QUEUE_INTERVAL_MS` | `5000` | Ecto queue interval in ms |
| `:types` | `-` | `Parrhesia.PostgresTypes` | Internal config-file setting |
#### `Parrhesia.ReadRepo`
| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:url` | `DATABASE_URL` | required | Shares the primary DB URL with the write repo |
| `:pool_size` | `DB_READ_POOL_SIZE` | `32` | Read-only query pool size |
| `:queue_target` | `DB_READ_QUEUE_TARGET_MS` | `1000` | Read pool Ecto queue target in ms |
| `:queue_interval` | `DB_READ_QUEUE_INTERVAL_MS` | `5000` | Read pool Ecto queue interval in ms |
| `:types` | `-` | `Parrhesia.PostgresTypes` | Internal config-file setting |
#### `:listeners`
| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:public.bind.port` | `PORT` | `4413` | Default public listener port |
| `:public.max_connections` | `PARRHESIA_PUBLIC_MAX_CONNECTIONS` | `20000` | Target total connection ceiling for the public listener |
| `:public.proxy.trusted_cidrs` | `PARRHESIA_TRUSTED_PROXIES` | `[]` | Trusted reverse proxies for forwarded IP handling |
| `:public.features.metrics.*` | `PARRHESIA_METRICS_*` | see below | Convenience runtime overrides for metrics on the public listener |
| `:metrics.bind.port` | `PARRHESIA_METRICS_ENDPOINT_PORT` | `9568` | Optional dedicated metrics listener port |
| `:metrics.max_connections` | `PARRHESIA_METRICS_ENDPOINT_MAX_CONNECTIONS` | `1024` | Target total connection ceiling for the dedicated metrics listener |
| `:metrics.enabled` | `PARRHESIA_METRICS_ENDPOINT_ENABLED` | `false` | Enables the optional dedicated metrics listener |
Listener `max_connections` is a first-class config field. Parrhesia translates it to ThousandIsland's per-acceptor `num_connections` limit based on the active acceptor count. Raw `bandit_options[:thousand_island_options]` can still override that for advanced tuning.
Listener `transport.tls` supports `:disabled`, `:server`, `:mutual`, and `:proxy_terminated`. For TLS-enabled listeners, the main config-file fields are `certfile`, `keyfile`, optional `cacertfile`, optional `cipher_suite`, optional `client_pins`, and `proxy_headers` for proxy-terminated identity.
Every listener supports this config-file schema:
| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:id` | `-` | listener key or `:listener` | Listener identifier |
| `:enabled` | public/metrics helpers only | `true` | Whether the listener is started |
| `:bind.ip` | `-` | `0.0.0.0` (`public`) / `127.0.0.1` (`metrics`) | Bind address |
| `:bind.port` | `PORT` / `PARRHESIA_METRICS_ENDPOINT_PORT` | `4413` / `9568` | Bind port |
| `:max_connections` | `PARRHESIA_PUBLIC_MAX_CONNECTIONS` / `PARRHESIA_METRICS_ENDPOINT_MAX_CONNECTIONS` | `20000` / `1024` | Target total listener connection ceiling; accepts integer or `:infinity` in config files |
| `:transport.scheme` | `-` | `:http` | Listener scheme |
| `:transport.tls` | `-` | `%{mode: :disabled}` | TLS mode and TLS-specific options |
| `:proxy.trusted_cidrs` | `PARRHESIA_TRUSTED_PROXIES` on `public` | `[]` | Trusted proxy CIDRs for forwarded identity / IP handling |
| `:proxy.honor_x_forwarded_for` | `-` | `true` | Respect `X-Forwarded-For` from trusted proxies |
| `:network.public` | `-` | `false` | Allow only public networks |
| `:network.private_networks_only` | `-` | `false` | Allow only RFC1918 / local networks |
| `:network.allow_cidrs` | `-` | `[]` | Explicit CIDR allowlist |
| `:network.allow_all` | `-` | `true` | Allow all source IPs |
| `:features.nostr.enabled` | `-` | `true` on `public`, `false` on metrics listener | Enables `/relay` |
| `:features.admin.enabled` | `-` | `true` on `public`, `false` on metrics listener | Enables `/management` |
| `:features.metrics.enabled` | `PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT` on `public` | `true` on `public`, `true` on metrics listener | Enables `/metrics` |
| `:features.metrics.auth_token` | `PARRHESIA_METRICS_AUTH_TOKEN` | `nil` | Optional bearer token for `/metrics` |
| `:features.metrics.access.public` | `PARRHESIA_METRICS_PUBLIC` | `false` | Allow public-network access to `/metrics` |
| `:features.metrics.access.private_networks_only` | `PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY` | `true` | Restrict `/metrics` to private networks |
| `:features.metrics.access.allow_cidrs` | `PARRHESIA_METRICS_ALLOWED_CIDRS` | `[]` | Additional CIDR allowlist for `/metrics` |
| `:features.metrics.access.allow_all` | `-` | `true` | Unconditional metrics access in config files |
| `:auth.nip42_required` | `-` | `false` | Require NIP-42 for relay reads / writes |
| `:auth.nip98_required_for_admin` | `PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED` on `public` | `true` | Require NIP-98 for management API calls |
| `:baseline_acl.read` | `-` | `[]` | Static read deny/allow rules |
| `:baseline_acl.write` | `-` | `[]` | Static write deny/allow rules |
| `:bandit_options` | `-` | `[]` | Advanced Bandit / ThousandIsland passthrough |
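Putting the schema together, a config-file sketch; the key nesting follows the table above but is an assumption, so verify against `config/*.exs`:
```elixir
config :parrhesia, :listeners, %{
  public: %{
    bind: %{ip: {0, 0, 0, 0}, port: 4413},
    max_connections: 20_000,
    transport: %{scheme: :http, tls: %{mode: :disabled}},
    proxy: %{trusted_cidrs: ["10.0.0.0/8"], honor_x_forwarded_for: true},
    features: %{
      nostr: %{enabled: true},
      admin: %{enabled: true},
      metrics: %{enabled: true, access: %{private_networks_only: true}}
    }
  }
}
```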
#### `:nip66`
| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:enabled` | `-` | `true` | Enables the built-in NIP-66 publisher worker |
| `:publish_interval_seconds` | `-` | `900` | Republish cadence for `10166` and `30166` events |
| `:publish_monitor_announcement?` | `-` | `true` | Publish a `10166` monitor announcement alongside discovery events |
| `:timeout_ms` | `-` | `5000` | Probe timeout for websocket and NIP-11 checks |
| `:checks` | `-` | `[:open, :read, :nip11]` | Checks advertised in `10166` and run against each target relay during probing |
| `:targets` | `-` | `[]` | Optional explicit relay targets to probe; when empty, Parrhesia uses `:relay_url` for the `public` listener |
NIP-66 targets are probe sources, not publish destinations. Parrhesia connects to each target relay, collects the configured liveness / discovery data, and stores the resulting signed `10166` / `30166` events in its own local event store so clients can query them here.
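A publisher config sketch; the keyword-list shape is assumed from the table above:
```elixir
config :parrhesia, :nip66,
  enabled: true,
  publish_interval_seconds: 900,
  checks: [:open, :read, :nip11],
  targets: ["wss://relay.example.com"]
```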
#### `:nip43`
| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:enabled` | `-` | `true` | Enables the built-in NIP-43 relay access flow and advertises `43` in NIP-11 |
| `:invite_ttl_seconds` | `-` | `900` | Expiration window for generated invite claim strings returned by `REQ` filters targeting kind `28935` |
| `:request_max_age_seconds` | `-` | `300` | Maximum allowed age for inbound join (`28934`) and leave (`28936`) requests |
Parrhesia treats NIP-43 invite requests as synthetic relay output, not stored client input. A `REQ` for kind `28935` causes the relay to generate a fresh relay-signed invite event on the fly. Clients then submit that claim back in a protected kind `28934` join request. When a join or leave request is accepted, Parrhesia updates its local relay membership state and publishes the corresponding relay-signed `8000` / `8001` delta plus the latest `13534` membership snapshot locally.
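A wire-level sketch of that flow; the exact tag layout of the join request is defined by NIP-43 and only gestured at here:
```elixir
# 1. A REQ for kind 28935 makes the relay synthesize a signed invite event.
invite_req = ["REQ", "invite-1", %{"kinds" => [28935], "limit" => 1}]

# 2. The client echoes the invite's claim in a protected kind 28934 join
#    request (tag name shown is an assumption; see NIP-43 for the real layout).
claim = "claim-string-from-the-28935-invite"
join = ["EVENT", %{"kind" => 28934, "tags" => [["claim", claim]]}]
```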
#### `:limits`
| Atom key | ENV | Default |
| --- | --- | --- |
| `:max_frame_bytes` | `PARRHESIA_LIMITS_MAX_FRAME_BYTES` | `1048576` |
| `:max_event_bytes` | `PARRHESIA_LIMITS_MAX_EVENT_BYTES` | `262144` |
| `:max_filters_per_req` | `PARRHESIA_LIMITS_MAX_FILTERS_PER_REQ` | `16` |
| `:max_filter_limit` | `PARRHESIA_LIMITS_MAX_FILTER_LIMIT` | `500` |
| `:max_tags_per_event` | `PARRHESIA_LIMITS_MAX_TAGS_PER_EVENT` | `256` |
| `:max_tag_values_per_filter` | `PARRHESIA_LIMITS_MAX_TAG_VALUES_PER_FILTER` | `128` |
| `:ip_max_event_ingest_per_window` | `PARRHESIA_LIMITS_IP_MAX_EVENT_INGEST_PER_WINDOW` | `1000` |
| `:ip_event_ingest_window_seconds` | `PARRHESIA_LIMITS_IP_EVENT_INGEST_WINDOW_SECONDS` | `1` |
| `:relay_max_event_ingest_per_window` | `PARRHESIA_LIMITS_RELAY_MAX_EVENT_INGEST_PER_WINDOW` | `10000` |
| `:relay_event_ingest_window_seconds` | `PARRHESIA_LIMITS_RELAY_EVENT_INGEST_WINDOW_SECONDS` | `1` |
| `:max_subscriptions_per_connection` | `PARRHESIA_LIMITS_MAX_SUBSCRIPTIONS_PER_CONNECTION` | `32` |
| `:max_event_future_skew_seconds` | `PARRHESIA_LIMITS_MAX_EVENT_FUTURE_SKEW_SECONDS` | `900` |
| `:max_event_ingest_per_window` | `PARRHESIA_LIMITS_MAX_EVENT_INGEST_PER_WINDOW` | `120` |
| `:event_ingest_window_seconds` | `PARRHESIA_LIMITS_EVENT_INGEST_WINDOW_SECONDS` | `1` |
| `:auth_max_age_seconds` | `PARRHESIA_LIMITS_AUTH_MAX_AGE_SECONDS` | `600` |
| `:websocket_ping_interval_seconds` | `PARRHESIA_LIMITS_WEBSOCKET_PING_INTERVAL_SECONDS` | `30` |
| `:websocket_pong_timeout_seconds` | `PARRHESIA_LIMITS_WEBSOCKET_PONG_TIMEOUT_SECONDS` | `10` |
| `:max_outbound_queue` | `PARRHESIA_LIMITS_MAX_OUTBOUND_QUEUE` | `256` |
| `:outbound_drain_batch_size` | `PARRHESIA_LIMITS_OUTBOUND_DRAIN_BATCH_SIZE` | `64` |
| `:outbound_overflow_strategy` | `PARRHESIA_LIMITS_OUTBOUND_OVERFLOW_STRATEGY` | `:close` |
| `:max_negentropy_payload_bytes` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_PAYLOAD_BYTES` | `4096` |
| `:max_negentropy_sessions_per_connection` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_SESSIONS_PER_CONNECTION` | `8` |
| `:max_negentropy_total_sessions` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_TOTAL_SESSIONS` | `10000` |
| `:max_negentropy_items_per_session` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_ITEMS_PER_SESSION` | `50000` |
| `:negentropy_id_list_threshold` | `PARRHESIA_LIMITS_NEGENTROPY_ID_LIST_THRESHOLD` | `32` |
| `:negentropy_session_idle_timeout_seconds` | `PARRHESIA_LIMITS_NEGENTROPY_SESSION_IDLE_TIMEOUT_SECONDS` | `60` |
| `:negentropy_session_sweep_interval_seconds` | `PARRHESIA_LIMITS_NEGENTROPY_SESSION_SWEEP_INTERVAL_SECONDS` | `10` |
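Limits can also be set in a config file; a sketch mirroring the keyword-list shape of the pre-0.7 `limits: [...]` example:
```elixir
config :parrhesia,
  limits: [
    max_outbound_queue: 512,
    outbound_drain_batch_size: 64,
    outbound_overflow_strategy: :drop_oldest
  ]
```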
#### `:policies`
| Atom key | ENV | Default |
| --- | --- | --- |
| `:auth_required_for_writes` | `PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES` | `false` |
| `:auth_required_for_reads` | `PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_READS` | `false` |
| `:min_pow_difficulty` | `PARRHESIA_POLICIES_MIN_POW_DIFFICULTY` | `0` |
| `:accept_ephemeral_events` | `PARRHESIA_POLICIES_ACCEPT_EPHEMERAL_EVENTS` | `true` |
| `:mls_group_event_ttl_seconds` | `PARRHESIA_POLICIES_MLS_GROUP_EVENT_TTL_SECONDS` | `300` |
| `:marmot_require_h_for_group_queries` | `PARRHESIA_POLICIES_MARMOT_REQUIRE_H_FOR_GROUP_QUERIES` | `true` |
| `:marmot_group_max_h_values_per_filter` | `PARRHESIA_POLICIES_MARMOT_GROUP_MAX_H_VALUES_PER_FILTER` | `32` |
| `:marmot_group_max_query_window_seconds` | `PARRHESIA_POLICIES_MARMOT_GROUP_MAX_QUERY_WINDOW_SECONDS` | `2592000` |
| `:marmot_media_max_imeta_tags_per_event` | `PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_IMETA_TAGS_PER_EVENT` | `8` |
| `:marmot_media_max_field_value_bytes` | `PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_FIELD_VALUE_BYTES` | `1024` |
| `:marmot_media_max_url_bytes` | `PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_URL_BYTES` | `2048` |
| `:marmot_media_allowed_mime_prefixes` | `PARRHESIA_POLICIES_MARMOT_MEDIA_ALLOWED_MIME_PREFIXES` | `[]` |
| `:marmot_media_reject_mip04_v1` | `PARRHESIA_POLICIES_MARMOT_MEDIA_REJECT_MIP04_V1` | `true` |
| `:marmot_push_server_pubkeys` | `PARRHESIA_POLICIES_MARMOT_PUSH_SERVER_PUBKEYS` | `[]` |
| `:marmot_push_max_relay_tags` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_RELAY_TAGS` | `16` |
| `:marmot_push_max_payload_bytes` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_PAYLOAD_BYTES` | `65536` |
| `:marmot_push_max_trigger_age_seconds` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_TRIGGER_AGE_SECONDS` | `120` |
| `:marmot_push_require_expiration` | `PARRHESIA_POLICIES_MARMOT_PUSH_REQUIRE_EXPIRATION` | `true` |
| `:marmot_push_max_expiration_window_seconds` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_EXPIRATION_WINDOW_SECONDS` | `120` |
| `:marmot_push_max_server_recipients` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_SERVER_RECIPIENTS` | `1` |
| `:management_auth_required` | `PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED` | `true` |
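A relay that wants authenticated writes plus a small proof-of-work floor could combine the policy ENV names above, for example (the difficulty value here is arbitrary):

```bash
# Illustrative policy overrides using ENV names from the :policies table
export PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES=true
export PARRHESIA_POLICIES_MIN_POW_DIFFICULTY=20
```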
#### Listener-related Metrics Helpers
| Atom key | ENV | Default |
| --- | --- | --- |
| `:public.features.metrics.enabled` | `PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT` | `true` |
| `:public` | `PARRHESIA_METRICS_PUBLIC` | `false` |
| `:private_networks_only` | `PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY` | `true` |
| `:allowed_cidrs` | `PARRHESIA_METRICS_ALLOWED_CIDRS` | `[]` |
| `:auth_token` | `PARRHESIA_METRICS_AUTH_TOKEN` | `nil` |
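A locked-down metrics endpoint might combine these knobs as follows (the CIDR and token are placeholders, and the list syntax for `PARRHESIA_METRICS_ALLOWED_CIDRS` is assumed here to be a single CIDR string):

```bash
# Illustrative metrics hardening; CIDR value and token are placeholders
export PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY=true
export PARRHESIA_METRICS_ALLOWED_CIDRS="10.0.0.0/8"
export PARRHESIA_METRICS_AUTH_TOKEN="change-me"
```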
#### `:retention`
| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:check_interval_hours` | `PARRHESIA_RETENTION_CHECK_INTERVAL_HOURS` | `24` | Partition maintenance + pruning cadence |
| `:months_ahead` | `PARRHESIA_RETENTION_MONTHS_AHEAD` | `2` | Pre-create current month plus N future monthly partitions for `events` and `event_tags` |
| `:max_db_bytes` | `PARRHESIA_RETENTION_MAX_DB_BYTES` | `:infinity` | Interpreted as GiB threshold; accepts integer or `infinity` |
| `:max_months_to_keep` | `PARRHESIA_RETENTION_MAX_MONTHS_TO_KEEP` | `:infinity` | Keep at most N months (including current month); accepts integer or `infinity` |
| `:max_partitions_to_drop_per_run` | `PARRHESIA_RETENTION_MAX_PARTITIONS_TO_DROP_PER_RUN` | `1` | Safety cap for each maintenance run |
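For example, keeping roughly a year of history with an explicit size ceiling could be sketched as (values are illustrative; per the table, `max_db_bytes` is read as a GiB threshold):

```bash
# Illustrative retention overrides (ENV names from the :retention table)
export PARRHESIA_RETENTION_MAX_MONTHS_TO_KEEP=12
export PARRHESIA_RETENTION_MAX_DB_BYTES=100   # interpreted as 100 GiB
export PARRHESIA_RETENTION_CHECK_INTERVAL_HOURS=12
```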
#### `:features`
| Atom key | ENV | Default |
| --- | --- | --- |
| `:verify_event_signatures` | `-` | `true` |
| `:nip_45_count` | `PARRHESIA_FEATURES_NIP_45_COUNT` | `true` |
| `:nip_50_search` | `PARRHESIA_FEATURES_NIP_50_SEARCH` | `true` |
| `:nip_77_negentropy` | `PARRHESIA_FEATURES_NIP_77_NEGENTROPY` | `true` |
| `:marmot_push_notifications` | `PARRHESIA_FEATURES_MARMOT_PUSH_NOTIFICATIONS` | `false` |
`:verify_event_signatures` is config-file only. Production releases always verify event signatures.
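The remaining flags can be toggled per deployment; disabling NIP-50 search while opting into Marmot push notifications, for instance, would be sketched as:

```bash
# Illustrative feature toggles (ENV names from the :features table)
export PARRHESIA_FEATURES_NIP_50_SEARCH=false
export PARRHESIA_FEATURES_MARMOT_PUSH_NOTIFICATIONS=true
```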
#### Extra runtime config
| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| extra runtime config file | `PARRHESIA_EXTRA_CONFIG` | unset | Imports an additional runtime `.exs` file |
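A minimal sketch of wiring this up (the file path is arbitrary; the file must be a valid runtime `.exs`):

```bash
# Point the release at an additional runtime config file (path is illustrative)
export PARRHESIA_EXTRA_CONFIG=/etc/parrhesia/runtime.exs
_build/prod/rel/parrhesia/bin/parrhesia start
```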
---

## Deploy

```bash
mix deps.get --only prod
mix compile
mix release
_build/prod/rel/parrhesia/bin/parrhesia eval "Parrhesia.Release.migrate()"
_build/prod/rel/parrhesia/bin/parrhesia start
```

For systemd/process managers, run the release command with `start`.
### Option B: Nix release package (`default.nix`)

Build:

```bash
nix build
```

Run the built release from `./result/bin/parrhesia` (release command interface).
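Assuming the Nix-built release exposes the same release commands as Option A, migrating and then starting would look like:

```bash
# Sketch: same release command interface as Option A, via the Nix result symlink
./result/bin/parrhesia eval "Parrhesia.Release.migrate()"
./result/bin/parrhesia start
```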
### Option C: Docker image via Nix flake
Build the image tarball:
```bash
nix build .#dockerImage
# or with explicit build target:
nix build .#packages.x86_64-linux.dockerImage
```
Load it into Docker:
```bash
docker load < result
```
Run database migrations:
```bash
docker run --rm \
-e DATABASE_URL="ecto://USER:PASS@HOST/parrhesia_prod" \
parrhesia:latest \
eval "Parrhesia.Release.migrate()"
```
Start the relay:
```bash
docker run --rm \
-p 4413:4413 \
-e DATABASE_URL="ecto://USER:PASS@HOST/parrhesia_prod" \
-e POOL_SIZE=20 \
parrhesia:latest
```
### Option D: Docker Compose with PostgreSQL
The repo includes [`compose.yaml`](./compose.yaml) and [`.env.example`](./.env.example) so Docker users can run Postgres and Parrhesia together.
Set up the environment file:
```bash
cp .env.example .env
```
If you are building locally from source, build and load the image first:
```bash
nix build .#dockerImage
docker load < result
```
Then start the stack:
```bash
docker compose up -d db
docker compose run --rm migrate
docker compose up -d parrhesia
```
The relay will be available on:
```text
ws://localhost:4413/relay
```
Notes:
- `compose.yaml` keeps PostgreSQL in a separate container; the Parrhesia image only runs the app release.
- The container listens on port `4413`; use `PARRHESIA_HOST_PORT` if you want a different published host port.
- Migrations are run explicitly through the one-shot `migrate` service instead of on every app boot.
- Common runtime overrides can go straight into `.env`; see [`.env.example`](./.env.example) for examples.
- For more specialized overrides, mount a file and set `PARRHESIA_EXTRA_CONFIG=/path/in/container/runtime.exs`.
- When a GHCR image is published, set `PARRHESIA_IMAGE=ghcr.io/<owner>/parrhesia:<tag>` in `.env` and reuse the same compose flow.
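Putting those notes together, a small `.env` might look like this sketch (values are placeholders; `.env.example` is the authoritative reference):

```bash
# Illustrative .env; variables are the ones referenced in the notes above
PARRHESIA_HOST_PORT=8413
PARRHESIA_IMAGE=ghcr.io/<owner>/parrhesia:<tag>
POOL_SIZE=20
```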
---
## Benchmark
The benchmark compares two Parrhesia profiles, one backed by PostgreSQL and one backed by the in-memory adapter, against [`strfry`](https://github.com/hoytech/strfry) and [`nostr-rs-relay`](https://sr.ht/~gheartsfield/nostr-rs-relay/) using [`nostr-bench`](https://github.com/rnostr/nostr-bench). The cloud benchmark target set also includes [`nostream`](https://github.com/Cameri/nostream) and [`Haven`](https://github.com/bitvora/haven). Benchmark runs also lift Parrhesia's relay-side limits by default so the benchmark client, not server guardrails, is the main bottleneck.
`just bench compare` is a sequential mixed-workload benchmark, not an isolated per-endpoint microbenchmark. Each relay instance runs `connect`, then `echo`, then `event`, then `req` against the same live process, so later phases measure against state and load created by earlier phases.
Run it with:
```bash
just bench compare
```
### Cloud benchmark (Hetzner Cloud)
For distributed runs (one server node + multiple client nodes), use:
```bash
just bench cloud
# or: ./scripts/run_bench_cloud.sh
```
or invoke the orchestrator directly:
```bash
node scripts/cloud_bench_orchestrate.mjs
```
Prerequisites:
- [`hcloud`](https://github.com/hetznercloud/cli) CLI installed
- Hetzner Cloud token exported as `HCLOUD_TOKEN`
- local `docker`, `git`, `ssh`, and `scp` available
Example:
```bash
export HCLOUD_TOKEN=...
just bench cloud-quick
# or: ./scripts/run_bench_cloud.sh --quick
```
Outputs:
- raw client logs per run: `bench/cloud_artifacts/<run_id>/...`
- JSONL history entries (local + cloud): `bench/history.jsonl`
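Each run appends one JSON object per line, so the latest history entry can be inspected with standard tools (assuming `jq` is available):

```bash
# Show the most recent benchmark history entry
tail -n 1 bench/history.jsonl | jq .
```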
Useful history/render commands:
```bash
# List available machines and runs in history
just bench list
# Regenerate chart + README table for a machine
just bench update <machine_id>
# Regenerate from all machines
just bench update all
```
Current comparison results:
| metric | parrhesia-pg | parrhesia-mem | strfry | nostr-rs-relay | mem/pg | strfry/pg | nostr-rs-relay/pg |
| --- | ---: | ---: | ---: | ---: | ---: | ---: | ---: |
| connect avg latency (ms) ↓ | 34.67 | 43.33 | 2.67 | 2.67 | 1.25x | **0.08x** | **0.08x** |
| connect max latency (ms) ↓ | 61.67 | 74.67 | 4.67 | 4.00 | 1.21x | **0.08x** | **0.06x** |
| echo throughput (TPS) ↑ | 72441.00 | 62704.67 | 61189.33 | 152654.33 | 0.87x | 0.84x | **2.11x** |
| echo throughput (MiB/s) ↑ | 39.67 | 34.30 | 34.20 | 83.63 | 0.86x | 0.86x | **2.11x** |
| event throughput (TPS) ↑ | 1897.33 | 1370.00 | 3426.67 | 772.67 | 0.72x | **1.81x** | 0.41x |
| event throughput (MiB/s) ↑ | 1.23 | 0.87 | 2.20 | 0.50 | 0.70x | **1.78x** | 0.41x |
| req throughput (TPS) ↑ | 13.33 | 47.00 | 1811.33 | 878.33 | **3.52x** | **135.85x** | **65.88x** |
| req throughput (MiB/s) ↑ | 0.03 | 0.17 | 11.77 | 2.40 | **5.00x** | **353.00x** | **72.00x** |
Higher is better for `↑` metrics. Lower is better for `↓` metrics.
(Results from a Linux container on a 6-core Intel i5-8400T with NVMe drive, PostgreSQL 18)
![Benchmark chart](./bench/chart.svg)
---

## Development quality checks

Before opening a PR:

```bash
mix precommit
```

Additional external CLI end-to-end checks with `nak`:

```bash
just e2e nak
```

For Marmot client end-to-end checks (TypeScript/Node suite using `marmot-ts`, included in `precommit`):

```bash
just e2e marmot
```
`bench/chart.gnuplot` (new file, 31 lines):

```gnuplot
# bench/chart.gnuplot — multi-panel SVG showing relay performance over git tags.
#
# Invoked by scripts/run_bench_update.sh with:
# gnuplot -e "data_dir='...'" -e "output_file='...'" bench/chart.gnuplot
#
# The data_dir contains per-metric TSV files and a plot_commands.gnuplot
# fragment generated by the data-prep step that defines the actual plot
# directives (handling variable server columns).
set terminal svg enhanced size 1200,900 font "sans-serif,11" background "#f3f4f6"
set output output_file
set style data linespoints
set key outside right top
set grid ytics
set xtics rotate by -30
set datafile separator "\t"
# parrhesia-pg: blue solid, parrhesia-memory: green solid
# strfry: orange dashed, nostr-rs-relay: red dashed
set linetype 1 lc rgb "#2563eb" lw 2 pt 7 ps 1.0
set linetype 2 lc rgb "#16a34a" lw 2 pt 9 ps 1.0
set linetype 3 lc rgb "#ea580c" lw 1.5 pt 5 ps 0.8 dt 2
set linetype 4 lc rgb "#dc2626" lw 1.5 pt 4 ps 0.8 dt 2
set multiplot layout 2,2 title "Parrhesia Relay Benchmark History" font ",14"
# Load dynamically generated plot commands (handles variable column counts)
load data_dir."/plot_commands.gnuplot"
unset multiplot
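```

The header comments above show how `scripts/run_bench_update.sh` drives this script; a manual invocation would look roughly like the following (the `data_dir` path is illustrative):

```bash
# Hypothetical manual run; real arguments come from scripts/run_bench_update.sh
gnuplot -e "data_dir='bench/chart_data'" -e "output_file='bench/chart.svg'" bench/chart.gnuplot
```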

`bench/chart.svg` (new file, 953 lines): the generated benchmark chart, a multi-panel SVG titled "Parrhesia Relay Benchmark History". The panels plot parrhesia-pg, parrhesia-memory, strfry, and nostr-rs-relay in TPS across release tags v0.2.0 through v0.6.0, with panel titles such as "Event Throughput (TPS) — higher is better" and "Req Throughput (TPS) — higher is better".
<text><tspan font-family="sans-serif" >strfry</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='rgb(234, 88, 12)' stroke-dasharray='3.8,6.0' d='M545.60,551.25 L584.61,551.25 M89.55,801.16 L167.04,802.55 L244.52,807.98 L322.01,807.40 L399.49,791.06 '/> <use xlink:href='#gpPt4' transform='translate(89.55,801.16) scale(3.30)' color='rgb(234, 88, 12)'/>
<use xlink:href='#gpPt4' transform='translate(167.04,802.55) scale(3.30)' color='rgb(234, 88, 12)'/>
<use xlink:href='#gpPt4' transform='translate(244.52,807.98) scale(3.30)' color='rgb(234, 88, 12)'/>
<use xlink:href='#gpPt4' transform='translate(322.01,807.40) scale(3.30)' color='rgb(234, 88, 12)'/>
<use xlink:href='#gpPt4' transform='translate(399.49,791.06) scale(3.30)' color='rgb(234, 88, 12)'/>
<use xlink:href='#gpPt4' transform='translate(565.10,551.25) scale(3.30)' color='rgb(234, 88, 12)'/>
</g>
</g>
<g id="gnuplot_plot_4c" ><title>nostr-rs-relay</title>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
<g transform="translate(537.91,571.33)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="end">
<text><tspan font-family="sans-serif" >nostr-rs-relay</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='rgb(220, 38, 38)' stroke-dasharray='3.8,6.0' d='M545.60,567.75 L584.61,567.75 M89.55,560.04 L167.04,547.56 L244.52,557.15 L322.01,578.45 L399.49,544.60 '/> <use xlink:href='#gpPt3' transform='translate(89.55,560.04) scale(3.30)' color='rgb(220, 38, 38)'/>
<use xlink:href='#gpPt3' transform='translate(167.04,547.56) scale(3.30)' color='rgb(220, 38, 38)'/>
<use xlink:href='#gpPt3' transform='translate(244.52,557.15) scale(3.30)' color='rgb(220, 38, 38)'/>
<use xlink:href='#gpPt3' transform='translate(322.01,578.45) scale(3.30)' color='rgb(220, 38, 38)'/>
<use xlink:href='#gpPt3' transform='translate(399.49,544.60) scale(3.30)' color='rgb(220, 38, 38)'/>
<use xlink:href='#gpPt3' transform='translate(565.10,567.75) scale(3.30)' color='rgb(220, 38, 38)'/>
</g>
</g>
<g fill="none" color="#F3F4F6" stroke="rgb(220, 38, 38)" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="black" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='black' d='M89.55,510.00 L89.55,860.44 L399.49,860.44 L399.49,510.00 L89.55,510.00 Z '/> <g transform="translate(17.58,685.22) rotate(270.00)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="middle">
<text><tspan font-family="sans-serif" >TPS</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
<g transform="translate(244.52,488.83)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="middle">
<text><tspan font-family="sans-serif" >Echo Throughput (TPS) — higher is better</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M658.79,860.44 L999.49,860.44 '/></g>
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='black' d='M658.79,860.44 L667.04,860.44 M999.49,860.44 L991.24,860.44 '/> <g transform="translate(651.10,864.02)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="end">
<text><tspan font-family="sans-serif" > 0</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M658.79,821.50 L999.49,821.50 '/></g>
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='black' d='M658.79,821.50 L667.04,821.50 M999.49,821.50 L991.24,821.50 '/> <g transform="translate(651.10,825.08)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="end">
<text><tspan font-family="sans-serif" > 5</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M658.79,782.56 L999.49,782.56 '/></g>
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='black' d='M658.79,782.56 L667.04,782.56 M999.49,782.56 L991.24,782.56 '/> <g transform="translate(651.10,786.14)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="end">
<text><tspan font-family="sans-serif" > 10</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M658.79,743.63 L999.49,743.63 '/></g>
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='black' d='M658.79,743.63 L667.04,743.63 M999.49,743.63 L991.24,743.63 '/> <g transform="translate(651.10,747.21)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="end">
<text><tspan font-family="sans-serif" > 15</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M658.79,704.69 L999.49,704.69 '/></g>
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='black' d='M658.79,704.69 L667.04,704.69 M999.49,704.69 L991.24,704.69 '/> <g transform="translate(651.10,708.27)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="end">
<text><tspan font-family="sans-serif" > 20</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M658.79,665.75 L999.49,665.75 '/></g>
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='black' d='M658.79,665.75 L667.04,665.75 M999.49,665.75 L991.24,665.75 '/> <g transform="translate(651.10,669.33)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="end">
<text><tspan font-family="sans-serif" > 25</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M658.79,626.81 L999.49,626.81 '/></g>
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='black' d='M658.79,626.81 L667.04,626.81 M999.49,626.81 L991.24,626.81 '/> <g transform="translate(651.10,630.39)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="end">
<text><tspan font-family="sans-serif" > 30</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M658.79,587.88 L999.49,587.88 '/></g>
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='black' d='M658.79,587.88 L667.04,587.88 M999.49,587.88 L991.24,587.88 '/> <g transform="translate(651.10,591.46)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="end">
<text><tspan font-family="sans-serif" > 35</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M658.79,548.94 L999.49,548.94 '/></g>
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='black' d='M658.79,548.94 L667.04,548.94 M999.49,548.94 L991.24,548.94 '/> <g transform="translate(651.10,552.52)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="end">
<text><tspan font-family="sans-serif" > 40</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M658.79,510.00 L999.49,510.00 '/></g>
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='black' d='M658.79,510.00 L667.04,510.00 M999.49,510.00 L991.24,510.00 '/> <g transform="translate(651.10,513.58)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="end">
<text><tspan font-family="sans-serif" > 45</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='black' d='M658.79,860.44 L658.79,852.19 M658.79,510.00 L658.79,518.25 '/> <g transform="translate(657.00,871.23) rotate(30.00)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="start">
<text><tspan font-family="sans-serif" >v0.2.0</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='black' d='M743.97,860.44 L743.97,852.19 M743.97,510.00 L743.97,518.25 '/> <g transform="translate(742.18,871.23) rotate(30.00)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="start">
<text><tspan font-family="sans-serif" >v0.3.0</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='black' d='M829.14,860.44 L829.14,852.19 M829.14,510.00 L829.14,518.25 '/> <g transform="translate(827.35,871.23) rotate(30.00)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="start">
<text><tspan font-family="sans-serif" >v0.4.0</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='black' d='M914.32,860.44 L914.32,852.19 M914.32,510.00 L914.32,518.25 '/> <g transform="translate(912.53,871.23) rotate(30.00)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="start">
<text><tspan font-family="sans-serif" >v0.5.0</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='black' d='M999.49,860.44 L999.49,852.19 M999.49,510.00 L999.49,518.25 '/> <g transform="translate(997.70,871.23) rotate(30.00)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="start">
<text><tspan font-family="sans-serif" >v0.6.0</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='black' d='M658.79,510.00 L658.79,860.44 L999.49,860.44 L999.49,510.00 L658.79,510.00 Z '/></g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g id="gnuplot_plot_1d" ><title>parrhesia-pg</title>
<g fill="none" color="#F3F4F6" stroke="black" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
<g transform="translate(1137.91,521.83)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="end">
<text><tspan font-family="sans-serif" >parrhesia-pg</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='rgb( 37, 99, 235)' d='M1145.60,518.25 L1184.61,518.25 M658.79,746.22 L743.97,759.20 L829.14,772.18 L914.32,590.47 L999.49,652.77 '/> <use xlink:href='#gpPt6' transform='translate(658.79,746.22) scale(4.12)' color='rgb( 37, 99, 235)'/>
<use xlink:href='#gpPt6' transform='translate(743.97,759.20) scale(4.12)' color='rgb( 37, 99, 235)'/>
<use xlink:href='#gpPt6' transform='translate(829.14,772.18) scale(4.12)' color='rgb( 37, 99, 235)'/>
<use xlink:href='#gpPt6' transform='translate(914.32,590.47) scale(4.12)' color='rgb( 37, 99, 235)'/>
<use xlink:href='#gpPt6' transform='translate(999.49,652.77) scale(4.12)' color='rgb( 37, 99, 235)'/>
<use xlink:href='#gpPt6' transform='translate(1165.10,518.25) scale(4.12)' color='rgb( 37, 99, 235)'/>
</g>
</g>
<g id="gnuplot_plot_2d" ><title>parrhesia-memory</title>
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
<g transform="translate(1137.91,538.33)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="end">
<text><tspan font-family="sans-serif" >parrhesia-memory</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='rgb( 22, 163, 74)' d='M1145.60,534.75 L1184.61,534.75 M658.79,790.35 L743.97,759.20 L829.14,782.56 L914.32,522.98 L999.49,746.22 '/> <use xlink:href='#gpPt8' transform='translate(658.79,790.35) scale(4.12)' color='rgb( 22, 163, 74)'/>
<use xlink:href='#gpPt8' transform='translate(743.97,759.20) scale(4.12)' color='rgb( 22, 163, 74)'/>
<use xlink:href='#gpPt8' transform='translate(829.14,782.56) scale(4.12)' color='rgb( 22, 163, 74)'/>
<use xlink:href='#gpPt8' transform='translate(914.32,522.98) scale(4.12)' color='rgb( 22, 163, 74)'/>
<use xlink:href='#gpPt8' transform='translate(999.49,746.22) scale(4.12)' color='rgb( 22, 163, 74)'/>
<use xlink:href='#gpPt8' transform='translate(1165.10,534.75) scale(4.12)' color='rgb( 22, 163, 74)'/>
</g>
</g>
<g id="gnuplot_plot_3d" ><title>strfry</title>
<g fill="none" color="#F3F4F6" stroke="rgb( 22, 163, 74)" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
<g transform="translate(1137.91,554.83)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="end">
<text><tspan font-family="sans-serif" >strfry</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='rgb(234, 88, 12)' stroke-dasharray='3.8,6.0' d='M1145.60,551.25 L1184.61,551.25 M658.79,834.48 L743.97,831.89 L829.14,834.48 L914.32,839.67 L999.49,837.08 '/> <use xlink:href='#gpPt4' transform='translate(658.79,834.48) scale(3.30)' color='rgb(234, 88, 12)'/>
<use xlink:href='#gpPt4' transform='translate(743.97,831.89) scale(3.30)' color='rgb(234, 88, 12)'/>
<use xlink:href='#gpPt4' transform='translate(829.14,834.48) scale(3.30)' color='rgb(234, 88, 12)'/>
<use xlink:href='#gpPt4' transform='translate(914.32,839.67) scale(3.30)' color='rgb(234, 88, 12)'/>
<use xlink:href='#gpPt4' transform='translate(999.49,837.08) scale(3.30)' color='rgb(234, 88, 12)'/>
<use xlink:href='#gpPt4' transform='translate(1165.10,551.25) scale(3.30)' color='rgb(234, 88, 12)'/>
</g>
</g>
<g id="gnuplot_plot_4d" ><title>nostr-rs-relay</title>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
<g transform="translate(1137.91,571.33)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="end">
<text><tspan font-family="sans-serif" >nostr-rs-relay</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='rgb(220, 38, 38)' stroke-dasharray='3.8,6.0' d='M1145.60,567.75 L1184.61,567.75 M658.79,839.67 L743.97,844.86 L829.14,839.67 L914.32,839.67 L999.49,844.86 '/> <use xlink:href='#gpPt3' transform='translate(658.79,839.67) scale(3.30)' color='rgb(220, 38, 38)'/>
<use xlink:href='#gpPt3' transform='translate(743.97,844.86) scale(3.30)' color='rgb(220, 38, 38)'/>
<use xlink:href='#gpPt3' transform='translate(829.14,839.67) scale(3.30)' color='rgb(220, 38, 38)'/>
<use xlink:href='#gpPt3' transform='translate(914.32,839.67) scale(3.30)' color='rgb(220, 38, 38)'/>
<use xlink:href='#gpPt3' transform='translate(999.49,844.86) scale(3.30)' color='rgb(220, 38, 38)'/>
<use xlink:href='#gpPt3' transform='translate(1165.10,567.75) scale(3.30)' color='rgb(220, 38, 38)'/>
</g>
</g>
<g fill="none" color="#F3F4F6" stroke="rgb(220, 38, 38)" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="black" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
<path stroke='black' d='M658.79,510.00 L658.79,860.44 L999.49,860.44 L999.49,510.00 L658.79,510.00 Z '/> <g transform="translate(617.58,685.22) rotate(270.00)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="middle">
<text><tspan font-family="sans-serif" >ms</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
<g transform="translate(829.14,488.83)" stroke="none" fill="black" font-family="sans-serif" font-size="11.00" text-anchor="middle">
<text><tspan font-family="sans-serif" >Connect Avg Latency (ms) — lower is better</tspan></text>
</g>
</g>
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
</g>
</g>
</svg>

After

Width:  |  Height:  |  Size: 72 KiB

5
bench/history.jsonl Normal file

@@ -0,0 +1,5 @@
{"schema_version":2,"timestamp":"2026-03-18T21:35:03Z","machine_id":"agent","git_tag":"v0.6.0","git_commit":"7b337d9","runs":3,"versions":{"parrhesia":"0.6.0","strfry":"strfry 1.0.4 (nixpkgs)","nostr-rs-relay":"nostr-rs-relay 0.9.0","nostr-bench":"nostr-bench 0.4.0"},"servers":{"parrhesia-pg":{"connect_avg_ms":26.666666666666668,"connect_max_ms":45.333333333333336,"echo_tps":68100.33333333333,"echo_mibs":37.233333333333334,"event_tps":1647.3333333333333,"event_mibs":1.0666666666666667,"req_tps":3576.6666666666665,"req_mibs":18.833333333333332},"parrhesia-memory":{"connect_avg_ms":14.666666666666666,"connect_max_ms":24.333333333333332,"echo_tps":55978,"echo_mibs":30.633333333333336,"event_tps":882,"event_mibs":0.5666666666666668,"req_tps":6888,"req_mibs":36.06666666666666},"strfry":{"connect_avg_ms":3,"connect_max_ms":4.666666666666667,"echo_tps":67718.33333333333,"echo_mibs":37.86666666666667,"event_tps":3548.3333333333335,"event_mibs":2.3,"req_tps":1808,"req_mibs":11.699999999999998},"nostr-rs-relay":{"connect_avg_ms":2,"connect_max_ms":3.3333333333333335,"echo_tps":166178,"echo_mibs":91.03333333333335,"event_tps":787,"event_mibs":0.5,"req_tps":860.6666666666666,"req_mibs":2.4}},"run_id":"local-2026-03-18T21:35:03Z-agent-7b337d9","source":{"kind":"local","git_tag":"v0.6.0","git_commit":"7b337d9"},"infra":{"provider":"local"},"bench":{"runs":3,"targets":["parrhesia-pg","parrhesia-memory","strfry","nostr-rs-relay"]}}
{"schema_version":2,"timestamp":"2026-03-18T22:14:37Z","machine_id":"agent","git_tag":"v0.2.0","git_commit":"b20dbf6","runs":3,"versions":{"parrhesia":"0.2.0","strfry":"strfry 1.0.4 (nixpkgs)","nostr-rs-relay":"nostr-rs-relay 0.9.0","nostr-bench":"nostr-bench 0.4.0"},"servers":{"parrhesia-pg":{"connect_avg_ms":14.666666666666666,"connect_max_ms":25.666666666666668,"echo_tps":77133,"echo_mibs":42.233333333333334,"event_tps":1602.6666666666667,"event_mibs":1.0666666666666667,"req_tps":2418,"req_mibs":12.5},"parrhesia-memory":{"connect_avg_ms":9,"connect_max_ms":16,"echo_tps":64218.333333333336,"echo_mibs":35.166666666666664,"event_tps":1578.3333333333333,"event_mibs":1,"req_tps":2431.3333333333335,"req_mibs":12.633333333333333},"strfry":{"connect_avg_ms":3.3333333333333335,"connect_max_ms":6,"echo_tps":63682.666666666664,"echo_mibs":35.6,"event_tps":3477.3333333333335,"event_mibs":2.2333333333333334,"req_tps":1804,"req_mibs":11.733333333333334},"nostr-rs-relay":{"connect_avg_ms":2.6666666666666665,"connect_max_ms":4.333333333333333,"echo_tps":160009,"echo_mibs":87.63333333333333,"event_tps":762,"event_mibs":0.4666666666666666,"req_tps":831,"req_mibs":2.2333333333333334}},"run_id":"local-2026-03-18T22:14:37Z-agent-b20dbf6","source":{"kind":"local","git_tag":"v0.2.0","git_commit":"b20dbf6"},"infra":{"provider":"local"},"bench":{"runs":3,"targets":["parrhesia-pg","parrhesia-memory","strfry","nostr-rs-relay"]}}
{"schema_version":2,"timestamp":"2026-03-18T22:22:12Z","machine_id":"agent","git_tag":"v0.3.0","git_commit":"8c8d5a8","runs":3,"versions":{"parrhesia":"0.3.0","strfry":"strfry 1.0.4 (nixpkgs)","nostr-rs-relay":"nostr-rs-relay 0.9.0","nostr-bench":"nostr-bench 0.4.0"},"servers":{"parrhesia-pg":{"connect_avg_ms":13,"connect_max_ms":21.666666666666668,"echo_tps":70703.33333333333,"echo_mibs":38.7,"event_tps":1970.6666666666667,"event_mibs":1.3,"req_tps":3614,"req_mibs":20.966666666666665},"parrhesia-memory":{"connect_avg_ms":13,"connect_max_ms":22.333333333333332,"echo_tps":60452.333333333336,"echo_mibs":33.1,"event_tps":1952.6666666666667,"event_mibs":1.3,"req_tps":3616,"req_mibs":20.766666666666666},"strfry":{"connect_avg_ms":3.6666666666666665,"connect_max_ms":6,"echo_tps":63128.666666666664,"echo_mibs":35.300000000000004,"event_tps":3442,"event_mibs":2.2333333333333334,"req_tps":1804,"req_mibs":11.699999999999998},"nostr-rs-relay":{"connect_avg_ms":2,"connect_max_ms":3.3333333333333335,"echo_tps":164995.33333333334,"echo_mibs":90.36666666666667,"event_tps":761.6666666666666,"event_mibs":0.5,"req_tps":846.3333333333334,"req_mibs":2.333333333333333}},"run_id":"local-2026-03-18T22:22:12Z-agent-8c8d5a8","source":{"kind":"local","git_tag":"v0.3.0","git_commit":"8c8d5a8"},"infra":{"provider":"local"},"bench":{"runs":3,"targets":["parrhesia-pg","parrhesia-memory","strfry","nostr-rs-relay"]}}
{"schema_version":2,"timestamp":"2026-03-18T22:30:08Z","machine_id":"agent","git_tag":"v0.4.0","git_commit":"b86b5db","runs":3,"versions":{"parrhesia":"0.4.0","strfry":"strfry 1.0.4 (nixpkgs)","nostr-rs-relay":"nostr-rs-relay 0.9.0","nostr-bench":"nostr-bench 0.4.0"},"servers":{"parrhesia-pg":{"connect_avg_ms":11.333333333333334,"connect_max_ms":20.666666666666668,"echo_tps":69139.33333333333,"echo_mibs":37.833333333333336,"event_tps":1938.6666666666667,"event_mibs":1.3,"req_tps":4619.666666666667,"req_mibs":26.266666666666666},"parrhesia-memory":{"connect_avg_ms":10,"connect_max_ms":17.333333333333332,"echo_tps":62715.333333333336,"echo_mibs":34.333333333333336,"event_tps":1573,"event_mibs":1.0333333333333334,"req_tps":4768,"req_mibs":23.733333333333334},"strfry":{"connect_avg_ms":3.3333333333333335,"connect_max_ms":6,"echo_tps":60956.666666666664,"echo_mibs":34.06666666666667,"event_tps":3380.6666666666665,"event_mibs":2.2,"req_tps":1820.3333333333333,"req_mibs":11.800000000000002},"nostr-rs-relay":{"connect_avg_ms":2.6666666666666665,"connect_max_ms":4.333333333333333,"echo_tps":161165.33333333334,"echo_mibs":88.26666666666665,"event_tps":768,"event_mibs":0.5,"req_tps":847.3333333333334,"req_mibs":2.3000000000000003}},"run_id":"local-2026-03-18T22:30:08Z-agent-b86b5db","source":{"kind":"local","git_tag":"v0.4.0","git_commit":"b86b5db"},"infra":{"provider":"local"},"bench":{"runs":3,"targets":["parrhesia-pg","parrhesia-memory","strfry","nostr-rs-relay"]}}
{"schema_version":2,"timestamp":"2026-03-18T22:36:37Z","machine_id":"agent","git_tag":"v0.5.0","git_commit":"e557eba","runs":3,"versions":{"parrhesia":"0.5.0","strfry":"strfry 1.0.4 (nixpkgs)","nostr-rs-relay":"nostr-rs-relay 0.9.0","nostr-bench":"nostr-bench 0.4.0"},"servers":{"parrhesia-pg":{"connect_avg_ms":34.666666666666664,"connect_max_ms":61.666666666666664,"echo_tps":72441,"echo_mibs":39.666666666666664,"event_tps":1897.3333333333333,"event_mibs":1.2333333333333334,"req_tps":13.333333333333334,"req_mibs":0.03333333333333333},"parrhesia-memory":{"connect_avg_ms":43.333333333333336,"connect_max_ms":74.66666666666667,"echo_tps":62704.666666666664,"echo_mibs":34.300000000000004,"event_tps":1370,"event_mibs":0.8666666666666667,"req_tps":47,"req_mibs":0.16666666666666666},"strfry":{"connect_avg_ms":2.6666666666666665,"connect_max_ms":4.666666666666667,"echo_tps":61189.333333333336,"echo_mibs":34.2,"event_tps":3426.6666666666665,"event_mibs":2.2,"req_tps":1811.3333333333333,"req_mibs":11.766666666666666},"nostr-rs-relay":{"connect_avg_ms":2.6666666666666665,"connect_max_ms":4,"echo_tps":152654.33333333334,"echo_mibs":83.63333333333333,"event_tps":772.6666666666666,"event_mibs":0.5,"req_tps":878.3333333333334,"req_mibs":2.4}},"run_id":"local-2026-03-18T22:36:37Z-agent-e557eba","source":{"kind":"local","git_tag":"v0.5.0","git_commit":"e557eba"},"infra":{"provider":"local"},"bench":{"runs":3,"targets":["parrhesia-pg","parrhesia-memory","strfry","nostr-rs-relay"]}}


@@ -0,0 +1,92 @@
services:
  db-a:
    image: postgres:17
    restart: unless-stopped
    environment:
      POSTGRES_DB: parrhesia_a
      POSTGRES_USER: parrhesia
      POSTGRES_PASSWORD: parrhesia
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
      interval: 5s
      timeout: 5s
      retries: 12
    volumes:
      - postgres-a-data:/var/lib/postgresql/data
  db-b:
    image: postgres:17
    restart: unless-stopped
    environment:
      POSTGRES_DB: parrhesia_b
      POSTGRES_USER: parrhesia
      POSTGRES_PASSWORD: parrhesia
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
      interval: 5s
      timeout: 5s
      retries: 12
    volumes:
      - postgres-b-data:/var/lib/postgresql/data
  migrate-a:
    image: ${PARRHESIA_IMAGE:-parrhesia:latest}
    restart: "no"
    depends_on:
      db-a:
        condition: service_healthy
    environment:
      DATABASE_URL: ecto://parrhesia:parrhesia@db-a:5432/parrhesia_a
      POOL_SIZE: ${POOL_SIZE:-20}
      PARRHESIA_ACL_PROTECTED_FILTERS: ${PARRHESIA_ACL_PROTECTED_FILTERS}
    command: ["eval", "Parrhesia.Release.migrate()"]
  migrate-b:
    image: ${PARRHESIA_IMAGE:-parrhesia:latest}
    restart: "no"
    depends_on:
      db-b:
        condition: service_healthy
    environment:
      DATABASE_URL: ecto://parrhesia:parrhesia@db-b:5432/parrhesia_b
      POOL_SIZE: ${POOL_SIZE:-20}
      PARRHESIA_ACL_PROTECTED_FILTERS: ${PARRHESIA_ACL_PROTECTED_FILTERS}
    command: ["eval", "Parrhesia.Release.migrate()"]
  parrhesia-a:
    image: ${PARRHESIA_IMAGE:-parrhesia:latest}
    restart: unless-stopped
    depends_on:
      db-a:
        condition: service_healthy
    environment:
      DATABASE_URL: ecto://parrhesia:parrhesia@db-a:5432/parrhesia_a
      POOL_SIZE: ${POOL_SIZE:-20}
      PORT: 4413
      PARRHESIA_RELAY_URL: ${PARRHESIA_NODE_A_RELAY_URL:-ws://parrhesia-a:4413/relay}
      PARRHESIA_ACL_PROTECTED_FILTERS: ${PARRHESIA_ACL_PROTECTED_FILTERS}
      PARRHESIA_IDENTITY_PATH: /tmp/parrhesia-a/server_identity.json
      PARRHESIA_SYNC_PATH: /tmp/parrhesia-a/sync_servers.json
    ports:
      - "${PARRHESIA_NODE_A_HOST_PORT:-45131}:4413"
  parrhesia-b:
    image: ${PARRHESIA_IMAGE:-parrhesia:latest}
    restart: unless-stopped
    depends_on:
      db-b:
        condition: service_healthy
    environment:
      DATABASE_URL: ecto://parrhesia:parrhesia@db-b:5432/parrhesia_b
      POOL_SIZE: ${POOL_SIZE:-20}
      PORT: 4413
      PARRHESIA_RELAY_URL: ${PARRHESIA_NODE_B_RELAY_URL:-ws://parrhesia-b:4413/relay}
      PARRHESIA_ACL_PROTECTED_FILTERS: ${PARRHESIA_ACL_PROTECTED_FILTERS}
      PARRHESIA_IDENTITY_PATH: /tmp/parrhesia-b/server_identity.json
      PARRHESIA_SYNC_PATH: /tmp/parrhesia-b/sync_servers.json
    ports:
      - "${PARRHESIA_NODE_B_HOST_PORT:-45132}:4413"
volumes:
  postgres-a-data:
  postgres-b-data:
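Both migrate services run Parrhesia.Release.migrate() through the release's eval command. The actual module is not shown in this diff; a sketch of the conventional Ecto release-migration module such a command relies on, for orientation only:

defmodule Parrhesia.Release do
  @app :parrhesia

  # Run all pending migrations for every configured repo.
  def migrate do
    Application.load(@app)

    for repo <- Application.fetch_env!(@app, :ecto_repos) do
      {:ok, _fun_return, _apps} =
        Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :up, all: true))
    end
  end
end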

42
compose.yaml Normal file

@@ -0,0 +1,42 @@
services:
  db:
    image: postgres:17
    restart: unless-stopped
    environment:
      POSTGRES_DB: ${POSTGRES_DB:-parrhesia}
      POSTGRES_USER: ${POSTGRES_USER:-parrhesia}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-parrhesia}
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
      interval: 5s
      timeout: 5s
      retries: 12
    volumes:
      - postgres-data:/var/lib/postgresql/data
  migrate:
    image: ${PARRHESIA_IMAGE:-parrhesia:latest}
    profiles: ["tools"]
    restart: "no"
    depends_on:
      db:
        condition: service_healthy
    environment:
      DATABASE_URL: ${DATABASE_URL:-ecto://parrhesia:parrhesia@db:5432/parrhesia}
      POOL_SIZE: ${POOL_SIZE:-20}
    command: ["eval", "Parrhesia.Release.migrate()"]
  parrhesia:
    image: ${PARRHESIA_IMAGE:-parrhesia:latest}
    restart: unless-stopped
    depends_on:
      db:
        condition: service_healthy
    environment:
      DATABASE_URL: ${DATABASE_URL:-ecto://parrhesia:parrhesia@db:5432/parrhesia}
      POOL_SIZE: ${POOL_SIZE:-20}
    ports:
      - "${PARRHESIA_HOST_PORT:-4413}:4413"
volumes:
  postgres-data:


@@ -1,26 +1,73 @@
 import Config
 
+project_version =
+  case Mix.Project.config()[:version] do
+    version when is_binary(version) -> version
+    version -> to_string(version)
+  end
+
 config :postgrex, :json_library, JSON
 
 config :parrhesia,
+  metadata: [
+    name: "Parrhesia",
+    version: project_version,
+    hide_version?: true
+  ],
+  database: [
+    separate_read_pool?: config_env() != :test
+  ],
   moderation_cache_enabled: true,
-  relay_url: "ws://localhost:4000/relay",
+  enable_partition_retention_worker: true,
+  relay_url: "ws://localhost:4413/relay",
+  nip43: [
+    enabled: true,
+    invite_ttl_seconds: 900,
+    request_max_age_seconds: 300
+  ],
+  nip66: [
+    enabled: true,
+    publish_interval_seconds: 900,
+    publish_monitor_announcement?: true,
+    timeout_ms: 5_000,
+    checks: [:open, :read, :nip11],
+    targets: []
+  ],
+  identity: [
+    path: nil,
+    private_key: nil
+  ],
+  sync: [
+    path: nil,
+    start_workers?: true,
+    relay_guard: false
+  ],
   limits: [
     max_frame_bytes: 1_048_576,
     max_event_bytes: 262_144,
     max_filters_per_req: 16,
     max_filter_limit: 500,
+    max_tags_per_event: 256,
+    max_tag_values_per_filter: 128,
+    ip_max_event_ingest_per_window: 1_000,
+    ip_event_ingest_window_seconds: 1,
+    relay_max_event_ingest_per_window: 10_000,
+    relay_event_ingest_window_seconds: 1,
     max_subscriptions_per_connection: 32,
     max_event_future_skew_seconds: 900,
     max_event_ingest_per_window: 120,
     event_ingest_window_seconds: 1,
     auth_max_age_seconds: 600,
+    websocket_ping_interval_seconds: 30,
+    websocket_pong_timeout_seconds: 10,
     max_outbound_queue: 256,
     outbound_drain_batch_size: 64,
     outbound_overflow_strategy: :close,
     max_negentropy_payload_bytes: 4096,
     max_negentropy_sessions_per_connection: 8,
     max_negentropy_total_sessions: 10_000,
+    max_negentropy_items_per_session: 50_000,
+    negentropy_id_list_threshold: 32,
     negentropy_session_idle_timeout_seconds: 60,
     negentropy_session_sweep_interval_seconds: 10
   ],
@@ -47,14 +94,36 @@ config :parrhesia,
     marmot_push_max_server_recipients: 1,
     management_auth_required: true
   ],
-  metrics: [
-    enabled_on_main_endpoint: true,
-    public: false,
-    private_networks_only: true,
-    allowed_cidrs: [],
-    auth_token: nil
+  listeners: %{
+    public: %{
+      enabled: true,
+      bind: %{ip: {0, 0, 0, 0}, port: 4413},
+      max_connections: 20_000,
+      transport: %{scheme: :http, tls: %{mode: :disabled}},
+      proxy: %{trusted_cidrs: [], honor_x_forwarded_for: true},
+      network: %{allow_all: true},
+      features: %{
+        nostr: %{enabled: true},
+        admin: %{enabled: true},
+        metrics: %{
+          enabled: true,
+          access: %{private_networks_only: true},
+          auth_token: nil
+        }
+      },
+      auth: %{nip42_required: false, nip98_required_for_admin: true},
+      baseline_acl: %{read: [], write: []}
+    }
+  },
+  retention: [
+    check_interval_hours: 24,
+    months_ahead: 2,
+    max_db_bytes: :infinity,
+    max_months_to_keep: :infinity,
+    max_partitions_to_drop_per_run: 1
   ],
   features: [
+    verify_event_signatures_locked?: config_env() == :prod,
     verify_event_signatures: true,
     nip_45_count: true,
     nip_50_search: true,
@@ -62,20 +131,16 @@ config :parrhesia,
     marmot_push_notifications: false
   ],
   storage: [
-    backend: :postgres,
     events: Parrhesia.Storage.Adapters.Postgres.Events,
-    acl: Parrhesia.Storage.Adapters.Postgres.ACL,
     moderation: Parrhesia.Storage.Adapters.Postgres.Moderation,
     groups: Parrhesia.Storage.Adapters.Postgres.Groups,
     admin: Parrhesia.Storage.Adapters.Postgres.Admin
   ]
 
-config :parrhesia, Parrhesia.Web.Endpoint, port: 4000
-
-config :parrhesia, Parrhesia.Web.MetricsEndpoint,
-  enabled: false,
-  ip: {127, 0, 0, 1},
-  port: 9568
-
 config :parrhesia, Parrhesia.Repo, types: Parrhesia.PostgresTypes
+config :parrhesia, Parrhesia.ReadRepo, types: Parrhesia.PostgresTypes
 config :parrhesia, ecto_repos: [Parrhesia.Repo]
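The flat metrics keyword list gives way to a nested listeners map here. A short sketch (hypothetical call site, not from the repo) of how such a nested structure reads back at runtime, since Elixir maps implement Access:

listeners = Application.get_env(:parrhesia, :listeners, %{})

port = get_in(listeners, [:public, :bind, :port])
# => 4413

metrics_enabled? = get_in(listeners, [:public, :features, :metrics, :enabled])
# => true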


@@ -23,3 +23,13 @@ config :parrhesia,
     show_sensitive_data_on_connection_error: true,
     pool_size: 10
   ] ++ repo_host_opts
+
+config :parrhesia,
+       Parrhesia.ReadRepo,
+       [
+         username: System.get_env("PGUSER") || System.get_env("USER") || "agent",
+         password: System.get_env("PGPASSWORD"),
+         database: System.get_env("PGDATABASE") || "parrhesia_dev",
+         show_sensitive_data_on_connection_error: true,
+         pool_size: 10
+       ] ++ repo_host_opts
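Parrhesia.ReadRepo is configured here but not defined in this diff; presumably it is a second Ecto repo pointed at the same database so read traffic gets its own pool. A minimal sketch of such a module, under that assumption:

defmodule Parrhesia.ReadRepo do
  # Second pool against the same Postgres database; declaring the repo
  # read_only makes Ecto reject insert/update/delete through it.
  use Ecto.Repo,
    otp_app: :parrhesia,
    adapter: Ecto.Adapters.Postgres,
    read_only: true
end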


@@ -5,4 +5,9 @@ config :parrhesia, Parrhesia.Repo,
   queue_target: 1_000,
   queue_interval: 5_000
 
+config :parrhesia, Parrhesia.ReadRepo,
+  pool_size: 32,
+  queue_target: 1_000,
+  queue_interval: 5_000
+
 # Production runtime configuration lives in config/runtime.exs.


@@ -1,40 +1,781 @@
 import Config
 
-if config_env() == :prod do
-  database_url =
-    System.get_env("DATABASE_URL") ||
-      raise "environment variable DATABASE_URL is missing. Example: ecto://USER:PASS@HOST/DATABASE"
+string_env = fn name, default ->
+  case System.get_env(name) do
+    nil -> default
+    "" -> default
+    value -> value
+  end
+end
+
+int_env = fn name, default ->
+  case System.get_env(name) do
+    nil -> default
+    value -> String.to_integer(value)
+  end
+end
+
+bool_env = fn name, default ->
+  case System.get_env(name) do
+    nil ->
+      default
+
+    value ->
+      case String.downcase(value) do
+        "1" -> true
+        "true" -> true
+        "yes" -> true
+        "on" -> true
+        "0" -> false
+        "false" -> false
+        "no" -> false
+        "off" -> false
+        _other -> raise "environment variable #{name} must be a boolean value"
+      end
+  end
+end
+
+storage_backend_env = fn name, default ->
+  case System.get_env(name) do
+    nil ->
+      default
+
+    value ->
+      case String.downcase(String.trim(value)) do
+        "postgres" -> :postgres
+        "memory" -> :memory
+        _other -> raise "environment variable #{name} must be one of: postgres, memory"
+      end
+  end
+end
+
+csv_env = fn name, default ->
+  case System.get_env(name) do
+    nil ->
+      default
+
+    value ->
+      value
+      |> String.split(",", trim: true)
+      |> Enum.map(&String.trim/1)
+      |> Enum.reject(&(&1 == ""))
+  end
+end
+
+json_env = fn name, default ->
+  case System.get_env(name) do
+    nil ->
+      default
+
+    "" ->
+      default
+
+    value ->
+      case JSON.decode(value) do
+        {:ok, decoded} ->
+          decoded
+
+        {:error, reason} ->
+          raise "environment variable #{name} must contain valid JSON: #{inspect(reason)}"
+      end
+  end
+end
+
+infinity_or_int_env = fn name, default ->
+  case System.get_env(name) do
+    nil ->
+      default
+
+    value ->
+      normalized = value |> String.trim() |> String.downcase()
+
+      if normalized == "infinity" do
+        :infinity
+      else
+        String.to_integer(value)
+      end
+  end
+end
+
+outbound_overflow_strategy_env = fn name, default ->
+  case System.get_env(name) do
+    nil ->
+      default
+
+    "close" ->
+      :close
+
+    "drop_oldest" ->
+      :drop_oldest
+
+    "drop_newest" ->
+      :drop_newest
+
+    _other ->
+      raise "environment variable #{name} must be one of: close, drop_oldest, drop_newest"
+  end
+end
+
+ipv4_env = fn name, default ->
+  case System.get_env(name) do
+    nil ->
+      default
+
+    value ->
+      case String.split(value, ".", parts: 4) do
+        [a, b, c, d] ->
+          octets = Enum.map([a, b, c, d], &String.to_integer/1)
+
+          if Enum.all?(octets, &(&1 >= 0 and &1 <= 255)) do
+            List.to_tuple(octets)
+          else
+            raise "environment variable #{name} must be a valid IPv4 address"
+          end
+
+        _other ->
+          raise "environment variable #{name} must be a valid IPv4 address"
+      end
+  end
+end
+
+if config_env() == :prod do
   repo_defaults = Application.get_env(:parrhesia, Parrhesia.Repo, [])
+  read_repo_defaults = Application.get_env(:parrhesia, Parrhesia.ReadRepo, [])
+  relay_url_default = Application.get_env(:parrhesia, :relay_url)
+  metadata_defaults = Application.get_env(:parrhesia, :metadata, [])
+  database_defaults = Application.get_env(:parrhesia, :database, [])
+  storage_defaults = Application.get_env(:parrhesia, :storage, [])
+
+  moderation_cache_enabled_default =
+    Application.get_env(:parrhesia, :moderation_cache_enabled, true)
+
+  enable_expiration_worker_default =
+    Application.get_env(:parrhesia, :enable_expiration_worker, true)
+
+  enable_partition_retention_worker_default =
+    Application.get_env(:parrhesia, :enable_partition_retention_worker, true)
+
+  limits_defaults = Application.get_env(:parrhesia, :limits, [])
+  policies_defaults = Application.get_env(:parrhesia, :policies, [])
+  listeners_defaults = Application.get_env(:parrhesia, :listeners, %{})
+  retention_defaults = Application.get_env(:parrhesia, :retention, [])
+  features_defaults = Application.get_env(:parrhesia, :features, [])
+  acl_defaults = Application.get_env(:parrhesia, :acl, [])
+  sync_defaults = Application.get_env(:parrhesia, :sync, [])
 
   default_pool_size = Keyword.get(repo_defaults, :pool_size, 32)
   default_queue_target = Keyword.get(repo_defaults, :queue_target, 1_000)
   default_queue_interval = Keyword.get(repo_defaults, :queue_interval, 5_000)
+  default_read_pool_size = Keyword.get(read_repo_defaults, :pool_size, default_pool_size)
+  default_read_queue_target = Keyword.get(read_repo_defaults, :queue_target, default_queue_target)
 
-  pool_size =
-    case System.get_env("POOL_SIZE") do
-      nil -> default_pool_size
-      value -> String.to_integer(value)
-    end
+  default_read_queue_interval =
+    Keyword.get(read_repo_defaults, :queue_interval, default_queue_interval)
 
-  queue_target =
-    case System.get_env("DB_QUEUE_TARGET_MS") do
-      nil -> default_queue_target
-      value -> String.to_integer(value)
-    end
+  default_storage_backend =
+    storage_defaults
+    |> Keyword.get(:backend, :postgres)
+    |> case do
+      :postgres -> :postgres
+      :memory -> :memory
+      other -> raise "unsupported storage backend default: #{inspect(other)}"
+    end
 
-  queue_interval =
-    case System.get_env("DB_QUEUE_INTERVAL_MS") do
-      nil -> default_queue_interval
-      value -> String.to_integer(value)
-    end
+  storage_backend = storage_backend_env.("PARRHESIA_STORAGE_BACKEND", default_storage_backend)
+  postgres_backend? = storage_backend == :postgres
+
+  separate_read_pool? =
+    postgres_backend? and Keyword.get(database_defaults, :separate_read_pool?, true)
+
+  database_url =
+    if postgres_backend? do
+      System.get_env("DATABASE_URL") ||
+        raise "environment variable DATABASE_URL is missing. Example: ecto://USER:PASS@HOST/DATABASE"
+    else
+      nil
+    end
+
+  pool_size = int_env.("POOL_SIZE", default_pool_size)
+  queue_target = int_env.("DB_QUEUE_TARGET_MS", default_queue_target)
+  queue_interval = int_env.("DB_QUEUE_INTERVAL_MS", default_queue_interval)
+  read_pool_size = int_env.("DB_READ_POOL_SIZE", default_read_pool_size)
+  read_queue_target = int_env.("DB_READ_QUEUE_TARGET_MS", default_read_queue_target)
+  read_queue_interval = int_env.("DB_READ_QUEUE_INTERVAL_MS", default_read_queue_interval)
limits = [
max_frame_bytes:
int_env.(
"PARRHESIA_LIMITS_MAX_FRAME_BYTES",
Keyword.get(limits_defaults, :max_frame_bytes, 1_048_576)
),
max_event_bytes:
int_env.(
"PARRHESIA_LIMITS_MAX_EVENT_BYTES",
Keyword.get(limits_defaults, :max_event_bytes, 262_144)
),
max_filters_per_req:
int_env.(
"PARRHESIA_LIMITS_MAX_FILTERS_PER_REQ",
Keyword.get(limits_defaults, :max_filters_per_req, 16)
),
max_filter_limit:
int_env.(
"PARRHESIA_LIMITS_MAX_FILTER_LIMIT",
Keyword.get(limits_defaults, :max_filter_limit, 500)
),
max_tags_per_event:
int_env.(
"PARRHESIA_LIMITS_MAX_TAGS_PER_EVENT",
Keyword.get(limits_defaults, :max_tags_per_event, 256)
),
max_tag_values_per_filter:
int_env.(
"PARRHESIA_LIMITS_MAX_TAG_VALUES_PER_FILTER",
Keyword.get(limits_defaults, :max_tag_values_per_filter, 128)
),
ip_max_event_ingest_per_window:
int_env.(
"PARRHESIA_LIMITS_IP_MAX_EVENT_INGEST_PER_WINDOW",
Keyword.get(limits_defaults, :ip_max_event_ingest_per_window, 1_000)
),
ip_event_ingest_window_seconds:
int_env.(
"PARRHESIA_LIMITS_IP_EVENT_INGEST_WINDOW_SECONDS",
Keyword.get(limits_defaults, :ip_event_ingest_window_seconds, 1)
),
relay_max_event_ingest_per_window:
int_env.(
"PARRHESIA_LIMITS_RELAY_MAX_EVENT_INGEST_PER_WINDOW",
Keyword.get(limits_defaults, :relay_max_event_ingest_per_window, 10_000)
),
relay_event_ingest_window_seconds:
int_env.(
"PARRHESIA_LIMITS_RELAY_EVENT_INGEST_WINDOW_SECONDS",
Keyword.get(limits_defaults, :relay_event_ingest_window_seconds, 1)
),
max_subscriptions_per_connection:
int_env.(
"PARRHESIA_LIMITS_MAX_SUBSCRIPTIONS_PER_CONNECTION",
Keyword.get(limits_defaults, :max_subscriptions_per_connection, 32)
),
max_event_future_skew_seconds:
int_env.(
"PARRHESIA_LIMITS_MAX_EVENT_FUTURE_SKEW_SECONDS",
Keyword.get(limits_defaults, :max_event_future_skew_seconds, 900)
),
max_event_ingest_per_window:
int_env.(
"PARRHESIA_LIMITS_MAX_EVENT_INGEST_PER_WINDOW",
Keyword.get(limits_defaults, :max_event_ingest_per_window, 120)
),
event_ingest_window_seconds:
int_env.(
"PARRHESIA_LIMITS_EVENT_INGEST_WINDOW_SECONDS",
Keyword.get(limits_defaults, :event_ingest_window_seconds, 1)
),
auth_max_age_seconds:
int_env.(
"PARRHESIA_LIMITS_AUTH_MAX_AGE_SECONDS",
Keyword.get(limits_defaults, :auth_max_age_seconds, 600)
),
websocket_ping_interval_seconds:
int_env.(
"PARRHESIA_LIMITS_WEBSOCKET_PING_INTERVAL_SECONDS",
Keyword.get(limits_defaults, :websocket_ping_interval_seconds, 30)
),
websocket_pong_timeout_seconds:
int_env.(
"PARRHESIA_LIMITS_WEBSOCKET_PONG_TIMEOUT_SECONDS",
Keyword.get(limits_defaults, :websocket_pong_timeout_seconds, 10)
),
max_outbound_queue:
int_env.(
"PARRHESIA_LIMITS_MAX_OUTBOUND_QUEUE",
Keyword.get(limits_defaults, :max_outbound_queue, 256)
),
outbound_drain_batch_size:
int_env.(
"PARRHESIA_LIMITS_OUTBOUND_DRAIN_BATCH_SIZE",
Keyword.get(limits_defaults, :outbound_drain_batch_size, 64)
),
outbound_overflow_strategy:
outbound_overflow_strategy_env.(
"PARRHESIA_LIMITS_OUTBOUND_OVERFLOW_STRATEGY",
Keyword.get(limits_defaults, :outbound_overflow_strategy, :close)
),
max_negentropy_payload_bytes:
int_env.(
"PARRHESIA_LIMITS_MAX_NEGENTROPY_PAYLOAD_BYTES",
Keyword.get(limits_defaults, :max_negentropy_payload_bytes, 4096)
),
max_negentropy_sessions_per_connection:
int_env.(
"PARRHESIA_LIMITS_MAX_NEGENTROPY_SESSIONS_PER_CONNECTION",
Keyword.get(limits_defaults, :max_negentropy_sessions_per_connection, 8)
),
max_negentropy_total_sessions:
int_env.(
"PARRHESIA_LIMITS_MAX_NEGENTROPY_TOTAL_SESSIONS",
Keyword.get(limits_defaults, :max_negentropy_total_sessions, 10_000)
),
max_negentropy_items_per_session:
int_env.(
"PARRHESIA_LIMITS_MAX_NEGENTROPY_ITEMS_PER_SESSION",
Keyword.get(limits_defaults, :max_negentropy_items_per_session, 50_000)
),
negentropy_id_list_threshold:
int_env.(
"PARRHESIA_LIMITS_NEGENTROPY_ID_LIST_THRESHOLD",
Keyword.get(limits_defaults, :negentropy_id_list_threshold, 32)
),
negentropy_session_idle_timeout_seconds:
int_env.(
"PARRHESIA_LIMITS_NEGENTROPY_SESSION_IDLE_TIMEOUT_SECONDS",
Keyword.get(limits_defaults, :negentropy_session_idle_timeout_seconds, 60)
),
negentropy_session_sweep_interval_seconds:
int_env.(
"PARRHESIA_LIMITS_NEGENTROPY_SESSION_SWEEP_INTERVAL_SECONDS",
Keyword.get(limits_defaults, :negentropy_session_sweep_interval_seconds, 10)
)
]
policies = [
auth_required_for_writes:
bool_env.(
"PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES",
Keyword.get(policies_defaults, :auth_required_for_writes, false)
),
auth_required_for_reads:
bool_env.(
"PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_READS",
Keyword.get(policies_defaults, :auth_required_for_reads, false)
),
min_pow_difficulty:
int_env.(
"PARRHESIA_POLICIES_MIN_POW_DIFFICULTY",
Keyword.get(policies_defaults, :min_pow_difficulty, 0)
),
accept_ephemeral_events:
bool_env.(
"PARRHESIA_POLICIES_ACCEPT_EPHEMERAL_EVENTS",
Keyword.get(policies_defaults, :accept_ephemeral_events, true)
),
mls_group_event_ttl_seconds:
int_env.(
"PARRHESIA_POLICIES_MLS_GROUP_EVENT_TTL_SECONDS",
Keyword.get(policies_defaults, :mls_group_event_ttl_seconds, 300)
),
marmot_require_h_for_group_queries:
bool_env.(
"PARRHESIA_POLICIES_MARMOT_REQUIRE_H_FOR_GROUP_QUERIES",
Keyword.get(policies_defaults, :marmot_require_h_for_group_queries, true)
),
marmot_group_max_h_values_per_filter:
int_env.(
"PARRHESIA_POLICIES_MARMOT_GROUP_MAX_H_VALUES_PER_FILTER",
Keyword.get(policies_defaults, :marmot_group_max_h_values_per_filter, 32)
),
marmot_group_max_query_window_seconds:
int_env.(
"PARRHESIA_POLICIES_MARMOT_GROUP_MAX_QUERY_WINDOW_SECONDS",
Keyword.get(policies_defaults, :marmot_group_max_query_window_seconds, 2_592_000)
),
marmot_media_max_imeta_tags_per_event:
int_env.(
"PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_IMETA_TAGS_PER_EVENT",
Keyword.get(policies_defaults, :marmot_media_max_imeta_tags_per_event, 8)
),
marmot_media_max_field_value_bytes:
int_env.(
"PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_FIELD_VALUE_BYTES",
Keyword.get(policies_defaults, :marmot_media_max_field_value_bytes, 1024)
),
marmot_media_max_url_bytes:
int_env.(
"PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_URL_BYTES",
Keyword.get(policies_defaults, :marmot_media_max_url_bytes, 2048)
),
marmot_media_allowed_mime_prefixes:
csv_env.(
"PARRHESIA_POLICIES_MARMOT_MEDIA_ALLOWED_MIME_PREFIXES",
Keyword.get(policies_defaults, :marmot_media_allowed_mime_prefixes, [])
),
marmot_media_reject_mip04_v1:
bool_env.(
"PARRHESIA_POLICIES_MARMOT_MEDIA_REJECT_MIP04_V1",
Keyword.get(policies_defaults, :marmot_media_reject_mip04_v1, true)
),
marmot_push_server_pubkeys:
csv_env.(
"PARRHESIA_POLICIES_MARMOT_PUSH_SERVER_PUBKEYS",
Keyword.get(policies_defaults, :marmot_push_server_pubkeys, [])
),
marmot_push_max_relay_tags:
int_env.(
"PARRHESIA_POLICIES_MARMOT_PUSH_MAX_RELAY_TAGS",
Keyword.get(policies_defaults, :marmot_push_max_relay_tags, 16)
),
marmot_push_max_payload_bytes:
int_env.(
"PARRHESIA_POLICIES_MARMOT_PUSH_MAX_PAYLOAD_BYTES",
Keyword.get(policies_defaults, :marmot_push_max_payload_bytes, 65_536)
),
marmot_push_max_trigger_age_seconds:
int_env.(
"PARRHESIA_POLICIES_MARMOT_PUSH_MAX_TRIGGER_AGE_SECONDS",
Keyword.get(policies_defaults, :marmot_push_max_trigger_age_seconds, 120)
),
marmot_push_require_expiration:
bool_env.(
"PARRHESIA_POLICIES_MARMOT_PUSH_REQUIRE_EXPIRATION",
Keyword.get(policies_defaults, :marmot_push_require_expiration, true)
),
marmot_push_max_expiration_window_seconds:
int_env.(
"PARRHESIA_POLICIES_MARMOT_PUSH_MAX_EXPIRATION_WINDOW_SECONDS",
Keyword.get(policies_defaults, :marmot_push_max_expiration_window_seconds, 120)
),
marmot_push_max_server_recipients:
int_env.(
"PARRHESIA_POLICIES_MARMOT_PUSH_MAX_SERVER_RECIPIENTS",
Keyword.get(policies_defaults, :marmot_push_max_server_recipients, 1)
),
management_auth_required:
bool_env.(
"PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED",
Keyword.get(policies_defaults, :management_auth_required, true)
)
]
public_listener_defaults = Map.get(listeners_defaults, :public, %{})
public_bind_defaults = Map.get(public_listener_defaults, :bind, %{})
public_transport_defaults = Map.get(public_listener_defaults, :transport, %{})
public_proxy_defaults = Map.get(public_listener_defaults, :proxy, %{})
public_network_defaults = Map.get(public_listener_defaults, :network, %{})
public_features_defaults = Map.get(public_listener_defaults, :features, %{})
public_auth_defaults = Map.get(public_listener_defaults, :auth, %{})
public_metrics_defaults = Map.get(public_features_defaults, :metrics, %{})
public_metrics_access_defaults = Map.get(public_metrics_defaults, :access, %{})
metrics_listener_defaults = Map.get(listeners_defaults, :metrics, %{})
metrics_listener_bind_defaults = Map.get(metrics_listener_defaults, :bind, %{})
metrics_listener_transport_defaults = Map.get(metrics_listener_defaults, :transport, %{})
metrics_listener_network_defaults = Map.get(metrics_listener_defaults, :network, %{})
metrics_listener_metrics_defaults =
metrics_listener_defaults
|> Map.get(:features, %{})
|> Map.get(:metrics, %{})
metrics_listener_metrics_access_defaults =
Map.get(metrics_listener_metrics_defaults, :access, %{})
public_listener = %{
enabled: Map.get(public_listener_defaults, :enabled, true),
bind: %{
ip: Map.get(public_bind_defaults, :ip, {0, 0, 0, 0}),
port: int_env.("PORT", Map.get(public_bind_defaults, :port, 4413))
},
max_connections:
infinity_or_int_env.(
"PARRHESIA_PUBLIC_MAX_CONNECTIONS",
Map.get(public_listener_defaults, :max_connections, 20_000)
),
transport: %{
scheme: Map.get(public_transport_defaults, :scheme, :http),
tls: Map.get(public_transport_defaults, :tls, %{mode: :disabled})
},
proxy: %{
trusted_cidrs:
csv_env.(
"PARRHESIA_TRUSTED_PROXIES",
Map.get(public_proxy_defaults, :trusted_cidrs, [])
),
honor_x_forwarded_for: Map.get(public_proxy_defaults, :honor_x_forwarded_for, true)
},
network: %{
allow_cidrs: Map.get(public_network_defaults, :allow_cidrs, []),
private_networks_only: Map.get(public_network_defaults, :private_networks_only, false),
public: Map.get(public_network_defaults, :public, false),
allow_all: Map.get(public_network_defaults, :allow_all, true)
},
features: %{
nostr: %{
enabled: public_features_defaults |> Map.get(:nostr, %{}) |> Map.get(:enabled, true)
},
admin: %{
enabled: public_features_defaults |> Map.get(:admin, %{}) |> Map.get(:enabled, true)
},
metrics: %{
enabled:
bool_env.(
"PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT",
Map.get(public_metrics_defaults, :enabled, true)
),
auth_token:
string_env.(
"PARRHESIA_METRICS_AUTH_TOKEN",
Map.get(public_metrics_defaults, :auth_token)
),
access: %{
public:
bool_env.(
"PARRHESIA_METRICS_PUBLIC",
Map.get(public_metrics_access_defaults, :public, false)
),
private_networks_only:
bool_env.(
"PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY",
Map.get(public_metrics_access_defaults, :private_networks_only, true)
),
allow_cidrs:
csv_env.(
"PARRHESIA_METRICS_ALLOWED_CIDRS",
Map.get(public_metrics_access_defaults, :allow_cidrs, [])
),
allow_all: Map.get(public_metrics_access_defaults, :allow_all, true)
}
}
},
auth: %{
nip42_required: Map.get(public_auth_defaults, :nip42_required, false),
nip98_required_for_admin:
bool_env.(
"PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED",
Map.get(public_auth_defaults, :nip98_required_for_admin, true)
)
},
baseline_acl: Map.get(public_listener_defaults, :baseline_acl, %{read: [], write: []})
}
listeners =
if Map.get(metrics_listener_defaults, :enabled, false) or
bool_env.("PARRHESIA_METRICS_ENDPOINT_ENABLED", false) do
Map.put(
%{public: public_listener},
:metrics,
%{
enabled: true,
bind: %{
ip: Map.get(metrics_listener_bind_defaults, :ip, {127, 0, 0, 1}),
port:
int_env.(
"PARRHESIA_METRICS_ENDPOINT_PORT",
Map.get(metrics_listener_bind_defaults, :port, 9568)
)
},
max_connections:
infinity_or_int_env.(
"PARRHESIA_METRICS_ENDPOINT_MAX_CONNECTIONS",
Map.get(metrics_listener_defaults, :max_connections, 1_024)
),
transport: %{
scheme: Map.get(metrics_listener_transport_defaults, :scheme, :http),
tls: Map.get(metrics_listener_transport_defaults, :tls, %{mode: :disabled})
},
network: %{
allow_cidrs: Map.get(metrics_listener_network_defaults, :allow_cidrs, []),
private_networks_only:
Map.get(metrics_listener_network_defaults, :private_networks_only, false),
public: Map.get(metrics_listener_network_defaults, :public, false),
allow_all: Map.get(metrics_listener_network_defaults, :allow_all, true)
},
features: %{
nostr: %{enabled: false},
admin: %{enabled: false},
metrics: %{
enabled: true,
auth_token:
string_env.(
"PARRHESIA_METRICS_AUTH_TOKEN",
Map.get(metrics_listener_metrics_defaults, :auth_token)
),
access: %{
public:
bool_env.(
"PARRHESIA_METRICS_PUBLIC",
Map.get(metrics_listener_metrics_access_defaults, :public, false)
),
private_networks_only:
bool_env.(
"PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY",
Map.get(
metrics_listener_metrics_access_defaults,
:private_networks_only,
true
)
),
allow_cidrs:
csv_env.(
"PARRHESIA_METRICS_ALLOWED_CIDRS",
Map.get(metrics_listener_metrics_access_defaults, :allow_cidrs, [])
),
allow_all: Map.get(metrics_listener_metrics_access_defaults, :allow_all, true)
}
}
},
auth: %{nip42_required: false, nip98_required_for_admin: true},
baseline_acl: %{read: [], write: []}
}
)
else
%{public: public_listener}
end
retention = [
check_interval_hours:
int_env.(
"PARRHESIA_RETENTION_CHECK_INTERVAL_HOURS",
Keyword.get(retention_defaults, :check_interval_hours, 24)
),
months_ahead:
int_env.(
"PARRHESIA_RETENTION_MONTHS_AHEAD",
Keyword.get(retention_defaults, :months_ahead, 2)
),
max_db_bytes:
infinity_or_int_env.(
"PARRHESIA_RETENTION_MAX_DB_BYTES",
Keyword.get(retention_defaults, :max_db_bytes, :infinity)
),
max_months_to_keep:
infinity_or_int_env.(
"PARRHESIA_RETENTION_MAX_MONTHS_TO_KEEP",
Keyword.get(retention_defaults, :max_months_to_keep, :infinity)
),
max_partitions_to_drop_per_run:
int_env.(
"PARRHESIA_RETENTION_MAX_PARTITIONS_TO_DROP_PER_RUN",
Keyword.get(retention_defaults, :max_partitions_to_drop_per_run, 1)
)
]
features = [
verify_event_signatures_locked?:
Keyword.get(features_defaults, :verify_event_signatures_locked?, false),
verify_event_signatures:
if Keyword.get(features_defaults, :verify_event_signatures_locked?, false) do
true
else
Keyword.get(features_defaults, :verify_event_signatures, true)
end,
nip_45_count:
bool_env.(
"PARRHESIA_FEATURES_NIP_45_COUNT",
Keyword.get(features_defaults, :nip_45_count, true)
),
nip_50_search:
bool_env.(
"PARRHESIA_FEATURES_NIP_50_SEARCH",
Keyword.get(features_defaults, :nip_50_search, true)
),
nip_77_negentropy:
bool_env.(
"PARRHESIA_FEATURES_NIP_77_NEGENTROPY",
Keyword.get(features_defaults, :nip_77_negentropy, true)
),
marmot_push_notifications:
bool_env.(
"PARRHESIA_FEATURES_MARMOT_PUSH_NOTIFICATIONS",
Keyword.get(features_defaults, :marmot_push_notifications, false)
)
]
storage =
case storage_backend do
:postgres ->
[
backend: :postgres,
events: Parrhesia.Storage.Adapters.Postgres.Events,
acl: Parrhesia.Storage.Adapters.Postgres.ACL,
moderation: Parrhesia.Storage.Adapters.Postgres.Moderation,
groups: Parrhesia.Storage.Adapters.Postgres.Groups,
admin: Parrhesia.Storage.Adapters.Postgres.Admin
]
:memory ->
[
backend: :memory,
events: Parrhesia.Storage.Adapters.Memory.Events,
acl: Parrhesia.Storage.Adapters.Memory.ACL,
moderation: Parrhesia.Storage.Adapters.Memory.Moderation,
groups: Parrhesia.Storage.Adapters.Memory.Groups,
admin: Parrhesia.Storage.Adapters.Memory.Admin
]
end
if postgres_backend? do
config :parrhesia, Parrhesia.Repo,
url: database_url,
pool_size: pool_size,
queue_target: queue_target,
queue_interval: queue_interval
config :parrhesia, Parrhesia.ReadRepo,
url: database_url,
pool_size: read_pool_size,
queue_target: read_queue_target,
queue_interval: read_queue_interval
end
config :parrhesia,
database: [
separate_read_pool?: separate_read_pool?
],
relay_url: string_env.("PARRHESIA_RELAY_URL", relay_url_default),
metadata: [
name: Keyword.get(metadata_defaults, :name, "Parrhesia"),
version: Keyword.get(metadata_defaults, :version, "0.0.0"),
hide_version?:
bool_env.(
"PARRHESIA_METADATA_HIDE_VERSION",
Keyword.get(metadata_defaults, :hide_version?, true)
)
],
acl: [
protected_filters:
json_env.(
"PARRHESIA_ACL_PROTECTED_FILTERS",
Keyword.get(acl_defaults, :protected_filters, [])
)
],
identity: [
path: string_env.("PARRHESIA_IDENTITY_PATH", nil),
private_key: string_env.("PARRHESIA_IDENTITY_PRIVATE_KEY", nil)
],
sync: [
path: string_env.("PARRHESIA_SYNC_PATH", nil),
start_workers?:
bool_env.(
"PARRHESIA_SYNC_START_WORKERS",
Keyword.get(sync_defaults, :start_workers?, true)
),
relay_guard:
bool_env.(
"PARRHESIA_SYNC_RELAY_GUARD",
Keyword.get(sync_defaults, :relay_guard, false)
)
],
moderation_cache_enabled:
bool_env.("PARRHESIA_MODERATION_CACHE_ENABLED", moderation_cache_enabled_default),
enable_expiration_worker:
bool_env.("PARRHESIA_ENABLE_EXPIRATION_WORKER", enable_expiration_worker_default),
enable_partition_retention_worker:
bool_env.(
"PARRHESIA_ENABLE_PARTITION_RETENTION_WORKER",
enable_partition_retention_worker_default
),
listeners: listeners,
limits: limits,
policies: policies,
retention: retention,
features: features,
storage: storage
case System.get_env("PARRHESIA_EXTRA_CONFIG") do
nil -> :ok
"" -> :ok
path -> import_config path
end
end

View File

@@ -8,13 +8,34 @@ test_endpoint_port =
value -> String.to_integer(value)
end
config :parrhesia, :listeners,
public: %{
enabled: true,
bind: %{ip: {127, 0, 0, 1}, port: test_endpoint_port},
transport: %{scheme: :http, tls: %{mode: :disabled}},
proxy: %{trusted_cidrs: [], honor_x_forwarded_for: true},
network: %{allow_all: true},
features: %{
nostr: %{enabled: true},
admin: %{enabled: true},
metrics: %{enabled: true, access: %{private_networks_only: true}, auth_token: nil}
},
auth: %{nip42_required: false, nip98_required_for_admin: true},
baseline_acl: %{read: [], write: []}
}
config :parrhesia,
enable_expiration_worker: false,
moderation_cache_enabled: false,
nip66: [enabled: false],
identity: [
path: Path.join(System.tmp_dir!(), "parrhesia_test_identity.json"),
private_key: nil
],
sync: [
path: Path.join(System.tmp_dir!(), "parrhesia_test_sync.json"),
start_workers?: false
],
features: [verify_event_signatures: false]
pg_host = System.get_env("PGHOST")

View File

@@ -10,7 +10,7 @@
vips,
}: let
pname = "parrhesia";
version = "0.8.0";
beamPackages = beam.packages.erlang_28.extend (
final: _prev: {
@@ -48,7 +48,7 @@
beamPackages.fetchMixDeps {
pname = "${pname}-mix-deps";
inherit version src;
hash = "sha256-D69wuFnIChQzm1PmpIW+X/1sPpsIcDHe4V5fKmFeJ3k=";
}
else null;

View File

@@ -87,11 +87,27 @@
"type": "github" "type": "github"
} }
}, },
"nostr-bench-src": {
"flake": false,
"locked": {
"lastModified": 1774020724,
"owner": "serpent213",
"repo": "nostr-bench",
"rev": "8561b84864ce1269b26304808c64219471999caf",
"type": "github"
},
"original": {
"owner": "serpent213",
"repo": "nostr-bench",
"type": "github"
}
},
"root": { "root": {
"inputs": { "inputs": {
"devenv": "devenv", "devenv": "devenv",
"git-hooks": "git-hooks", "git-hooks": "git-hooks",
"nixpkgs": "nixpkgs", "nixpkgs": "nixpkgs",
"nostr-bench-src": "nostr-bench-src",
"pre-commit-hooks": [ "pre-commit-hooks": [
"git-hooks" "git-hooks"
] ]

View File

@@ -73,11 +73,14 @@ in {
vips.overrideAttrs (oldAttrs: {
buildInputs = oldAttrs.buildInputs ++ [mozjpeg];
});
nostr-bench = pkgs.callPackage ./nix/nostr-bench.nix {
nostrBenchSrc = inputs.nostr-bench-src;
};
in
with pkgs;
[
just
# Mix NIFs
gcc
git
gnumake
@@ -85,6 +88,8 @@ in {
automake
libtool
pkg-config
# for tests
openssl
# Nix code formatter
alejandra
# i18n
@@ -97,21 +102,21 @@ in {
mermaid-cli
# Nostr CLI client
nak
websocat
# Nostr relay benchmark client
nostr-bench
# Nostr reference servers
nostr-rs-relay
# Benchmark graph
gnuplot
# Cloud benchmarks
hcloud
]
++ lib.optionals pkgs.stdenv.hostPlatform.isx86_64 [
# Nostr reference servers
strfry
];
# https://devenv.sh/languages/
languages = {
elixir = {
@@ -131,7 +136,7 @@ in {
enable = true;
package = pkgs.postgresql_18;
# Some tuning for the benchmark - doesn't seem to do much
settings = {
max_connections = 300;
shared_buffers = "1GB";
@@ -151,6 +156,7 @@ in {
initialScript = ''
CREATE ROLE dev WITH LOGIN PASSWORD 'dev' SUPERUSER;
-- Make sure we get the right collation
ALTER database template1 is_template=false;
DROP database template1;
@@ -167,12 +173,10 @@ in {
'';
};
dotenv.enable = true;
devenv.warnOnNewVersion = false;
# https://devenv.sh/pre-commit-hooks/
git-hooks.hooks = {
alejandra.enable = true;
check-added-large-files = {

View File

@@ -2,6 +2,9 @@
inputs: inputs:
nixpkgs: nixpkgs:
url: github:cachix/devenv-nixpkgs/rolling url: github:cachix/devenv-nixpkgs/rolling
nostr-bench-src:
url: github:serpent213/nostr-bench
flake: false
# If you're using non-OSS software, you can set allowUnfree to true.
# allowUnfree: true

View File

@@ -68,10 +68,10 @@ Notes:
## 3) System architecture (high level)
```text
Configured WS/HTTP Listeners (Bandit/Plug)
-> Protocol Decoder/Encoder
-> Command Router (EVENT/REQ/CLOSE/AUTH/COUNT/NEG-*)
-> Policy Pipeline (listener baseline, validation, auth, ACL, PoW, NIP-70)
-> Event Service / Query Service
-> Storage Port (behavior)
-> Postgres Adapter (Ecto)
```
@@ -82,23 +82,36 @@ WS/HTTP Edge (Bandit/Plug)
## 4) OTP supervision design
`Parrhesia.Runtime` children (top-level):
1. `Parrhesia.Telemetry` metric definitions/reporters
2. `Parrhesia.ConnectionStats` per-listener connection/subscription counters
3. `Parrhesia.Config` runtime config cache (ETS-backed)
4. `Parrhesia.Web.EventIngestLimiter` relay-wide event ingest rate limiter
5. `Parrhesia.Web.IPEventIngestLimiter` per-IP event ingest rate limiter
6. `Parrhesia.Storage.Supervisor` adapter processes (`Repo`, pools)
7. `Parrhesia.Subscriptions.Supervisor` subscription index + fanout workers
8. `Parrhesia.Auth.Supervisor` AUTH challenge/session tracking
9. `Parrhesia.Sync.Supervisor` outbound relay sync workers
10. `Parrhesia.Policy.Supervisor` rate limiters / ACL caches
11. `Parrhesia.Web.Endpoint` supervises configured WS + HTTP listeners
12. `Parrhesia.Tasks.Supervisor` background jobs (expiry purge, maintenance)
Failure model:
- Connection failures are isolated per socket process.
- Listener failures are isolated per Bandit child and restarted independently.
- Storage outages degrade with explicit `OK/CLOSED` error prefixes (`error:`) per NIP-01.
- Non-critical workers are `:transient`; core infra is `:permanent`.
Ingress model:
- Ingress is defined through `config :parrhesia, :listeners, ...`.
- Each listener has its own bind/transport settings, TLS mode, proxy trust, network allowlist, enabled features (`nostr`, `admin`, `metrics`), auth requirements, and baseline read/write ACL.
- Listeners can therefore expose different security postures, for example a public relay listener and a VPN-only sync-capable listener.
- TLS-capable listeners support direct server TLS, mutual TLS with optional client pin checks, and proxy-terminated TLS identity on explicitly trusted proxy hops.
- Certificate reload is currently implemented as admin-triggered listener restart from disk rather than background file watching.
## 5) Core runtime components
### 5.1 Connection process

docs/BETA_REVIEW.md Normal file
View File

@@ -0,0 +1,344 @@
# Parrhesia Beta: Production-Readiness Gap Assessment
**Date:** 2026-03-20
**Version:** 0.7.0
**Scope:** Delta analysis from beta promotion — what stands between this codebase and confident public-facing production deployment.
---
## Production Readiness Scorecard
| # | Dimension | Rating | Summary |
|---|----------------------------------|--------|----------------------------------------------|
| 1 | Operational Resilience | 🟡 | Graceful shutdown partial; no DB circuit-breaking |
| 2 | Multi-Node / Clustering | 🟡 | Best-effort only; acceptable for single-node prod |
| 3 | Load & Capacity Characterisation | 🟡 | Benchmarks exist but no defined capacity model |
| 4 | Deployment & Infrastructure | 🟡 | Strong Nix/Docker base; missing runbooks and migration strategy |
| 5 | Security Hardening | 🟢 | Solid for production with reverse proxy |
| 6 | Data Integrity & Consistency | 🟢 | Transaction-wrapped writes with dedup; minor multi-node edge cases |
| 7 | Observability Completeness | 🟡 | Excellent metrics; no dashboards, alerts, or tracing |
| 8 | Technical Debt (Prod Impact) | 🟡 | Manageable; connection.ex size is the main concern |
---
## 1. Operational Resilience — 🟡
### What's good
- **No `Process.sleep` on any hot path.** Zero occurrences in `lib/`. Clean async message passing throughout.
- **WebSocket keepalive** implemented: 30s ping, 10s pong timeout, auto-close on timeout.
- **Outbound queue backpressure** well-designed: bounded queue (256 default), configurable overflow strategy (`:close`/`:drop_oldest`/`:drop_newest`), pressure telemetry at 75% threshold.
- **Connection isolation:** Each WebSocket is a separate process; one crash does not propagate.
- **Graceful connection close on shutdown:** `handle_info({:EXIT, _, :shutdown}, ...)` drains outbound frames before closing with code 1012 ("service restart"). This is good.
### Gaps
**G1.1 — No DB circuit-breaking or backoff on PostgreSQL unavailability.**
Ecto's connection pool (`db_connection`/`DBConnection`) will queue checkout requests up to `queue_target` (1000ms) / `queue_interval` (5000ms), then raise `DBConnection.ConnectionError`. These errors propagate as storage failures in the ingest path and return NOTICE errors to clients. However:
- There is no circuit breaker to fast-reject requests when the DB is known-down, meaning every ingest/query attempt during an outage burns a pool checkout timeout slot.
- On DB recovery, all queued checkouts may succeed simultaneously (thundering herd).
- **Impact:** During a PostgreSQL failover (typically 10-30s), connection processes pile up waiting on the pool. Latency spikes for all connected clients. Memory pressure from queued processes.
- **Mitigation:** Ecto's built-in queue management provides partial protection. For a relay with ≤1000 concurrent connections this is likely survivable without circuit-breaking. For higher connection counts, consider a fast-fail wrapper around storage calls when the pool reports consecutive failures.
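A minimal fast-fail sketch of that mitigation, assuming the storage port returns `{:ok, _}`/`{:error, _}` tuples (`StorageBreaker` is a hypothetical module, not part of Parrhesia today):
```elixir
defmodule StorageBreaker do
  # Hypothetical fast-fail wrapper: after @max_failures consecutive storage
  # errors, reject calls immediately until @cooldown_ms has elapsed.
  use Agent

  @max_failures 5
  @cooldown_ms 2_000

  def start_link(_opts), do: Agent.start_link(fn -> {0, 0} end, name: __MODULE__)

  def call(fun) do
    {failures, last_failure_at} = Agent.get(__MODULE__, & &1)
    now = System.monotonic_time(:millisecond)

    if failures >= @max_failures and now - last_failure_at < @cooldown_ms do
      {:error, :storage_unavailable}
    else
      case fun.() do
        {:ok, _} = ok ->
          Agent.update(__MODULE__, fn _ -> {0, 0} end)
          ok

        {:error, _} = err ->
          Agent.update(__MODULE__, fn {n, _} -> {n + 1, now} end)
          err
      end
    end
  end
end
```
Callers would wrap storage operations as `StorageBreaker.call(fn -> Storage.put_event(event) end)`. Note the single Agent serialises every guarded call through one process; at high connection counts an ETS-based counter would avoid that choke point.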
**G1.2 — Metrics scrape on the hot path.**
`/metrics` calls `TelemetryMetricsPrometheus.Core.scrape/1` synchronously within the HTTP request handler. This serialises metric aggregation and formatting. If the Prometheus reporter's internal state is large (many unique tag combinations), scraping can take 10-100ms. This runs on a Bandit acceptor process — it does not block WebSocket connections directly, but a slow scrape under high cardinality could make the health endpoint unresponsive if metrics and health share the same listener.
- **Current mitigation:** Metrics can be isolated to a dedicated listener via `PARRHESIA_METRICS_ENDPOINT_*` config. If deployed this way, impact is isolated.
- **Recommendation:** Document the dedicated metrics listener as required for production. Consider adding a scrape timeout guard.
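A sketch of such a timeout guard, assuming the reporter is registered under a name like `:parrhesia_prometheus` (the name is a placeholder):
```elixir
# Run the scrape in a task and give up after timeout_ms instead of letting a
# slow aggregation stall the HTTP handler.
def scrape_with_timeout(timeout_ms \\ 2_000) do
  task =
    Task.async(fn ->
      TelemetryMetricsPrometheus.Core.scrape(:parrhesia_prometheus)
    end)

  case Task.yield(task, timeout_ms) || Task.shutdown(task, :brutal_kill) do
    {:ok, body} -> {:ok, body}
    _ -> {:error, :scrape_timeout}
  end
end
```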
**G1.3 — Supervisor shutdown timeout is OTP default (5s).**
The `Parrhesia.Runtime` supervisor uses `:one_for_one` strategy with default child shutdown specs. Bandit listeners have their own shutdown behavior, but there is no explicit `shutdown: N` on the endpoint child spec. Under load with many connections, 5s may not be enough to drain all outbound queues.
- **Recommendation:** Set explicit `shutdown: 15_000` on `Parrhesia.Web.Endpoint` child spec. Bandit supports graceful drain on listener stop.
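For example (a sketch; the exact child list lives in `Parrhesia.Runtime`):
```elixir
children = [
  # ... other Parrhesia.Runtime children ...
  # Give the endpoint up to 15s to drain connections on shutdown.
  Supervisor.child_spec(Parrhesia.Web.Endpoint, shutdown: 15_000)
]
```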
---
## 2. Multi-Node / Clustering — 🟡
### Current state
Per `docs/CLUSTER.md`, clustering is **implemented but explicitly best-effort and untested**:
- `:pg`-based process groups for cross-node fanout.
- No automatic cluster discovery (no libcluster).
- ETS subscription index is node-local.
- No durable inter-node transport; no replay on reconnect.
- No explicit acknowledgement between nodes.
### Assessment for production
**For single-node production deployment: not a blocker.** The clustering code is unconditionally started (`MultiNode` joins `:pg` on init) but with a single node, `get_members/0` returns only self, and the `Enum.reject(&(&1 == self()))` filter means no remote sends occur. No performance overhead.
**For multi-node production: not ready.** Key issues:
- **Subscription inconsistency on netsplit:** Events ingested on node A during a split are never delivered to subscribers on node B. No catch-up mechanism exists. Clients must reconnect and re-query to recover.
- **Node departure drops subscriptions silently:** When a node leaves the cluster, subscribers on that node lose their connections (normal). Subscribers on other nodes are unaffected. But events that were in-flight from the departed node are lost.
- **No cluster health observability:** No metrics for inter-node fanout lag, message drops, or membership changes.
**Recommendation for initial production:** Deploy single-node. Clustering is a Phase B concern per the documented roadmap.
---
## 3. Load & Capacity Characterisation — 🟡
### What exists
- `LoadSoakTest` asserts p95 fanout enqueue/drain < 25ms.
- `bench/` directory with `nostr-bench` submodule for external load testing.
- Cloud bench orchestration scripts (`scripts/cloud_bench_orchestrate.mjs`, `scripts/cloud_bench_server.sh`).
### Gaps
**G3.1 — No documented capacity model.**
There is no documented answer to: "How many connections / events per second can one node handle before degradation?" The `LoadSoakTest` runs locally with synthetic data — useful for regression detection but not representative of production traffic patterns.
**G3.2 — Multi-filter query scaling is in-memory dedup.**
`Postgres.Events.query/3` runs each filter as a separate SQL query, collects all results into memory, and deduplicates with `deduplicate_events/1` (Map.update accumulation). With many overlapping filters or high-cardinality results, this could produce significant memory pressure per-request.
- At realistic scales (< 10 filters, < 1000 results per filter), this is fine.
- At adversarial scales (32 subscriptions × large result sets), a single REQ could allocate substantial memory.
- **Current mitigation:** `max_tag_values_per_filter` (128) and query `LIMIT` bounds exist. The risk is bounded but not eliminated.
**G3.3 — No query performance benchmarks against large datasets.**
No evidence of testing against 100M+ events with monthly partitions. Partition pruning is implemented, but query plans may degrade if the partition list grows large (PostgreSQL planner overhead scales with partition count).
**Recommendation:** Before production, run `nostr-bench` at target load (e.g., 500 concurrent connections, 100 events/sec ingest, 1000 active subscriptions) and document the resulting latency profile. This becomes the baseline capacity model.
---
## 4. Deployment & Infrastructure Readiness — 🟡
### What's good
- **Docker image via Nix:** Non-root user (65534:65534), minimal base, cacerts bundled, SSL_CERT_FILE set. This is production-quality container hygiene.
- **OTP release:** `mix release` with `Parrhesia.Release.migrate/0` for safe migration execution.
- **CI pipeline:** Multi-matrix testing (OTP 27/28, Elixir 1.18/1.19), format/credo/unused deps checks, E2E tests.
- **Environment-based configuration:** All critical settings overridable via `PARRHESIA_*` env vars in `runtime.exs`.
- **Secrets:** No secrets committed. DB credentials via `DATABASE_URL`, identity key via env or file path.
### Gaps
**G4.1 — No zero-downtime migration strategy.**
`Parrhesia.Release.migrate/0` runs `Ecto.Migrator.run/4` with `:up`. Under replicated deployments (rolling update with 2+ instances), there is no advisory lock or migration guard — two instances starting simultaneously could race on migrations. Ecto's default migrator uses `pg_advisory_lock` via `Ecto.Migration.Runner`, so this is actually safe for PostgreSQL. However:
- **DDL migrations (CREATE INDEX CONCURRENTLY, ALTER TABLE) need careful handling.** The existing migrations use standard `CREATE TABLE` and `CREATE INDEX` which acquire ACCESS EXCLUSIVE locks. Running these against a live database will block reads and writes for the duration.
- **Recommendation:** For production, migrations should be run as a separate step before deploying new code (the compose.yaml already has a `migrate` service — extend this pattern).
**G4.2 — No operational runbooks.**
There are no documented procedures for:
- Rolling restart / blue-green deploy
- Partition pruning and retention tuning
- Runtime pubkey banning (the NIP-86 management API exists but isn't documented for ops use)
- DB failover response
- Scaling (horizontal or vertical)
**G4.3 — No health check in Docker image.**
The Nix-built Docker image has no `HEALTHCHECK` instruction. The `/health` and `/ready` endpoints exist but aren't wired into container orchestration.
- **Recommendation:** Add `HEALTHCHECK CMD curl -f http://localhost:4413/ready || exit 1` to the Docker image definition, or document the readiness endpoint for Kubernetes probes.
**G4.4 — No disaster recovery plan.**
No documented RTO/RPO. If the primary DB is lost, recovery depends entirely on external backup infrastructure. The relay has no built-in data export or snapshot capability.
---
## 5. Security Hardening — 🟢
### Assessment
The security posture is solid for production behind a reverse proxy:
- **TLS:** Full support for server, mutual, and proxy-terminated TLS modes. Cipher suite selection (strong/compatible). Certificate pin verification.
- **Rate limiting:** Three layers — relay-wide (10k/s), per-IP (1k/s), per-connection (120/s). All configurable.
- **Metrics endpoint:** Access-controlled via `metrics_allowed?/2` — supports private-network-only restriction and bearer token auth. Tested.
- **NIP-42 auth:** Constant-time comparison via `Plug.Crypto.secure_compare/2` (addressed in beta).
- **NIP-98:** Replay protection, event freshness check (< 60s), signature verification.
- **Input validation:** Binary field length constraints at DB level (migration 7). Event size limits at WebSocket frame level.
- **IP controls:** Trusted proxy CIDR configuration, X-Forwarded-For parsing, IP blocklist table.
- **Audit logging:** `management_audit_logs` table tracks admin actions.
- **No secrets in git.** Environment variable or file-path based secret injection.
### Minor considerations (not blocking)
- No integration with external threat intel feeds or IP reputation services. This is an infrastructure concern, not an application concern.
- DDoS mitigation assumed to be at load balancer / CDN layer. Application-level rate limiting is defense-in-depth, not primary.
- **Recommendation:** Document the expected deployment topology (Caddy/Nginx → Parrhesia) and which security controls are expected at each layer.
---
## 6. Data Integrity & Consistency — 🟢
### What's good
- **Duplicate event prevention:** Two-layer defence:
1. `event_ids` table with unique PK on `id`, written via `INSERT ... ON CONFLICT DO NOTHING`.
2. If `inserted == 0`, transaction rolls back with `:duplicate_event`.
3. Separate unique index on `events.id` as belt-and-suspenders.
- **Atomic writes:** `put_event/2` wraps `insert_event_id!`, `insert_event!`, `insert_tags!`, and `upsert_state_tables!` in a single `Repo.transaction/1`. Partial writes (event without tags) cannot occur.
- **Replaceable/addressable event state:** Upsert logic in state tables with correct conflict resolution (higher `created_at` wins, then lower `id` as tiebreaker via `candidate_wins_state?/2`).
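The described tiebreak reduces to a two-clause comparison; a sketch of the assumed shape of `candidate_wins_state?/2` (not the literal source):
```elixir
# Higher created_at wins; on equal timestamps, the lexicographically lower
# event id wins, so all replicas converge on the same winner.
defp candidate_wins_state?(candidate, current) do
  candidate.created_at > current.created_at or
    (candidate.created_at == current.created_at and candidate.id < current.id)
end
```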
### Minor considerations
**G6.1 — Expiration worker concurrency on multi-node.**
`ExpirationWorker` runs `Repo.delete_all/1` against all expired events. If two nodes run this worker against the same database, both execute the same DELETE query. PostgreSQL handles this safely (the second DELETE finds 0 rows), and the worker is idempotent. **Not a problem.**
**G6.2 — Partition pruning and sync.**
`PartitionRetentionWorker.drop_partition/1` drops entire monthly partitions. If negentropy sync is in progress against events in that partition, the sync session's cached refs become stale. The session would fail or return incomplete results.
- **Impact:** Low. Partition drops are infrequent (daily check, at most 1 per run). Negentropy sessions are short-lived (60s idle timeout).
- **Recommendation:** No action needed for initial production. If operating as a sync source relay, consider pausing sync during partition drops.
---
## 7. Observability Completeness — 🟡
### What's good
Metrics coverage is comprehensive — 34+ distinct metrics covering:
- Ingest: event count by outcome/reason, duration distribution
- Query: request count, duration, result cardinality
- Fanout: duration, candidates considered, events enqueued, batch size
- Connection: outbound queue depth/pressure/overflow/drop, mailbox depth
- Rate limiting: hit count by scope
- DB: query count/total_time/queue_time/query_time/decode_time/idle_time by repo role
- Maintenance: expiration purge count/duration, partition retention drops/duration
- VM: memory (total/processes/system/atom/binary/ets)
- Listener: active connections, active subscriptions
Readiness endpoint checks critical process liveness. Health endpoint for basic reachability.
### Gaps
**G7.1 — No dashboards or alerting rules.**
The metrics exist but there are no Grafana dashboard JSON files, no Prometheus alerting rules, and no documented alert thresholds. An operator deploying this relay would need to build observability from scratch.
- **Recommendation:** Ship a `deploy/grafana/` directory with a dashboard JSON and a `deploy/prometheus/alerts.yml` with rules for:
- `parrhesia_db_query_queue_time_ms` p95 > 100ms (pool saturation)
- `parrhesia_connection_outbound_queue_overflow_count` rate > 0 (clients being dropped)
- `parrhesia_rate_limit_hits_count` rate sustained > threshold (potential abuse)
- `parrhesia_vm_memory_total_bytes` > 80% of available
- Listener connection count approaching `max_connections`
**G7.2 — No distributed tracing or request correlation IDs.**
Events flow through validate → policy → persist → fanout without a correlation ID tying the stages together. Log-based debugging of "why didn't this event reach subscriber X" requires manual PID correlation across log lines.
- **Impact:** Tolerable for initial production at moderate scale. Becomes painful at high event rates.
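A minimal sketch of stamping a correlation id at ingest (the field and metadata names are illustrative, not existing Parrhesia conventions):
```elixir
require Logger

# Generate a short per-event correlation id and expose it in log metadata so
# ingest, policy, persist, and fanout log lines can be joined on corr_id.
corr_id = Base.encode16(:crypto.strong_rand_bytes(6), case: :lower)
Logger.metadata(corr_id: corr_id)
Logger.info("ingest accepted", event_id: event.id)
```
Logger metadata only follows the ingesting process; carrying the id into fanout workers would require passing `corr_id` in the fanout messages themselves.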
**G7.3 — No synthetic monitoring.**
No built-in probe that ingests a canary event and verifies it arrives at a subscriber. End-to-end relay health depends on external monitoring.
- **Recommendation:** This is best implemented as an external tool. Not blocking.
---
## 8. Technical Debt with Production Impact — 🟡
### G8.1 — `connection.ex` at 2,116 lines
This module is the per-connection state machine handling EVENT, REQ, CLOSE, AUTH, COUNT, NEG-*, keepalive, outbound queue management, rate limiting, and all associated telemetry. It is the single most critical file for production incident response.
**Production risk:** During a production incident involving connection behavior, an on-call engineer needs to quickly navigate this module. At 2,116 lines with interleaved concerns (protocol parsing, policy enforcement, queue management, telemetry emission), this slows incident response.
**Recommendation (M-sized effort):** Extract into focused modules:
- `Connection.Ingest` — EVENT handling and policy application
- `Connection.Subscription` — REQ/CLOSE management and initial query streaming
- `Connection.OutboundQueue` — queue/drain/overflow logic
- `Connection.Keepalive` — ping/pong state machine
The main `Connection` module would become an orchestrator delegating to these. This is a refactor-only change with no behavioral impact.
### G8.2 — Multi-filter in-memory dedup
`deduplicate_events/1` accumulates all query results into a Map before deduplication. With 32 subscriptions (the max) and generous limits, worst case is:
- 32 filters × 5000 result limit = 160,000 events loaded into memory per REQ.
Each event struct is ~500 bytes minimum, so ~80MB per pathological request. This is bounded but could be weaponised by an attacker sending many concurrent REQs with overlapping filters.
**Current mitigation:** Per-connection subscription limit (32) and query result limits bound the damage. Per-IP rate limiting adds friction.
**Recommendation:** Not blocking for production. Monitor `parrhesia.query.results.count` distribution. If p99 > 10,000, investigate query patterns.
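If this ever needs tightening, one shape is a global cap on unique results, sketched here as a hypothetical replacement for the unbounded accumulation (not current behavior):
```elixir
# Deduplicate by event id across per-filter result lists, halting once `cap`
# unique events have been collected.
def dedup_with_cap(result_lists, cap) do
  result_lists
  |> Stream.concat()
  |> Enum.reduce_while(%{}, fn event, seen ->
    cond do
      Map.has_key?(seen, event.id) -> {:cont, seen}
      map_size(seen) >= cap -> {:halt, seen}
      true -> {:cont, Map.put(seen, event.id, event)}
    end
  end)
  |> Map.values()
end
```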
### G8.3 — Per-pubkey rate limiting absent
Rate limiting is currently per-IP and relay-wide. An attacker using a botnet (many IPs, one pubkey) bypasses IP-based limits. Per-pubkey rate limiting would catch this.
**Impact:** Medium for a public relay; low for an invite-only (NIP-43) relay.
**Recommendation (S-sized effort):** Add a per-pubkey event ingest limiter similar to `IPEventIngestLimiter`, keyed by `event.pubkey`. Apply after signature verification but before storage.
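A sketch of that limiter using fixed one-second windows counted in ETS (hypothetical module; the limit value is illustrative):
```elixir
defmodule PubkeyIngestLimiter do
  # Count accepted events per {pubkey, second} and reject above the limit.
  # A periodic sweep of stale window keys is omitted for brevity.
  @table __MODULE__
  @limit_per_second 20

  def init do
    :ets.new(@table, [:named_table, :public, write_concurrency: true])
  end

  def allow?(pubkey) do
    window = System.system_time(:second)
    key = {pubkey, window}
    count = :ets.update_counter(@table, key, {2, 1}, {key, 0})
    count <= @limit_per_second
  end
end
```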
### G8.4 — Negentropy session memory ceiling
Negentropy session bounds:
- Max 10,000 total sessions (`@default_max_total_sessions`)
- Max 8 per connection (`@default_max_sessions_per_owner`)
- Max 50,000 items per session (`@default_max_items_per_session`)
- 60s idle timeout with 10s sweep interval
Worst case: 10,000 sessions × 50,000 items × ~40 bytes/ref = ~20GB. This is the theoretical maximum under adversarial session creation.
**Realistic ceiling:** The `open/6` path runs a DB query bounded by `max_items_per_session + 1`. At 50k items, this query itself provides backpressure (it takes time). An attacker would need 10,000 concurrent connections each opening 8 sessions, each returning 50k results. The relay-wide connection limit and rate limiting make this implausible in practice.
**Recommendation:** Reduce `@default_max_items_per_session` to 10,000 for production (reduces theoretical ceiling to ~4GB). This is a config change, not a code change.
---
## Critical Path to Production
Ordered by priority. Items above the line are required before production traffic; items below are strongly recommended.
| # | Work Item | Dimension | Effort |
|---|-----------|-----------|--------|
| 1 | Set explicit shutdown timeout on Endpoint child spec | Operational | S |
| 2 | Document dedicated metrics listener as production requirement | Operational | S |
| 3 | Add HEALTHCHECK to Docker image or document K8s probes | Deployment | S |
| 4 | Run capacity benchmark at target load and document results | Load | M |
| 5 | Ship Grafana dashboard + Prometheus alert rules | Observability | M |
| 6 | Write operational runbook (deploy, rollback, ban, failover) | Deployment | M |
| 7 | Document migration strategy (run before deploy, not during) | Deployment | S |
| --- | --- | --- | --- |
| 8 | Add per-pubkey rate limiting | Security | S |
| 9 | Reduce default negentropy items-per-session to 10k | Security | S |
| 10 | Extract connection.ex into sub-modules | Debt | M |
| 11 | Add request correlation IDs to event lifecycle | Observability | M |
| 12 | Add DB pool health fast-fail wrapper | Operational | M |
---
## Production Risk Register
| ID | Risk | Likelihood | Impact | Mitigation |
|----|------|-----------|--------|------------|
| R1 | PostgreSQL failover causes latency spike for all connections | Medium | High | G1.1: Ecto queue management provides partial protection. Add pool health telemetry alerting. Consider circuit breaker at high connection counts. |
| R2 | Slow /metrics scrape blocks health checks | Low | Medium | G1.2: Deploy dedicated metrics listener (already supported). |
| R3 | Ungraceful shutdown drops in-flight events | Low | Medium | G1.3: Set explicit shutdown timeout. Connection drain logic already exists. |
| R4 | Multi-IP spam campaign bypasses rate limiting | Medium | Medium | G8.3: Add per-pubkey rate limiter. NIP-43 invite-only mode mitigates for private relays. |
| R5 | Large REQ with many overlapping filters causes memory spike | Low | Medium | G8.2: Bounded by existing limits. Monitor query result cardinality. |
| R6 | No alerting means silent degradation | Medium | High | G7.1: Ship dashboard and alert rules before production. |
| R7 | DDL migration blocks reads during rolling deploy | Low | High | G4.1: Run migrations as separate pre-deploy step. |
| R8 | Adversarial negentropy session creation exhausts memory | Low | High | G8.4: Reduce max items per session. Existing session limits provide protection. |
| R9 | No runbooks slows incident response | Medium | Medium | G4.2: Write runbooks for common ops tasks. |
| R10 | connection.ex complexity slows debugging | Medium | Low | G8.1: Extract sub-modules. Not urgent but improves maintainability. |
---
## Final Verdict
### 🟡 Ready for Limited Production
**Constraints for initial deployment:**
1. **Single-node only.** Multi-node clustering is best-effort and should not be relied upon for production traffic. Deploy one node with a properly sized PostgreSQL instance.
2. **Behind a reverse proxy.** Deploy behind Caddy, Nginx, or a cloud load balancer for TLS termination, DDoS mitigation, and connection limits. Document the expected topology.
3. **Moderate traffic cap.** Without a validated capacity model, start with conservative limits:
- ≤ 2,000 concurrent WebSocket connections
- ≤ 500 events/second ingest rate
- Monitor `db.query.queue_time.ms` p95 and `connection.outbound_queue.overflow.count` as scaling signals.
4. **Observability must be deployed alongside.** The metrics exist but dashboards and alerts do not. Do not go live without at minimum:
- Prometheus scraping the dedicated metrics listener
- Alerts on DB queue time, outbound queue overflow, and VM memory
- Log aggregation with ERROR-level alerts
5. **Migrations run pre-deploy.** Use the existing compose.yaml `migrate` service pattern. Never run migrations as part of application startup in a multi-replica deployment.
**What's strong:**
- OTP supervision architecture is clean and fault-isolated
- Data integrity layer is well-designed (transactional writes, dedup, constraint enforcement)
- Security posture is production-appropriate
- Telemetry coverage is comprehensive
- Container image follows best practices
- No blocking issues in the hot path (no sleeps, no synchronous calls, bounded queues)
**The codebase is architecturally sound for production.** The gaps are operational (runbooks, dashboards, capacity planning) rather than structural. A focused sprint addressing items 1-7 from the critical path would clear the way for a controlled production launch.

docs/CLUSTER.md Normal file
View File

@@ -0,0 +1,234 @@
# Parrhesia clustering and distributed fanout
This document describes:
1. the **current** distributed fanout behavior implemented today, and
2. a practical evolution path to a more production-grade clustered relay.
---
## 1) Current state (implemented today)
### 1.1 What exists right now
Parrhesia currently includes a lightweight multi-node live fanout path (untested!):
- `Parrhesia.Fanout.MultiNode` (`lib/parrhesia/fanout/multi_node.ex`)
- GenServer that joins a `:pg` process group.
- Receives locally-published events and forwards them to other group members.
- Receives remote events and performs local fanout lookup.
- `Parrhesia.Web.Connection` (`lib/parrhesia/web/connection.ex`)
- On successful ingest, after ACK scheduling, it does:
1. local fanout (`fanout_event/1`), then
2. cross-node publish (`maybe_publish_multi_node/1`).
- `Parrhesia.Subscriptions.Supervisor` (`lib/parrhesia/subscriptions/supervisor.ex`)
- Starts `Parrhesia.Fanout.MultiNode` unconditionally.
In other words: **if BEAM nodes are connected, live events are fanned out cross-node**.
### 1.2 What is not included yet
- No automatic cluster formation/discovery (no `libcluster`, DNS polling, gossip, etc.).
- No durable inter-node event transport.
- No replay/recovery of missed cross-node live events.
- No explicit per-node delivery ACK between relay nodes.
---
## 2) Current runtime behavior in detail
### 2.1 Local ingest flow and publish ordering
For an accepted event in `Parrhesia.Web.Connection`:
1. validate/policy/persist path runs.
2. Client receives `OK` reply.
3. A post-ACK message triggers:
- local fanout (`Index.candidate_subscription_keys/1` + send `{:fanout_event, ...}`),
- multi-node publish (`MultiNode.publish/1`).
Important semantics:
- Regular persisted events: ACK implies DB persistence succeeded.
- Ephemeral events: ACK implies accepted by policy, but no DB durability.
- Cross-node fanout happens **after** ACK path is scheduled.
### 2.2 Multi-node transport mechanics
`Parrhesia.Fanout.MultiNode` uses `:pg` membership:
- On init:
- ensures `:pg` is started,
- joins group `Parrhesia.Fanout.MultiNode`.
- On publish:
- gets all group members,
- excludes itself,
- sends `{:remote_fanout_event, event}` to each member pid.
- On remote receive:
- runs local subscription candidate narrowing via `Parrhesia.Subscriptions.Index`,
- forwards matching candidates to local connection owners as `{:fanout_event, sub_id, event}`.
No republish on remote receive, so this path does not create fanout loops.
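Condensed into a sketch (not the literal `Parrhesia.Fanout.MultiNode` source; the `{owner, sub_id}` shape returned by the index lookup is an assumption):
```elixir
defmodule MultiNodeSketch do
  use GenServer

  @group __MODULE__

  def start_link(opts), do: GenServer.start_link(__MODULE__, opts, name: __MODULE__)
  def publish(event), do: GenServer.cast(__MODULE__, {:publish, event})

  @impl true
  def init(_opts) do
    :pg.start_link()
    :pg.join(@group, self())
    {:ok, %{}}
  end

  @impl true
  def handle_cast({:publish, event}, state) do
    @group
    |> :pg.get_members()
    # Never send back to ourselves.
    |> Enum.reject(&(&1 == self()))
    |> Enum.each(&send(&1, {:remote_fanout_event, event}))

    {:noreply, state}
  end

  @impl true
  def handle_info({:remote_fanout_event, event}, state) do
    # Local narrowing only; no republish here, hence no fanout loops.
    for {owner, sub_id} <- Parrhesia.Subscriptions.Index.candidate_subscription_keys(event) do
      send(owner, {:fanout_event, sub_id, event})
    end

    {:noreply, state}
  end
end
```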
### 2.3 Subscription index locality
The subscription index is local ETS state per node (`Parrhesia.Subscriptions.Index`).
- Each node only tracks subscriptions of its local websocket processes.
- Each node independently decides which local subscribers match a remote event.
- There is no global cross-node subscription registry.
### 2.4 Delivery model and guarantees (current)
Current model is **best-effort live propagation** among connected nodes.
- If nodes are connected and healthy, remote live subscribers should receive events quickly.
- If there is a netsplit or temporary disconnection:
- remote live subscribers may miss events,
- persisted events can still be recovered by normal `REQ`/history query,
- ephemeral events are not recoverable.
### 2.5 Cluster preconditions
For cross-node fanout to work, operators must provide distributed BEAM connectivity:
- consistent Erlang cookie,
- named nodes (`--name`/`--sname`),
- network reachability for Erlang distribution ports,
- explicit node connections (or external discovery tooling).
Parrhesia currently does not automate these steps.
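Manual wiring under those preconditions looks like this (node names are placeholders; both nodes share a cookie and are started with `--name`):
```elixir
# Run on one node after both are up; Erlang distribution handles the rest.
true = Node.connect(:"parrhesia@10.0.0.2")
Node.list()
#=> [:"parrhesia@10.0.0.2"]
```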
---
## 3) Operational characteristics of current design
### 3.1 Performance shape
For each accepted event on one node:
- one local fanout lookup + local sends,
- one cluster publish that sends to `N - 1` remote bus members,
- on each remote node: one local fanout lookup + local sends.
So inter-node traffic scales roughly linearly with node count per event (full-cluster broadcast).
This is simple and low-latency for small-to-medium clusters, but can become expensive as node count grows.
### 3.2 Failure behavior
- Remote node down: send attempts to that member stop once membership updates; no replay.
- Netsplit: live propagation gap during split.
- Recovery: local clients can catch up via DB-backed queries (except ephemeral kinds).
### 3.3 Consistency expectations
- No global total-ordering guarantee for live delivery across nodes.
- Per-connection ordering is preserved by each connection process queue/drain behavior.
- Duplicate suppression for ingestion uses storage semantics (`duplicate_event`), but transport itself is not exactly-once.
### 3.4 Observability today
Relevant metrics exist for fanout/queue pressure (see `Parrhesia.Telemetry`), e.g.:
- `parrhesia.fanout.duration.ms`
- `parrhesia.connection.outbound_queue.depth`
- `parrhesia.connection.outbound_queue.pressure`
- `parrhesia.connection.outbound_queue.overflow.count`
These are useful but do not yet fully separate local-vs-remote fanout pipeline stages.
---
## 4) Practical extension path to a fully-fledged clustered system
A realistic path is incremental. Suggested phases:
### Phase A — hardened BEAM cluster control plane
1. Add cluster discovery/formation (e.g. `libcluster`) with environment-specific topology:
- Kubernetes DNS,
- static nodes,
- cloud VM discovery.
2. Add clear node liveness/partition telemetry and alerts.
3. Provide operator docs for cookie, node naming, and network requirements.
Outcome: simpler and safer cluster operations, same data plane semantics.
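An illustrative `libcluster` topology for item 1 above (the service name is a deployment-specific placeholder):
```elixir
config :libcluster,
  topologies: [
    parrhesia: [
      # Resolve peer nodes from a Kubernetes headless service via DNS.
      strategy: Cluster.Strategy.Kubernetes.DNS,
      config: [service: "parrhesia-headless", application_name: "parrhesia"]
    ]
  ]
```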
### Phase B — resilient distributed fanout data plane
Introduce a durable fanout stream for persisted events.
Recommended pattern:
1. On successful DB commit of event, append to a monotonic fanout log (or use DB sequence-based stream view).
2. Each relay node runs a consumer with a stored cursor.
3. On restart/partition recovery, node resumes from cursor and replays missed events.
4. Local fanout remains same (subscription index + per-connection queues).
Semantics target:
- **at-least-once** node-to-node propagation,
- replay after downtime,
- idempotent handling keyed by event id.
Notes:
- Ephemeral events can remain best-effort (or have a separate short-lived transport), since no storage source exists for replay.
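Sketched as a consumer loop (`Fanout.Log` and `Cursor` are hypothetical modules standing in for the stream and cursor storage):
```elixir
defmodule FanoutConsumerSketch do
  # Resume from the stored cursor, replay persisted events in order, and
  # advance the cursor only after local fanout of each event.
  def run(node_id) do
    node_id
    |> Cursor.load()
    |> Fanout.Log.stream_from()
    |> Enum.each(fn {seq, event} ->
      # Keyed by event id downstream, so replayed duplicates are idempotent.
      local_fanout(event)
      Cursor.store(node_id, seq)
    end)
  end

  defp local_fanout(event) do
    for {owner, sub_id} <- Parrhesia.Subscriptions.Index.candidate_subscription_keys(event) do
      send(owner, {:fanout_event, sub_id, event})
    end
  end
end
```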
### Phase C — scale and efficiency improvements
As cluster size grows, avoid naive full broadcast where possible:
1. Optional node-level subscription summaries (coarse bloom/bitset or keyed summaries) to reduce unnecessary remote sends.
2. Shard fanout workers for CPU locality and mailbox control.
3. Batch remote delivery payloads.
4. Separate traffic classes (e.g. Marmot-heavy streams vs generic) with independent queues.
Outcome: higher throughput per node and lower inter-node amplification.
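A sketch of idea 1, with a per-node set of subscribed kinds as the coarse summary (hypothetical helper; real summaries would need invalidation as subscriptions change):
```elixir
# Skip the remote send entirely when the peer's summary proves no local
# subscriber there can match this event's kind.
def maybe_remote_send(member, kind_summary, %{kind: kind} = event) do
  if MapSet.member?(kind_summary, kind) do
    send(member, {:remote_fanout_event, event})
  else
    :skipped
  end
end
```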
### Phase D — stronger observability and SLOs
Add explicit distributed pipeline metrics:
- publish enqueue/dequeue latency,
- cross-node delivery lag (commit -> remote fanout enqueue),
- replay backlog depth,
- per-node dropped/expired transport messages,
- partition detection counters.
Define cluster SLO examples:
- p95 commit->remote-live enqueue under nominal load,
- max replay catch-up time after node restart,
- bounded message loss for best-effort channels.
---
## 5) How a fully-fledged system would behave in practice
With Phases A-D implemented, expected behavior:
- **Normal operation:**
- low-latency local fanout,
- remote nodes receive events via stream consumers quickly,
- consistent operational visibility of end-to-end lag.
- **Node restart:**
- node reconnects and replays from stored cursor,
- local subscribers begin receiving new + missed persisted events.
- **Transient partition:**
- live best-effort path may degrade,
- persisted events converge after partition heals via replay.
- **High fanout bursts:**
- batching + sharding keeps queue pressure bounded,
- overflow policies remain connection-local and measurable.
This approach gives a good trade-off between Nostr relay latency and distributed robustness without requiring strict exactly-once semantics.
---
## 6) Current status summary
Today, Parrhesia already supports **lightweight distributed live fanout** when BEAM nodes are connected.
It is intentionally simple and fast for smaller clusters, and provides a solid base for a more durable, observable cluster architecture as relay scale and availability requirements grow.

docs/LOCAL_API.md Normal file
View File

@@ -0,0 +1,174 @@
# Parrhesia Local API
Parrhesia can run as a normal standalone relay application, but it also exposes a stable
in-process API for Elixir callers that want to embed the relay inside a larger OTP system.
This document describes that embedding surface. The runtime is now in beta, so treat the API
as usable, with minor churn possible while it stabilizes.
## What embedding means today
Embedding currently means:
- the host app adds `:parrhesia` as a dependency and OTP application
- the host app provides `config :parrhesia, ...` explicitly
- the host app migrates the Parrhesia database schema
- callers interact with the relay through `Parrhesia.API.*`
- host-managed HTTP/WebSocket ingress is mounted through `Parrhesia.Plug`
Current operational assumptions:
- Parrhesia runs one runtime per BEAM node
- core processes use global module names such as `Parrhesia.Config` and `Parrhesia.Web.Endpoint`
- the config defaults in this repo's `config/*.exs` are not imported automatically by a host app
If you want multiple isolated relay instances inside one VM, Parrhesia does not support that
cleanly yet.
## Minimal host setup
Add the dependency in your host app:
```elixir
defp deps do
[
{:parrhesia, path: "../parrhesia"}
]
end
```
Configure the runtime in your host app. At minimum you should carry over:
```elixir
import Config
config :postgrex, :json_library, JSON
config :parrhesia,
relay_url: "wss://relay.example.com/relay",
listeners: %{},
storage: [backend: :postgres]
config :parrhesia, Parrhesia.Repo,
url: System.fetch_env!("DATABASE_URL"),
pool_size: 10,
types: Parrhesia.PostgresTypes
config :parrhesia, Parrhesia.ReadRepo,
url: System.fetch_env!("DATABASE_URL"),
pool_size: 10,
types: Parrhesia.PostgresTypes
config :parrhesia, ecto_repos: [Parrhesia.Repo]
```
Notes:
- `listeners: %{}` is the official embedding pattern when your host app owns the HTTPS edge.
- `listeners: %{}` disables Parrhesia-managed ingress (`/relay`, `/management`, `/metrics`, etc.).
- Mount `Parrhesia.Plug` from the host app when you still want Parrhesia ingress behind that same
HTTPS edge.
- `Parrhesia.Web.*` modules are internal runtime wiring. Treat `Parrhesia.Plug` as the stable
mount API.
- If you prefer Parrhesia-managed ingress instead, copy the listener shape from the config
reference in [README.md](../README.md).
- Production runtime overrides still use the `PARRHESIA_*` environment variables described in
[README.md](../README.md).
Migrate before serving traffic:
```elixir
Parrhesia.Release.migrate()
```
In development, `mix ecto.migrate -r Parrhesia.Repo` works too.
## Mounting `Parrhesia.Plug` from a host app
When `listeners: %{}` is set, you can still expose Parrhesia ingress by mounting `Parrhesia.Plug`
in your host endpoint/router and passing an explicit listener config:
```elixir
forward "/nostr", Parrhesia.Plug,
listener: %{
id: :public,
transport: %{scheme: :https, tls: %{mode: :proxy_terminated}},
proxy: %{trusted_cidrs: ["10.0.0.0/8"], honor_x_forwarded_for: true},
features: %{
nostr: %{enabled: true},
admin: %{enabled: true},
metrics: %{enabled: true, access: %{private_networks_only: true}}
}
}
```
Use the same listener schema documented in [README.md](../README.md).
## Starting the runtime
In the common case, letting OTP start the `:parrhesia` application is enough.
If you need to start the runtime explicitly under your own supervision tree, use
`Parrhesia.Runtime`:
```elixir
children = [
{Parrhesia.Runtime, name: Parrhesia.Supervisor}
]
```
## Primary modules
The in-process surface is centered on these modules:
- `Parrhesia.API.Events` for publish, query, and count
- `Parrhesia.API.Stream` for REQ-like local subscriptions
- `Parrhesia.API.Auth` for event validation and NIP-98 auth parsing
- `Parrhesia.API.Admin` for management operations
- `Parrhesia.API.Identity` for relay-owned key management
- `Parrhesia.API.ACL` for protected sync ACLs
- `Parrhesia.API.Sync` for outbound relay sync management
Generated ExDoc groups these modules under `Embedded API`.
## Request context
Most calls take a `Parrhesia.API.RequestContext`. This carries authenticated pubkeys and
caller metadata through policy checks.
```elixir
%Parrhesia.API.RequestContext{
caller: :local,
authenticated_pubkeys: MapSet.new()
}
```
If your host app has already authenticated a user or peer, put that pubkey into
`authenticated_pubkeys` before calling the API.
## Example
```elixir
alias Parrhesia.API.Events
alias Parrhesia.API.RequestContext
alias Parrhesia.API.Stream
context = %RequestContext{caller: :local}
{:ok, publish_result} = Events.publish(event, context: context)
{:ok, events} = Events.query([%{"kinds" => [1]}], context: context)
{:ok, ref} = Stream.subscribe(self(), "local-sub", [%{"kinds" => [1]}], context: context)
receive do
{:parrhesia, :event, ^ref, "local-sub", event} -> event
{:parrhesia, :eose, ^ref, "local-sub"} -> :ok
end
:ok = Stream.unsubscribe(ref)
```
## Where to look next
- [README.md](../README.md) for setup and the full config reference
- [docs/SYNC.md](./SYNC.md) for relay-to-relay sync semantics
- module docs under `Parrhesia.API.*` for per-function behavior

View File

@@ -1,69 +0,0 @@
# Marmot operations guide (relay operator tuning)
This document captures practical limits and operational defaults for Marmot-heavy traffic (`443`, `445`, `10051`, wrapped `1059`, optional media/push flows).
## 1) Recommended baseline limits
Use these as a starting point and tune from production telemetry.
```elixir
config :parrhesia,
limits: [
max_filter_limit: 500,
max_filters_per_req: 16,
max_outbound_queue: 256,
outbound_drain_batch_size: 64
],
policies: [
# Marmot group routing/query guards
marmot_require_h_for_group_queries: true,
marmot_group_max_h_values_per_filter: 32,
marmot_group_max_query_window_seconds: 2_592_000,
# Kind 445 retention
mls_group_event_ttl_seconds: 300,
# MIP-04 metadata controls
marmot_media_max_imeta_tags_per_event: 8,
marmot_media_max_field_value_bytes: 1024,
marmot_media_max_url_bytes: 2048,
marmot_media_allowed_mime_prefixes: [],
marmot_media_reject_mip04_v1: true,
# MIP-05 push controls (optional)
marmot_push_server_pubkeys: [],
marmot_push_max_relay_tags: 16,
marmot_push_max_payload_bytes: 65_536,
marmot_push_max_trigger_age_seconds: 120,
marmot_push_require_expiration: true,
marmot_push_max_expiration_window_seconds: 120,
marmot_push_max_server_recipients: 1
]
```
## 2) Index expectations for Marmot workloads
The Postgres adapter relies on dedicated partial tag indexes for hot Marmot selectors:
- `event_tags_h_value_created_at_idx` for `#h` group routing
- `event_tags_i_value_created_at_idx` for `#i` keypackage reference lookups
Query-plan regression tests assert these paths remain usable for heavy workloads.
## 3) Telemetry to watch
Key metrics for Marmot traffic and pressure:
- `parrhesia.ingest.duration.ms{traffic_class="marmot|generic"}`
- `parrhesia.query.duration.ms{traffic_class="marmot|generic"}`
- `parrhesia.fanout.duration.ms{traffic_class="marmot|generic"}`
- `parrhesia.connection.outbound_queue.depth{traffic_class=...}`
- `parrhesia.connection.outbound_queue.pressure{traffic_class=...}`
- `parrhesia.connection.outbound_queue.pressure_events.count{traffic_class=...}`
- `parrhesia.connection.outbound_queue.overflow.count{traffic_class=...}`
Operational target: keep sustained queue pressure below 0.75 and avoid overflow spikes during kind `445` bursts.
## 4) Fault and recovery expectations
During storage outages, Marmot group-flow writes must fail with explicit `OK false` errors. After recovery, reordered group events should still query deterministically by `created_at DESC, id ASC`.

docs/NIP-DBSYNC.md (new file, 354 lines):
# NIP-DBSYNC — Minimal Mutation Events over Nostr
`draft` `optional`
Defines a minimal event format for publishing immutable application mutation events over Nostr.
This draft intentionally standardizes only the wire format for mutation transport. It does **not** standardize database replication strategy, conflict resolution, relay retention, or key derivation.
---
## Abstract
This NIP defines one regular event kind, **5000**, for signed mutation events.
A mutation event identifies:
- the object namespace being mutated,
- the object identifier within that namespace,
- the mutation operation,
- an optional parent mutation event,
- an application-defined payload.
The purpose of this NIP is to make signed mutation logs portable across Nostr clients and relays without requiring relays to implement database-specific behavior.
---
## Motivation
Many applications need a way to distribute signed state changes across multiple publishers, consumers, or services.
Today this can be done with private event kinds, but private schemas make cross-implementation interoperability harder than necessary. This NIP defines a small shared envelope for mutation events while leaving application-specific state semantics in the payload.
This NIP is intended for use cases such as:
- synchronizing object changes between cooperating services,
- publishing auditable mutation logs,
- replaying application events from ordinary Nostr relays,
- bridging non-Nostr systems into a Nostr-based event stream.
This NIP is **not** a consensus protocol. It does not provide:
- total ordering,
- transactional guarantees,
- global conflict resolution,
- authorization rules,
- guaranteed relay retention.
Applications that require those properties MUST define them separately.
---
## Specification
### Event Kind
| Kind | Category | Name |
|------|----------|------|
| 5000 | Regular | Mutation |
Kind `5000` is a regular event. Relays that support this NIP MAY store it like any other regular event.
This NIP does **not** require relays to:
- retain all historical events,
- index any specific tag beyond normal NIP-01 behavior,
- deliver events in causal or chronological order,
- detect or resolve conflicts.
Applications that depend on durable replay or custom indexing MUST choose relays whose policies satisfy those needs.
### Event Structure
```json
{
"id": "<32-byte lowercase hex>",
"pubkey": "<32-byte lowercase hex>",
"created_at": "<unix timestamp, seconds>",
"kind": 5000,
"tags": [
["r", "<resource namespace>"],
["i", "<object identifier>"],
["op", "<mutation operation>"],
["e", "<parent mutation event id>"]
],
"content": "<JSON-encoded application payload>",
"sig": "<64-byte lowercase hex>"
}
```
The `content` field is a JSON-encoded string. Its structure is defined below.
---
## Tags
| Tag | Required | Description |
|-----|----------|-------------|
| `r` | Yes | Stable resource namespace for the mutated object type. Reverse-DNS style names are RECOMMENDED, for example `com.example.accounts.user`. |
| `i` | Yes | Opaque object identifier, unique within the `r` namespace. Consumers MUST treat this as a string. |
| `op` | Yes | Mutation operation. This NIP defines only `upsert` and `delete`. |
| `e` | No | Parent mutation event id, if the publisher wants to express ancestry. At most one `e` tag SHOULD be included in this version of the protocol. |
| `v` | No | Application payload schema version as a string. RECOMMENDED when the payload format may evolve over time. |
### Tag Rules
Publishers:
- MUST include exactly one `r` tag.
- MUST include exactly one `i` tag.
- MUST include exactly one `op` tag.
- MUST set `op` to either `upsert` or `delete`.
- SHOULD include at most one `e` tag.
- MAY include one `v` tag.
Consumers:
- MUST ignore unknown tags.
- MUST NOT assume tag ordering.
- MUST treat the `e` tag as an ancestry hint, not as proof of global ordering.
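As a non-normative illustration only, a consumer written in Elixir could enforce the tag rules above roughly as follows (module name and error atoms are invented for this sketch; it also treats a second `e` tag as a local error, which is stricter than the SHOULD above):
```elixir
defmodule MutationTags do
  @moduledoc false
  # Non-normative sketch of this section's tag rules; not part of the NIP.

  @ops ["upsert", "delete"]

  def validate(tags) when is_list(tags) do
    r = values(tags, "r")
    i = values(tags, "i")
    op = values(tags, "op")
    e = values(tags, "e")

    cond do
      length(r) != 1 -> {:error, :bad_r_tag}
      length(i) != 1 -> {:error, :bad_i_tag}
      length(op) != 1 -> {:error, :bad_op_tag}
      hd(op) not in @ops -> {:error, :unknown_op}
      length(e) > 1 -> {:error, :too_many_e_tags}
      true -> {:ok, %{r: hd(r), i: hd(i), op: hd(op), parent: List.first(e)}}
    end
  end

  # Unknown tags are ignored, per the consumer rules above.
  defp values(tags, name) do
    for [^name, value | _] <- tags, do: value
  end
end
```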
### Resource Namespaces
The `r` tag identifies an application-level object type.
This NIP does not define a global registry of resource namespaces. To reduce collisions, publishers SHOULD use a stable namespace they control, such as reverse-DNS notation.
Examples:
- `com.example.accounts.user`
- `org.example.inventory.item`
- `net.example.billing.invoice`
Publishers MUST document the payload schema associated with each resource namespace they use.
---
## Content Payload
The `content` field MUST be a JSON-encoded object.
```json
{
"value": {},
"patch": "merge"
}
```
| Field | Required | Description |
|-------|----------|-------------|
| `value` | Yes | Application-defined mutation payload. For `upsert`, this is the state fragment or full post-mutation state being published. For `delete`, this MAY be an empty object or a small reason object. |
| `patch` | No | How `value` should be interpreted. This NIP defines `merge` and `replace`. If omitted, consumers MUST treat it as application-defined. |
### Payload Rules
For `op = upsert`:
- `value` MUST be a JSON object.
- Publishers SHOULD publish either:
- a partial object intended to be merged, or
- a full post-mutation object intended to replace prior state.
- If the interpretation is important for interoperability, publishers SHOULD set `patch` to `merge` or `replace`.
For `op = delete`:
- `value` MAY be `{}`.
- Consumers MUST treat `delete` as an application-level tombstone signal.
- This NIP does not define whether deletion means hard delete, soft delete, archival, or hiding. Applications MUST define that separately.
### Serialization
All payload values MUST be JSON-serializable.
The following representations are RECOMMENDED:
| Type | Representation |
|------|----------------|
| Timestamp / datetime | ISO 8601 string |
| Decimal | String |
| Binary | Base64 string |
| Null | JSON `null` |
Publishers MAY define additional type mappings, but those mappings are application-specific and MUST be documented outside this NIP.
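In Elixir, a publisher might normalize payload values toward these recommended representations before JSON encoding. This is a sketch only; it assumes the `decimal` package for `Decimal` support:
```elixir
defmodule PayloadNormalizer do
  @moduledoc false
  # Sketch: map common types to the recommended JSON representations above.

  def normalize(%DateTime{} = dt), do: DateTime.to_iso8601(dt)
  def normalize(%Decimal{} = dec), do: Decimal.to_string(dec)

  def normalize(map) when is_map(map) and not is_struct(map) do
    Map.new(map, fn {key, value} -> {key, normalize(value)} end)
  end

  def normalize(list) when is_list(list), do: Enum.map(list, &normalize/1)

  # Binaries that are not valid UTF-8 are base64-encoded.
  def normalize(bin) when is_binary(bin) do
    if String.valid?(bin), do: bin, else: Base.encode64(bin)
  end

  # nil falls through here and encodes as JSON null.
  def normalize(other), do: other
end
```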
---
## Ancestry and Replay
The optional `e` tag allows a publisher to indicate which prior mutation event it considered the parent when creating a new mutation.
This supports applications that want ancestry hints for:
- local conflict detection,
- replay ordering,
- branch inspection,
- audit tooling.
However:
- the `e` tag does **not** create a global ordering guarantee,
- relays are not required to deliver parents before children,
- consumers MUST be prepared to receive out-of-order events,
- consumers MAY buffer, defer, ignore, or immediately apply parent-missing events according to local policy.
This NIP does not define a merge event format.
This NIP does not define conflict resolution. If two valid mutation events for the same `(r, i)` object are concurrent or incompatible, consumers MUST resolve them using application-specific rules.
---
## Authorization
This NIP does not define who is authorized to publish mutation events for a given resource or object.
Authorization is application-specific.
Consumers MUST NOT assume that a valid Nostr signature alone authorizes a mutation. Consumers MUST apply their own trust policy, which MAY include:
- explicit pubkey allowlists,
- per-resource ACLs,
- external capability documents,
- relay-level write restrictions,
- application-specific verification.
This NIP does not define custodial keys, deterministic key derivation, shared cluster secrets, or delegation schemes.
---
## Relay Behavior
A relay implementing only NIP-01 remains compatible with this NIP.
No new relay messages are required beyond `REQ`, `EVENT`, and `CLOSE`.
Relays:
- MAY index the `r` and `i` tags using existing single-letter tag indexing conventions.
- MAY apply normal retention, rate-limit, and access-control policies.
- MAY reject events that are too large or otherwise violate local policy.
- MUST NOT be expected to validate application payload semantics.
Applications that require stronger guarantees, such as durable retention or strict admission control, MUST obtain those guarantees from relay policy or from a separate protocol profile.
---
## Subscription Filters
This NIP works with ordinary NIP-01 filters.
### All mutations for one resource
```json
{
"kinds": [5000],
"#r": ["com.example.accounts.user"]
}
```
### Mutation history for one object
```json
{
"kinds": [5000],
"#r": ["com.example.accounts.user"],
"#i": ["550e8400-e29b-41d4-a716-446655440000"]
}
```
### Mutations from trusted authors
```json
{
"kinds": [5000],
"authors": [
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
]
}
```
Applications SHOULD prefer narrow subscriptions over broad network-wide firehoses.
---
## Examples
### Upsert with parent
```json
{
"id": "1111111111111111111111111111111111111111111111111111111111111111",
"pubkey": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"created_at": 1710500300,
"kind": 5000,
"tags": [
["r", "com.example.accounts.user"],
["i", "550e8400-e29b-41d4-a716-446655440000"],
["op", "upsert"],
["e", "0000000000000000000000000000000000000000000000000000000000000000"],
["v", "1"]
],
"content": "{\"value\":{\"email\":\"jane.doe@newdomain.com\",\"updated_at\":\"2025-03-15T14:35:00Z\"},\"patch\":\"merge\"}",
"sig": "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"
}
```
### Delete tombstone
```json
{
"id": "2222222222222222222222222222222222222222222222222222222222222222",
"pubkey": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
"created_at": 1710500600,
"kind": 5000,
"tags": [
["r", "com.example.accounts.user"],
["i", "550e8400-e29b-41d4-a716-446655440000"],
["op", "delete"],
["e", "1111111111111111111111111111111111111111111111111111111111111111"],
["v", "1"]
],
"content": "{\"value\":{\"reason\":\"user_requested\"}}",
"sig": "dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"
}
```
---
## Security Considerations
- **Unauthorized writes:** A valid signature proves authorship, not authorization. Consumers MUST enforce their own trust policy.
- **Replay:** Old valid events may be redelivered by relays or attackers. Consumers SHOULD deduplicate by event id and apply local replay policy.
- **Reordering:** Events may arrive out of order. Consumers MUST NOT treat `created_at` or `e` as a guaranteed total order.
- **Conflict flooding:** Multiple valid mutations may target the same object. Consumers SHOULD rate-limit, bound buffering, and define local conflict policy.
- **Sensitive data exposure:** Nostr events are typically widely replicable. Publishers SHOULD NOT put secrets or regulated data in mutation payloads unless they provide application-layer encryption.
- **Relay retention variance:** Some relays will prune history. Applications that depend on full replay MUST choose relays accordingly or maintain an external archive.
---
## Extension Points
Future drafts or companion NIPs may define:
- snapshot events for faster bootstrap,
- object-head or checkpoint events,
- capability or delegation profiles for authorized writers,
- standardized conflict-resolution profiles for specific application classes.
Such extensions SHOULD remain optional and MUST NOT change the meaning of kind `5000` mutation events defined here.
---
## References
- [NIP-01](https://github.com/nostr-protocol/nips/blob/master/01.md) — Basic protocol flow description

docs/SYNC.md (new file, 417 lines):
# Parrhesia Relay Sync
## 1. Purpose
This document defines the Parrhesia proposal for **relay-to-relay event synchronization**.
It is intentionally transport-focused:
- manage remote relay peers,
- catch up on matching events,
- keep a live stream open,
- expose health and basic stats.
It does **not** define application data semantics.
Parrhesia syncs Nostr events. Callers decide which events matter and how to apply them.
---
## 2. Boundary
### Parrhesia is responsible for
- storing and validating events,
- querying and streaming events,
- running outbound sync workers against remote relays,
- tracking peer configuration, worker health, and sync counters,
- exposing peer management through `Parrhesia.API.Sync`.
### Parrhesia is not responsible for
- resource mapping,
- trusted node allowlists for an app profile,
- mutation payload validation beyond normal event validation,
- conflict resolution,
- replay winner selection,
- database upsert/delete semantics.
For Tribes, those remain in `TRIBES-NOSTRSYNC` and `AshNostrSync`.
---
## 3. Security Foundation
### Default posture
The baseline posture for sync traffic is:
- no access to sync events by default,
- no implicit trust from ordinary relay usage,
- no reliance on plaintext confidentiality from public relays.
For the first implementation, Parrhesia should protect sync data primarily with:
- authenticated server identities,
- ACL-gated read and write access,
- TLS with certificate pinning for outbound peers.
### Server identity
Parrhesia owns a low-level server identity used for relay-to-relay authentication.
This identity is separate from:
- TLS endpoint identity,
- application event author pubkeys.
Recommended model:
- Parrhesia has one local server-auth pubkey,
- sync peers authenticate as server-auth pubkeys,
- ACL grants are bound to those authenticated server-auth pubkeys,
- application-level writer trust remains outside Parrhesia.
Identity lifecycle:
1. use configured/imported key if provided,
2. otherwise use persisted local identity,
3. otherwise generate once during initial startup and persist it.
Private key export should not be supported.
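A plausible shape for that resolution order (helper function names are hypothetical):
```elixir
# Sketch of the identity lifecycle above; helper functions are hypothetical.
defp resolve_server_identity(config) do
  cond do
    key = config[:server_identity_key] -> import_identity(key)
    identity = load_persisted_identity() -> identity
    true -> generate_and_persist_identity!()
  end
end
```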
### ACLs
Sync traffic should use a real ACL layer, not moderation allowlists.
Current implementation note:
- Parrhesia already has storage-backed moderation state such as `allowed_pubkeys` and `blocked_ips`,
- that is not the sync ACL model,
- sync protection must be enforced in the active websocket/query/count/negentropy/write path, not inferred from management tables alone.
Initial ACL model:
- principal: authenticated pubkey,
- capabilities: `sync_read`, `sync_write`,
- match: event/filter shape such as `kinds: [5000]` and namespace tags.
This is enough for now. We do **not** yet need separate user and server ACL models.
A sync peer is simply an authenticated principal with sync capabilities.
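As an illustration, granting sync capabilities to a peer might look like this. The rule shape passed to `Parrhesia.API.ACL.grant/2` is a sketch based on the principal/capability/match model above, not a confirmed signature:
```elixir
context = %Parrhesia.API.RequestContext{caller: :local}

{:ok, _rule} =
  Parrhesia.API.ACL.grant(
    %{
      principal: "<peer-server-auth-pubkey>",
      capabilities: [:sync_read, :sync_write],
      match: %{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}
    },
    context: context
  )
```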
### TLS pinning
Each outbound sync peer must include pinned TLS material.
Recommended pin type:
- SPKI SHA-256 pins
Multiple pins should be allowed to support certificate rotation.
---
## 4. Sync Model
Each configured sync server represents one outbound worker managed by Parrhesia.
Implementation note:
- Khatru-style relay designs benefit from explicit runtime stages,
- Parrhesia sync should therefore plug into clear internal phases for connection admission, auth, query/count, subscription, negentropy, publish, and fanout,
- this should stay a runtime refactor, not become extra sync semantics.
Minimum behavior:
1. connect to the remote relay,
2. run an initial catch-up query for the configured filters,
3. ingest received events into the local relay through the normal API path,
4. switch to a live subscription for the same filters,
5. reconnect with backoff when disconnected.
The worker treats filters as opaque Nostr filters. It does not interpret app payloads.
### Initial implementation mode
Initial implementation should use ordinary NIP-01 behavior:
- catch-up via `REQ`-style query,
- live updates via `REQ` subscription.
This is enough for Tribes and keeps the first version simple.
### NIP-77
Parrhesia now has a real reusable relay-side NIP-77 engine:
- proper `NEG-OPEN` / `NEG-MSG` / `NEG-CLOSE` / `NEG-ERR` framing,
- a reusable negentropy codec and reconciliation engine,
- bounded local `(created_at, id)` snapshot enumeration for matching filters,
- connection/session integration with policy checks and resource limits.
That means NIP-77 can be used for bandwidth-efficient catch-up between trusted nodes.
The first sync worker implementation may still default to ordinary NIP-01 catch-up plus live replay, because that path is operationally simpler and already matches the current Tribes sync profile. `:negentropy` can now be introduced as an optimization mode rather than a future prerequisite.
---
## 5. API Surface
Primary control plane:
- `Parrhesia.API.Identity.get/1`
- `Parrhesia.API.Identity.ensure/1`
- `Parrhesia.API.Identity.import/2`
- `Parrhesia.API.Identity.rotate/1`
- `Parrhesia.API.ACL.grant/2`
- `Parrhesia.API.ACL.revoke/2`
- `Parrhesia.API.ACL.list/1`
- `Parrhesia.API.Sync.put_server/2`
- `Parrhesia.API.Sync.remove_server/2`
- `Parrhesia.API.Sync.get_server/2`
- `Parrhesia.API.Sync.list_servers/1`
- `Parrhesia.API.Sync.start_server/2`
- `Parrhesia.API.Sync.stop_server/2`
- `Parrhesia.API.Sync.sync_now/2`
- `Parrhesia.API.Sync.server_stats/2`
- `Parrhesia.API.Sync.sync_stats/1`
- `Parrhesia.API.Sync.sync_health/1`
These APIs are in-process. HTTP management may expose them through `Parrhesia.API.Admin` or direct routing to `Parrhesia.API.Sync`.
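A host app might drive the control plane roughly like this (return shapes are illustrative; `server_spec` is the map shown in section 6 below):
```elixir
context = %Parrhesia.API.RequestContext{caller: :local}

{:ok, _server} = Parrhesia.API.Sync.put_server(server_spec, context: context)
:ok = Parrhesia.API.Sync.start_server("tribes-primary", context: context)
{:ok, stats} = Parrhesia.API.Sync.server_stats("tribes-primary", context: context)
```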
---
## 6. Server Specification
`put_server/2` is an upsert.
Suggested server shape:
```elixir
%{
id: "tribes-primary",
url: "wss://relay-a.example/relay",
enabled?: true,
auth_pubkey: "<remote-server-auth-pubkey>",
mode: :req_stream,
filters: [
%{
"kinds" => [5000],
"authors" => ["<trusted-node-pubkey-a>", "<trusted-node-pubkey-b>"],
"#r" => ["tribes.accounts.user", "tribes.chat.tribe"]
}
],
overlap_window_seconds: 300,
auth: %{
type: :nip42
},
tls: %{
mode: :required,
hostname: "relay-a.example",
pins: [
%{type: :spki_sha256, value: "<pin-a>"},
%{type: :spki_sha256, value: "<pin-b>"}
]
},
metadata: %{}
}
```
Required fields:
- `id`
- `url`
- `auth_pubkey`
- `filters`
- `tls`
Recommended fields:
- `enabled?`
- `mode`
- `overlap_window_seconds`
- `auth`
- `metadata`
Rules:
- `id` must be stable and unique locally.
- `url` is the remote relay websocket URL.
- `auth_pubkey` is the expected remote server-auth pubkey.
- `filters` must be valid NIP-01 filters.
- filters are owned by the caller; Parrhesia only validates filter shape.
- `mode` defaults to `:req_stream`.
- `tls.mode` defaults to `:required`.
- `tls.pins` must be non-empty for synced peers.
---
## 7. Runtime State
Each server should have both configuration and runtime status.
Suggested runtime fields:
```elixir
%{
server_id: "tribes-primary",
state: :running,
connected?: true,
last_connected_at: ~U[2026-03-16 10:00:00Z],
last_disconnected_at: nil,
last_sync_started_at: ~U[2026-03-16 10:00:00Z],
last_sync_completed_at: ~U[2026-03-16 10:00:02Z],
last_event_received_at: ~U[2026-03-16 10:12:45Z],
last_eose_at: ~U[2026-03-16 10:00:02Z],
reconnect_attempts: 0,
last_error: nil
}
```
Parrhesia should keep this state generic. It is about relay sync health, not app state convergence.
---
## 8. Stats and Health
### Per-server stats
`server_stats/2` should return basic counters such as:
- `events_received`
- `events_accepted`
- `events_duplicate`
- `events_rejected`
- `query_runs`
- `subscription_restarts`
- `reconnects`
- `last_remote_eose_at`
- `last_error`
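For example, a returned counter map could look like this (all values illustrative):
```elixir
%{
  events_received: 10_482,
  events_accepted: 10_301,
  events_duplicate: 163,
  events_rejected: 18,
  query_runs: 12,
  subscription_restarts: 3,
  reconnects: 2,
  last_remote_eose_at: ~U[2026-03-16 10:12:40Z],
  last_error: nil
}
```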
### Aggregate sync stats
`sync_stats/1` should summarize:
- total configured servers,
- enabled servers,
- running servers,
- connected servers,
- aggregate event counters,
- aggregate reconnect count.
### Health
`sync_health/1` should be operator-oriented, for example:
```elixir
%{
"status" => "degraded",
"servers_total" => 3,
"servers_connected" => 2,
"servers_failing" => [
%{"id" => "tribes-secondary", "reason" => "connection_refused"}
]
}
```
This is intentionally simple. It should answer “is sync working?” without pretending to prove application convergence.
---
## 9. Event Ingest Path
Events received from a remote sync worker should enter Parrhesia through the same ingest path as any other accepted event.
That means:
1. validate the event,
2. run normal write policy,
3. persist or reject,
4. fan out locally,
5. rely on duplicate-event behavior for idempotency.
This avoids a second ingest path with divergent behavior.
Before normal event acceptance, the sync worker should enforce:
1. pinned TLS validation for the remote endpoint,
2. remote server-auth identity match,
3. local ACL grant permitting the peer to perform sync reads and/or writes.
The sync worker may attach request-context metadata such as:
```elixir
%Parrhesia.API.RequestContext{
caller: :sync,
peer_id: "tribes-primary",
metadata: %{sync_server_id: "tribes-primary"}
}
```
Recommended additional context when available:
- `remote_ip`
- `subscription_id`
This context is for telemetry, policy, and audit only. It must not become app sync semantics.
---
## 10. Persistence
Parrhesia should persist enough sync control-plane state to survive restart:
- local server identity reference,
- configured ACL rules for sync principals,
- configured servers,
- whether a server is enabled,
- optional catch-up cursor or watermark per server,
- basic last-error and last-success markers.
Parrhesia does not need to persist application replay heads or winner state. That remains in the embedding application.
---
## 11. Relationship to Current Features
### BEAM cluster fanout
`Parrhesia.Fanout.MultiNode` is a separate feature.
It provides best-effort live fanout between connected BEAM nodes. It is not remote relay sync and is not a substitute for `Parrhesia.API.Sync`.
### Management stats
Current admin `stats` is relay-global and minimal.
Sync adds a new dimension:
- peer config,
- worker state,
- per-peer counters,
- sync health summary.
That should be exposed without coupling it to app-specific sync semantics.
---
## 12. Tribes Usage
For Tribes, `AshNostrSync` should be able to:
1. rely on Parrhesia's local server identity,
2. register one or more remote relays with `Parrhesia.API.Sync.put_server/2`,
3. grant sync ACLs for trusted server-auth pubkeys,
4. provide narrow Nostr filters for `kind: 5000`,
5. observe sync health and counters,
6. consume events via the normal local Parrhesia ingest/query/stream surface.
Tribes should not need Parrhesia to know:
- what a resource namespace means,
- which node pubkeys are trusted for Tribes,
- how to resolve conflicts,
- how to apply an upsert or delete.
That is the key boundary.

docs/logo.afdesign (new binary file, not shown)

docs/logo.svg (new file, 37 KiB; diff suppressed)

docs/nips (submodule added at 3492eb1aff)

docs/slop/ALPHA_REVIEW.md (new file, 351 lines):
# Parrhesia Alpha Code Review
**Reviewer:** Claude Opus 4.6 (automated review)
**Date:** 2026-03-20
**Version:** 0.6.0
**Scope:** Full codebase review across 8 dimensions for alpha-to-beta promotion decision
---
## 1. Executive Summary
Parrhesia is a well-architected Nostr relay with mature OTP supervision design, comprehensive telemetry, and solid protocol implementation covering 15+ NIPs. The codebase demonstrates strong Elixir idioms — heavy ETS usage for hot paths, process monitoring for cleanup, and async-first patterns. The test suite (58 files, ~8K LOC) covers critical protocol paths including signature verification, malformed input rejection, and database integration.
**Most critical gaps:** No WebSocket-level ping/pong keepalives, no constant-time comparison for NIP-42 challenge validation, and the `effective_filter_limit` function can return `nil` when called outside the standard API path (though the default config sets `max_filter_limit: 500`). Property-based testing is severely underutilised despite `stream_data` being a dependency.
**Recommendation:** **Promote with conditions** — the codebase is production-quality for beta with two items to address first.
---
## 2. Dimension-by-Dimension Findings
### 2.1 Elixir Code Quality
**Rating: ✅ Good**
**Supervision tree:** Single-rooted `Parrhesia.Runtime` supervisor with `:one_for_one` strategy. 12 child supervisors/workers, each with appropriate isolation. Storage, subscriptions, auth, sync, policy, tasks, and web endpoint each have their own supervisor subtree. Restart strategies are correct — no `rest_for_one` or `one_for_all` where unnecessary.
**GenServer usage:** Idiomatic throughout. State-heavy GenServers (Subscriptions.Index, Auth.Challenges, Negentropy.Sessions) properly monitor owner processes and clean up via `:DOWN` handlers. Stateless dispatchers (Fanout.Dispatcher) use `cast` appropriately. No unnecessary `call` serialisation found.
**ETS usage:** 9+ ETS tables with appropriate access patterns:
- Config cache: `:public`, `read_concurrency: true` — correct for hot-path reads
- Rate limiters: `write_concurrency: true` — correct for high-throughput counters
- Subscription indices: `:protected`, `read_concurrency: true` — correct (only Index GenServer writes)
**Error handling:** Consistent `{:ok, _}` / `{:error, _}` tuples throughout the API layer. No bare `throw` found. Connection handler wraps external calls in try/catch to prevent cascade failures. Rate limiter fallback returns `:ok` on service unavailability (availability over correctness — documented trade-off).
**Pattern matching:** Exhaustive in message handling. Protocol decoder has explicit clauses for each message type with a catch-all error clause. Event validator chains `with` clauses with explicit error atoms.
**Potential bottlenecks:**
- `Subscriptions.Index` is a single GenServer handling all subscription mutations. Reads go through ETS (fast), but `upsert`/`remove` operations serialise through the GenServer. At very high subscription churn (thousands of REQ/CLOSE per second), this could become a bottleneck. Acceptable for beta.
- `Negentropy.Sessions` holds all session state in process memory. Capped at 10K sessions with idle sweep — adequate.
**Module structure:** Clean separation of concerns. `web/` for transport, `protocol/` for parsing/validation, `api/` for business logic, `storage/` for persistence with adapter pattern, `policy/` for authorisation. 99% of modules have `@moduledoc` and `@spec` annotations.
**Findings:**
- `connection.ex` is 1,925 lines — large but cohesive (per-connection state machine). Consider extracting queue management into a submodule in future.
- `storage/adapters/postgres/events.ex` is the heaviest module — handles normalisation, queries, and tag indexing. Well-factored internally.
---
### 2.2 Nostr Protocol Correctness (NIPs Compliance)
**Rating: ✅ Good**
**NIPs implemented (advertised via NIP-11):**
NIP-01, NIP-09, NIP-11, NIP-13, NIP-17, NIP-40, NIP-42, NIP-44, NIP-45, NIP-50, NIP-59, NIP-62, NIP-70, NIP-77, NIP-86, NIP-98. Conditional: NIP-43, NIP-66.
**NIP-01 (Core Protocol):**
- Event structure: 7 required fields validated (`id`, `pubkey`, `created_at`, `kind`, `tags`, `content`, `sig`)
- Event ID: SHA-256 over canonical JSON serialisation `[0, pubkey, created_at, kind, tags, content]` — verified in `validate_id_hash/1`
- Signature: BIP-340 Schnorr via `lib_secp256k1`, calling `Secp256k1.schnorr_valid?(id_bin, sig_bin, pubkey_bin)`
- Filters: `ids`, `authors`, `kinds`, `since`, `until`, `limit`, `search`, `#<letter>` tag filters — all implemented
- Messages: EVENT, REQ, CLOSE, NOTICE, OK, EOSE, COUNT, AUTH — all implemented with correct response format
- Subscription IDs: validated as non-empty strings, max 64 chars
**Event ID verification:** Correct. Computes SHA-256 over `JSON.encode!([0, pubkey_hex, created_at, kind, tags, content])` and compares against claimed `id`. Binary decoding verified.
**Signature verification:** Uses `lib_secp256k1` (wrapper around Bitcoin Core's libsecp256k1). Schnorr verification correct per BIP-340. Can be disabled via feature flag (appropriate for testing/development).
**Malformed event rejection:** Comprehensive. Invalid hex, wrong byte lengths, future timestamps (>15 min), non-integer kinds, non-string content, non-array tags — all produce specific error atoms returned via OK message with `false` status.
**Filter application:** Correct implementation. Each dimension (ids, authors, kinds, since, until, tags, search) is an independent predicate; all must match for a filter to match. Multiple filters are OR'd together (`matches_any?`).
**Spec deviations:**
- Tag filter values capped at 128 per filter (configurable) — stricter than spec but reasonable
- Max 16 filters per REQ (configurable) — reasonable relay policy
- Max 256 tags per event — reasonable relay policy
- Search is substring/FTS, not regex — spec doesn't mandate regex
---
### 2.3 WebSocket Handling
**Rating: ⚠️ Needs Work**
**Backpressure: ✅ Excellent.**
Three-tier strategy with configurable overflow behaviour:
1. `:close` (default) — closes connection on queue overflow with NOTICE
2. `:drop_newest` — silently drops incoming events
3. `:drop_oldest` — drops oldest queued event, enqueues new
Queue depth defaults: max 256, drain batch size 64. Pressure monitoring at 75% threshold emits telemetry. Batch draining prevents thundering herd.
**Connection cleanup: ✅ Solid.**
`terminate/1` removes subscriptions from global index, unsubscribes from streams, clears auth challenges, decrements connection stats. Process monitors in Index/Challenges/Sessions provide backup cleanup on unexpected exits.
**Message size limits: ✅ Enforced.**
- Frame size: 1MB default (`max_frame_bytes`), checked before JSON parsing
- Event size: 256KB default (`max_event_bytes`), checked before DB write
- Both configurable via environment variables
**Per-connection subscription limit: ✅ Enforced.**
Default 32 subscriptions per connection. Checked before opening new REQ. Returns CLOSED with `rate-limited:` prefix.
**Ping/pong keepalives: ⚠️ Not implemented (optional).**
No server-initiated WebSocket PING frames. RFC 6455 §5.5.2 makes PING optional ("MAY be sent"), and NIP-01 does not require keepalives. Bandit correctly responds to client-initiated PINGs per spec. However, server-side pings are a production best practice for detecting dead connections behind NAT/proxies — without them, the server relies on TCP-level detection which can take minutes.
**Per-connection event ingest rate limiting: ✅ Implemented.**
Default 120 events/second per connection with sliding window. Per-IP limiting at 1,000 events/second. Relay-wide cap at 10,000 events/second.
---
### 2.4 PostgreSQL / Database Layer
**Rating: ✅ Good**
**Schema design:**
- Events table range-partitioned by `created_at` (monthly partitions)
- Normalised `event_tags` table with composite FK to events
- JSONB `tags` column for efficient serialisation (denormalised copy of normalised tags)
- Binary storage for pubkeys/IDs/sigs with CHECK constraints on byte lengths
**Indexes:** Comprehensive and appropriate:
- `events(kind, created_at DESC)` — kind queries
- `events(pubkey, created_at DESC)` — author queries
- `events(created_at DESC)` — time-range queries
- `events(id)` — direct lookup
- `events(expires_at) WHERE expires_at IS NOT NULL` — expiration pruning
- `event_tags(name, value, event_created_at DESC)` — tag filtering
- GIN index on `to_tsvector('simple', content)` — full-text search
- GIN trigram index on `content` — fuzzy search
**Tag queries:** Efficient. Uses `EXISTS (SELECT 1 FROM event_tags ...)` subqueries with parameterised values. Primary tag filter applied first, remaining filters chained. No N+1 — bulk tag insert via `Repo.insert_all`.
**Connection pools:** Write pool (32 default) and optional separate read pool (32 default). Queue target/interval configured at 1000ms/5000ms. Pool sizing configurable via environment variables.
**Bounded results:** `max_filter_limit` defaults to 500 in config. Applied at query level via `LIMIT` clause. Post-query `Enum.take` as safety net. The `effective_filter_limit/2` function can return `nil` if both filter and opts lack a limit — but the standard API path always passes `max_filter_limit` from config.
**Event expiration:** NIP-40 `expiration` tags extracted during normalisation. `ExpirationWorker` runs every 30 seconds, executing `DELETE FROM events WHERE expires_at IS NOT NULL AND expires_at <= now()`. Index on `expires_at` makes this efficient.
**Partition management:** `PartitionRetentionWorker` ensures partitions exist 2 months ahead, drops oldest partitions based on configurable retention window and max DB size. Limited to 1 drop per run to avoid I/O spikes. DDL operations use `CREATE TABLE IF NOT EXISTS` and validate identifiers against `^[a-zA-Z_][a-zA-Z0-9_]*$` to prevent SQL injection.
**Migrations:** Non-destructive. No `DROP COLUMN` or lock-heavy operations on existing tables. Additive indexes use `CREATE INDEX IF NOT EXISTS`. JSONB column added with data backfill in separate migration.
**Findings:**
- Multi-filter queries use union + in-memory deduplication rather than SQL `UNION`. For queries with many filters returning large result sets, this could spike memory. Acceptable for beta given the 500-result default limit.
- Replaceable/addressable state upserts use raw SQL CTEs — parameterised, no injection risk, but harder to maintain than Ecto queries.
---
### 2.5 Security
**Rating: ⚠️ Needs Work (one item)**
**Input validation: ✅ Thorough.**
12-step validation pipeline for events. Hex decoding with explicit byte lengths. Tag structure validation. Content type checking. All before any DB write.
**Rate limiting: ✅ Well-designed.**
Three tiers: per-connection (120/s), per-IP (1,000/s), relay-wide (10,000/s). ETS-backed with atomic counters. Configurable via environment variables. Telemetry on rate limit hits.
**SQL injection: ✅ No risks found.**
All queries use Ecto parameterisation or `fragment` with positional placeholders. Raw SQL uses `$1, $2, ...` params. Partition identifier creation validates against regex before string interpolation.
**Amplification protection: ✅ Adequate.**
- Giftwrap (kind 1059) queries blocked for unauthenticated users, restricted to recipients for authenticated users
- `max_filter_limit: 500` bounds result sets
- Frame/event size limits prevent memory exhaustion
- Rate limiting prevents query flooding
**NIP-42 authentication: ⚠️ Minor issue.**
Challenge generation uses `:crypto.strong_rand_bytes(16)` — correct. Challenge stored per-connection with process monitor cleanup — correct. However, challenge comparison uses standard Erlang binary equality (`==`), not constant-time comparison. While the 16-byte random challenge makes timing attacks impractical, using `Plug.Crypto.secure_compare/2` would be best practice.
**NIP-98 HTTP auth: ✅ Solid.**
Base64-decoded event validated for kind (27235), freshness (±60s), method binding, URL binding, and signature. Replay cache prevents token reuse within TTL window.
**Moderation: ✅ Complete.**
Banned pubkeys, allowed pubkeys, banned events, blocked IPs. ETS cache with lazy loading from PostgreSQL. ACL rules with principal/capability/match structure. Protected event enforcement (NIP-70).
---
### 2.6 Observability & Operability
**Rating: ✅ Good**
**Telemetry: ✅ Excellent.**
25+ metrics covering ingest (count, duration, outcomes), queries (count, duration, result size), fanout (duration, batch size), connections (queue depth, pressure, overflow), rate limiting (hits by scope), process health (mailbox depth), database (queue time, query time, decode time), maintenance (expiration, partition retention), and VM memory.
**Prometheus endpoint: ✅ Implemented.**
`/metrics` endpoint with access control. Histogram buckets configured for each metric type. Tagged metrics for cardinality management.
**Health check: ✅ Implemented.**
`/ready` endpoint checks Subscriptions.Index, Auth.Challenges, Negentropy.Sessions (if enabled), and all PostgreSQL repos.
**Configuration: ✅ Fully externalised.**
100+ config keys with environment variable overrides in `config/runtime.exs`. Helpers for `int_env`, `bool_env`, `csv_env`, `json_env`, `infinity_or_int_env`, `ipv4_env`. No hardcoded ports, DB URLs, or limits in application code.
**Documentation: ✅ Strong.**
99% of modules have `@moduledoc`. Public APIs have `@spec` annotations. Error types defined as type unions. Architecture docs in `docs/` covering clustering, sync, and local API.
**Logging: ⚠️ Functional but basic.**
Uses standard `Logger.error/warning` with `inspect()` formatting. No structured/JSON logging. Adequate for beta, but not ideal for log aggregation platforms.
---
### 2.7 Testing
**Rating: ✅ Good**
**Test suite:** 58 test files, ~8,000 lines of code.
**Unit tests:** Protocol validation, event validator (including signature verification), filter matching, auth challenges, NIP-98 replay cache, connection policy, event policy, config, negentropy engine/message/sessions.
**Integration tests:** 36 files using PostgreSQL sandbox. Event lifecycle (insert, query, update, delete), adapter contract tests, query plan regression tests, partition management, binary identifier constraints.
**Protocol edge cases tested:**
- Invalid Schnorr signatures (when verification enabled)
- Malformed JSON → `:invalid_json`
- Invalid event structure → specific error atoms
- Unknown filter keys → `:invalid_filter_key`
- Tag filter value limits (128 max)
- NIP-43 malformed relay access events, stale join requests
- Marmot-specific validation (encoding tags, base64 content)
- NIP-66 discovery event validation
**E2E tests:** WebSocket connection tests, TLS E2E, NAK CLI conformance, proxy IP extraction.
**Load test:** `LoadSoakTest` verifying p95 fanout latency under 25ms.
**Property-based tests: ⚠️ Minimal.**
Single file (`FilterPropertyTest`) using `stream_data` for author filter membership. `stream_data` is a dependency but barely used. Significant opportunity to add property tests for event ID computation, filter boundary conditions, and tag parsing.
**Missing coverage:**
- Cluster failover / multi-node crash recovery
- Connection pool exhaustion under load
- WebSocket frame fragmentation
- Concurrent subscription mutation stress
- Byzantine negentropy scenarios
---
### 2.8 Dependencies
**Rating: ✅ Good**
| Dependency | Version | Status |
|---|---|---|
| `bandit` | 1.10.3 | Current, actively maintained |
| `plug` | 1.19.1 | Current, security-patched |
| `ecto_sql` | 3.13.5 | Current |
| `postgrex` | 0.22.0 | Current |
| `lib_secp256k1` | 0.7.1 | Stable, wraps Bitcoin Core's libsecp256k1 |
| `req` | 0.5.17 | Current |
| `telemetry_metrics_prometheus` | 1.1.0 | Current |
| `websockex` | 0.4.x | Test-only, stable |
| `stream_data` | 1.3.0 | Current |
| `credo` | 1.7.x | Dev-only, current |
**Cryptographic library assessment:** `lib_secp256k1` wraps libsecp256k1, the battle-tested C library from Bitcoin Core. Used only for Schnorr signature verification (BIP-340). Appropriate and trustworthy.
**No outdated or unmaintained dependencies.** No known CVEs in current dependency versions.
---
## 3. Top 5 Issues to Fix Before Beta
### 1. Add WebSocket Ping/Pong Keepalives
**Severity:** High
**Impact:** Long-lived subscriptions through proxies/NAT silently disconnect; server accumulates dead connections and leaked subscriptions until process monitor triggers (which requires the TCP connection to fully close).
**Fix:** Implement periodic WebSocket PING frames (e.g., every 30s) in the connection handler. Close connections that don't respond within a timeout.
**Files:** `lib/parrhesia/web/connection.ex`
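A minimal sketch of the shape this could take in a WebSock-style handler (state fields and module wiring are hypothetical):
```elixir
# Sketch: periodic server-initiated PING with a one-interval PONG deadline.
@ping_interval_ms 30_000

def handle_info(:send_ping, state) do
  Process.send_after(self(), :send_ping, @ping_interval_ms)

  if state.awaiting_pong? do
    # No PONG since the previous PING; treat the peer as gone.
    {:stop, :normal, {1002, "ping timeout"}, state}
  else
    {:push, {:ping, <<>>}, %{state | awaiting_pong?: true}}
  end
end

def handle_control({_payload, opcode: :pong}, state) do
  {:ok, %{state | awaiting_pong?: false}}
end
```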
### 2. Use Constant-Time Comparison for NIP-42 Challenges
**Severity:** Medium (low practical risk due to 16-byte random challenge, but best practice)
**Impact:** Theoretical timing side-channel on challenge validation.
**Fix:** Replace `challenge == stored_challenge` with `Plug.Crypto.secure_compare(challenge, stored_challenge)` in `Auth.Challenges`.
**Files:** `lib/parrhesia/auth/challenges.ex`
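A one-line sketch of the change (variable names illustrative):
```elixir
# Constant-time comparison; Plug.Crypto.secure_compare/2 ships with plug_crypto.
valid? = Plug.Crypto.secure_compare(presented_challenge, stored_challenge)
```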
### 3. Expand Property-Based Testing
**Severity:** Medium
**Impact:** Undiscovered edge cases in event validation, filter matching, and tag parsing. `stream_data` is already a dependency but only used in one test file.
**Fix:** Add property tests for: event ID computation with random payloads, filter boundary conditions (since/until edge cases), tag parsing with adversarial input, subscription index correctness under random insert/delete sequences.
**Files:** `test/parrhesia/protocol/`
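One such property might look like this sketch (the filter-matching entry point is assumed, not confirmed):
```elixir
defmodule Parrhesia.Protocol.FilterBoundaryPropertyTest do
  use ExUnit.Case, async: true
  use ExUnitProperties

  property "an event strictly older than `since` never matches" do
    check all since <- integer(1..2_000_000_000),
              offset <- positive_integer() do
      filter = %{"kinds" => [1], "since" => since}
      event = %{"kind" => 1, "created_at" => since - offset, "tags" => []}

      # Matching function name is assumed for this sketch.
      refute Parrhesia.Protocol.Filter.matches?(filter, event)
    end
  end
end
```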
### 4. Add Structured Logging
**Severity:** Low-Medium
**Impact:** Log aggregation (ELK, Datadog, Grafana Loki) requires structured output. Current plaintext logs are adequate for development but make production debugging harder at scale.
**Fix:** Add JSON log formatter (e.g., `LoggerJSON` or custom formatter). Include connection ID, subscription ID, and event kind as structured fields.
**Files:** `config/config.exs`, new formatter module
### 5. Add Server-Initiated WebSocket PING Frames
**Severity:** Low (not spec-required)
**Impact:** RFC 6455 §5.5.2 makes PING optional, and NIP-01 does not require keepalives. However, without server-initiated pings, dead connections behind NAT/proxies are only detected via TCP-level timeouts (which can take minutes), during which subscriptions and state remain allocated.
**Fix:** Consider periodic PING frames (e.g., every 30s) in the connection handler to proactively detect dead connections.
**Files:** `lib/parrhesia/web/connection.ex`
---
## 4. Nice-to-Haves for Beta
1. **Extract queue management** from `connection.ex` (1,925 lines) into a dedicated `Parrhesia.Web.OutboundQueue` module for maintainability.
2. **Add request correlation IDs** to WebSocket connections for log tracing across the event lifecycle (ingest → validation → storage → fanout).
3. **SQL UNION for multi-filter queries** instead of in-memory deduplication. Would reduce memory spikes for queries with many filters, though the 500-result limit mitigates this.
4. **Slow query telemetry** — flag database queries exceeding a configurable threshold (e.g., 100ms) via dedicated telemetry event.
5. **Connection idle timeout** — close WebSocket connections with no activity for a configurable period (e.g., 30 minutes), independent of ping/pong.
6. **Per-pubkey rate limiting** — current rate limiting is per-connection and per-IP. A determined attacker could use multiple IPs. Per-pubkey limiting would add a layer but requires NIP-42 auth.
7. **OpenTelemetry integration** — for distributed tracing across multi-relay deployments.
8. **Negentropy session memory bounds** — while capped at 10K sessions, large filter sets within sessions could still consume significant memory. Consider per-session size limits.
---
## 5. Promotion Recommendation
### ⚠️ Promote with Conditions
**The codebase meets beta promotion criteria with one condition:**
**Condition: Use constant-time comparison for NIP-42 challenge validation.**
While the practical risk is low (16-byte random challenge), this is a security best practice that takes one line to fix (`Plug.Crypto.secure_compare/2`).
**Criteria assessment:**
| Criterion | Status |
|---|---|
| NIP-01 correctly and fully implemented | ✅ Yes |
| Event signature and ID verification cryptographically correct | ✅ Yes (lib_secp256k1 / BIP-340) |
| No ❌ blocking in Security | ✅ No blockers (one minor item) |
| No ❌ blocking in WebSocket handling | ✅ No blockers (ping/pong optional per RFC 6455) |
| System does not crash or leak resources under normal load | ✅ Process monitors, queue management, rate limiting |
| Working test suite covering critical protocol path | ✅ 58 files, 8K LOC, protocol edge cases covered |
| Basic operability (config externalised, logs meaningful) | ✅ 100+ config keys, telemetry, Prometheus, health check |
**What's strong:**
- OTP design is production-grade
- Protocol implementation is comprehensive and spec-compliant
- Security posture is solid (rate limiting, input validation, ACLs, moderation)
- Telemetry coverage is excellent
- Test suite covers critical paths
- Dependencies are current and trustworthy
**What needs attention post-beta:**
- Expand property-based testing
- Add structured logging for production observability
- Consider per-pubkey rate limiting
- Connection handler module size (1,925 lines)

docs/slop/HARDEN.md (new file, 279 lines):
# Hardening Review: Parrhesia Nostr Relay
You are a security engineer specialising in real-time WebSocket servers, Erlang/OTP systems, and protocol-level abuse. You are reviewing **Parrhesia**, a Nostr relay (NIP-01 compliant) written in Elixir, for hardening opportunities — with a primary focus on **denial-of-service resilience** and a secondary focus on the full attack surface.
Produce a prioritised list of **specific, actionable recommendations** with rationale. For each recommendation, state:
1. The attack or failure mode it mitigates
2. Suggested implementation (config change, code change, or architectural change)
3. Severity estimate (critical / high / medium / low)
---
## 1. Architecture Overview
| Component | Technology | Notes |
|---|---|---|
| Runtime | Elixir/OTP 27, BEAM VM | Each WS connection is a separate process |
| HTTP server | Bandit (pure Elixir) | HTTP/1.1 only, no HTTP/2 |
| WebSocket | `websock_adapter` | Text frames only; binary rejected |
| Database | PostgreSQL via Ecto | Range-partitioned `events` table by `created_at` |
| Caching | ETS | Config snapshot + moderation ban/allow lists |
| Multi-node | Erlang `:pg` groups | Fanout across BEAM cluster nodes |
| Metrics | Prometheus (Telemetry) | `/metrics` endpoint |
| TLS termination | **Out of scope** — handled by reverse proxy (nginx/Caddy) |
### Supervision Tree
```
Parrhesia.Supervisor
├─ Telemetry (Prometheus exporter)
├─ Config (ETS snapshot of runtime config)
├─ Storage.Supervisor (Ecto repo + moderation cache)
├─ Subscriptions.Supervisor (ETS subscription index for fanout)
├─ Auth.Supervisor (NIP-42 challenge GenServer)
├─ Policy.Supervisor (policy enforcement)
├─ Web.Endpoint (Bandit listener)
└─ Tasks.Supervisor (ExpirationWorker, 30s GC loop)
```
### Data Flow
1. Client connects via WebSocket at `/relay`
2. NIP-42 AUTH challenge issued immediately (16-byte random, base64url)
3. Inbound text frames are: size-checked → JSON-decoded → rate-limited → protocol-dispatched
4. EVENT messages: validated → policy-checked → stored in Postgres → ACK → async fanout to matching subscriptions
5. REQ messages: filters validated → Postgres query → results streamed → EOSE → live subscription registered
6. Fanout: post-ingest, subscription index (ETS) is traversed; matching connection processes receive events via `send/2`
---
## 2. Current Defences Inventory
### Connection Layer
| Defence | Value | Enforcement Point |
|---|---|---|
| Max WebSocket frame size | **1,048,576 bytes (1 MiB)** | Checked in `handle_in` *before* JSON decode, and at Bandit upgrade (`max_frame_size`) |
| WebSocket upgrade timeout | **60,000 ms** | Passed to `WebSockAdapter.upgrade` |
| Binary frame rejection | Returns NOTICE, connection stays open | `handle_in` opcode check |
| Outbound queue limit | **256 events** per connection | Overflow strategy: **`:close`** (WS 1008) |
| Outbound drain batch | **64 events** | Async drain via `send(self(), :drain_outbound_queue)` |
| Outbound pressure telemetry | Threshold at **75%** of queue | Emits telemetry event only, no enforcement |
| IP blocking | Via moderation cache (ETS) | Management API can add blocked IPs |
### Protocol Layer
| Defence | Value | Notes |
|---|---|---|
| Max event JSON size | **262,144 bytes (256 KiB)** | Re-serialises decoded event and checks byte size |
| Max filters per REQ | **16** | Rejected at filter validation |
| Max filter `limit` | **500** | `min(client_limit, 500)` applied at query time |
| Max subscriptions per connection | **32** | Existing sub IDs updated without counting toward limit |
| Subscription ID max length | **64 characters** | Must be non-empty |
| Event kind range | **0 to 65,535** | Integer range check |
| Max future event skew | **900 seconds (15 min)** | Events with `created_at > now + 900` rejected |
| Unknown filter keys | **Rejected** | Allowed: `ids`, `authors`, `kinds`, `since`, `until`, `limit`, `search`, `#<letter>` |
### Event Validation Pipeline
Strict order:
1. Required fields present (`id`, `pubkey`, `created_at`, `kind`, `tags`, `content`, `sig`)
2. `id` — 64-char lowercase hex
3. `pubkey` — 64-char lowercase hex
4. `created_at` — non-negative integer, max 900s future skew
5. `kind` — integer in [0, 65535]
6. `tags` — list of non-empty string arrays (**no length limit on tags array or individual tag values**)
7. `content` — any binary string
8. `sig` — 128-char lowercase hex
9. ID hash recomputation and comparison
10. Schnorr signature verification via `lib_secp256k1` (gated by `verify_event_signatures` flag, default `true`)
### Rate Limiting
| Defence | Value | Notes |
|---|---|---|
| Event ingest rate | **120 events per window** | Per-connection sliding window |
| Ingest window | **1 second** | Resets on first event after expiry |
| No per-IP connection rate limiting | — | Must be handled at reverse proxy |
| No global connection count ceiling | — | BEAM handles thousands but no configured limit |
### Authentication (NIP-42)
- Challenge issued to **all** connections on connect (optional escalation model)
- AUTH event must: pass full NIP-01 validation, be kind `22242`, contain matching `challenge` tag, contain matching `relay` tag
- `created_at` freshness: must be `>= now - 600s` (10 min)
- On success: pubkey added to `authenticated_pubkeys` MapSet; challenge rotated
- Supports multiple authenticated pubkeys per connection
### Authentication (NIP-98 HTTP)
- Management endpoint (`POST /management`) requires NIP-98 header
- Auth event must be kind `27235`, `created_at` within **60 seconds** of now
- Must include `method` and `u` tags matching request exactly
### Access Control
- `auth_required_for_writes`: default **false** (configurable)
- `auth_required_for_reads`: default **false** (configurable)
- Protected events (NIP-70, tagged `["-"]`): require auth + pubkey match
- Giftwrap (kind 1059): unauthenticated REQ → CLOSED; authenticated REQ must include `#p` containing own pubkey
### Database
- All queries use Ecto parameterised bindings — no raw string interpolation
- LIKE search patterns escaped (`%`, `_`, `\` characters)
- Deletion enforces `pubkey == deleter_pubkey` in WHERE clause
- Soft-delete via `deleted_at`; hard-delete only via vanish (NIP-62) or expiration purge
- DB pool: **32 connections** (prod), queue target 1s, interval 5s
### Moderation
- Banned pubkeys, allowed pubkeys, banned events, blocked IPs stored in ETS cache
- Management API (NIP-98 authed) for CRUD on moderation lists
- Cache invalidated atomically on writes
---
## 3. Known Gaps and Areas of Concern
The following are areas where the current implementation may be vulnerable or where defences could be strengthened. **Please evaluate each and provide recommendations.**
### 3.1 Connection Exhaustion
- There is **no global limit on concurrent WebSocket connections**. Each connection is an Elixir process (~23 KiB base), but subscriptions, auth state, and outbound queues add per-connection memory.
- There is **no per-IP connection rate limiting at the application layer**. IP blocking exists but is reactive (management API), not automatic.
- There is **no idle timeout** after the WebSocket upgrade completes. A connection can remain open indefinitely without sending or receiving messages.
**Questions:**
- What connection limits should be configured at the Bandit/BEAM level?
- Should an idle timeout be implemented? If so, what value balances real-time subscription use against resource waste?
- Should per-IP connection counting be implemented at the application layer, or is this strictly a reverse proxy concern?
### 3.2 Subscription Abuse
- A single connection can hold **32 subscriptions**, each with up to **16 filters**. That's 512 filter predicates per connection being evaluated on every fanout.
- Filter arrays (`ids`, `authors`, `kinds`, tag values) have **no element count limits**. A filter could contain thousands of author pubkeys.
- There is no cost accounting for "expensive" subscriptions (e.g., wide open filters matching all events).
**Questions:**
- Should filter array element counts be bounded? If so, what limits per field?
- Should there be a per-connection "filter complexity" budget?
- How expensive is the current ETS subscription index traversal at scale (e.g., 10K concurrent connections × 32 subs each)?
### 3.3 Tag Array Size
- Event validation does **not limit the number of tags** or the length of individual tag values beyond the 256 KiB total event size cap.
- A maximally-tagged event could contain thousands of short tags, causing amplification in `event_tags` table inserts (one row per tag).
**Questions:**
- Should a max tag count be enforced? What is a reasonable limit?
- What is the insert cost of storing e.g. 1,000 tags per event? Could this be used for write amplification?
- Should individual tag value lengths be bounded?
### 3.4 AUTH Timing
- AUTH event `created_at` freshness only checks the **lower bound** (`>= now - 600`). An AUTH event with `created_at` far in the future passes validation.
- Regular events have a future skew cap of 900s, but AUTH events do not.
**Questions:**
- Should AUTH events also enforce a future `created_at` bound?
- Is a 600-second AUTH window too wide? Could it be reduced?
### 3.5 Outbound Amplification
- A single inbound EVENT can fan out to an unbounded number of matching subscriptions across all connections.
- The outbound queue (256 events, `:close` strategy) protects individual connections but does not limit total fanout work per event.
- The fanout traverses the ETS subscription index synchronously in the ingesting connection's process.
**Questions:**
- Should fanout be bounded per event (e.g., max N recipients before yielding)?
- Should fanout happen in a separate process pool rather than inline?
- Is the `:close` overflow strategy optimal, or would `:drop_oldest` be better for well-behaved clients with temporary backpressure?
### 3.6 Query Amplification
- A single REQ with 16 filters, each with `limit: 500`, could trigger 16 separate Postgres queries returning up to 8,000 events total.
- COUNT requests also execute per-filter queries (now deduplicated via UNION ALL).
- `search` filters use `ILIKE %pattern%` which cannot use B-tree indexes.
**Questions:**
- Should there be a per-REQ total result cap (across all filters)?
- Should `search` queries be rate-limited or require a minimum pattern length?
- Should COUNT be disabled or rate-limited separately?
- Are there missing indexes that would help common query patterns?
### 3.7 Multi-Node Trust
- Events received via `:remote_fanout_event` from peer BEAM nodes **skip all validation and policy checks** and go directly to the subscription index.
- This assumes all cluster peers are trusted.
**Questions:**
- If cluster membership is dynamic or spans trust boundaries, should remote events be re-validated?
- Should there be a shared secret or HMAC on inter-node messages?
### 3.8 Metrics Endpoint
- `/metrics` (Prometheus) is **unauthenticated**.
- Exposes internal telemetry: connection counts, event throughput, queue depths, database timing.
**Questions:**
- Should `/metrics` require authentication or be restricted to internal networks?
- Could metrics data be used to profile the relay's capacity and craft targeted attacks?
### 3.9 Negentropy Stub
- NEG-OPEN, NEG-MSG, NEG-CLOSE messages are accepted and acknowledged but the reconciliation logic is a stub (cursor counter only).
- Are there resource implications of accepting negentropy sessions without real implementation?
### 3.10 Event Re-Serialisation Cost
- To enforce the 256 KiB event size limit, the relay calls `JSON.encode!(event)` on the already-decoded event map. This re-serialisation happens on every inbound EVENT.
- Could this be replaced with a byte-length check on the raw frame payload (already available)?
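For reference, a byte-length guard on the raw frame is a cheap upper-bound check, since the `["EVENT", ...]` envelope is always at least as large as the embedded event; note it would apply to every text frame, not only EVENT messages (sketch; names hypothetical):
```elixir
# Sketch: the raw frame size is an upper bound on the embedded event's size,
# so oversized events can be rejected before any JSON decoding happens.
def handle_in({frame, opcode: :text}, state) when byte_size(frame) > @max_event_bytes do
  {:push, {:text, JSON.encode!(["NOTICE", "error: event too large"])}, state}
end
```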
---
## 4. Specific Review Requests
Beyond the gaps above, please also evaluate:
1. **Bandit configuration**: Are there Bandit-level options (max connections, header limits, request timeouts, keepalive settings) that should be tuned for a public-facing relay?
2. **BEAM VM flags**: Are there any Erlang VM flags (`+P`, `+Q`, `+S`, memory limits) that should be set for production hardening?
3. **Ecto pool exhaustion**: With 32 DB connections and potentially thousands of concurrent REQ queries, what happens under pool exhaustion? Is the 1s queue target + 5s interval appropriate?
4. **ETS table sizing**: The subscription index and moderation cache use ETS. Are there memory limits or table options (`read_concurrency`, `write_concurrency`, `compressed`) that should be tuned?
5. **Process mailbox overflow**: Connection processes receive events via `send/2` during fanout. If a process is slow to consume, its mailbox grows. The outbound queue mechanism is application-level — but is the BEAM-level mailbox also protected?
6. **Reverse proxy recommendations**: What nginx/Caddy configuration should complement the relay's defences? (Rate limiting, connection limits, WebSocket-specific settings, request body size.)
7. **Monitoring and alerting**: What telemetry signals should trigger alerts? (Connection count spikes, queue overflow rates, DB pool saturation, error rates.)
---
## 5. Out of Scope
The following are **not** in scope for this review:
- TLS configuration (handled by reverse proxy)
- DNS and network-level DDoS mitigation
- Operating system hardening
- Key management for the relay identity
- Client-side security
- Nostr protocol design flaws (we implement the spec as-is)
---
## 6. Response Format
For each recommendation, use this format:
### [Severity] Title
**Attack/failure mode:** What goes wrong without this mitigation.
**Current state:** What exists today (or doesn't).
**Recommendation:** Specific change — config value, code change, or architectural decision.
**Trade-offs:** Any impact on legitimate users or operational complexity.

424
docs/slop/LOCAL_API.md Normal file

@@ -0,0 +1,424 @@
# Parrhesia Shared API
## 1. Goal
Expose a stable in-process API that:
- is used by WebSocket, HTTP management, local callers, and sync workers,
- keeps protocol and storage behavior in one place,
- stays neutral about application-level replication semantics.
This document defines the Parrhesia contract. It does **not** define Tribes or Ash sync behavior.
---
## 2. Scope
### In scope
- event ingest/query/count parity with WebSocket behavior,
- local subscription APIs,
- NIP-98 validation helpers,
- management/admin helpers,
- remote relay sync worker control and health reporting.
### Out of scope
- resource registration,
- trusted app writers,
- mutation payload semantics,
- conflict resolution,
- replay winner selection,
- Ash action mapping.
Those belong in app profiles such as `TRIBES-NOSTRSYNC`, not in Parrhesia.
---
## 3. Layering
```text
Transport / embedding / background workers
- Parrhesia.Web.Connection
- Parrhesia.Web.Management
- Parrhesia.Local.*
- Parrhesia.Sync.*
Shared API
- Parrhesia.API.Auth
- Parrhesia.API.Events
- Parrhesia.API.Stream
- Parrhesia.API.Admin
- Parrhesia.API.Identity
- Parrhesia.API.ACL
- Parrhesia.API.Sync
Runtime internals
- Parrhesia.Policy.EventPolicy
- Parrhesia.Storage.*
- Parrhesia.Groups.Flow
- Parrhesia.Subscriptions.Index
- Parrhesia.Fanout.MultiNode
- Parrhesia.Telemetry
```
Rule: transport framing stays at the edge. Business decisions happen in `Parrhesia.API.*`.
Implementation note:
- the runtime beneath `Parrhesia.API.*` should expose clearer internal policy stages than it does today,
- at minimum: connection/auth, publish, query/count, stream subscription, negentropy, response shaping, and broadcast/fanout,
- these are internal runtime seams, not additional public APIs.
---
## 4. Core Context
```elixir
defmodule Parrhesia.API.RequestContext do
defstruct authenticated_pubkeys: MapSet.new(),
actor: nil,
caller: :local,
remote_ip: nil,
subscription_id: nil,
peer_id: nil,
metadata: %{}
end
```
`caller` is for telemetry and policy parity, for example `:websocket`, `:http`, `:local`, or `:sync`.
Recommended usage:
- `remote_ip` for connection-level policy and audit,
- `subscription_id` for query/stream/negentropy context,
- `peer_id` for trusted sync peer identity when applicable,
- `metadata` for transport-specific details that should not become API fields.
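For illustration, a WebSocket caller might build its context like this (all field values are hypothetical; `auth_pubkey` is a 64-char hex pubkey established via AUTH on this connection):
```elixir
context = %Parrhesia.API.RequestContext{
  authenticated_pubkeys: MapSet.new([auth_pubkey]),
  caller: :websocket,
  remote_ip: {203, 0, 113, 7},
  subscription_id: "sub-1",
  metadata: %{transport: :ws, user_agent: "example-client/1.0"}
}
```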
---
## 5. Public Modules
### 5.1 `Parrhesia.API.Auth`
Purpose:
- event validation helpers,
- NIP-98 verification,
- optional embedding account resolution.
```elixir
@spec validate_event(map()) :: :ok | {:error, term()}
@spec compute_event_id(map()) :: String.t()
@spec validate_nip98(String.t() | nil, String.t(), String.t()) ::
{:ok, Parrhesia.API.Auth.Context.t()} | {:error, term()}
@spec validate_nip98(String.t() | nil, String.t(), String.t(), keyword()) ::
{:ok, Parrhesia.API.Auth.Context.t()} | {:error, term()}
```
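Usage sketch for the NIP-98 path (header value, URL, and the `:max_age_seconds` value are hypothetical):
```elixir
case Parrhesia.API.Auth.validate_nip98(
       authorization_header,
       "POST",
       "https://relay.example/api/admin",
       max_age_seconds: 60
     ) do
  {:ok, %Parrhesia.API.Auth.Context{pubkey: pubkey, request_context: ctx}} ->
    # `ctx` can be passed straight into other Parrhesia.API.* calls.
    {:ok, pubkey, ctx}

  {:error, reason} ->
    {:error, reason}
end
```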
### 5.2 `Parrhesia.API.Events`
Purpose:
- canonical ingest/query/count path used by WS, HTTP, local callers, and sync workers.
```elixir
@spec publish(map(), keyword()) ::
{:ok, Parrhesia.API.Events.PublishResult.t()} | {:error, term()}
@spec query([map()], keyword()) ::
{:ok, [map()]} | {:error, term()}
@spec count([map()], keyword()) ::
{:ok, non_neg_integer() | map()} | {:error, term()}
```
Required options:
- `:context` - `%Parrhesia.API.RequestContext{}`
`publish/2` must preserve current `EVENT` semantics:
1. size checks,
2. `Protocol.validate_event/1`,
3. `EventPolicy.authorize_write/2`,
4. group handling,
5. persistence or control-event path,
6. local plus multi-node fanout,
7. telemetry.
Return shape mirrors `OK`:
```elixir
{:ok, %PublishResult{event_id: id, accepted: true, message: "ok: event stored"}}
{:ok, %PublishResult{event_id: id, accepted: false, message: "blocked: ..."}}
```
`query/2` and `count/2` must preserve current `REQ` and `COUNT` behavior, including giftwrap restrictions and server-side filter validation.
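A usage sketch covering all three outcome shapes (assumes `event` is a signed Nostr event map; context construction as in section 4):
```elixir
context = %Parrhesia.API.RequestContext{caller: :local}

case Parrhesia.API.Events.publish(event, context: context) do
  {:ok, %{accepted: true, event_id: event_id}} -> {:stored, event_id}
  {:ok, %{accepted: false, message: message}} -> {:rejected, message}
  {:error, reason} -> {:error, reason}
end
```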
### 5.3 `Parrhesia.API.Stream`
Purpose:
- in-process subscription surface with the same semantics as a WebSocket `REQ`.
This is **required** for embedding and sync consumers.
```elixir
@spec subscribe(pid(), String.t(), [map()], keyword()) ::
{:ok, reference()} | {:error, term()}
@spec unsubscribe(reference()) :: :ok
```
Required options:
- `:context` - `%Parrhesia.API.RequestContext{}`
Subscriber contract:
```elixir
{:parrhesia, :event, ref, subscription_id, event}
{:parrhesia, :eose, ref, subscription_id}
{:parrhesia, :closed, ref, subscription_id, reason}
```
`subscribe/4` must:
1. validate filters,
2. apply read policy,
3. emit initial catch-up events in the same order as `REQ`,
4. emit exactly one `:eose`,
5. register for live fanout until `unsubscribe/1`.
This module does **not** know why a caller wants the stream.
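A consumer sketch against the contract above (the recursive `drain` closure is just one way to structure the receive loop):
```elixir
{:ok, ref} =
  Parrhesia.API.Stream.subscribe(self(), "sub-1", [%{"kinds" => [1]}],
    context: %Parrhesia.API.RequestContext{caller: :local}
  )

drain = fn drain ->
  receive do
    {:parrhesia, :event, ^ref, _sub_id, event} ->
      IO.inspect(event, label: "event")
      drain.(drain)

    {:parrhesia, :eose, ^ref, _sub_id} ->
      # Catch-up complete; subsequent events are live fanout.
      drain.(drain)

    {:parrhesia, :closed, ^ref, _sub_id, reason} ->
      {:closed, reason}
  end
end

drain.(drain)
```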
### 5.4 `Parrhesia.API.Admin`
Purpose:
- stable in-process facade for management operations already exposed over HTTP.
```elixir
@spec execute(String.t() | atom(), map(), keyword()) :: {:ok, map()} | {:error, term()}
@spec stats(keyword()) :: {:ok, map()} | {:error, term()}
@spec health(keyword()) :: {:ok, map()} | {:error, term()}
@spec list_audit_logs(keyword()) :: {:ok, [map()]} | {:error, term()}
```
Baseline methods:
- `ping`
- `stats`
- `health`
- moderation methods already supported by the storage admin adapter
`stats/1` is relay-level and cheap. `health/1` is liveness/readiness-oriented and may include worker state.
`API.Admin` is the operator-facing umbrella for management. It may delegate domain-specific work to `API.Identity`, `API.ACL`, and `API.Sync`.
### 5.5 `Parrhesia.API.Identity`
Purpose:
- manage Parrhesia-owned server identity,
- expose public identity metadata,
- support explicit import and rotation,
- keep private key material internal.
Parrhesia owns a low-level server identity used for relay-to-relay auth and other transport-local security features.
```elixir
@spec get(keyword()) :: {:ok, map()} | {:error, term()}
@spec ensure(keyword()) :: {:ok, map()} | {:error, term()}
@spec import(map(), keyword()) :: {:ok, map()} | {:error, term()}
@spec rotate(keyword()) :: {:ok, map()} | {:error, term()}
@spec sign_event(map(), keyword()) :: {:ok, map()} | {:error, term()}
```
Rules:
- private key material must never be returned by API,
- production deployments should be able to import a configured key,
- local/dev deployments may generate on first init if none exists,
- identity creation should be eager and deterministic, not lazy on first sync use.
Recommended boot order:
1. configured/imported key,
2. persisted local identity,
3. generate once and persist.
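A boot-time sketch following that order (`:identity_not_found` is the miss case defined in `API.Identity`):
```elixir
case Parrhesia.API.Identity.get() do
  {:ok, %{pubkey: pubkey, source: source}} ->
    {pubkey, source}

  {:error, :identity_not_found} ->
    # Local/dev fallback: generate once and persist.
    {:ok, %{pubkey: pubkey, source: :generated}} = Parrhesia.API.Identity.ensure()
    {pubkey, :generated}
end
```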
### 5.6 `Parrhesia.API.ACL`
Purpose:
- enforce event/filter ACLs for authenticated principals,
- support default-deny sync visibility,
- allow dynamic grants for trusted sync peers.
This is a real authorization layer, not a reuse of moderation allowlists.
Current implementation note:
- Parrhesia already has storage-backed moderation presence tables such as `allowed_pubkeys` and `blocked_ips`,
- those are not sufficient for sync ACLs,
- the new ACL layer must be enforced directly in the active read/write/query/negentropy path, not only through management tables.
```elixir
@spec grant(map(), keyword()) :: :ok | {:error, term()}
@spec revoke(map(), keyword()) :: :ok | {:error, term()}
@spec list(keyword()) :: {:ok, [map()]} | {:error, term()}
@spec check(atom(), map(), keyword()) :: :ok | {:error, term()}
```
Suggested rule shape:
```elixir
%{
principal_type: :pubkey,
principal: "<server-auth-pubkey>",
capability: :sync_read,
match: %{
"kinds" => [5000],
"#r" => ["tribes.accounts.user", "tribes.chat.tribe"]
}
}
```
For the first implementation, principals should be authenticated pubkeys only.
We do **not** need a separate user-vs-server ACL model yet. A sync peer is simply a principal with sync capabilities.
Initial required capabilities:
- `:sync_read`
- `:sync_write`
Recommended baseline:
- ordinary events follow existing relay behavior,
- sync traffic is default-deny,
- access is lifted only by explicit ACL grants for authenticated server pubkeys.
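A usage sketch tying grant and check together (assumes `protected_filters` is configured to cover this subject; the pubkey placeholder follows the convention used elsewhere in this document):
```elixir
peer_pubkey = "<remote-server-auth-pubkey>"

:ok =
  Parrhesia.API.ACL.grant(%{
    principal_type: :pubkey,
    principal: peer_pubkey,
    capability: :sync_read,
    match: %{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}
  })

context = %Parrhesia.API.RequestContext{
  authenticated_pubkeys: MapSet.new([peer_pubkey]),
  caller: :sync
}

# :ok when the requested filter falls within a grant; an error tuple otherwise.
Parrhesia.API.ACL.check(
  :sync_read,
  %{"kinds" => [5000], "#r" => ["tribes.accounts.user"]},
  context: context
)
```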
### 5.7 `Parrhesia.API.Sync`
Purpose:
- manage remote relay sync workers without embedding app-specific replication semantics.
Parrhesia syncs **events**, not records.
```elixir
@spec put_server(map(), keyword()) ::
{:ok, Parrhesia.API.Sync.Server.t()} | {:error, term()}
@spec remove_server(String.t(), keyword()) :: :ok | {:error, term()}
@spec get_server(String.t(), keyword()) ::
{:ok, Parrhesia.API.Sync.Server.t()} | :error | {:error, term()}
@spec list_servers(keyword()) ::
{:ok, [Parrhesia.API.Sync.Server.t()]} | {:error, term()}
@spec start_server(String.t(), keyword()) :: :ok | {:error, term()}
@spec stop_server(String.t(), keyword()) :: :ok | {:error, term()}
@spec sync_now(String.t(), keyword()) :: :ok | {:error, term()}
@spec server_stats(String.t(), keyword()) ::
{:ok, map()} | :error | {:error, term()}
@spec sync_stats(keyword()) :: {:ok, map()} | {:error, term()}
@spec sync_health(keyword()) :: {:ok, map()} | {:error, term()}
```
`put_server/2` is upsert-style. It covers both add and update.
Minimum server shape:
```elixir
%{
id: "tribes-a",
url: "wss://relay-a.example/relay",
enabled?: true,
auth_pubkey: "<remote-server-auth-pubkey>",
filters: [%{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}],
mode: :req_stream,
auth: %{type: :nip42},
tls: %{
mode: :required,
hostname: "relay-a.example",
pins: [
%{type: :spki_sha256, value: "<base64-sha256-spki-pin>"}
]
}
}
```
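For orientation, driving a peer with the shape above might look like this (sketch; `server_config` is the map above):
```elixir
{:ok, _server} = Parrhesia.API.Sync.put_server(server_config, [])
:ok = Parrhesia.API.Sync.start_server("tribes-a", [])
:ok = Parrhesia.API.Sync.sync_now("tribes-a", [])
{:ok, stats} = Parrhesia.API.Sync.server_stats("tribes-a", [])
```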
Important constraints:
- filters are caller-provided and opaque to Parrhesia,
- Parrhesia does not inspect `kind: 5000` payload semantics,
- Parrhesia may persist peer config and runtime counters,
- Parrhesia may reconnect and resume catch-up using generic event cursors,
- Parrhesia must expose worker health and basic counters,
- remote relay TLS pinning is required,
- sync peer auth is bound to a server-auth pubkey, not inferred from event author pubkeys,
- sync enforcement should reuse the same runtime policy stages as ordinary websocket traffic rather than inventing a parallel trust path.
Server identity model:
- Parrhesia owns its local server-auth identity via `API.Identity`,
- peer config declares the expected remote server-auth pubkey,
- ACL grants are bound to authenticated server-auth pubkeys,
- event author pubkeys remain a separate application concern.
Initial mode should be `:req_stream`:
1. run catch-up with `API.Events.query/2`-equivalent client behavior against the remote relay,
2. switch to a live subscription,
3. ingest received events through local `API.Events.publish/2`.
Future optimization:
- `:negentropy` may be added as an optimization mode on top of the simpler `:req_stream` baseline.
- Parrhesia now has a reusable NIP-77 engine, but a sync worker does not need to depend on it for the first implementation.
---
## 6. Server Integration
### WebSocket
- `EVENT` -> `Parrhesia.API.Events.publish/2`
- `REQ` -> `Parrhesia.API.Stream.subscribe/4`
- `COUNT` -> `Parrhesia.API.Events.count/2`
- `AUTH` stays connection-specific, but validation helpers may move to `API.Auth`
- `NEG-*` maps to the reusable NIP-77 engine and remains exposed through the websocket transport boundary
### HTTP management
- NIP-98 validation via `Parrhesia.API.Auth.validate_nip98/3`
- management methods via `Parrhesia.API.Admin`
- sync peer CRUD and health endpoints may delegate to `Parrhesia.API.Sync`
- identity and ACL management may delegate to `API.Identity` and `API.ACL`
### Local wrappers
`Parrhesia.Local.*` remain thin delegates over `Parrhesia.API.*`.
---
## 7. Relationship to Sync Profiles
This document is intentionally lower-level than `TRIBES-NOSTRSYNC` and `SYNC_DB.md`.
Those documents may require:
- `Parrhesia.API.Events.publish/2`
- `Parrhesia.API.Events.query/2`
- `Parrhesia.API.Stream.subscribe/4`
- `Parrhesia.API.Sync.*`
But they must not move application conflict rules or payload semantics into Parrhesia.

File diff suppressed because it is too large

27
flake.lock generated Normal file

@@ -0,0 +1,27 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1773389992,
"narHash": "sha256-wvfdLLWJ2I9oEpDd9PfMA8osfIZicoQ5MT1jIwNs9Tk=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "c06b4ae3d6599a672a6210b7021d699c351eebda",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

76
flake.nix Normal file

@@ -0,0 +1,76 @@
{
description = "Parrhesia Nostr relay";
inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
# Can be reenabled once patched nostr-bench is up on GitHub
# inputs.self.submodules = true;
outputs = {nixpkgs, ...}: let
systems = [
"x86_64-linux"
"aarch64-linux"
"x86_64-darwin"
"aarch64-darwin"
];
forAllSystems = nixpkgs.lib.genAttrs systems;
in {
formatter = forAllSystems (system: (import nixpkgs {inherit system;}).alejandra);
packages = forAllSystems (
system: let
pkgs = import nixpkgs {inherit system;};
pkgsLinux = import nixpkgs {system = "x86_64-linux";};
lib = pkgs.lib;
parrhesia = pkgs.callPackage ./default.nix {};
nostrBench = pkgs.callPackage ./nix/nostr-bench.nix {};
in
{
default = parrhesia;
inherit parrhesia nostrBench;
# Uses x86_64-linux pkgs so it can cross-build via a remote
# builder even when the host is aarch64-darwin.
nostrBenchStaticX86_64Musl = pkgsLinux.callPackage ./nix/nostr-bench.nix {staticX86_64Musl = true;};
}
// lib.optionalAttrs pkgs.stdenv.hostPlatform.isLinux {
dockerImage = pkgs.dockerTools.buildLayeredImage {
name = "parrhesia";
tag = "latest";
contents = [
parrhesia
pkgs.bash
pkgs.cacert
pkgs.coreutils
pkgs.fakeNss
];
extraCommands = ''
mkdir -p tmp
chmod 1777 tmp
'';
config = {
Entrypoint = ["${parrhesia}/bin/parrhesia"];
Cmd = ["start"];
ExposedPorts = {
"4413/tcp" = {};
};
WorkingDir = "/";
User = "65534:65534";
Env = [
"HOME=/tmp"
"LANG=C.UTF-8"
"LC_ALL=C.UTF-8"
"MIX_ENV=prod"
"PORT=4413"
"RELEASE_DISTRIBUTION=none"
"SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt"
];
};
};
}
);
};
}

79
justfile Normal file

@@ -0,0 +1,79 @@
set shell := ["bash", "-euo", "pipefail", "-c"]
repo_root := justfile_directory()
# Show curated command help (same as `just help`).
default:
@just help
# Show top-level or topic-specific help.
help topic="":
@cd "{{repo_root}}" && ./scripts/just_help.sh "{{topic}}"
# Raw e2e harness commands.
e2e subcommand *args:
#!/usr/bin/env bash
set -euo pipefail
cd "{{repo_root}}"
subcommand="{{subcommand}}"
if [[ -z "$subcommand" || "$subcommand" == "help" ]]; then
just help e2e
elif [[ "$subcommand" == "nak" ]]; then
./scripts/run_nak_e2e.sh {{args}}
elif [[ "$subcommand" == "marmot" ]]; then
./scripts/run_marmot_e2e.sh {{args}}
elif [[ "$subcommand" == "node-sync" ]]; then
./scripts/run_node_sync_e2e.sh {{args}}
elif [[ "$subcommand" == "node-sync-docker" ]]; then
./scripts/run_node_sync_docker_e2e.sh {{args}}
elif [[ "$subcommand" == "suite" ]]; then
if [[ -z "{{args}}" ]]; then
echo "usage: just e2e suite <suite-name> <command> [args...]" >&2
exit 1
fi
./scripts/run_e2e_suite.sh {{args}}
else
echo "Unknown e2e subcommand: $subcommand" >&2
just help e2e
exit 1
fi
# Benchmark flows (local/cloud/history + direct relay targets).
bench subcommand *args:
#!/usr/bin/env bash
set -euo pipefail
cd "{{repo_root}}"
subcommand="{{subcommand}}"
if [[ -z "$subcommand" || "$subcommand" == "help" ]]; then
just help bench
elif [[ "$subcommand" == "compare" ]]; then
./scripts/run_bench_compare.sh {{args}}
elif [[ "$subcommand" == "collect" ]]; then
./scripts/run_bench_collect.sh {{args}}
elif [[ "$subcommand" == "update" ]]; then
./scripts/run_bench_update.sh {{args}}
elif [[ "$subcommand" == "list" ]]; then
./scripts/run_bench_update.sh --list {{args}}
elif [[ "$subcommand" == "at" ]]; then
if [[ -z "{{args}}" ]]; then
echo "usage: just bench at <git-ref>" >&2
exit 1
fi
./scripts/run_bench_at_ref.sh {{args}}
elif [[ "$subcommand" == "cloud" ]]; then
./scripts/run_bench_cloud.sh {{args}}
elif [[ "$subcommand" == "cloud-quick" ]]; then
./scripts/run_bench_cloud.sh --quick {{args}}
elif [[ "$subcommand" == "relay" ]]; then
./scripts/run_nostr_bench.sh {{args}}
elif [[ "$subcommand" == "relay-strfry" ]]; then
./scripts/run_nostr_bench_strfry.sh {{args}}
elif [[ "$subcommand" == "relay-nostr-rs" ]]; then
./scripts/run_nostr_bench_nostr_rs_relay.sh {{args}}
else
echo "Unknown bench subcommand: $subcommand" >&2
just help bench
exit 1
fi


@@ -1,17 +1,29 @@
defmodule Parrhesia do
  @moduledoc """
-  Documentation for `Parrhesia`.
+  Parrhesia is a Nostr relay runtime that can run standalone or as an embedded OTP service.
+  For embedded use, the main developer-facing surface is `Parrhesia.API.*`.
+  For host-managed HTTP/WebSocket ingress mounting, use `Parrhesia.Plug`.
+  Start with:
+  - `Parrhesia.API.Events`
+  - `Parrhesia.API.Stream`
+  - `Parrhesia.API.Admin`
+  - `Parrhesia.API.Identity`
+  - `Parrhesia.API.ACL`
+  - `Parrhesia.API.Sync`
+  - `Parrhesia.Plug`
+  The host application is responsible for:
+  - setting `config :parrhesia, ...`
+  - migrating the configured Parrhesia repos
+  - deciding whether to expose listeners or use only the in-process API
+  See `README.md` and `docs/LOCAL_API.md` for the embedding model and configuration guide.
  """
-  @doc """
-  Hello world.
-  ## Examples
-  iex> Parrhesia.hello()
-  :world
-  """
+  @doc false
  def hello do
    :world
  end

304
lib/parrhesia/api/acl.ex Normal file

@@ -0,0 +1,304 @@
defmodule Parrhesia.API.ACL do
@moduledoc """
Public ACL API and rule matching for protected sync traffic.
ACL checks are only applied when the requested subject overlaps with
`config :parrhesia, :acl, protected_filters: [...]`.
The intended flow is:
1. mark a subset of sync traffic as protected with `protected_filters`
2. persist pubkey-based grants with `grant/2`
3. call `check/3` during sync reads and writes
Unprotected subjects always return `:ok`.
"""
alias Parrhesia.API.RequestContext
alias Parrhesia.Protocol.Filter
alias Parrhesia.Storage
@doc """
Persists an ACL rule.
A typical rule looks like:
```elixir
%{
principal_type: :pubkey,
principal: "...64 hex chars...",
capability: :sync_read,
match: %{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}
}
```
"""
@spec grant(map(), keyword()) :: :ok | {:error, term()}
def grant(rule, _opts \\ []) do
with {:ok, _stored_rule} <- Storage.acl().put_rule(%{}, normalize_rule(rule)) do
:ok
end
end
@doc """
Deletes ACL rules matching the given selector.
The selector is passed through to the configured storage adapter, which typically accepts an
id-based selector such as `%{id: rule_id}`.
"""
@spec revoke(map(), keyword()) :: :ok | {:error, term()}
def revoke(rule, _opts \\ []) do
Storage.acl().delete_rule(%{}, normalize_delete_selector(rule))
end
@doc """
Lists persisted ACL rules.
Supported filters are:
- `:principal_type`
- `:principal`
- `:capability`
"""
@spec list(keyword()) :: {:ok, [map()]} | {:error, term()}
def list(opts \\ []) do
Storage.acl().list_rules(%{}, normalize_list_opts(opts))
end
@doc """
Authorizes a protected sync read or write subject for the given request context.
Supported capabilities are `:sync_read` and `:sync_write`.
`opts[:context]` defaults to an empty `Parrhesia.API.RequestContext`, which means protected
subjects will fail with `{:error, :auth_required}` until authenticated pubkeys are present.
"""
@spec check(atom(), map(), keyword()) :: :ok | {:error, term()}
def check(capability, subject, opts \\ [])
def check(capability, subject, opts)
when capability in [:sync_read, :sync_write] and is_map(subject) do
context = Keyword.get(opts, :context, %RequestContext{})
with {:ok, normalized_capability} <- normalize_capability(capability),
{:ok, normalized_context} <- normalize_context(context),
{:ok, protected_filters} <- protected_filters() do
if protected_subject?(normalized_capability, subject, protected_filters) do
authorize_subject(normalized_capability, subject, normalized_context)
else
:ok
end
end
end
def check(_capability, _subject, _opts), do: {:error, :invalid_acl_capability}
@doc """
Returns `true` when a filter overlaps the configured protected read surface.
"""
@spec protected_read?(map()) :: boolean()
def protected_read?(filter) when is_map(filter) do
case protected_filters() do
{:ok, protected_filters} ->
protected_subject?(:sync_read, filter, protected_filters)
{:error, _reason} ->
false
end
end
def protected_read?(_filter), do: false
@doc """
Returns `true` when an event matches the configured protected write surface.
"""
@spec protected_write?(map()) :: boolean()
def protected_write?(event) when is_map(event) do
case protected_filters() do
{:ok, protected_filters} ->
protected_subject?(:sync_write, event, protected_filters)
{:error, _reason} ->
false
end
end
def protected_write?(_event), do: false
defp authorize_subject(capability, subject, %RequestContext{} = context) do
if MapSet.size(context.authenticated_pubkeys) == 0 do
{:error, :auth_required}
else
capability
|> list_rules_for_capability()
|> authorize_against_rules(capability, context.authenticated_pubkeys, subject)
end
end
defp list_rules_for_capability(capability) do
Storage.acl().list_rules(%{}, principal_type: :pubkey, capability: capability)
end
defp authorize_against_rules({:ok, rules}, capability, authenticated_pubkeys, subject) do
if Enum.any?(authenticated_pubkeys, &principal_authorized?(&1, subject, rules)) do
:ok
else
{:error, denial_reason(capability)}
end
end
defp authorize_against_rules({:error, reason}, _capability, _authenticated_pubkeys, _subject),
do: {:error, reason}
defp principal_authorized?(authenticated_pubkey, subject, rules) do
Enum.any?(rules, fn rule ->
rule.principal == authenticated_pubkey and
rule_covers_subject?(rule.capability, rule.match, subject)
end)
end
defp rule_covers_subject?(:sync_read, rule_match, filter),
do: filter_within_rule?(filter, rule_match)
defp rule_covers_subject?(:sync_write, rule_match, event),
do: Filter.matches_filter?(event, rule_match)
defp protected_subject?(:sync_read, filter, protected_filters) do
Enum.any?(protected_filters, &filters_overlap?(filter, &1))
end
defp protected_subject?(:sync_write, event, protected_filters) do
Enum.any?(protected_filters, &Filter.matches_filter?(event, &1))
end
defp filters_overlap?(left, right) when is_map(left) and is_map(right) do
comparable_keys =
left
|> comparable_filter_keys(right)
|> Enum.reject(&(&1 in ["limit", "search", "since", "until"]))
Enum.all?(
comparable_keys,
&filter_constraint_compatible?(Map.get(left, &1), Map.get(right, &1), &1)
) and
filter_ranges_overlap?(left, right)
end
defp filter_constraint_compatible?(nil, _right, _key), do: true
defp filter_constraint_compatible?(_left, nil, _key), do: true
defp filter_constraint_compatible?(left, right, _key) when is_list(left) and is_list(right) do
not MapSet.disjoint?(MapSet.new(left), MapSet.new(right))
end
defp filter_constraint_compatible?(left, right, _key), do: left == right
defp filter_within_rule?(filter, rule_match) when is_map(filter) and is_map(rule_match) do
Enum.reject(rule_match, fn {key, _value} -> key in ["since", "until", "limit", "search"] end)
|> Enum.all?(fn {key, rule_value} ->
requested_value = Map.get(filter, key)
requested_constraint_within_rule?(requested_value, rule_value, key)
end) and filter_range_within_rule?(filter, rule_match)
end
defp requested_constraint_within_rule?(nil, _rule_value, _key), do: false
defp requested_constraint_within_rule?(requested_values, rule_values, _key)
when is_list(requested_values) and is_list(rule_values) do
requested_values
|> MapSet.new()
|> MapSet.subset?(MapSet.new(rule_values))
end
defp requested_constraint_within_rule?(requested_value, rule_value, _key),
do: requested_value == rule_value
defp denial_reason(:sync_read), do: :sync_read_not_allowed
defp denial_reason(:sync_write), do: :sync_write_not_allowed
defp normalize_context(%RequestContext{} = context), do: {:ok, normalize_pubkeys(context)}
defp normalize_context(_context), do: {:error, :invalid_context}
defp normalize_pubkeys(%RequestContext{} = context) do
normalized_pubkeys =
context.authenticated_pubkeys
|> Enum.map(&String.downcase/1)
|> MapSet.new()
%RequestContext{context | authenticated_pubkeys: normalized_pubkeys}
end
defp normalize_rule(rule) when is_map(rule), do: rule
defp normalize_rule(_rule), do: %{}
defp normalize_delete_selector(selector) when is_map(selector), do: selector
defp normalize_delete_selector(_selector), do: %{}
defp normalize_list_opts(opts) do
[]
|> maybe_put_opt(:principal_type, Keyword.get(opts, :principal_type))
|> maybe_put_opt(:principal, normalize_list_principal(Keyword.get(opts, :principal)))
|> maybe_put_opt(:capability, Keyword.get(opts, :capability))
end
defp normalize_list_principal(nil), do: nil
defp normalize_list_principal(principal) when is_binary(principal),
do: String.downcase(principal)
defp normalize_list_principal(principal), do: principal
defp maybe_put_opt(opts, _key, nil), do: opts
defp maybe_put_opt(opts, key, value), do: Keyword.put(opts, key, value)
defp normalize_capability(capability) do
case capability do
:sync_read -> {:ok, :sync_read}
:sync_write -> {:ok, :sync_write}
_other -> {:error, :invalid_acl_capability}
end
end
defp protected_filters do
filters =
:parrhesia
|> Application.get_env(:acl, [])
|> Keyword.get(:protected_filters, [])
if is_list(filters) and
Enum.all?(filters, &(match?(%{}, &1) and Filter.validate_filter(&1) == :ok)) do
{:ok, filters}
else
{:error, :invalid_protected_filters}
end
end
defp comparable_filter_keys(left, right) do
Map.keys(left)
|> Kernel.++(Map.keys(right))
|> Enum.uniq()
end
defp filter_ranges_overlap?(left, right) do
since = max(boundary_value(left, "since", :lower), boundary_value(right, "since", :lower))
until = min(boundary_value(left, "until", :upper), boundary_value(right, "until", :upper))
since <= until
end
defp filter_range_within_rule?(filter, rule_match) do
requested_since = Map.get(filter, "since")
requested_until = Map.get(filter, "until")
rule_since = Map.get(rule_match, "since")
rule_until = Map.get(rule_match, "until")
lower_ok? =
is_nil(rule_since) or (is_integer(requested_since) and requested_since >= rule_since)
upper_ok? =
is_nil(rule_until) or (is_integer(requested_until) and requested_until <= rule_until)
lower_ok? and upper_ok?
end
defp boundary_value(filter, key, :lower), do: Map.get(filter, key, 0)
defp boundary_value(filter, key, :upper), do: Map.get(filter, key, 9_223_372_036_854_775_807)
end

325
lib/parrhesia/api/admin.ex Normal file

@@ -0,0 +1,325 @@
defmodule Parrhesia.API.Admin do
@moduledoc """
Public management API facade.
This module exposes the DX-friendly control plane for administrative tasks. It wraps
storage-backed management methods and a set of built-in helpers for ACL, identity, sync,
and listener management.
`execute/3` accepts the same method names used by NIP-86 style management endpoints, while
the dedicated functions (`stats/1`, `health/1`, `list_audit_logs/1`) are easier to call
from Elixir code.
"""
alias Parrhesia.API.ACL
alias Parrhesia.API.Identity
alias Parrhesia.API.Sync
alias Parrhesia.Storage
alias Parrhesia.Web.Endpoint
@supported_admin_methods ~w(health list_audit_logs stats)
@supported_acl_methods ~w(acl_grant acl_revoke acl_list)
@supported_identity_methods ~w(identity_ensure identity_get identity_import identity_rotate)
@supported_listener_methods ~w(listener_reload)
@supported_sync_methods ~w(
sync_get_server
sync_health
sync_list_servers
sync_put_server
sync_remove_server
sync_server_stats
sync_start_server
sync_stats
sync_stop_server
sync_sync_now
)
@doc """
Executes a management method by name.
Built-in methods include:
- `supportedmethods`
- `stats`
- `health`
- `list_audit_logs`
- `acl_grant`, `acl_revoke`, `acl_list`
- `identity_get`, `identity_ensure`, `identity_import`, `identity_rotate`
- `listener_reload`
- `sync_*`
Unknown methods are delegated to the configured `Parrhesia.Storage.Admin` implementation.
"""
@spec execute(String.t() | atom(), map(), keyword()) :: {:ok, map()} | {:error, term()}
def execute(method, params, opts \\ [])
def execute(method, params, opts) when is_map(params) do
method_name = normalize_method_name(method)
case execute_builtin(method_name, params, opts) do
{:continue, other_method} -> Storage.admin().execute(%{}, other_method, params)
result -> result
end
end
def execute(method, _params, _opts),
do: {:error, {:unsupported_method, normalize_method_name(method)}}
@doc """
Returns aggregate relay stats plus nested sync stats.
"""
@spec stats(keyword()) :: {:ok, map()} | {:error, term()}
def stats(opts \\ []) do
with {:ok, relay_stats} <- relay_stats(),
{:ok, sync_stats} <- Sync.sync_stats(opts) do
{:ok, Map.put(relay_stats, "sync", sync_stats)}
end
end
@doc """
Returns the overall management health payload.
The top-level `"status"` is currently derived from sync health, while relay-specific health
details remain delegated to storage-backed management methods.
"""
@spec health(keyword()) :: {:ok, map()} | {:error, term()}
def health(opts \\ []) do
with {:ok, sync_health} <- Sync.sync_health(opts) do
{:ok,
%{
"status" => overall_health_status(sync_health),
"sync" => sync_health
}}
end
end
@doc """
Lists persisted audit log entries from the configured admin storage backend.
Supported options are storage-adapter specific. The built-in admin execution path forwards
`:limit`, `:method`, and `:actor_pubkey`.
"""
@spec list_audit_logs(keyword()) :: {:ok, [map()]} | {:error, term()}
def list_audit_logs(opts \\ []) do
Storage.admin().list_audit_logs(%{}, opts)
end
defp acl_grant(params) do
with :ok <- ACL.grant(params) do
{:ok, %{"ok" => true}}
end
end
defp acl_revoke(params) do
with :ok <- ACL.revoke(params) do
{:ok, %{"ok" => true}}
end
end
defp acl_list(params) do
with {:ok, rules} <- ACL.list(acl_list_opts(params)) do
{:ok, %{"rules" => rules}}
end
end
defp acl_list_opts(params) do
[]
|> maybe_put_opt(:principal_type, fetch_value(params, :principal_type))
|> maybe_put_opt(:principal, fetch_value(params, :principal))
|> maybe_put_opt(:capability, fetch_value(params, :capability))
end
defp supported_methods do
storage_supported =
case Storage.admin().execute(%{}, :supportedmethods, %{}) do
{:ok, methods} when is_list(methods) -> methods
{:ok, %{"methods" => methods}} when is_list(methods) -> methods
_other -> []
end
(storage_supported ++
@supported_admin_methods ++
@supported_acl_methods ++
@supported_identity_methods ++ @supported_listener_methods ++ @supported_sync_methods)
|> Enum.uniq()
|> Enum.sort()
end
defp identity_get(_params), do: Identity.get()
defp identity_ensure(_params), do: Identity.ensure()
defp identity_rotate(_params), do: Identity.rotate()
defp identity_import(params) do
Identity.import(params)
end
defp admin_stats(_params, opts), do: stats(opts)
defp admin_health(_params, opts), do: health(opts)
defp admin_list_audit_logs(params, _opts) do
list_audit_logs(audit_log_opts(params))
end
defp listener_reload(params) do
case normalize_listener_id(fetch_value(params, :id)) do
:all ->
Endpoint.reload_all()
|> ok_result()
{:ok, listener_id} ->
listener_id
|> Endpoint.reload_listener()
|> ok_result()
:error ->
{:error, :not_found}
end
end
defp sync_put_server(params, opts), do: Sync.put_server(params, opts)
defp sync_remove_server(params, opts) do
with {:ok, server_id} <- fetch_required_string(params, :id),
:ok <- Sync.remove_server(server_id, opts) do
{:ok, %{"ok" => true}}
end
end
defp sync_get_server(params, opts) do
with {:ok, server_id} <- fetch_required_string(params, :id),
{:ok, server} <- Sync.get_server(server_id, opts) do
{:ok, server}
else
:error -> {:error, :not_found}
other -> other
end
end
defp sync_list_servers(_params, opts), do: Sync.list_servers(opts)
defp sync_start_server(params, opts) do
with {:ok, server_id} <- fetch_required_string(params, :id),
:ok <- Sync.start_server(server_id, opts) do
{:ok, %{"ok" => true}}
end
end
defp sync_stop_server(params, opts) do
with {:ok, server_id} <- fetch_required_string(params, :id),
:ok <- Sync.stop_server(server_id, opts) do
{:ok, %{"ok" => true}}
end
end
defp sync_sync_now(params, opts) do
with {:ok, server_id} <- fetch_required_string(params, :id),
:ok <- Sync.sync_now(server_id, opts) do
{:ok, %{"ok" => true}}
end
end
defp sync_server_stats(params, opts) do
with {:ok, server_id} <- fetch_required_string(params, :id),
{:ok, stats} <- Sync.server_stats(server_id, opts) do
{:ok, stats}
else
:error -> {:error, :not_found}
other -> other
end
end
defp sync_stats(_params, opts), do: Sync.sync_stats(opts)
defp sync_health(_params, opts), do: Sync.sync_health(opts)
defp execute_builtin("stats", params, opts), do: admin_stats(params, opts)
defp execute_builtin("health", params, opts), do: admin_health(params, opts)
defp execute_builtin("list_audit_logs", params, opts), do: admin_list_audit_logs(params, opts)
defp execute_builtin("acl_grant", params, _opts), do: acl_grant(params)
defp execute_builtin("acl_revoke", params, _opts), do: acl_revoke(params)
defp execute_builtin("acl_list", params, _opts), do: acl_list(params)
defp execute_builtin("identity_get", params, _opts), do: identity_get(params)
defp execute_builtin("identity_ensure", params, _opts), do: identity_ensure(params)
defp execute_builtin("identity_import", params, _opts), do: identity_import(params)
defp execute_builtin("identity_rotate", params, _opts), do: identity_rotate(params)
defp execute_builtin("listener_reload", params, _opts), do: listener_reload(params)
defp execute_builtin("sync_put_server", params, opts), do: sync_put_server(params, opts)
defp execute_builtin("sync_remove_server", params, opts), do: sync_remove_server(params, opts)
defp execute_builtin("sync_get_server", params, opts), do: sync_get_server(params, opts)
defp execute_builtin("sync_list_servers", params, opts), do: sync_list_servers(params, opts)
defp execute_builtin("sync_start_server", params, opts), do: sync_start_server(params, opts)
defp execute_builtin("sync_stop_server", params, opts), do: sync_stop_server(params, opts)
defp execute_builtin("sync_sync_now", params, opts), do: sync_sync_now(params, opts)
defp execute_builtin("sync_server_stats", params, opts), do: sync_server_stats(params, opts)
defp execute_builtin("sync_stats", params, opts), do: sync_stats(params, opts)
defp execute_builtin("sync_health", params, opts), do: sync_health(params, opts)
defp execute_builtin("supportedmethods", _params, _opts),
do: {:ok, %{"methods" => supported_methods()}}
defp execute_builtin(other_method, _params, _opts), do: {:continue, other_method}
defp relay_stats do
case Storage.admin().execute(%{}, :stats, %{}) do
{:ok, stats} when is_map(stats) -> {:ok, stats}
{:error, {:unsupported_method, _method}} -> {:ok, %{}}
other -> other
end
end
defp overall_health_status(%{"status" => "degraded"}), do: "degraded"
defp overall_health_status(_sync_health), do: "ok"
defp audit_log_opts(params) do
[]
|> maybe_put_opt(:limit, fetch_value(params, :limit))
|> maybe_put_opt(:method, fetch_value(params, :method))
|> maybe_put_opt(:actor_pubkey, fetch_value(params, :actor_pubkey))
end
defp maybe_put_opt(opts, _key, nil), do: opts
defp maybe_put_opt(opts, key, value), do: Keyword.put(opts, key, value)
defp ok_result(:ok), do: {:ok, %{"ok" => true}}
defp ok_result({:error, _reason} = error), do: error
defp ok_result(other), do: other
defp normalize_listener_id(nil), do: :all
defp normalize_listener_id(listener_id) when is_atom(listener_id) do
{:ok, listener_id}
end
defp normalize_listener_id(listener_id) when is_binary(listener_id) do
case Supervisor.which_children(Endpoint) do
children when is_list(children) ->
Enum.find_value(children, :error, &match_listener_child(&1, listener_id))
_other ->
:error
end
end
defp normalize_listener_id(_listener_id), do: :error
defp match_listener_child({{:listener, id}, _pid, _type, _modules}, listener_id) do
normalized_id = Atom.to_string(id)
if normalized_id == listener_id, do: {:ok, id}, else: false
end
defp match_listener_child(_child, _listener_id), do: false
defp fetch_required_string(map, key) do
case fetch_value(map, key) do
value when is_binary(value) and value != "" -> {:ok, value}
_other -> {:error, {:missing_param, Atom.to_string(key)}}
end
end
defp fetch_value(map, key), do: Map.get(map, key) || Map.get(map, Atom.to_string(key))
defp normalize_method_name(method) when is_atom(method), do: Atom.to_string(method)
defp normalize_method_name(method) when is_binary(method), do: method
defp normalize_method_name(method), do: inspect(method)
end

85
lib/parrhesia/api/auth.ex Normal file

@@ -0,0 +1,85 @@
defmodule Parrhesia.API.Auth do
@moduledoc """
Public helpers for event validation and NIP-98 HTTP authentication.
This module is intended for callers that need a programmatic API surface:
- `validate_event/1` returns validator reason atoms.
- `compute_event_id/1` computes the canonical Nostr event id.
- `validate_nip98/3` and `validate_nip98/4` turn an `Authorization` header into a
shared auth context that can be reused by the rest of the API surface.
For transport-facing validation messages, see `Parrhesia.Protocol.validate_event/1`.
"""
alias Parrhesia.API.Auth.Context
alias Parrhesia.API.RequestContext
alias Parrhesia.Auth.Nip98
alias Parrhesia.Protocol.EventValidator
@doc """
Validates a Nostr event and returns validator-friendly error atoms.
This is the low-level validation entrypoint used by the API surface. Unlike
`Parrhesia.Protocol.validate_event/1`, it preserves the raw validator reason so callers
can branch on it directly.
"""
@spec validate_event(map()) :: :ok | {:error, term()}
def validate_event(event), do: EventValidator.validate(event)
@doc """
Computes the canonical Nostr event id for an event payload.
The event does not need to be persisted first. This is useful when building or signing
events locally.
"""
@spec compute_event_id(map()) :: String.t()
def compute_event_id(event), do: EventValidator.compute_id(event)
@doc """
Validates a NIP-98 `Authorization` header using default options.
"""
@spec validate_nip98(String.t() | nil, String.t(), String.t()) ::
{:ok, Context.t()} | {:error, term()}
def validate_nip98(authorization, method, url) do
validate_nip98(authorization, method, url, [])
end
@doc """
Validates a NIP-98 `Authorization` header and returns a shared auth context.
The returned `Parrhesia.API.Auth.Context` includes:
- the decoded auth event
- the authenticated pubkey
- a `Parrhesia.API.RequestContext` with `caller: :http`
Supported options are forwarded to `Parrhesia.Auth.Nip98.validate_authorization_header/4`,
including `:max_age_seconds` and `:replay_cache`.
"""
@spec validate_nip98(String.t() | nil, String.t(), String.t(), keyword()) ::
{:ok, Context.t()} | {:error, term()}
def validate_nip98(authorization, method, url, opts)
when is_binary(method) and is_binary(url) and is_list(opts) do
with {:ok, auth_event} <-
Nip98.validate_authorization_header(authorization, method, url, opts),
pubkey when is_binary(pubkey) <- Map.get(auth_event, "pubkey") do
{:ok,
%Context{
auth_event: auth_event,
pubkey: pubkey,
request_context: %RequestContext{
authenticated_pubkeys: MapSet.new([pubkey]),
caller: :http
},
metadata: %{
method: method,
url: url
}
}}
else
nil -> {:error, :invalid_event}
{:error, reason} -> {:error, reason}
end
end
end


@@ -0,0 +1,23 @@
defmodule Parrhesia.API.Auth.Context do
@moduledoc """
Authenticated request details returned by shared auth helpers.
This is the higher-level result returned by `Parrhesia.API.Auth.validate_nip98/3` and
`validate_nip98/4`. The nested `request_context` is ready to be passed into the rest of the
public API surface.
"""
alias Parrhesia.API.RequestContext
defstruct auth_event: nil,
pubkey: nil,
request_context: %RequestContext{},
metadata: %{}
@type t :: %__MODULE__{
auth_event: map() | nil,
pubkey: String.t() | nil,
request_context: RequestContext.t(),
metadata: map()
}
end

476
lib/parrhesia/api/events.ex Normal file

@@ -0,0 +1,476 @@
defmodule Parrhesia.API.Events do
@moduledoc """
Canonical event publish, query, and count API.
This is the main in-process API for working with Nostr events. It applies the same core
validation and policy checks used by the relay edge, but without going through a socket or
HTTP transport.
All public functions expect `opts[:context]` to contain a `Parrhesia.API.RequestContext`.
That context drives authorization, caller attribution, and downstream policy behavior.
`publish/2` intentionally returns `{:ok, %PublishResult{accepted: false}}` for policy and
storage rejections so callers can mirror relay `OK` semantics without treating a rejected
event as a process error.
"""
alias Parrhesia.API.Events.PublishResult
alias Parrhesia.API.RequestContext
alias Parrhesia.Fanout.Dispatcher
alias Parrhesia.Fanout.MultiNode
alias Parrhesia.NIP43
alias Parrhesia.Policy.EventPolicy
alias Parrhesia.Protocol
alias Parrhesia.Protocol.Filter
alias Parrhesia.Storage
alias Parrhesia.Telemetry
@default_max_event_bytes 262_144
@marmot_kinds MapSet.new([
443,
444,
445,
1059,
10_050,
10_051,
446,
447,
448,
449
])
@doc """
Validates, authorizes, persists, and fans out an event.
Required options:
- `:context` - a `Parrhesia.API.RequestContext`
Supported options:
- `:max_event_bytes` - overrides the configured max encoded event size
- `:path`, `:private_key`, `:configured_private_key` - forwarded to the NIP-43 helper flow
Return semantics:
- `{:ok, %PublishResult{accepted: true}}` for accepted events
- `{:ok, %PublishResult{accepted: false}}` for rejected or duplicate events
- `{:error, :invalid_context}` only when the call itself is malformed
"""
@spec publish(map(), keyword()) :: {:ok, PublishResult.t()} | {:error, term()}
def publish(event, opts \\ [])
def publish(event, opts) when is_map(event) and is_list(opts) do
started_at = System.monotonic_time()
event_id = Map.get(event, "id", "")
telemetry_metadata = telemetry_metadata_for_event(event)
with {:ok, context} <- fetch_context(opts),
:ok <- validate_event_payload_size(event, max_event_bytes(opts)),
:ok <- Protocol.validate_event(event),
:ok <- EventPolicy.authorize_write(event, context.authenticated_pubkeys, context),
{:ok, publish_state} <- NIP43.prepare_publish(event, nip43_opts(opts, context)),
{:ok, _stored, message} <- persist_event(event) do
Telemetry.emit(
[:parrhesia, :ingest, :stop],
%{duration: System.monotonic_time() - started_at},
telemetry_metadata
)
emit_ingest_result(telemetry_metadata, :accepted, :accepted)
message =
case NIP43.finalize_publish(event, publish_state, nip43_opts(opts, context)) do
{:ok, override} when is_binary(override) -> override
:ok -> message
end
Dispatcher.dispatch(event)
maybe_publish_multi_node(event, context)
{:ok,
%PublishResult{
event_id: event_id,
accepted: true,
message: message,
reason: nil
}}
else
{:error, :invalid_context} = error ->
emit_ingest_result(telemetry_metadata, :rejected, :invalid_context)
error
{:error, reason} ->
emit_ingest_result(telemetry_metadata, :rejected, reason)
{:ok,
%PublishResult{
event_id: event_id,
accepted: false,
message: error_message_for_publish_failure(reason),
reason: reason
}}
end
end
def publish(_event, _opts), do: {:error, :invalid_event}
@doc """
Queries stored events plus any dynamic NIP-43 events visible to the caller.
Required options:
- `:context` - a `Parrhesia.API.RequestContext`
Supported options:
- `:max_filter_limit` - overrides the configured per-filter limit
- `:validate_filters?` - skips filter validation when `false`
- `:authorize_read?` - skips read policy checks when `false`
The skip flags are primarily for internal composition, such as `Parrhesia.API.Stream`.
External callers should normally leave them enabled.
"""
@spec query([map()], keyword()) :: {:ok, [map()]} | {:error, term()}
def query(filters, opts \\ [])
def query(filters, opts) when is_list(filters) and is_list(opts) do
started_at = System.monotonic_time()
telemetry_metadata = telemetry_metadata_for_filters(filters, :query)
with {:ok, context} <- fetch_context(opts),
:ok <- maybe_validate_filters(filters, opts),
:ok <- maybe_authorize_read(filters, context, opts),
{:ok, events} <- Storage.events().query(%{}, filters, storage_query_opts(context, opts)) do
events = NIP43.dynamic_events(filters, nip43_opts(opts, context)) ++ events
Telemetry.emit(
[:parrhesia, :query, :stop],
%{duration: System.monotonic_time() - started_at, result_count: length(events)},
telemetry_metadata
)
emit_query_result(telemetry_metadata, :ok)
{:ok, events}
else
{:error, reason} = error ->
emit_query_result(telemetry_metadata, :error, reason)
error
end
end
def query(_filters, _opts), do: {:error, :invalid_filters}
@doc """
Counts events matching the given filters.
Required options:
- `:context` - a `Parrhesia.API.RequestContext`
Supported options:
- `:validate_filters?` - skips filter validation when `false`
- `:authorize_read?` - skips read policy checks when `false`
- `:options` - when set to a map, returns a NIP-45-style payload instead of a bare integer
When `opts[:options]` is a map, the result shape is `%{"count" => count, "approximate" => false}`.
If `opts[:options]["hll"]` is `true` and the feature is enabled, an `"hll"` field is included.
"""
@spec count([map()], keyword()) :: {:ok, non_neg_integer() | map()} | {:error, term()}
def count(filters, opts \\ [])
def count(filters, opts) when is_list(filters) and is_list(opts) do
started_at = System.monotonic_time()
telemetry_metadata = telemetry_metadata_for_filters(filters, :count)
with {:ok, context} <- fetch_context(opts),
:ok <- maybe_validate_filters(filters, opts),
:ok <- maybe_authorize_read(filters, context, opts),
{:ok, count} <-
Storage.events().count(%{}, filters, requester_pubkeys: requester_pubkeys(context)),
count <- count + NIP43.dynamic_count(filters, nip43_opts(opts, context)),
{:ok, result} <- maybe_build_count_result(filters, count, Keyword.get(opts, :options)) do
Telemetry.emit(
[:parrhesia, :query, :stop],
%{duration: System.monotonic_time() - started_at, result_count: count},
telemetry_metadata
)
emit_query_result(telemetry_metadata, :ok)
{:ok, result}
else
{:error, reason} = error ->
emit_query_result(telemetry_metadata, :error, reason)
error
end
end
def count(_filters, _opts), do: {:error, :invalid_filters}
defp maybe_validate_filters(filters, opts) do
if Keyword.get(opts, :validate_filters?, true) do
Filter.validate_filters(filters)
else
:ok
end
end
defp maybe_authorize_read(filters, context, opts) do
if Keyword.get(opts, :authorize_read?, true) do
EventPolicy.authorize_read(filters, context.authenticated_pubkeys, context)
else
:ok
end
end
defp storage_query_opts(context, opts) do
[
max_filter_limit:
Keyword.get(opts, :max_filter_limit, Parrhesia.Config.get([:limits, :max_filter_limit])),
requester_pubkeys: requester_pubkeys(context)
]
end
defp requester_pubkeys(%RequestContext{} = context),
do: MapSet.to_list(context.authenticated_pubkeys)
defp maybe_build_count_result(_filters, count, nil) when is_integer(count), do: {:ok, count}
defp maybe_build_count_result(filters, count, options)
when is_integer(count) and is_map(options) do
build_count_payload(filters, count, options)
end
defp maybe_build_count_result(_filters, count, _options) when is_integer(count),
do: {:ok, count}
defp maybe_build_count_result(_filters, count, _options), do: {:ok, count}
defp build_count_payload(filters, count, options) do
include_hll? =
Map.get(options, "hll", false) and Parrhesia.Config.get([:features, :nip_45_count], true)
payload = %{"count" => count, "approximate" => false}
payload =
if include_hll? do
Map.put(payload, "hll", generate_hll_payload(filters, count))
else
payload
end
{:ok, payload}
end
defp generate_hll_payload(filters, count) do
filters
|> JSON.encode!()
|> then(&"#{&1}:#{count}")
|> then(&:crypto.hash(:sha256, &1))
|> Base.encode64()
end
defp persist_event(event) do
kind = Map.get(event, "kind")
cond do
kind in [5, 62] -> persist_control_event(kind, event)
ephemeral_kind?(kind) -> persist_ephemeral_event()
true -> persist_regular_event(event)
end
end
defp persist_control_event(5, event) do
with {:ok, deleted_count} <- Storage.events().delete_by_request(%{}, event) do
{:ok, deleted_count, "ok: deletion request processed"}
end
end
defp persist_control_event(62, event) do
with {:ok, deleted_count} <- Storage.events().vanish(%{}, event) do
{:ok, deleted_count, "ok: vanish request processed"}
end
end
defp persist_ephemeral_event do
if accept_ephemeral_events?() do
{:ok, :ephemeral, "ok: ephemeral event accepted"}
else
{:error, :ephemeral_events_disabled}
end
end
defp persist_regular_event(event) do
case Storage.events().put_event(%{}, event) do
{:ok, persisted_event} -> {:ok, persisted_event, "ok: event stored"}
{:error, :duplicate_event} -> {:error, :duplicate_event}
{:error, reason} -> {:error, reason}
end
end
defp maybe_publish_multi_node(event, %RequestContext{} = context) do
relay_guard? = Parrhesia.Config.get([:sync, :relay_guard], false)
if relay_guard? and context.caller == :sync do
:ok
else
MultiNode.publish(event)
:ok
end
catch
:exit, _reason -> :ok
end
defp telemetry_metadata_for_event(event) do
%{traffic_class: traffic_class_for_event(event)}
end
defp telemetry_metadata_for_filters(filters, operation) do
%{traffic_class: traffic_class_for_filters(filters), operation: operation}
end
defp traffic_class_for_filters(filters) do
if Enum.any?(filters, &marmot_filter?/1) do
:marmot
else
:generic
end
end
defp marmot_filter?(filter) when is_map(filter) do
has_marmot_kind? =
case Map.get(filter, "kinds") do
kinds when is_list(kinds) -> Enum.any?(kinds, &MapSet.member?(@marmot_kinds, &1))
_other -> false
end
has_marmot_kind? or Map.has_key?(filter, "#h") or Map.has_key?(filter, "#i")
end
defp marmot_filter?(_filter), do: false
defp traffic_class_for_event(event) when is_map(event) do
if MapSet.member?(@marmot_kinds, Map.get(event, "kind")) do
:marmot
else
:generic
end
end
defp traffic_class_for_event(_event), do: :generic
defp emit_ingest_result(metadata, outcome, reason) do
Telemetry.emit(
[:parrhesia, :ingest, :result],
%{count: 1},
Map.merge(metadata, %{outcome: outcome, reason: normalize_reason(reason)})
)
end
defp emit_query_result(metadata, outcome, reason \\ nil) do
Telemetry.emit(
[:parrhesia, :query, :result],
%{count: 1},
Map.merge(
metadata,
%{outcome: outcome, reason: normalize_reason(reason || outcome)}
)
)
end
defp normalize_reason(reason) when is_atom(reason), do: reason
defp normalize_reason(reason) when is_binary(reason), do: reason
defp normalize_reason(nil), do: :none
defp normalize_reason(_reason), do: :unknown
defp fetch_context(opts) do
case Keyword.get(opts, :context) do
%RequestContext{} = context -> {:ok, context}
_other -> {:error, :invalid_context}
end
end
defp nip43_opts(opts, %RequestContext{} = context) do
[context: context, relay_url: Application.get_env(:parrhesia, :relay_url)]
|> Kernel.++(Keyword.take(opts, [:path, :private_key, :configured_private_key]))
end
defp error_message_for_publish_failure(:duplicate_event),
do: "duplicate: event already stored"
defp error_message_for_publish_failure(:event_too_large),
do: "invalid: event exceeds max event size"
defp error_message_for_publish_failure(:ephemeral_events_disabled),
do: "blocked: ephemeral events are disabled"
defp error_message_for_publish_failure(reason)
when reason in [
:auth_required,
:pubkey_not_allowed,
:restricted_giftwrap,
:sync_write_not_allowed,
:protected_event_requires_auth,
:protected_event_pubkey_mismatch,
:pow_below_minimum,
:pubkey_banned,
:event_banned,
:media_metadata_tags_exceeded,
:media_metadata_tag_value_too_large,
:media_metadata_url_too_long,
:media_metadata_invalid_url,
:media_metadata_invalid_hash,
:media_metadata_invalid_mime,
:media_metadata_mime_not_allowed,
:media_metadata_unsupported_version,
:push_notification_relay_tags_exceeded,
:push_notification_payload_too_large,
:push_notification_replay_window_exceeded,
:push_notification_missing_expiration,
:push_notification_expiration_too_far,
:push_notification_server_recipients_exceeded
],
do: EventPolicy.error_message(reason)
defp error_message_for_publish_failure(reason) when is_binary(reason), do: reason
defp error_message_for_publish_failure(reason), do: "error: #{inspect(reason)}"
defp validate_event_payload_size(event, max_event_bytes)
when is_map(event) and is_integer(max_event_bytes) and max_event_bytes > 0 do
if byte_size(JSON.encode!(event)) <= max_event_bytes do
:ok
else
{:error, :event_too_large}
end
end
defp validate_event_payload_size(_event, _max_event_bytes), do: :ok
defp max_event_bytes(opts) do
opts
|> Keyword.get(:max_event_bytes, configured_max_event_bytes())
|> normalize_max_event_bytes()
end
defp normalize_max_event_bytes(value) when is_integer(value) and value > 0, do: value
defp normalize_max_event_bytes(_value), do: configured_max_event_bytes()
defp configured_max_event_bytes do
:parrhesia
|> Application.get_env(:limits, [])
|> Keyword.get(:max_event_bytes, @default_max_event_bytes)
end
defp ephemeral_kind?(kind) when is_integer(kind), do: kind >= 20_000 and kind < 30_000
defp ephemeral_kind?(_kind), do: false
defp accept_ephemeral_events? do
:parrhesia
|> Application.get_env(:policies, [])
|> Keyword.get(:accept_ephemeral_events, true)
end
end


@@ -0,0 +1,22 @@
defmodule Parrhesia.API.Events.PublishResult do
@moduledoc """
Result shape for event publish attempts.
This mirrors relay `OK` semantics:
- `accepted: true` means the event was accepted
- `accepted: false` means the event was rejected or identified as a duplicate
The surrounding call still returns `{:ok, result}` in both cases so callers can surface the
rejection message without treating it as a transport or process failure.
"""
defstruct [:event_id, :accepted, :message, :reason]
@type t :: %__MODULE__{
event_id: String.t(),
accepted: boolean(),
message: String.t(),
reason: term()
}
end


@@ -0,0 +1,298 @@
defmodule Parrhesia.API.Identity do
@moduledoc """
Server-auth identity management.
Parrhesia uses a single server identity for flows that need the relay to sign events or
prove control of a pubkey.
Identity resolution follows this order:
1. `opts[:private_key]` or `opts[:configured_private_key]`
2. `Application.get_env(:parrhesia, :identity)`
3. the persisted file on disk
Supported options across this module:
- `:path` - overrides the identity file path
- `:private_key` / `:configured_private_key` - uses an explicit hex secret key
A configured private key is treated as read-only input and therefore cannot be rotated.
"""
alias Parrhesia.API.Auth
@typedoc """
Public identity metadata returned to callers.
"""
@type identity_metadata :: %{
pubkey: String.t(),
source: :configured | :persisted | :generated | :imported
}
@doc """
Returns the current server identity metadata.
This does not generate a new identity. If no configured or persisted identity exists, it
returns `{:error, :identity_not_found}`.
"""
@spec get(keyword()) :: {:ok, identity_metadata()} | {:error, term()}
def get(opts \\ []) do
with {:ok, identity} <- fetch_existing_identity(opts) do
{:ok, public_identity(identity)}
end
end
@doc """
Returns the current identity, generating and persisting one when necessary.
"""
@spec ensure(keyword()) :: {:ok, identity_metadata()} | {:error, term()}
def ensure(opts \\ []) do
with {:ok, identity} <- ensure_identity(opts) do
{:ok, public_identity(identity)}
end
end
@doc """
Imports an explicit secret key and persists it as the server identity.
The input map must contain `:secret_key` or `"secret_key"` as a 64-character lowercase or
uppercase hex string.
"""
@spec import(map(), keyword()) :: {:ok, identity_metadata()} | {:error, term()}
def import(identity, opts \\ [])
def import(identity, opts) when is_map(identity) do
with {:ok, secret_key} <- fetch_secret_key(identity),
{:ok, normalized_identity} <- build_identity(secret_key, :imported),
:ok <- persist_identity(normalized_identity, opts) do
{:ok, public_identity(normalized_identity)}
end
end
def import(_identity, _opts), do: {:error, :invalid_identity}
@doc """
Generates and persists a fresh server identity.
Rotation is rejected with `{:error, :configured_identity_cannot_rotate}` when the active
identity comes from configuration rather than the persisted file.
"""
@spec rotate(keyword()) :: {:ok, identity_metadata()} | {:error, term()}
def rotate(opts \\ []) do
with :ok <- ensure_rotation_allowed(opts),
{:ok, identity} <- generate_identity(:generated),
:ok <- persist_identity(identity, opts) do
{:ok, public_identity(identity)}
end
end
@doc """
Signs an event with the current server identity.
The incoming event must already include the fields required to compute a Nostr id:
- `"created_at"`
- `"kind"`
- `"tags"`
- `"content"`
On success the returned event includes `"pubkey"`, `"id"`, and `"sig"`.
"""
@spec sign_event(map(), keyword()) :: {:ok, map()} | {:error, term()}
def sign_event(event, opts \\ [])
def sign_event(event, opts) when is_map(event) and is_list(opts) do
with :ok <- validate_signable_event(event),
{:ok, identity} <- ensure_identity(opts),
signed_event <- attach_signature(event, identity) do
{:ok, signed_event}
end
end
def sign_event(_event, _opts), do: {:error, :invalid_event}
@doc """
Returns the default filesystem path for the persisted server identity.
"""
def default_path do
Path.join([default_data_dir(), "server_identity.json"])
end
defp ensure_identity(opts) do
case fetch_existing_identity(opts) do
{:ok, identity} ->
{:ok, identity}
{:error, :identity_not_found} ->
with {:ok, identity} <- generate_identity(:generated),
:ok <- persist_identity(identity, opts) do
{:ok, identity}
end
{:error, reason} ->
{:error, reason}
end
end
defp fetch_existing_identity(opts) do
if configured_private_key = configured_private_key(opts) do
build_identity(configured_private_key, :configured)
else
read_persisted_identity(opts)
end
end
defp ensure_rotation_allowed(opts) do
if configured_private_key(opts) do
{:error, :configured_identity_cannot_rotate}
else
:ok
end
end
defp validate_signable_event(event) do
signable =
is_integer(Map.get(event, "created_at")) and
is_integer(Map.get(event, "kind")) and
is_list(Map.get(event, "tags")) and
is_binary(Map.get(event, "content", ""))
if signable, do: :ok, else: {:error, :invalid_event}
end
defp attach_signature(event, identity) do
unsigned_event =
event
|> Map.put("pubkey", identity.pubkey)
|> Map.put("sig", String.duplicate("0", 128))
event_id =
unsigned_event
|> Auth.compute_event_id()
signature =
event_id
|> Base.decode16!(case: :lower)
|> Secp256k1.schnorr_sign(identity.secret_key)
|> Base.encode16(case: :lower)
unsigned_event
|> Map.put("id", event_id)
|> Map.put("sig", signature)
end
defp read_persisted_identity(opts) do
path = identity_path(opts)
case File.read(path) do
{:ok, payload} ->
with {:ok, decoded} <- JSON.decode(payload),
{:ok, secret_key} <- fetch_secret_key(decoded),
{:ok, identity} <- build_identity(secret_key, :persisted) do
{:ok, identity}
else
{:error, reason} -> {:error, reason}
end
{:error, :enoent} ->
{:error, :identity_not_found}
{:error, reason} ->
{:error, reason}
end
end
defp persist_identity(identity, opts) do
path = identity_path(opts)
temp_path = path <> ".tmp"
with :ok <- File.mkdir_p(Path.dirname(path)),
:ok <- File.write(temp_path, JSON.encode!(persisted_identity(identity))),
:ok <- File.rename(temp_path, path) do
:ok
else
{:error, reason} ->
_ = File.rm(temp_path)
{:error, reason}
end
end
defp persisted_identity(identity) do
%{
"secret_key" => Base.encode16(identity.secret_key, case: :lower),
"pubkey" => identity.pubkey
}
end
defp generate_identity(source) do
{secret_key, pubkey} = Secp256k1.keypair(:xonly)
{:ok,
%{
secret_key: secret_key,
pubkey: Base.encode16(pubkey, case: :lower),
source: source
}}
rescue
_error -> {:error, :identity_generation_failed}
end
defp build_identity(secret_key_hex, source) when is_binary(secret_key_hex) do
with {:ok, secret_key} <- decode_secret_key(secret_key_hex),
pubkey <- Secp256k1.pubkey(secret_key, :xonly) do
{:ok,
%{
secret_key: secret_key,
pubkey: Base.encode16(pubkey, case: :lower),
source: source
}}
end
rescue
_error -> {:error, :invalid_secret_key}
end
defp decode_secret_key(secret_key_hex) when is_binary(secret_key_hex) do
normalized = String.downcase(secret_key_hex)
case Base.decode16(normalized, case: :lower) do
{:ok, <<_::256>> = secret_key} -> {:ok, secret_key}
_other -> {:error, :invalid_secret_key}
end
end
defp fetch_secret_key(identity) when is_map(identity) do
case Map.get(identity, :secret_key) || Map.get(identity, "secret_key") do
secret_key when is_binary(secret_key) -> {:ok, secret_key}
_other -> {:error, :invalid_identity}
end
end
defp configured_private_key(opts) do
opts[:private_key] || opts[:configured_private_key] || config_value(:private_key)
end
defp identity_path(opts) do
opts[:path] || config_value(:path) || default_path()
end
defp public_identity(identity) do
%{
pubkey: identity.pubkey,
source: identity.source
}
end
defp config_value(key) do
:parrhesia
|> Application.get_env(:identity, [])
|> Keyword.get(key)
end
defp default_data_dir do
base_dir =
System.get_env("XDG_DATA_HOME") ||
Path.join(System.user_home!(), ".local/share")
Path.join(base_dir, "parrhesia")
end
end
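
A minimal signing flow using only the functions defined above; the event body is illustrative:

```elixir
{:ok, %{pubkey: pubkey, source: _source}} = Parrhesia.API.Identity.ensure()

{:ok, signed_event} =
  Parrhesia.API.Identity.sign_event(%{
    "created_at" => System.system_time(:second),
    "kind" => 1,
    "tags" => [],
    "content" => "signed by #{pubkey}"
  })

# sign_event/2 filled in "pubkey", "id", and "sig" (hex-encoded).
64 = byte_size(signed_event["id"])
128 = byte_size(signed_event["sig"])
```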

View File

@@ -0,0 +1,25 @@
defmodule Parrhesia.API.Identity.Manager do
@moduledoc false
use GenServer
alias Parrhesia.API.Identity
require Logger
def start_link(opts \\ []) do
GenServer.start_link(__MODULE__, opts, name: __MODULE__)
end
@impl true
def init(_opts) do
case Identity.ensure() do
{:ok, _identity} ->
{:ok, %{}}
{:error, reason} ->
Logger.error("failed to ensure server identity: #{inspect(reason)}")
{:ok, %{}}
end
end
end

View File

@@ -0,0 +1,44 @@
defmodule Parrhesia.API.RequestContext do
@moduledoc """
Shared request context used across API and policy surfaces.
This struct carries caller identity and transport metadata through authorization and storage
boundaries.
The most important field for external callers is `authenticated_pubkeys`, which drives authorization decisions across the API surface:
- `Parrhesia.API.Events` uses it for read and write policy checks
- `Parrhesia.API.Stream` uses it for subscription authorization
- `Parrhesia.API.ACL` uses it when evaluating protected sync traffic
"""
defstruct authenticated_pubkeys: MapSet.new(),
actor: nil,
caller: :local,
remote_ip: nil,
subscription_id: nil,
peer_id: nil,
transport_identity: nil,
metadata: %{}
@type t :: %__MODULE__{
authenticated_pubkeys: MapSet.t(String.t()),
actor: term(),
caller: atom(),
remote_ip: String.t() | nil,
subscription_id: String.t() | nil,
peer_id: String.t() | nil,
transport_identity: map() | nil,
metadata: map()
}
@doc """
Merges arbitrary metadata into the context.
Existing keys are overwritten by the incoming map.
"""
@spec put_metadata(t(), map()) :: t()
def put_metadata(%__MODULE__{} = context, metadata) when is_map(metadata) do
%__MODULE__{context | metadata: Map.merge(context.metadata, metadata)}
end
end
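
A sketch of building a context for an authenticated remote caller; the field names come from the struct above, and the concrete values (including the `:websocket` caller tag) are placeholders:

```elixir
pubkey = String.duplicate("ab", 32)

context =
  %Parrhesia.API.RequestContext{
    authenticated_pubkeys: MapSet.new([pubkey]),
    caller: :websocket,
    remote_ip: "203.0.113.7"
  }
  |> Parrhesia.API.RequestContext.put_metadata(%{user_agent: "example-client/1.0"})
```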

lib/parrhesia/api/stream.ex Normal file
View File

@@ -0,0 +1,121 @@
defmodule Parrhesia.API.Stream do
@moduledoc """
In-process subscription API with relay-equivalent catch-up and live fanout semantics.
Subscriptions are process-local bridges. After subscribing, the caller receives messages in
the same order a relay client would expect:
- `{:parrhesia, :event, ref, subscription_id, event}` for catch-up and live events
- `{:parrhesia, :eose, ref, subscription_id}` after the initial replay finishes
This API requires a `Parrhesia.API.RequestContext` so read policies are applied exactly as
they would be for a transport-backed subscriber.
"""
alias Parrhesia.API.Events
alias Parrhesia.API.RequestContext
alias Parrhesia.API.Stream.Subscription
alias Parrhesia.Policy.EventPolicy
alias Parrhesia.Protocol.Filter
@doc """
Starts an in-process subscription for a subscriber pid.
`opts[:context]` must be a `Parrhesia.API.RequestContext`.
On success the returned reference is both:
- the subscription handle used by `unsubscribe/1`
- the value embedded in emitted subscriber messages
"""
@spec subscribe(pid(), String.t(), [map()], keyword()) :: {:ok, reference()} | {:error, term()}
def subscribe(subscriber, subscription_id, filters, opts \\ [])
def subscribe(subscriber, subscription_id, filters, opts)
when is_pid(subscriber) and is_binary(subscription_id) and is_list(filters) and
is_list(opts) do
with {:ok, context} <- fetch_context(opts),
:ok <- Filter.validate_filters(filters),
:ok <-
EventPolicy.authorize_read(
filters,
context.authenticated_pubkeys,
stream_context(context, subscription_id)
) do
ref = make_ref()
case DynamicSupervisor.start_child(
Parrhesia.API.Stream.Supervisor,
{Subscription,
ref: ref, subscriber: subscriber, subscription_id: subscription_id, filters: filters}
) do
{:ok, pid} ->
finalize_subscription(pid, ref, filters, stream_context(context, subscription_id))
{:error, reason} ->
{:error, reason}
end
end
end
def subscribe(_subscriber, _subscription_id, _filters, _opts),
do: {:error, :invalid_subscription}
@doc """
Stops a subscription previously created with `subscribe/4`.
This function is idempotent. Unknown or already-stopped references return `:ok`.
"""
@spec unsubscribe(reference()) :: :ok
def unsubscribe(ref) when is_reference(ref) do
case Registry.lookup(Parrhesia.API.Stream.Registry, ref) do
[{pid, _value}] ->
try do
:ok = GenServer.stop(pid, :normal)
catch
:exit, _reason -> :ok
end
:ok
[] ->
:ok
end
end
def unsubscribe(_ref), do: :ok
defp fetch_context(opts) do
case Keyword.get(opts, :context) do
%RequestContext{} = context -> {:ok, context}
_other -> {:error, :invalid_context}
end
end
defp finalize_subscription(pid, ref, filters, context) do
with {:ok, initial_events} <-
Events.query(filters,
context: context,
validate_filters?: false,
authorize_read?: false
),
:ok <- Subscription.deliver_initial(pid, initial_events) do
{:ok, ref}
else
{:error, reason} ->
_ = safe_stop_subscription(pid)
{:error, reason}
end
end
defp safe_stop_subscription(pid) do
GenServer.stop(pid, :shutdown)
:ok
catch
:exit, _reason -> :ok
end
defp stream_context(%RequestContext{} = context, subscription_id) do
%RequestContext{context | subscription_id: subscription_id}
end
end
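
A subscriber sketch that drains the catch-up replay until EOSE and then unsubscribes, assuming `context` is a `RequestContext` whose pubkeys pass the read policy for these filters:

```elixir
{:ok, ref} =
  Parrhesia.API.Stream.subscribe(self(), "inbox", [%{"kinds" => [1]}], context: context)

# Anonymous recursion: the function receives itself as its first argument.
drain = fn drain, acc ->
  receive do
    {:parrhesia, :event, ^ref, "inbox", event} -> drain.(drain, [event | acc])
    {:parrhesia, :eose, ^ref, "inbox"} -> Enum.reverse(acc)
  end
end

catch_up_events = drain.(drain, [])
:ok = Parrhesia.API.Stream.unsubscribe(ref)
```

Live events keep arriving as `{:parrhesia, :event, ref, subscription_id, event}` messages after EOSE until the subscription is stopped.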

View File

@@ -0,0 +1,192 @@
defmodule Parrhesia.API.Stream.Subscription do
@moduledoc false
use GenServer, restart: :temporary
alias Parrhesia.Protocol.Filter
alias Parrhesia.Subscriptions.Index
alias Parrhesia.Telemetry
defstruct [
:ref,
:subscriber,
:subscriber_monitor_ref,
:subscription_id,
:filters,
ready?: false,
buffered_events: []
]
@type t :: %__MODULE__{
ref: reference(),
subscriber: pid(),
subscriber_monitor_ref: reference(),
subscription_id: String.t(),
filters: [map()],
ready?: boolean(),
buffered_events: [map()]
}
@spec start_link(keyword()) :: GenServer.on_start()
def start_link(opts) when is_list(opts) do
ref = Keyword.fetch!(opts, :ref)
GenServer.start_link(__MODULE__, opts, name: via_tuple(ref))
end
@spec deliver_initial(GenServer.server(), [map()]) :: :ok | {:error, term()}
def deliver_initial(server, initial_events) when is_list(initial_events) do
GenServer.call(server, {:deliver_initial, initial_events})
end
@impl true
def init(opts) do
with {:ok, subscriber} <- fetch_subscriber(opts),
{:ok, subscription_id} <- fetch_subscription_id(opts),
{:ok, filters} <- fetch_filters(opts),
:ok <-
maybe_upsert_index_subscription(subscription_index(opts), subscription_id, filters) do
monitor_ref = Process.monitor(subscriber)
state = %__MODULE__{
ref: Keyword.fetch!(opts, :ref),
subscriber: subscriber,
subscriber_monitor_ref: monitor_ref,
subscription_id: subscription_id,
filters: filters,
ready?: false,
buffered_events: []
}
Telemetry.emit_process_mailbox_depth(:subscription)
{:ok, state}
else
{:error, reason} -> {:stop, reason}
end
end
@impl true
def handle_call({:deliver_initial, initial_events}, _from, %__MODULE__{} = state) do
send_initial_events(state, initial_events)
Enum.each(Enum.reverse(state.buffered_events), fn event ->
send(state.subscriber, {:parrhesia, :event, state.ref, state.subscription_id, event})
end)
{:reply, :ok, %__MODULE__{state | ready?: true, buffered_events: []}}
|> emit_mailbox_depth()
end
@impl true
def handle_info({:fanout_event, subscription_id, event}, %__MODULE__{} = state)
when is_binary(subscription_id) and is_map(event) do
state
|> handle_fanout_event(subscription_id, event)
|> emit_mailbox_depth()
end
def handle_info({:DOWN, monitor_ref, :process, subscriber, _reason}, %__MODULE__{} = state)
when monitor_ref == state.subscriber_monitor_ref and subscriber == state.subscriber do
{:stop, :normal, state}
|> emit_mailbox_depth()
end
def handle_info(_message, %__MODULE__{} = state) do
{:noreply, state}
|> emit_mailbox_depth()
end
@impl true
def terminate(reason, %__MODULE__{} = state) do
:ok = maybe_remove_index_subscription(state.subscription_id)
if reason not in [:normal, :shutdown] do
send(state.subscriber, {:parrhesia, :closed, state.ref, state.subscription_id, reason})
end
:ok
end
defp send_initial_events(state, events) do
Enum.each(events, fn event ->
send(state.subscriber, {:parrhesia, :event, state.ref, state.subscription_id, event})
end)
send(state.subscriber, {:parrhesia, :eose, state.ref, state.subscription_id})
end
defp via_tuple(ref), do: {:via, Registry, {Parrhesia.API.Stream.Registry, ref}}
defp fetch_subscriber(opts) do
case Keyword.get(opts, :subscriber) do
subscriber when is_pid(subscriber) -> {:ok, subscriber}
_other -> {:error, :invalid_subscriber}
end
end
defp fetch_subscription_id(opts) do
case Keyword.get(opts, :subscription_id) do
subscription_id when is_binary(subscription_id) -> {:ok, subscription_id}
_other -> {:error, :invalid_subscription_id}
end
end
defp fetch_filters(opts) do
case Keyword.get(opts, :filters) do
filters when is_list(filters) -> {:ok, filters}
_other -> {:error, :invalid_filters}
end
end
defp subscription_index(opts) do
case Keyword.get(opts, :subscription_index, Index) do
subscription_index when is_pid(subscription_index) or is_atom(subscription_index) ->
subscription_index
_other ->
nil
end
end
defp maybe_upsert_index_subscription(nil, _subscription_id, _filters),
do: {:error, :subscription_index_unavailable}
defp maybe_upsert_index_subscription(subscription_index, subscription_id, filters) do
case Index.upsert(subscription_index, self(), subscription_id, filters) do
:ok -> :ok
{:error, reason} -> {:error, reason}
end
catch
:exit, _reason -> {:error, :subscription_index_unavailable}
end
defp maybe_remove_index_subscription(subscription_id) do
:ok = Index.remove(Index, self(), subscription_id)
:ok
catch
:exit, _reason -> :ok
end
defp handle_fanout_event(%__MODULE__{} = state, subscription_id, event) do
cond do
subscription_id != state.subscription_id ->
{:noreply, state}
not Filter.matches_any?(event, state.filters) ->
{:noreply, state}
state.ready? ->
send(state.subscriber, {:parrhesia, :event, state.ref, state.subscription_id, event})
{:noreply, state}
true ->
buffered_events = [event | state.buffered_events]
{:noreply, %__MODULE__{state | buffered_events: buffered_events}}
end
end
defp emit_mailbox_depth(result) do
Telemetry.emit_process_mailbox_depth(:subscription)
result
end
end
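
The live-delivery contract implied by `handle_info/2` above is a plain message send; the fanout dispatcher later in this commit emits exactly this shape, and events that miss the subscription id or filters are dropped silently:

```elixir
send(subscription_pid, {:fanout_event, subscription_id, event})
```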

lib/parrhesia/api/sync.ex Normal file
View File

@@ -0,0 +1,170 @@
defmodule Parrhesia.API.Sync do
@moduledoc """
Sync server control-plane API.
This module manages outbound relay sync definitions and exposes runtime status for each
configured sync worker.
The main entrypoint is `put_server/2`. Accepted server maps are normalized into a stable
internal shape and persisted by the sync manager. The expected input shape is:
```elixir
%{
"id" => "tribes-primary",
"url" => "wss://relay-a.example/relay",
"enabled?" => true,
"auth_pubkey" => "...64 hex chars...",
"filters" => [%{"kinds" => [5000]}],
"mode" => "req_stream",
"overlap_window_seconds" => 300,
"auth" => %{"type" => "nip42"},
"tls" => %{
"mode" => "required",
"hostname" => "relay-a.example",
"pins" => [%{"type" => "spki_sha256", "value" => "..."}]
},
"metadata" => %{}
}
```
Most functions accept `:manager` or `:name` in `opts` to target a non-default manager.
"""
alias Parrhesia.API.Sync.Manager
@typedoc """
Normalized sync server configuration returned by the sync manager.
"""
@type server :: map()
@doc """
Creates or replaces a sync server definition.
"""
@spec put_server(map(), keyword()) :: {:ok, server()} | {:error, term()}
def put_server(server, opts \\ [])
def put_server(server, opts) when is_map(server) and is_list(opts) do
Manager.put_server(manager_name(opts), server)
end
def put_server(_server, _opts), do: {:error, :invalid_server}
@doc """
Removes a stored sync server definition and stops its worker if it is running.
"""
@spec remove_server(String.t(), keyword()) :: :ok | {:error, term()}
def remove_server(server_id, opts \\ [])
def remove_server(server_id, opts) when is_binary(server_id) and is_list(opts) do
Manager.remove_server(manager_name(opts), server_id)
end
def remove_server(_server_id, _opts), do: {:error, :invalid_server_id}
@doc """
Fetches a single normalized sync server definition.
Returns `:error` when the server id is unknown.
"""
@spec get_server(String.t(), keyword()) :: {:ok, server()} | :error | {:error, term()}
def get_server(server_id, opts \\ [])
def get_server(server_id, opts) when is_binary(server_id) and is_list(opts) do
Manager.get_server(manager_name(opts), server_id)
end
def get_server(_server_id, _opts), do: {:error, :invalid_server_id}
@doc """
Lists all configured sync servers, including their runtime state.
"""
@spec list_servers(keyword()) :: {:ok, [server()]} | {:error, term()}
def list_servers(opts \\ []) when is_list(opts) do
Manager.list_servers(manager_name(opts))
end
@doc """
Marks a sync server as running and reconciles its worker state.
"""
@spec start_server(String.t(), keyword()) :: :ok | {:error, term()}
def start_server(server_id, opts \\ [])
def start_server(server_id, opts) when is_binary(server_id) and is_list(opts) do
Manager.start_server(manager_name(opts), server_id)
end
def start_server(_server_id, _opts), do: {:error, :invalid_server_id}
@doc """
Stops a sync server and records a disconnect timestamp in runtime state.
"""
@spec stop_server(String.t(), keyword()) :: :ok | {:error, term()}
def stop_server(server_id, opts \\ [])
def stop_server(server_id, opts) when is_binary(server_id) and is_list(opts) do
Manager.stop_server(manager_name(opts), server_id)
end
def stop_server(_server_id, _opts), do: {:error, :invalid_server_id}
@doc """
Triggers an immediate sync run for a server.
"""
@spec sync_now(String.t(), keyword()) :: :ok | {:error, term()}
def sync_now(server_id, opts \\ [])
def sync_now(server_id, opts) when is_binary(server_id) and is_list(opts) do
Manager.sync_now(manager_name(opts), server_id)
end
def sync_now(_server_id, _opts), do: {:error, :invalid_server_id}
@doc """
Returns runtime counters and timestamps for a single sync server.
Returns `:error` when the server id is unknown.
"""
@spec server_stats(String.t(), keyword()) :: {:ok, map()} | :error | {:error, term()}
def server_stats(server_id, opts \\ [])
def server_stats(server_id, opts) when is_binary(server_id) and is_list(opts) do
Manager.server_stats(manager_name(opts), server_id)
end
def server_stats(_server_id, _opts), do: {:error, :invalid_server_id}
@doc """
Returns aggregate counters across all configured sync servers.
"""
@spec sync_stats(keyword()) :: {:ok, map()} | {:error, term()}
def sync_stats(opts \\ []) when is_list(opts) do
Manager.sync_stats(manager_name(opts))
end
@doc """
Returns a health summary for the sync subsystem.
"""
@spec sync_health(keyword()) :: {:ok, map()} | {:error, term()}
def sync_health(opts \\ []) when is_list(opts) do
Manager.sync_health(manager_name(opts))
end
@doc """
Returns the default filesystem path for persisted sync server state.
"""
def default_path do
Path.join([default_data_dir(), "sync_servers.json"])
end
defp manager_name(opts) do
opts[:manager] || opts[:name] || Manager
end
defp default_data_dir do
base_dir =
System.get_env("XDG_DATA_HOME") ||
Path.join(System.user_home!(), ".local/share")
Path.join(base_dir, "parrhesia")
end
end
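
Registering a server with the documented input shape and then forcing a run; the URL, pubkey, and pin value are placeholders:

```elixir
{:ok, _server} =
  Parrhesia.API.Sync.put_server(%{
    "id" => "tribes-primary",
    "url" => "wss://relay-a.example/relay",
    "auth_pubkey" => String.duplicate("ab", 32),
    "filters" => [%{"kinds" => [5000]}],
    "tls" => %{
      "mode" => "required",
      "hostname" => "relay-a.example",
      # Pins are mandatory whenever TLS is required.
      "pins" => [%{"type" => "spki_sha256", "value" => "base64-spki-digest"}]
    }
  })

:ok = Parrhesia.API.Sync.sync_now("tribes-primary")
```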

View File

@@ -0,0 +1,938 @@
defmodule Parrhesia.API.Sync.Manager do
@moduledoc false
use GenServer
alias Parrhesia.API.Sync
alias Parrhesia.Protocol.Filter
alias Parrhesia.Sync.Transport.WebSockexClient
alias Parrhesia.Sync.Worker
require Logger
@default_overlap_window_seconds 300
@default_mode :req_stream
@default_auth_type :nip42
@default_tls_mode :required
@hex64 ~r/\A[0-9a-f]{64}\z/
def start_link(opts \\ []) do
name = Keyword.get(opts, :name, __MODULE__)
GenServer.start_link(__MODULE__, opts, name: name)
end
def put_server(name, server), do: GenServer.call(name, {:put_server, server})
def remove_server(name, server_id), do: GenServer.call(name, {:remove_server, server_id})
def get_server(name, server_id), do: GenServer.call(name, {:get_server, server_id})
def list_servers(name), do: GenServer.call(name, :list_servers)
def start_server(name, server_id), do: GenServer.call(name, {:start_server, server_id})
def stop_server(name, server_id), do: GenServer.call(name, {:stop_server, server_id})
def sync_now(name, server_id), do: GenServer.call(name, {:sync_now, server_id})
def server_stats(name, server_id), do: GenServer.call(name, {:server_stats, server_id})
def sync_stats(name), do: GenServer.call(name, :sync_stats)
def sync_health(name), do: GenServer.call(name, :sync_health)
def runtime_event(name, server_id, kind, attrs \\ %{}) do
GenServer.cast(name, {:runtime_event, server_id, kind, attrs})
end
@impl true
def init(opts) do
path = Keyword.get(opts, :path, config_path() || Sync.default_path())
state =
load_state(path)
|> Map.merge(%{
start_workers?: Keyword.get(opts, :start_workers?, config_value(:start_workers?, true)),
worker_supervisor: Keyword.get(opts, :worker_supervisor, Parrhesia.Sync.WorkerSupervisor),
worker_registry: Keyword.get(opts, :worker_registry, Parrhesia.Sync.WorkerRegistry),
transport_module: Keyword.get(opts, :transport_module, WebSockexClient),
relay_info_opts: Keyword.get(opts, :relay_info_opts, []),
transport_opts: Keyword.get(opts, :transport_opts, [])
})
{:ok, state, {:continue, :bootstrap}}
end
@impl true
def handle_continue(:bootstrap, state) do
next_state =
if state.start_workers? do
state.servers
|> Map.keys()
|> Enum.reduce(state, fn server_id, acc -> maybe_start_worker(acc, server_id) end)
else
state
end
{:noreply, next_state}
end
@impl true
def handle_call({:put_server, server}, _from, state) do
case normalize_server(server) do
{:ok, normalized_server} ->
updated_state =
state
|> stop_worker_if_running(normalized_server.id)
|> put_server_state(normalized_server)
|> persist_and_reconcile!(normalized_server.id)
{:reply, {:ok, merged_server(updated_state, normalized_server.id)}, updated_state}
{:error, reason} ->
{:reply, {:error, reason}, state}
end
end
def handle_call({:remove_server, server_id}, _from, state) do
if Map.has_key?(state.servers, server_id) do
next_state =
state
|> stop_worker_if_running(server_id)
|> Map.update!(:servers, &Map.delete(&1, server_id))
|> Map.update!(:runtime, &Map.delete(&1, server_id))
with :ok <- persist_state(next_state) do
{:reply, :ok, next_state}
end
else
{:reply, {:error, :not_found}, state}
end
end
def handle_call({:get_server, server_id}, _from, state) do
case Map.fetch(state.servers, server_id) do
{:ok, _server} -> {:reply, {:ok, merged_server(state, server_id)}, state}
:error -> {:reply, :error, state}
end
end
def handle_call(:list_servers, _from, state) do
servers =
state.servers
|> Map.keys()
|> Enum.sort()
|> Enum.map(&merged_server(state, &1))
{:reply, {:ok, servers}, state}
end
def handle_call({:start_server, server_id}, _from, state) do
case Map.fetch(state.runtime, server_id) do
{:ok, runtime} ->
next_state =
state
|> put_runtime(server_id, %{runtime | state: :running, last_error: nil})
|> persist_and_reconcile!(server_id)
{:reply, :ok, next_state}
:error ->
{:reply, {:error, :not_found}, state}
end
end
def handle_call({:stop_server, server_id}, _from, state) do
case Map.fetch(state.runtime, server_id) do
{:ok, runtime} ->
next_runtime =
runtime
|> Map.put(:state, :stopped)
|> Map.put(:connected?, false)
|> Map.put(:last_disconnected_at, now())
next_state =
state
|> stop_worker_if_running(server_id)
|> put_runtime(server_id, next_runtime)
with :ok <- persist_state(next_state) do
{:reply, :ok, next_state}
end
:error ->
{:reply, {:error, :not_found}, state}
end
end
def handle_call({:sync_now, server_id}, _from, state) do
case {Map.has_key?(state.runtime, server_id), state.start_workers?,
lookup_worker(state, server_id)} do
{false, _start_workers?, _worker_pid} ->
{:reply, {:error, :not_found}, state}
{true, true, worker_pid} when is_pid(worker_pid) ->
Worker.sync_now(worker_pid)
{:reply, :ok, state}
{true, true, nil} ->
next_state =
state
|> put_in([:runtime, server_id, :state], :running)
|> persist_and_reconcile!(server_id)
{:reply, :ok, next_state}
{true, false, _worker_pid} ->
next_state =
apply_runtime_event(state, server_id, :sync_started, %{})
|> apply_runtime_event(server_id, :sync_completed, %{})
with :ok <- persist_state(next_state) do
{:reply, :ok, next_state}
end
end
end
def handle_call({:server_stats, server_id}, _from, state) do
case Map.fetch(state.runtime, server_id) do
{:ok, runtime} -> {:reply, {:ok, runtime_stats(runtime)}, state}
:error -> {:reply, :error, state}
end
end
def handle_call(:sync_stats, _from, state), do: {:reply, {:ok, aggregate_stats(state)}, state}
def handle_call(:sync_health, _from, state), do: {:reply, {:ok, health_summary(state)}, state}
@impl true
def handle_cast({:runtime_event, server_id, kind, attrs}, state) do
next_state =
state
|> apply_runtime_event(server_id, kind, attrs)
|> persist_state_if_known_server(server_id)
{:noreply, next_state}
end
defp persist_state_if_known_server(state, server_id) do
if Map.has_key?(state.runtime, server_id) do
case persist_state(state) do
:ok ->
state
{:error, reason} ->
Logger.warning("failed to persist sync runtime for #{server_id}: #{inspect(reason)}")
state
end
else
state
end
end
defp put_server_state(state, server) do
runtime =
case Map.get(state.runtime, server.id) do
nil -> default_runtime(server)
existing_runtime -> existing_runtime
end
%{
state
| servers: Map.put(state.servers, server.id, server),
runtime: Map.put(state.runtime, server.id, runtime)
}
end
defp put_runtime(state, server_id, runtime) do
%{state | runtime: Map.put(state.runtime, server_id, runtime)}
end
defp persist_and_reconcile!(state, server_id) do
:ok = persist_state(state)
reconcile_worker(state, server_id)
end
defp reconcile_worker(state, server_id) do
cond do
not state.start_workers? ->
state
desired_running?(state, server_id) ->
maybe_start_worker(state, server_id)
true ->
stop_worker_if_running(state, server_id)
end
end
defp maybe_start_worker(state, server_id) do
cond do
not state.start_workers? ->
state
not desired_running?(state, server_id) ->
state
lookup_worker(state, server_id) != nil ->
state
true ->
server = Map.fetch!(state.servers, server_id)
runtime = Map.fetch!(state.runtime, server_id)
child_spec = %{
id: {:sync_worker, server_id},
start:
{Worker, :start_link,
[
[
name: via_tuple(server_id, state.worker_registry),
server: server,
runtime: runtime,
manager: self(),
transport_module: state.transport_module,
relay_info_opts: state.relay_info_opts,
transport_opts: state.transport_opts
]
]},
restart: :transient
}
case DynamicSupervisor.start_child(state.worker_supervisor, child_spec) do
{:ok, _pid} ->
state
{:error, {:already_started, _pid}} ->
state
{:error, reason} ->
Logger.warning("failed to start sync worker #{server_id}: #{inspect(reason)}")
state
end
end
end
defp stop_worker_if_running(state, server_id) do
if worker_pid = lookup_worker(state, server_id) do
_ = Worker.stop(worker_pid)
end
state
end
defp desired_running?(state, server_id) do
case Map.fetch(state.runtime, server_id) do
{:ok, runtime} -> runtime.state == :running
:error -> false
end
end
defp lookup_worker(state, server_id) do
case Registry.lookup(state.worker_registry, server_id) do
[{pid, _value}] -> pid
[] -> nil
end
catch
:exit, _reason -> nil
end
defp via_tuple(server_id, registry) do
{:via, Registry, {registry, server_id}}
end
defp merged_server(state, server_id) do
state.servers
|> Map.fetch!(server_id)
|> Map.put(:runtime, Map.fetch!(state.runtime, server_id))
end
defp runtime_stats(runtime) do
%{
"server_id" => runtime.server_id,
"state" => Atom.to_string(runtime.state),
"connected" => runtime.connected?,
"events_received" => runtime.events_received,
"events_accepted" => runtime.events_accepted,
"events_duplicate" => runtime.events_duplicate,
"events_rejected" => runtime.events_rejected,
"query_runs" => runtime.query_runs,
"subscription_restarts" => runtime.subscription_restarts,
"reconnects" => runtime.reconnects,
"last_sync_started_at" => runtime.last_sync_started_at,
"last_sync_completed_at" => runtime.last_sync_completed_at,
"last_remote_eose_at" => runtime.last_remote_eose_at,
"last_error" => runtime.last_error,
"cursor_created_at" => runtime.cursor_created_at,
"cursor_event_id" => runtime.cursor_event_id
}
end
defp aggregate_stats(state) do
runtimes = Map.values(state.runtime)
%{
"servers_total" => map_size(state.servers),
"servers_enabled" => Enum.count(state.servers, fn {_id, server} -> server.enabled? end),
"servers_running" => Enum.count(runtimes, &(&1.state == :running)),
"servers_connected" => Enum.count(runtimes, & &1.connected?),
"events_received" => Enum.reduce(runtimes, 0, &(&1.events_received + &2)),
"events_accepted" => Enum.reduce(runtimes, 0, &(&1.events_accepted + &2)),
"events_duplicate" => Enum.reduce(runtimes, 0, &(&1.events_duplicate + &2)),
"events_rejected" => Enum.reduce(runtimes, 0, &(&1.events_rejected + &2)),
"query_runs" => Enum.reduce(runtimes, 0, &(&1.query_runs + &2)),
"subscription_restarts" => Enum.reduce(runtimes, 0, &(&1.subscription_restarts + &2)),
"reconnects" => Enum.reduce(runtimes, 0, &(&1.reconnects + &2))
}
end
defp health_summary(state) do
failing_servers =
state.runtime
|> Enum.flat_map(fn {server_id, runtime} ->
if is_binary(runtime.last_error) and runtime.last_error != "" do
[%{"id" => server_id, "reason" => runtime.last_error}]
else
[]
end
end)
%{
"status" => if(failing_servers == [], do: "ok", else: "degraded"),
"servers_total" => map_size(state.servers),
"servers_connected" =>
Enum.count(state.runtime, fn {_id, runtime} -> runtime.connected? end),
"servers_failing" => failing_servers
}
end
defp apply_runtime_event(state, server_id, kind, attrs) do
case Map.fetch(state.runtime, server_id) do
{:ok, runtime} ->
updated_runtime = update_runtime_for_event(runtime, kind, attrs)
put_runtime(state, server_id, updated_runtime)
:error ->
state
end
end
defp update_runtime_for_event(runtime, :connected, _attrs) do
runtime
|> Map.put(:state, :running)
|> Map.put(:connected?, true)
|> Map.put(:last_connected_at, now())
|> Map.put(:last_error, nil)
end
defp update_runtime_for_event(runtime, :disconnected, attrs) do
reason = format_reason(Map.get(attrs, :reason))
runtime
|> Map.put(:connected?, false)
|> Map.put(:last_disconnected_at, now())
|> Map.update!(:reconnects, &(&1 + 1))
|> Map.put(:last_error, reason)
end
defp update_runtime_for_event(runtime, :error, attrs) do
Map.put(runtime, :last_error, format_reason(Map.get(attrs, :reason)))
end
defp update_runtime_for_event(runtime, :sync_started, _attrs) do
runtime
|> Map.put(:last_sync_started_at, now())
|> Map.update!(:query_runs, &(&1 + 1))
end
defp update_runtime_for_event(runtime, :sync_completed, _attrs) do
timestamp = now()
runtime
|> Map.put(:last_sync_completed_at, timestamp)
|> Map.put(:last_eose_at, timestamp)
|> Map.put(:last_remote_eose_at, timestamp)
end
defp update_runtime_for_event(runtime, :subscription_restart, _attrs) do
Map.update!(runtime, :subscription_restarts, &(&1 + 1))
end
defp update_runtime_for_event(runtime, :cursor_advanced, attrs) do
runtime
|> Map.put(:cursor_created_at, Map.get(attrs, :created_at))
|> Map.put(:cursor_event_id, Map.get(attrs, :event_id))
end
defp update_runtime_for_event(runtime, :event_result, attrs) do
event = Map.get(attrs, :event, %{})
result = Map.get(attrs, :result)
runtime
|> Map.update!(:events_received, &(&1 + 1))
|> Map.put(:last_event_received_at, now())
|> increment_result_counter(result)
|> maybe_put_last_error(attrs)
|> maybe_advance_runtime_cursor(event, result)
end
defp update_runtime_for_event(runtime, _kind, _attrs), do: runtime
defp increment_result_counter(runtime, :accepted),
do: Map.update!(runtime, :events_accepted, &(&1 + 1))
defp increment_result_counter(runtime, :duplicate),
do: Map.update!(runtime, :events_duplicate, &(&1 + 1))
defp increment_result_counter(runtime, :rejected),
do: Map.update!(runtime, :events_rejected, &(&1 + 1))
defp increment_result_counter(runtime, _result), do: runtime
defp maybe_put_last_error(runtime, %{reason: nil}), do: runtime
defp maybe_put_last_error(runtime, attrs),
do: Map.put(runtime, :last_error, format_reason(attrs[:reason]))
defp maybe_advance_runtime_cursor(runtime, event, result)
when result in [:accepted, :duplicate] do
created_at = Map.get(event, "created_at")
event_id = Map.get(event, "id")
cond do
not is_integer(created_at) or not is_binary(event_id) ->
runtime
is_nil(runtime.cursor_created_at) ->
runtime
|> Map.put(:cursor_created_at, created_at)
|> Map.put(:cursor_event_id, event_id)
created_at > runtime.cursor_created_at ->
runtime
|> Map.put(:cursor_created_at, created_at)
|> Map.put(:cursor_event_id, event_id)
created_at == runtime.cursor_created_at and event_id > runtime.cursor_event_id ->
runtime
|> Map.put(:cursor_created_at, created_at)
|> Map.put(:cursor_event_id, event_id)
true ->
runtime
end
end
defp maybe_advance_runtime_cursor(runtime, _event, _result), do: runtime
defp format_reason(nil), do: nil
defp format_reason(reason) when is_binary(reason), do: reason
defp format_reason(reason), do: inspect(reason)
defp load_state(path) do
case File.read(path) do
{:ok, payload} ->
case decode_persisted_state(payload, path) do
{:ok, state} ->
state
{:error, reason} ->
Logger.warning("failed to load sync state from #{path}: #{inspect(reason)}")
empty_state(path)
end
{:error, :enoent} ->
empty_state(path)
{:error, reason} ->
Logger.warning("failed to read sync state from #{path}: #{inspect(reason)}")
empty_state(path)
end
end
defp decode_persisted_state(payload, path) do
with {:ok, decoded} <- JSON.decode(payload),
{:ok, servers} <- decode_servers(Map.get(decoded, "servers", %{})),
{:ok, runtime} <- decode_runtime(Map.get(decoded, "runtime", %{}), servers) do
{:ok, %{path: path, servers: servers, runtime: runtime}}
end
end
defp decode_servers(servers) when is_map(servers) do
Enum.reduce_while(servers, {:ok, %{}}, fn {_id, server_payload}, {:ok, acc} ->
case normalize_server(server_payload) do
{:ok, server} -> {:cont, {:ok, Map.put(acc, server.id, server)}}
{:error, reason} -> {:halt, {:error, reason}}
end
end)
end
defp decode_servers(_servers), do: {:error, :invalid_servers_state}
defp decode_runtime(runtime_payload, servers)
when is_map(runtime_payload) and is_map(servers) do
runtime =
Enum.reduce(servers, %{}, fn {server_id, server}, acc ->
decoded_runtime =
runtime_payload
|> Map.get(server_id)
|> normalize_runtime(server)
Map.put(acc, server_id, decoded_runtime)
end)
{:ok, runtime}
end
defp decode_runtime(_runtime_payload, _servers), do: {:error, :invalid_runtime_state}
defp normalize_runtime(nil, server), do: default_runtime(server)
defp normalize_runtime(runtime, server) when is_map(runtime) do
%{
server_id: server.id,
state: normalize_runtime_state(fetch_value(runtime, :state)),
connected?: fetch_boolean(runtime, :connected?) || false,
last_connected_at: fetch_string_or_nil(runtime, :last_connected_at),
last_disconnected_at: fetch_string_or_nil(runtime, :last_disconnected_at),
last_sync_started_at: fetch_string_or_nil(runtime, :last_sync_started_at),
last_sync_completed_at: fetch_string_or_nil(runtime, :last_sync_completed_at),
last_event_received_at: fetch_string_or_nil(runtime, :last_event_received_at),
last_eose_at: fetch_string_or_nil(runtime, :last_eose_at),
reconnect_attempts: fetch_non_neg_integer(runtime, :reconnect_attempts),
last_error: fetch_string_or_nil(runtime, :last_error),
events_received: fetch_non_neg_integer(runtime, :events_received),
events_accepted: fetch_non_neg_integer(runtime, :events_accepted),
events_duplicate: fetch_non_neg_integer(runtime, :events_duplicate),
events_rejected: fetch_non_neg_integer(runtime, :events_rejected),
query_runs: fetch_non_neg_integer(runtime, :query_runs),
subscription_restarts: fetch_non_neg_integer(runtime, :subscription_restarts),
reconnects: fetch_non_neg_integer(runtime, :reconnects),
last_remote_eose_at: fetch_string_or_nil(runtime, :last_remote_eose_at),
cursor_created_at: fetch_optional_integer(runtime, :cursor_created_at),
cursor_event_id: fetch_string_or_nil(runtime, :cursor_event_id)
}
end
defp normalize_runtime(_runtime, server), do: default_runtime(server)
defp persist_state(%{path: path} = state) do
temp_path = path <> ".tmp"
with :ok <- File.mkdir_p(Path.dirname(path)),
:ok <- File.write(temp_path, JSON.encode!(encode_state(state))),
:ok <- File.rename(temp_path, path) do
:ok
else
{:error, reason} ->
_ = File.rm(temp_path)
{:error, reason}
end
end
defp encode_state(state) do
%{
"version" => 2,
"servers" =>
Map.new(state.servers, fn {server_id, server} -> {server_id, encode_server(server)} end),
"runtime" =>
Map.new(state.runtime, fn {server_id, runtime} -> {server_id, encode_runtime(runtime)} end)
}
end
defp encode_server(server) do
%{
"id" => server.id,
"url" => server.url,
"enabled?" => server.enabled?,
"auth_pubkey" => server.auth_pubkey,
"filters" => server.filters,
"mode" => Atom.to_string(server.mode),
"overlap_window_seconds" => server.overlap_window_seconds,
"auth" => %{"type" => Atom.to_string(server.auth.type)},
"tls" => %{
"mode" => Atom.to_string(server.tls.mode),
"hostname" => server.tls.hostname,
"pins" =>
Enum.map(server.tls.pins, fn pin ->
%{
"type" => Atom.to_string(pin.type),
"value" => pin.value
}
end)
},
"metadata" => server.metadata
}
end
defp encode_runtime(runtime) do
%{
"server_id" => runtime.server_id,
"state" => Atom.to_string(runtime.state),
"connected?" => runtime.connected?,
"last_connected_at" => runtime.last_connected_at,
"last_disconnected_at" => runtime.last_disconnected_at,
"last_sync_started_at" => runtime.last_sync_started_at,
"last_sync_completed_at" => runtime.last_sync_completed_at,
"last_event_received_at" => runtime.last_event_received_at,
"last_eose_at" => runtime.last_eose_at,
"reconnect_attempts" => runtime.reconnect_attempts,
"last_error" => runtime.last_error,
"events_received" => runtime.events_received,
"events_accepted" => runtime.events_accepted,
"events_duplicate" => runtime.events_duplicate,
"events_rejected" => runtime.events_rejected,
"query_runs" => runtime.query_runs,
"subscription_restarts" => runtime.subscription_restarts,
"reconnects" => runtime.reconnects,
"last_remote_eose_at" => runtime.last_remote_eose_at,
"cursor_created_at" => runtime.cursor_created_at,
"cursor_event_id" => runtime.cursor_event_id
}
end
defp empty_state(path) do
%{path: path, servers: %{}, runtime: %{}}
end
defp default_runtime(server) do
%{
server_id: server.id,
state: if(server.enabled?, do: :running, else: :stopped),
connected?: false,
last_connected_at: nil,
last_disconnected_at: nil,
last_sync_started_at: nil,
last_sync_completed_at: nil,
last_event_received_at: nil,
last_eose_at: nil,
reconnect_attempts: 0,
last_error: nil,
events_received: 0,
events_accepted: 0,
events_duplicate: 0,
events_rejected: 0,
query_runs: 0,
subscription_restarts: 0,
reconnects: 0,
last_remote_eose_at: nil,
cursor_created_at: nil,
cursor_event_id: nil
}
end
defp normalize_server(server) when is_map(server) do
with {:ok, id} <- normalize_non_empty_string(fetch_value(server, :id), :invalid_server_id),
{:ok, {url, host, scheme}} <- normalize_url(fetch_value(server, :url)),
{:ok, enabled?} <- normalize_boolean(fetch_value(server, :enabled?), true),
{:ok, auth_pubkey} <- normalize_pubkey(fetch_value(server, :auth_pubkey)),
{:ok, filters} <- normalize_filters(fetch_value(server, :filters)),
{:ok, mode} <- normalize_mode(fetch_value(server, :mode)),
{:ok, overlap_window_seconds} <-
normalize_overlap_window(fetch_value(server, :overlap_window_seconds)),
{:ok, auth} <- normalize_auth(fetch_value(server, :auth)),
{:ok, tls} <- normalize_tls(fetch_value(server, :tls), host, scheme),
{:ok, metadata} <- normalize_metadata(fetch_value(server, :metadata)) do
{:ok,
%{
id: id,
url: url,
enabled?: enabled?,
auth_pubkey: auth_pubkey,
filters: filters,
mode: mode,
overlap_window_seconds: overlap_window_seconds,
auth: auth,
tls: tls,
metadata: metadata
}}
end
end
defp normalize_server(_server), do: {:error, :invalid_server}
defp normalize_url(url) when is_binary(url) and url != "" do
uri = URI.parse(url)
if uri.scheme in ["ws", "wss"] and is_binary(uri.host) and uri.host != "" do
{:ok, {URI.to_string(uri), uri.host, uri.scheme}}
else
{:error, :invalid_url}
end
end
defp normalize_url(_url), do: {:error, :invalid_url}
defp normalize_pubkey(pubkey) when is_binary(pubkey) do
normalized = String.downcase(pubkey)
if String.match?(normalized, @hex64) do
{:ok, normalized}
else
{:error, :invalid_auth_pubkey}
end
end
defp normalize_pubkey(_pubkey), do: {:error, :invalid_auth_pubkey}
defp normalize_filters(filters) when is_list(filters) do
normalized_filters = Enum.map(filters, &normalize_filter_map/1)
with :ok <- Filter.validate_filters(normalized_filters) do
{:ok, normalized_filters}
end
end
defp normalize_filters(_filters), do: {:error, :invalid_filters}
defp normalize_mode(nil), do: {:ok, @default_mode}
defp normalize_mode(:req_stream), do: {:ok, :req_stream}
defp normalize_mode("req_stream"), do: {:ok, :req_stream}
defp normalize_mode(_mode), do: {:error, :invalid_mode}
defp normalize_overlap_window(nil), do: {:ok, @default_overlap_window_seconds}
defp normalize_overlap_window(seconds) when is_integer(seconds) and seconds >= 0,
do: {:ok, seconds}
defp normalize_overlap_window(_seconds), do: {:error, :invalid_overlap_window_seconds}
defp normalize_auth(nil), do: {:ok, %{type: @default_auth_type}}
defp normalize_auth(auth) when is_map(auth) do
with {:ok, type} <- normalize_auth_type(fetch_value(auth, :type)) do
{:ok, %{type: type}}
end
end
defp normalize_auth(_auth), do: {:error, :invalid_auth}
defp normalize_auth_type(nil), do: {:ok, @default_auth_type}
defp normalize_auth_type(:nip42), do: {:ok, :nip42}
defp normalize_auth_type("nip42"), do: {:ok, :nip42}
defp normalize_auth_type(_type), do: {:error, :invalid_auth_type}
defp normalize_tls(tls, host, scheme) when is_map(tls) do
with {:ok, mode} <- normalize_tls_mode(fetch_value(tls, :mode)),
:ok <- validate_tls_mode_against_scheme(mode, scheme),
{:ok, hostname} <- normalize_hostname(fetch_value(tls, :hostname) || host),
{:ok, pins} <- normalize_tls_pins(mode, fetch_value(tls, :pins)) do
{:ok, %{mode: mode, hostname: hostname, pins: pins}}
end
end
defp normalize_tls(_tls, _host, _scheme), do: {:error, :invalid_tls}
defp normalize_tls_mode(nil), do: {:ok, @default_tls_mode}
defp normalize_tls_mode(:required), do: {:ok, :required}
defp normalize_tls_mode("required"), do: {:ok, :required}
defp normalize_tls_mode(:disabled), do: {:ok, :disabled}
defp normalize_tls_mode("disabled"), do: {:ok, :disabled}
defp normalize_tls_mode(_mode), do: {:error, :invalid_tls_mode}
defp validate_tls_mode_against_scheme(:required, "wss"), do: :ok
defp validate_tls_mode_against_scheme(:required, _scheme), do: {:error, :invalid_url}
defp validate_tls_mode_against_scheme(:disabled, _scheme), do: :ok
defp normalize_hostname(hostname) when is_binary(hostname) and hostname != "",
do: {:ok, hostname}
defp normalize_hostname(_hostname), do: {:error, :invalid_tls_hostname}
defp normalize_tls_pins(:disabled, nil), do: {:ok, []}
defp normalize_tls_pins(:disabled, pins) when is_list(pins), do: {:ok, []}
defp normalize_tls_pins(:required, pins) when is_list(pins) and pins != [] do
Enum.reduce_while(pins, {:ok, []}, fn pin, {:ok, acc} ->
case normalize_tls_pin(pin) do
{:ok, normalized_pin} -> {:cont, {:ok, [normalized_pin | acc]}}
{:error, reason} -> {:halt, {:error, reason}}
end
end)
|> case do
{:ok, normalized_pins} -> {:ok, Enum.reverse(normalized_pins)}
error -> error
end
end
defp normalize_tls_pins(:required, _pins), do: {:error, :invalid_tls_pins}
defp normalize_tls_pin(pin) when is_map(pin) do
with {:ok, type} <- normalize_tls_pin_type(fetch_value(pin, :type)),
{:ok, value} <- normalize_non_empty_string(fetch_value(pin, :value), :invalid_tls_pin) do
{:ok, %{type: type, value: value}}
end
end
defp normalize_tls_pin(_pin), do: {:error, :invalid_tls_pin}
defp normalize_tls_pin_type(:spki_sha256), do: {:ok, :spki_sha256}
defp normalize_tls_pin_type("spki_sha256"), do: {:ok, :spki_sha256}
defp normalize_tls_pin_type(_type), do: {:error, :invalid_tls_pin}
defp normalize_metadata(nil), do: {:ok, %{}}
defp normalize_metadata(metadata) when is_map(metadata), do: {:ok, metadata}
defp normalize_metadata(_metadata), do: {:error, :invalid_metadata}
defp normalize_boolean(nil, default), do: {:ok, default}
defp normalize_boolean(value, _default) when is_boolean(value), do: {:ok, value}
defp normalize_boolean(_value, _default), do: {:error, :invalid_enabled_flag}
defp normalize_non_empty_string(value, _reason) when is_binary(value) and value != "",
do: {:ok, value}
defp normalize_non_empty_string(_value, reason), do: {:error, reason}
defp normalize_filter_map(filter) when is_map(filter) do
Map.new(filter, fn
{key, value} when is_atom(key) -> {Atom.to_string(key), value}
{key, value} -> {key, value}
end)
end
defp normalize_filter_map(filter), do: filter
defp normalize_runtime_state("running"), do: :running
defp normalize_runtime_state(:running), do: :running
defp normalize_runtime_state("stopped"), do: :stopped
defp normalize_runtime_state(:stopped), do: :stopped
defp normalize_runtime_state(_state), do: :stopped
defp fetch_non_neg_integer(map, key) do
case fetch_value(map, key) do
value when is_integer(value) and value >= 0 -> value
_other -> 0
end
end
defp fetch_optional_integer(map, key) do
case fetch_value(map, key) do
value when is_integer(value) and value >= 0 -> value
_other -> nil
end
end
defp fetch_boolean(map, key) do
case fetch_value(map, key) do
value when is_boolean(value) -> value
_other -> nil
end
end
defp fetch_string_or_nil(map, key) do
case fetch_value(map, key) do
value when is_binary(value) and value != "" -> value
_other -> nil
end
end
defp fetch_value(map, key) when is_map(map) do
Map.get(map, key) || Map.get(map, Atom.to_string(key))
end
defp config_path do
config_value(:path)
end
defp config_value(key, default \\ nil) do
:parrhesia
|> Application.get_env(:sync, [])
|> Keyword.get(key, default)
end
defp now do
DateTime.utc_now()
|> DateTime.truncate(:second)
|> DateTime.to_iso8601()
end
end
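
Workers report progress back through the `runtime_event/4` cast defined near the top of this module; a sketch of the `:event_result` shape consumed by `update_runtime_for_event/3` (id and timestamp are placeholders):

```elixir
Parrhesia.API.Sync.Manager.runtime_event(
  Parrhesia.API.Sync.Manager,
  "tribes-primary",
  :event_result,
  %{
    event: %{"id" => String.duplicate("cd", 32), "created_at" => 1_764_000_000},
    result: :accepted
  }
)
```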

View File

@@ -5,19 +5,6 @@ defmodule Parrhesia.Application do
   @impl true
   def start(_type, _args) do
-    children = [
-      Parrhesia.Telemetry,
-      Parrhesia.Config,
-      Parrhesia.Storage.Supervisor,
-      Parrhesia.Subscriptions.Supervisor,
-      Parrhesia.Auth.Supervisor,
-      Parrhesia.Policy.Supervisor,
-      Parrhesia.Web.Endpoint,
-      Parrhesia.Web.MetricsEndpoint,
-      Parrhesia.Tasks.Supervisor
-    ]
-
-    opts = [strategy: :one_for_one, name: Parrhesia.Supervisor]
-    Supervisor.start_link(children, opts)
+    Parrhesia.Runtime.start_link(name: Parrhesia.Supervisor)
   end
 end

View File

@@ -67,7 +67,16 @@ defmodule Parrhesia.Auth.Challenges do
   end

   def handle_call({:valid?, owner_pid, challenge}, _from, state) do
-    {:reply, Map.get(state.entries, owner_pid) == challenge, state}
+    valid? =
+      case Map.get(state.entries, owner_pid) do
+        stored_challenge when is_binary(stored_challenge) ->
+          Plug.Crypto.secure_compare(stored_challenge, challenge)
+
+        _other ->
+          false
+      end
+
+    {:reply, valid?, state}
   end

   def handle_call({:clear, owner_pid}, _from, state) do

View File

@@ -3,20 +3,29 @@ defmodule Parrhesia.Auth.Nip98 do
   Minimal NIP-98 HTTP auth validation.
   """

+  alias Parrhesia.Auth.Nip98ReplayCache
   alias Parrhesia.Protocol.EventValidator

   @max_age_seconds 60

   @spec validate_authorization_header(String.t() | nil, String.t(), String.t()) ::
           {:ok, map()} | {:error, atom()}
-  def validate_authorization_header(nil, _method, _url), do: {:error, :missing_authorization}
+  def validate_authorization_header(authorization, method, url) do
+    validate_authorization_header(authorization, method, url, [])
+  end

-  def validate_authorization_header("Nostr " <> encoded_event, method, url)
-      when is_binary(method) and is_binary(url) do
+  @spec validate_authorization_header(String.t() | nil, String.t(), String.t(), keyword()) ::
+          {:ok, map()} | {:error, atom()}
+  def validate_authorization_header(nil, _method, _url, _opts),
+    do: {:error, :missing_authorization}
+
+  def validate_authorization_header("Nostr " <> encoded_event, method, url, opts)
+      when is_binary(method) and is_binary(url) and is_list(opts) do
     with {:ok, event_json} <- decode_base64(encoded_event),
          {:ok, event} <- JSON.decode(event_json),
-         :ok <- validate_event_shape(event),
-         :ok <- validate_http_binding(event, method, url) do
+         :ok <- validate_event_shape(event, opts),
+         :ok <- validate_http_binding(event, method, url),
+         :ok <- consume_replay_token(event, opts) do
       {:ok, event}
     else
       {:error, reason} -> {:error, reason}
@@ -24,7 +33,8 @@ defmodule Parrhesia.Auth.Nip98 do
     end
   end

-  def validate_authorization_header(_header, _method, _url), do: {:error, :invalid_authorization}
+  def validate_authorization_header(_header, _method, _url, _opts),
+    do: {:error, :invalid_authorization}

   defp decode_base64(encoded_event) do
     case Base.decode64(encoded_event) do
@@ -33,33 +43,35 @@ defmodule Parrhesia.Auth.Nip98 do
     end
   end

-  defp validate_event_shape(event) when is_map(event) do
+  defp validate_event_shape(event, opts) when is_map(event) do
     with :ok <- EventValidator.validate(event),
          :ok <- validate_kind(event),
-         :ok <- validate_fresh_created_at(event) do
+         :ok <- validate_fresh_created_at(event, opts) do
       :ok
     else
-      :ok -> :ok
+      {:error, :stale_event} -> {:error, :stale_event}
       {:error, _reason} -> {:error, :invalid_event}
     end
   end

-  defp validate_event_shape(_event), do: {:error, :invalid_event}
+  defp validate_event_shape(_event, _opts), do: {:error, :invalid_event}

   defp validate_kind(%{"kind" => 27_235}), do: :ok
   defp validate_kind(_event), do: {:error, :invalid_event}

-  defp validate_fresh_created_at(%{"created_at" => created_at}) when is_integer(created_at) do
+  defp validate_fresh_created_at(%{"created_at" => created_at}, opts)
+       when is_integer(created_at) do
     now = System.system_time(:second)
+    max_age_seconds = Keyword.get(opts, :max_age_seconds, @max_age_seconds)

-    if abs(now - created_at) <= @max_age_seconds do
+    if abs(now - created_at) <= max_age_seconds do
       :ok
     else
       {:error, :stale_event}
     end
   end

-  defp validate_fresh_created_at(_event), do: {:error, :invalid_event}
+  defp validate_fresh_created_at(_event, _opts), do: {:error, :invalid_event}

   defp validate_http_binding(event, method, url) do
     tags = Map.get(event, "tags", [])
@@ -85,4 +97,14 @@ defmodule Parrhesia.Auth.Nip98 do
       true -> :ok
     end
   end
+
+  defp consume_replay_token(%{"id" => event_id, "created_at" => created_at}, opts)
+       when is_binary(event_id) and is_integer(created_at) do
+    case Keyword.get(opts, :replay_cache, Nip98ReplayCache) do
+      nil -> :ok
+      replay_cache -> Nip98ReplayCache.consume(replay_cache, event_id, created_at, opts)
+    end
+  end
+
+  defp consume_replay_token(_event, _opts), do: {:error, :invalid_event}
 end
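
A sketch of the new four-argument entrypoint with a widened freshness window; the URL is a placeholder, and producing a signed kind-27235 event is out of scope here:

```elixir
verify_request = fn auth_event ->
  authorization = "Nostr " <> Base.encode64(JSON.encode!(auth_event))

  Parrhesia.Auth.Nip98.validate_authorization_header(
    authorization,
    "POST",
    "https://relay.example/publish",
    max_age_seconds: 120
  )
end
```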

View File

@@ -0,0 +1,56 @@
defmodule Parrhesia.Auth.Nip98ReplayCache do
@moduledoc """
Tracks recently accepted NIP-98 auth event ids to prevent replay.
"""
use GenServer
@default_max_age_seconds 60
@spec start_link(keyword()) :: GenServer.on_start()
def start_link(opts \\ []) do
case Keyword.get(opts, :name, __MODULE__) do
nil -> GenServer.start_link(__MODULE__, opts)
name -> GenServer.start_link(__MODULE__, opts, name: name)
end
end
@spec consume(GenServer.server(), String.t(), integer(), keyword()) ::
:ok | {:error, :replayed_auth_event}
def consume(server \\ __MODULE__, event_id, created_at, opts \\ [])
when is_binary(event_id) and is_integer(created_at) and is_list(opts) do
GenServer.call(server, {:consume, event_id, created_at, opts})
end
@impl true
def init(_opts) do
{:ok, %{entries: %{}}}
end
@impl true
def handle_call({:consume, event_id, created_at, opts}, _from, state) do
# `created_at` is a wall-clock Unix timestamp; use the same clock for the
# expiry bookkeeping below instead of monotonic time.
now_ms = System.system_time(:millisecond)
entries = prune_expired(state.entries, now_ms)
case Map.has_key?(entries, event_id) do
true ->
{:reply, {:error, :replayed_auth_event}, %{state | entries: entries}}
false ->
expires_at_ms = replay_expiration_ms(now_ms, created_at, opts)
next_entries = Map.put(entries, event_id, expires_at_ms)
{:reply, :ok, %{state | entries: next_entries}}
end
end
defp prune_expired(entries, now_ms) do
Map.reject(entries, fn {_event_id, expires_at_ms} -> expires_at_ms <= now_ms end)
end
defp replay_expiration_ms(now_ms, created_at, opts) do
max_age_seconds = Keyword.get(opts, :max_age_seconds, max_age_seconds())
max(now_ms, created_at * 1000) + max_age_seconds * 1000
end
defp max_age_seconds, do: @default_max_age_seconds
end
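
The consume semantics in one sketch: the first sighting of an auth event id succeeds, and a replay within the freshness window is rejected (id and timestamp are placeholders):

```elixir
event_id = String.duplicate("ef", 32)
created_at = System.system_time(:second)

:ok = Parrhesia.Auth.Nip98ReplayCache.consume(event_id, created_at)

{:error, :replayed_auth_event} =
  Parrhesia.Auth.Nip98ReplayCache.consume(event_id, created_at)
```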

View File

@@ -12,7 +12,9 @@ defmodule Parrhesia.Auth.Supervisor do
   @impl true
   def init(_init_arg) do
     children = [
-      {Parrhesia.Auth.Challenges, name: Parrhesia.Auth.Challenges}
+      {Parrhesia.Auth.Challenges, name: Parrhesia.Auth.Challenges},
+      {Parrhesia.Auth.Nip98ReplayCache, name: Parrhesia.Auth.Nip98ReplayCache},
+      {Parrhesia.API.Identity.Manager, []}
     ]

     Supervisor.init(children, strategy: :one_for_one)

View File

@@ -1,6 +1,9 @@
 defmodule Parrhesia.Config do
   @moduledoc """
   Runtime configuration cache backed by ETS.
+
+  The application environment is copied into ETS at startup so hot-path reads do not need to
+  traverse the application environment repeatedly.
   """

   use GenServer
@@ -8,6 +11,9 @@ defmodule Parrhesia.Config do
   @table __MODULE__
   @root_key :config

+  @doc """
+  Starts the config cache server.
+  """
   def start_link(init_arg \\ []) do
     GenServer.start_link(__MODULE__, init_arg, name: __MODULE__)
   end
@@ -26,6 +32,9 @@ defmodule Parrhesia.Config do
     {:ok, %{}}
   end

+  @doc """
+  Returns the cached top-level Parrhesia application config.
+  """
   @spec all() :: map() | keyword()
   def all do
     case :ets.lookup(@table, @root_key) do
@@ -34,6 +43,11 @@ defmodule Parrhesia.Config do
     end
   end

+  @doc """
+  Reads a nested config value by path.
+
+  The path may traverse maps or keyword lists. Missing paths return `default`.
+  """
   @spec get([atom()], term()) :: term()
   def get(path, default \\ nil) when is_list(path) do
     case fetch(path) do

View File

@@ -0,0 +1,89 @@
defmodule Parrhesia.ConnectionStats do
@moduledoc """
Per-listener connection and subscription counters.
Tracks active connection and subscription counts per listener and emits
`[:parrhesia, :listener, :population]` telemetry events on each change.
"""
use GenServer
alias Parrhesia.Telemetry
defstruct connections: %{}, subscriptions: %{}
@type state :: %__MODULE__{
connections: %{(atom() | String.t()) => non_neg_integer()},
subscriptions: %{(atom() | String.t()) => non_neg_integer()}
}
@spec start_link(keyword()) :: GenServer.on_start()
def start_link(opts \\ []) do
name = Keyword.get(opts, :name, __MODULE__)
GenServer.start_link(__MODULE__, %__MODULE__{}, name: name)
end
@spec connection_open(atom() | String.t()) :: :ok
def connection_open(listener_id), do: cast({:connection_open, listener_id})
@spec connection_close(atom() | String.t()) :: :ok
def connection_close(listener_id), do: cast({:connection_close, listener_id})
@spec subscriptions_change(atom() | String.t(), integer()) :: :ok
def subscriptions_change(listener_id, delta) when is_integer(delta) do
cast({:subscriptions_change, listener_id, delta})
end
@impl true
def init(%__MODULE__{} = state), do: {:ok, state}
@impl true
def handle_cast({:connection_open, listener_id}, %__MODULE__{} = state) do
listener_id = normalize_listener_id(listener_id)
next_state = %{state | connections: increment(state.connections, listener_id, 1)}
emit_population(listener_id, next_state)
{:noreply, next_state}
end
def handle_cast({:connection_close, listener_id}, %__MODULE__{} = state) do
listener_id = normalize_listener_id(listener_id)
next_state = %{state | connections: increment(state.connections, listener_id, -1)}
emit_population(listener_id, next_state)
{:noreply, next_state}
end
def handle_cast({:subscriptions_change, listener_id, delta}, %__MODULE__{} = state) do
listener_id = normalize_listener_id(listener_id)
next_state = %{state | subscriptions: increment(state.subscriptions, listener_id, delta)}
emit_population(listener_id, next_state)
{:noreply, next_state}
end
defp cast(message) do
GenServer.cast(__MODULE__, message)
:ok
catch
:exit, {:noproc, _details} -> :ok
:exit, {:normal, _details} -> :ok
end
defp increment(counts, key, delta) do
current = Map.get(counts, key, 0)
Map.put(counts, key, max(current + delta, 0))
end
defp emit_population(listener_id, %__MODULE__{} = state) do
Telemetry.emit(
[:parrhesia, :listener, :population],
%{
connections: Map.get(state.connections, listener_id, 0),
subscriptions: Map.get(state.subscriptions, listener_id, 0)
},
%{listener_id: listener_id}
)
end
defp normalize_listener_id(listener_id) when is_atom(listener_id), do: listener_id
defp normalize_listener_id(listener_id) when is_binary(listener_id), do: listener_id
defp normalize_listener_id(_listener_id), do: :unknown
end
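
A sketch of consuming the population telemetry above, assuming Parrhesia.Telemetry.emit/3 forwards to :telemetry.execute/3; the handler id and logging are illustrative:

:telemetry.attach(
  "log-listener-population",
  [:parrhesia, :listener, :population],
  fn _event_name, measurements, metadata, _config ->
    # Both counters are re-emitted per listener on every change.
    IO.inspect({metadata.listener_id, measurements.connections, measurements.subscriptions})
  end,
  nil
)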

View File

@@ -0,0 +1,46 @@
defmodule Parrhesia.Fanout.Dispatcher do
@moduledoc """
Asynchronous local fanout dispatcher.
"""
use GenServer
alias Parrhesia.Subscriptions.Index
@spec start_link(keyword()) :: GenServer.on_start()
def start_link(opts \\ []) do
name = Keyword.get(opts, :name, __MODULE__)
GenServer.start_link(__MODULE__, :ok, name: name)
end
@spec dispatch(map()) :: :ok
def dispatch(event), do: dispatch(__MODULE__, event)
@spec dispatch(GenServer.server(), map()) :: :ok
def dispatch(server, event) when is_map(event) do
GenServer.cast(server, {:dispatch, event})
end
@impl true
def init(:ok), do: {:ok, %{}}
@impl true
def handle_cast({:dispatch, event}, state) do
dispatch_to_candidates(event)
{:noreply, state}
end
defp dispatch_to_candidates(event) do
case Index.candidate_subscription_keys(event) do
candidates when is_list(candidates) ->
Enum.each(candidates, fn {owner_pid, subscription_id} ->
send(owner_pid, {:fanout_event, subscription_id, event})
end)
_other ->
:ok
end
catch
:exit, _reason -> :ok
end
end
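
Owner processes receive plain messages from the dispatcher; a hypothetical receive clause in a connection process (the delivery side is illustrative, not part of this change):

receive do
  {:fanout_event, subscription_id, event} ->
    # Forward the matched event to the client for this subscription (illustrative).
    IO.inspect({subscription_id, event["id"]})
end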

View File

@@ -5,7 +5,7 @@ defmodule Parrhesia.Fanout.MultiNode do
   use GenServer

-  alias Parrhesia.Subscriptions.Index
+  alias Parrhesia.Fanout.Dispatcher

   @group __MODULE__
@@ -44,11 +44,7 @@ defmodule Parrhesia.Fanout.MultiNode do
   @impl true
   def handle_info({:remote_fanout_event, event}, state) do
-    Index.candidate_subscription_keys(event)
-    |> Enum.each(fn {owner_pid, subscription_id} ->
-      send(owner_pid, {:fanout_event, subscription_id, event})
-    end)
+    Dispatcher.dispatch(event)

     {:noreply, state}
   end

View File

@@ -1,52 +1,62 @@
 defmodule Parrhesia.Groups.Flow do
   @moduledoc """
-  Minimal group and membership flow handling for NIP-29/NIP-43 related kinds.
+  Relay access membership projection backed by the shared group storage adapter.
   """

   alias Parrhesia.Storage

-  @membership_request_kind 8_000
-  @membership_approval_kind 8_001
-  @relay_metadata_kind 28_934
-  @relay_admins_kind 28_935
-  @relay_rules_kind 28_936
-  @membership_event_kind 13_534
+  @relay_access_group_id "__relay_access__"
+  @add_user_kind 8_000
+  @remove_user_kind 8_001
+  @join_request_kind 28_934
+  @invite_request_kind 28_935
+  @leave_request_kind 28_936
+  @membership_list_kind 13_534

   @spec handle_event(map()) :: :ok | {:error, term()}
   def handle_event(event) when is_map(event) do
     case Map.get(event, "kind") do
-      @membership_request_kind -> upsert_membership(event, "requested")
-      @membership_approval_kind -> upsert_membership(event, "member")
-      @membership_event_kind -> upsert_membership(event, "member")
-      @relay_metadata_kind -> :ok
-      @relay_admins_kind -> :ok
-      @relay_rules_kind -> :ok
+      @join_request_kind -> put_member(event, membership_pubkey_from_event(event))
+      @leave_request_kind -> delete_member(event, membership_pubkey_from_event(event))
+      @add_user_kind -> put_member(event, tagged_pubkey(event, "p"))
+      @remove_user_kind -> delete_member(event, tagged_pubkey(event, "p"))
+      @membership_list_kind -> replace_membership_snapshot(event)
+      @invite_request_kind -> :ok
       _other -> :ok
     end
   end

-  @spec group_related_kind?(non_neg_integer()) :: boolean()
-  def group_related_kind?(kind)
+  @spec relay_access_kind?(non_neg_integer()) :: boolean()
+  def relay_access_kind?(kind)
       when kind in [
-             @membership_request_kind,
-             @membership_approval_kind,
-             @relay_metadata_kind,
-             @relay_admins_kind,
-             @relay_rules_kind,
-             @membership_event_kind
+             @add_user_kind,
+             @remove_user_kind,
+             @join_request_kind,
+             @invite_request_kind,
+             @leave_request_kind,
+             @membership_list_kind
           ],
       do: true

-  def group_related_kind?(_kind), do: false
+  def relay_access_kind?(_kind), do: false

-  defp upsert_membership(event, role) do
-    with {:ok, group_id} <- group_id_from_event(event),
-         {:ok, pubkey} <- pubkey_from_event(event) do
+  @spec get_membership(binary()) :: {:ok, map() | nil} | {:error, term()}
+  def get_membership(pubkey) when is_binary(pubkey) do
+    Storage.groups().get_membership(%{}, @relay_access_group_id, pubkey)
+  end
+
+  @spec list_memberships() :: {:ok, [map()]} | {:error, term()}
+  def list_memberships do
+    Storage.groups().list_memberships(%{}, @relay_access_group_id)
+  end
+
+  defp put_member(event, {:ok, pubkey}) do
+    with {:ok, metadata} <- membership_metadata(event) do
       Storage.groups().put_membership(%{}, %{
-        group_id: group_id,
+        group_id: @relay_access_group_id,
         pubkey: pubkey,
-        role: role,
-        metadata: %{"source_kind" => Map.get(event, "kind")}
+        role: "member",
+        metadata: metadata
       })
       |> case do
         {:ok, _membership} -> :ok
@@ -55,21 +65,85 @@ defmodule Parrhesia.Groups.Flow do
     end
   end

-  defp group_id_from_event(event) do
-    group_id =
-      event
-      |> Map.get("tags", [])
-      |> Enum.find_value(fn
-        ["h", value | _rest] when is_binary(value) and value != "" -> value
-        _tag -> nil
-      end)
-
-    case group_id do
-      nil -> {:error, :missing_group_id}
-      value -> {:ok, value}
+  defp put_member(_event, {:error, reason}), do: {:error, reason}
+
+  defp delete_member(_event, {:ok, pubkey}) do
+    Storage.groups().delete_membership(%{}, @relay_access_group_id, pubkey)
+  end
+
+  defp delete_member(_event, {:error, reason}), do: {:error, reason}
+
+  defp replace_membership_snapshot(event) do
+    with {:ok, tagged_members} <- tagged_pubkeys(event, "member"),
+         {:ok, existing_memberships} <- list_memberships() do
+      incoming_pubkeys = MapSet.new(tagged_members)
+      existing_pubkeys = MapSet.new(Enum.map(existing_memberships, & &1.pubkey))
+
+      remove_members =
+        existing_pubkeys
+        |> MapSet.difference(incoming_pubkeys)
+        |> MapSet.to_list()
+
+      add_members =
+        incoming_pubkeys
+        |> MapSet.to_list()
+
+      :ok = remove_memberships(remove_members)
+      add_memberships(event, add_members)
+    else
+      {:error, reason} -> {:error, reason}
     end
   end

-  defp pubkey_from_event(%{"pubkey" => pubkey}) when is_binary(pubkey), do: {:ok, pubkey}
-  defp pubkey_from_event(_event), do: {:error, :missing_pubkey}
+  defp membership_pubkey_from_event(%{"pubkey" => pubkey}) when is_binary(pubkey),
+    do: {:ok, pubkey}
+
+  defp membership_pubkey_from_event(_event), do: {:error, :missing_pubkey}
+
+  defp tagged_pubkey(event, tag_name) do
+    event
+    |> tagged_pubkeys(tag_name)
+    |> case do
+      {:ok, [pubkey]} -> {:ok, pubkey}
+      {:ok, []} -> {:error, :missing_pubkey}
+      {:ok, _pubkeys} -> {:error, :invalid_pubkey}
+    end
+  end
+
+  defp tagged_pubkeys(event, tag_name) do
+    pubkeys =
+      event
+      |> Map.get("tags", [])
+      |> Enum.flat_map(fn
+        [^tag_name, pubkey | _rest] when is_binary(pubkey) and pubkey != "" -> [pubkey]
+        _tag -> []
+      end)
+
+    {:ok, Enum.uniq(pubkeys)}
+  end
+
+  defp membership_metadata(event) do
+    {:ok,
+     %{
+       "source_kind" => Map.get(event, "kind"),
+       "source_event_id" => Map.get(event, "id")
+     }}
+  end
+
+  defp remove_memberships(pubkeys) when is_list(pubkeys) do
+    Enum.each(pubkeys, fn pubkey ->
+      :ok = Storage.groups().delete_membership(%{}, @relay_access_group_id, pubkey)
+    end)
+
+    :ok
+  end
+
+  defp add_memberships(event, pubkeys) when is_list(pubkeys) do
+    Enum.reduce_while(pubkeys, :ok, fn pubkey, :ok ->
+      case put_member(event, {:ok, pubkey}) do
+        :ok -> {:cont, :ok}
+        {:error, _reason} = error -> {:halt, error}
+      end
+    end)
+  end
 end
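
For reference, a relay-signed add-user event (kind 8000) as the rewritten Flow handles it; field values are illustrative and a configured groups storage adapter is assumed:

event = %{
  "id" => String.duplicate("e", 64),
  "kind" => 8_000,
  "pubkey" => String.duplicate("r", 64),
  "created_at" => System.system_time(:second),
  "tags" => [["-"], ["p", String.duplicate("m", 64)]],
  "content" => ""
}

# Projects the p-tagged pubkey into the "__relay_access__" membership group.
:ok = Parrhesia.Groups.Flow.handle_event(event)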

lib/parrhesia/http.ex Normal file
View File

@@ -0,0 +1,48 @@
defmodule Parrhesia.HTTP do
@moduledoc false
alias Parrhesia.Metadata
@default_headers [{"user-agent", Metadata.user_agent()}]
@spec default_headers() :: [{String.t(), String.t()}]
def default_headers, do: @default_headers
@spec get(Keyword.t()) :: {:ok, Req.Response.t()} | {:error, Exception.t()}
def get(options) when is_list(options) do
Req.get(put_default_headers(options))
end
@spec post(Keyword.t()) :: {:ok, Req.Response.t()} | {:error, Exception.t()}
def post(options) when is_list(options) do
Req.post(put_default_headers(options))
end
@spec put_default_headers(Keyword.t()) :: Keyword.t()
def put_default_headers(options) when is_list(options) do
Keyword.update(options, :headers, @default_headers, &merge_headers(&1, @default_headers))
end
defp merge_headers(headers, defaults) do
existing_names =
headers
|> List.wrap()
|> Enum.reduce(MapSet.new(), fn
{name, _value}, acc -> MapSet.put(acc, normalize_header_name(name))
_other, acc -> acc
end)
headers ++
Enum.reject(defaults, fn {name, _value} ->
MapSet.member?(existing_names, normalize_header_name(name))
end)
end
defp normalize_header_name(name) when is_atom(name) do
name
|> Atom.to_string()
|> String.downcase()
end
defp normalize_header_name(name) when is_binary(name), do: String.downcase(name)
end
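
The merge keeps caller-supplied headers and only fills in missing defaults; a quick sketch with illustrative values:

# A caller-supplied user-agent wins over the default one.
Parrhesia.HTTP.put_default_headers(url: "https://example.com", headers: [{"user-agent", "custom"}])
#=> [url: "https://example.com", headers: [{"user-agent", "custom"}]]

# With no headers given, the defaults are inserted as-is (here with default metadata config).
Parrhesia.HTTP.put_default_headers(url: "https://example.com")
#=> [url: "https://example.com", headers: [{"user-agent", "Parrhesia"}]]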

lib/parrhesia/metadata.ex Normal file
View File

@@ -0,0 +1,29 @@
defmodule Parrhesia.Metadata do
@moduledoc false
@metadata Application.compile_env(:parrhesia, :metadata, [])
@name Keyword.get(@metadata, :name, "Parrhesia")
@version Keyword.get(@metadata, :version, "0.0.0")
@hide_version? Keyword.get(@metadata, :hide_version?, true)
@spec name() :: String.t()
def name, do: @name
@spec version() :: String.t()
def version, do: @version
@spec hide_version?() :: boolean()
def hide_version?, do: @hide_version?
@spec name_and_version() :: String.t()
def name_and_version, do: "#{@name}/#{@version}"
@spec user_agent() :: String.t()
def user_agent do
if hide_version?() do
name()
else
name_and_version()
end
end
end
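
Metadata is read with Application.compile_env/3, so it is fixed at build time; a config sketch with illustrative values (changing them requires a recompile):

# config/config.exs
config :parrhesia, :metadata,
  name: "Parrhesia",
  version: "0.8.0",
  hide_version?: false

# Parrhesia.Metadata.user_agent() would then return "Parrhesia/0.8.0".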

View File

@@ -0,0 +1,136 @@
defmodule Parrhesia.Negentropy.Engine do
@moduledoc """
Relay/client-agnostic negentropy reconciliation engine.
"""
alias Parrhesia.Negentropy.Message
@default_id_list_threshold 32
@type item :: Message.item()
@spec initial_message([item()], keyword()) :: binary()
def initial_message(items, opts \\ []) when is_list(opts) do
normalized_items = normalize_items(items)
Message.encode([
describe_range(normalized_items, :infinity, id_list_threshold(opts))
])
end
@spec answer([item()], binary(), keyword()) :: {:ok, binary()} | {:error, term()}
def answer(items, incoming_message, opts \\ [])
when is_binary(incoming_message) and is_list(opts) do
normalized_items = normalize_items(items)
threshold = id_list_threshold(opts)
case Message.decode(incoming_message) do
{:ok, ranges} ->
response_ranges =
respond_to_ranges(normalized_items, ranges, Message.initial_lower_bound(), threshold)
{:ok, Message.encode(response_ranges)}
{:unsupported_version, _supported_version} ->
{:ok, Message.supported_version_message()}
{:error, reason} ->
{:error, reason}
end
end
defp respond_to_ranges(_items, [], _lower_bound, _threshold), do: []
defp respond_to_ranges(items, [range | rest], lower_bound, threshold) do
upper_bound = Map.fetch!(range, :upper_bound)
items_in_range =
Enum.filter(items, fn item ->
Message.item_in_range?(item, lower_bound, upper_bound)
end)
response =
case range.mode do
:skip ->
[%{upper_bound: upper_bound, mode: :skip, payload: nil}]
:fingerprint ->
respond_to_fingerprint_range(items_in_range, upper_bound, range.payload, threshold)
:id_list ->
respond_to_id_list_range(items_in_range, upper_bound, range.payload, threshold)
end
response ++ respond_to_ranges(items, rest, upper_bound, threshold)
end
defp respond_to_fingerprint_range(items, upper_bound, remote_fingerprint, threshold) do
if Message.fingerprint(items) == remote_fingerprint do
[%{upper_bound: upper_bound, mode: :skip, payload: nil}]
else
mismatch_response(items, upper_bound, threshold)
end
end
defp respond_to_id_list_range(items, upper_bound, remote_ids, threshold) do
if Enum.map(items, & &1.id) == remote_ids do
[%{upper_bound: upper_bound, mode: :skip, payload: nil}]
else
mismatch_response(items, upper_bound, threshold)
end
end
defp mismatch_response(items, upper_bound, threshold) do
if length(items) <= threshold do
[%{upper_bound: upper_bound, mode: :id_list, payload: Enum.map(items, & &1.id)}]
else
split_response(items, upper_bound, threshold)
end
end
defp split_response(items, upper_bound, threshold) do
midpoint = div(length(items), 2)
left_items = Enum.take(items, midpoint)
right_items = Enum.drop(items, midpoint)
boundary =
left_items
|> List.last()
|> then(&Message.split_bound(&1, hd(right_items)))
[
describe_range(left_items, boundary, threshold),
describe_range(right_items, upper_bound, threshold)
]
end
defp describe_range(items, upper_bound, threshold) do
if length(items) <= threshold do
%{upper_bound: upper_bound, mode: :id_list, payload: Enum.map(items, & &1.id)}
else
%{upper_bound: upper_bound, mode: :fingerprint, payload: Message.fingerprint(items)}
end
end
defp normalize_items(items) do
items
|> Enum.map(&normalize_item/1)
|> Enum.sort(&(Message.compare_items(&1, &2) != :gt))
end
defp normalize_item(%{created_at: created_at, id: id})
when is_integer(created_at) and created_at >= 0 and is_binary(id) and byte_size(id) == 32 do
%{created_at: created_at, id: id}
end
defp normalize_item(item) do
raise ArgumentError, "invalid negentropy item: #{inspect(item)}"
end
defp id_list_threshold(opts) do
case Keyword.get(opts, :id_list_threshold, @default_id_list_threshold) do
threshold when is_integer(threshold) and threshold > 0 -> threshold
_other -> @default_id_list_threshold
end
end
end
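
A minimal local round trip through the engine, using contrived 32-byte ids (SHA-256 of a seed) and timestamps:

make_item = fn seed, created_at ->
  %{created_at: created_at, id: :crypto.hash(:sha256, seed)}
end

client_items = [make_item.("a", 1), make_item.("b", 2)]
server_items = [make_item.("a", 1), make_item.("b", 2), make_item.("c", 3)]

# The initiator describes its full range; the responder answers with ranges
# (here an id list, since both sets are below the default threshold of 32).
message = Parrhesia.Negentropy.Engine.initial_message(client_items)
{:ok, _response} = Parrhesia.Negentropy.Engine.answer(server_items, message)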

View File

@@ -0,0 +1,349 @@
defmodule Parrhesia.Negentropy.Message do
@moduledoc """
NIP-77 negentropy message codec and helpers.
"""
import Bitwise
@protocol_version 0x61
@id_size 32
@fingerprint_size 16
@u256_mod 1 <<< 256
@zero_id <<0::size(256)>>
@type item :: %{created_at: non_neg_integer(), id: binary()}
@type bound :: :infinity | {non_neg_integer(), binary()}
@type range ::
%{
upper_bound: bound(),
mode: :skip | :fingerprint | :id_list,
payload: nil | binary() | [binary()]
}
@spec protocol_version() :: byte()
def protocol_version, do: @protocol_version
@spec supported_version_message() :: binary()
def supported_version_message, do: <<@protocol_version>>
@spec decode(binary()) :: {:ok, [range()]} | {:unsupported_version, byte()} | {:error, term()}
def decode(<<version, _rest::binary>>) when version != @protocol_version,
do: {:unsupported_version, @protocol_version}
def decode(<<@protocol_version, rest::binary>>) do
decode_ranges(rest, 0, initial_lower_bound(), [])
end
def decode(_message), do: {:error, :invalid_message}
@spec encode([range()]) :: binary()
def encode(ranges) when is_list(ranges) do
ranges
|> drop_trailing_skip_ranges()
|> Enum.reduce({[@protocol_version], 0}, fn range, {acc, previous_timestamp} ->
{encoded_range, next_timestamp} = encode_range(range, previous_timestamp)
{[acc, encoded_range], next_timestamp}
end)
|> elem(0)
|> IO.iodata_to_binary()
end
@spec fingerprint([item()]) :: binary()
def fingerprint(items) when is_list(items) do
sum =
Enum.reduce(items, 0, fn %{id: id}, acc ->
<<id_integer::unsigned-little-size(256)>> = id
rem(acc + id_integer, @u256_mod)
end)
payload = [<<sum::unsigned-little-size(256)>>, encode_varint(length(items))]
payload
|> IO.iodata_to_binary()
|> then(&:crypto.hash(:sha256, &1))
|> binary_part(0, @fingerprint_size)
end
@spec compare_items(item(), item()) :: :lt | :eq | :gt
def compare_items(left, right) do
cond do
left.created_at < right.created_at -> :lt
left.created_at > right.created_at -> :gt
left.id < right.id -> :lt
left.id > right.id -> :gt
true -> :eq
end
end
@spec compare_bound(bound(), bound()) :: :lt | :eq | :gt
def compare_bound(:infinity, :infinity), do: :eq
def compare_bound(:infinity, _other), do: :gt
def compare_bound(_other, :infinity), do: :lt
def compare_bound({left_timestamp, left_id}, {right_timestamp, right_id}) do
cond do
left_timestamp < right_timestamp -> :lt
left_timestamp > right_timestamp -> :gt
left_id < right_id -> :lt
left_id > right_id -> :gt
true -> :eq
end
end
@spec item_in_range?(item(), bound(), bound()) :: boolean()
def item_in_range?(item, lower_bound, upper_bound) do
compare_item_to_bound(item, lower_bound) != :lt and
compare_item_to_bound(item, upper_bound) == :lt
end
@spec initial_lower_bound() :: bound()
def initial_lower_bound, do: {0, @zero_id}
@spec zero_id() :: binary()
def zero_id, do: @zero_id
@spec split_bound(item(), item()) :: bound()
def split_bound(previous_item, next_item)
when is_map(previous_item) and is_map(next_item) do
cond do
previous_item.created_at < next_item.created_at ->
{next_item.created_at, @zero_id}
previous_item.created_at == next_item.created_at ->
prefix_length = shared_prefix_length(previous_item.id, next_item.id) + 1
<<prefix::binary-size(prefix_length), _rest::binary>> = next_item.id
{next_item.created_at, prefix <> :binary.copy(<<0>>, @id_size - prefix_length)}
true ->
raise ArgumentError, "split_bound/2 requires previous_item <= next_item"
end
end
defp decode_ranges(<<>>, _previous_timestamp, _lower_bound, ranges),
do: {:ok, Enum.reverse(ranges)}
defp decode_ranges(binary, previous_timestamp, lower_bound, ranges) do
with {:ok, upper_bound, rest, next_timestamp} <- decode_bound(binary, previous_timestamp),
:ok <- validate_upper_bound(lower_bound, upper_bound),
{:ok, mode, payload, tail} <- decode_payload(rest) do
next_ranges = [%{upper_bound: upper_bound, mode: mode, payload: payload} | ranges]
if upper_bound == :infinity and tail != <<>> do
{:error, :invalid_message}
else
decode_ranges(tail, next_timestamp, upper_bound, next_ranges)
end
end
end
defp validate_upper_bound(lower_bound, upper_bound) do
if compare_bound(lower_bound, upper_bound) == :lt do
:ok
else
{:error, :invalid_message}
end
end
defp decode_bound(binary, previous_timestamp) do
with {:ok, encoded_timestamp, rest} <- decode_varint(binary),
{:ok, length, tail} <- decode_varint(rest),
:ok <- validate_bound_prefix_length(length),
{:ok, prefix, remainder} <- decode_prefix(tail, length) do
decode_bound_value(encoded_timestamp, length, prefix, remainder, previous_timestamp)
end
end
defp decode_payload(binary) do
with {:ok, mode_value, rest} <- decode_varint(binary) do
case mode_value do
0 ->
{:ok, :skip, nil, rest}
1 ->
decode_fingerprint_payload(rest)
2 ->
decode_id_list_payload(rest)
_other ->
{:error, :invalid_message}
end
end
end
defp decode_varint(binary), do: decode_varint(binary, 0)
defp decode_varint(<<>>, _acc), do: {:error, :invalid_message}
defp decode_varint(<<byte, rest::binary>>, acc) do
value = acc * 128 + band(byte, 0x7F)
if band(byte, 0x80) == 0 do
{:ok, value, rest}
else
decode_varint(rest, value)
end
end
defp encode_range(range, previous_timestamp) do
{encoded_bound, next_timestamp} = encode_bound(range.upper_bound, previous_timestamp)
{mode, payload} = encode_payload(range)
{[encoded_bound, mode, payload], next_timestamp}
end
defp encode_bound(:infinity, previous_timestamp),
do: {[encode_varint(0), encode_varint(0)], previous_timestamp}
defp encode_bound({timestamp, id}, previous_timestamp) do
prefix_length = id_prefix_length(id)
<<prefix::binary-size(prefix_length), _rest::binary>> = id
{
[encode_varint(timestamp - previous_timestamp + 1), encode_varint(prefix_length), prefix],
timestamp
}
end
defp encode_payload(%{mode: :skip}) do
{encode_varint(0), <<>>}
end
defp encode_payload(%{mode: :fingerprint, payload: fingerprint})
when is_binary(fingerprint) and byte_size(fingerprint) == @fingerprint_size do
{encode_varint(1), fingerprint}
end
defp encode_payload(%{mode: :id_list, payload: ids}) when is_list(ids) do
encoded_ids = Enum.map(ids, fn id -> validate_id!(id) end)
{encode_varint(2), [encode_varint(length(encoded_ids)), encoded_ids]}
end
defp encode_varint(value) when is_integer(value) and value >= 0 do
digits = collect_base128_digits(value, [])
last_index = length(digits) - 1
digits
|> Enum.with_index()
|> Enum.map(fn {digit, index} ->
if index == last_index do
digit
else
digit + 128
end
end)
|> :erlang.list_to_binary()
end
defp collect_base128_digits(value, acc) do
quotient = div(value, 128)
remainder = rem(value, 128)
if quotient == 0 do
[remainder | acc]
else
collect_base128_digits(quotient, [remainder | acc])
end
end
defp unpack_ids(binary), do: unpack_ids(binary, [])
defp unpack_ids(<<>>, acc), do: Enum.reverse(acc)
defp unpack_ids(<<id::binary-size(@id_size), rest::binary>>, acc),
do: unpack_ids(rest, [id | acc])
defp decode_prefix(binary, length) when byte_size(binary) >= length do
<<prefix::binary-size(length), rest::binary>> = binary
{:ok, prefix, rest}
end
defp decode_prefix(_binary, _length), do: {:error, :invalid_message}
defp decode_bound_value(0, 0, _prefix, remainder, previous_timestamp),
do: {:ok, :infinity, remainder, previous_timestamp}
defp decode_bound_value(0, _length, _prefix, _remainder, _previous_timestamp),
do: {:error, :invalid_message}
defp decode_bound_value(encoded_timestamp, length, prefix, remainder, previous_timestamp) do
timestamp = previous_timestamp + encoded_timestamp - 1
id = prefix <> :binary.copy(<<0>>, @id_size - length)
{:ok, {timestamp, id}, remainder, timestamp}
end
defp decode_fingerprint_payload(<<fingerprint::binary-size(@fingerprint_size), tail::binary>>),
do: {:ok, :fingerprint, fingerprint, tail}
defp decode_fingerprint_payload(_payload), do: {:error, :invalid_message}
defp decode_id_list_payload(rest) do
with {:ok, count, tail} <- decode_varint(rest),
{:ok, ids, remainder} <- decode_id_list_bytes(tail, count) do
{:ok, :id_list, ids, remainder}
end
end
defp decode_id_list_bytes(tail, count) do
expected_bytes = count * @id_size
if byte_size(tail) >= expected_bytes do
<<ids::binary-size(expected_bytes), remainder::binary>> = tail
{:ok, unpack_ids(ids), remainder}
else
{:error, :invalid_message}
end
end
defp validate_bound_prefix_length(length)
when is_integer(length) and length >= 0 and length <= @id_size,
do: :ok
defp validate_bound_prefix_length(_length), do: {:error, :invalid_message}
defp id_prefix_length(id) do
id
|> validate_id!()
|> :binary.bin_to_list()
|> Enum.reverse()
|> Enum.drop_while(&(&1 == 0))
|> length()
end
defp shared_prefix_length(left_id, right_id) do
left_id = validate_id!(left_id)
right_id = validate_id!(right_id)
left_id
|> :binary.bin_to_list()
|> Enum.zip(:binary.bin_to_list(right_id))
|> Enum.reduce_while(0, fn
{left_byte, right_byte}, acc when left_byte == right_byte -> {:cont, acc + 1}
_pair, acc -> {:halt, acc}
end)
end
defp drop_trailing_skip_ranges(ranges) do
ranges
|> Enum.reverse()
|> Enum.drop_while(fn range -> range.mode == :skip end)
|> Enum.reverse()
end
defp compare_item_to_bound(_item, :infinity), do: :lt
defp compare_item_to_bound(item, {timestamp, id}) do
cond do
item.created_at < timestamp -> :lt
item.created_at > timestamp -> :gt
item.id < id -> :lt
item.id > id -> :gt
true -> :eq
end
end
defp validate_id!(id) when is_binary(id) and byte_size(id) == @id_size, do: id
defp validate_id!(_id) do
raise ArgumentError, "negentropy ids must be 32-byte binaries"
end
end
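
The fingerprint above is the little-endian sum of the ids mod 2^256, concatenated with the varint-encoded item count, hashed with SHA-256 and truncated to 16 bytes. A standalone sketch of the same computation for fewer than 128 items (where the varint is a single byte):

ids = [:crypto.hash(:sha256, "a"), :crypto.hash(:sha256, "b")]

sum =
  ids
  |> Enum.map(fn <<n::unsigned-little-size(256)>> -> n end)
  |> Enum.sum()
  |> rem(Bitwise.bsl(1, 256))

fingerprint =
  :crypto.hash(:sha256, <<sum::unsigned-little-size(256), length(ids)>>)
  |> binary_part(0, 16)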

View File

@@ -1,10 +1,13 @@
 defmodule Parrhesia.Negentropy.Sessions do
   @moduledoc """
-  In-memory NEG-* session tracking.
+  In-memory NIP-77 session tracking over bounded local event snapshots.
   """

   use GenServer

+  alias Parrhesia.Negentropy.Engine
+  alias Parrhesia.Storage
+
   @type session_key :: {pid(), String.t()}

   @default_max_payload_bytes 4096
@@ -12,6 +15,8 @@ defmodule Parrhesia.Negentropy.Sessions do
   @default_max_total_sessions 10_000
   @default_max_idle_seconds 60
   @default_sweep_interval_seconds 10
+  @default_max_items_per_session 50_000
+  @default_id_list_threshold 32
   @sweep_idle_sessions :sweep_idle_sessions

   @spec start_link(keyword()) :: GenServer.on_start()
@@ -20,16 +25,19 @@ defmodule Parrhesia.Negentropy.Sessions do
     GenServer.start_link(__MODULE__, opts, name: name)
   end

-  @spec open(GenServer.server(), pid(), String.t(), map()) :: {:ok, map()} | {:error, term()}
-  def open(server \\ __MODULE__, owner_pid, subscription_id, params)
-      when is_pid(owner_pid) and is_binary(subscription_id) and is_map(params) do
-    GenServer.call(server, {:open, owner_pid, subscription_id, params})
+  @spec open(GenServer.server(), pid(), String.t(), map(), binary(), keyword()) ::
+          {:ok, binary()} | {:error, term()}
+  def open(server \\ __MODULE__, owner_pid, subscription_id, filter, message, opts \\ [])
+      when is_pid(owner_pid) and is_binary(subscription_id) and is_map(filter) and
+             is_binary(message) and is_list(opts) do
+    GenServer.call(server, {:open, owner_pid, subscription_id, filter, message, opts})
   end

-  @spec message(GenServer.server(), pid(), String.t(), map()) :: {:ok, map()} | {:error, term()}
-  def message(server \\ __MODULE__, owner_pid, subscription_id, payload)
-      when is_pid(owner_pid) and is_binary(subscription_id) and is_map(payload) do
-    GenServer.call(server, {:message, owner_pid, subscription_id, payload})
+  @spec message(GenServer.server(), pid(), String.t(), binary()) ::
+          {:ok, binary()} | {:error, term()}
+  def message(server \\ __MODULE__, owner_pid, subscription_id, message)
+      when is_pid(owner_pid) and is_binary(subscription_id) and is_binary(message) do
+    GenServer.call(server, {:message, owner_pid, subscription_id, message})
   end

   @spec close(GenServer.server(), pid(), String.t()) :: :ok
@@ -63,7 +71,17 @@ defmodule Parrhesia.Negentropy.Sessions do
       max_total_sessions:
         normalize_positive_integer(Keyword.get(opts, :max_total_sessions), max_total_sessions()),
       max_idle_ms: max_idle_ms,
-      sweep_interval_ms: sweep_interval_ms
+      sweep_interval_ms: sweep_interval_ms,
+      max_items_per_session:
+        normalize_positive_integer(
+          Keyword.get(opts, :max_items_per_session),
+          max_items_per_session()
+        ),
+      id_list_threshold:
+        normalize_positive_integer(
+          Keyword.get(opts, :id_list_threshold),
+          id_list_threshold()
+        )
     }

     :ok = schedule_idle_sweep(sweep_interval_ms)
@@ -72,16 +90,19 @@ defmodule Parrhesia.Negentropy.Sessions do
   end

   @impl true
-  def handle_call({:open, owner_pid, subscription_id, params}, _from, state) do
+  def handle_call({:open, owner_pid, subscription_id, filter, message, opts}, _from, state) do
     key = {owner_pid, subscription_id}

-    with :ok <- validate_payload_size(params, state.max_payload_bytes),
-         :ok <- enforce_session_limits(state, owner_pid, key) do
+    with :ok <- validate_payload_size(filter, message, state.max_payload_bytes),
+         :ok <- enforce_session_limits(state, owner_pid, key),
+         {:ok, refs} <- fetch_event_refs(filter, opts, state.max_items_per_session),
+         {:ok, response} <-
+           Engine.answer(refs, message, id_list_threshold: state.id_list_threshold) do
       now_ms = System.monotonic_time(:millisecond)

       session = %{
-        cursor: 0,
-        params: params,
+        filter: filter,
+        refs: refs,
         opened_at: System.system_time(:second),
         last_active_at_ms: now_ms
       }
@@ -91,14 +112,14 @@ defmodule Parrhesia.Negentropy.Sessions do
         |> ensure_monitor(owner_pid)
         |> put_in([:sessions, key], session)

-      {:reply, {:ok, %{"status" => "open", "cursor" => 0}}, state}
+      {:reply, {:ok, response}, state}
     else
       {:error, reason} ->
         {:reply, {:error, reason}, state}
     end
   end

-  def handle_call({:message, owner_pid, subscription_id, payload}, _from, state) do
+  def handle_call({:message, owner_pid, subscription_id, message}, _from, state) do
     key = {owner_pid, subscription_id}

     case Map.get(state.sessions, key) do
@@ -106,20 +127,18 @@ defmodule Parrhesia.Negentropy.Sessions do
         {:reply, {:error, :unknown_session}, state}

       session ->
-        case validate_payload_size(payload, state.max_payload_bytes) do
-          :ok ->
-            cursor = session.cursor + 1
-
-            next_session = %{
-              session
-              | cursor: cursor,
-                last_active_at_ms: System.monotonic_time(:millisecond)
-            }
-
-            state = put_in(state, [:sessions, key], next_session)
-            {:reply, {:ok, %{"status" => "ack", "cursor" => cursor}}, state}
-
+        with :ok <- validate_payload_size(session.filter, message, state.max_payload_bytes),
+             {:ok, response} <-
+               Engine.answer(session.refs, message, id_list_threshold: state.id_list_threshold) do
+          next_session = %{
+            session
+            | last_active_at_ms: System.monotonic_time(:millisecond)
+          }
+
+          state = put_in(state, [:sessions, key], next_session)
+          {:reply, {:ok, response}, state}
+        else
           {:error, reason} ->
            {:reply, {:error, reason}, state}
        end
@@ -185,6 +204,21 @@ defmodule Parrhesia.Negentropy.Sessions do
   def handle_info(_message, state), do: {:noreply, state}

+  defp fetch_event_refs(filter, opts, max_items_per_session) do
+    query_opts =
+      opts
+      |> Keyword.take([:now, :requester_pubkeys])
+      |> Keyword.put(:limit, max_items_per_session + 1)
+
+    with {:ok, refs} <- Storage.events().query_event_refs(%{}, [filter], query_opts) do
+      if length(refs) > max_items_per_session do
+        {:error, :query_too_big}
+      else
+        {:ok, refs}
+      end
+    end
+  end
+
   defp clear_monitors_without_sessions(state, owner_pids) do
     Enum.reduce(Map.keys(state.monitors), state, fn owner_pid, acc ->
       if MapSet.member?(owner_pids, owner_pid) do
@@ -203,8 +237,8 @@ defmodule Parrhesia.Negentropy.Sessions do
     end)
   end

-  defp validate_payload_size(payload, max_payload_bytes) do
-    if :erlang.external_size(payload) <= max_payload_bytes do
+  defp validate_payload_size(filter, message, max_payload_bytes) do
+    if :erlang.external_size({filter, message}) <= max_payload_bytes do
       :ok
     else
       {:error, :payload_too_large}
@@ -296,6 +330,18 @@ defmodule Parrhesia.Negentropy.Sessions do
     |> Keyword.get(:negentropy_session_sweep_interval_seconds, @default_sweep_interval_seconds)
   end

+  defp max_items_per_session do
+    :parrhesia
+    |> Application.get_env(:limits, [])
+    |> Keyword.get(:max_negentropy_items_per_session, @default_max_items_per_session)
+  end
+
+  defp id_list_threshold do
+    :parrhesia
+    |> Application.get_env(:limits, [])
+    |> Keyword.get(:negentropy_id_list_threshold, @default_id_list_threshold)
+  end
+
   defp normalize_positive_integer(value, _default) when is_integer(value) and value > 0,
     do: value
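
The new per-session caps are read from the :limits application env; a config sketch using the keys referenced above (values illustrative):

config :parrhesia, :limits,
  max_negentropy_items_per_session: 50_000,
  negentropy_id_list_threshold: 32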

lib/parrhesia/nip43.ex Normal file
View File

@@ -0,0 +1,389 @@
defmodule Parrhesia.NIP43 do
@moduledoc false
alias Parrhesia.API.Events
alias Parrhesia.API.Identity
alias Parrhesia.API.RequestContext
alias Parrhesia.Groups.Flow
alias Parrhesia.Protocol
alias Parrhesia.Protocol.Filter
@join_request_kind 28_934
@invite_request_kind 28_935
@leave_request_kind 28_936
@add_user_kind 8_000
@remove_user_kind 8_001
@membership_list_kind 13_534
@claim_token_kind 31_943
@default_invite_ttl_seconds 900
@type publish_state ::
:ok
| %{action: :join, duplicate?: boolean(), message: String.t()}
| %{action: :leave, duplicate?: boolean(), message: String.t()}
@spec enabled?(keyword()) :: boolean()
def enabled?(opts \\ []) do
config(opts)
|> Keyword.get(:enabled, true)
|> Kernel.==(true)
end
@spec prepare_publish(map(), keyword()) :: {:ok, publish_state()} | {:error, term()}
def prepare_publish(event, opts \\ []) when is_map(event) and is_list(opts) do
if enabled?(opts) do
prepare_enabled_publish(event, opts)
else
prepare_disabled_publish(event)
end
end
@spec finalize_publish(map(), publish_state(), keyword()) ::
:ok | {:ok, String.t()} | {:error, term()}
def finalize_publish(event, publish_state, opts \\ [])
def finalize_publish(event, :ok, _opts) when is_map(event) do
case Map.get(event, "kind") do
kind when kind in [@add_user_kind, @remove_user_kind, @membership_list_kind] ->
Flow.handle_event(event)
_other ->
:ok
end
end
def finalize_publish(event, %{action: :join, duplicate?: true, message: message}, _opts)
when is_map(event) do
{:ok, message}
end
def finalize_publish(event, %{action: :join, duplicate?: false, message: message}, opts)
when is_map(event) do
opts = Keyword.put_new(opts, :now, Map.get(event, "created_at"))
:ok = Flow.handle_event(event)
publish_membership_events(Map.get(event, "pubkey"), :add, opts)
{:ok, message}
end
def finalize_publish(event, %{action: :leave, duplicate?: true, message: message}, _opts)
when is_map(event) do
{:ok, message}
end
def finalize_publish(event, %{action: :leave, duplicate?: false, message: message}, opts)
when is_map(event) do
opts = Keyword.put_new(opts, :now, Map.get(event, "created_at"))
:ok = Flow.handle_event(event)
publish_membership_events(Map.get(event, "pubkey"), :remove, opts)
{:ok, message}
end
@spec dynamic_events([map()], keyword()) :: [map()]
def dynamic_events(filters, opts \\ []) when is_list(filters) and is_list(opts) do
if enabled?(opts) and requests_invite?(filters) do
filters
|> build_invite_event(opts)
|> maybe_wrap_event()
else
[]
end
end
@spec dynamic_count([map()], keyword()) :: non_neg_integer()
def dynamic_count(filters, opts \\ []) do
filters
|> dynamic_events(opts)
|> length()
end
defp prepare_enabled_publish(%{"kind" => @join_request_kind, "pubkey" => pubkey} = event, opts)
when is_binary(pubkey) do
with {:ok, _claim} <- validate_claim_from_event(event),
{:ok, membership} <- Flow.get_membership(pubkey) do
if membership_active?(membership) do
{:ok,
%{
action: :join,
duplicate?: true,
message: "duplicate: you are already a member of this relay."
}}
else
{:ok,
%{
action: :join,
duplicate?: false,
message: "info: welcome to #{relay_url(opts)}!"
}}
end
end
end
defp prepare_enabled_publish(%{"kind" => @leave_request_kind, "pubkey" => pubkey}, _opts)
when is_binary(pubkey) do
with {:ok, membership} <- Flow.get_membership(pubkey) do
if membership_active?(membership) do
{:ok, %{action: :leave, duplicate?: false, message: "info: membership revoked."}}
else
{:ok,
%{
action: :leave,
duplicate?: true,
message: "duplicate: you are not a member of this relay."
}}
end
end
end
defp prepare_enabled_publish(%{"kind" => @invite_request_kind}, _opts) do
{:error, "restricted: kind 28935 invite claims are generated via REQ"}
end
defp prepare_enabled_publish(%{"kind" => kind, "pubkey" => pubkey}, _opts)
when kind in [@add_user_kind, @remove_user_kind, @membership_list_kind] and
is_binary(pubkey) do
case relay_pubkey() do
{:ok, ^pubkey} -> {:ok, :ok}
{:ok, _other} -> {:error, "restricted: relay access metadata must be relay-signed"}
{:error, _reason} -> {:error, "error: relay identity unavailable"}
end
end
defp prepare_enabled_publish(_event, _opts), do: {:ok, :ok}
defp prepare_disabled_publish(%{"kind" => kind})
when kind in [
@join_request_kind,
@invite_request_kind,
@leave_request_kind,
@add_user_kind,
@remove_user_kind,
@membership_list_kind
] do
{:error, "blocked: NIP-43 relay access requests are disabled"}
end
defp prepare_disabled_publish(_event), do: {:ok, :ok}
defp build_invite_event(filters, opts) do
now = Keyword.get(opts, :now, System.system_time(:second))
identity_opts = identity_opts(opts)
with {:ok, claim} <- issue_claim(now, opts),
{:ok, signed_event} <-
%{
"created_at" => now,
"kind" => @invite_request_kind,
"tags" => [["-"], ["claim", claim]],
"content" => ""
}
|> Identity.sign_event(identity_opts),
true <- Filter.matches_any?(signed_event, filters) do
{:ok, signed_event}
else
_other -> :error
end
end
defp maybe_wrap_event({:ok, event}), do: [event]
defp maybe_wrap_event(_other), do: []
defp requests_invite?(filters) do
Enum.any?(filters, fn filter ->
case Map.get(filter, "kinds") do
kinds when is_list(kinds) -> @invite_request_kind in kinds
_other -> false
end
end)
end
defp issue_claim(now, opts) do
ttl_seconds =
config(opts)
|> Keyword.get(:invite_ttl_seconds, @default_invite_ttl_seconds)
|> normalize_positive_integer(@default_invite_ttl_seconds)
identity_opts = identity_opts(opts)
token_event = %{
"created_at" => now,
"kind" => @claim_token_kind,
"tags" => [["exp", Integer.to_string(now + ttl_seconds)]],
"content" => Base.encode16(:crypto.strong_rand_bytes(16), case: :lower)
}
with {:ok, signed_token} <- Identity.sign_event(token_event, identity_opts) do
signed_token
|> JSON.encode!()
|> Base.url_encode64(padding: false)
|> then(&{:ok, &1})
end
end
defp validate_claim_from_event(event) do
claim =
event
|> Map.get("tags", [])
|> Enum.find_value(fn
["claim", value | _rest] when is_binary(value) and value != "" -> value
_tag -> nil
end)
case claim do
nil -> {:error, "restricted: that is an invalid invite code."}
value -> validate_claim(value)
end
end
defp validate_claim(claim) when is_binary(claim) do
with {:ok, payload} <- Base.url_decode64(claim, padding: false),
{:ok, decoded} <- JSON.decode(payload),
:ok <- Protocol.validate_event(decoded),
:ok <- validate_claim_token(decoded) do
{:ok, decoded}
else
{:error, :expired_claim} ->
{:error, "restricted: that invite code is expired."}
_other ->
{:error, "restricted: that is an invalid invite code."}
end
end
defp validate_claim(_claim), do: {:error, "restricted: that is an invalid invite code."}
defp validate_claim_token(%{
"kind" => @claim_token_kind,
"pubkey" => pubkey,
"tags" => tags
}) do
with {:ok, relay_pubkey} <- relay_pubkey(),
true <- pubkey == relay_pubkey,
{:ok, expires_at} <- fetch_expiration(tags),
true <- expires_at >= System.system_time(:second) do
:ok
else
false -> {:error, :invalid_claim}
{:error, _reason} -> {:error, :invalid_claim}
end
end
defp validate_claim_token(_event), do: {:error, :invalid_claim}
defp fetch_expiration(tags) when is_list(tags) do
case Enum.find(tags, &match?(["exp", _value | _rest], &1)) do
["exp", value | _rest] ->
parse_expiration(value)
_other ->
{:error, :invalid_claim}
end
end
defp parse_expiration(value) when is_binary(value) do
case Integer.parse(value) do
{expires_at, ""} when expires_at > 0 -> validate_expiration(expires_at)
_other -> {:error, :invalid_claim}
end
end
defp parse_expiration(_value), do: {:error, :invalid_claim}
defp validate_expiration(expires_at) when is_integer(expires_at) do
if expires_at >= System.system_time(:second) do
{:ok, expires_at}
else
{:error, :expired_claim}
end
end
defp validate_expiration(_expires_at), do: {:error, :expired_claim}
defp publish_membership_events(member_pubkey, action, opts) when is_binary(member_pubkey) do
now = Keyword.get(opts, :now, System.system_time(:second))
identity_opts = identity_opts(opts)
context = Keyword.get(opts, :context, %RequestContext{})
action
|> build_membership_delta_event(member_pubkey, now)
|> sign_and_publish(context, identity_opts)
current_membership_snapshot(now)
|> sign_and_publish(context, identity_opts)
:ok
end
defp build_membership_delta_event(:add, member_pubkey, now) do
%{
"created_at" => now,
"kind" => @add_user_kind,
"tags" => [["-"], ["p", member_pubkey]],
"content" => ""
}
end
defp build_membership_delta_event(:remove, member_pubkey, now) do
%{
"created_at" => now,
"kind" => @remove_user_kind,
"tags" => [["-"], ["p", member_pubkey]],
"content" => ""
}
end
defp current_membership_snapshot(now) do
tags =
case Flow.list_memberships() do
{:ok, memberships} ->
[["-"] | Enum.map(memberships, &["member", &1.pubkey])]
{:error, _reason} ->
[["-"]]
end
%{
"created_at" => now,
"kind" => @membership_list_kind,
"tags" => tags,
"content" => ""
}
end
defp sign_and_publish(unsigned_event, context, identity_opts) do
with {:ok, signed_event} <- Identity.sign_event(unsigned_event, identity_opts),
{:ok, %{accepted: true}} <- Events.publish(signed_event, context: context) do
:ok
else
_other -> :ok
end
end
defp membership_active?(nil), do: false
defp membership_active?(%{role: "member"}), do: true
defp membership_active?(_membership), do: false
defp relay_pubkey do
case Identity.get() do
{:ok, %{pubkey: pubkey}} when is_binary(pubkey) -> {:ok, pubkey}
{:error, reason} -> {:error, reason}
end
end
defp relay_url(opts) do
Keyword.get(opts, :relay_url, Application.get_env(:parrhesia, :relay_url))
end
defp identity_opts(opts) do
opts
|> Keyword.take([:path, :private_key, :configured_private_key])
end
defp config(opts) do
case Keyword.get(opts, :config) do
config when is_list(config) -> config
_other -> Application.get_env(:parrhesia, :nip43, [])
end
end
defp normalize_positive_integer(value, _default) when is_integer(value) and value > 0, do: value
defp normalize_positive_integer(_value, default), do: default
end
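
A sketch of the :nip43 config this module reads (keys as referenced above, values illustrative):

config :parrhesia, :nip43,
  enabled: true,
  invite_ttl_seconds: 900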

lib/parrhesia/nip66.ex Normal file
View File

@@ -0,0 +1,400 @@
defmodule Parrhesia.NIP66 do
@moduledoc false
alias Parrhesia.API.Events
alias Parrhesia.API.Identity
alias Parrhesia.API.RequestContext
alias Parrhesia.NIP66.Probe
alias Parrhesia.Web.Listener
alias Parrhesia.Web.RelayInfo
@default_publish_interval_seconds 900
@default_timeout_ms 5_000
@default_checks [:open, :read, :nip11]
@allowed_requirement_keys MapSet.new(~w[auth writes pow payment])
@spec enabled?(keyword()) :: boolean()
def enabled?(opts \\ []) do
config = config(opts)
config_enabled?(config) and active_targets(config, listeners(opts)) != []
end
@spec publish_snapshot(keyword()) :: {:ok, [map()]}
def publish_snapshot(opts \\ []) when is_list(opts) do
config = config(opts)
targets = active_targets(config, listeners(opts))
if config_enabled?(config) and targets != [] do
probe_fun = Keyword.get(opts, :probe_fun, &Probe.probe/3)
context = Keyword.get(opts, :context, %RequestContext{})
now = Keyword.get(opts, :now, System.system_time(:second))
identity_opts = identity_opts(opts)
events =
maybe_publish_monitor_announcement(config, now, context, identity_opts)
|> Kernel.++(
publish_discovery_events(targets, config, probe_fun, now, context, identity_opts)
)
{:ok, events}
else
{:ok, []}
end
end
@spec publish_interval_ms(keyword()) :: pos_integer()
def publish_interval_ms(opts \\ []) when is_list(opts) do
config = config(opts)
config
|> Keyword.get(:publish_interval_seconds, @default_publish_interval_seconds)
|> normalize_positive_integer(@default_publish_interval_seconds)
|> Kernel.*(1_000)
end
defp maybe_publish_monitor_announcement(config, now, context, identity_opts) do
if Keyword.get(config, :publish_monitor_announcement?, true) do
config
|> build_monitor_announcement(now)
|> sign_and_publish(context, identity_opts)
|> maybe_wrap_event()
else
[]
end
end
defp publish_discovery_events(targets, config, probe_fun, now, context, identity_opts) do
probe_opts = [
timeout_ms:
config
|> Keyword.get(:timeout_ms, @default_timeout_ms)
|> normalize_positive_integer(@default_timeout_ms),
checks: normalize_checks(Keyword.get(config, :checks, @default_checks))
]
Enum.flat_map(targets, fn target ->
probe_result =
case probe_fun.(target, probe_opts, identity_opts) do
{:ok, result} when is_map(result) -> result
_other -> %{checks: [], metrics: %{}, relay_info: nil, relay_info_body: nil}
end
target
|> build_discovery_event(now, probe_result, identity_opts)
|> sign_and_publish(context, identity_opts)
|> maybe_wrap_event()
end)
end
defp sign_and_publish(event, context, identity_opts) do
with {:ok, signed_event} <- Identity.sign_event(event, identity_opts),
{:ok, %{accepted: true}} <- Events.publish(signed_event, context: context) do
{:ok, signed_event}
else
_other -> :error
end
end
defp maybe_wrap_event({:ok, event}), do: [event]
defp maybe_wrap_event(_other), do: []
defp build_monitor_announcement(config, now) do
checks = normalize_checks(Keyword.get(config, :checks, @default_checks))
timeout_ms = Keyword.get(config, :timeout_ms, @default_timeout_ms)
frequency = Keyword.get(config, :publish_interval_seconds, @default_publish_interval_seconds)
tags =
[
[
"frequency",
Integer.to_string(
normalize_positive_integer(frequency, @default_publish_interval_seconds)
)
]
] ++
Enum.map(checks, fn check ->
["timeout", Atom.to_string(check), Integer.to_string(timeout_ms)]
end) ++
Enum.map(checks, fn check -> ["c", Atom.to_string(check)] end) ++
maybe_geohash_tag(config)
%{
"created_at" => now,
"kind" => 10_166,
"tags" => tags,
"content" => ""
}
end
defp build_discovery_event(target, now, probe_result, identity_opts) do
relay_info = probe_result[:relay_info] || local_relay_info(target.listener, identity_opts)
content = probe_result[:relay_info_body] || JSON.encode!(relay_info)
tags =
[["d", target.relay_url]]
|> append_network_tag(target)
|> append_relay_type_tag(target)
|> append_geohash_tag(target)
|> append_topic_tags(target)
|> Kernel.++(nip_tags(relay_info))
|> Kernel.++(requirement_tags(relay_info))
|> Kernel.++(rtt_tags(probe_result[:metrics] || %{}))
%{
"created_at" => now,
"kind" => 30_166,
"tags" => tags,
"content" => content
}
end
defp nip_tags(relay_info) do
relay_info
|> Map.get("supported_nips", [])
|> Enum.map(&["N", Integer.to_string(&1)])
end
defp requirement_tags(relay_info) do
limitation = Map.get(relay_info, "limitation", %{})
[
requirement_value("auth", Map.get(limitation, "auth_required", false)),
requirement_value("writes", Map.get(limitation, "restricted_writes", false)),
requirement_value("pow", Map.get(limitation, "min_pow_difficulty", 0) > 0),
requirement_value("payment", Map.get(limitation, "payment_required", false))
]
|> Enum.filter(&MapSet.member?(@allowed_requirement_keys, String.trim_leading(&1, "!")))
|> Enum.map(&["R", &1])
end
defp requirement_value(name, true), do: name
defp requirement_value(name, false), do: "!" <> name
defp rtt_tags(metrics) when is_map(metrics) do
[]
|> maybe_put_metric_tag("rtt-open", Map.get(metrics, :rtt_open_ms))
|> maybe_put_metric_tag("rtt-read", Map.get(metrics, :rtt_read_ms))
|> maybe_put_metric_tag("rtt-write", Map.get(metrics, :rtt_write_ms))
end
defp append_network_tag(tags, target) do
case target.network do
nil -> tags
value -> tags ++ [["n", value]]
end
end
defp append_relay_type_tag(tags, target) do
case target.relay_type do
nil -> tags
value -> tags ++ [["T", value]]
end
end
defp append_geohash_tag(tags, target) do
case target.geohash do
nil -> tags
value -> tags ++ [["g", value]]
end
end
defp append_topic_tags(tags, target) do
tags ++ Enum.map(target.topics, &["t", &1])
end
defp maybe_put_metric_tag(tags, _name, nil), do: tags
defp maybe_put_metric_tag(tags, name, value) when is_integer(value) and value >= 0 do
tags ++ [[name, Integer.to_string(value)]]
end
defp maybe_put_metric_tag(tags, _name, _value), do: tags
defp local_relay_info(listener, identity_opts) do
relay_info = RelayInfo.document(listener)
case Identity.get(identity_opts) do
{:ok, %{pubkey: pubkey}} ->
relay_info
|> Map.put("pubkey", pubkey)
|> Map.put("self", pubkey)
{:error, _reason} ->
relay_info
end
end
defp maybe_geohash_tag(config) do
case fetch_value(config, :geohash) do
value when is_binary(value) and value != "" -> [["g", value]]
_other -> []
end
end
defp active_targets(config, listeners) do
listeners_by_id = Map.new(listeners, &{&1.id, &1})
raw_targets =
case Keyword.get(config, :targets, []) do
[] -> [default_target()]
targets when is_list(targets) -> targets
_other -> []
end
Enum.flat_map(raw_targets, fn raw_target ->
case normalize_target(raw_target, listeners_by_id) do
{:ok, target} -> [target]
:error -> []
end
end)
end
defp normalize_target(target, listeners_by_id) when is_map(target) or is_list(target) do
listener_id = fetch_value(target, :listener) || :public
relay_url = fetch_value(target, :relay_url) || Application.get_env(:parrhesia, :relay_url)
with %{} = listener <- Map.get(listeners_by_id, normalize_listener_id(listener_id)),
true <- listener.enabled and Listener.feature_enabled?(listener, :nostr),
{:ok, normalized_relay_url} <- normalize_relay_url(relay_url) do
{:ok,
%{
listener: listener,
relay_url: normalized_relay_url,
network: normalize_network(fetch_value(target, :network), normalized_relay_url),
relay_type: normalize_optional_string(fetch_value(target, :relay_type)),
geohash: normalize_optional_string(fetch_value(target, :geohash)),
topics: normalize_string_list(fetch_value(target, :topics))
}}
else
_other -> :error
end
end
defp normalize_target(_target, _listeners_by_id), do: :error
defp normalize_relay_url(relay_url) when is_binary(relay_url) and relay_url != "" do
case URI.parse(relay_url) do
%URI{scheme: scheme, host: host} = uri
when scheme in ["ws", "wss"] and is_binary(host) and host != "" ->
normalized_uri = %URI{
uri
| scheme: String.downcase(scheme),
host: String.downcase(host),
path: normalize_path(uri.path),
query: nil,
fragment: nil,
port: normalize_port(uri.port, scheme)
}
{:ok, URI.to_string(normalized_uri)}
_other ->
:error
end
end
defp normalize_relay_url(_relay_url), do: :error
defp normalize_path(nil), do: "/"
defp normalize_path(""), do: "/"
defp normalize_path(path), do: path
defp normalize_port(80, "ws"), do: nil
defp normalize_port(443, "wss"), do: nil
defp normalize_port(port, _scheme), do: port
defp normalize_network(value, _relay_url)
when is_binary(value) and value in ["clearnet", "tor", "i2p", "loki"],
do: value
defp normalize_network(_value, relay_url) do
relay_url
|> URI.parse()
|> Map.get(:host)
|> infer_network()
end
defp infer_network(host) when is_binary(host) do
cond do
String.ends_with?(host, ".onion") -> "tor"
String.ends_with?(host, ".i2p") -> "i2p"
true -> "clearnet"
end
end
defp infer_network(_host), do: "clearnet"
defp normalize_checks(checks) when is_list(checks) do
checks
|> Enum.map(&normalize_check/1)
|> Enum.reject(&is_nil/1)
|> Enum.uniq()
end
defp normalize_checks(_checks), do: @default_checks
defp normalize_check(:open), do: :open
defp normalize_check("open"), do: :open
defp normalize_check(:read), do: :read
defp normalize_check("read"), do: :read
defp normalize_check(:nip11), do: :nip11
defp normalize_check("nip11"), do: :nip11
defp normalize_check(_check), do: nil
defp listeners(opts) do
case Keyword.get(opts, :listeners) do
listeners when is_list(listeners) -> listeners
_other -> Listener.all()
end
end
defp identity_opts(opts) do
opts
|> Keyword.take([:path, :private_key, :configured_private_key])
end
defp config(opts) do
case Keyword.get(opts, :config) do
config when is_list(config) -> config
_other -> Application.get_env(:parrhesia, :nip66, [])
end
end
defp config_enabled?(config), do: Keyword.get(config, :enabled, true)
defp default_target do
%{listener: :public, relay_url: Application.get_env(:parrhesia, :relay_url)}
end
defp normalize_listener_id(value) when is_atom(value), do: value
defp normalize_listener_id(value) when is_binary(value) do
String.to_existing_atom(value)
rescue
ArgumentError -> :public
end
defp normalize_listener_id(_value), do: :public
defp normalize_positive_integer(value, _default) when is_integer(value) and value > 0, do: value
defp normalize_positive_integer(_value, default), do: default
defp normalize_optional_string(value) when is_binary(value) and value != "", do: value
defp normalize_optional_string(_value), do: nil
defp normalize_string_list(values) when is_list(values) do
Enum.filter(values, &(is_binary(&1) and &1 != ""))
end
defp normalize_string_list(_values), do: []
defp fetch_value(map, key) when is_map(map) do
Map.get(map, key) || Map.get(map, Atom.to_string(key))
end
defp fetch_value(list, key) when is_list(list) do
if Keyword.keyword?(list), do: Keyword.get(list, key), else: nil
end
defp fetch_value(_container, _key), do: nil
end
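
A sketch of the :nip66 config as consumed above; listener id, relay URL, and topics are illustrative:

config :parrhesia, :nip66,
  enabled: true,
  publish_interval_seconds: 900,
  timeout_ms: 5_000,
  checks: [:open, :read, :nip11],
  targets: [
    %{listener: :public, relay_url: "wss://relay.example.com", topics: ["bench"]}
  ]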

View File

@@ -0,0 +1,218 @@
defmodule Parrhesia.NIP66.Probe do
@moduledoc false
alias Parrhesia.HTTP
alias Parrhesia.Sync.Transport.WebSockexClient
@type result :: %{
checks: [atom()],
metrics: map(),
relay_info: map() | nil,
relay_info_body: String.t() | nil
}
@spec probe(map(), keyword(), keyword()) :: {:ok, result()}
def probe(target, opts \\ [], publish_opts \\ [])
def probe(target, opts, _publish_opts) when is_map(target) and is_list(opts) do
timeout_ms = Keyword.get(opts, :timeout_ms, 5_000)
checks = normalize_checks(Keyword.get(opts, :checks, [:open, :read, :nip11]))
initial = %{checks: [], metrics: %{}, relay_info: nil, relay_info_body: nil}
result =
Enum.reduce(checks, initial, fn check, acc ->
merge_probe_result(acc, check_result(check, target, timeout_ms))
end)
{:ok, result}
end
def probe(_target, _opts, _publish_opts),
do: {:ok, %{checks: [], metrics: %{}, relay_info: nil, relay_info_body: nil}}
defp merge_probe_result(acc, %{check: check, metric_key: metric_key, metric_value: metric_value}) do
acc
|> Map.update!(:checks, &[check | &1])
|> Map.update!(:metrics, &Map.put(&1, metric_key, metric_value))
end
defp merge_probe_result(acc, %{
check: check,
relay_info: relay_info,
relay_info_body: relay_info_body
}) do
acc
|> Map.update!(:checks, &[check | &1])
|> Map.put(:relay_info, relay_info)
|> Map.put(:relay_info_body, relay_info_body)
end
defp merge_probe_result(acc, :skip), do: acc
defp merge_probe_result(acc, {:error, _reason}), do: acc
defp check_result(:open, target, timeout_ms) do
case measure_websocket_connect(Map.fetch!(target, :relay_url), timeout_ms) do
{:ok, metric_value} ->
%{check: :open, metric_key: :rtt_open_ms, metric_value: metric_value}
{:error, reason} ->
{:error, reason}
end
end
defp check_result(:read, %{listener: listener} = target, timeout_ms) do
if listener.auth.nip42_required do
:skip
else
case measure_websocket_read(Map.fetch!(target, :relay_url), timeout_ms) do
{:ok, metric_value} ->
%{check: :read, metric_key: :rtt_read_ms, metric_value: metric_value}
{:error, reason} ->
{:error, reason}
end
end
end
defp check_result(:nip11, target, timeout_ms) do
case fetch_nip11(Map.fetch!(target, :relay_url), timeout_ms) do
{:ok, relay_info, relay_info_body, _metric_value} ->
%{check: :nip11, relay_info: relay_info, relay_info_body: relay_info_body}
{:error, reason} ->
{:error, reason}
end
end
defp check_result(_check, _target, _timeout_ms), do: :skip
defp measure_websocket_connect(relay_url, timeout_ms) do
with {:ok, websocket} <- connect(relay_url, timeout_ms),
{:ok, metric_value} <- await_connected(websocket, timeout_ms) do
:ok = WebSockexClient.close(websocket)
{:ok, metric_value}
end
end
defp measure_websocket_read(relay_url, timeout_ms) do
with {:ok, websocket} <- connect(relay_url, timeout_ms),
{:ok, started_at} <- await_connected_started_at(websocket, timeout_ms),
:ok <- WebSockexClient.send_json(websocket, ["COUNT", "nip66-probe", %{"kinds" => [1]}]),
{:ok, metric_value} <- await_count_response(websocket, timeout_ms, started_at) do
:ok = WebSockexClient.close(websocket)
{:ok, metric_value}
end
end
defp connect(relay_url, timeout_ms) do
server = %{url: relay_url, tls: tls_config(relay_url)}
WebSockexClient.connect(self(), server, websocket_opts: [timeout: timeout_ms, protocols: nil])
end
defp await_connected(websocket, timeout_ms) do
with {:ok, started_at} <- await_connected_started_at(websocket, timeout_ms) do
{:ok, monotonic_duration_ms(started_at)}
end
end
defp await_connected_started_at(websocket, timeout_ms) do
started_at = System.monotonic_time()
receive do
{:sync_transport, ^websocket, :connected, _metadata} -> {:ok, started_at}
{:sync_transport, ^websocket, :disconnected, reason} -> {:error, reason}
after
timeout_ms -> {:error, :timeout}
end
end
defp await_count_response(websocket, timeout_ms, started_at) do
receive do
{:sync_transport, ^websocket, :frame, ["COUNT", "nip66-probe", _payload]} ->
{:ok, monotonic_duration_ms(started_at)}
{:sync_transport, ^websocket, :frame, ["CLOSED", "nip66-probe", _message]} ->
{:error, :closed}
{:sync_transport, ^websocket, :disconnected, reason} ->
{:error, reason}
after
timeout_ms -> {:error, :timeout}
end
end
defp fetch_nip11(relay_url, timeout_ms) do
started_at = System.monotonic_time()
case HTTP.get(
url: relay_info_url(relay_url),
headers: [{"accept", "application/nostr+json"}],
decode_body: false,
connect_options: [timeout: timeout_ms],
receive_timeout: timeout_ms
) do
{:ok, %Req.Response{status: 200, body: body}} when is_binary(body) ->
case JSON.decode(body) do
{:ok, relay_info} when is_map(relay_info) ->
{:ok, relay_info, body, monotonic_duration_ms(started_at)}
{:error, reason} ->
{:error, reason}
_other ->
{:error, :invalid_relay_info}
end
{:ok, %Req.Response{status: status}} ->
{:error, {:relay_info_request_failed, status}}
{:error, reason} ->
{:error, reason}
end
end
defp relay_info_url(relay_url) do
relay_url
|> URI.parse()
|> Map.update!(:scheme, fn
"wss" -> "https"
"ws" -> "http"
end)
|> URI.to_string()
end
defp tls_config(relay_url) do
case URI.parse(relay_url) do
%URI{scheme: "wss", host: host} when is_binary(host) and host != "" ->
%{mode: :required, hostname: host, pins: []}
_other ->
%{mode: :disabled}
end
end
defp normalize_checks(checks) when is_list(checks) do
checks
|> Enum.map(&normalize_check/1)
|> Enum.reject(&is_nil/1)
|> Enum.uniq()
end
defp normalize_checks(_checks), do: []
defp normalize_check(:open), do: :open
defp normalize_check("open"), do: :open
defp normalize_check(:read), do: :read
defp normalize_check("read"), do: :read
defp normalize_check(:nip11), do: :nip11
defp normalize_check("nip11"), do: :nip11
defp normalize_check(_check), do: nil
defp monotonic_duration_ms(started_at) do
System.monotonic_time()
|> Kernel.-(started_at)
|> System.convert_time_unit(:native, :millisecond)
end
end
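
For orientation, a minimal sketch of driving this probe; the target shape is an assumption inferred from the Map.fetch!/2 and listener accesses above, and the relay URL is illustrative:

target = %{
relay_url: "wss://relay.example.com",
listener: %{auth: %{nip42_required: false}}
}

# :read is skipped automatically when the listener requires NIP-42 auth.
{:ok, result} =
Parrhesia.NIP66.Probe.probe(target, timeout_ms: 3_000, checks: [:open, :read, :nip11])

# Completed checks accumulate in result.checks; timings land in result.metrics,
# e.g. result.metrics[:rtt_open_ms] for the websocket connect round trip.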

lib/parrhesia/plug.ex (new file)
View File

@@ -0,0 +1,113 @@
defmodule Parrhesia.Plug do
@moduledoc """
Official Plug interface for mounting Parrhesia HTTP/WebSocket ingress in a host app.
This plug serves the same route surface as the built-in listener endpoint:
- `GET /health`
- `GET /ready`
- `GET /relay` (NIP-11 + websocket transport)
- `POST /management`
- `GET /metrics`
## Options
* `:listener` - listener configuration used to authorize and serve requests.
Supported values:
* an atom listener id from `config :parrhesia, :listeners` (for example `:public`)
* a listener config map/keyword list (same schema as `:listeners` entries)
When a host app owns the HTTPS edge, a common pattern is:
config :parrhesia, :listeners, %{}
and mount `Parrhesia.Plug` with an explicit `:listener` map.
"""
@behaviour Plug
alias Parrhesia.Web.Listener
alias Parrhesia.Web.Router
@type listener_option :: atom() | map() | keyword()
@type option :: {:listener, listener_option()}
@spec init([option()]) :: keyword()
@impl Plug
def init(opts) do
opts = Keyword.validate!(opts, listener: :public)
listener = opts |> Keyword.fetch!(:listener) |> resolve_listener!()
[listener: listener]
end
@spec call(Plug.Conn.t(), keyword()) :: Plug.Conn.t()
@impl Plug
def call(conn, opts) do
conn
|> Listener.put_conn(opts)
|> Router.call([])
end
defp resolve_listener!(listener_id) when is_atom(listener_id) do
listeners = Application.get_env(:parrhesia, :listeners, %{})
case lookup_listener_by_id(listeners, listener_id) do
nil ->
raise ArgumentError,
"listener #{inspect(listener_id)} not found in config :parrhesia, :listeners; " <>
"configure it there or pass :listener as a map"
listener ->
Listener.from_opts(listener: listener)
end
end
defp resolve_listener!(listener) when is_map(listener) do
Listener.from_opts(listener: listener)
end
defp resolve_listener!(listener) when is_list(listener) do
if Keyword.keyword?(listener) do
Listener.from_opts(listener: Map.new(listener))
else
raise ArgumentError,
":listener keyword list must be a valid keyword configuration"
end
end
defp resolve_listener!(other) do
raise ArgumentError,
":listener must be an atom id, map, or keyword list, got: #{inspect(other)}"
end
defp lookup_listener_by_id(listeners, listener_id) when is_map(listeners) do
case Map.fetch(listeners, listener_id) do
{:ok, listener} when is_map(listener) ->
Map.put_new(listener, :id, listener_id)
{:ok, listener} when is_list(listener) ->
listener |> Map.new() |> Map.put_new(:id, listener_id)
_other ->
nil
end
end
defp lookup_listener_by_id(listeners, listener_id) when is_list(listeners) do
case Enum.find(listeners, fn
{id, _listener} -> id == listener_id
_other -> false
end) do
{^listener_id, listener} when is_map(listener) ->
Map.put_new(listener, :id, listener_id)
{^listener_id, listener} when is_list(listener) ->
listener |> Map.new() |> Map.put_new(:id, listener_id)
_other ->
nil
end
end
defp lookup_listener_by_id(_listeners, _listener_id), do: nil
end
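
A hedged sketch of mounting this plug from a host application's own router; the router module and mount path are illustrative, and init_opts is standard Plug.Router.forward/2 plumbing:

defmodule MyApp.Router do
use Plug.Router

plug :match
plug :dispatch

# Serves /nostr/health, /nostr/relay, etc. through the :public listener.
forward "/nostr", to: Parrhesia.Plug, init_opts: [listener: :public]
end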

View File

@@ -0,0 +1,68 @@
defmodule Parrhesia.Policy.ConnectionPolicy do
@moduledoc """
Connection/session-level policy checks shared by websocket and management entrypoints.
"""
alias Parrhesia.Storage
@spec authorize_remote_ip(tuple() | String.t() | nil) :: :ok | {:error, :ip_blocked}
def authorize_remote_ip(remote_ip) do
case normalize_ip(remote_ip) do
nil ->
:ok
normalized_ip ->
case Storage.moderation().ip_blocked?(%{}, normalized_ip) do
{:ok, true} -> {:error, :ip_blocked}
_other -> :ok
end
end
end
@spec authorize_authenticated_pubkey(String.t()) :: :ok | {:error, :pubkey_not_allowed}
def authorize_authenticated_pubkey(pubkey) when is_binary(pubkey) do
if allowlist_active?() do
case Storage.moderation().pubkey_allowed?(%{}, pubkey) do
{:ok, true} -> :ok
_other -> {:error, :pubkey_not_allowed}
end
else
:ok
end
end
@spec authorize_authenticated_pubkeys(MapSet.t(String.t())) ::
:ok | {:error, :auth_required | :pubkey_not_allowed}
def authorize_authenticated_pubkeys(authenticated_pubkeys) do
if allowlist_active?() do
cond do
MapSet.size(authenticated_pubkeys) == 0 ->
{:error, :auth_required}
Enum.any?(authenticated_pubkeys, &(authorize_authenticated_pubkey(&1) == :ok)) ->
:ok
true ->
{:error, :pubkey_not_allowed}
end
else
:ok
end
end
defp allowlist_active? do
case Storage.moderation().has_allowed_pubkeys?(%{}) do
{:ok, true} -> true
_other -> false
end
end
defp normalize_ip(nil), do: nil
defp normalize_ip({_, _, _, _} = remote_ip), do: :inet.ntoa(remote_ip) |> to_string()
defp normalize_ip({_, _, _, _, _, _, _, _} = remote_ip),
do: :inet.ntoa(remote_ip) |> to_string()
defp normalize_ip(remote_ip) when is_binary(remote_ip), do: remote_ip
defp normalize_ip(_remote_ip), do: nil
end
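
Usage sketch: tuple and string remote addresses normalize to the same string before the moderation lookup, and an unknown (nil) remote is treated as allowed. Assuming the address is not on the block list:

:ok = Parrhesia.Policy.ConnectionPolicy.authorize_remote_ip({127, 0, 0, 1})
:ok = Parrhesia.Policy.ConnectionPolicy.authorize_remote_ip("127.0.0.1")
:ok = Parrhesia.Policy.ConnectionPolicy.authorize_remote_ip(nil)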

View File

@@ -3,11 +3,17 @@ defmodule Parrhesia.Policy.EventPolicy do
Write/read policy checks for relay operations.
"""
alias Parrhesia.API.ACL
alias Parrhesia.API.RequestContext
alias Parrhesia.Policy.ConnectionPolicy
alias Parrhesia.Storage
@type policy_error ::
:auth_required
| :pubkey_not_allowed
| :restricted_giftwrap
| :sync_read_not_allowed
| :sync_write_not_allowed
| :marmot_group_h_tag_required
| :marmot_group_h_values_exceeded
| :marmot_group_filter_window_too_wide
@@ -33,15 +39,31 @@ defmodule Parrhesia.Policy.EventPolicy do
@spec authorize_read([map()], MapSet.t(String.t())) :: :ok | {:error, policy_error()}
def authorize_read(filters, authenticated_pubkeys) when is_list(filters) do
authorize_read(filters, authenticated_pubkeys, request_context(authenticated_pubkeys))
end
@spec authorize_read([map()], MapSet.t(String.t()), RequestContext.t()) ::
:ok | {:error, policy_error()}
def authorize_read(filters, authenticated_pubkeys, %RequestContext{} = context)
when is_list(filters) do
auth_required? = config_bool([:policies, :auth_required_for_reads], false)
cond do
match?(
{:error, _reason},
ConnectionPolicy.authorize_authenticated_pubkeys(authenticated_pubkeys)
) ->
ConnectionPolicy.authorize_authenticated_pubkeys(authenticated_pubkeys)
auth_required? and MapSet.size(authenticated_pubkeys) == 0 ->
{:error, :auth_required}
giftwrap_restricted?(filters, authenticated_pubkeys) ->
{:error, :restricted_giftwrap}
match?({:error, _reason}, authorize_sync_reads(filters, context)) ->
authorize_sync_reads(filters, context)
true ->
enforce_marmot_group_read_guardrails(filters)
end
@@ -49,8 +71,17 @@ defmodule Parrhesia.Policy.EventPolicy do
@spec authorize_write(map(), MapSet.t(String.t())) :: :ok | {:error, policy_error()}
def authorize_write(event, authenticated_pubkeys) when is_map(event) do
authorize_write(event, authenticated_pubkeys, request_context(authenticated_pubkeys))
end
@spec authorize_write(map(), MapSet.t(String.t()), RequestContext.t()) ::
:ok | {:error, policy_error()}
def authorize_write(event, authenticated_pubkeys, %RequestContext{} = context)
when is_map(event) do
checks = [
fn -> ConnectionPolicy.authorize_authenticated_pubkeys(authenticated_pubkeys) end,
fn -> maybe_require_auth_for_write(authenticated_pubkeys) end,
fn -> authorize_sync_write(event, context) end,
fn -> reject_if_pubkey_banned(event) end,
fn -> reject_if_event_banned(event) end,
fn -> enforce_pow(event) end,
@@ -69,10 +100,17 @@ defmodule Parrhesia.Policy.EventPolicy do
@spec error_message(policy_error()) :: String.t()
def error_message(:auth_required), do: "auth-required: authentication required"
def error_message(:pubkey_not_allowed), do: "restricted: authenticated pubkey is not allowed"
def error_message(:restricted_giftwrap),
do: "restricted: giftwrap access requires recipient authentication"
def error_message(:sync_read_not_allowed),
do: "restricted: sync read not allowed for authenticated pubkey"
def error_message(:sync_write_not_allowed),
do: "restricted: sync write not allowed for authenticated pubkey"
def error_message(:marmot_group_h_tag_required),
do: "restricted: kind 445 queries must include a #h tag"
@@ -143,6 +181,19 @@ defmodule Parrhesia.Policy.EventPolicy do
end
end
defp authorize_sync_reads(filters, %RequestContext{} = context) do
Enum.reduce_while(filters, :ok, fn filter, :ok ->
case ACL.check(:sync_read, filter, context: context) do
:ok -> {:cont, :ok}
{:error, reason} -> {:halt, {:error, reason}}
end
end)
end
defp authorize_sync_write(event, %RequestContext{} = context) do
ACL.check(:sync_write, event, context: context)
end
defp giftwrap_restricted?(filters, authenticated_pubkeys) do
if MapSet.size(authenticated_pubkeys) == 0 do
any_filter_targets_giftwrap?(filters)
@@ -635,19 +686,29 @@ defmodule Parrhesia.Policy.EventPolicy do
_tag -> false
end)
cond do
not protected? ->
:ok
nip43_relay_access_kind?(Map.get(event, "kind")) ->
:ok
true ->
pubkey = Map.get(event, "pubkey")
cond do
MapSet.size(authenticated_pubkeys) == 0 -> {:error, :protected_event_requires_auth}
MapSet.member?(authenticated_pubkeys, pubkey) -> :ok
true -> {:error, :protected_event_pubkey_mismatch}
end
end
end
end
defp nip43_relay_access_kind?(kind) when kind in [8_000, 8_001, 13_534, 28_934, 28_935, 28_936],
do: true
defp nip43_relay_access_kind?(_kind), do: false
defp config_bool([scope, key], default) do
case Application.get_env(:parrhesia, scope, []) |> Keyword.get(key, default) do
true -> true
@@ -672,4 +733,8 @@ defmodule Parrhesia.Policy.EventPolicy do
default
end
end
defp request_context(authenticated_pubkeys) do
%RequestContext{authenticated_pubkeys: authenticated_pubkeys}
end
end
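
A hedged sketch of the new context-aware entrypoints; the RequestContext construction mirrors request_context/1 above, and the event and pubkey values are illustrative:

context = %Parrhesia.API.RequestContext{authenticated_pubkeys: MapSet.new([pubkey])}

case Parrhesia.Policy.EventPolicy.authorize_write(event, context.authenticated_pubkeys, context) do
:ok -> :accepted
# e.g. {:error, :sync_write_not_allowed} when an ACL rule denies the capability
{:error, reason} -> {:rejected, Parrhesia.Policy.EventPolicy.error_message(reason)}
end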

View File

@@ -0,0 +1,73 @@
defmodule Parrhesia.PostgresRepos do
@moduledoc false
alias Parrhesia.Config
alias Parrhesia.ReadRepo
alias Parrhesia.Repo
@spec write() :: module()
def write, do: Repo
@spec read() :: module()
def read do
if separate_read_pool_enabled?() and is_pid(Process.whereis(ReadRepo)) do
ReadRepo
else
Repo
end
end
@spec started_repos() :: [module()]
def started_repos do
cond do
not postgres_enabled?() ->
[]
separate_read_pool_enabled?() ->
[Repo, ReadRepo]
true ->
[Repo]
end
end
@spec postgres_enabled?() :: boolean()
def postgres_enabled? do
case Process.whereis(Config) do
pid when is_pid(pid) ->
Config.get([:storage, :backend], storage_backend_default()) == :postgres
nil ->
storage_backend_default() == :postgres
end
end
@spec separate_read_pool_enabled?() :: boolean()
def separate_read_pool_enabled? do
case {postgres_enabled?(), Process.whereis(Config)} do
{false, _pid} ->
false
{true, pid} when is_pid(pid) ->
Config.get(
[:database, :separate_read_pool?],
application_default(:separate_read_pool?, false)
)
{true, nil} ->
application_default(:separate_read_pool?, false)
end
end
defp application_default(key, default) do
:parrhesia
|> Application.get_env(:database, [])
|> Keyword.get(key, default)
end
defp storage_backend_default do
:parrhesia
|> Application.get_env(:storage, [])
|> Keyword.get(:backend, :postgres)
end
end
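
Configuration sketch matching the lookups above; the pool size is illustrative:

config :parrhesia, :database, separate_read_pool?: true
config :parrhesia, Parrhesia.ReadRepo, pool_size: 20

With the read pool enabled and Parrhesia.ReadRepo actually started, read/0 returns Parrhesia.ReadRepo and otherwise falls back to Parrhesia.Repo; write/0 always returns Parrhesia.Repo.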

View File

@@ -1 +1,4 @@
Postgrex.Types.define(Parrhesia.PostgresTypes, [],
json: JSON,
moduledoc: "Custom Postgrex type definitions used by `Parrhesia.Repo` and `Parrhesia.ReadRepo`."
)

View File

@@ -1,6 +1,15 @@
defmodule Parrhesia.Protocol do
@moduledoc """
Nostr protocol message decode/encode helpers.
This module is transport-oriented: it turns websocket payloads into structured tuples and
back again.
For programmatic API calls inside the application, prefer the `Parrhesia.API.*` modules.
In particular:
- `validate_event/1` returns user-facing error strings
- `Parrhesia.API.Auth.validate_event/1` returns machine-friendly validator atoms
""" """
alias Parrhesia.Protocol.EventValidator alias Parrhesia.Protocol.EventValidator
@@ -14,8 +23,8 @@ defmodule Parrhesia.Protocol do
| {:close, String.t()}
| {:auth, event()}
| {:count, String.t(), [filter()], map()}
| {:neg_open, String.t(), filter(), binary()}
| {:neg_msg, String.t(), binary()}
| {:neg_close, String.t()}
@type relay_message ::
@@ -26,7 +35,8 @@ defmodule Parrhesia.Protocol do
| {:event, String.t(), event()}
| {:auth, String.t()}
| {:count, String.t(), map()}
| {:neg_msg, String.t(), String.t()}
| {:neg_err, String.t(), String.t()}
@type decode_error ::
:invalid_json
@@ -40,6 +50,9 @@ defmodule Parrhesia.Protocol do
@count_options_keys MapSet.new(["hll", "approximate"])
@doc """
Decodes a client websocket payload into a structured protocol tuple.
"""
@spec decode_client(binary()) :: {:ok, client_message()} | {:error, decode_error()}
def decode_client(payload) when is_binary(payload) do
with {:ok, decoded} <- decode_json(payload) do
@@ -47,6 +60,9 @@ defmodule Parrhesia.Protocol do
end
end
@doc """
Validates an event and returns relay-facing error strings.
"""
@spec validate_event(event()) :: :ok | {:error, String.t()}
def validate_event(event) do
case EventValidator.validate(event) do
@@ -55,6 +71,9 @@ defmodule Parrhesia.Protocol do
end
end
@doc """
Encodes a relay message tuple into the JSON frame sent to clients.
"""
@spec encode_relay(relay_message()) :: binary()
def encode_relay(message) do
message
@@ -62,6 +81,9 @@ defmodule Parrhesia.Protocol do
|> JSON.encode!()
end
@doc """
Converts a decode error into the relay notice string that should be sent to a client.
"""
@spec decode_error_notice(decode_error()) :: String.t()
def decode_error_notice(reason) do
case reason do
@@ -122,21 +144,25 @@ defmodule Parrhesia.Protocol do
defp decode_message(["AUTH", _invalid]), do: {:error, :invalid_auth} defp decode_message(["AUTH", _invalid]), do: {:error, :invalid_auth}
defp decode_message(["NEG-OPEN", subscription_id, payload]) defp decode_message(["NEG-OPEN", subscription_id, filter, initial_message])
when is_binary(subscription_id) and is_map(payload) do when is_binary(subscription_id) and is_map(filter) and is_binary(initial_message) do
if valid_subscription_id?(subscription_id) do with true <- valid_subscription_id?(subscription_id),
{:ok, {:neg_open, subscription_id, payload}} {:ok, decoded_message} <- decode_negentropy_hex(initial_message) do
{:ok, {:neg_open, subscription_id, filter, decoded_message}}
else else
{:error, :invalid_subscription_id} false -> {:error, :invalid_subscription_id}
{:error, _reason} -> {:error, :invalid_negentropy}
end end
end end
defp decode_message(["NEG-MSG", subscription_id, payload]) defp decode_message(["NEG-MSG", subscription_id, payload])
when is_binary(subscription_id) and is_map(payload) do when is_binary(subscription_id) and is_binary(payload) do
if valid_subscription_id?(subscription_id) do with true <- valid_subscription_id?(subscription_id),
{:ok, {:neg_msg, subscription_id, payload}} {:ok, decoded_payload} <- decode_negentropy_hex(payload) do
{:ok, {:neg_msg, subscription_id, decoded_payload}}
else else
{:error, :invalid_subscription_id} false -> {:error, :invalid_subscription_id}
{:error, _reason} -> {:error, :invalid_negentropy}
end end
end end
@@ -215,7 +241,19 @@ defmodule Parrhesia.Protocol do
defp relay_frame({:neg_msg, subscription_id, payload}),
do: ["NEG-MSG", subscription_id, payload]
defp relay_frame({:neg_err, subscription_id, reason}),
do: ["NEG-ERR", subscription_id, reason]
defp valid_subscription_id?(subscription_id) do
subscription_id != "" and String.length(subscription_id) <= 64
end
defp decode_negentropy_hex(payload) when is_binary(payload) and payload != "" do
case Base.decode16(payload, case: :mixed) do
{:ok, decoded} when decoded != <<>> -> {:ok, decoded}
_other -> {:error, :invalid_negentropy}
end
end
defp decode_negentropy_hex(_payload), do: {:error, :invalid_negentropy}
end
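
A sketch of the tightened negentropy framing, assuming the decode path shown above; the subscription ids and payloads are illustrative. Hex payloads are decoded to binaries at the protocol boundary, and malformed hex is rejected:

{:ok, {:neg_open, "sub1", %{"kinds" => [1]}, <<0xAA, 0xBB>>}} =
Parrhesia.Protocol.decode_client(~s(["NEG-OPEN","sub1",{"kinds":[1]},"aabb"]))

{:error, :invalid_negentropy} =
Parrhesia.Protocol.decode_client(~s(["NEG-MSG","sub1","zz"]))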

View File

@@ -6,6 +6,14 @@ defmodule Parrhesia.Protocol.EventValidator do
@required_fields ~w[id pubkey created_at kind tags content sig]
@max_kind 65_535
@default_max_event_future_skew_seconds 900
@default_max_tags_per_event 256
@default_nip43_request_max_age_seconds 300
@verify_event_signatures_locked Application.compile_env(
:parrhesia,
[:features, :verify_event_signatures_locked?],
false
)
@supported_mls_ciphersuites MapSet.new(~w[0x0001 0x0002 0x0003 0x0004 0x0005 0x0006 0x0007])
@required_mls_extensions MapSet.new(["0xf2ee", "0x000a"])
@supported_keypackage_ref_sizes [32, 48, 64]
@@ -17,6 +25,7 @@ defmodule Parrhesia.Protocol.EventValidator do
| :invalid_created_at
| :created_at_too_far_in_future
| :invalid_kind
| :too_many_tags
| :invalid_tags
| :invalid_content
| :invalid_sig
@@ -44,6 +53,22 @@ defmodule Parrhesia.Protocol.EventValidator do
| :missing_marmot_group_tag
| :invalid_marmot_group_tag
| :invalid_marmot_group_content
| :missing_nip66_d_tag
| :invalid_nip66_d_tag
| :invalid_nip66_discovery_tag
| :missing_nip66_frequency_tag
| :invalid_nip66_frequency_tag
| :invalid_nip66_timeout_tag
| :invalid_nip66_check_tag
| :missing_nip43_protected_tag
| :missing_nip43_claim_tag
| :invalid_nip43_claim_tag
| :missing_nip43_member_tag
| :invalid_nip43_member_tag
| :missing_nip43_pubkey_tag
| :invalid_nip43_pubkey_tag
| :stale_nip43_join_request
| :stale_nip43_leave_request
@spec validate(map()) :: :ok | {:error, error_reason()}
def validate(event) when is_map(event) do
@@ -87,6 +112,7 @@ defmodule Parrhesia.Protocol.EventValidator do
created_at_too_far_in_future:
"invalid: event creation date is too far off from the current time",
invalid_kind: "invalid: kind must be an integer between 0 and 65535",
too_many_tags: "invalid: event tags exceed configured limit",
invalid_tags: "invalid: tags must be an array of non-empty string arrays",
invalid_content: "invalid: content must be a string",
invalid_sig: "invalid: sig must be 64-byte lowercase hex",
@@ -127,7 +153,35 @@ defmodule Parrhesia.Protocol.EventValidator do
missing_marmot_group_tag: "invalid: kind 445 must include at least one h tag with a group id",
invalid_marmot_group_tag:
"invalid: kind 445 h tags must contain 32-byte lowercase hex group ids",
invalid_marmot_group_content: "invalid: kind 445 content must be non-empty base64",
missing_nip66_d_tag:
"invalid: kind 30166 must include a single [\"d\", <normalized ws/wss url or relay pubkey>] tag",
invalid_nip66_d_tag:
"invalid: kind 30166 must include a single [\"d\", <normalized ws/wss url or relay pubkey>] tag",
invalid_nip66_discovery_tag: "invalid: kind 30166 includes malformed NIP-66 discovery tags",
missing_nip66_frequency_tag:
"invalid: kind 10166 must include a single [\"frequency\", <seconds>] tag",
invalid_nip66_frequency_tag:
"invalid: kind 10166 must include a single [\"frequency\", <seconds>] tag",
invalid_nip66_timeout_tag:
"invalid: kind 10166 timeout tags must be [\"timeout\", <check>, <ms>]",
invalid_nip66_check_tag: "invalid: kind 10166 c tags must contain lowercase check names",
missing_nip43_protected_tag:
"invalid: NIP-43 events must include a NIP-70 protected [\"-\"] tag",
missing_nip43_claim_tag:
"invalid: kinds 28934 and 28935 must include a single [\"claim\", <invite code>] tag",
invalid_nip43_claim_tag:
"invalid: kinds 28934 and 28935 must include a single [\"claim\", <invite code>] tag",
missing_nip43_member_tag:
"invalid: kind 13534 must include at least one [\"member\", <hex pubkey>] tag",
invalid_nip43_member_tag:
"invalid: kind 13534 member tags must contain lowercase hex pubkeys",
missing_nip43_pubkey_tag:
"invalid: kinds 8000 and 8001 must include a single [\"p\", <hex pubkey>] tag",
invalid_nip43_pubkey_tag:
"invalid: kinds 8000 and 8001 must include a single [\"p\", <hex pubkey>] tag",
stale_nip43_join_request: "invalid: kind 28934 created_at must be recent",
stale_nip43_leave_request: "invalid: kind 28936 created_at must be recent"
}
@spec error_message(error_reason()) :: String.t()
@@ -169,16 +223,25 @@ defmodule Parrhesia.Protocol.EventValidator do
defp validate_kind(kind) when is_integer(kind) and kind >= 0 and kind <= @max_kind, do: :ok
defp validate_kind(_kind), do: {:error, :invalid_kind}
defp validate_tags(tags) when is_list(tags), do: validate_tags(tags, max_tags_per_event(), 0)
defp validate_tags(_tags), do: {:error, :invalid_tags}
defp validate_tags([], _max_tags, _count), do: :ok
defp validate_tags([tag | rest], max_tags, count) do
cond do
count + 1 > max_tags ->
{:error, :too_many_tags}
valid_tag?(tag) ->
validate_tags(rest, max_tags, count + 1)
true ->
{:error, :invalid_tags}
end
end
defp validate_content(content) when is_binary(content), do: :ok
defp validate_content(_content), do: {:error, :invalid_content}
@@ -197,7 +260,7 @@ defmodule Parrhesia.Protocol.EventValidator do
end
defp validate_signature(event) do
if @verify_event_signatures_locked or verify_event_signatures?() do
verify_signature(event)
else
:ok
@@ -240,6 +303,27 @@ defmodule Parrhesia.Protocol.EventValidator do
defp validate_kind_specific(%{"kind" => 1059} = event), defp validate_kind_specific(%{"kind" => 1059} = event),
do: validate_giftwrap_event(event) do: validate_giftwrap_event(event)
defp validate_kind_specific(%{"kind" => 30_166} = event),
do: validate_nip66_discovery_event(event)
defp validate_kind_specific(%{"kind" => 10_166} = event),
do: validate_nip66_monitor_announcement(event)
defp validate_kind_specific(%{"kind" => 13_534} = event),
do: validate_nip43_membership_list(event)
defp validate_kind_specific(%{"kind" => kind} = event) when kind in [8_000, 8_001],
do: validate_nip43_membership_delta(event)
defp validate_kind_specific(%{"kind" => 28_934} = event),
do: validate_nip43_join_request(event)
defp validate_kind_specific(%{"kind" => 28_935} = event),
do: validate_nip43_invite_response(event)
defp validate_kind_specific(%{"kind" => 28_936} = event),
do: validate_nip43_leave_request(event)
defp validate_kind_specific(_event), do: :ok
defp validate_marmot_keypackage_event(event) do
@@ -313,6 +397,184 @@ defmodule Parrhesia.Protocol.EventValidator do
end
end
defp validate_nip66_discovery_event(event) do
tags = Map.get(event, "tags", [])
with :ok <- validate_nip66_d_tag(tags),
:ok <-
validate_optional_single_string_tag_with_predicate(
tags,
"n",
:invalid_nip66_discovery_tag,
&(&1 in ["clearnet", "tor", "i2p", "loki"])
),
:ok <-
validate_optional_single_string_tag_with_predicate(
tags,
"T",
:invalid_nip66_discovery_tag,
&valid_pascal_case?/1
),
:ok <-
validate_optional_single_string_tag_with_predicate(
tags,
"g",
:invalid_nip66_discovery_tag,
&non_empty_string?/1
),
:ok <-
validate_optional_repeated_tag(
tags,
"N",
&positive_integer_string?/1,
:invalid_nip66_discovery_tag
),
:ok <-
validate_optional_repeated_tag(
tags,
"R",
&valid_nip66_requirement_value?/1,
:invalid_nip66_discovery_tag
),
:ok <-
validate_optional_repeated_tag(
tags,
"k",
&valid_nip66_kind_value?/1,
:invalid_nip66_discovery_tag
),
:ok <-
validate_optional_repeated_tag(
tags,
"t",
&non_empty_string?/1,
:invalid_nip66_discovery_tag
),
:ok <-
validate_optional_single_string_tag_with_predicate(
tags,
"rtt-open",
:invalid_nip66_discovery_tag,
&positive_integer_string?/1
),
:ok <-
validate_optional_single_string_tag_with_predicate(
tags,
"rtt-read",
:invalid_nip66_discovery_tag,
&positive_integer_string?/1
) do
validate_optional_single_string_tag_with_predicate(
tags,
"rtt-write",
:invalid_nip66_discovery_tag,
&positive_integer_string?/1
)
end
end
defp validate_nip66_monitor_announcement(event) do
tags = Map.get(event, "tags", [])
with :ok <-
validate_single_string_tag_with_predicate(
tags,
"frequency",
:missing_nip66_frequency_tag,
:invalid_nip66_frequency_tag,
&positive_integer_string?/1
),
:ok <- validate_optional_repeated_timeout_tags(tags),
:ok <-
validate_optional_repeated_tag(
tags,
"c",
&valid_nip66_check_name?/1,
:invalid_nip66_check_tag
) do
validate_optional_single_string_tag_with_predicate(
tags,
"g",
:invalid_nip66_discovery_tag,
&non_empty_string?/1
)
end
end
defp validate_nip43_membership_list(event) do
tags = Map.get(event, "tags", [])
case validate_protected_tag(tags) do
:ok -> validate_optional_repeated_pubkey_tag(tags, "member", :invalid_nip43_member_tag)
{:error, _reason} = error -> error
end
end
defp validate_nip43_membership_delta(event) do
tags = Map.get(event, "tags", [])
case validate_protected_tag(tags) do
:ok ->
validate_single_pubkey_tag(
tags,
"p",
:missing_nip43_pubkey_tag,
:invalid_nip43_pubkey_tag
)
{:error, _reason} = error ->
error
end
end
defp validate_nip43_join_request(event) do
tags = Map.get(event, "tags", [])
case validate_protected_tag(tags) do
:ok ->
with :ok <-
validate_single_string_tag_with_predicate(
tags,
"claim",
:missing_nip43_claim_tag,
:invalid_nip43_claim_tag,
&non_empty_string?/1
) do
validate_recent_created_at(event, :stale_nip43_join_request)
end
{:error, _reason} = error ->
error
end
end
defp validate_nip43_invite_response(event) do
tags = Map.get(event, "tags", [])
case validate_protected_tag(tags) do
:ok ->
validate_single_string_tag_with_predicate(
tags,
"claim",
:missing_nip43_claim_tag,
:invalid_nip43_claim_tag,
&non_empty_string?/1
)
{:error, _reason} = error ->
error
end
end
defp validate_nip43_leave_request(event) do
tags = Map.get(event, "tags", [])
case validate_protected_tag(tags) do
:ok -> validate_recent_created_at(event, :stale_nip43_leave_request)
{:error, _reason} = error -> error
end
end
defp validate_non_empty_base64_content(event),
do: validate_non_empty_base64_content(event, :invalid_marmot_keypackage_content)
@@ -394,6 +656,25 @@ defmodule Parrhesia.Protocol.EventValidator do
end
end
defp validate_optional_single_string_tag_with_predicate(
tags,
tag_name,
invalid_error,
predicate
)
when is_function(predicate, 1) do
case Enum.filter(tags, &match_tag_name?(&1, tag_name)) do
[] ->
:ok
[[^tag_name, value]] ->
if predicate.(value), do: :ok, else: {:error, invalid_error}
_other ->
{:error, invalid_error}
end
end
defp validate_mls_extensions_tag(tags) do
with {:ok, ["mls_extensions" | extensions]} <-
fetch_single_tag(tags, "mls_extensions", :missing_marmot_extensions_tag),
@@ -432,6 +713,89 @@ defmodule Parrhesia.Protocol.EventValidator do
end
end
defp validate_nip66_d_tag(tags) do
with {:ok, ["d", value]} <- fetch_single_tag(tags, "d", :missing_nip66_d_tag),
true <- valid_websocket_url?(value) or lowercase_hex?(value, 32) do
:ok
else
{:ok, _invalid_tag_shape} -> {:error, :invalid_nip66_d_tag}
false -> {:error, :invalid_nip66_d_tag}
{:error, _reason} = error -> error
end
end
defp validate_optional_repeated_timeout_tags(tags) do
timeout_tags = Enum.filter(tags, &match_tag_name?(&1, "timeout"))
if Enum.all?(timeout_tags, &valid_nip66_timeout_tag?/1) do
:ok
else
{:error, :invalid_nip66_timeout_tag}
end
end
defp validate_optional_repeated_tag(tags, tag_name, predicate, invalid_error)
when is_function(predicate, 1) do
tags
|> Enum.filter(&match_tag_name?(&1, tag_name))
|> Enum.reduce_while(:ok, fn
[^tag_name, value], :ok ->
if predicate.(value), do: {:cont, :ok}, else: {:halt, {:error, invalid_error}}
_other, :ok ->
{:halt, {:error, invalid_error}}
end)
end
defp validate_protected_tag(tags) do
if Enum.any?(tags, &match?(["-"], &1)) do
:ok
else
{:error, :missing_nip43_protected_tag}
end
end
defp validate_single_pubkey_tag(tags, tag_name, missing_error, invalid_error) do
case fetch_single_tag(tags, tag_name, missing_error) do
{:ok, [^tag_name, value]} ->
if lowercase_hex?(value, 32) do
:ok
else
{:error, invalid_error}
end
{:ok, _invalid_tag_shape} ->
{:error, invalid_error}
{:error, _reason} = error ->
error
end
end
defp validate_optional_repeated_pubkey_tag(tags, tag_name, invalid_error) do
matching_tags = Enum.filter(tags, &match_tag_name?(&1, tag_name))
if Enum.all?(matching_tags, fn
[^tag_name, pubkey | _rest] -> lowercase_hex?(pubkey, 32)
_other -> false
end) do
:ok
else
{:error, invalid_error}
end
end
defp validate_recent_created_at(%{"created_at" => created_at}, error_reason)
when is_integer(created_at) do
if created_at >= System.system_time(:second) - nip43_request_max_age_seconds() do
:ok
else
{:error, error_reason}
end
end
defp validate_recent_created_at(_event, error_reason), do: {:error, error_reason}
defp fetch_single_tag(tags, tag_name, missing_error) do
case Enum.filter(tags, &match_tag_name?(&1, tag_name)) do
[tag] -> {:ok, tag}
@@ -488,6 +852,49 @@ defmodule Parrhesia.Protocol.EventValidator do
defp valid_websocket_url?(_url), do: false
defp valid_nip66_timeout_tag?(["timeout", milliseconds]),
do: positive_integer_string?(milliseconds)
defp valid_nip66_timeout_tag?(["timeout", check, milliseconds]) do
valid_nip66_check_name?(check) and positive_integer_string?(milliseconds)
end
defp valid_nip66_timeout_tag?(_tag), do: false
defp valid_nip66_requirement_value?(value) when is_binary(value) do
normalized = String.trim_leading(value, "!")
normalized in ["auth", "writes", "pow", "payment"]
end
defp valid_nip66_requirement_value?(_value), do: false
defp valid_nip66_kind_value?(<<"!", rest::binary>>), do: positive_integer_string?(rest)
defp valid_nip66_kind_value?(value), do: positive_integer_string?(value)
defp valid_nip66_check_name?(value) when is_binary(value) do
String.match?(value, ~r/^[a-z0-9-]+$/)
end
defp valid_nip66_check_name?(_value), do: false
defp valid_pascal_case?(value) when is_binary(value) do
String.match?(value, ~r/^[A-Z][A-Za-z0-9]*$/)
end
defp valid_pascal_case?(_value), do: false
defp positive_integer_string?(value) when is_binary(value) do
case Integer.parse(value) do
{integer, ""} when integer >= 0 -> true
_other -> false
end
end
defp positive_integer_string?(_value), do: false
defp non_empty_string?(value) when is_binary(value), do: value != ""
defp non_empty_string?(_value), do: false
defp valid_keypackage_ref?(value) when is_binary(value) do
Enum.any?(@supported_keypackage_ref_sizes, &lowercase_hex?(value, &1))
end
@@ -510,4 +917,17 @@ defmodule Parrhesia.Protocol.EventValidator do
|> Application.get_env(:limits, [])
|> Keyword.get(:max_event_future_skew_seconds, @default_max_event_future_skew_seconds)
end
defp max_tags_per_event do
case Application.get_env(:parrhesia, :limits, []) |> Keyword.get(:max_tags_per_event) do
value when is_integer(value) and value > 0 -> value
_other -> @default_max_tags_per_event
end
end
defp nip43_request_max_age_seconds do
:parrhesia
|> Application.get_env(:nip43, [])
|> Keyword.get(:request_max_age_seconds, @default_nip43_request_max_age_seconds)
end
end
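
Configuration sketch for the new knobs, with illustrative values (the defaults above are 256 tags, 300 seconds, and an unlocked signature switch):

config :parrhesia, :limits, max_tags_per_event: 512
config :parrhesia, :nip43, request_max_age_seconds: 600
# Compile-time: forces signature verification on regardless of runtime config.
config :parrhesia, :features, verify_event_signatures_locked?: true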

View File

@@ -5,6 +5,7 @@ defmodule Parrhesia.Protocol.Filter do
@max_kind 65_535
@default_max_filters_per_req 16
@default_max_tag_values_per_filter 128
@type validation_error ::
:invalid_filters
@@ -19,6 +20,7 @@ defmodule Parrhesia.Protocol.Filter do
| :invalid_until
| :invalid_limit
| :invalid_search
| :too_many_tag_values
| :invalid_tag_filter
@allowed_keys MapSet.new(["ids", "authors", "kinds", "since", "until", "limit", "search"])
@@ -36,6 +38,7 @@ defmodule Parrhesia.Protocol.Filter do
invalid_until: "invalid: until must be a non-negative integer",
invalid_limit: "invalid: limit must be a positive integer",
invalid_search: "invalid: search must be a non-empty string",
too_many_tag_values: "invalid: tag filters exceed configured value limit",
invalid_tag_filter:
"invalid: tag filters must use #<single-letter> with non-empty string arrays"
}
@@ -178,19 +181,33 @@ defmodule Parrhesia.Protocol.Filter do
filter
|> Enum.filter(fn {key, _value} -> valid_tag_filter_key?(key) end)
|> Enum.reduce_while(:ok, fn {_key, values}, :ok ->
case validate_tag_filter_values(values) do
:ok -> {:cont, :ok}
{:error, reason} -> {:halt, {:error, reason}}
end
end)
end
defp validate_tag_filter_values(values) when is_list(values),
do: validate_tag_filter_values(values, max_tag_values_per_filter(), 0)
defp validate_tag_filter_values(_values), do: {:error, :invalid_tag_filter}
defp validate_tag_filter_values([], _max_values, 0), do: {:error, :invalid_tag_filter}
defp validate_tag_filter_values([], _max_values, _count), do: :ok
defp validate_tag_filter_values([value | rest], max_values, count) do
cond do
count + 1 > max_values ->
{:error, :too_many_tag_values}
is_binary(value) ->
validate_tag_filter_values(rest, max_values, count + 1)
true ->
{:error, :invalid_tag_filter}
end
end
defp filter_predicates(event, filter) do
[
@@ -278,4 +295,12 @@ defmodule Parrhesia.Protocol.Filter do
|> Application.get_env(:limits, [])
|> Keyword.get(:max_filters_per_req, @default_max_filters_per_req)
end
defp max_tag_values_per_filter do
case Application.get_env(:parrhesia, :limits, [])
|> Keyword.get(:max_tag_values_per_filter) do
value when is_integer(value) and value > 0 -> value
_other -> @default_max_tag_values_per_filter
end
end
end
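
Configuration sketch, with illustrative values; a filter whose tag list exceeds the cap now fails fast with :too_many_tag_values instead of being scanned in full:

config :parrhesia, :limits,
max_filters_per_req: 16,
max_tag_values_per_filter: 128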

View File

@@ -0,0 +1,9 @@
defmodule Parrhesia.ReadRepo do
@moduledoc """
PostgreSQL repository dedicated to read-heavy workloads when a separate read pool is enabled.
"""
use Ecto.Repo,
otp_app: :parrhesia,
adapter: Ecto.Adapters.Postgres
end

lib/parrhesia/release.ex (new file)
View File

@@ -0,0 +1,46 @@
defmodule Parrhesia.Release do
@moduledoc """
Helpers for running Ecto tasks from a production release.
Intended for use from a release `eval` command where Mix is not available:
bin/parrhesia eval "Parrhesia.Release.migrate()"
bin/parrhesia eval "Parrhesia.Release.rollback(Parrhesia.Repo, 20260101000000)"
"""
@app :parrhesia
@doc """
Runs all pending Ecto migrations for every configured repo.
"""
def migrate do
load_app()
for repo <- repos() do
{:ok, _, _} =
Ecto.Migrator.with_repo(repo, fn repo ->
Ecto.Migrator.run(repo, :up, all: true)
end)
end
end
@doc """
Rolls back the given `repo` to the specified migration `version`.
"""
def rollback(repo, version) when is_atom(repo) and is_integer(version) do
load_app()
{:ok, _, _} =
Ecto.Migrator.with_repo(repo, fn repo ->
Ecto.Migrator.run(repo, :down, to: version)
end)
end
defp load_app do
Application.load(@app)
end
defp repos do
Application.fetch_env!(@app, :ecto_repos)
end
end

View File

@@ -1,6 +1,9 @@
defmodule Parrhesia.Repo do
@moduledoc """
PostgreSQL repository for write traffic and storage adapter persistence.
Separated from `Parrhesia.ReadRepo` so that ingest writes and read-heavy
queries use independent connection pools.
""" """
use Ecto.Repo, use Ecto.Repo,

lib/parrhesia/runtime.ex (new file)
View File

@@ -0,0 +1,52 @@
defmodule Parrhesia.Runtime do
@moduledoc """
Top-level Parrhesia supervisor.
In normal standalone use, the `:parrhesia` application starts this supervisor automatically.
Host applications can also embed it directly under their own supervision tree:
children = [
{Parrhesia.Runtime, name: Parrhesia.Supervisor}
]
Parrhesia currently assumes a single runtime per BEAM node and uses globally registered
process names for core services.
"""
use Supervisor
@doc """
Starts the Parrhesia runtime supervisor.
Accepts a `:name` option (defaults to `Parrhesia.Supervisor`).
"""
def start_link(opts \\ []) do
name = Keyword.get(opts, :name, Parrhesia.Supervisor)
Supervisor.start_link(__MODULE__, opts, name: name)
end
@impl true
def init(_opts) do
Supervisor.init(children(), strategy: :one_for_one)
end
@doc """
Returns the list of child specifications started by the runtime supervisor.
"""
def children do
[
Parrhesia.Telemetry,
Parrhesia.ConnectionStats,
Parrhesia.Config,
Parrhesia.Web.EventIngestLimiter,
Parrhesia.Web.IPEventIngestLimiter,
Parrhesia.Storage.Supervisor,
Parrhesia.Subscriptions.Supervisor,
Parrhesia.Auth.Supervisor,
Parrhesia.Sync.Supervisor,
Parrhesia.Policy.Supervisor,
Parrhesia.Web.Endpoint,
Parrhesia.Tasks.Supervisor
]
end
end

View File

@@ -4,24 +4,46 @@ defmodule Parrhesia.Storage do
Domain/runtime code should resolve behavior modules through this module instead of
depending on concrete adapter implementations directly.
Each accessor validates that the configured module is loaded and declares the expected
behaviour before returning it.
""" """
@default_modules [ @default_modules [
events: Parrhesia.Storage.Adapters.Postgres.Events, events: Parrhesia.Storage.Adapters.Postgres.Events,
acl: Parrhesia.Storage.Adapters.Postgres.ACL,
moderation: Parrhesia.Storage.Adapters.Postgres.Moderation,
groups: Parrhesia.Storage.Adapters.Postgres.Groups,
admin: Parrhesia.Storage.Adapters.Postgres.Admin
]
@doc """
Returns the configured events storage module.
"""
@spec events() :: module()
def events, do: fetch_module!(:events, Parrhesia.Storage.Events)
@doc """
Returns the configured moderation storage module.
"""
@spec moderation() :: module()
def moderation, do: fetch_module!(:moderation, Parrhesia.Storage.Moderation)
@doc """
Returns the configured ACL storage module.
"""
@spec acl() :: module()
def acl, do: fetch_module!(:acl, Parrhesia.Storage.ACL)
@doc """
Returns the configured groups storage module.
"""
@spec groups() :: module()
def groups, do: fetch_module!(:groups, Parrhesia.Storage.Groups)
@doc """
Returns the configured admin storage module.
"""
@spec admin() :: module()
def admin, do: fetch_module!(:admin, Parrhesia.Storage.Admin)

View File

@@ -0,0 +1,14 @@
defmodule Parrhesia.Storage.ACL do
@moduledoc """
Storage callbacks for persisted ACL rules.
"""
@type context :: map()
@type rule :: map()
@type opts :: keyword()
@type reason :: term()
@callback put_rule(context(), rule()) :: {:ok, rule()} | {:error, reason()}
@callback delete_rule(context(), map()) :: :ok | {:error, reason()}
@callback list_rules(context(), opts()) :: {:ok, [rule()]} | {:error, reason()}
end
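
Callers are expected to go through the storage facade rather than a concrete adapter; the option names below are the ones the memory adapter filters on:

{:ok, rules} =
Parrhesia.Storage.acl().list_rules(%{}, principal_type: :pubkey, capability: :sync_read)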

View File

@@ -0,0 +1,157 @@
defmodule Parrhesia.Storage.Adapters.Memory.ACL do
@moduledoc """
In-memory prototype adapter for `Parrhesia.Storage.ACL`.
"""
alias Parrhesia.Storage.Adapters.Memory.Store
@behaviour Parrhesia.Storage.ACL
@impl true
def put_rule(_context, rule) when is_map(rule) do
with {:ok, normalized_rule} <- normalize_rule(rule) do
Store.get_and_update(fn state -> put_rule_in_state(state, normalized_rule) end)
end
end
def put_rule(_context, _rule), do: {:error, :invalid_acl_rule}
@impl true
def delete_rule(_context, selector) when is_map(selector) do
case normalize_delete_selector(selector) do
{:ok, {:id, id}} ->
Store.update(fn state ->
%{state | acl_rules: Enum.reject(state.acl_rules, &(&1.id == id))}
end)
:ok
{:ok, {:exact, rule}} ->
Store.update(fn state ->
%{state | acl_rules: Enum.reject(state.acl_rules, &same_rule?(&1, rule))}
end)
:ok
{:error, reason} ->
{:error, reason}
end
end
def delete_rule(_context, _selector), do: {:error, :invalid_acl_rule}
@impl true
def list_rules(_context, opts) when is_list(opts) do
rules =
Store.get(fn state -> Enum.reverse(state.acl_rules) end)
|> Enum.filter(fn rule ->
matches_principal_type?(rule, Keyword.get(opts, :principal_type)) and
matches_principal?(rule, Keyword.get(opts, :principal)) and
matches_capability?(rule, Keyword.get(opts, :capability))
end)
{:ok, rules}
end
def list_rules(_context, _opts), do: {:error, :invalid_opts}
defp put_rule_in_state(state, normalized_rule) do
case Enum.find(state.acl_rules, &same_rule?(&1, normalized_rule)) do
nil ->
next_id = state.next_acl_rule_id
persisted_rule = Map.put(normalized_rule, :id, next_id)
{{:ok, persisted_rule},
%{
state
| acl_rules: [persisted_rule | state.acl_rules],
next_acl_rule_id: next_id + 1
}}
existing_rule ->
{{:ok, existing_rule}, state}
end
end
defp matches_principal_type?(_rule, nil), do: true
defp matches_principal_type?(rule, principal_type), do: rule.principal_type == principal_type
defp matches_principal?(_rule, nil), do: true
defp matches_principal?(rule, principal), do: rule.principal == principal
defp matches_capability?(_rule, nil), do: true
defp matches_capability?(rule, capability), do: rule.capability == capability
defp same_rule?(left, right) do
left.principal_type == right.principal_type and
left.principal == right.principal and
left.capability == right.capability and
left.match == right.match
end
defp normalize_delete_selector(%{"id" => id}), do: normalize_delete_selector(%{id: id})
defp normalize_delete_selector(%{id: id}) when is_integer(id) and id > 0,
do: {:ok, {:id, id}}
defp normalize_delete_selector(selector) do
case normalize_rule(selector) do
{:ok, rule} -> {:ok, {:exact, rule}}
{:error, reason} -> {:error, reason}
end
end
defp normalize_rule(rule) when is_map(rule) do
with {:ok, principal_type} <- normalize_principal_type(fetch(rule, :principal_type)),
{:ok, principal} <- normalize_principal(fetch(rule, :principal)),
{:ok, capability} <- normalize_capability(fetch(rule, :capability)),
{:ok, match} <- normalize_match(fetch(rule, :match)) do
{:ok,
%{
principal_type: principal_type,
principal: principal,
capability: capability,
match: match
}}
end
end
defp normalize_rule(_rule), do: {:error, :invalid_acl_rule}
defp normalize_principal_type(:pubkey), do: {:ok, :pubkey}
defp normalize_principal_type("pubkey"), do: {:ok, :pubkey}
defp normalize_principal_type(_value), do: {:error, :invalid_acl_principal_type}
defp normalize_principal(value) when is_binary(value) and byte_size(value) == 64,
do: {:ok, String.downcase(value)}
defp normalize_principal(_value), do: {:error, :invalid_acl_principal}
defp normalize_capability(:sync_read), do: {:ok, :sync_read}
defp normalize_capability(:sync_write), do: {:ok, :sync_write}
defp normalize_capability("sync_read"), do: {:ok, :sync_read}
defp normalize_capability("sync_write"), do: {:ok, :sync_write}
defp normalize_capability(_value), do: {:error, :invalid_acl_capability}
defp normalize_match(match) when is_map(match) do
normalized_match =
Enum.reduce(match, %{}, fn
{key, values}, acc when is_binary(key) ->
Map.put(acc, key, values)
{key, values}, acc when is_atom(key) ->
Map.put(acc, Atom.to_string(key), values)
_entry, acc ->
acc
end)
{:ok, normalized_match}
end
defp normalize_match(_match), do: {:error, :invalid_acl_match}
defp fetch(map, key) do
Map.get(map, key) || Map.get(map, Atom.to_string(key))
end
end
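
A usage sketch against the memory adapter; the pubkey and match map are illustrative:

rule = %{
principal_type: :pubkey,
principal: String.duplicate("a", 64),
capability: :sync_write,
match: %{"kinds" => [30_166]}
}

{:ok, persisted} = Parrhesia.Storage.Adapters.Memory.ACL.put_rule(%{}, rule)
# Re-inserting the same rule is idempotent and returns the existing row.
{:ok, ^persisted} = Parrhesia.Storage.Adapters.Memory.ACL.put_rule(%{}, rule)
:ok = Parrhesia.Storage.Adapters.Memory.ACL.delete_rule(%{}, %{id: persisted.id})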

View File

@@ -6,6 +6,9 @@ defmodule Parrhesia.Storage.Adapters.Memory.Admin do
alias Parrhesia.Storage.Adapters.Memory.Store
@behaviour Parrhesia.Storage.Admin
@default_limit 100
@max_limit 1_000
@max_audit_logs 1_000
@impl true
def execute(_context, method, _params) do
@@ -17,18 +20,59 @@ defmodule Parrhesia.Storage.Adapters.Memory.Admin do
@impl true
def append_audit_log(_context, audit_entry) when is_map(audit_entry) do
Store.update(fn state ->
update_in(state.audit_logs, fn logs ->
[audit_entry | logs] |> Enum.take(@max_audit_logs)
end)
end)
:ok
end
def append_audit_log(_context, _audit_entry), do: {:error, :invalid_audit_entry}
@impl true
def list_audit_logs(_context, opts) when is_list(opts) do
limit = normalize_limit(Keyword.get(opts, :limit, @default_limit))
method = normalize_method_filter(Keyword.get(opts, :method))
actor_pubkey = Keyword.get(opts, :actor_pubkey)
logs =
Store.get(fn state ->
state.audit_logs
|> Enum.filter(&matches_filters?(&1, method, actor_pubkey))
|> Enum.take(limit)
end)
{:ok, logs}
end
def list_audit_logs(_context, _opts), do: {:error, :invalid_opts}
defp normalize_method(method) when is_binary(method), do: method
defp normalize_method(method) when is_atom(method), do: Atom.to_string(method)
defp normalize_method(method), do: inspect(method)
defp normalize_limit(limit) when is_integer(limit) and limit > 0, do: min(limit, @max_limit)
defp normalize_limit(_limit), do: @default_limit
defp normalize_method_filter(nil), do: nil
defp normalize_method_filter(method), do: normalize_method(method)
defp matches_method?(_entry, nil), do: true
defp matches_method?(entry, method) do
normalize_method(Map.get(entry, :method) || Map.get(entry, "method")) == method
end
defp matches_actor_pubkey?(_entry, nil), do: true
defp matches_actor_pubkey?(entry, actor_pubkey) do
Map.get(entry, :actor_pubkey) == actor_pubkey or
Map.get(entry, "actor_pubkey") == actor_pubkey
end
defp matches_filters?(entry, method, actor_pubkey) do
matches_method?(entry, method) and matches_actor_pubkey?(entry, actor_pubkey)
end
end
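
A sketch of the new filtered listing; the method name and the admin_pubkey variable are illustrative:

{:ok, logs} =
Parrhesia.Storage.Adapters.Memory.Admin.list_audit_logs(%{},
limit: 50,
method: :ban_pubkey,
actor_pubkey: admin_pubkey
)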

View File

@@ -12,53 +12,75 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
def put_event(_context, event) do
event_id = Map.fetch!(event, "id")
case Store.put_event(event_id, event) do
:ok -> {:ok, event}
{:error, :duplicate_event} -> {:error, :duplicate_event}
end
end
@impl true
def get_event(_context, event_id) do
case Store.get_event(event_id) do
{:ok, _event, true} -> {:ok, nil}
{:ok, event, false} -> {:ok, event}
:error -> {:ok, nil}
end
end
@impl true
def query(_context, filters, opts) do
with :ok <- Filter.validate_filters(filters) do
requester_pubkeys = Keyword.get(opts, :requester_pubkeys, [])
events =
filters
|> Enum.flat_map(&matching_events_for_filter(&1, requester_pubkeys, opts))
|> deduplicate_events()
|> sort_events()
|> maybe_apply_query_limit(opts)
{:ok, events}
end
end
@impl true
def query_event_refs(_context, filters, opts) do
with :ok <- Filter.validate_filters(filters) do
requester_pubkeys = Keyword.get(opts, :requester_pubkeys, [])
query_opts = Keyword.put(opts, :apply_filter_limits?, false)
{_, refs} =
reduce_unique_matching_events(
filters,
requester_pubkeys,
query_opts,
{MapSet.new(), []},
&append_unique_event_ref/2
)
refs =
refs |> Enum.sort(&(compare_event_refs(&1, &2) != :gt)) |> maybe_limit_event_refs(opts)
{:ok, refs}
end
end
@impl true
def count(_context, filters, opts) do
with :ok <- Filter.validate_filters(filters) do
requester_pubkeys = Keyword.get(opts, :requester_pubkeys, [])
query_opts = Keyword.put(opts, :apply_filter_limits?, false)
{_seen_ids, count} =
reduce_unique_matching_events(
filters,
requester_pubkeys,
query_opts,
{MapSet.new(), 0},
&count_unique_event/2
)
{:ok, count}
end end
end end
@@ -89,22 +111,14 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
    end)

  coordinate_delete_ids =
    delete_coordinates
    |> coordinate_delete_candidates(deleter_pubkey)
    |> Enum.filter(&matches_delete_coordinate?(&1, delete_coordinates, deleter_pubkey))
    |> Enum.map(& &1["id"])

  all_delete_ids = Enum.uniq(delete_event_ids ++ coordinate_delete_ids)

  Enum.each(all_delete_ids, &Store.mark_deleted/1)

  {:ok, length(all_delete_ids)}
end
@@ -114,18 +128,11 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
  pubkey = Map.get(event, "pubkey")

  deleted_ids =
    pubkey
    |> vanish_candidates(Map.get(event, "created_at"))
    |> Enum.map(& &1["id"])

  Enum.each(deleted_ids, &Store.mark_deleted/1)

  {:ok, length(deleted_ids)}
end
@@ -189,4 +196,328 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
    _tag -> false
  end)
end
defp compare_event_refs(left, right) do
cond do
left.created_at < right.created_at -> :lt
left.created_at > right.created_at -> :gt
left.id < right.id -> :lt
left.id > right.id -> :gt
true -> :eq
end
end
defp maybe_limit_event_refs(refs, opts) do
case Keyword.get(opts, :limit) do
limit when is_integer(limit) and limit > 0 -> Enum.take(refs, limit)
_other -> refs
end
end
defp matching_events_for_filter(filter, requester_pubkeys, opts) do
cond do
Map.has_key?(filter, "ids") ->
direct_id_lookup_events(filter, requester_pubkeys, opts)
indexed_candidate_spec(filter) != nil ->
indexed_tag_lookup_events(filter, requester_pubkeys, opts)
true ->
scan_filter_matches(filter, requester_pubkeys, opts)
end
end
defp direct_id_lookup_events(filter, requester_pubkeys, opts) do
filter
|> Map.get("ids", [])
|> Enum.reduce([], fn event_id, acc ->
maybe_prepend_direct_lookup_match(acc, event_id, filter, requester_pubkeys)
end)
|> deduplicate_events()
|> sort_events()
|> maybe_take_filter_limit(filter, opts)
end
defp scan_filter_matches(filter, requester_pubkeys, opts) do
limit =
if Keyword.get(opts, :apply_filter_limits?, true) do
effective_filter_limit(filter, opts)
else
nil
end
{matches, _count} =
Store.reduce_events_newest(
{[], 0},
&reduce_scan_match(&1, &2, filter, requester_pubkeys, limit)
)
matches
|> Enum.reverse()
|> sort_events()
end
defp indexed_tag_lookup_events(filter, requester_pubkeys, opts) do
filter
|> indexed_candidate_events()
|> Enum.filter(&filter_match_visible?(&1, filter, requester_pubkeys))
|> maybe_take_filter_limit(filter, opts)
end
defp indexed_tag_filter(filter) do
filter
|> Enum.filter(fn
{"#" <> _tag_name, values} when is_list(values) -> values != []
_entry -> false
end)
|> Enum.sort_by(fn {key, _values} -> key end)
|> List.first()
|> case do
{"#" <> tag_name, values} -> {tag_name, values}
nil -> nil
end
end
defp indexed_candidate_spec(filter) do
authors = Map.get(filter, "authors")
kinds = Map.get(filter, "kinds")
tag_filter = indexed_tag_filter(filter)
cond do
is_tuple(tag_filter) ->
{tag_name, tag_values} = tag_filter
{:tag, tag_name, effective_indexed_tag_values(filter, tag_values)}
is_list(authors) and is_list(kinds) ->
{:pubkey_kind, authors, kinds}
is_list(authors) ->
{:pubkey, authors}
is_list(kinds) ->
{:kind, kinds}
true ->
nil
end
end
defp indexed_candidate_events(filter) do
case indexed_candidate_spec(filter) do
{:tag, tag_name, tag_values} ->
Store.tagged_events(tag_name, tag_values)
{:pubkey_kind, authors, kinds} ->
Store.events_by_pubkeys_and_kinds(authors, kinds)
{:pubkey, authors} ->
Store.events_by_pubkeys(authors)
{:kind, kinds} ->
Store.events_by_kinds(kinds)
nil ->
[]
end
end
defp effective_indexed_tag_values(filter, tag_values) do
case Map.get(filter, "limit") do
limit when is_integer(limit) and limit == 1 ->
Enum.take(tag_values, 1)
_other ->
tag_values
end
end
defp filter_match_visible?(event, filter, requester_pubkeys) do
Filter.matches_filter?(event, filter) and
giftwrap_visible_to_requester?(event, requester_pubkeys)
end
defp maybe_prepend_direct_lookup_match(acc, event_id, filter, requester_pubkeys) do
case Store.get_event(event_id) do
{:ok, event, false} ->
if filter_match_visible?(event, filter, requester_pubkeys) do
[event | acc]
else
acc
end
_other ->
acc
end
end
defp reduce_scan_match(event, {acc, count}, filter, requester_pubkeys, limit) do
if filter_match_visible?(event, filter, requester_pubkeys) do
maybe_halt_scan([event | acc], count + 1, limit)
else
{acc, count}
end
end
defp maybe_halt_scan(acc, count, limit) when is_integer(limit) and count >= limit do
{:halt, {acc, count}}
end
defp maybe_halt_scan(acc, count, _limit), do: {acc, count}
defp reduce_unique_matching_events(filters, requester_pubkeys, opts, acc, reducer) do
Enum.reduce(filters, acc, fn filter, current_acc ->
reduce_matching_events_for_filter(filter, requester_pubkeys, opts, current_acc, reducer)
end)
end
defp reduce_matching_events_for_filter(filter, requester_pubkeys, _opts, acc, reducer) do
cond do
Map.has_key?(filter, "ids") ->
filter
|> Map.get("ids", [])
|> Enum.reduce(acc, &reduce_event_id_match(&1, filter, requester_pubkeys, &2, reducer))
indexed_candidate_spec(filter) != nil ->
filter
|> indexed_candidate_events()
|> Enum.reduce(
acc,
&maybe_reduce_visible_event(&1, filter, requester_pubkeys, &2, reducer)
)
true ->
Store.reduce_events_newest(
acc,
&maybe_reduce_visible_event(&1, filter, requester_pubkeys, &2, reducer)
)
end
end
defp coordinate_delete_candidates(delete_coordinates, deleter_pubkey) do
delete_coordinates
|> Enum.flat_map(fn coordinate ->
cond do
coordinate.pubkey != deleter_pubkey ->
[]
addressable_kind?(coordinate.kind) ->
Store.events_by_addresses([{coordinate.kind, deleter_pubkey, coordinate.d_tag}])
replaceable_kind?(coordinate.kind) ->
Store.events_by_pubkeys_and_kinds([deleter_pubkey], [coordinate.kind])
true ->
[]
end
end)
|> deduplicate_events()
end
defp vanish_candidates(pubkey, created_at) do
own_events =
Store.events_by_pubkeys([pubkey])
|> Enum.filter(&(&1["created_at"] <= created_at))
giftwrap_events =
Store.tagged_events("p", [pubkey])
|> Enum.filter(&(&1["kind"] == 1059 and &1["created_at"] <= created_at))
deduplicate_events(own_events ++ giftwrap_events)
end
defp event_ref(event) do
%{
created_at: Map.fetch!(event, "created_at"),
id: Base.decode16!(Map.fetch!(event, "id"), case: :mixed)
}
end
defp append_unique_event_ref(event, {seen_ids, acc}) do
reduce_unique_event(event, {seen_ids, acc}, fn _event_id, next_seen_ids ->
{next_seen_ids, [event_ref(event) | acc]}
end)
end
defp count_unique_event(event, {seen_ids, acc}) do
reduce_unique_event(event, {seen_ids, acc}, fn _event_id, next_seen_ids ->
{next_seen_ids, acc + 1}
end)
end
defp reduce_unique_event(event, {seen_ids, acc}, fun) do
event_id = Map.fetch!(event, "id")
if MapSet.member?(seen_ids, event_id) do
{seen_ids, acc}
else
fun.(event_id, MapSet.put(seen_ids, event_id))
end
end
defp maybe_reduce_visible_event(event, filter, requester_pubkeys, acc, reducer) do
if filter_match_visible?(event, filter, requester_pubkeys) do
reducer.(event, acc)
else
acc
end
end
defp reduce_event_id_match(event_id, filter, requester_pubkeys, acc, reducer) do
case Store.get_event(event_id) do
{:ok, event, false} ->
maybe_reduce_visible_event(event, filter, requester_pubkeys, acc, reducer)
_other ->
acc
end
end
defp deduplicate_events(events) do
events
|> Enum.reduce(%{}, fn event, acc -> Map.put(acc, event["id"], event) end)
|> Map.values()
end
defp sort_events(events) do
Enum.sort(events, &chronological_sorter/2)
end
defp chronological_sorter(left, right) do
cond do
left["created_at"] > right["created_at"] -> true
left["created_at"] < right["created_at"] -> false
true -> left["id"] < right["id"]
end
end
defp maybe_apply_query_limit(events, opts) do
case Keyword.get(opts, :limit) do
limit when is_integer(limit) and limit > 0 -> Enum.take(events, limit)
_other -> events
end
end
defp maybe_take_filter_limit(events, filter, opts) do
case effective_filter_limit(filter, opts) do
limit when is_integer(limit) and limit > 0 -> Enum.take(events, limit)
_other -> events
end
end
defp effective_filter_limit(filter, opts) do
max_filter_limit = Keyword.get(opts, :max_filter_limit)
case Map.get(filter, "limit") do
limit
when is_integer(limit) and limit > 0 and is_integer(max_filter_limit) and
max_filter_limit > 0 ->
min(limit, max_filter_limit)
limit when is_integer(limit) and limit > 0 ->
limit
_other ->
nil
end
end
end
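
The rewritten Memory adapter plans each filter instead of scanning the whole event map: "ids" filters become direct Store.get_event/1 lookups, filters with an indexed candidate spec hit the tag/pubkey/kind ETS indexes, and everything else falls back to a newest-first scan that halts at the effective limit. A sketch with hypothetical filters, one per plan:

alias Parrhesia.Storage.Adapters.Memory.Events

by_id = %{"ids" => [String.duplicate("a", 64)]}
by_tag = %{"#p" => [String.duplicate("b", 64)]}
by_author_kind = %{"authors" => [String.duplicate("c", 64)], "kinds" => [1]}
newest_scan = %{"since" => 1_700_000_000, "limit" => 50}

# Results from all plans are deduplicated by id, sorted newest-first,
# and capped by the :limit option when present.
{:ok, events} =
  Events.query(%{}, [by_id, by_tag, by_author_kind, newest_scan],
    requester_pubkeys: [],
    limit: 100
  )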


@@ -33,6 +33,11 @@ defmodule Parrhesia.Storage.Adapters.Memory.Moderation do
{:ok, Store.get(fn state -> MapSet.member?(state.allowed_pubkeys, pubkey) end)}
end
@impl true
def has_allowed_pubkeys?(_context) do
{:ok, Store.get(fn state -> MapSet.size(state.allowed_pubkeys) > 0 end)}
end
@impl true
def ban_event(_context, event_id), do: update_ban_set(:events, event_id, :add)
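
has_allowed_pubkeys?/1 is new here; it lets callers distinguish "no allowlist configured" from "pubkey not on the allowlist" without reading the whole set. A sketch of the intended gating (the surrounding policy is an assumption; only the callback itself appears in this diff):

alias Parrhesia.Storage.Adapters.Memory.Moderation

case Moderation.has_allowed_pubkeys?(%{}) do
  {:ok, false} -> :open_relay          # no allowlist: accept writes
  {:ok, true} -> :allowlist_enforced   # check each writer against the set
end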


@@ -4,45 +4,430 @@ defmodule Parrhesia.Storage.Adapters.Memory.Store do
use Agent

@name __MODULE__
@events_table :parrhesia_memory_events
@events_by_time_table :parrhesia_memory_events_by_time
@events_by_tag_table :parrhesia_memory_events_by_tag
@events_by_pubkey_table :parrhesia_memory_events_by_pubkey
@events_by_kind_table :parrhesia_memory_events_by_kind
@events_by_pubkey_kind_table :parrhesia_memory_events_by_pubkey_kind
@events_by_address_table :parrhesia_memory_events_by_address
@initial_state %{
  bans: %{pubkeys: MapSet.new(), events: MapSet.new(), ips: MapSet.new()},
  allowed_pubkeys: MapSet.new(),
  acl_rules: [],
  next_acl_rule_id: 1,
  groups: %{},
  roles: %{},
  audit_logs: []
}

def start_link(opts \\ []) do
  name = Keyword.get(opts, :name, @name)
  Agent.start_link(&init_state/0, name: name)
end

def ensure_started do
  case Process.whereis(@name) do
    pid when is_pid(pid) -> :ok
    nil -> start_store()
  end
end

def put_event(event_id, event) when is_binary(event_id) and is_map(event) do
  :ok = ensure_started()

  if :ets.insert_new(@events_table, {event_id, event, false}) do
    true = :ets.insert(@events_by_time_table, {{sort_key(event), event_id}, event_id})
    index_event_tags(event_id, event)
    index_event_secondary_keys(event_id, event)
    :ok
  else
    {:error, :duplicate_event}
  end
end

def get_event(event_id) when is_binary(event_id) do
  :ok = ensure_started()

  case :ets.lookup(@events_table, event_id) do
    [{^event_id, event, deleted?}] -> {:ok, event, deleted?}
    [] -> :error
  end
end
def mark_deleted(event_id) when is_binary(event_id) do
:ok = ensure_started()
case lookup_event(event_id) do
{:ok, event, false} ->
true = :ets.insert(@events_table, {event_id, event, true})
true = :ets.delete(@events_by_time_table, {sort_key(event), event_id})
unindex_event_tags(event_id, event)
unindex_event_secondary_keys(event_id, event)
:ok
{:ok, _event, true} ->
:ok
:error ->
:ok
end
end
def reduce_events(acc, fun) when is_function(fun, 2) do
:ok = ensure_started()
:ets.foldl(
fn {_event_id, event, deleted?}, current_acc ->
if deleted? do
current_acc
else
fun.(event, current_acc)
end
end,
acc,
@events_table
)
end
def reduce_events_newest(acc, fun) when is_function(fun, 2) do
:ok = ensure_started()
reduce_events_newest_from(:ets.first(@events_by_time_table), acc, fun)
end
def tagged_events(tag_name, tag_values) when is_binary(tag_name) and is_list(tag_values) do
:ok = ensure_started()
tag_values
|> Enum.flat_map(&indexed_events_for_value(@events_by_tag_table, {tag_name, &1}))
|> sort_and_deduplicate_events()
end
def events_by_pubkeys(pubkeys) when is_list(pubkeys) do
:ok = ensure_started()
pubkeys
|> Enum.flat_map(&indexed_events_for_value(@events_by_pubkey_table, &1))
|> sort_and_deduplicate_events()
end
def events_by_kinds(kinds) when is_list(kinds) do
:ok = ensure_started()
kinds
|> Enum.flat_map(&indexed_events_for_value(@events_by_kind_table, &1))
|> sort_and_deduplicate_events()
end
def events_by_pubkeys_and_kinds(pubkeys, kinds) when is_list(pubkeys) and is_list(kinds) do
:ok = ensure_started()
pubkeys
|> Enum.flat_map(fn pubkey ->
kinds
|> Enum.flat_map(&indexed_events_for_value(@events_by_pubkey_kind_table, {pubkey, &1}))
end)
|> sort_and_deduplicate_events()
end
def events_by_addresses(addresses) when is_list(addresses) do
:ok = ensure_started()
addresses
|> Enum.flat_map(&indexed_events_for_value(@events_by_address_table, &1))
|> sort_and_deduplicate_events()
end
defp reduce_events_newest_from(:"$end_of_table", acc, _fun), do: acc
defp reduce_events_newest_from(key, acc, fun) do
next_key = :ets.next(@events_by_time_table, key)
acc = reduce_indexed_event(key, acc, fun)
case acc do
{:halt, final_acc} -> final_acc
next_acc -> reduce_events_newest_from(next_key, next_acc, fun)
end
end
defp reduce_indexed_event(key, acc, fun) do
case :ets.lookup(@events_by_time_table, key) do
[{^key, event_id}] -> apply_reduce_fun(event_id, acc, fun)
[] -> acc
end
end
defp apply_reduce_fun(event_id, acc, fun) do
case lookup_event(event_id) do
{:ok, event, false} -> normalize_reduce_result(fun.(event, acc))
_other -> acc
end
end
defp normalize_reduce_result({:halt, next_acc}), do: {:halt, next_acc}
defp normalize_reduce_result(next_acc), do: next_acc
def get(fun) do
  with_store(fn pid -> Agent.get(pid, fun) end)
end

def update(fun) do
  with_store(fn pid -> Agent.update(pid, fun) end)
end

def get_and_update(fun) do
  with_store(fn pid -> Agent.get_and_update(pid, fun) end)
end
defp start_store do
case start_link() do
{:ok, _pid} ->
:ok
{:error, {:already_started, pid}} ->
if Process.alive?(pid) do
:ok
else
wait_for_store_exit(pid)
end
{:error, reason} ->
{:error, reason}
end
end
defp with_store(fun, attempts \\ 2)
defp with_store(fun, attempts) when attempts > 0 do
:ok = ensure_started()
case Process.whereis(@name) do
pid when is_pid(pid) ->
try do
fun.(pid)
catch
:exit, reason ->
if noproc_exit?(reason) and attempts > 1 do
with_store(fun, attempts - 1)
else
exit(reason)
end
end
nil ->
with_store(fun, attempts - 1)
end
end
defp with_store(_fun, 0), do: exit(:noproc)
defp wait_for_store_exit(pid) do
ref = Process.monitor(pid)
receive do
{:DOWN, ^ref, :process, ^pid, _reason} -> start_store()
after
100 -> start_store()
end
end
defp noproc_exit?({:noproc, _details}), do: true
defp noproc_exit?(_reason), do: false
defp init_state do
ensure_tables_started()
@initial_state
end
defp ensure_tables_started do
ensure_table(@events_table, [
:named_table,
:public,
:set,
read_concurrency: true,
write_concurrency: true
])
ensure_table(@events_by_time_table, [
:named_table,
:public,
:ordered_set,
read_concurrency: true,
write_concurrency: true
])
ensure_table(@events_by_tag_table, [
:named_table,
:public,
:bag,
read_concurrency: true,
write_concurrency: true
])
ensure_table(@events_by_pubkey_table, [
:named_table,
:public,
:bag,
read_concurrency: true,
write_concurrency: true
])
ensure_table(@events_by_kind_table, [
:named_table,
:public,
:bag,
read_concurrency: true,
write_concurrency: true
])
ensure_table(@events_by_pubkey_kind_table, [
:named_table,
:public,
:bag,
read_concurrency: true,
write_concurrency: true
])
ensure_table(@events_by_address_table, [
:named_table,
:public,
:bag,
read_concurrency: true,
write_concurrency: true
])
end
defp ensure_table(name, options) do
case :ets.whereis(name) do
:undefined -> :ets.new(name, options)
_table -> :ok
end
end
defp lookup_event(event_id) do
case :ets.lookup(@events_table, event_id) do
[{^event_id, event, deleted?}] -> {:ok, event, deleted?}
[] -> :error
end
end
defp index_event_tags(event_id, event) do
event
|> event_tag_index_entries(event_id)
|> Enum.each(fn entry ->
true = :ets.insert(@events_by_tag_table, entry)
end)
end
defp index_event_secondary_keys(event_id, event) do
event
|> secondary_index_entries(event_id)
|> Enum.each(fn {table, entry} ->
true = :ets.insert(table, entry)
end)
end
defp unindex_event_tags(event_id, event) do
event
|> event_tag_index_entries(event_id)
|> Enum.each(&:ets.delete_object(@events_by_tag_table, &1))
end
defp unindex_event_secondary_keys(event_id, event) do
event
|> secondary_index_entries(event_id)
|> Enum.each(fn {table, entry} ->
:ets.delete_object(table, entry)
end)
end
defp event_tag_index_entries(event, event_id) do
created_sort_key = sort_key(event)
event
|> Map.get("tags", [])
|> Enum.flat_map(fn
[tag_name, tag_value | _rest] when is_binary(tag_name) and is_binary(tag_value) ->
[{{tag_name, tag_value}, created_sort_key, event_id}]
_tag ->
[]
end)
end
defp secondary_index_entries(event, event_id) do
created_sort_key = sort_key(event)
pubkey = Map.get(event, "pubkey")
kind = Map.get(event, "kind")
[]
|> maybe_put_secondary_entry(@events_by_pubkey_table, pubkey, created_sort_key, event_id)
|> maybe_put_secondary_entry(@events_by_kind_table, kind, created_sort_key, event_id)
|> maybe_put_pubkey_kind_entry(pubkey, kind, created_sort_key, event_id)
|> maybe_put_address_entry(event, pubkey, kind, event_id)
end
defp maybe_put_secondary_entry(entries, _table, key, _created_sort_key, _event_id)
when is_nil(key),
do: entries
defp maybe_put_secondary_entry(entries, table, key, created_sort_key, event_id) do
[{table, {key, created_sort_key, event_id}} | entries]
end
defp maybe_put_pubkey_kind_entry(entries, pubkey, kind, created_sort_key, event_id)
when is_binary(pubkey) and is_integer(kind) do
[{@events_by_pubkey_kind_table, {{pubkey, kind}, created_sort_key, event_id}} | entries]
end
defp maybe_put_pubkey_kind_entry(entries, _pubkey, _kind, _created_sort_key, _event_id),
do: entries
defp maybe_put_address_entry(entries, event, pubkey, kind, event_id)
when is_binary(pubkey) and is_integer(kind) and kind >= 30_000 and kind < 40_000 do
d_tag =
event
|> Map.get("tags", [])
|> Enum.find_value("", fn
["d", value | _rest] -> value
_tag -> nil
end)
[{@events_by_address_table, {{kind, pubkey, d_tag}, sort_key(event), event_id}} | entries]
end
defp maybe_put_address_entry(entries, _event, _pubkey, _kind, _event_id), do: entries
defp indexed_events_for_value(_table, value)
when not is_binary(value) and not is_integer(value) and not is_tuple(value),
do: []
defp indexed_events_for_value(table, value) do
table
|> :ets.lookup(value)
|> Enum.reduce([], fn {^value, _created_sort_key, event_id}, acc ->
case lookup_event(event_id) do
{:ok, event, false} -> [event | acc]
_other -> acc
end
end)
end
defp sort_and_deduplicate_events(events) do
events
|> Enum.uniq_by(& &1["id"])
|> Enum.sort(&chronological_sorter/2)
end
defp chronological_sorter(left, right) do
cond do
left["created_at"] > right["created_at"] -> true
left["created_at"] < right["created_at"] -> false
true -> left["id"] < right["id"]
end
end
defp sort_key(event), do: -Map.get(event, "created_at", 0)
end
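
With this rewrite the Store keeps events in named public ETS tables (a set for the rows, an ordered_set keyed by {-created_at, id} for newest-first traversal, and bags for the tag, pubkey, kind, pubkey+kind and address indexes), while the Agent holds only the non-event moderation state. A round-trip sketch with a hypothetical event:

alias Parrhesia.Storage.Adapters.Memory.Store

event = %{"id" => "e1", "pubkey" => "p1", "kind" => 1, "created_at" => 1, "tags" => []}

:ok = Store.put_event("e1", event)
{:error, :duplicate_event} = Store.put_event("e1", event)
{:ok, ^event, false} = Store.get_event("e1")

# mark_deleted/1 tombstones the row and drops it from every index,
# so it no longer appears in scans or index lookups.
:ok = Store.mark_deleted("e1")
{:ok, ^event, true} = Store.get_event("e1")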


@@ -0,0 +1,279 @@
defmodule Parrhesia.Storage.Adapters.Postgres.ACL do
@moduledoc """
PostgreSQL-backed implementation for `Parrhesia.Storage.ACL`.
"""
import Ecto.Query
alias Parrhesia.PostgresRepos
alias Parrhesia.Repo
@behaviour Parrhesia.Storage.ACL
@impl true
def put_rule(_context, rule) when is_map(rule) do
with {:ok, normalized_rule} <- normalize_rule(rule) do
normalized_rule
|> find_matching_rule()
|> maybe_insert_rule(normalized_rule)
end
end
def put_rule(_context, _rule), do: {:error, :invalid_acl_rule}
defp maybe_insert_rule(nil, normalized_rule), do: insert_rule(normalized_rule)
defp maybe_insert_rule(existing_rule, _normalized_rule), do: {:ok, existing_rule}
@impl true
def delete_rule(_context, selector) when is_map(selector) do
case normalize_delete_selector(selector) do
{:ok, {:id, id}} ->
query = from(rule in "acl_rules", where: rule.id == ^id)
{_deleted, _result} = Repo.delete_all(query)
:ok
{:ok, {:exact, rule}} ->
query =
from(stored_rule in "acl_rules",
where:
stored_rule.principal_type == ^rule.principal_type and
stored_rule.principal == ^rule.principal and
stored_rule.capability == ^rule.capability and
stored_rule.match == ^rule.match
)
{_deleted, _result} = Repo.delete_all(query)
:ok
{:error, reason} ->
{:error, reason}
end
end
def delete_rule(_context, _selector), do: {:error, :invalid_acl_rule}
@impl true
def list_rules(_context, opts) when is_list(opts) do
query =
from(rule in "acl_rules",
order_by: [
asc: rule.principal_type,
asc: rule.principal,
asc: rule.capability,
asc: rule.id
],
select: %{
id: rule.id,
principal_type: rule.principal_type,
principal: rule.principal,
capability: rule.capability,
match: rule.match,
inserted_at: rule.inserted_at
}
)
|> maybe_filter_principal_type(Keyword.get(opts, :principal_type))
|> maybe_filter_principal(Keyword.get(opts, :principal))
|> maybe_filter_capability(Keyword.get(opts, :capability))
repo = read_repo()
{:ok, Enum.map(repo.all(query), &normalize_persisted_rule/1)}
end
def list_rules(_context, _opts), do: {:error, :invalid_opts}
defp maybe_filter_principal_type(query, nil), do: query
defp maybe_filter_principal_type(query, principal_type) when is_atom(principal_type) do
maybe_filter_principal_type(query, Atom.to_string(principal_type))
end
defp maybe_filter_principal_type(query, principal_type) when is_binary(principal_type) do
where(query, [rule], rule.principal_type == ^principal_type)
end
defp maybe_filter_principal_type(query, _principal_type), do: query
defp maybe_filter_principal(query, nil), do: query
defp maybe_filter_principal(query, principal) when is_binary(principal) do
case decode_hex_or_binary(principal, 32, :invalid_acl_principal) do
{:ok, decoded_principal} -> where(query, [rule], rule.principal == ^decoded_principal)
{:error, _reason} -> where(query, [rule], false)
end
end
defp maybe_filter_principal(query, _principal), do: query
defp maybe_filter_capability(query, nil), do: query
defp maybe_filter_capability(query, capability) when is_atom(capability) do
maybe_filter_capability(query, Atom.to_string(capability))
end
defp maybe_filter_capability(query, capability) when is_binary(capability) do
where(query, [rule], rule.capability == ^capability)
end
defp maybe_filter_capability(query, _capability), do: query
defp find_matching_rule(normalized_rule) do
query =
from(stored_rule in "acl_rules",
where:
stored_rule.principal_type == ^normalized_rule.principal_type and
stored_rule.principal == ^normalized_rule.principal and
stored_rule.capability == ^normalized_rule.capability and
stored_rule.match == ^normalized_rule.match,
limit: 1,
select: %{
id: stored_rule.id,
principal_type: stored_rule.principal_type,
principal: stored_rule.principal,
capability: stored_rule.capability,
match: stored_rule.match,
inserted_at: stored_rule.inserted_at
}
)
repo = read_repo()
case repo.one(query) do
nil -> nil
stored_rule -> normalize_persisted_rule(stored_rule)
end
end
defp read_repo, do: PostgresRepos.read()
defp insert_rule(normalized_rule) do
now = DateTime.utc_now() |> DateTime.truncate(:microsecond)
row = %{
principal_type: normalized_rule.principal_type,
principal: normalized_rule.principal,
capability: normalized_rule.capability,
match: normalized_rule.match,
inserted_at: now
}
case Repo.insert_all("acl_rules", [row], returning: [:id, :inserted_at]) do
{1, [inserted_row]} ->
{:ok, normalize_persisted_rule(Map.merge(row, Map.new(inserted_row)))}
_other ->
{:error, :acl_rule_insert_failed}
end
end
defp normalize_persisted_rule(rule) do
%{
id: rule.id,
principal_type: normalize_principal_type(rule.principal_type),
principal: Base.encode16(rule.principal, case: :lower),
capability: normalize_capability(rule.capability),
match: normalize_match(rule.match),
inserted_at: rule.inserted_at
}
end
defp normalize_delete_selector(%{"id" => id}), do: normalize_delete_selector(%{id: id})
defp normalize_delete_selector(%{id: id}) when is_integer(id) and id > 0,
do: {:ok, {:id, id}}
defp normalize_delete_selector(selector) do
case normalize_rule(selector) do
{:ok, normalized_rule} -> {:ok, {:exact, normalized_rule}}
{:error, reason} -> {:error, reason}
end
end
defp normalize_rule(rule) when is_map(rule) do
with {:ok, principal_type} <- normalize_principal_type_value(fetch(rule, :principal_type)),
{:ok, principal} <-
decode_hex_or_binary(fetch(rule, :principal), 32, :invalid_acl_principal),
{:ok, capability} <- normalize_capability_value(fetch(rule, :capability)),
{:ok, match} <- normalize_match_value(fetch(rule, :match)) do
{:ok,
%{
principal_type: principal_type,
principal: principal,
capability: capability,
match: match
}}
end
end
defp normalize_rule(_rule), do: {:error, :invalid_acl_rule}
defp normalize_principal_type("pubkey"), do: :pubkey
defp normalize_principal_type(principal_type), do: principal_type
defp normalize_capability("sync_read"), do: :sync_read
defp normalize_capability("sync_write"), do: :sync_write
defp normalize_capability(capability), do: capability
defp normalize_principal_type_value(:pubkey), do: {:ok, "pubkey"}
defp normalize_principal_type_value("pubkey"), do: {:ok, "pubkey"}
defp normalize_principal_type_value(_principal_type), do: {:error, :invalid_acl_principal_type}
defp normalize_capability_value(:sync_read), do: {:ok, "sync_read"}
defp normalize_capability_value(:sync_write), do: {:ok, "sync_write"}
defp normalize_capability_value("sync_read"), do: {:ok, "sync_read"}
defp normalize_capability_value("sync_write"), do: {:ok, "sync_write"}
defp normalize_capability_value(_capability), do: {:error, :invalid_acl_capability}
defp normalize_match_value(match) when is_map(match) do
normalized_match =
Enum.reduce(match, %{}, fn
{key, values}, acc when is_binary(key) ->
Map.put(acc, key, values)
{key, values}, acc when is_atom(key) ->
Map.put(acc, Atom.to_string(key), values)
_entry, acc ->
acc
end)
{:ok, normalize_match(normalized_match)}
end
defp normalize_match_value(_match), do: {:error, :invalid_acl_match}
defp normalize_match(match) when is_map(match) do
Enum.reduce(match, %{}, fn
{key, values}, acc when is_binary(key) and is_list(values) ->
Map.put(acc, key, Enum.uniq(values))
{key, value}, acc when is_binary(key) ->
Map.put(acc, key, value)
_entry, acc ->
acc
end)
end
defp normalize_match(_match), do: %{}
defp fetch(map, key) do
Map.get(map, key) || Map.get(map, Atom.to_string(key))
end
defp decode_hex_or_binary(value, expected_bytes, _reason)
when is_binary(value) and byte_size(value) == expected_bytes,
do: {:ok, value}
defp decode_hex_or_binary(value, expected_bytes, reason) when is_binary(value) do
if byte_size(value) == expected_bytes * 2 do
case Base.decode16(value, case: :mixed) do
{:ok, decoded} -> {:ok, decoded}
:error -> {:error, reason}
end
else
{:error, reason}
end
end
defp decode_hex_or_binary(_value, _expected_bytes, reason), do: {:error, reason}
end
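
The ACL adapter normalizes before it stores: the principal may arrive as a raw 32-byte binary or as 64-character hex, principal types and capabilities as atoms or strings, and put_rule/2 is idempotent because an identical existing row is returned instead of re-inserted. A usage sketch (hypothetical principal):

alias Parrhesia.Storage.Adapters.Postgres.ACL

pubkey_hex = String.duplicate("ab", 32)

{:ok, rule} =
  ACL.put_rule(%{}, %{
    principal_type: :pubkey,
    principal: pubkey_hex,
    capability: :sync_read,
    # duplicate values are dropped by normalize_match/1
    match: %{"kinds" => [1, 1, 7]}
  })

{:ok, rules} = ACL.list_rules(%{}, principal: pubkey_hex, capability: :sync_read)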


@@ -5,6 +5,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Admin do
import Ecto.Query

alias Parrhesia.PostgresRepos
alias Parrhesia.Repo

@behaviour Parrhesia.Storage.Admin
@@ -20,6 +21,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Admin do
case method_name do
  "ping" -> {:ok, %{"status" => "ok"}}
  "stats" -> {:ok, relay_stats()}
  "supportedmethods" -> {:ok, %{"methods" => supported_methods()}}
  "list_audit_logs" -> list_audit_logs(%{}, audit_list_opts(params))
  _other -> execute_moderation_method(moderation, method_name, params)
end
@@ -72,8 +74,8 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Admin do
|> maybe_filter_actor_pubkey(Keyword.get(opts, :actor_pubkey))

logs =
  read_repo()
  |> then(fn repo -> repo.all(query) end)
  |> Enum.map(&to_audit_log_map/1)

{:ok, logs}
@@ -82,17 +84,39 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Admin do
def list_audit_logs(_context, _opts), do: {:error, :invalid_opts}

defp relay_stats do
  repo = read_repo()

  events_count = repo.aggregate("events", :count, :id)
  banned_pubkeys = repo.aggregate("banned_pubkeys", :count, :pubkey)
  allowed_pubkeys = repo.aggregate("allowed_pubkeys", :count, :pubkey)
  blocked_ips = repo.aggregate("blocked_ips", :count, :ip)
  acl_rules = repo.aggregate("acl_rules", :count, :id)

  %{
    "events" => events_count,
    "banned_pubkeys" => banned_pubkeys,
    "allowed_pubkeys" => allowed_pubkeys,
    "acl_rules" => acl_rules,
    "blocked_ips" => blocked_ips
  }
end
defp supported_methods do
[
"allow_pubkey",
"ban_event",
"ban_pubkey",
"block_ip",
"disallow_pubkey",
"list_audit_logs",
"ping",
"stats",
"supportedmethods",
"unban_event",
"unban_pubkey",
"unblock_ip"
]
end
defp execute_moderation_method(moderation, "ban_pubkey", params),
  do: execute_pubkey_method(fn ctx, value -> moderation.ban_pubkey(ctx, value) end, params)
@@ -212,6 +236,8 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Admin do
defp normalize_pubkey(_value), do: {:error, :invalid_actor_pubkey}

defp read_repo, do: PostgresRepos.read()

defp invalid_key_reason(:params), do: :invalid_params
defp invalid_key_reason(:result), do: :invalid_result
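
The admin surface now answers "supportedmethods" itself and reports allowlist and ACL counts in "stats". The dispatching function's head sits outside this hunk, so execute_method/3 below is a stand-in name for it:

# Stand-in call shape; moderation is the configured moderation module.
{:ok, %{"methods" => methods}} = execute_method(moderation, "supportedmethods", %{})
true = "supportedmethods" in methods

{:ok, stats} = execute_method(moderation, "stats", %{})
# stats adds "allowed_pubkeys" and "acl_rules" next to the existing
# "events", "banned_pubkeys" and "blocked_ips" counters.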


@@ -5,10 +5,16 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
import Ecto.Query

alias Parrhesia.PostgresRepos
alias Parrhesia.Protocol.Filter
alias Parrhesia.Repo

@behaviour Parrhesia.Storage.Events
@trigram_fallback_max_single_term_length 4
@trigram_fallback_pattern ~r/[^\p{L}\p{N}\s"]/u
@fts_match_fragment "to_tsvector('simple', ?) @@ websearch_to_tsquery('simple', ?)"
@fts_rank_fragment "ts_rank_cd(to_tsvector('simple', ?), websearch_to_tsquery('simple', ?))"
@trigram_rank_fragment "word_similarity(lower(?), lower(?))"
@type normalized_event :: %{
        id: binary(),
@@ -62,7 +68,9 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
    }
  )

repo = read_repo()

case repo.one(event_query) do
  nil ->
    {:ok, nil}
@@ -76,16 +84,17 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
def query(_context, filters, opts) when is_list(opts) do
  with :ok <- Filter.validate_filters(filters) do
    now = Keyword.get(opts, :now, System.system_time(:second))
    repo = read_repo()

    persisted_events =
      filters
      |> Enum.flat_map(fn filter ->
        filter
        |> event_query_for_filter(now, opts)
        |> repo.all()
      end)
      |> deduplicate_events()
      |> sort_persisted_events(filters)
      |> maybe_apply_query_limit(opts)

    {:ok, Enum.map(persisted_events, &to_nostr_event/1)}
@@ -94,21 +103,21 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
def query(_context, _filters, _opts), do: {:error, :invalid_opts}

@impl true
def query_event_refs(_context, filters, opts) when is_list(opts) do
  with :ok <- Filter.validate_filters(filters) do
    now = Keyword.get(opts, :now, System.system_time(:second))
    {:ok, fetch_event_refs(filters, now, opts)}
  end
end

def query_event_refs(_context, _filters, _opts), do: {:error, :invalid_opts}

@impl true
def count(_context, filters, opts) when is_list(opts) do
  with :ok <- Filter.validate_filters(filters) do
    now = Keyword.get(opts, :now, System.system_time(:second))
    {:ok, count_events(filters, now, opts)}
  end
end
@@ -360,30 +369,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
defp maybe_upsert_replaceable_state(normalized_event, now, deleted_at) do
  if replaceable_kind?(normalized_event.kind) do
    upsert_replaceable_state_table(normalized_event, now, deleted_at)
  else
    :ok
  end
@@ -391,159 +377,94 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
defp maybe_upsert_addressable_state(normalized_event, now, deleted_at) do
  if addressable_kind?(normalized_event.kind) do
    upsert_addressable_state_table(normalized_event, now, deleted_at)
  else
    :ok
  end
end
defp upsert_replaceable_state_table(normalized_event, now, deleted_at) do
  params = [
    normalized_event.pubkey,
    normalized_event.kind,
    normalized_event.created_at,
    normalized_event.id,
    now,
    now
  ]

  case Repo.query(replaceable_state_upsert_sql(), params) do
    {:ok, %{rows: [row]}} ->
      finalize_state_upsert(row, normalized_event, deleted_at, :replaceable_state_update_failed)

    {:ok, _result} ->
      Repo.rollback(:replaceable_state_update_failed)

    {:error, _reason} ->
      Repo.rollback(:replaceable_state_update_failed)
  end
end

defp upsert_addressable_state_table(normalized_event, now, deleted_at) do
  params = [
    normalized_event.pubkey,
    normalized_event.kind,
    normalized_event.d_tag,
    normalized_event.created_at,
    normalized_event.id,
    now,
    now
  ]

  case Repo.query(addressable_state_upsert_sql(), params) do
    {:ok, %{rows: [row]}} ->
      finalize_state_upsert(row, normalized_event, deleted_at, :addressable_state_update_failed)

    {:ok, _result} ->
      Repo.rollback(:addressable_state_update_failed)

    {:error, _reason} ->
      Repo.rollback(:addressable_state_update_failed)
  end
end

defp finalize_state_upsert(
       [retired_event_created_at, retired_event_id, winner_event_created_at, winner_event_id],
       normalized_event,
       deleted_at,
       failure_reason
     ) do
  case {winner_event_created_at, winner_event_id} do
    {created_at, event_id}
    when created_at == normalized_event.created_at and event_id == normalized_event.id ->
      maybe_retire_previous_state_event(
        retired_event_created_at,
        retired_event_id,
        deleted_at,
        failure_reason
      )

    {_created_at, _event_id} ->
      retire_event!(
        normalized_event.created_at,
        normalized_event.id,
        deleted_at,
        failure_reason
      )
  end
end

defp maybe_retire_previous_state_event(nil, nil, _deleted_at, _failure_reason), do: :ok

defp maybe_retire_previous_state_event(
       retired_event_created_at,
       retired_event_id,
       deleted_at,
       failure_reason
     ) do
  retire_event!(retired_event_created_at, retired_event_id, deleted_at, failure_reason)
end

defp retire_event!(event_created_at, event_id, deleted_at, failure_reason) do
  {updated, _result} =
    Repo.update_all(
@@ -567,27 +488,147 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
defp addressable_kind?(kind), do: kind >= 30_000 and kind < 40_000

defp replaceable_state_upsert_sql do
  """
  WITH inserted AS (
    INSERT INTO replaceable_event_state (
      pubkey,
      kind,
      event_created_at,
      event_id,
      inserted_at,
      updated_at
    )
    VALUES ($1, $2, $3, $4, $5, $6)
    ON CONFLICT (pubkey, kind) DO NOTHING
    RETURNING
      NULL::bigint AS retired_event_created_at,
      NULL::bytea AS retired_event_id,
      event_created_at AS winner_event_created_at,
      event_id AS winner_event_id
  ),
  updated AS (
    UPDATE replaceable_event_state AS state
    SET
      event_created_at = $3,
      event_id = $4,
      updated_at = $6
    FROM (
      SELECT current.event_created_at, current.event_id
      FROM replaceable_event_state AS current
      WHERE current.pubkey = $1 AND current.kind = $2
      FOR UPDATE
    ) AS previous
    WHERE
      NOT EXISTS (SELECT 1 FROM inserted)
      AND state.pubkey = $1
      AND state.kind = $2
      AND (
        state.event_created_at < $3
        OR (state.event_created_at = $3 AND state.event_id > $4)
      )
    RETURNING
      previous.event_created_at AS retired_event_created_at,
      previous.event_id AS retired_event_id,
      state.event_created_at AS winner_event_created_at,
      state.event_id AS winner_event_id
  ),
  current AS (
    SELECT
      NULL::bigint AS retired_event_created_at,
      NULL::bytea AS retired_event_id,
      state.event_created_at AS winner_event_created_at,
      state.event_id AS winner_event_id
    FROM replaceable_event_state AS state
    WHERE
      NOT EXISTS (SELECT 1 FROM inserted)
      AND NOT EXISTS (SELECT 1 FROM updated)
      AND state.pubkey = $1
      AND state.kind = $2
  )
  SELECT *
  FROM inserted
  UNION ALL
  SELECT *
  FROM updated
  UNION ALL
  SELECT *
  FROM current
  LIMIT 1
  """
end
defp addressable_state_upsert_sql do
  """
  WITH inserted AS (
    INSERT INTO addressable_event_state (
      pubkey,
      kind,
      d_tag,
      event_created_at,
      event_id,
      inserted_at,
      updated_at
    )
    VALUES ($1, $2, $3, $4, $5, $6, $7)
    ON CONFLICT (pubkey, kind, d_tag) DO NOTHING
    RETURNING
      NULL::bigint AS retired_event_created_at,
      NULL::bytea AS retired_event_id,
      event_created_at AS winner_event_created_at,
      event_id AS winner_event_id
  ),
  updated AS (
    UPDATE addressable_event_state AS state
    SET
      event_created_at = $4,
      event_id = $5,
      updated_at = $7
    FROM (
      SELECT current.event_created_at, current.event_id
      FROM addressable_event_state AS current
      WHERE current.pubkey = $1 AND current.kind = $2 AND current.d_tag = $3
      FOR UPDATE
    ) AS previous
    WHERE
      NOT EXISTS (SELECT 1 FROM inserted)
      AND state.pubkey = $1
      AND state.kind = $2
      AND state.d_tag = $3
      AND (
        state.event_created_at < $4
        OR (state.event_created_at = $4 AND state.event_id > $5)
      )
    RETURNING
      previous.event_created_at AS retired_event_created_at,
      previous.event_id AS retired_event_id,
      state.event_created_at AS winner_event_created_at,
      state.event_id AS winner_event_id
  ),
  current AS (
    SELECT
      NULL::bigint AS retired_event_created_at,
      NULL::bytea AS retired_event_id,
      state.event_created_at AS winner_event_created_at,
      state.event_id AS winner_event_id
    FROM addressable_event_state AS state
    WHERE
      NOT EXISTS (SELECT 1 FROM inserted)
      AND NOT EXISTS (SELECT 1 FROM updated)
      AND state.pubkey = $1
      AND state.kind = $2
      AND state.d_tag = $3
  )
  SELECT *
  FROM inserted
  UNION ALL
  SELECT *
  FROM updated
  UNION ALL
  SELECT *
  FROM current
  LIMIT 1
  """
end
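
Both statements collapse the old lookup/insert/update race handling into one round trip: the inserted CTE wins when no state row exists, the updated CTE locks the existing row and swaps it only when the candidate beats it, and the current CTE reports the standing winner otherwise, so exactly one row always comes back and the retired columns are NULL unless a previous winner was displaced. The tie-break encoded in the UPDATE's WHERE clause, as a sketch (names illustrative):

# A candidate replaces the stored state when it is newer, or equally
# old with the lexicographically smaller event id.
candidate_wins? = fn candidate, stored ->
  candidate.created_at > stored.created_at or
    (candidate.created_at == stored.created_at and candidate.id < stored.id)
end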
defp event_row(normalized_event, now) do
@@ -607,95 +648,219 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
end

defp event_query_for_filter(filter, now, opts) do
  search_plan = search_plan(Map.get(filter, "search"))
  {base_query, remaining_tag_filters} = event_source_query(filter, now)

  base_query
  |> apply_common_event_filters(filter, remaining_tag_filters, opts, search_plan)
  |> maybe_order_by_search_rank(search_plan)
  |> select([event: event], %{
    id: event.id,
    pubkey: event.pubkey,
    created_at: event.created_at,
    kind: event.kind,
    tags: event.tags,
    content: event.content,
    sig: event.sig
  })
  |> maybe_select_search_score(search_plan)
  |> maybe_limit_query(effective_filter_limit(filter, opts))
end
defp event_id_query_for_filter(filter, now, opts) do
  search_plan = search_plan(Map.get(filter, "search"))
  {base_query, remaining_tag_filters} = event_source_query(filter, now)

  base_query
  |> apply_common_event_filters(filter, remaining_tag_filters, opts, search_plan)
  |> select([event: event], event.id)
end
defp event_id_distinct_union_query_for_filters([], now, _opts) do
  from(event in "events",
    where: event.created_at > ^now and event.created_at < ^now,
    select: event.id
  )
end

defp event_id_distinct_union_query_for_filters([first_filter | rest_filters], now, opts) do
  Enum.reduce(rest_filters, event_id_query_for_filter(first_filter, now, opts), fn filter, acc ->
    union(acc, ^event_id_query_for_filter(filter, now, opts))
  end)
end
defp event_ref_query_for_filter(filter, now, opts) do
search_plan = search_plan(Map.get(filter, "search"))
{base_query, remaining_tag_filters} = event_source_query(filter, now)
base_query
|> apply_common_event_filters(filter, remaining_tag_filters, opts, search_plan)
|> order_by([event: event], asc: event.created_at, asc: event.id)
|> select([event: event], %{
created_at: event.created_at,
id: event.id
})
|> maybe_limit_query(effective_filter_limit(filter, opts))
end
defp event_ref_union_query_for_filters([], now, _opts) do
from(event in "events",
where: event.created_at > ^now and event.created_at < ^now,
select: %{created_at: event.created_at, id: event.id}
)
end
defp event_ref_union_query_for_filters([first_filter | rest_filters], now, opts) do
Enum.reduce(rest_filters, event_ref_query_for_filter(first_filter, now, opts), fn filter,
acc ->
union_all(acc, ^event_ref_query_for_filter(filter, now, opts))
end)
end
defp fetch_event_refs([filter], now, opts) do
query =
filter
|> event_ref_query_for_filter(now, opts)
|> maybe_limit_query(Keyword.get(opts, :limit))
read_repo()
|> then(fn repo -> repo.all(query) end)
end
defp fetch_event_refs(filters, now, opts) do
query =
filters
|> event_ref_union_query_for_filters(now, opts)
|> subquery()
|> then(fn union_query ->
from(ref in union_query,
group_by: [ref.created_at, ref.id],
order_by: [asc: ref.created_at, asc: ref.id],
select: %{created_at: ref.created_at, id: ref.id}
)
end)
|> maybe_limit_query(Keyword.get(opts, :limit))
read_repo()
|> then(fn repo -> repo.all(query) end)
end
defp count_events([filter], now, opts) do
query =
filter
|> event_id_query_for_filter(now, opts)
|> subquery()
|> then(fn query ->
from(event in query, select: count())
end)
read_repo()
|> then(fn repo -> repo.one(query) end)
end
defp count_events(filters, now, opts) do
query =
filters
|> event_id_distinct_union_query_for_filters(now, opts)
|> subquery()
|> then(fn union_query ->
from(event in union_query, select: count())
end)
read_repo()
|> then(fn repo -> repo.one(query) end)
end
defp event_source_query(filter, now) do
tag_filters = tag_filters(filter)
case primary_tag_filter(tag_filters) do
nil ->
{from(event in "events",
as: :event,
where:
is_nil(event.deleted_at) and
(is_nil(event.expires_at) or event.expires_at > ^now)
), []}
{tag_name, values} = primary_tag_filter ->
remaining_tag_filters = List.delete(tag_filters, primary_tag_filter)
{from(tag in "event_tags",
as: :primary_tag,
where: tag.name == ^tag_name and tag.value in ^values,
join: event in "events",
as: :event,
on: event.created_at == tag.event_created_at and event.id == tag.event_id,
where:
is_nil(event.deleted_at) and
(is_nil(event.expires_at) or event.expires_at > ^now),
distinct: [event.created_at, event.id]
), remaining_tag_filters}
end
end
defp apply_common_event_filters(query, filter, remaining_tag_filters, opts, search_plan) do
query
|> maybe_filter_ids(Map.get(filter, "ids"))
|> maybe_filter_authors(Map.get(filter, "authors"))
|> maybe_filter_kinds(Map.get(filter, "kinds"))
|> maybe_filter_since(Map.get(filter, "since"))
|> maybe_filter_until(Map.get(filter, "until"))
|> maybe_filter_search(search_plan)
|> filter_by_tag_filters(remaining_tag_filters)
|> maybe_restrict_giftwrap_access(filter, opts)
end
defp primary_tag_filter([]), do: nil
defp primary_tag_filter(tag_filters) do
Enum.find(tag_filters, fn {tag_name, _values} -> tag_name in ["h", "i"] end) ||
List.first(tag_filters)
end
defp maybe_filter_ids(query, nil), do: query

defp maybe_filter_ids(query, ids) do
  decoded_ids = decode_hex_list(ids, :lower)
  where(query, [event: event], event.id in ^decoded_ids)
end

defp maybe_filter_authors(query, nil), do: query

defp maybe_filter_authors(query, authors) do
  decoded_authors = decode_hex_list(authors, :lower)
  where(query, [event: event], event.pubkey in ^decoded_authors)
end

defp maybe_filter_kinds(query, nil), do: query
defp maybe_filter_kinds(query, kinds), do: where(query, [event: event], event.kind in ^kinds)

defp maybe_filter_since(query, nil), do: query

defp maybe_filter_since(query, since),
  do: where(query, [event: event], event.created_at >= ^since)

defp maybe_filter_until(query, nil), do: query

defp maybe_filter_until(query, until),
  do: where(query, [event: event], event.created_at <= ^until)

defp maybe_filter_search(query, nil), do: query

defp maybe_filter_search(query, %{mode: :fts, query: search}) do
  where(
    query,
    [event: event],
    fragment(@fts_match_fragment, event.content, ^search)
  )
end

defp maybe_filter_search(query, %{mode: :trigram, query: search}) do
  escaped_search = escape_like_pattern(search)
  where(query, [event: event], ilike(event.content, ^"%#{escaped_search}%"))
end

defp escape_like_pattern(search) do
  search
@@ -704,13 +869,11 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
  |> String.replace("_", "\\_")
end

defp filter_by_tag_filters(query, tag_filters) do
  Enum.reduce(tag_filters, query, fn {tag_name, values}, acc ->
    where(
      acc,
      [event: event],
      fragment(
        "EXISTS (SELECT 1 FROM event_tags AS tag WHERE tag.event_created_at = ? AND tag.event_id = ? AND tag.name = ? AND tag.value = ANY(?))",
        event.created_at,
@@ -740,7 +903,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
  targets_giftwrap?(filter) and requester_pubkeys != [] ->
    where(
      query,
      [event: event],
      fragment(
        "EXISTS (SELECT 1 FROM event_tags AS tag WHERE tag.event_created_at = ? AND tag.event_id = ? AND tag.name = 'p' AND tag.value = ANY(?))",
        event.created_at,
@@ -750,7 +913,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
    )

  targets_giftwrap?(filter) ->
    where(query, [event: _event], false)

  true ->
    query
@@ -786,20 +949,90 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
defp maybe_limit_query(query, nil), do: query
defp maybe_limit_query(query, limit), do: limit(query, ^limit)
defp maybe_order_by_search_rank(query, nil) do
order_by(query, [event: event], desc: event.created_at, asc: event.id)
end
defp maybe_order_by_search_rank(query, %{mode: :fts, query: search}) do
order_by(
query,
[event: event],
desc: fragment(@fts_rank_fragment, event.content, ^search),
desc: event.created_at,
asc: event.id
)
end
defp maybe_order_by_search_rank(query, %{mode: :trigram, query: search}) do
order_by(
query,
[event: event],
desc: fragment(@trigram_rank_fragment, ^search, event.content),
desc: event.created_at,
asc: event.id
)
end
defp maybe_select_search_score(query, nil), do: query
defp maybe_select_search_score(query, %{mode: :fts, query: search}) do
select_merge(
query,
[event: event],
%{search_score: fragment(@fts_rank_fragment, event.content, ^search)}
)
end
defp maybe_select_search_score(query, %{mode: :trigram, query: search}) do
select_merge(
query,
[event: event],
%{search_score: fragment(@trigram_rank_fragment, ^search, event.content)}
)
end
defp search_plan(nil), do: nil
defp search_plan(search) when is_binary(search) do
normalized_search = String.trim(search)
cond do
normalized_search == "" ->
nil
trigram_fallback_search?(normalized_search) ->
%{mode: :trigram, query: normalized_search}
true ->
%{mode: :fts, query: normalized_search}
end
end
defp trigram_fallback_search?(search) do
String.match?(search, @trigram_fallback_pattern) or short_single_term_search?(search)
end
defp short_single_term_search?(search) do
case String.split(search, ~r/\s+/, trim: true) do
[term] -> String.length(term) <= @trigram_fallback_max_single_term_length
_other -> false
end
end
defp deduplicate_events(events) do
  events
  |> Enum.reduce(%{}, fn event, acc ->
    Map.update(acc, event.id, event, fn existing -> preferred_event(existing, event) end)
  end)
  |> Map.values()
end

defp sort_persisted_events(events, filters) do
  if Enum.any?(filters, &search_filter?/1) do
    Enum.sort(events, &search_result_sorter/2)
  else
    Enum.sort(events, &chronological_sorter/2)
  end
end
defp maybe_apply_query_limit(events, opts) do defp maybe_apply_query_limit(events, opts) do
@@ -821,6 +1054,50 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
     }
   end
 
+  defp preferred_event(existing, candidate) do
+    if search_result_sorter(candidate, existing) do
+      candidate
+    else
+      existing
+    end
+  end
+
+  defp search_filter?(filter) do
+    filter
+    |> Map.get("search")
+    |> search_plan()
+    |> Kernel.!=(nil)
+  end
+
+  defp search_result_sorter(left, right) do
+    left_score = search_score(left)
+    right_score = search_score(right)
+
+    cond do
+      left_score > right_score -> true
+      left_score < right_score -> false
+      true -> chronological_sorter(left, right)
+    end
+  end
+
+  defp chronological_sorter(left, right) do
+    cond do
+      left.created_at > right.created_at -> true
+      left.created_at < right.created_at -> false
+      true -> left.id < right.id
+    end
+  end
+
+  defp search_score(event) do
+    event
+    |> Map.get(:search_score, 0.0)
+    |> case do
+      score when is_float(score) -> score
+      score when is_integer(score) -> score / 1
+      _other -> 0.0
+    end
+  end
+
   defp normalize_persisted_tags(tags) when is_list(tags), do: tags
   defp normalize_persisted_tags(_tags), do: []
@@ -966,4 +1243,6 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
   end
 
   defp maybe_apply_mls_group_retention(expires_at, _kind, _created_at), do: expires_at
+
+  defp read_repo, do: PostgresRepos.read()
 end

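Note on the search changes above: `search_plan/1` routes a NIP-50-style "search" string to either full-text or trigram mode, the rank is both selected into `:search_score` and used for SQL ordering, and the in-memory merge (`preferred_event/2`, `sort_persisted_events/2`) applies the same comparator so duplicates keep their best-ranked copy. A minimal sketch of the comparator semantics, with illustrative event maps that are not from the diff:

# Illustrative only: same ordering rules as search_result_sorter/2 above.
events = [
  %{id: "aa", created_at: 90, search_score: 0.80},
  %{id: "bb", created_at: 99, search_score: 0.10},
  %{id: "ab", created_at: 99, search_score: 0.10}
]

Enum.sort(events, fn left, right ->
  cond do
    left.search_score > right.search_score -> true
    left.search_score < right.search_score -> false
    left.created_at != right.created_at -> left.created_at > right.created_at
    true -> left.id < right.id
  end
end)
# => ids come out ["aa", "ab", "bb"]: score wins, then newer created_at, then lower id.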
View File

@@ -5,6 +5,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Groups do
   import Ecto.Query
 
+  alias Parrhesia.PostgresRepos
   alias Parrhesia.Repo
 
   @behaviour Parrhesia.Storage.Groups
@@ -46,7 +47,9 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Groups do
         limit: 1
       )
 
-    case Repo.one(query) do
+    repo = read_repo()
+
+    case repo.one(query) do
       nil ->
         {:ok, nil}
@@ -94,8 +97,8 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Groups do
       )
 
     memberships =
-      query
-      |> Repo.all()
+      read_repo()
+      |> then(fn repo -> repo.all(query) end)
       |> Enum.map(fn membership ->
         to_membership_map(
           membership.group_id,
@@ -163,8 +166,8 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Groups do
       )
 
     roles =
-      query
-      |> Repo.all()
+      read_repo()
+      |> then(fn repo -> repo.all(query) end)
       |> Enum.map(fn role ->
         to_role_map(role.group_id, role.pubkey, role.role, role.metadata)
       end)
@@ -242,6 +245,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Groups do
   defp unwrap_transaction_result({:ok, result}), do: {:ok, result}
   defp unwrap_transaction_result({:error, reason}), do: {:error, reason}
 
+  defp read_repo, do: PostgresRepos.read()
 
   defp fetch_required_string(map, key) do
     map

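The recurring `read_repo()/then/2` pattern in this and the following files routes read-only queries through `Parrhesia.PostgresRepos.read/0`, presumably so deployments with a Postgres read replica can point reads at it while writes stay on `Parrhesia.Repo`. The `PostgresRepos` module itself is not shown in this diff; a minimal sketch of what such a router might look like (the `:read_repo` config key is an assumption):

# Hypothetical sketch, not from the diff: pick a repo module for reads.
defmodule Parrhesia.PostgresRepos do
  # Falls back to the primary repo unless a replica repo is configured.
  def read do
    Application.get_env(:parrhesia, :read_repo, Parrhesia.Repo)
  end
end

Because `read/0` returns a module, call sites use `repo.one(query)` / `repo.all(query)` dynamically rather than the static `Repo` alias.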
View File

@@ -5,6 +5,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
   import Ecto.Query
 
+  alias Parrhesia.PostgresRepos
   alias Parrhesia.Repo
 
   @behaviour Parrhesia.Storage.Moderation
@@ -67,6 +68,11 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
     end
   end
 
+  @impl true
+  def has_allowed_pubkeys?(_context) do
+    {:ok, scope_populated?(:allowed_pubkeys)}
+  end
+
   @impl true
   def ban_event(_context, event_id) do
     with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id),
@@ -163,6 +169,24 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
     end
   end
 
+  defp scope_populated?(scope) do
+    {table, field} = cache_scope_source!(scope)
+
+    if moderation_cache_enabled?() do
+      case cache_table_ref() do
+        :undefined ->
+          scope_populated_db?(table, field)
+
+        cache_table ->
+          ensure_cache_scope_loaded(scope, cache_table)
+          :ets.select_count(cache_table, [{{{:member, scope, :_}, true}, [], [true]}]) > 0
+      end
+    else
+      scope_populated_db?(table, field)
+    end
+  end
+
   defp ensure_cache_scope_loaded(scope, table) do
     loaded_key = cache_loaded_key(scope)
@@ -189,7 +213,8 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
         select: field(record, ^field)
       )
 
-    Repo.all(query)
+    read_repo()
+    |> then(fn repo -> repo.all(query) end)
   end
 
   defp cache_put(scope, value) do
@@ -243,7 +268,22 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
         limit: 1
       )
 
-    Repo.one(query) == 1
+    read_repo()
+    |> then(fn repo -> repo.one(query) end)
+    |> Kernel.==(1)
+  end
+
+  defp scope_populated_db?(table, field) do
+    query =
+      from(record in table,
+        select: field(record, ^field),
+        limit: 1
+      )
+
+    read_repo()
+    |> then(fn repo -> repo.one(query) end)
+    |> is_nil()
+    |> Kernel.not()
   end
 
   defp normalize_hex_or_binary(value, expected_bytes, _reason)
@@ -282,4 +322,6 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
   defp to_inet({_, _, _, _, _, _, _, _} = ip_tuple),
     do: %Postgrex.INET{address: ip_tuple, netmask: 128}
 
+  defp read_repo, do: PostgresRepos.read()
 end

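The ETS match spec in `scope_populated?/1` counts cached rows shaped like `{{:member, scope, value}, true}` without copying them out of the table, so an "is the allowlist non-empty?" check stays O(table scan) with no term allocation. A self-contained illustration of the same match spec against a throwaway table (table name and values are made up):

# Illustrative only: count rows matching {{:member, scope, _}, true}.
table = :ets.new(:moderation_cache_demo, [:set, :public])
:ets.insert(table, {{:member, :allowed_pubkeys, "aa"}, true})
:ets.insert(table, {{:member, :allowed_pubkeys, "bb"}, true})
:ets.insert(table, {{:member, :banned_pubkeys, "cc"}, true})

:ets.select_count(table, [{{{:member, :allowed_pubkeys, :_}, true}, [], [true]}])
# => 2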
View File

@@ -1,51 +0,0 @@
-defmodule Parrhesia.Storage.Archiver do
-  @moduledoc """
-  Partition-aware archival helpers for Postgres event partitions.
-  """
-
-  import Ecto.Query
-
-  alias Parrhesia.Repo
-
-  @doc """
-  Lists all `events_*` partitions excluding the default partition.
-  """
-  @spec list_partitions() :: [String.t()]
-  def list_partitions do
-    query =
-      from(table in "pg_tables",
-        where: table.schemaname == "public",
-        where: like(table.tablename, "events_%"),
-        where: table.tablename != "events_default",
-        select: table.tablename,
-        order_by: [asc: table.tablename]
-      )
-
-    Repo.all(query)
-  end
-
-  @identifier_pattern ~r/^[a-zA-Z_][a-zA-Z0-9_]*$/
-
-  @doc """
-  Generates an archive SQL statement for the given partition.
-  """
-  @spec archive_sql(String.t(), String.t()) :: String.t()
-  def archive_sql(partition_name, archive_table_name) do
-    quoted_archive_table_name = quote_identifier!(archive_table_name)
-    quoted_partition_name = quote_identifier!(partition_name)
-
-    "INSERT INTO #{quoted_archive_table_name} SELECT * FROM #{quoted_partition_name};"
-  end
-
-  defp quote_identifier!(identifier) when is_binary(identifier) do
-    if Regex.match?(@identifier_pattern, identifier) do
-      ~s("#{identifier}")
-    else
-      raise ArgumentError, "invalid SQL identifier: #{inspect(identifier)}"
-    end
-  end
-
-  defp quote_identifier!(identifier) do
-    raise ArgumentError, "invalid SQL identifier: #{inspect(identifier)}"
-  end
-end

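The deleted `Archiver` module's `list_partitions/0` and `quote_identifier!/1` survive in the new `Parrhesia.Storage.Partitions` module further down; only `archive_sql/2` has no direct replacement in this diff. For reference, it emitted a plain quoted `INSERT INTO ... SELECT` copy (the partition and archive table names below are made-up examples):

Parrhesia.Storage.Archiver.archive_sql("events_2026_03", "events_archive")
# => ~s(INSERT INTO "events_archive" SELECT * FROM "events_2026_03";)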
View File

@@ -7,6 +7,7 @@ defmodule Parrhesia.Storage.Events do
   @type event_id :: binary()
   @type event :: map()
   @type filter :: map()
+  @type event_ref :: %{created_at: non_neg_integer(), id: binary()}
   @type query_opts :: keyword()
   @type count_result :: non_neg_integer() | %{optional(atom()) => term()}
   @type reason :: term()
@@ -14,6 +15,8 @@ defmodule Parrhesia.Storage.Events do
   @callback put_event(context(), event()) :: {:ok, event()} | {:error, reason()}
   @callback get_event(context(), event_id()) :: {:ok, event() | nil} | {:error, reason()}
   @callback query(context(), [filter()], query_opts()) :: {:ok, [event()]} | {:error, reason()}
+  @callback query_event_refs(context(), [filter()], query_opts()) ::
+              {:ok, [event_ref()]} | {:error, reason()}
   @callback count(context(), [filter()], query_opts()) ::
               {:ok, count_result()} | {:error, reason()}
   @callback delete_by_request(context(), event()) :: {:ok, non_neg_integer()} | {:error, reason()}

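The new `query_event_refs/3` callback returns lightweight `%{created_at: ..., id: ...}` references instead of full events, which suits set-reconciliation-style sync where only identity and ordering matter. How the Postgres adapter implements it is not shown here; a hypothetical helper sketch of the obvious derivation for adapters that already implement `query/3` (module and function names are illustrative, and a real SQL adapter would presumably select only the two columns instead):

# Hypothetical sketch, not from the diff.
defmodule EventRefs do
  # Project a query/3 result down to the event_ref() shape.
  def from_query_result({:ok, events}) when is_list(events) do
    {:ok, Enum.map(events, fn event -> Map.take(event, [:created_at, :id]) end)}
  end

  def from_query_result({:error, _reason} = error), do: error
end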
View File

@@ -16,6 +16,7 @@ defmodule Parrhesia.Storage.Moderation do
   @callback allow_pubkey(context(), pubkey()) :: :ok | {:error, reason()}
   @callback disallow_pubkey(context(), pubkey()) :: :ok | {:error, reason()}
   @callback pubkey_allowed?(context(), pubkey()) :: {:ok, boolean()} | {:error, reason()}
+  @callback has_allowed_pubkeys?(context()) :: {:ok, boolean()} | {:error, reason()}
   @callback ban_event(context(), event_id()) :: :ok | {:error, reason()}
   @callback unban_event(context(), event_id()) :: :ok | {:error, reason()}

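`has_allowed_pubkeys?/1` lets callers distinguish "no allowlist configured, relay open" from "allowlist active, check each pubkey" without fetching the whole list. A sketch of the kind of write gate a caller could build on the two callbacks together (module, function names, and the dynamic `storage` dispatch are illustrative, not from the diff):

# Hypothetical policy sketch built on the new callback.
defmodule WriteGate do
  def allowed?(storage, context, pubkey) do
    case storage.has_allowed_pubkeys?(context) do
      # No allowlist configured: accept everyone.
      {:ok, false} -> {:ok, true}
      # Allowlist active: defer to the per-pubkey check.
      {:ok, true} -> storage.pubkey_allowed?(context, pubkey)
      {:error, _reason} = error -> error
    end
  end
end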
View File

@@ -0,0 +1,318 @@
+defmodule Parrhesia.Storage.Partitions do
+  @moduledoc """
+  Partition lifecycle helpers for Postgres `events` and `event_tags` monthly partitions.
+  """
+
+  import Ecto.Query
+
+  alias Parrhesia.PostgresRepos
+  alias Parrhesia.Repo
+
+  @identifier_pattern ~r/^[a-zA-Z_][a-zA-Z0-9_]*$/
+  @monthly_partition_pattern ~r/^events_(\d{4})_(\d{2})$/
+  @events_partition_prefix "events"
+  @event_tags_partition_prefix "event_tags"
+  @default_months_ahead 2
+
+  @type monthly_partition :: %{
+          name: String.t(),
+          year: pos_integer(),
+          month: pos_integer(),
+          month_start_unix: non_neg_integer(),
+          month_end_unix: non_neg_integer()
+        }
+
+  @doc """
+  Lists all `events_*` partitions excluding the default partition.
+  """
+  @spec list_partitions() :: [String.t()]
+  def list_partitions do
+    query =
+      from(table in "pg_tables",
+        where: table.schemaname == "public",
+        where: like(table.tablename, "events_%"),
+        where: table.tablename != "events_default",
+        select: table.tablename,
+        order_by: [asc: table.tablename]
+      )
+
+    read_repo()
+    |> then(fn repo -> repo.all(query) end)
+  end
+
+  @doc """
+  Lists monthly event partitions that match `events_YYYY_MM` naming.
+  """
+  @spec list_monthly_partitions() :: [monthly_partition()]
+  def list_monthly_partitions do
+    list_partitions()
+    |> Enum.map(&parse_monthly_partition/1)
+    |> Enum.reject(&is_nil/1)
+    |> Enum.sort_by(&{&1.year, &1.month})
+  end
+
+  @doc """
+  Ensures monthly partitions exist for the current month and `months_ahead` future months.
+  """
+  @spec ensure_monthly_partitions(keyword()) :: :ok | {:error, term()}
+  def ensure_monthly_partitions(opts \\ []) when is_list(opts) do
+    months_ahead =
+      opts
+      |> Keyword.get(:months_ahead, @default_months_ahead)
+      |> normalize_non_negative_integer(@default_months_ahead)
+
+    reference_date =
+      opts
+      |> Keyword.get(:reference_date, Date.utc_today())
+      |> normalize_reference_date()
+
+    reference_month = month_start(reference_date)
+
+    offsets =
+      if months_ahead == 0 do
+        [0]
+      else
+        Enum.to_list(0..months_ahead)
+      end
+
+    Enum.reduce_while(offsets, :ok, fn offset, :ok ->
+      target_month = shift_month(reference_month, offset)
+
+      case create_monthly_partitions(target_month) do
+        :ok -> {:cont, :ok}
+        {:error, reason} -> {:halt, {:error, reason}}
+      end
+    end)
+  end
+
+  @doc """
+  Returns the current database size in bytes.
+  """
+  @spec database_size_bytes() :: {:ok, non_neg_integer()} | {:error, term()}
+  def database_size_bytes do
+    repo = read_repo()
+
+    case repo.query("SELECT pg_database_size(current_database())") do
+      {:ok, %{rows: [[size]]}} when is_integer(size) and size >= 0 -> {:ok, size}
+      {:ok, _result} -> {:error, :unexpected_result}
+      {:error, reason} -> {:error, reason}
+    end
+  end
+
+  @doc """
+  Drops an event partition table by name.
+
+  For monthly `events_YYYY_MM` partitions, the matching `event_tags_YYYY_MM`
+  partition is dropped first to keep partition lifecycle aligned.
+  """
+  @spec drop_partition(String.t()) :: :ok | {:error, term()}
+  def drop_partition(partition_name) when is_binary(partition_name) do
+    if protected_partition?(partition_name) do
+      {:error, :protected_partition}
+    else
+      drop_partition_tables(partition_name)
+    end
+  end
+
+  @doc """
+  Returns the monthly `events` partition name for a date.
+  """
+  @spec month_partition_name(Date.t()) :: String.t()
+  def month_partition_name(%Date{} = date) do
+    monthly_partition_name(@events_partition_prefix, date)
+  end
+
+  @doc """
+  Returns the monthly `event_tags` partition name for a date.
+  """
+  @spec event_tags_month_partition_name(Date.t()) :: String.t()
+  def event_tags_month_partition_name(%Date{} = date) do
+    monthly_partition_name(@event_tags_partition_prefix, date)
+  end
+
+  defp monthly_partition_name(prefix, %Date{} = date) do
+    month_suffix = date.month |> Integer.to_string() |> String.pad_leading(2, "0")
+    "#{prefix}_#{date.year}_#{month_suffix}"
+  end
+
+  defp create_monthly_partitions(%Date{} = month_date) do
+    {start_unix, end_unix} = month_bounds_unix(month_date.year, month_date.month)
+
+    case create_monthly_partition(
+           month_partition_name(month_date),
+           @events_partition_prefix,
+           start_unix,
+           end_unix
+         ) do
+      :ok ->
+        create_monthly_partition(
+          event_tags_month_partition_name(month_date),
+          @event_tags_partition_prefix,
+          start_unix,
+          end_unix
+        )
+
+      {:error, reason} ->
+        {:error, reason}
+    end
+  end
+
+  defp create_monthly_partition(partition_name, parent_table_name, start_unix, end_unix) do
+    quoted_partition_name = quote_identifier!(partition_name)
+    quoted_parent_table_name = quote_identifier!(parent_table_name)
+
+    sql =
+      """
+      CREATE TABLE IF NOT EXISTS #{quoted_partition_name}
+      PARTITION OF #{quoted_parent_table_name}
+      FOR VALUES FROM (#{start_unix}) TO (#{end_unix})
+      """
+
+    case Repo.query(sql) do
+      {:ok, _result} -> :ok
+      {:error, reason} -> {:error, reason}
+    end
+  end
+
+  defp drop_partition_tables(partition_name) do
+    case parse_monthly_partition(partition_name) do
+      nil -> drop_table(partition_name)
+      monthly_partition -> drop_monthly_partition(partition_name, monthly_partition)
+    end
+  end
+
+  defp drop_monthly_partition(partition_name, %{year: year, month: month}) do
+    month_date = Date.new!(year, month, 1)
+    tags_partition_name = monthly_partition_name(@event_tags_partition_prefix, month_date)
+
+    with :ok <- maybe_detach_events_partition(partition_name),
+         :ok <- drop_table(tags_partition_name) do
+      drop_table(partition_name)
+    end
+  end
+
+  defp maybe_detach_events_partition(partition_name) do
+    if attached_partition?(partition_name, @events_partition_prefix) do
+      quoted_parent_table_name = quote_identifier!(@events_partition_prefix)
+      quoted_partition_name = quote_identifier!(partition_name)
+
+      case Repo.query(
+             "ALTER TABLE #{quoted_parent_table_name} DETACH PARTITION #{quoted_partition_name}"
+           ) do
+        {:ok, _result} -> :ok
+        {:error, reason} -> {:error, reason}
+      end
+    else
+      :ok
+    end
+  end
+
+  defp attached_partition?(partition_name, parent_table_name) do
+    query =
+      """
+      SELECT 1
+      FROM pg_inherits AS inheritance
+      JOIN pg_class AS child ON child.oid = inheritance.inhrelid
+      JOIN pg_namespace AS child_ns ON child_ns.oid = child.relnamespace
+      JOIN pg_class AS parent ON parent.oid = inheritance.inhparent
+      JOIN pg_namespace AS parent_ns ON parent_ns.oid = parent.relnamespace
+      WHERE child_ns.nspname = 'public'
+        AND parent_ns.nspname = 'public'
+        AND child.relname = $1
+        AND parent.relname = $2
+      LIMIT 1
+      """
+
+    repo = read_repo()
+
+    case repo.query(query, [partition_name, parent_table_name]) do
+      {:ok, %{rows: [[1]]}} -> true
+      {:ok, %{rows: []}} -> false
+      {:ok, _result} -> false
+      {:error, _reason} -> false
+    end
+  end
+
+  defp drop_table(table_name) do
+    quoted_table_name = quote_identifier!(table_name)
+
+    case Repo.query("DROP TABLE IF EXISTS #{quoted_table_name}") do
+      {:ok, _result} -> :ok
+      {:error, reason} -> {:error, reason}
+    end
+  end
+
+  defp protected_partition?(partition_name) do
+    partition_name in ["events", "events_default", "event_tags", "event_tags_default"]
+  end
+
+  defp parse_monthly_partition(partition_name) do
+    case Regex.run(@monthly_partition_pattern, partition_name, capture: :all_but_first) do
+      [year_text, month_text] ->
+        {year, ""} = Integer.parse(year_text)
+        {month, ""} = Integer.parse(month_text)
+
+        if month in 1..12 do
+          {month_start_unix, month_end_unix} = month_bounds_unix(year, month)
+
+          %{
+            name: partition_name,
+            year: year,
+            month: month,
+            month_start_unix: month_start_unix,
+            month_end_unix: month_end_unix
+          }
+        else
+          nil
+        end
+
+      _other ->
+        nil
+    end
+  end
+
+  defp month_bounds_unix(year, month) do
+    month_date = Date.new!(year, month, 1)
+    next_month_date = shift_month(month_date, 1)
+    {date_to_unix(month_date), date_to_unix(next_month_date)}
+  end
+
+  defp date_to_unix(%Date{} = date) do
+    date
+    |> DateTime.new!(~T[00:00:00], "Etc/UTC")
+    |> DateTime.to_unix()
+  end
+
+  defp read_repo, do: PostgresRepos.read()
+
+  defp month_start(%Date{} = date), do: Date.new!(date.year, date.month, 1)
+
+  defp shift_month(%Date{} = date, month_delta) when is_integer(month_delta) do
+    month_index = date.year * 12 + date.month - 1 + month_delta
+    shifted_year = div(month_index, 12)
+    shifted_month = rem(month_index, 12) + 1
+    Date.new!(shifted_year, shifted_month, 1)
+  end
+
+  defp normalize_reference_date(%Date{} = date), do: date
+  defp normalize_reference_date(_other), do: Date.utc_today()
+
+  defp normalize_non_negative_integer(value, _default) when is_integer(value) and value >= 0,
+    do: value
+
+  defp normalize_non_negative_integer(_value, default), do: default
+
+  defp quote_identifier!(identifier) when is_binary(identifier) do
+    if Regex.match?(@identifier_pattern, identifier) do
+      ~s("#{identifier}")
+    else
+      raise ArgumentError, "invalid SQL identifier: #{inspect(identifier)}"
+    end
+  end
+
+  defp quote_identifier!(identifier) do
+    raise ArgumentError, "invalid SQL identifier: #{inspect(identifier)}"
+  end
+end

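A note on the `shift_month/2` arithmetic above: it flattens a date to a zero-based month index (`year * 12 + month - 1`), adds the delta, then splits back with `div`/`rem`, which handles year rollover without special cases. A worked example:

# December 2026 shifted forward two months:
# month_index = 2026 * 12 + 12 - 1 + 2 = 24325
# div(24325, 12) = 2027, rem(24325, 12) + 1 = 2
date = Date.new!(2026, 12, 1)
month_index = date.year * 12 + date.month - 1 + 2
Date.new!(div(month_index, 12), rem(month_index, 12) + 1, 1)
# => ~D[2027-02-01]

The same formula also rolls backward across a year boundary (January 2026 minus one month yields December 2025), since the month index stays positive for any realistic year.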
Some files were not shown because too many files have changed in this diff.