20 Commits

Author SHA1 Message Date
b86b5db78c ci: GitHub release action
Some checks failed
CI / Test (OTP 27.2 / Elixir 1.18.2) (push) Failing after 1s
CI / Test (OTP 28.4 / Elixir 1.19.4 + Marmot E2E) (push) Failing after 1s
Release / Release Gate (push) Failing after 1s
Release / Build and publish image (push) Has been skipped
2026-03-14 20:02:05 +01:00
5577445e80 ci: GitHub action 2026-03-14 19:49:07 +01:00
1a4572013d chore: Bump version to 0.4.0 2026-03-14 19:15:31 +01:00
5c2fadc28e Rename archiver to partitions and drop archive SQL helper 2026-03-14 18:31:12 +01:00
7faf8c84c8 Align event_tags partition lifecycle with events 2026-03-14 18:23:21 +01:00
889d630c12 Add monthly partition maintenance and retention pruning 2026-03-14 18:11:40 +01:00
19664ac56c build: Default port to 4413 2026-03-14 17:37:37 +01:00
708e26e4f4 build/test/docs: Docker image, Darwin fix, logo 2026-03-14 17:08:07 +01:00
8c8d5a8abb chore: bump version to 0.3.0 2026-03-14 13:06:24 +01:00
0fbd7008a1 Use explicit Postgrex JSON types with Elixir JSON module 2026-03-14 11:24:06 +01:00
bfdb06b203 Restrict metrics access and add optional dedicated metrics listener 2026-03-14 04:56:45 +01:00
36365710a8 Harden NEG session handling and gate feature wiring 2026-03-14 04:56:45 +01:00
e12085af2f Add signature verification and lossless event tag storage 2026-03-14 04:20:42 +01:00
18e429e05a Fix medium findings: deletion coords, count SQL, cache startup 2026-03-14 04:15:37 +01:00
c7a9f152f9 Harden ingress limits, AUTH validation, and search escaping 2026-03-14 04:09:02 +01:00
238b44ff03 docs: Opus review 2026-03-14 03:59:26 +01:00
680a73ee33 build: add lib_secp256k1 dep 2026-03-14 03:58:24 +01:00
63d3e7d55f build: Darwin fix 2026-03-14 03:17:36 +01:00
54a54c026b Improve ingest throughput with moderation cache and post-ack fanout 2026-03-14 02:33:37 +01:00
d348eab69e fix/test: benchmark 2026-03-14 02:23:08 +01:00
73 changed files with 5938 additions and 828 deletions

20
.env.example Normal file
View File

@@ -0,0 +1,20 @@
# Container image and published host port for docker compose.
PARRHESIA_IMAGE=parrhesia:latest
# Default aligned with the relay's in-container listener (4413); override to
# publish on a different host port.
PARRHESIA_HOST_PORT=4413
POSTGRES_DB=parrhesia
POSTGRES_USER=parrhesia
POSTGRES_PASSWORD=parrhesia
DATABASE_URL=ecto://parrhesia:parrhesia@db:5432/parrhesia
POOL_SIZE=20
# Optional runtime overrides:
# PARRHESIA_RELAY_URL=ws://localhost:4413/relay
# PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES=false
# PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_READS=false
# PARRHESIA_POLICIES_MIN_POW_DIFFICULTY=0
# PARRHESIA_FEATURES_VERIFY_EVENT_SIGNATURES=true
# PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT=true
# PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY=true
# PARRHESIA_METRICS_AUTH_TOKEN=
# PARRHESIA_EXTRA_CONFIG=/config/parrhesia.runtime.exs

117
.github/workflows/ci.yaml vendored Normal file
View File

@@ -0,0 +1,117 @@
# CI workflow: runs the Mix test suite on a matrix of OTP/Elixir versions.
# The `main: true` matrix entry is the primary toolchain and additionally runs
# compile-with-warnings, format, Credo, unused-dep checks, and the Marmot E2E
# suite (as its display name advertises).
name: CI
on:
  push:
    branches: ["**"]
  pull_request:
    branches: ["**"]
env:
  MIX_ENV: test
  MIX_OS_DEPS_COMPILE_PARTITION_COUNT: 8
permissions:
  contents: read
jobs:
  test:
    name: ${{ matrix.name }}
    runs-on: ubuntu-24.04
    strategy:
      fail-fast: false
      matrix:
        include:
          # Oldest supported toolchain: unit/integration tests only.
          - name: Test (OTP 27.2 / Elixir 1.18.2)
            otp: "27.2"
            elixir: "1.18.2"
            main: false
          # Primary toolchain: full lint gates + Marmot E2E.
          - name: Test (OTP 28.4 / Elixir 1.19.4 + Marmot E2E)
            otp: "28.4"
            elixir: "1.19.4"
            main: true
    services:
      postgres:
        image: postgres:16-alpine
        env:
          POSTGRES_USER: postgres
          POSTGRES_PASSWORD: postgres
          POSTGRES_DB: app_test
        ports:
          - 5432:5432
        options: >-
          --health-cmd "pg_isready -U postgres"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    env:
      PGHOST: localhost
      PGPORT: 5432
      PGUSER: postgres
      PGPASSWORD: postgres
      PGDATABASE: app_test
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: recursive
      - name: Set up Elixir + OTP
        uses: erlef/setup-beam@v1
        with:
          otp-version: ${{ matrix.otp }}
          elixir-version: ${{ matrix.elixir }}
      # Node.js is needed for the TypeScript-based Marmot E2E suite.
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 24
      # Cache deps/ directory — keyed on mix.lock. `cache-hit` is true only on
      # an exact key match; restore-key (partial) hits still run `mix deps.get`.
      - name: Cache Mix deps
        uses: actions/cache@v4
        id: deps-cache
        with:
          path: deps
          key: ${{ runner.os }}-mix-deps-${{ hashFiles('mix.lock') }}
          restore-keys: |
            ${{ runner.os }}-mix-deps-
      # Cache _build/ — keyed on mix.lock + OTP/Elixir versions so compiled
      # artifacts are never shared across incompatible toolchains.
      - name: Cache _build
        uses: actions/cache@v4
        with:
          path: _build
          key: ${{ runner.os }}-mix-build-${{ matrix.otp }}-${{ matrix.elixir }}-${{ hashFiles('mix.lock') }}
          restore-keys: |
            ${{ runner.os }}-mix-build-${{ matrix.otp }}-${{ matrix.elixir }}-
      - name: Install Mix dependencies
        if: steps.deps-cache.outputs.cache-hit != 'true'
        run: mix deps.get
      - name: Compile (warnings as errors)
        if: ${{ matrix.main }}
        run: mix compile --warnings-as-errors
      - name: Check formatting
        if: ${{ matrix.main }}
        run: mix format --check-formatted
      - name: Credo
        if: ${{ matrix.main }}
        run: mix credo --strict --all
      - name: Check for unused locked deps
        if: ${{ matrix.main }}
        run: |
          mix deps.unlock --unused
          git diff --exit-code -- mix.lock
      - name: Run tests
        run: mix test --color
      # Gate on the main matrix entry: only the "+ Marmot E2E" job is meant to
      # run this suite (it was previously unconditional, contradicting the
      # matrix job names).
      - name: Run Marmot E2E tests
        if: ${{ matrix.main }}
        run: mix test.marmot_e2e

171
.github/workflows/release.yaml vendored Normal file
View File

@@ -0,0 +1,171 @@
# Release workflow: on version tags (or manual dispatch) run the full test
# gate, then build the Docker image via Nix and push it to GHCR with skopeo.
name: Release
on:
  push:
    tags:
      - "v*.*.*"
  workflow_dispatch:
    inputs:
      push:
        description: "Push image to GHCR?"
        required: false
        default: "true"
        type: choice
        options: ["true", "false"]
env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}
  FLAKE_OUTPUT: packages.x86_64-linux.dockerImage
permissions:
  contents: read
  packages: write
  id-token: write
jobs:
  test:
    name: Release Gate
    runs-on: ubuntu-24.04
    services:
      postgres:
        image: postgres:16-alpine
        env:
          POSTGRES_USER: postgres
          POSTGRES_PASSWORD: postgres
          POSTGRES_DB: app_test
        ports:
          - 5432:5432
        options: >-
          --health-cmd "pg_isready -U postgres"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    env:
      MIX_ENV: test
      PGHOST: localhost
      PGPORT: 5432
      PGUSER: postgres
      PGPASSWORD: postgres
      PGDATABASE: app_test
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: recursive
      - name: Set up Elixir + OTP
        uses: erlef/setup-beam@v1
        with:
          otp-version: "28.4"
          elixir-version: "1.19.4"
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 24
      - name: Cache Mix deps
        uses: actions/cache@v4
        id: deps-cache
        with:
          path: deps
          key: ${{ runner.os }}-mix-deps-${{ hashFiles('mix.lock') }}
          restore-keys: |
            ${{ runner.os }}-mix-deps-
      - name: Cache _build
        uses: actions/cache@v4
        with:
          path: _build
          key: ${{ runner.os }}-mix-build-28.4-1.19.4-${{ hashFiles('mix.lock') }}
          restore-keys: |
            ${{ runner.os }}-mix-build-28.4-1.19.4-
      - name: Install Mix dependencies
        if: steps.deps-cache.outputs.cache-hit != 'true'
        run: mix deps.get
      - name: Compile
        run: mix compile --warnings-as-errors
      - name: Check formatting
        run: mix format --check-formatted
      - name: Credo
        run: mix credo --strict --all
      - name: Run tests
        run: mix test --color
      - name: Run Marmot E2E
        run: mix test.marmot_e2e
      - name: Check for unused locked deps
        run: |
          mix deps.unlock --unused
          git diff --exit-code -- mix.lock
  build-and-push:
    name: Build and publish image
    runs-on: ubuntu-24.04
    needs: test
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          # Keep in sync with the test job: the Nix build packages the same
          # tree, so submodules must be present here too.
          submodules: recursive
      - name: Install Nix
        uses: DeterminateSystems/nix-installer-action@main
        with:
          extra-conf: |
            experimental-features = nix-command flakes
            substituters = https://cache.nixos.org
            trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=
      # NOTE(review): magic-nix-cache has been deprecated upstream — confirm it
      # still works or swap for an actions/cache-based Nix store cache.
      - name: Magic Nix Cache
        uses: DeterminateSystems/magic-nix-cache-action@main
      - name: Extract image metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          # `latest` is driven solely by the tag-ref condition below; the
          # previous `enable={{is_default_branch}}` raw rule could never fire
          # on tag pushes and only duplicated this one, so it was removed.
          # On workflow_dispatch from a branch, only the sha tag is produced.
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/v') }}
            type=sha,prefix=sha-,format=short
      - name: Build Docker image with Nix
        id: build
        run: |
          nix build .#${{ env.FLAKE_OUTPUT }} --out-link ./docker-image-result
          echo "archive_path=$(readlink -f ./docker-image-result)" >> "$GITHUB_OUTPUT"
      - name: Push image to GHCR
        env:
          TAGS: ${{ steps.meta.outputs.tags }}
          SHOULD_PUSH: ${{ github.event.inputs.push != 'false' }}
          ARCHIVE_PATH: ${{ steps.build.outputs.archive_path }}
          # Credentials passed through env instead of being interpolated into
          # the script body (GitHub Actions script-injection hardening).
          REGISTRY_USER: ${{ github.actor }}
          REGISTRY_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          if [ "$SHOULD_PUSH" != "true" ]; then
            echo "Skipping push"
            exit 0
          fi
          IMAGE_ARCHIVE="docker-archive:${ARCHIVE_PATH}"
          # metadata-action emits one fully-qualified tag per line.
          while IFS= read -r TAG; do
            if [ -n "$TAG" ]; then
              echo "Pushing $TAG"
              nix run nixpkgs#skopeo -- copy \
                --dest-creds "${REGISTRY_USER}:${REGISTRY_TOKEN}" \
                "$IMAGE_ARCHIVE" \
                "docker://$TAG"
            fi
          done <<< "$TAGS"

View File

@@ -1,6 +1,6 @@
Running 2 comparison run(s)...
Versions:
parrhesia 0.2.0
parrhesia 0.4.0
strfry 1.0.4 (nixpkgs)
nostr-rs-relay 0.9.0
nostr-bench 0.4.0
@@ -16,18 +16,18 @@ Versions:
=== Bench comparison (averages) ===
metric parrhesia strfry nostr-rs-relay strfry/parrhesia nostr-rs/parrhesia
-------------------------- --------- -------- -------------- ---------------- ------------------
connect avg latency (ms) ↓ 10.00 3.00 2.50 0.30x 0.25x
connect max latency (ms) ↓ 18.50 5.00 4.00 0.27x 0.22x
echo throughput (TPS) ↑ 76972.00 68204.50 158779.00 0.89x 2.06x
echo throughput (MiB/s) ↑ 42.15 38.15 86.95 0.91x 2.06x
event throughput (TPS) ↑ 1749.00 3560.00 787.50 2.04x 0.45x
event throughput (MiB/s) ↑ 1.15 2.30 0.50 2.00x 0.43x
req throughput (TPS) ↑ 2463.00 1808.00 822.00 0.73x 0.33x
req throughput (MiB/s) ↑ 13.00 11.70 2.25 0.90x 0.17x
connect avg latency (ms) ↓ 10.50 4.00 3.00 0.38x 0.29x
connect max latency (ms) ↓ 19.50 7.50 4.00 0.38x 0.21x
echo throughput (TPS) ↑ 78520.00 60353.00 164420.50 0.77x 2.09x
echo throughput (MiB/s) ↑ 43.00 33.75 90.05 0.78x 2.09x
event throughput (TPS) ↑ 1919.50 3520.50 781.00 1.83x 0.41x
event throughput (MiB/s) ↑ 1.25 2.25 0.50 1.80x 0.40x
req throughput (TPS) ↑ 4608.50 1809.50 875.50 0.39x 0.19x
req throughput (MiB/s) ↑ 26.20 11.75 2.40 0.45x 0.09x
Legend: ↑ higher is better, ↓ lower is better.
Ratio columns are server/parrhesia (for ↓ metrics, <1.00x means that server is faster).
Run details:
run 1: parrhesia(echo_tps=78336, event_tps=1796, req_tps=2493, connect_avg_ms=9) | strfry(echo_tps=70189, event_tps=3567, req_tps=1809, connect_avg_ms=3) | nostr-rs-relay(echo_tps=149317, event_tps=786, req_tps=854, connect_avg_ms=2)
run 2: parrhesia(echo_tps=75608, event_tps=1702, req_tps=2433, connect_avg_ms=11) | strfry(echo_tps=66220, event_tps=3553, req_tps=1807, connect_avg_ms=3) | nostr-rs-relay(echo_tps=168241, event_tps=789, req_tps=790, connect_avg_ms=3)
run 1: parrhesia(echo_tps=78892, event_tps=1955, req_tps=4671, connect_avg_ms=10) | strfry(echo_tps=59132, event_tps=3462, req_tps=1806, connect_avg_ms=4) | nostr-rs-relay(echo_tps=159714, event_tps=785, req_tps=873, connect_avg_ms=3)
run 2: parrhesia(echo_tps=78148, event_tps=1884, req_tps=4546, connect_avg_ms=11) | strfry(echo_tps=61574, event_tps=3579, req_tps=1813, connect_avg_ms=4) | nostr-rs-relay(echo_tps=169127, event_tps=777, req_tps=878, connect_avg_ms=3)

View File

@@ -1,86 +0,0 @@
# PROGRESS (ephemeral)
Implementation checklist for Parrhesia relay.
## Phase 0 — foundation
- [x] Confirm architecture doc with final NIP scope (`docs/ARCH.md`)
- [x] Add core deps (websocket/http server, ecto_sql/postgrex, telemetry, test tooling)
- [x] Establish application config structure (limits, policies, feature flags)
- [x] Wire initial supervision tree skeleton
## Phase 1 — protocol core (NIP-01)
- [x] Implement websocket endpoint + per-connection process
- [x] Implement message decode/encode for `EVENT`, `REQ`, `CLOSE`
- [x] Implement strict event validation (`id`, `sig`, shape, timestamps)
- [x] Implement filter evaluation engine (AND/OR semantics)
- [x] Implement subscription lifecycle + `EOSE` behavior
- [x] Implement canonical `OK`, `NOTICE`, `CLOSED` responses + prefixes
## Phase 2 — storage boundary + postgres adapter
- [x] Define `Parrhesia.Storage.*` behaviors (events/moderation/groups/admin)
- [x] Implement Postgres adapter modules behind behaviors
- [x] Create migrations for events, tags, moderation, membership
- [x] Implement replaceable/addressable semantics at storage layer
- [x] Add adapter contract test suite
## Phase 3 — fanout + performance primitives
- [x] Build ETS-backed subscription index
- [x] Implement candidate narrowing by kind/author/tag
- [x] Add bounded outbound queues/backpressure per connection
- [x] Add telemetry for ingest/query/fanout latency + queue depth
## Phase 4 — relay metadata and auth
- [x] NIP-11 endpoint (`application/nostr+json`)
- [x] NIP-42 challenge/auth flow
- [x] Enforce NIP-70 protected events (default reject, auth override)
- [x] Add auth-required/restricted response paths for writes and reqs
## Phase 5 — lifecycle and moderation features
- [x] NIP-09 deletion requests
- [x] NIP-40 expiration handling + purge worker
- [x] NIP-62 vanish requests (hard delete semantics)
- [x] NIP-13 PoW gate (configurable minimum)
- [x] Moderation tables + policy hooks (ban/allow/event/ip)
## Phase 6 — query extensions
- [x] NIP-45 `COUNT` (exact)
- [x] Optional HLL response support
- [x] NIP-50 search (`search` filter + ranking)
- [x] NIP-77 negentropy (`NEG-OPEN/MSG/CLOSE`)
## Phase 7 — private messaging, groups, and MLS
- [x] NIP-17/59 recipient-protected giftwrap read path (`kind:1059`)
- [x] NIP-29 group event policy + relay metadata events
- [x] NIP-43 membership request flow (`28934/28935/28936`, `8000/8001`, `13534`)
- [x] Marmot MIP relay surface: `443`, `445`, `10051` handling
- [x] MLS retention policy + tests for commit race edge cases
## Phase 8 — management API + operations
- [x] NIP-86 HTTP management endpoint
- [x] NIP-98 auth validation for management calls
- [x] Implement supported management methods + audit logging
- [x] Build health/readiness and Prometheus-compatible `/metrics` endpoints
## Phase 9 — full test + hardening pass
- [x] Unit + integration + property test coverage for all critical modules
- [x] End-to-end websocket conformance scenarios
- [x] Load/soak tests with target p95 latency budgets
- [x] Fault-injection tests (DB outages, high churn, restart recovery)
- [x] Final precommit run and fix all issues
## Nice-to-have / backlog
- [x] Multi-node fanout via PG LISTEN/NOTIFY or external bus
- [x] Partitioned event storage + archival strategy
- [x] Alternate storage adapter prototype (non-Postgres)
- [x] Compatibility mode for Marmot protocol transition (not required per user)

View File

@@ -1,61 +0,0 @@
# PROGRESS_MARMOT (ephemeral)
Marmot-specific implementation checklist for Parrhesia relay interoperability.
Spec source: `~/marmot/README.md` + MIP-00..05.
## M0 — spec lock + interoperability profile
- [ ] Freeze target profile to MIP-00..03 (mandatory)
- [ ] Keep MIP-04 and MIP-05 behind feature flags (optional)
- [ ] Document that legacy NIP-EE is superseded and no dedicated transition compatibility mode is planned
- [ ] Publish operator-facing compatibility statement in docs
## M1 — MIP-00 (credentials + keypackages)
- [x] Enforce kind `443` required tags and encoding (`encoding=base64`)
- [x] Validate `mls_protocol_version`, `mls_ciphersuite`, `mls_extensions`, `relays`, and `i` tag shape
- [x] Add efficient `#i` query/index path for KeyPackageRef lookup
- [x] Keep replaceable behavior for kind `10051` relay-list events
- [x] Add conformance tests for valid/invalid KeyPackage envelopes
## M2 — MIP-01 (group construction data expectations)
- [x] Enforce relay-side routing prerequisites for Marmot groups (`#h` query path)
- [x] Keep deterministic ordering for group-event catch-up (`created_at` + `id` tie-break)
- [x] Add guardrails for group metadata traffic volume and filter windows
- [x] Add tests for `#h` routing and ordering invariants
## M3 — MIP-02 (welcome events)
- [x] Support wrapped Welcome delivery via NIP-59 (`1059`) recipient-gated reads
- [x] Validate relay behavior for unsigned inner Welcome semantics (kind `444` envelope expectations)
- [x] Ensure durability/ack semantics support Commit-then-Welcome sequencing requirements
- [x] Add negative tests for malformed wrapped Welcome payloads
## M4 — MIP-03 (group events)
- [x] Enforce kind `445` envelope validation (`#h` tag presence/shape, base64 content shape)
- [x] Keep relay MLS-agnostic (no MLS decrypt/inspect in relay hot path)
- [x] Add configurable retention policy for kind `445` traffic
- [x] Add tests for high-volume fanout behavior and deterministic query results
## M5 — optional MIP-04 (encrypted media)
- [x] Accept/store MIP-04 metadata-bearing events as regular Nostr events
- [x] Add policy hooks for media metadata limits and abuse controls
- [x] Add tests for search/filter interactions with media metadata tags
## M6 — optional MIP-05 (push notifications)
- [x] Accept/store notification coordination events required by enabled profile
- [x] Add policy/rate-limit controls for push-related event traffic
- [x] Add abuse and replay protection tests for notification trigger paths
## M7 — hardening + operations
- [x] Add Marmot-focused telemetry breakdowns (ingest/query/fanout, queue pressure)
- [x] Add query-plan regression checks for `#h` and `#i` heavy workloads
- [x] Add fault-injection scenarios for relay outage/reordering behavior in group flows
- [x] Add docs for operator limits tuned for Marmot traffic patterns
- [x] Final `mix precommit` before merge

342
README.md
View File

@@ -1,11 +1,14 @@
# Parrhesia
<img alt="Parrhesia Logo" src="./docs/logo.svg" width="150" align="right">
Parrhesia is a Nostr relay server written in Elixir/OTP with PostgreSQL storage.
It exposes:
- a WebSocket relay endpoint at `/relay`
- NIP-11 relay info on `GET /relay` with `Accept: application/nostr+json`
- operational HTTP endpoints (`/health`, `/ready`, `/metrics`)
- `/metrics` is restricted by default to private/loopback source IPs
- a NIP-86-style management API at `POST /management` (NIP-98 auth)
## Supported NIPs
@@ -19,6 +22,7 @@ Current `supported_nips` list:
- Elixir `~> 1.19`
- Erlang/OTP 28
- PostgreSQL (18 used in the dev environment; 16+ recommended)
- Docker or Podman plus Docker Compose support if you want to run the published container image
---
@@ -44,19 +48,19 @@ mix setup
mix run --no-halt
```
Server listens on `http://localhost:4000` by default.
Server listens on `http://localhost:4413` by default.
WebSocket clients should connect to:
```text
ws://localhost:4000/relay
ws://localhost:4413/relay
```
### Useful endpoints
- `GET /health` -> `ok`
- `GET /ready` -> readiness status
- `GET /metrics` -> Prometheus metrics
- `GET /metrics` -> Prometheus metrics (private/loopback source IPs by default)
- `GET /relay` + `Accept: application/nostr+json` -> NIP-11 document
- `POST /management` -> management API (requires NIP-98 auth)
@@ -64,65 +68,189 @@ ws://localhost:4000/relay
## Production configuration
### Minimal setup
Before a Nostr client can publish its first event successfully, make sure these pieces are in place:
1. PostgreSQL is reachable from Parrhesia.
Set `DATABASE_URL` and create/migrate the database with `Parrhesia.Release.migrate()` or `mix ecto.migrate`.
2. Parrhesia is reachable behind your reverse proxy.
Parrhesia itself listens on plain HTTP on port `4413`, and the reverse proxy is expected to terminate TLS and forward WebSocket traffic to `/relay`.
3. `:relay_url` matches the public relay URL clients should use.
Set `PARRHESIA_RELAY_URL` to the public relay URL exposed by the reverse proxy.
In the normal deployment model, this should be your public `wss://.../relay` URL.
4. The database schema is migrated before starting normal traffic.
The app image does not auto-run migrations on boot.
That is the actual minimum. With default policy settings, writes do not require auth, event signatures are verified, and no extra Nostr-specific bootstrap step is needed before posting ordinary events.
In `prod`, these environment variables are used:
- `DATABASE_URL` (**required**), e.g. `ecto://USER:PASS@HOST/parrhesia_prod`
- `POOL_SIZE` (optional, default `10`)
- `PORT` (optional, default `4000`)
- `POOL_SIZE` (optional, default `32`)
- `PORT` (optional, default `4413`)
- `PARRHESIA_*` runtime overrides for relay config, limits, policies, metrics, and features
- `PARRHESIA_EXTRA_CONFIG` (optional path to an extra runtime config file)
`config/runtime.exs` reads these values at runtime in production releases.
### Typical relay config
### Runtime env naming
Add/override in config files (for example in `config/prod.exs` or a `config/runtime.exs`):
For runtime overrides, use the `PARRHESIA_...` prefix:
```elixir
config :parrhesia, Parrhesia.Web.Endpoint,
ip: {0, 0, 0, 0},
port: 4000
- `PARRHESIA_RELAY_URL`
- `PARRHESIA_MODERATION_CACHE_ENABLED`
- `PARRHESIA_ENABLE_EXPIRATION_WORKER`
- `PARRHESIA_LIMITS_*`
- `PARRHESIA_POLICIES_*`
- `PARRHESIA_METRICS_*`
- `PARRHESIA_RETENTION_*`
- `PARRHESIA_FEATURES_*`
- `PARRHESIA_METRICS_ENDPOINT_*`
config :parrhesia,
limits: [
max_frame_bytes: 1_048_576,
max_event_bytes: 262_144,
max_filters_per_req: 16,
max_filter_limit: 500,
max_subscriptions_per_connection: 32,
max_event_future_skew_seconds: 900,
max_outbound_queue: 256,
outbound_drain_batch_size: 64,
outbound_overflow_strategy: :close
],
policies: [
auth_required_for_writes: false,
auth_required_for_reads: false,
min_pow_difficulty: 0,
accept_ephemeral_events: true,
mls_group_event_ttl_seconds: 300,
marmot_require_h_for_group_queries: true,
marmot_group_max_h_values_per_filter: 32,
marmot_group_max_query_window_seconds: 2_592_000,
marmot_media_max_imeta_tags_per_event: 8,
marmot_media_max_field_value_bytes: 1024,
marmot_media_max_url_bytes: 2048,
marmot_media_allowed_mime_prefixes: [],
marmot_media_reject_mip04_v1: true,
marmot_push_server_pubkeys: [],
marmot_push_max_relay_tags: 16,
marmot_push_max_payload_bytes: 65_536,
marmot_push_max_trigger_age_seconds: 120,
marmot_push_require_expiration: true,
marmot_push_max_expiration_window_seconds: 120,
marmot_push_max_server_recipients: 1
],
features: [
nip_45_count: true,
nip_50_search: true,
nip_77_negentropy: true,
marmot_push_notifications: false
]
Examples:
```bash
export PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES=true
export PARRHESIA_FEATURES_VERIFY_EVENT_SIGNATURES=true
export PARRHESIA_METRICS_ALLOWED_CIDRS="10.0.0.0/8,192.168.0.0/16"
export PARRHESIA_LIMITS_OUTBOUND_OVERFLOW_STRATEGY=drop_oldest
```
For settings that are awkward to express as env vars, mount an extra config file and set `PARRHESIA_EXTRA_CONFIG` to its path inside the container.
### Config reference
CSV env vars use comma-separated values. Boolean env vars accept `1/0`, `true/false`, `yes/no`, or `on/off`.
#### Top-level `:parrhesia`
| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:relay_url` | `PARRHESIA_RELAY_URL` | `ws://localhost:4413/relay` | Advertised relay URL and auth relay tag target |
| `:moderation_cache_enabled` | `PARRHESIA_MODERATION_CACHE_ENABLED` | `true` | Toggle moderation cache |
| `:enable_expiration_worker` | `PARRHESIA_ENABLE_EXPIRATION_WORKER` | `true` | Toggle background expiration worker |
| `:limits` | `PARRHESIA_LIMITS_*` | see table below | Runtime override group |
| `:policies` | `PARRHESIA_POLICIES_*` | see table below | Runtime override group |
| `:metrics` | `PARRHESIA_METRICS_*` | see table below | Runtime override group |
| `:retention` | `PARRHESIA_RETENTION_*` | see table below | Partition lifecycle and pruning policy |
| `:features` | `PARRHESIA_FEATURES_*` | see table below | Runtime override group |
| `:storage.events` | `-` | `Parrhesia.Storage.Adapters.Postgres.Events` | Config-file override only |
| `:storage.moderation` | `-` | `Parrhesia.Storage.Adapters.Postgres.Moderation` | Config-file override only |
| `:storage.groups` | `-` | `Parrhesia.Storage.Adapters.Postgres.Groups` | Config-file override only |
| `:storage.admin` | `-` | `Parrhesia.Storage.Adapters.Postgres.Admin` | Config-file override only |
#### `Parrhesia.Repo`
| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:url` | `DATABASE_URL` | required | Example: `ecto://USER:PASS@HOST/DATABASE` |
| `:pool_size` | `POOL_SIZE` | `32` | DB connection pool size |
| `:queue_target` | `DB_QUEUE_TARGET_MS` | `1000` | Ecto queue target in ms |
| `:queue_interval` | `DB_QUEUE_INTERVAL_MS` | `5000` | Ecto queue interval in ms |
| `:types` | `-` | `Parrhesia.PostgresTypes` | Internal config-file setting |
#### `Parrhesia.Web.Endpoint`
| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:port` | `PORT` | `4413` | Main HTTP/WebSocket listener |
#### `Parrhesia.Web.MetricsEndpoint`
| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:enabled` | `PARRHESIA_METRICS_ENDPOINT_ENABLED` | `false` | Enables dedicated metrics listener |
| `:ip` | `PARRHESIA_METRICS_ENDPOINT_IP` | `127.0.0.1` | IPv4 only |
| `:port` | `PARRHESIA_METRICS_ENDPOINT_PORT` | `9568` | Dedicated metrics port |
#### `:limits`
| Atom key | ENV | Default |
| --- | --- | --- |
| `:max_frame_bytes` | `PARRHESIA_LIMITS_MAX_FRAME_BYTES` | `1048576` |
| `:max_event_bytes` | `PARRHESIA_LIMITS_MAX_EVENT_BYTES` | `262144` |
| `:max_filters_per_req` | `PARRHESIA_LIMITS_MAX_FILTERS_PER_REQ` | `16` |
| `:max_filter_limit` | `PARRHESIA_LIMITS_MAX_FILTER_LIMIT` | `500` |
| `:max_subscriptions_per_connection` | `PARRHESIA_LIMITS_MAX_SUBSCRIPTIONS_PER_CONNECTION` | `32` |
| `:max_event_future_skew_seconds` | `PARRHESIA_LIMITS_MAX_EVENT_FUTURE_SKEW_SECONDS` | `900` |
| `:max_event_ingest_per_window` | `PARRHESIA_LIMITS_MAX_EVENT_INGEST_PER_WINDOW` | `120` |
| `:event_ingest_window_seconds` | `PARRHESIA_LIMITS_EVENT_INGEST_WINDOW_SECONDS` | `1` |
| `:auth_max_age_seconds` | `PARRHESIA_LIMITS_AUTH_MAX_AGE_SECONDS` | `600` |
| `:max_outbound_queue` | `PARRHESIA_LIMITS_MAX_OUTBOUND_QUEUE` | `256` |
| `:outbound_drain_batch_size` | `PARRHESIA_LIMITS_OUTBOUND_DRAIN_BATCH_SIZE` | `64` |
| `:outbound_overflow_strategy` | `PARRHESIA_LIMITS_OUTBOUND_OVERFLOW_STRATEGY` | `:close` |
| `:max_negentropy_payload_bytes` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_PAYLOAD_BYTES` | `4096` |
| `:max_negentropy_sessions_per_connection` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_SESSIONS_PER_CONNECTION` | `8` |
| `:max_negentropy_total_sessions` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_TOTAL_SESSIONS` | `10000` |
| `:negentropy_session_idle_timeout_seconds` | `PARRHESIA_LIMITS_NEGENTROPY_SESSION_IDLE_TIMEOUT_SECONDS` | `60` |
| `:negentropy_session_sweep_interval_seconds` | `PARRHESIA_LIMITS_NEGENTROPY_SESSION_SWEEP_INTERVAL_SECONDS` | `10` |
#### `:policies`
| Atom key | ENV | Default |
| --- | --- | --- |
| `:auth_required_for_writes` | `PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES` | `false` |
| `:auth_required_for_reads` | `PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_READS` | `false` |
| `:min_pow_difficulty` | `PARRHESIA_POLICIES_MIN_POW_DIFFICULTY` | `0` |
| `:accept_ephemeral_events` | `PARRHESIA_POLICIES_ACCEPT_EPHEMERAL_EVENTS` | `true` |
| `:mls_group_event_ttl_seconds` | `PARRHESIA_POLICIES_MLS_GROUP_EVENT_TTL_SECONDS` | `300` |
| `:marmot_require_h_for_group_queries` | `PARRHESIA_POLICIES_MARMOT_REQUIRE_H_FOR_GROUP_QUERIES` | `true` |
| `:marmot_group_max_h_values_per_filter` | `PARRHESIA_POLICIES_MARMOT_GROUP_MAX_H_VALUES_PER_FILTER` | `32` |
| `:marmot_group_max_query_window_seconds` | `PARRHESIA_POLICIES_MARMOT_GROUP_MAX_QUERY_WINDOW_SECONDS` | `2592000` |
| `:marmot_media_max_imeta_tags_per_event` | `PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_IMETA_TAGS_PER_EVENT` | `8` |
| `:marmot_media_max_field_value_bytes` | `PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_FIELD_VALUE_BYTES` | `1024` |
| `:marmot_media_max_url_bytes` | `PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_URL_BYTES` | `2048` |
| `:marmot_media_allowed_mime_prefixes` | `PARRHESIA_POLICIES_MARMOT_MEDIA_ALLOWED_MIME_PREFIXES` | `[]` |
| `:marmot_media_reject_mip04_v1` | `PARRHESIA_POLICIES_MARMOT_MEDIA_REJECT_MIP04_V1` | `true` |
| `:marmot_push_server_pubkeys` | `PARRHESIA_POLICIES_MARMOT_PUSH_SERVER_PUBKEYS` | `[]` |
| `:marmot_push_max_relay_tags` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_RELAY_TAGS` | `16` |
| `:marmot_push_max_payload_bytes` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_PAYLOAD_BYTES` | `65536` |
| `:marmot_push_max_trigger_age_seconds` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_TRIGGER_AGE_SECONDS` | `120` |
| `:marmot_push_require_expiration` | `PARRHESIA_POLICIES_MARMOT_PUSH_REQUIRE_EXPIRATION` | `true` |
| `:marmot_push_max_expiration_window_seconds` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_EXPIRATION_WINDOW_SECONDS` | `120` |
| `:marmot_push_max_server_recipients` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_SERVER_RECIPIENTS` | `1` |
| `:management_auth_required` | `PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED` | `true` |
#### `:metrics`
| Atom key | ENV | Default |
| --- | --- | --- |
| `:enabled_on_main_endpoint` | `PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT` | `true` |
| `:public` | `PARRHESIA_METRICS_PUBLIC` | `false` |
| `:private_networks_only` | `PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY` | `true` |
| `:allowed_cidrs` | `PARRHESIA_METRICS_ALLOWED_CIDRS` | `[]` |
| `:auth_token` | `PARRHESIA_METRICS_AUTH_TOKEN` | `nil` |
#### `:retention`
| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:check_interval_hours` | `PARRHESIA_RETENTION_CHECK_INTERVAL_HOURS` | `24` | Partition maintenance + pruning cadence |
| `:months_ahead` | `PARRHESIA_RETENTION_MONTHS_AHEAD` | `2` | Pre-create current month plus N future monthly partitions for `events` and `event_tags` |
| `:max_db_bytes` | `PARRHESIA_RETENTION_MAX_DB_BYTES` | `:infinity` | Interpreted as GiB threshold; accepts integer or `infinity` |
| `:max_months_to_keep` | `PARRHESIA_RETENTION_MAX_MONTHS_TO_KEEP` | `:infinity` | Keep at most N months (including current month); accepts integer or `infinity` |
| `:max_partitions_to_drop_per_run` | `PARRHESIA_RETENTION_MAX_PARTITIONS_TO_DROP_PER_RUN` | `1` | Safety cap for each maintenance run |
#### `:features`
| Atom key | ENV | Default |
| --- | --- | --- |
| `:verify_event_signatures` | `PARRHESIA_FEATURES_VERIFY_EVENT_SIGNATURES` | `true` |
| `:nip_45_count` | `PARRHESIA_FEATURES_NIP_45_COUNT` | `true` |
| `:nip_50_search` | `PARRHESIA_FEATURES_NIP_50_SEARCH` | `true` |
| `:nip_77_negentropy` | `PARRHESIA_FEATURES_NIP_77_NEGENTROPY` | `true` |
| `:marmot_push_notifications` | `PARRHESIA_FEATURES_MARMOT_PUSH_NOTIFICATIONS` | `false` |
#### Extra runtime config
| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| extra runtime config file | `PARRHESIA_EXTRA_CONFIG` | unset | Imports an additional runtime `.exs` file |
---
## Deploy
@@ -136,15 +264,15 @@ export POOL_SIZE=20
mix deps.get --only prod
mix compile
mix ecto.migrate
mix release
_build/prod/rel/parrhesia/bin/parrhesia eval "Parrhesia.Release.migrate()"
_build/prod/rel/parrhesia/bin/parrhesia foreground
```
For systemd/process managers, run the release command in foreground mode.
### Option B: Nix package (`default.nix`)
### Option B: Nix release package (`default.nix`)
Build:
@@ -154,6 +282,110 @@ nix-build
Run the built release from `./result/bin/parrhesia` (release command interface).
### Option C: Docker image via Nix flake
Build the image tarball:
```bash
nix build .#dockerImage
# or with explicit build target:
nix build .#packages.x86_64-linux.dockerImage
```
Load it into Docker:
```bash
docker load < result
```
Run database migrations:
```bash
docker run --rm \
-e DATABASE_URL="ecto://USER:PASS@HOST/parrhesia_prod" \
parrhesia:latest \
eval "Parrhesia.Release.migrate()"
```
Start the relay:
```bash
docker run --rm \
-p 4413:4413 \
-e DATABASE_URL="ecto://USER:PASS@HOST/parrhesia_prod" \
-e POOL_SIZE=20 \
parrhesia:latest
```
### Option D: Docker Compose with PostgreSQL
The repo includes [`compose.yaml`](./compose.yaml) and [`.env.example`](./.env.example) so Docker users can run Postgres and Parrhesia together.
Set up the environment file:
```bash
cp .env.example .env
```
If you are building locally from source, build and load the image first:
```bash
nix build .#dockerImage
docker load < result
```
Then start the stack:
```bash
docker compose up -d db
docker compose run --rm migrate
docker compose up -d parrhesia
```
The relay will be available on:
```text
ws://localhost:4413/relay
```
Notes:
- `compose.yaml` keeps PostgreSQL in a separate container; the Parrhesia image only runs the app release.
- The container listens on port `4413`; use `PARRHESIA_HOST_PORT` if you want a different published host port.
- Migrations are run explicitly through the one-shot `migrate` service instead of on every app boot.
- Common runtime overrides can go straight into `.env`; see [`.env.example`](./.env.example) for examples.
- For more specialized overrides, mount a file and set `PARRHESIA_EXTRA_CONFIG=/path/in/container/runtime.exs`.
- When a GHCR image is published, set `PARRHESIA_IMAGE=ghcr.io/<owner>/parrhesia:<tag>` in `.env` and reuse the same compose flow.
---
## Benchmark
The benchmark compares Parrhesia against [`strfry`](https://github.com/hoytech/strfry) and [`nostr-rs-relay`](https://sr.ht/~gheartsfield/nostr-rs-relay/) using [`nostr-bench`](https://github.com/rnostr/nostr-bench).
Run it with:
```bash
mix bench
```
Current comparison results from [BENCHMARK.md](./BENCHMARK.md):
| metric | parrhesia | strfry | nostr-rs-relay | strfry/parrhesia | nostr-rs/parrhesia |
| --- | ---: | ---: | ---: | ---: | ---: |
| connect avg latency (ms) ↓ | 13.50 | 3.00 | 2.00 | **0.22x** | **0.15x** |
| connect max latency (ms) ↓ | 22.50 | 5.50 | 3.00 | **0.24x** | **0.13x** |
| echo throughput (TPS) ↑ | 80385.00 | 61673.00 | 164516.00 | 0.77x | **2.05x** |
| echo throughput (MiB/s) ↑ | 44.00 | 34.45 | 90.10 | 0.78x | **2.05x** |
| event throughput (TPS) ↑ | 2000.00 | 3404.50 | 788.00 | **1.70x** | 0.39x |
| event throughput (MiB/s) ↑ | 1.30 | 2.20 | 0.50 | **1.69x** | 0.38x |
| req throughput (TPS) ↑ | 3664.00 | 1808.50 | 877.50 | 0.49x | 0.24x |
| req throughput (MiB/s) ↑ | 20.75 | 11.75 | 2.45 | 0.57x | 0.12x |
Higher is better for `↑` metrics. Lower is better for `↓` metrics.
(Results from a Linux container on a 6-core Intel i5-8400T with NVMe drive, PostgreSQL 18)
---
## Development quality checks
@@ -164,13 +396,13 @@ Before opening a PR:
mix precommit
```
For external CLI end-to-end checks with `nak`:
Additional external CLI end-to-end checks with `nak`:
```bash
mix test.nak_e2e
```
For Marmot client end-to-end checks (TypeScript/Node suite using `marmot-ts`):
For Marmot client end-to-end checks (TypeScript/Node suite using `marmot-ts`, included in `precommit`):
```bash
mix test.marmot_e2e

42
compose.yaml Normal file
View File

@@ -0,0 +1,42 @@
# Compose stack for running Parrhesia against a PostgreSQL backing store.
# Common overrides (image tag, host port, credentials) are read from the
# environment, typically via an `.env` file.
services:
  # PostgreSQL database; the app and migrate services wait on its healthcheck.
  db:
    image: postgres:17
    restart: unless-stopped
    environment:
      POSTGRES_DB: ${POSTGRES_DB:-parrhesia}
      POSTGRES_USER: ${POSTGRES_USER:-parrhesia}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-parrhesia}
    healthcheck:
      # $$ escapes the dollar sign so the variables expand inside the container.
      test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
      interval: 5s
      timeout: 5s
      retries: 12
    volumes:
      - postgres-data:/var/lib/postgresql/data
  # One-shot migration runner; started explicitly via the "tools" profile
  # instead of migrating on every app boot.
  migrate:
    image: ${PARRHESIA_IMAGE:-parrhesia:latest}
    profiles: ["tools"]
    restart: "no"
    depends_on:
      db:
        condition: service_healthy
    environment:
      DATABASE_URL: ${DATABASE_URL:-ecto://parrhesia:parrhesia@db:5432/parrhesia}
      POOL_SIZE: ${POOL_SIZE:-20}
    command: ["eval", "Parrhesia.Release.migrate()"]
  # The relay itself; container listens on 4413, published on PARRHESIA_HOST_PORT.
  parrhesia:
    image: ${PARRHESIA_IMAGE:-parrhesia:latest}
    restart: unless-stopped
    depends_on:
      db:
        condition: service_healthy
    environment:
      DATABASE_URL: ${DATABASE_URL:-ecto://parrhesia:parrhesia@db:5432/parrhesia}
      POOL_SIZE: ${POOL_SIZE:-20}
    ports:
      - "${PARRHESIA_HOST_PORT:-4413}:4413"
# Named volume so PostgreSQL data survives container recreation.
volumes:
  postgres-data:

View File

@@ -1,6 +1,10 @@
import Config
config :postgrex, :json_library, JSON
config :parrhesia,
moderation_cache_enabled: true,
relay_url: "ws://localhost:4413/relay",
limits: [
max_frame_bytes: 1_048_576,
max_event_bytes: 262_144,
@@ -8,9 +12,17 @@ config :parrhesia,
max_filter_limit: 500,
max_subscriptions_per_connection: 32,
max_event_future_skew_seconds: 900,
max_event_ingest_per_window: 120,
event_ingest_window_seconds: 1,
auth_max_age_seconds: 600,
max_outbound_queue: 256,
outbound_drain_batch_size: 64,
outbound_overflow_strategy: :close
outbound_overflow_strategy: :close,
max_negentropy_payload_bytes: 4096,
max_negentropy_sessions_per_connection: 8,
max_negentropy_total_sessions: 10_000,
negentropy_session_idle_timeout_seconds: 60,
negentropy_session_sweep_interval_seconds: 10
],
policies: [
auth_required_for_writes: false,
@@ -35,7 +47,22 @@ config :parrhesia,
marmot_push_max_server_recipients: 1,
management_auth_required: true
],
metrics: [
enabled_on_main_endpoint: true,
public: false,
private_networks_only: true,
allowed_cidrs: [],
auth_token: nil
],
retention: [
check_interval_hours: 24,
months_ahead: 2,
max_db_bytes: :infinity,
max_months_to_keep: :infinity,
max_partitions_to_drop_per_run: 1
],
features: [
verify_event_signatures: true,
nip_45_count: true,
nip_50_search: true,
nip_77_negentropy: true,
@@ -48,7 +75,14 @@ config :parrhesia,
admin: Parrhesia.Storage.Adapters.Postgres.Admin
]
config :parrhesia, Parrhesia.Web.Endpoint, port: 4000
config :parrhesia, Parrhesia.Web.Endpoint, port: 4413
config :parrhesia, Parrhesia.Web.MetricsEndpoint,
enabled: false,
ip: {127, 0, 0, 1},
port: 9568
config :parrhesia, Parrhesia.Repo, types: Parrhesia.PostgresTypes
config :parrhesia, ecto_repos: [Parrhesia.Repo]

View File

@@ -1,3 +1,8 @@
import Config
config :parrhesia, Parrhesia.Repo,
pool_size: 32,
queue_target: 1_000,
queue_interval: 5_000
# Production runtime configuration lives in config/runtime.exs.

View File

@@ -1,14 +1,459 @@
import Config
# Reads environment variable `name` as a string.
# Unset or empty values fall back to `default`.
string_env = fn name, default ->
  value = System.get_env(name)

  if value in [nil, ""] do
    default
  else
    value
  end
end
# Reads environment variable `name` as a base-10 integer, falling back to
# `default` when unset. Raises a descriptive error (instead of a bare
# `ArgumentError` from `String.to_integer/1`) when the value is not an
# integer, matching the error style of the other *_env helpers. Surrounding
# whitespace is tolerated for consistency with infinity_or_int_env.
int_env = fn name, default ->
  case System.get_env(name) do
    nil ->
      default

    value ->
      case Integer.parse(String.trim(value)) do
        {parsed, ""} -> parsed
        _other -> raise "environment variable #{name} must be an integer"
      end
  end
end
# Reads environment variable `name` as a boolean, falling back to `default`
# when unset. Accepts the usual truthy/falsy spellings case-insensitively
# and raises on anything else.
bool_env = fn name, default ->
  case System.get_env(name) do
    nil ->
      default

    value ->
      normalized = String.downcase(value)

      cond do
        normalized in ~w(1 true yes on) -> true
        normalized in ~w(0 false no off) -> false
        true -> raise "environment variable #{name} must be a boolean value"
      end
  end
end
# Parses a comma-separated environment variable `name` into a list of
# trimmed, non-empty strings; returns `default` when the variable is unset.
csv_env = fn name, default ->
  case System.get_env(name) do
    nil ->
      default

    value ->
      for part <- String.split(value, ",", trim: true),
          trimmed = String.trim(part),
          trimmed != "",
          do: trimmed
  end
end
# Reads environment variable `name` as either the atom `:infinity`
# (case-insensitive) or a base-10 integer, falling back to `default` when
# unset. The original parsed the *untrimmed* value in the integer branch, so
# " 42 " crashed while " Infinity " worked; both branches now use the trimmed
# value for consistency.
infinity_or_int_env = fn name, default ->
  case System.get_env(name) do
    nil ->
      default

    value ->
      trimmed = String.trim(value)

      if String.downcase(trimmed) == "infinity" do
        :infinity
      else
        String.to_integer(trimmed)
      end
  end
end
# Reads environment variable `name` as an outbound-queue overflow strategy
# atom, falling back to `default` when unset. Only the three known strategy
# names are accepted; anything else raises.
outbound_overflow_strategy_env = fn name, default ->
  strategies = %{"close" => :close, "drop_oldest" => :drop_oldest, "drop_newest" => :drop_newest}

  case System.get_env(name) do
    nil ->
      default

    value ->
      Map.get(strategies, value) ||
        raise "environment variable #{name} must be one of: close, drop_oldest, drop_newest"
  end
end
# Reads environment variable `name` as a dotted-quad IPv4 address and returns
# it as a 4-tuple of octets, falling back to `default` when unset. The
# original called `String.to_integer/1` on each part, so non-numeric input
# (e.g. "a.b.c.d", or "1.2.3.4.5" whose fourth split part is "4.5") raised a
# bare ArgumentError instead of the intended descriptive error; octets are now
# parsed with `Integer.parse/1` so every malformed input hits the same raise.
ipv4_env = fn name, default ->
  case System.get_env(name) do
    nil ->
      default

    value ->
      octets =
        for part <- String.split(value, ".", parts: 4) do
          case Integer.parse(part) do
            {octet, ""} when octet in 0..255 -> octet
            _other -> :invalid
          end
        end

      case octets do
        [a, b, c, d] when a != :invalid and b != :invalid and c != :invalid and d != :invalid ->
          {a, b, c, d}

        _other ->
          raise "environment variable #{name} must be a valid IPv4 address"
      end
  end
end
if config_env() == :prod do
database_url =
System.get_env("DATABASE_URL") ||
raise "environment variable DATABASE_URL is missing. Example: ecto://USER:PASS@HOST/DATABASE"
repo_defaults = Application.get_env(:parrhesia, Parrhesia.Repo, [])
relay_url_default = Application.get_env(:parrhesia, :relay_url)
moderation_cache_enabled_default =
Application.get_env(:parrhesia, :moderation_cache_enabled, true)
enable_expiration_worker_default =
Application.get_env(:parrhesia, :enable_expiration_worker, true)
limits_defaults = Application.get_env(:parrhesia, :limits, [])
policies_defaults = Application.get_env(:parrhesia, :policies, [])
metrics_defaults = Application.get_env(:parrhesia, :metrics, [])
retention_defaults = Application.get_env(:parrhesia, :retention, [])
features_defaults = Application.get_env(:parrhesia, :features, [])
metrics_endpoint_defaults = Application.get_env(:parrhesia, Parrhesia.Web.MetricsEndpoint, [])
default_pool_size = Keyword.get(repo_defaults, :pool_size, 32)
default_queue_target = Keyword.get(repo_defaults, :queue_target, 1_000)
default_queue_interval = Keyword.get(repo_defaults, :queue_interval, 5_000)
pool_size = int_env.("POOL_SIZE", default_pool_size)
queue_target = int_env.("DB_QUEUE_TARGET_MS", default_queue_target)
queue_interval = int_env.("DB_QUEUE_INTERVAL_MS", default_queue_interval)
limits = [
max_frame_bytes:
int_env.(
"PARRHESIA_LIMITS_MAX_FRAME_BYTES",
Keyword.get(limits_defaults, :max_frame_bytes, 1_048_576)
),
max_event_bytes:
int_env.(
"PARRHESIA_LIMITS_MAX_EVENT_BYTES",
Keyword.get(limits_defaults, :max_event_bytes, 262_144)
),
max_filters_per_req:
int_env.(
"PARRHESIA_LIMITS_MAX_FILTERS_PER_REQ",
Keyword.get(limits_defaults, :max_filters_per_req, 16)
),
max_filter_limit:
int_env.(
"PARRHESIA_LIMITS_MAX_FILTER_LIMIT",
Keyword.get(limits_defaults, :max_filter_limit, 500)
),
max_subscriptions_per_connection:
int_env.(
"PARRHESIA_LIMITS_MAX_SUBSCRIPTIONS_PER_CONNECTION",
Keyword.get(limits_defaults, :max_subscriptions_per_connection, 32)
),
max_event_future_skew_seconds:
int_env.(
"PARRHESIA_LIMITS_MAX_EVENT_FUTURE_SKEW_SECONDS",
Keyword.get(limits_defaults, :max_event_future_skew_seconds, 900)
),
max_event_ingest_per_window:
int_env.(
"PARRHESIA_LIMITS_MAX_EVENT_INGEST_PER_WINDOW",
Keyword.get(limits_defaults, :max_event_ingest_per_window, 120)
),
event_ingest_window_seconds:
int_env.(
"PARRHESIA_LIMITS_EVENT_INGEST_WINDOW_SECONDS",
Keyword.get(limits_defaults, :event_ingest_window_seconds, 1)
),
auth_max_age_seconds:
int_env.(
"PARRHESIA_LIMITS_AUTH_MAX_AGE_SECONDS",
Keyword.get(limits_defaults, :auth_max_age_seconds, 600)
),
max_outbound_queue:
int_env.(
"PARRHESIA_LIMITS_MAX_OUTBOUND_QUEUE",
Keyword.get(limits_defaults, :max_outbound_queue, 256)
),
outbound_drain_batch_size:
int_env.(
"PARRHESIA_LIMITS_OUTBOUND_DRAIN_BATCH_SIZE",
Keyword.get(limits_defaults, :outbound_drain_batch_size, 64)
),
outbound_overflow_strategy:
outbound_overflow_strategy_env.(
"PARRHESIA_LIMITS_OUTBOUND_OVERFLOW_STRATEGY",
Keyword.get(limits_defaults, :outbound_overflow_strategy, :close)
),
max_negentropy_payload_bytes:
int_env.(
"PARRHESIA_LIMITS_MAX_NEGENTROPY_PAYLOAD_BYTES",
Keyword.get(limits_defaults, :max_negentropy_payload_bytes, 4096)
),
max_negentropy_sessions_per_connection:
int_env.(
"PARRHESIA_LIMITS_MAX_NEGENTROPY_SESSIONS_PER_CONNECTION",
Keyword.get(limits_defaults, :max_negentropy_sessions_per_connection, 8)
),
max_negentropy_total_sessions:
int_env.(
"PARRHESIA_LIMITS_MAX_NEGENTROPY_TOTAL_SESSIONS",
Keyword.get(limits_defaults, :max_negentropy_total_sessions, 10_000)
),
negentropy_session_idle_timeout_seconds:
int_env.(
"PARRHESIA_LIMITS_NEGENTROPY_SESSION_IDLE_TIMEOUT_SECONDS",
Keyword.get(limits_defaults, :negentropy_session_idle_timeout_seconds, 60)
),
negentropy_session_sweep_interval_seconds:
int_env.(
"PARRHESIA_LIMITS_NEGENTROPY_SESSION_SWEEP_INTERVAL_SECONDS",
Keyword.get(limits_defaults, :negentropy_session_sweep_interval_seconds, 10)
)
]
policies = [
auth_required_for_writes:
bool_env.(
"PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES",
Keyword.get(policies_defaults, :auth_required_for_writes, false)
),
auth_required_for_reads:
bool_env.(
"PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_READS",
Keyword.get(policies_defaults, :auth_required_for_reads, false)
),
min_pow_difficulty:
int_env.(
"PARRHESIA_POLICIES_MIN_POW_DIFFICULTY",
Keyword.get(policies_defaults, :min_pow_difficulty, 0)
),
accept_ephemeral_events:
bool_env.(
"PARRHESIA_POLICIES_ACCEPT_EPHEMERAL_EVENTS",
Keyword.get(policies_defaults, :accept_ephemeral_events, true)
),
mls_group_event_ttl_seconds:
int_env.(
"PARRHESIA_POLICIES_MLS_GROUP_EVENT_TTL_SECONDS",
Keyword.get(policies_defaults, :mls_group_event_ttl_seconds, 300)
),
marmot_require_h_for_group_queries:
bool_env.(
"PARRHESIA_POLICIES_MARMOT_REQUIRE_H_FOR_GROUP_QUERIES",
Keyword.get(policies_defaults, :marmot_require_h_for_group_queries, true)
),
marmot_group_max_h_values_per_filter:
int_env.(
"PARRHESIA_POLICIES_MARMOT_GROUP_MAX_H_VALUES_PER_FILTER",
Keyword.get(policies_defaults, :marmot_group_max_h_values_per_filter, 32)
),
marmot_group_max_query_window_seconds:
int_env.(
"PARRHESIA_POLICIES_MARMOT_GROUP_MAX_QUERY_WINDOW_SECONDS",
Keyword.get(policies_defaults, :marmot_group_max_query_window_seconds, 2_592_000)
),
marmot_media_max_imeta_tags_per_event:
int_env.(
"PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_IMETA_TAGS_PER_EVENT",
Keyword.get(policies_defaults, :marmot_media_max_imeta_tags_per_event, 8)
),
marmot_media_max_field_value_bytes:
int_env.(
"PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_FIELD_VALUE_BYTES",
Keyword.get(policies_defaults, :marmot_media_max_field_value_bytes, 1024)
),
marmot_media_max_url_bytes:
int_env.(
"PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_URL_BYTES",
Keyword.get(policies_defaults, :marmot_media_max_url_bytes, 2048)
),
marmot_media_allowed_mime_prefixes:
csv_env.(
"PARRHESIA_POLICIES_MARMOT_MEDIA_ALLOWED_MIME_PREFIXES",
Keyword.get(policies_defaults, :marmot_media_allowed_mime_prefixes, [])
),
marmot_media_reject_mip04_v1:
bool_env.(
"PARRHESIA_POLICIES_MARMOT_MEDIA_REJECT_MIP04_V1",
Keyword.get(policies_defaults, :marmot_media_reject_mip04_v1, true)
),
marmot_push_server_pubkeys:
csv_env.(
"PARRHESIA_POLICIES_MARMOT_PUSH_SERVER_PUBKEYS",
Keyword.get(policies_defaults, :marmot_push_server_pubkeys, [])
),
marmot_push_max_relay_tags:
int_env.(
"PARRHESIA_POLICIES_MARMOT_PUSH_MAX_RELAY_TAGS",
Keyword.get(policies_defaults, :marmot_push_max_relay_tags, 16)
),
marmot_push_max_payload_bytes:
int_env.(
"PARRHESIA_POLICIES_MARMOT_PUSH_MAX_PAYLOAD_BYTES",
Keyword.get(policies_defaults, :marmot_push_max_payload_bytes, 65_536)
),
marmot_push_max_trigger_age_seconds:
int_env.(
"PARRHESIA_POLICIES_MARMOT_PUSH_MAX_TRIGGER_AGE_SECONDS",
Keyword.get(policies_defaults, :marmot_push_max_trigger_age_seconds, 120)
),
marmot_push_require_expiration:
bool_env.(
"PARRHESIA_POLICIES_MARMOT_PUSH_REQUIRE_EXPIRATION",
Keyword.get(policies_defaults, :marmot_push_require_expiration, true)
),
marmot_push_max_expiration_window_seconds:
int_env.(
"PARRHESIA_POLICIES_MARMOT_PUSH_MAX_EXPIRATION_WINDOW_SECONDS",
Keyword.get(policies_defaults, :marmot_push_max_expiration_window_seconds, 120)
),
marmot_push_max_server_recipients:
int_env.(
"PARRHESIA_POLICIES_MARMOT_PUSH_MAX_SERVER_RECIPIENTS",
Keyword.get(policies_defaults, :marmot_push_max_server_recipients, 1)
),
management_auth_required:
bool_env.(
"PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED",
Keyword.get(policies_defaults, :management_auth_required, true)
)
]
metrics = [
enabled_on_main_endpoint:
bool_env.(
"PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT",
Keyword.get(metrics_defaults, :enabled_on_main_endpoint, true)
),
public:
bool_env.(
"PARRHESIA_METRICS_PUBLIC",
Keyword.get(metrics_defaults, :public, false)
),
private_networks_only:
bool_env.(
"PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY",
Keyword.get(metrics_defaults, :private_networks_only, true)
),
allowed_cidrs:
csv_env.(
"PARRHESIA_METRICS_ALLOWED_CIDRS",
Keyword.get(metrics_defaults, :allowed_cidrs, [])
),
auth_token:
string_env.(
"PARRHESIA_METRICS_AUTH_TOKEN",
Keyword.get(metrics_defaults, :auth_token)
)
]
retention = [
check_interval_hours:
int_env.(
"PARRHESIA_RETENTION_CHECK_INTERVAL_HOURS",
Keyword.get(retention_defaults, :check_interval_hours, 24)
),
months_ahead:
int_env.(
"PARRHESIA_RETENTION_MONTHS_AHEAD",
Keyword.get(retention_defaults, :months_ahead, 2)
),
max_db_bytes:
infinity_or_int_env.(
"PARRHESIA_RETENTION_MAX_DB_BYTES",
Keyword.get(retention_defaults, :max_db_bytes, :infinity)
),
max_months_to_keep:
infinity_or_int_env.(
"PARRHESIA_RETENTION_MAX_MONTHS_TO_KEEP",
Keyword.get(retention_defaults, :max_months_to_keep, :infinity)
),
max_partitions_to_drop_per_run:
int_env.(
"PARRHESIA_RETENTION_MAX_PARTITIONS_TO_DROP_PER_RUN",
Keyword.get(retention_defaults, :max_partitions_to_drop_per_run, 1)
)
]
features = [
verify_event_signatures:
bool_env.(
"PARRHESIA_FEATURES_VERIFY_EVENT_SIGNATURES",
Keyword.get(features_defaults, :verify_event_signatures, true)
),
nip_45_count:
bool_env.(
"PARRHESIA_FEATURES_NIP_45_COUNT",
Keyword.get(features_defaults, :nip_45_count, true)
),
nip_50_search:
bool_env.(
"PARRHESIA_FEATURES_NIP_50_SEARCH",
Keyword.get(features_defaults, :nip_50_search, true)
),
nip_77_negentropy:
bool_env.(
"PARRHESIA_FEATURES_NIP_77_NEGENTROPY",
Keyword.get(features_defaults, :nip_77_negentropy, true)
),
marmot_push_notifications:
bool_env.(
"PARRHESIA_FEATURES_MARMOT_PUSH_NOTIFICATIONS",
Keyword.get(features_defaults, :marmot_push_notifications, false)
)
]
config :parrhesia, Parrhesia.Repo,
url: database_url,
pool_size: String.to_integer(System.get_env("POOL_SIZE") || "10")
pool_size: pool_size,
queue_target: queue_target,
queue_interval: queue_interval
config :parrhesia, Parrhesia.Web.Endpoint,
port: String.to_integer(System.get_env("PORT") || "4000")
config :parrhesia, Parrhesia.Web.Endpoint, port: int_env.("PORT", 4413)
config :parrhesia, Parrhesia.Web.MetricsEndpoint,
enabled:
bool_env.(
"PARRHESIA_METRICS_ENDPOINT_ENABLED",
Keyword.get(metrics_endpoint_defaults, :enabled, false)
),
ip:
ipv4_env.(
"PARRHESIA_METRICS_ENDPOINT_IP",
Keyword.get(metrics_endpoint_defaults, :ip, {127, 0, 0, 1})
),
port:
int_env.(
"PARRHESIA_METRICS_ENDPOINT_PORT",
Keyword.get(metrics_endpoint_defaults, :port, 9568)
)
config :parrhesia,
relay_url: string_env.("PARRHESIA_RELAY_URL", relay_url_default),
moderation_cache_enabled:
bool_env.("PARRHESIA_MODERATION_CACHE_ENABLED", moderation_cache_enabled_default),
enable_expiration_worker:
bool_env.("PARRHESIA_ENABLE_EXPIRATION_WORKER", enable_expiration_worker_default),
limits: limits,
policies: policies,
metrics: metrics,
retention: retention,
features: features
case System.get_env("PARRHESIA_EXTRA_CONFIG") do
nil -> :ok
"" -> :ok
path -> import_config path
end
end

View File

@@ -12,7 +12,10 @@ config :parrhesia, Parrhesia.Web.Endpoint,
port: test_endpoint_port,
ip: {127, 0, 0, 1}
config :parrhesia, enable_expiration_worker: false
config :parrhesia,
enable_expiration_worker: false,
moderation_cache_enabled: false,
features: [verify_event_signatures: false]
pg_host = System.get_env("PGHOST")

View File

@@ -1,11 +1,16 @@
{
lib,
beam,
fetchFromGitHub,
runCommand,
autoconf,
automake,
libtool,
pkg-config,
vips,
}: let
pname = "parrhesia";
version = "0.1.0";
version = "0.4.0";
beamPackages = beam.packages.erlang_28.extend (
final: _prev: {
@@ -43,16 +48,69 @@
beamPackages.fetchMixDeps {
pname = "${pname}-mix-deps";
inherit version src;
hash = "sha256-1v2+Q1MHbu09r5OBaLehiR+JfMP0Q5OHaWuwrQDzZJU=";
hash = "sha256-I09Q2PG22lOrZjjXoq8Py3P3o5dgaz9LhKJSmP+/r6k=";
}
else null;
# lib_secp256k1 is a :make dep and may not be present in fetchMixDeps output.
# Inject the Hex package explicitly, then vendor upstream bitcoin-core/secp256k1
# sources to avoid build-time network access.
libSecp256k1Hex = beamPackages.fetchHex {
pkg = "lib_secp256k1";
version = "0.7.1";
sha256 = "sha256-eL3TZhoXRIr/Wu7FynTI3bwJsB8Oz6O6Gro+iuR6srM=";
};
elixirMakeHex = beamPackages.fetchHex {
pkg = "elixir_make";
version = "0.9.0";
sha256 = "sha256-2yPU/Yt1dGKtAviqc0MaQm/mZxyAsgDZcQyvPR3Q/9s=";
};
secp256k1Src = fetchFromGitHub {
owner = "bitcoin-core";
repo = "secp256k1";
rev = "v0.7.1";
hash = "sha256-DnBgetf+98n7B1JGtyTdxyc+yQ51A3+ueTIPPSWCm4E=";
};
patchedMixFodDeps =
if mixFodDeps == null
then null
else
runCommand mixFodDeps.name {} ''
mkdir -p $out
cp -r --no-preserve=mode ${mixFodDeps}/. $out
chmod -R u+w $out
rm -rf $out/lib_secp256k1
cp -r ${libSecp256k1Hex} $out/lib_secp256k1
chmod -R u+w $out/lib_secp256k1
rm -rf $out/elixir_make
cp -r ${elixirMakeHex} $out/elixir_make
rm -rf $out/lib_secp256k1/c_src/secp256k1
mkdir -p $out/lib_secp256k1/c_src/secp256k1
cp -r ${secp256k1Src}/. $out/lib_secp256k1/c_src/secp256k1/
chmod -R u+w $out/lib_secp256k1/c_src/secp256k1
# mixRelease may copy deps without preserving +x bits, so avoid relying
# on executable mode for autogen.sh.
substituteInPlace $out/lib_secp256k1/Makefile \
--replace-fail "./autogen.sh" "sh ./autogen.sh"
touch $out/lib_secp256k1/c_src/secp256k1/.fetched
'';
in
beamPackages.mixRelease {
inherit pname version src mixFodDeps;
inherit pname version src;
mixFodDeps = patchedMixFodDeps;
mixEnv = "prod";
removeCookie = false;
nativeBuildInputs = [pkg-config];
nativeBuildInputs = [pkg-config autoconf automake libtool];
buildInputs = [vips];
preConfigure = ''

View File

@@ -75,9 +75,16 @@ in {
});
nostr-bench = pkgs.callPackage ./nix/nostr-bench.nix {};
in
with pkgs; [
with pkgs;
[
just
gcc
git
gnumake
autoconf
automake
libtool
pkg-config
# Nix code formatter
alejandra
# i18n
@@ -94,15 +101,11 @@ in {
nostr-bench
# Nostr reference servers
nostr-rs-relay
]
++ lib.optionals pkgs.stdenv.hostPlatform.isx86_64 [
strfry
];
# https://devenv.sh/tests/
# enterTest = ''
# echo "Running tests"
# git --version | grep "2.42.0"
# '';
# https://devenv.sh/languages/
languages = {
elixir = {
@@ -121,15 +124,33 @@ in {
services.postgres = {
enable = true;
package = pkgs.postgresql_18;
# Some tuning for the benchmark - doesn't seem to do much
settings = {
max_connections = 300;
shared_buffers = "1GB";
effective_cache_size = "3GB";
work_mem = "16MB";
maintenance_work_mem = "256MB";
wal_compression = "on";
checkpoint_timeout = "15min";
checkpoint_completion_target = 0.9;
min_wal_size = "1GB";
max_wal_size = "4GB";
random_page_cost = 1.1;
effective_io_concurrency = 200;
};
initialDatabases = [{name = "parrhesia_dev";} {name = "parrhesia_test";}];
initialScript = ''
CREATE ROLE dev WITH LOGIN PASSWORD 'dev' SUPERUSER;
-- Make sure we get the right collation
ALTER database template1 is_template=false;
DROP database template1;
CREATE DATABASE template1 WITH OWNER = agent
CREATE DATABASE template1
ENCODING = 'UTF8'
TABLESPACE = pg_default
LC_COLLATE = 'de_DE.UTF-8'
@@ -141,12 +162,10 @@ in {
'';
};
# https://devenv.sh/pre-commit-hooks/
# pre-commit.hooks.shellcheck.enable = true;
dotenv.enable = true;
devenv.warnOnNewVersion = false;
# https://devenv.sh/pre-commit-hooks/
git-hooks.hooks = {
alejandra.enable = true;
check-added-large-files = {

234
docs/CLUSTER.md Normal file
View File

@@ -0,0 +1,234 @@
# Parrhesia clustering and distributed fanout
This document describes:
1. the **current** distributed fanout behavior implemented today, and
2. a practical evolution path to a more production-grade clustered relay.
---
## 1) Current state (implemented today)
### 1.1 What exists right now
Parrhesia currently includes a lightweight multi-node live fanout path (untested!):
- `Parrhesia.Fanout.MultiNode` (`lib/parrhesia/fanout/multi_node.ex`)
- GenServer that joins a `:pg` process group.
- Receives locally-published events and forwards them to other group members.
- Receives remote events and performs local fanout lookup.
- `Parrhesia.Web.Connection` (`lib/parrhesia/web/connection.ex`)
- On successful ingest, after ACK scheduling, it does:
1. local fanout (`fanout_event/1`), then
2. cross-node publish (`maybe_publish_multi_node/1`).
- `Parrhesia.Subscriptions.Supervisor` (`lib/parrhesia/subscriptions/supervisor.ex`)
- Starts `Parrhesia.Fanout.MultiNode` unconditionally.
In other words: **if BEAM nodes are connected, live events are fanned out cross-node**.
### 1.2 What is not included yet
- No automatic cluster formation/discovery (no `libcluster`, DNS polling, gossip, etc.).
- No durable inter-node event transport.
- No replay/recovery of missed cross-node live events.
- No explicit per-node delivery ACK between relay nodes.
---
## 2) Current runtime behavior in detail
### 2.1 Local ingest flow and publish ordering
For an accepted event in `Parrhesia.Web.Connection`:
1. validate/policy/persist path runs.
2. Client receives `OK` reply.
3. A post-ACK message triggers:
- local fanout (`Index.candidate_subscription_keys/1` + send `{:fanout_event, ...}`),
- multi-node publish (`MultiNode.publish/1`).
Important semantics:
- Regular persisted events: ACK implies DB persistence succeeded.
- Ephemeral events: ACK implies accepted by policy, but no DB durability.
- Cross-node fanout happens **after** ACK path is scheduled.
### 2.2 Multi-node transport mechanics
`Parrhesia.Fanout.MultiNode` uses `:pg` membership:
- On init:
- ensures `:pg` is started,
- joins group `Parrhesia.Fanout.MultiNode`.
- On publish:
- gets all group members,
- excludes itself,
- sends `{:remote_fanout_event, event}` to each member pid.
- On remote receive:
- runs local subscription candidate narrowing via `Parrhesia.Subscriptions.Index`,
- forwards matching candidates to local connection owners as `{:fanout_event, sub_id, event}`.
No republish on remote receive, so this path does not create fanout loops.
### 2.3 Subscription index locality
The subscription index is local ETS state per node (`Parrhesia.Subscriptions.Index`).
- Each node only tracks subscriptions of its local websocket processes.
- Each node independently decides which local subscribers match a remote event.
- There is no global cross-node subscription registry.
### 2.4 Delivery model and guarantees (current)
Current model is **best-effort live propagation** among connected nodes.
- If nodes are connected and healthy, remote live subscribers should receive events quickly.
- If there is a netsplit or temporary disconnection:
- remote live subscribers may miss events,
- persisted events can still be recovered by normal `REQ`/history query,
- ephemeral events are not recoverable.
### 2.5 Cluster preconditions
For cross-node fanout to work, operators must provide distributed BEAM connectivity:
- consistent Erlang cookie,
- named nodes (`--name`/`--sname`),
- network reachability for Erlang distribution ports,
- explicit node connections (or external discovery tooling).
Parrhesia currently does not automate these steps.
---
## 3) Operational characteristics of current design
### 3.1 Performance shape
For each accepted event on one node:
- one local fanout lookup + local sends,
- one cluster publish that sends to `N - 1` remote bus members,
- on each remote node: one local fanout lookup + local sends.
So inter-node traffic scales roughly linearly with node count per event (full-cluster broadcast).
This is simple and low-latency for small-to-medium clusters, but can become expensive as node count grows.
### 3.2 Failure behavior
- Remote node down: send attempts to that member stop once membership updates; no replay.
- Netsplit: live propagation gap during split.
- Recovery: local clients can catch up via DB-backed queries (except ephemeral kinds).
### 3.3 Consistency expectations
- No global total-ordering guarantee for live delivery across nodes.
- Per-connection ordering is preserved by each connection process queue/drain behavior.
- Duplicate suppression for ingestion uses storage semantics (`duplicate_event`), but transport itself is not exactly-once.
### 3.4 Observability today
Relevant metrics exist for fanout/queue pressure (see `Parrhesia.Telemetry`), e.g.:
- `parrhesia.fanout.duration.ms`
- `parrhesia.connection.outbound_queue.depth`
- `parrhesia.connection.outbound_queue.pressure`
- `parrhesia.connection.outbound_queue.overflow.count`
These are useful but do not yet fully separate local-vs-remote fanout pipeline stages.
---
## 4) Practical extension path to a fully-fledged clustered system
A realistic path is incremental. Suggested phases:
### Phase A — hardened BEAM cluster control plane
1. Add cluster discovery/formation (e.g. `libcluster`) with environment-specific topology:
- Kubernetes DNS,
- static nodes,
- cloud VM discovery.
2. Add clear node liveness/partition telemetry and alerts.
3. Provide operator docs for cookie, node naming, and network requirements.
Outcome: simpler and safer cluster operations, same data plane semantics.
### Phase B — resilient distributed fanout data plane
Introduce a durable fanout stream for persisted events.
Recommended pattern:
1. On successful DB commit of event, append to a monotonic fanout log (or use DB sequence-based stream view).
2. Each relay node runs a consumer with a stored cursor.
3. On restart/partition recovery, node resumes from cursor and replays missed events.
4. Local fanout remains same (subscription index + per-connection queues).
Semantics target:
- **at-least-once** node-to-node propagation,
- replay after downtime,
- idempotent handling keyed by event id.
Notes:
- Ephemeral events can remain best-effort (or have a separate short-lived transport), since no storage source exists for replay.
### Phase C — scale and efficiency improvements
As cluster size grows, avoid naive full broadcast where possible:
1. Optional node-level subscription summaries (coarse bloom/bitset or keyed summaries) to reduce unnecessary remote sends.
2. Shard fanout workers for CPU locality and mailbox control.
3. Batch remote delivery payloads.
4. Separate traffic classes (e.g. Marmot-heavy streams vs generic) with independent queues.
Outcome: higher throughput per node and lower inter-node amplification.
### Phase D — stronger observability and SLOs
Add explicit distributed pipeline metrics:
- publish enqueue/dequeue latency,
- cross-node delivery lag (commit -> remote fanout enqueue),
- replay backlog depth,
- per-node dropped/expired transport messages,
- partition detection counters.
Define cluster SLO examples:
- p95 commit->remote-live enqueue under nominal load,
- max replay catch-up time after node restart,
- bounded message loss for best-effort channels.
---
## 5) How a fully-fledged system would behave in practice
With Phases A-D implemented, expected behavior:
- **Normal operation:**
- low-latency local fanout,
- remote nodes receive events via stream consumers quickly,
- consistent operational visibility of end-to-end lag.
- **Node restart:**
- node reconnects and replays from stored cursor,
- local subscribers begin receiving new + missed persisted events.
- **Transient partition:**
- live best-effort path may degrade,
- persisted events converge after partition heals via replay.
- **High fanout bursts:**
- batching + sharding keeps queue pressure bounded,
- overflow policies remain connection-local and measurable.
This approach gives a good trade-off between Nostr relay latency and distributed robustness without requiring strict exactly-once semantics.
---
## 6) Current status summary
Today, Parrhesia already supports **lightweight distributed live fanout** when BEAM nodes are connected.
It is intentionally simple and fast for smaller clusters, and provides a solid base for a more durable, observable cluster architecture as relay scale and availability requirements grow.

View File

@@ -1,69 +0,0 @@
# Marmot operations guide (relay operator tuning)
This document captures practical limits and operational defaults for Marmot-heavy traffic (`443`, `445`, `10051`, wrapped `1059`, optional media/push flows).
## 1) Recommended baseline limits
Use these as a starting point and tune from production telemetry.
```elixir
config :parrhesia,
limits: [
max_filter_limit: 500,
max_filters_per_req: 16,
max_outbound_queue: 256,
outbound_drain_batch_size: 64
],
policies: [
# Marmot group routing/query guards
marmot_require_h_for_group_queries: true,
marmot_group_max_h_values_per_filter: 32,
marmot_group_max_query_window_seconds: 2_592_000,
# Kind 445 retention
mls_group_event_ttl_seconds: 300,
# MIP-04 metadata controls
marmot_media_max_imeta_tags_per_event: 8,
marmot_media_max_field_value_bytes: 1024,
marmot_media_max_url_bytes: 2048,
marmot_media_allowed_mime_prefixes: [],
marmot_media_reject_mip04_v1: true,
# MIP-05 push controls (optional)
marmot_push_server_pubkeys: [],
marmot_push_max_relay_tags: 16,
marmot_push_max_payload_bytes: 65_536,
marmot_push_max_trigger_age_seconds: 120,
marmot_push_require_expiration: true,
marmot_push_max_expiration_window_seconds: 120,
marmot_push_max_server_recipients: 1
]
```
## 2) Index expectations for Marmot workloads
The Postgres adapter relies on dedicated partial tag indexes for hot Marmot selectors:
- `event_tags_h_value_created_at_idx` for `#h` group routing
- `event_tags_i_value_created_at_idx` for `#i` keypackage reference lookups
Query-plan regression tests assert these paths remain usable for heavy workloads.
## 3) Telemetry to watch
Key metrics for Marmot traffic and pressure:
- `parrhesia.ingest.duration.ms{traffic_class="marmot|generic"}`
- `parrhesia.query.duration.ms{traffic_class="marmot|generic"}`
- `parrhesia.fanout.duration.ms{traffic_class="marmot|generic"}`
- `parrhesia.connection.outbound_queue.depth{traffic_class=...}`
- `parrhesia.connection.outbound_queue.pressure{traffic_class=...}`
- `parrhesia.connection.outbound_queue.pressure_events.count{traffic_class=...}`
- `parrhesia.connection.outbound_queue.overflow.count{traffic_class=...}`
Operational target: keep queue pressure below sustained 0.75 and avoid overflow spikes during `445` bursts.
## 4) Fault and recovery expectations
During storage outages, Marmot group-flow writes must fail with explicit `OK false` errors. After recovery, reordered group events should still query deterministically by `created_at DESC, id ASC`.

BIN
docs/logo.afdesign Normal file

Binary file not shown.

1
docs/logo.svg Normal file

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 37 KiB

279
docs/slop/HARDEN.md Normal file
View File

@@ -0,0 +1,279 @@
# Hardening Review: Parrhesia Nostr Relay
You are a security engineer specialising in real-time WebSocket servers, Erlang/OTP systems, and protocol-level abuse. You are reviewing **Parrhesia**, a Nostr relay (NIP-01 compliant) written in Elixir, for hardening opportunities — with a primary focus on **denial-of-service resilience** and a secondary focus on the full attack surface.
Produce a prioritised list of **specific, actionable recommendations** with rationale. For each recommendation, state:
1. The attack or failure mode it mitigates
2. Suggested implementation (config change, code change, or architectural change)
3. Severity estimate (critical / high / medium / low)
---
## 1. Architecture Overview
| Component | Technology | Notes |
|---|---|---|
| Runtime | Elixir/OTP 27, BEAM VM | Each WS connection is a separate process |
| HTTP server | Bandit (pure Elixir) | HTTP/1.1 only, no HTTP/2 |
| WebSocket | `websock_adapter` | Text frames only; binary rejected |
| Database | PostgreSQL via Ecto | Range-partitioned `events` table by `created_at` |
| Caching | ETS | Config snapshot + moderation ban/allow lists |
| Multi-node | Erlang `:pg` groups | Fanout across BEAM cluster nodes |
| Metrics | Prometheus (Telemetry) | `/metrics` endpoint |
| TLS termination | **Out of scope** — handled by reverse proxy (nginx/Caddy) |
### Supervision Tree
```
Parrhesia.Supervisor
├─ Telemetry (Prometheus exporter)
├─ Config (ETS snapshot of runtime config)
├─ Storage.Supervisor (Ecto repo + moderation cache)
├─ Subscriptions.Supervisor (ETS subscription index for fanout)
├─ Auth.Supervisor (NIP-42 challenge GenServer)
├─ Policy.Supervisor (policy enforcement)
├─ Web.Endpoint (Bandit listener)
└─ Tasks.Supervisor (ExpirationWorker, 30s GC loop)
```
### Data Flow
1. Client connects via WebSocket at `/relay`
2. NIP-42 AUTH challenge issued immediately (16-byte random, base64url)
3. Inbound text frames are: size-checked → JSON-decoded → rate-limited → protocol-dispatched
4. EVENT messages: validated → policy-checked → stored in Postgres → ACK → async fanout to matching subscriptions
5. REQ messages: filters validated → Postgres query → results streamed → EOSE → live subscription registered
6. Fanout: post-ingest, subscription index (ETS) is traversed; matching connection processes receive events via `send/2`
---
## 2. Current Defences Inventory
### Connection Layer
| Defence | Value | Enforcement Point |
|---|---|---|
| Max WebSocket frame size | **1,048,576 bytes (1 MiB)** | Checked in `handle_in` *before* JSON decode, and at Bandit upgrade (`max_frame_size`) |
| WebSocket upgrade timeout | **60,000 ms** | Passed to `WebSockAdapter.upgrade` |
| Binary frame rejection | Returns NOTICE, connection stays open | `handle_in` opcode check |
| Outbound queue limit | **256 events** per connection | Overflow strategy: **`:close`** (WS 1008) |
| Outbound drain batch | **64 events** | Async drain via `send(self(), :drain_outbound_queue)` |
| Outbound pressure telemetry | Threshold at **75%** of queue | Emits telemetry event only, no enforcement |
| IP blocking | Via moderation cache (ETS) | Management API can add blocked IPs |
### Protocol Layer
| Defence | Value | Notes |
|---|---|---|
| Max event JSON size | **262,144 bytes (256 KiB)** | Re-serialises decoded event and checks byte size |
| Max filters per REQ | **16** | Rejected at filter validation |
| Max filter `limit` | **500** | `min(client_limit, 500)` applied at query time |
| Max subscriptions per connection | **32** | Existing sub IDs updated without counting toward limit |
| Subscription ID max length | **64 characters** | Must be non-empty |
| Event kind range | **0–65,535** | Integer range check |
| Max future event skew | **900 seconds (15 min)** | Events with `created_at > now + 900` rejected |
| Unknown filter keys | **Rejected** | Allowed: `ids`, `authors`, `kinds`, `since`, `until`, `limit`, `search`, `#<letter>` |
### Event Validation Pipeline
Strict order:
1. Required fields present (`id`, `pubkey`, `created_at`, `kind`, `tags`, `content`, `sig`)
2. `id` — 64-char lowercase hex
3. `pubkey` — 64-char lowercase hex
4. `created_at` — non-negative integer, max 900s future skew
5. `kind` — integer in [0, 65535]
6. `tags` — list of non-empty string arrays (**no length limit on tags array or individual tag values**)
7. `content` — any binary string
8. `sig` — 128-char lowercase hex
9. ID hash recomputation and comparison
10. Schnorr signature verification via `lib_secp256k1` (gated by `verify_event_signatures` flag, default `true`)
### Rate Limiting
| Defence | Value | Notes |
|---|---|---|
| Event ingest rate | **120 events per window** | Per-connection sliding window |
| Ingest window | **1 second** | Resets on first event after expiry |
| No per-IP connection rate limiting | — | Must be handled at reverse proxy |
| No global connection count ceiling | — | BEAM handles thousands but no configured limit |
### Authentication (NIP-42)
- Challenge issued to **all** connections on connect (optional escalation model)
- AUTH event must: pass full NIP-01 validation, be kind `22242`, contain matching `challenge` tag, contain matching `relay` tag
- `created_at` freshness: must be `>= now - 600s` (10 min)
- On success: pubkey added to `authenticated_pubkeys` MapSet; challenge rotated
- Supports multiple authenticated pubkeys per connection
### Authentication (NIP-98 HTTP)
- Management endpoint (`POST /management`) requires NIP-98 header
- Auth event must be kind `27235`, `created_at` within **60 seconds** of now
- Must include `method` and `u` tags matching request exactly
### Access Control
- `auth_required_for_writes`: default **false** (configurable)
- `auth_required_for_reads`: default **false** (configurable)
- Protected events (NIP-70, tagged `["-"]`): require auth + pubkey match
- Giftwrap (kind 1059): unauthenticated REQ → CLOSED; authenticated REQ must include `#p` containing own pubkey
### Database
- All queries use Ecto parameterised bindings — no raw string interpolation
- LIKE search patterns escaped (`%`, `_`, `\` characters)
- Deletion enforces `pubkey == deleter_pubkey` in WHERE clause
- Soft-delete via `deleted_at`; hard-delete only via vanish (NIP-62) or expiration purge
- DB pool: **32 connections** (prod), queue target 1s, interval 5s
### Moderation
- Banned pubkeys, allowed pubkeys, banned events, blocked IPs stored in ETS cache
- Management API (NIP-98 authed) for CRUD on moderation lists
- Cache invalidated atomically on writes
---
## 3. Known Gaps and Areas of Concern
The following are areas where the current implementation may be vulnerable or where defences could be strengthened. **Please evaluate each and provide recommendations.**
### 3.1 Connection Exhaustion
- There is **no global limit on concurrent WebSocket connections**. Each connection is an Elixir process (~23 KiB base), but subscriptions, auth state, and outbound queues add per-connection memory.
- There is **no per-IP connection rate limiting at the application layer**. IP blocking exists but is reactive (management API), not automatic.
- There is **no idle timeout** after the WebSocket upgrade completes. A connection can remain open indefinitely without sending or receiving messages.
**Questions:**
- What connection limits should be configured at the Bandit/BEAM level?
- Should an idle timeout be implemented? If so, what value balances real-time subscription use against resource waste?
- Should per-IP connection counting be implemented at the application layer, or is this strictly a reverse proxy concern?
### 3.2 Subscription Abuse
- A single connection can hold **32 subscriptions**, each with up to **16 filters**. That's 512 filter predicates per connection being evaluated on every fanout.
- Filter arrays (`ids`, `authors`, `kinds`, tag values) have **no element count limits**. A filter could contain thousands of author pubkeys.
- There is no cost accounting for "expensive" subscriptions (e.g., wide open filters matching all events).
**Questions:**
- Should filter array element counts be bounded? If so, what limits per field?
- Should there be a per-connection "filter complexity" budget?
- How expensive is the current ETS subscription index traversal at scale (e.g., 10K concurrent connections × 32 subs each)?
### 3.3 Tag Array Size
- Event validation does **not limit the number of tags** or the length of individual tag values beyond the 256 KiB total event size cap.
- A maximally-tagged event could contain thousands of short tags, causing amplification in `event_tags` table inserts (one row per tag).
**Questions:**
- Should a max tag count be enforced? What is a reasonable limit?
- What is the insert cost of storing e.g. 1,000 tags per event? Could this be used for write amplification?
- Should individual tag value lengths be bounded?
### 3.4 AUTH Timing
- AUTH event `created_at` freshness only checks the **lower bound** (`>= now - 600`). An AUTH event with `created_at` far in the future passes validation.
- Regular events have a future skew cap of 900s, but AUTH events do not.
**Questions:**
- Should AUTH events also enforce a future `created_at` bound?
- Is a 600-second AUTH window too wide? Could it be reduced?
### 3.5 Outbound Amplification
- A single inbound EVENT can fan out to an unbounded number of matching subscriptions across all connections.
- The outbound queue (256 events, `:close` strategy) protects individual connections but does not limit total fanout work per event.
- The fanout traverses the ETS subscription index synchronously in the ingesting connection's process.
**Questions:**
- Should fanout be bounded per event (e.g., max N recipients before yielding)?
- Should fanout happen in a separate process pool rather than inline?
- Is the `:close` overflow strategy optimal, or would `:drop_oldest` be better for well-behaved clients with temporary backpressure?
### 3.6 Query Amplification
- A single REQ with 16 filters, each with `limit: 500`, could trigger 16 separate Postgres queries returning up to 8,000 events total.
- COUNT requests also execute per-filter queries (now deduplicated via UNION ALL).
- `search` filters use `ILIKE %pattern%` which cannot use B-tree indexes.
**Questions:**
- Should there be a per-REQ total result cap (across all filters)?
- Should `search` queries be rate-limited or require a minimum pattern length?
- Should COUNT be disabled or rate-limited separately?
- Are there missing indexes that would help common query patterns?
### 3.7 Multi-Node Trust
- Events received via `:remote_fanout_event` from peer BEAM nodes **skip all validation and policy checks** and go directly to the subscription index.
- This assumes all cluster peers are trusted.
**Questions:**
- If cluster membership is dynamic or spans trust boundaries, should remote events be re-validated?
- Should there be a shared secret or HMAC on inter-node messages?
### 3.8 Metrics Endpoint
- `/metrics` (Prometheus) is **unauthenticated**.
- Exposes internal telemetry: connection counts, event throughput, queue depths, database timing.
**Questions:**
- Should `/metrics` require authentication or be restricted to internal networks?
- Could metrics data be used to profile the relay's capacity and craft targeted attacks?
### 3.9 Negentropy Stub
- NEG-OPEN, NEG-MSG, NEG-CLOSE messages are accepted and acknowledged but the reconciliation logic is a stub (cursor counter only).
- Are there resource implications of accepting negentropy sessions without a real implementation?
### 3.10 Event Re-Serialisation Cost
- To enforce the 256 KiB event size limit, the relay calls `JSON.encode!(event)` on the already-decoded event map. This re-serialisation happens on every inbound EVENT.
- Could this be replaced with a byte-length check on the raw frame payload (already available)?
---
## 4. Specific Review Requests
Beyond the gaps above, please also evaluate:
1. **Bandit configuration**: Are there Bandit-level options (max connections, header limits, request timeouts, keepalive settings) that should be tuned for a public-facing relay?
2. **BEAM VM flags**: Are there any Erlang VM flags (`+P`, `+Q`, `+S`, memory limits) that should be set for production hardening?
3. **Ecto pool exhaustion**: With 32 DB connections and potentially thousands of concurrent REQ queries, what happens under pool exhaustion? Is the 1s queue target + 5s interval appropriate?
4. **ETS table sizing**: The subscription index and moderation cache use ETS. Are there memory limits or table options (`read_concurrency`, `write_concurrency`, `compressed`) that should be tuned?
5. **Process mailbox overflow**: Connection processes receive events via `send/2` during fanout. If a process is slow to consume, its mailbox grows. The outbound queue mechanism is application-level — but is the BEAM-level mailbox also protected?
6. **Reverse proxy recommendations**: What nginx/Caddy configuration should complement the relay's defences? (Rate limiting, connection limits, WebSocket-specific settings, request body size.)
7. **Monitoring and alerting**: What telemetry signals should trigger alerts? (Connection count spikes, queue overflow rates, DB pool saturation, error rates.)
---
## 5. Out of Scope
The following are **not** in scope for this review:
- TLS configuration (handled by reverse proxy)
- DNS and network-level DDoS mitigation
- Operating system hardening
- Key management for the relay identity
- Client-side security
- Nostr protocol design flaws (we implement the spec as-is)
---
## 6. Response Format
For each recommendation, use this format:
### [Severity] Title
**Attack/failure mode:** What goes wrong without this mitigation.
**Current state:** What exists today (or doesn't).
**Recommendation:** Specific change — config value, code change, or architectural decision.
**Trade-offs:** Any impact on legitimate users or operational complexity.

398
docs/slop/LOCAL_API.md Normal file
View File

@@ -0,0 +1,398 @@
# Parrhesia Shared API + Local API Design (Option 1)
## 1) Goal
Expose a stable in-process API for embedding apps **and** refactor server transports to consume the same API.
Desired end state:
- WebSocket server, HTTP management, and embedding app all call one shared core API.
- Transport layers (WS/HTTP/local) only do framing, auth header extraction, and response encoding.
- Policy/storage/fanout/business semantics live in one place.
This keeps everything in the same dependency (`:parrhesia`) and avoids a second package.
---
## 2) Key architectural decision
Previous direction: `Parrhesia.Local.*` as primary public API.
Updated direction (this doc):
- Introduce **shared core API modules** under `Parrhesia.API.*`.
- Make server code (`Parrhesia.Web.Connection`, management handlers) delegate to `Parrhesia.API.*`.
- Keep `Parrhesia.Local.*` as optional convenience wrappers over `Parrhesia.API.*`.
This ensures no divergence between local embedding behavior and websocket behavior.
---
## 3) Layered design
```text
Transport layer
- Parrhesia.Web.Connection (WS)
- Parrhesia.Web.Management (HTTP)
- Parrhesia.Local.* wrappers (in-process)
Shared API layer
- Parrhesia.API.Auth
- Parrhesia.API.Events
- Parrhesia.API.Stream (optional)
- Parrhesia.API.Admin (optional, for management methods)
Domain/runtime dependencies
- Parrhesia.Policy.EventPolicy
- Parrhesia.Storage.* adapters
- Parrhesia.Groups.Flow
- Parrhesia.Subscriptions.Index
- Parrhesia.Fanout.MultiNode
- Parrhesia.Telemetry
```
Rule: all ingest/query/count decisions happen in `Parrhesia.API.Events`.
---
## 4) Public module plan
## 4.1 `Parrhesia.API.Auth`
Purpose:
- event validation helpers
- NIP-98 verification
- optional embedding account resolution hook
Proposed functions:
```elixir
@spec validate_event(map()) :: :ok | {:error, term()}
@spec compute_event_id(map()) :: String.t()
@spec validate_nip98(String.t() | nil, String.t(), String.t()) ::
{:ok, Parrhesia.API.Auth.Context.t()} | {:error, term()}
@spec validate_nip98(String.t() | nil, String.t(), String.t(), keyword()) ::
{:ok, Parrhesia.API.Auth.Context.t()} | {:error, term()}
```
`validate_nip98/4` options:
```elixir
account_resolver: (pubkey_hex :: String.t(), auth_event :: map() ->
{:ok, account :: term()} | {:error, term()})
```
Context struct:
```elixir
defmodule Parrhesia.API.Auth.Context do
@enforce_keys [:pubkey, :auth_event]
defstruct [:pubkey, :auth_event, :account, claims: %{}]
end
```
---
## 4.2 `Parrhesia.API.Events`
Purpose:
- canonical ingress/query/count API used by WS + local + HTTP integrations.
Proposed functions:
```elixir
@spec publish(map(), keyword()) :: {:ok, Parrhesia.API.Events.PublishResult.t()} | {:error, term()}
@spec query([map()], keyword()) :: {:ok, [map()]} | {:error, term()}
@spec count([map()], keyword()) :: {:ok, non_neg_integer() | map()} | {:error, term()}
```
Request context:
```elixir
defmodule Parrhesia.API.RequestContext do
defstruct authenticated_pubkeys: MapSet.new(),
actor: nil,
metadata: %{}
end
```
Publish result:
```elixir
defmodule Parrhesia.API.Events.PublishResult do
@enforce_keys [:event_id, :accepted, :message]
defstruct [:event_id, :accepted, :message]
end
```
### Publish semantics (must match websocket EVENT)
Pipeline in `publish/2`:
1. frame/event size limits
2. `Parrhesia.Protocol.validate_event/1`
3. `Parrhesia.Policy.EventPolicy.authorize_write/2`
4. group handling (`Parrhesia.Groups.Flow.handle_event/1`)
5. persistence path (`put_event`, deletion, vanish, ephemeral rules)
6. fanout (local + multi-node)
7. telemetry emit
Return shape mirrors Nostr `OK` semantics:
```elixir
{:ok, %PublishResult{event_id: id, accepted: true, message: "ok: event stored"}}
{:ok, %PublishResult{event_id: id, accepted: false, message: "blocked: ..."}}
```
### Query/count semantics (must match websocket REQ/COUNT)
`query/2` and `count/2`:
1. validate filters
2. run read policy (`EventPolicy.authorize_read/2`)
3. call storage with `requester_pubkeys` from context
4. return ordered events/count payload
Giftwrap restrictions (`kind 1059`) must remain identical to websocket behavior.
---
## 4.3 `Parrhesia.API.Stream` (optional but recommended)
Purpose:
- local in-process subscriptions using same subscription index/fanout model.
Proposed functions:
```elixir
@spec subscribe(pid(), String.t(), [map()], keyword()) :: {:ok, reference()} | {:error, term()}
@spec unsubscribe(reference()) :: :ok
```
Subscriber contract:
```elixir
{:parrhesia, :event, ref, subscription_id, event}
{:parrhesia, :eose, ref, subscription_id}
{:parrhesia, :closed, ref, subscription_id, reason}
```
---
## 4.4 `Parrhesia.Local.*` wrappers
`Parrhesia.Local.*` remain as convenience API for embedding apps, implemented as thin wrappers:
- `Parrhesia.Local.Auth` -> delegates to `Parrhesia.API.Auth`
- `Parrhesia.Local.Events` -> delegates to `Parrhesia.API.Events`
- `Parrhesia.Local.Stream` -> delegates to `Parrhesia.API.Stream`
- `Parrhesia.Local.Client` -> use-case helpers (posts + private messages)
No business logic in wrappers.
---
## 5) Server integration plan (critical)
## 5.1 WebSocket (`Parrhesia.Web.Connection`)
After decode:
- `EVENT` -> `Parrhesia.API.Events.publish/2`
- `REQ` -> `Parrhesia.API.Events.query/2`
- `COUNT` -> `Parrhesia.API.Events.count/2`
- `AUTH` keep transport-specific challenge/session flow, but can use `API.Auth.validate_event/1` internally
WebSocket keeps responsibility for:
- websocket framing
- subscription lifecycle per connection
- AUTH challenge rotation protocol frames
## 5.2 HTTP management (`Parrhesia.Web.Management`)
- NIP-98 header validation via `Parrhesia.API.Auth.validate_nip98/3`
- command execution via `Parrhesia.API.Admin` (or existing storage admin adapter via API facade)
---
## 6) High-level client helpers for embedding app use case
These helpers are optional and live in `Parrhesia.Local.Client`.
## 6.1 Public posts
```elixir
@spec publish_post(Parrhesia.API.Auth.Context.t(), String.t(), keyword()) ::
{:ok, Parrhesia.API.Events.PublishResult.t()} | {:error, term()}
@spec list_posts(keyword()) :: {:ok, [map()]} | {:error, term()}
@spec stream_posts(pid(), keyword()) :: {:ok, reference()} | {:error, term()}
```
`publish_post/3` options:
- `:tags`
- `:created_at`
- `:signer` callback (required unless fully signed event provided)
Signer contract:
```elixir
(unsigned_event_map -> {:ok, signed_event_map} | {:error, term()})
```
Parrhesia does not store or manage private keys.
## 6.2 Private messages (giftwrap kind 1059)
```elixir
@spec send_private_message(
Parrhesia.API.Auth.Context.t(),
recipient_pubkey :: String.t(),
encrypted_payload :: String.t(),
keyword()
) :: {:ok, Parrhesia.API.Events.PublishResult.t()} | {:error, term()}
@spec inbox(Parrhesia.API.Auth.Context.t(), keyword()) :: {:ok, [map()]} | {:error, term()}
@spec stream_inbox(pid(), Parrhesia.API.Auth.Context.t(), keyword()) :: {:ok, reference()} | {:error, term()}
```
Behavior:
- `send_private_message/4` builds event template with kind `1059` and `p` tag.
- host signer signs template.
- publish through `API.Events.publish/2`.
- `inbox/2` queries `%{"kinds" => [1059], "#p" => [auth.pubkey]}` with authenticated context.
---
## 7) Error model
Shared API should normalize output regardless of transport.
Guideline:
- protocol/policy rejection -> `{:ok, %{accepted: false, message: "..."}}`
- runtime/system failure -> `{:error, term()}`
Common reason mapping:
| Reason | Message prefix |
|---|---|
| `:auth_required` | `auth-required:` |
| `:restricted_giftwrap` | `restricted:` |
| `:invalid_event` | `invalid:` |
| `:duplicate_event` | `duplicate:` |
| `:event_rate_limited` | `rate-limited:` |
---
## 8) Telemetry
Emit shared events in API layer (not transport-specific):
- `[:parrhesia, :api, :publish, :stop]`
- `[:parrhesia, :api, :query, :stop]`
- `[:parrhesia, :api, :count, :stop]`
- `[:parrhesia, :api, :auth, :stop]`
Metadata:
- `traffic_class`
- `caller` (`:websocket | :http | :local`)
- optional `account_present?`
Transport-level telemetry can remain separate where needed.
---
## 9) Refactor sequence
### Phase 1: Extract shared API
1. Create `Parrhesia.API.Events` with publish/query/count from current `Web.Connection` paths.
2. Create `Parrhesia.API.Auth` wrappers for NIP-98/event validation.
3. Add API-level tests.
### Phase 2: Migrate transports
1. Update `Parrhesia.Web.Connection` to delegate publish/query/count to `API.Events`.
2. Update `Parrhesia.Web.Management` to use `API.Auth`.
3. Keep behavior unchanged.
### Phase 3: Add local wrappers/helpers
1. Implement `Parrhesia.Local.Auth/Events/Stream` as thin delegates.
2. Add `Parrhesia.Local.Client` post/inbox/send helpers.
3. Add embedding documentation.
### Phase 4: Lock parity
1. Add parity tests: WS vs Local API for same inputs and policy outcomes.
2. Add property tests for query/count equivalence where feasible.
---
## 10) Testing requirements
1. **Transport parity tests**
- Same signed event via WS and API => same accepted/message semantics.
2. **Policy parity tests**
- Giftwrap visibility and auth-required behavior identical across WS/API/local.
3. **Auth tests**
- NIP-98 success/failure + account resolver success/failure.
4. **Fanout tests**
- publish via API reaches local stream subscribers and WS subscribers.
5. **Failure tests**
- storage failures surface deterministic errors in all transports.
---
## 11) Backwards compatibility
- No breaking change to websocket protocol.
- No breaking change to management endpoint contract.
- New API modules are additive.
- Existing apps can ignore local API entirely.
---
## 12) Embedding example flow
### 12.1 Login/auth
```elixir
with {:ok, auth} <- Parrhesia.API.Auth.validate_nip98(header, method, url,
account_resolver: &MyApp.Accounts.resolve_nostr_pubkey/2
) do
# use auth.pubkey/auth.account in host session
end
```
### 12.2 Post publish
```elixir
Parrhesia.Local.Client.publish_post(auth, "hello", signer: &MyApp.NostrSigner.sign/1)
```
### 12.3 Private message
```elixir
Parrhesia.Local.Client.send_private_message(
auth,
recipient_pubkey,
encrypted_payload,
signer: &MyApp.NostrSigner.sign/1
)
```
### 12.4 Inbox
```elixir
Parrhesia.Local.Client.inbox(auth, limit: 100)
```
---
## 13) Summary
Yes, this can and should be extracted into a shared API module. The server should consume it too.
That gives:
- one canonical behavior path,
- cleaner embedding,
- easier testing,
- lower long-term maintenance cost.

670
docs/slop/REVIEW.md Normal file
View File

@@ -0,0 +1,670 @@
# Parrhesia Relay — Technical Review
**Reviewer:** Case, Senior Systems & Protocol Engineer
**Date:** 2026-03-14
**Commit:** `63d3e7d` (master)
**Scope:** Full codebase review against Nostr NIPs, MARMOT specs, and production readiness criteria
---
# Executive Summary
Parrhesia is a well-structured Nostr relay built on Elixir/OTP with PostgreSQL storage. The architecture is clean — clear separation between web, protocol, policy, and storage layers with a pluggable adapter pattern. Code quality is above average: consistent error handling, good use of `with` chains, comprehensive policy enforcement for MARMOT-specific concerns, and thoughtful outbound backpressure management. The developer clearly understands both the BEAM and the Nostr protocol.
However, the relay has **two critical defects** that make it unsuitable for any deployment beyond trusted local development: (1) **no Schnorr signature verification** — any client can forge events with arbitrary pubkeys, and (2) **lossy tag storage** — events returned from queries have truncated tags, violating NIP-01's data integrity guarantees. Several additional high-severity issues (no ephemeral event handling, missing NIP-42 relay tag validation, SQL LIKE injection vector, no ingest rate limiting) compound the risk.
**Overall risk rating: Critical**
This relay is **not production-ready** for any public deployment. It is suitable for local development and internal testing with trusted clients. With the critical and high findings addressed, it could serve as a solid private relay. Public internet deployment requires significant additional hardening.
---
# Top Findings
## [Critical] No Schnorr Signature Verification
**Area:** protocol correctness, security
**Why it matters:**
NIP-01 mandates that relays MUST verify event signatures using Schnorr signatures over secp256k1. Without signature verification, any client can publish events with any pubkey. This completely breaks the identity and trust model of the Nostr protocol. Authentication (NIP-42), protected events (NIP-70), deletion (NIP-09), replaceable events — all rely on pubkey authenticity.
**Evidence:**
`lib/parrhesia/protocol/event_validator.ex` validates the event ID hash (`validate_id_hash/1` at line 188) but never verifies the `sig` field against the `pubkey` using Schnorr/secp256k1. A `grep` for `schnorr`, `secp256k1`, `verify`, and `:crypto.verify` across the entire `lib/` directory returns zero results. The `validate_sig/1` function (line 182) only checks that `sig` is 64-byte lowercase hex — a format check, not a cryptographic verification.
**Spec reference:**
NIP-01: "Each user has a keypair. Signatures, public key, and encodings are done according to the Schnorr signatures standard for the curve secp256k1." The relay is expected to verify signatures to ensure event integrity.
**Attack scenario:**
An unauthenticated client connects and publishes `["EVENT", {"id": "<valid-hash>", "pubkey": "<victim-pubkey>", "sig": "<any-64-byte-hex>", ...}]`. The relay stores and fans out the forged event as if the victim authored it. This enables impersonation, reputation attacks, and poisoning of replaceable events (kind 0 profile, kind 3 contacts, kind 10002 relay lists).
**Recommended fix:**
Add a secp256k1 library dependency (e.g., `ex_secp256k1` or `:crypto` with OTP 26+ Schnorr support) and add signature verification to `EventValidator.validate/1` after `validate_id_hash/1`. This is the single most important fix.
**Confidence:** High
---
## [Critical] Lossy Tag Storage — Events Returned With Truncated Tags
**Area:** protocol correctness, database
**Why it matters:**
NIP-01 events have tags with arbitrary numbers of elements (e.g., `["e", "<event-id>", "<relay-url>", "<marker>"]`, `["p", "<pubkey>", "<relay-url>"]`, `["a", "<kind>:<pubkey>:<d-tag>", "<relay-url>"]`). The relay only stores the first two elements (`name` and `value`) of each tag in the `event_tags` table, and single-element tags (like `["-"]` for NIP-70 protected events) are dropped entirely. When events are queried back, the reconstructed tags are truncated.
**Evidence:**
`lib/parrhesia/storage/adapters/postgres/events.ex`:
- `insert_tags!/2` (line 266): pattern matches `[name, value | _rest]` — discards `_rest`, ignores tags with fewer than 2 elements.
- `load_tags/1` (line 739): reconstructs tags as `[tag.name, tag.value]` — only 2 elements.
- `to_nostr_event/2` (line 763): uses the truncated tags directly.
The `events` table itself does not store the full tag array. The full tags exist only in the original JSON during ingest, then are lost.
**Spec reference:**
NIP-01: Tags are arrays of arbitrary strings. Relay implementations MUST return events with their complete, unmodified tags. Relay hints in `e`/`p` tags, markers, and other metadata are essential for client operation.
**Attack/failure scenario:**
1. Client publishes event with `["e", "<id>", "wss://relay.example.com", "reply"]`.
2. Another client queries and receives `["e", "<id>"]` — relay hint and marker lost.
3. Client cannot follow the event reference to the correct relay.
4. Protected events with `["-"]` tag lose their protection marker on retrieval, breaking NIP-70 semantics.
**Recommended fix:**
Either (a) store the full tag JSON array in the events table (e.g., a `tags` JSONB column), using `event_tags` only as a query index, or (b) add additional columns to `event_tags` to preserve all elements (e.g., a `rest` text array column or store the full tag as a JSONB column). Option (a) is simpler and more correct.
**Confidence:** High
---
## [High] No Ephemeral Event Handling (Kinds 20000–29999)
**Area:** protocol correctness, performance
**Why it matters:**
NIP-01 defines kinds 20000–29999 as ephemeral events that relays are NOT expected to store. They should be fanned out to matching subscribers but never persisted. The relay currently persists all events regardless of kind, which wastes storage and violates client expectations.
**Evidence:**
`lib/parrhesia/storage/adapters/postgres/events.ex`:
- `replaceable_kind?/1` (line 515): handles kinds 0, 3, 10000–19999.
- `addressable_kind?/1` (line 517): handles kinds 30000–39999.
- No function checks for ephemeral kinds (20000–29999).
- `put_event/2` persists all non-deletion, non-vanish events unconditionally.
`lib/parrhesia/web/connection.ex`:
- `persist_event/1` (line 420): routes kind 5 to deletion, kind 62 to vanish, everything else to `put_event`. No ephemeral bypass.
The config has `accept_ephemeral_events: true` but it's never checked anywhere.
**Spec reference:**
NIP-01: "Upon receiving an ephemeral event, a relay is NOT expected to store it and SHOULD send it directly to the clients that have matching filters open."
**Recommended fix:**
In `persist_event/1`, check if the event kind is in the ephemeral range. If so, skip DB persistence and only fan out. The `accept_ephemeral_events` config should gate whether ephemeral events are accepted at all.
**Confidence:** High
---
## [High] NIP-42 AUTH Missing Relay Tag Validation
**Area:** protocol correctness, security
**Why it matters:**
NIP-42 requires AUTH events to include a `["relay", "<relay-url>"]` tag that matches the relay's URL. Without this check, an AUTH event created for relay A can be replayed against relay B, enabling cross-relay authentication bypass.
**Evidence:**
`lib/parrhesia/web/connection.ex`:
- `validate_auth_event/1` (line 573): checks kind 22242 and presence of `challenge` tag.
- `validate_auth_challenge/2` (line 590): checks challenge value matches.
- **No validation of `relay` tag** anywhere in the auth flow.
**Spec reference:**
NIP-42: AUTH event "MUST include `['relay', '<relay-url>']` tag". The relay MUST verify this tag matches its own URL.
**Attack scenario:**
Attacker obtains an AUTH event from user for relay A (which may be the attacker's relay). Attacker replays this AUTH event against Parrhesia, which accepts it because the challenge is the only thing checked. If the challenge can be predicted or leaked, authentication is fully bypassed.
**Recommended fix:**
Add relay URL validation to `validate_auth_event/1`. The relay should know its own canonical URL (from config or NIP-11 document) and verify the `relay` tag matches.
**Confidence:** High
---
## [High] SQL LIKE Pattern Injection in NIP-50 Search
**Area:** security, performance
**Why it matters:**
The NIP-50 search implementation uses PostgreSQL `ILIKE` with unsanitized user input interpolated into the pattern. While not traditional SQL injection (the value is parameterized), LIKE metacharacters (`%`, `_`) in the search string alter the matching semantics and can cause catastrophic performance.
**Evidence:**
`lib/parrhesia/storage/adapters/postgres/events.ex` line 627:
```elixir
where(query, [event], ilike(event.content, ^"%#{search}%"))
```
The `search` variable is directly interpolated into the LIKE pattern. User-supplied values like `%a%b%c%d%e%f%g%h%i%j%` create pathological patterns that force PostgreSQL into exponential backtracking against the full `content` column of every matching row.
**Attack scenario:**
Client sends `["REQ", "sub1", {"search": "%a%b%c%d%e%f%g%h%i%j%k%l%m%n%o%p%q%r%", "kinds": [1]}]`. PostgreSQL executes an expensive sequential scan with exponential LIKE pattern matching. A handful of concurrent requests with adversarial patterns can saturate the DB connection pool and CPU.
**Recommended fix:**
1. Escape `%` and `_` characters in user search input before interpolation: `search |> String.replace("%", "\\%") |> String.replace("_", "\\_")`.
2. Consider PostgreSQL full-text search (`tsvector`/`tsquery`) instead of ILIKE for better performance and correct semantics.
3. Add a minimum search term length (e.g., 3 characters).
**Confidence:** High
---
## [High] No Per-Connection or Per-IP Rate Limiting on Event Ingestion
**Area:** security, robustness
**Why it matters:**
There is no rate limiting on EVENT submissions. A single client can flood the relay with events at wire speed, consuming DB connections, CPU (for validation), and disk I/O. The outbound queue has backpressure, but the ingest path is completely unbounded.
**Evidence:**
`lib/parrhesia/web/connection.ex`:
- `handle_event_ingest/2` (line 186): processes every EVENT message immediately with no throttle.
- No token bucket, sliding window, or any rate-limiting mechanism anywhere in the codebase.
- `grep` for `rate.limit`, `throttle`, `rate_limit` across `lib/` returns only error message strings, not actual rate-limiting logic.
**Attack scenario:**
A single WebSocket connection sends 10,000 EVENT messages per second. Each triggers validation, policy checks, and a DB transaction. The DB connection pool (default 32) saturates within milliseconds. All other clients experience timeouts.
**Recommended fix:**
Implement per-connection rate limiting in the WebSocket handler (token bucket per connection state). Consider also per-pubkey and per-IP rate limiting as a separate layer. Start with a simple `{count, window_start}` in connection state.
**Confidence:** High
---
## [High] max_frame_bytes and max_event_bytes Not Enforced
**Area:** security, robustness
**Why it matters:**
The configuration defines `max_frame_bytes: 1_048_576` and `max_event_bytes: 262_144` but neither value is actually used to limit incoming data. The max_frame_bytes is only reported in the NIP-11 document. An attacker can send arbitrarily large WebSocket frames and events.
**Evidence:**
- `grep` for `max_frame_bytes` in `lib/`: only found in `relay_info.ex` for NIP-11 output.
- `grep` for `max_event_bytes` in `lib/`: no results at all.
- The Bandit WebSocket upgrade in `router.ex` line 53 passes `timeout: 60_000` but no `max_frame_size` option.
- No payload size check in `handle_in/2` before JSON decoding.
**Attack scenario:**
Client sends a 100MB WebSocket frame containing a single event with a massive `content` field or millions of tags. The relay attempts to JSON-decode the entire payload in memory, potentially causing OOM or extreme GC pressure.
**Recommended fix:**
1. Pass `max_frame_size` to Bandit's WebSocket upgrade options.
2. Check `byte_size(payload)` in `handle_in/2` before calling `Protocol.decode_client/1`.
3. Optionally check individual event size after JSON decoding.
**Confidence:** High
---
## [Medium] NIP-09 Deletion Missing "a" Tag Support for Addressable Events
**Area:** protocol correctness
**Why it matters:**
NIP-09 specifies that deletion events (kind 5) can reference addressable/replaceable events via `"a"` tags (format: `"<kind>:<pubkey>:<d-tag>"`). The current implementation only handles `"e"` tags.
**Evidence:**
`lib/parrhesia/storage/adapters/postgres/events.ex`:
- `extract_delete_event_ids/1` (line 821): only extracts `["e", event_id | _rest]` tags.
- No handling of `["a", ...]` tags.
- No query against addressable_event_state or events by kind+pubkey+d_tag.
**Spec reference:**
NIP-09: "The deletion event MAY contain `a` tags pointing to the replaceable/addressable events to be deleted."
**Recommended fix:**
Extract `"a"` tags from the deletion event, parse the `kind:pubkey:d_tag` format, and soft-delete matching events from the addressable/replaceable state tables, ensuring the deleter's pubkey matches.
**Confidence:** High
---
## [Medium] Subscription Index GenServer Is a Single-Point Bottleneck
**Area:** performance, OTP/design
**Why it matters:**
Every event fanout goes through `Index.candidate_subscription_keys/1`, which is a synchronous `GenServer.call` to a single process. Under load with many connections and high event throughput, this process becomes the serialization point for all fanout operations.
**Evidence:**
`lib/parrhesia/subscriptions/index.ex`:
- `candidate_subscription_keys/2` (line 68): `GenServer.call(server, {:candidate_subscription_keys, event})`
- This is called from every connection process for every ingested event (via `fanout_event/1` in `connection.ex` line 688).
- The ETS tables are `:protected`, meaning only the owning GenServer can write but any process can read.
**Recommended fix:**
Since the ETS tables are already `:protected` (readable by any process), refactor `candidate_subscription_keys/1` to read ETS directly from the caller's process, bypassing the GenServer. Only mutations (upsert/remove) need to be serialised through the owning GenServer. This eliminates the fanout serialization bottleneck entirely.
**Confidence:** High
---
## [Medium] Moderation Cache ETS Table Creation Race Condition
**Area:** robustness, OTP/design
**Why it matters:**
The moderation cache ETS table is lazily created on first access via `cache_table_ref/0`. If two processes simultaneously call a moderation function before the table exists, both will attempt `ets.new(:parrhesia_moderation_cache, [:named_table, ...])` — one will succeed and one will hit the rescue clause. While the rescue catches the `ArgumentError`, this is a race-prone pattern.
**Evidence:**
`lib/parrhesia/storage/adapters/postgres/moderation.ex` lines 211–231:
```elixir
defp cache_table_ref do
case :ets.whereis(@cache_table) do
:undefined ->
try do
:ets.new(@cache_table, [...])
rescue
ArgumentError -> @cache_table
end
@cache_table
_table_ref ->
@cache_table
end
end
```
Additionally, `ensure_cache_scope_loaded/1` has a TOCTOU race: it checks `ets.member(table, loaded_key)`, then loads from DB and inserts — two processes could both load and insert simultaneously, though this is less harmful (just redundant work).
**Recommended fix:**
Create the ETS table in a supervised process (e.g., in `Parrhesia.Policy.Supervisor` or `Parrhesia.Storage.Supervisor`) at startup, not lazily. This eliminates the race entirely.
**Confidence:** High
---
## [Medium] Archiver SQL Injection
**Area:** security
**Why it matters:**
The `Parrhesia.Storage.Archiver.archive_sql/2` function directly interpolates arguments into a SQL string without any sanitization or quoting.
**Evidence:**
`lib/parrhesia/storage/archiver.ex` line 32:
```elixir
def archive_sql(partition_name, archive_table_name) do
"INSERT INTO #{archive_table_name} SELECT * FROM #{partition_name};"
end
```
If either argument is derived from user input or external configuration, this is a SQL injection vector.
**Attack scenario:**
If the management API or any admin tool passes user-controlled input to this function (e.g., a partition name from a web request), an attacker could inject: `archive_sql("events_default; DROP TABLE events; --", "archive")`.
**Recommended fix:**
Quote identifiers using `~s("#{identifier}")` or better, use Ecto's `Ecto.Adapters.SQL.query/3` with proper identifier quoting. Validate that inputs match expected partition name patterns (e.g., `events_YYYYMM`).
**Confidence:** Medium (depends on whether this function is exposed to external input)
---
## [Medium] Count Query Materialises All Matching Event IDs in Memory
**Area:** performance
**Why it matters:**
The COUNT implementation fetches all matching event IDs into Elixir memory, deduplicates them with `MapSet.new()`, then counts. For large result sets, this is orders of magnitude slower and more memory-intensive than a SQL `COUNT(DISTINCT id)`.
**Evidence:**
`lib/parrhesia/storage/adapters/postgres/events.ex` lines 111–127:
```elixir
def count(_context, filters, opts) when is_list(opts) do
...
total_count =
filters
|> Enum.flat_map(fn filter ->
filter
|> event_id_query_for_filter(now, opts)
|> Repo.all() # fetches ALL matching IDs
end)
|> MapSet.new() # deduplicates in memory
|> MapSet.size()
...
end
```
**Attack scenario:**
Client sends `["COUNT", "c1", {"kinds": [1]}]` on a relay with 10 million kind-1 events. The relay fetches 10 million binary IDs into memory, builds a MapSet, then counts. This could use hundreds of megabytes of RAM per request.
**Recommended fix:**
For single-filter counts, use `SELECT COUNT(*)` or `SELECT COUNT(DISTINCT id)` directly in SQL. For multi-filter counts where deduplication is needed, use `UNION` in SQL rather than materialising in Elixir.
**Confidence:** High
---
## [Medium] NIP-42 AUTH Does Not Validate created_at Freshness
**Area:** protocol correctness, security
**Why it matters:**
NIP-42 suggests AUTH events should have a `created_at` close to current time (within ~10 minutes). The relay's AUTH handler validates the event (which includes a future-skew check of 15 minutes) but does not check if the event is too old. An AUTH event from days ago with a matching challenge could be replayed.
**Evidence:**
`lib/parrhesia/web/connection.ex`:
- `handle_auth/2` calls `Protocol.validate_event(auth_event)` which checks future skew but not past staleness.
- `validate_auth_event/1` (line 573) only checks kind and challenge tag.
- No `created_at` freshness check for AUTH events.
The NIP-98 implementation (`auth/nip98.ex`) DOES have a 60-second freshness check, but the WebSocket AUTH path does not.
**Recommended fix:**
Add a staleness check: reject AUTH events where `created_at` is more than N seconds in the past (e.g., 600 seconds matching NIP-42 suggestion).
**Confidence:** High
---
## [Low] NIP-11 Missing CORS Headers
**Area:** protocol correctness
**Why it matters:**
NIP-11 states relays MUST accept CORS requests by sending appropriate headers. The relay info endpoint does not set CORS headers.
**Evidence:**
`lib/parrhesia/web/router.ex` lines 44–55: the `/relay` GET handler returns NIP-11 JSON but does not set `Access-Control-Allow-Origin`, `Access-Control-Allow-Headers`, or `Access-Control-Allow-Methods` headers. No CORS plug is configured in the router.
**Recommended fix:**
Add CORS headers to the NIP-11 response, at minimum `Access-Control-Allow-Origin: *`.
**Confidence:** High
---
## [Low] Event Query Deduplication Done in Elixir Instead of SQL
**Area:** performance
**Why it matters:**
When a REQ has multiple filters, each filter runs a separate DB query, results are merged and deduplicated in Elixir using `Map.put_new/3`. This means the relay may fetch duplicate events from the DB and transfer them over the wire from PostgreSQL, only to discard them.
**Evidence:**
`lib/parrhesia/storage/adapters/postgres/events.ex` lines 85–95:
```elixir
persisted_events =
filters
|> Enum.flat_map(fn filter ->
filter |> event_query_for_filter(now, opts) |> Repo.all()
end)
|> deduplicate_events()
|> sort_persisted_events()
```
**Recommended fix:**
For multiple filters, consider using SQL `UNION` or `UNION ALL` with a final `DISTINCT ON` to push deduplication to the database. Alternatively, for the common case of a single filter (which is the majority of REQ messages), this is fine as-is.
**Confidence:** Medium
---
## [Low] No Validation of Subscription ID Content
**Area:** robustness
**Why it matters:**
Subscription IDs are validated for non-emptiness and max length (64 chars) but not for content. NIP-01 says subscription IDs are "arbitrary" strings, but allowing control characters, null bytes, or extremely long Unicode sequences could cause issues with logging, telemetry, or downstream systems.
**Evidence:**
`lib/parrhesia/protocol.ex` line 218:
```elixir
defp valid_subscription_id?(subscription_id) do
subscription_id != "" and String.length(subscription_id) <= 64
end
```
`String.length/1` counts Unicode graphemes, not bytes. A subscription ID of 64 emoji characters could be hundreds of bytes.
**Recommended fix:**
Consider validating that subscription IDs contain only printable ASCII, or at least limit by byte size rather than grapheme count.
**Confidence:** Medium
---
# Protocol Compliance Review
## NIPs Implemented
- **NIP-01**: Core protocol — substantially implemented. Critical gaps: no signature verification, lossy tags, no ephemeral handling.
- **NIP-09**: Event deletion — partially implemented (kind 5 with `e` tags only, missing `a` tag deletion).
- **NIP-11**: Relay information — implemented, missing CORS headers.
- **NIP-22**: Event `created_at` limits — implemented (future skew check, configurable).
- **NIP-40**: Expiration — implemented (storage, query filtering, periodic cleanup). Does not reject already-expired events on publish (SHOULD per spec).
- **NIP-42**: Authentication — implemented with challenge-response. Missing relay tag validation, AUTH event staleness check.
- **NIP-45**: COUNT — implemented with basic and HLL support. Performance concern with in-memory deduplication.
- **NIP-50**: Search — implemented via ILIKE. SQL injection concern. No full-text search.
- **NIP-70**: Protected events — implemented (tag check, pubkey match). Note: protected tag `["-"]` is lost on retrieval due to single-element tag storage bug.
- **NIP-77**: Negentropy — stub implementation (session tracking only, no actual reconciliation logic).
- **NIP-86**: Relay management — implemented with NIP-98 auth and audit logging.
- **NIP-98**: HTTP auth — implemented with freshness check.
- **MARMOT**: Kinds 443–449, 1059, 10050–10051 — validation and policy enforcement implemented.
## Non-Compliant Behaviours
1. **No signature verification** — violates NIP-01 MUST.
2. **Lossy tag storage** — violates NIP-01 data integrity.
3. **Ephemeral events persisted** — violates NIP-01 SHOULD NOT store.
4. **AUTH missing relay tag check** — violates NIP-42 MUST.
5. **NIP-09 missing `a` tag deletion** — partial implementation.
6. **NIP-40: expired events accepted on publish** — violates SHOULD reject.
7. **NIP-11 missing CORS** — violates MUST.
## Ambiguous Areas
- **NIP-01 replaceable event tie-breaking**: implemented correctly (lowest ID wins).
- **Deletion event storage**: kind 5 events are stored (correct — relay SHOULD continue publishing deletion requests).
- **NIP-45 HLL**: the HLL payload generation is a placeholder (hash of filter+count), not actual HyperLogLog registers. Clients expecting real HLL data will get nonsense.
---
# Robustness Review
The relay handles several failure modes well:
- WebSocket binary frames are rejected with a clear notice.
- Invalid JSON returns a structured NOTICE.
- GenServer exits are caught with `catch :exit` patterns throughout the connection handler.
- Outbound queue has configurable backpressure (close, drop_oldest, drop_newest).
- Subscription limits are enforced per connection.
- Process monitors clean up subscription index entries when connections die.
**Key resilience gaps:**
1. **No ingest rate limiting** — one client can monopolise the relay.
2. **No payload size enforcement** — oversized frames/events are processed.
3. **Unbounded tag count** — an event with 100,000 tags will generate 100,000 DB inserts in a single transaction.
4. **No filter complexity limits** — a filter with hundreds of tag values generates large `ANY(...)` queries.
5. **COUNT query memory explosion** — large counts materialise all IDs in memory.
6. **No timeout on DB queries** — a slow query (e.g., adversarial search pattern) blocks the connection process indefinitely.
7. **Single-GenServer bottleneck** — Subscription Index serialises all fanout lookups.
**Can one bad client destabilise the relay?** Yes. Through event spam (no rate limit), adversarial search patterns (LIKE injection), or large COUNT queries (memory exhaustion).
---
# Security Review
**Primary Attack Surfaces:**
1. **WebSocket ingress** — unauthenticated by default, no rate limiting, no payload size enforcement.
2. **NIP-50 search** — LIKE pattern injection enables CPU/IO exhaustion.
3. **NIP-86 management API** — properly gated by NIP-98, but `management_auth_required` is a config flag that defaults to `true`. If misconfigured, management API is open.
4. **Event forgery** — no signature verification means complete trust of client-provided pubkeys.
**DoS Vectors (ranked by impact):**
1. Event spam flood (unbounded ingest rate).
2. Adversarial ILIKE search patterns (DB CPU exhaustion).
3. Large COUNT queries (memory exhaustion).
4. Many concurrent subscriptions with broad filters (fanout amplification).
5. Oversized events with thousands of tags (transaction bloat).
6. Rapid REQ/CLOSE cycling (subscription index churn through single GenServer).
**Authentication/Authorization:**
- NIP-42 AUTH flow works but is weakened by missing relay tag validation.
- Protected event enforcement is correct (pubkey match required).
- Giftwrap (kind 1059) access control is properly implemented.
- Management API NIP-98 auth is solid with freshness check.
**No dynamic atom creation risks found.** Method names in admin are handled as strings. No `String.to_atom` or unsafe deserialization patterns detected.
**Information leakage:** Error messages in some paths use `inspect(reason)` which could leak internal Elixir terms to clients (e.g., `connection.ex` line 297, line 353, line 389). Consider sanitising.
---
# Performance Review
**Likely Hotspots:**
1. **Event ingest path**: validation → policy check → DB transaction (3 inserts + possible state table upsert). The transaction is the bottleneck — each event requires at minimum 2 DB round-trips (event_ids + events insert), plus tag inserts.
2. **Subscription fanout**: `Index.candidate_subscription_keys/1` through GenServer.call — serialisation point.
3. **Query path**: per-filter DB queries without UNION, Elixir-side deduplication and sorting.
4. **COUNT path**: materialises all matching IDs in memory.
5. **Search (ILIKE)**: sequential scan without text search index.
**Missing Indexes:**
- No index on `events.content` for search (NIP-50). ILIKE requires sequential scan.
- No composite index on `events (pubkey, kind, created_at)` for replaceable event queries.
- The `event_tags` index on `(name, value, event_created_at)` is good for tag queries.
**Scaling Ceiling:**
- **DB-bound** at moderate load (event ingest transactions).
- **CPU-bound** at high event rates if signature verification is added.
- **Memory-bound** if adversarial COUNT queries are submitted.
- **GenServer-bound** on fanout at high subscription counts.
**Top 3 Performance Improvements by Impact:**
1. **Make subscription index reads lock-free** — read ETS directly instead of through GenServer (effort: S, impact: High).
2. **Push COUNT to SQL**`SELECT COUNT(DISTINCT id)` instead of materialising (effort: S, impact: High).
3. **Add full-text search index**`GIN` index on `tsvector` column for NIP-50, replacing ILIKE (effort: M, impact: High).
---
# Database and Schema Review
**Strengths:**
- Range partitioning on `events.created_at` — good for time-based queries and partition pruning.
- Composite primary key `(created_at, id)` enables partition pruning on most queries.
- `event_ids` table for deduplication with `ON CONFLICT :nothing` — clean idempotency.
- State tables for replaceable/addressable events — correct approach with proper upsert/retire logic.
- Partial indexes on `expires_at` and `deleted_at` — avoids indexing NULLs.
- FK cascade from `event_tags` to `events` — ensures tag cleanup on delete.
**Weaknesses:**
1. **No unique index on `events.id`** — only a non-unique index. Two events with the same ID but different `created_at` could theoretically exist (the `event_ids` table prevents this at the application level, but there's no DB-level constraint on the events table).
2. **`event_tags` stores only name+value** — data loss for multi-element tags (Critical finding above).
3. **No `content` index for search** — ILIKE without index = sequential scan.
4. **`events.kind` is `integer` (4 bytes)** — NIP-01 allows kinds 0–65535, so `smallint` (2 bytes) would suffice and save space.
5. **No retention/partitioning strategy documented** — the default partition catches everything. No automated partition creation or cleanup.
6. **`d_tag` column in events table** — redundant with tag storage (but useful for addressable event queries). Not indexed, so no direct benefit. The addressable_event_state table handles this.
7. **No index on `events (id, created_at)` for deletion queries**`delete_by_request` queries by `id` and `pubkey` but the `id` index doesn't include `pubkey`.
**Missing DB-Level Invariants:**
- Events table should have a unique constraint on `id` (across partitions, which is tricky with range partitioning — the `event_ids` table compensates).
- No CHECK constraint on `kind >= 0`.
- No CHECK constraint on `created_at >= 0`.
---
# Test Review
**Well-Covered Areas:**
- Protocol encode/decode (`protocol_test.exs`)
- Filter validation and matching, including property-based tests (`filter_test.exs`, `filter_property_test.exs`)
- Event validation including MARMOT-specific kinds (`event_validator_marmot_test.exs`)
- Policy enforcement (`event_policy_test.exs`)
- Storage adapter contract compliance (`adapter_contract_test.exs`, `behaviour_contracts_test.exs`)
- PostgreSQL event lifecycle (put, query, delete, replace) (`events_lifecycle_test.exs`)
- WebSocket connection lifecycle (`connection_test.exs`)
- Auth challenges (`challenges_test.exs`)
- NIP-98 HTTP auth (`nip98_test.exs`)
- Fault injection (`fault_injection_test.exs`)
- Query plan regression (`query_plan_regression_test.exs`) — excellent practice
**Missing Critical Tests:**
1. **No signature verification tests** (because the feature doesn't exist).
2. **No test for tag data integrity** — round-trip test that verifies events with multi-element tags are returned unchanged.
3. **No ephemeral event test** — verifying kind 20000+ events are not persisted.
4. **No NIP-09 `a` tag deletion test**.
5. **No adversarial input tests** — LIKE injection patterns, oversized payloads, events with extreme tag counts.
6. **No concurrent write tests** — multiple processes writing the same replaceable event simultaneously.
7. **No AUTH relay tag validation test**.
8. **No test for expired event rejection on publish** (NIP-40).
**5 Most Valuable Tests to Add:**
1. Round-trip tag integrity: publish event with multi-element tags, query back, verify tags are identical.
2. Signature verification: publish event with wrong signature, verify rejection.
3. Concurrent replaceable event upsert: 10 processes writing same pubkey+kind, verify only one winner.
4. Adversarial search pattern: verify ILIKE with `%` metacharacters doesn't cause excessive query time.
5. Ingest rate limiting under load: verify relay remains responsive under event flood.
---
# Quick Wins
| Change | Impact | Effort |
|--------|--------|--------|
| Add Schnorr signature verification | Critical | M |
| Store full tags (add `tags` JSONB column to events) | Critical | M |
| Escape LIKE metacharacters in search | High | S |
| Read subscription index ETS directly (bypass GenServer for reads) | High | S |
| Push COUNT to SQL `COUNT(DISTINCT)` | High | S |
| Add `max_frame_size` to Bandit WebSocket options | High | S |
| Add AUTH relay tag validation | High | S |
| Skip persistence for ephemeral events | High | S |
| Add payload size check before JSON decode | High | S |
| Add CORS headers to NIP-11 endpoint | Low | S |
| Create ETS moderation cache table in supervisor | Medium | S |
| Add `created_at` staleness check to AUTH handler | Medium | S |
---
# Deep Refactor Opportunities
1. **Full-text search for NIP-50**: Replace ILIKE with PostgreSQL `tsvector`/`tsquery` and a GIN index. This eliminates the LIKE injection vector and dramatically improves search performance. Effort: M. Worth it if search is a used feature.
2. **SQL UNION for multi-filter queries**: Instead of running N queries and deduplicating in Elixir, build a single SQL query with UNION ALL and DISTINCT. Reduces DB round-trips and pushes deduplication to the engine. Effort: M.
3. **Per-connection rate limiter**: Add a token-bucket rate limiter to the connection state that throttles EVENT submissions. Consider a pluggable rate-limiting behaviour for flexibility. Effort: M.
4. **Event partitioning strategy**: Automate partition creation (monthly or weekly) and implement partition detach/archive for old data. The current default partition will accumulate all data forever. Effort: L.
5. **Batched tag insertion**: Instead of `Repo.insert_all` for tags within the transaction, accumulate tags and use a single multi-row insert with explicit conflict handling. Reduces round-trips for events with many tags. Effort: S.
---
# Final Verdict
**Would I trust this relay:**
- **For local development:** Yes, with awareness of the signature bypass.
- **For a small private relay (trusted clients):** Conditionally, after fixing lossy tags. The signature gap is tolerable only if all clients are trusted.
- **For a medium public relay:** No. Missing rate limiting, signature verification, and the LIKE injection vector make it unsafe.
- **For a hostile public internet deployment:** Absolutely not.
---
**Ship now?** No.
**Top blockers before deployment:**
1. **Add Schnorr signature verification** — without this, the relay has no identity security.
2. **Fix lossy tag storage** — store full tag arrays so events survive round-trips intact.
3. **Handle ephemeral events** — don't persist kinds 20000–29999.
4. **Escape LIKE metacharacters in search** — prevent DoS via adversarial patterns.
5. **Enforce payload size limits** — pass `max_frame_size` to Bandit, check payload size before decode.
6. **Add basic ingest rate limiting** — per-connection token bucket at minimum.
7. **Add AUTH relay tag validation** — prevent cross-relay AUTH replay.
After these seven fixes, the relay would be suitable for a private deployment with moderate trust. Public deployment would additionally require:
- Per-IP rate limiting
- Full-text search index (replacing ILIKE)
- SQL-based COUNT
- Lock-free subscription index reads
- NIP-09 `a` tag deletion for addressable events
- Comprehensive adversarial input testing

27
flake.lock generated Normal file
View File

@@ -0,0 +1,27 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1773389992,
"narHash": "sha256-wvfdLLWJ2I9oEpDd9PfMA8osfIZicoQ5MT1jIwNs9Tk=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "c06b4ae3d6599a672a6210b7021d699c351eebda",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

68
flake.nix Normal file
View File

@@ -0,0 +1,68 @@
{
  # Nix flake for the Parrhesia Nostr relay: per-system packages, a formatter,
  # and (Linux only) a layered Docker image built with dockerTools.
  description = "Parrhesia Nostr relay";

  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";

  outputs = {nixpkgs, ...}: let
    # All platforms the package set is instantiated for.
    systems = [
      "x86_64-linux"
      "aarch64-linux"
      "x86_64-darwin"
      "aarch64-darwin"
    ];
    forAllSystems = nixpkgs.lib.genAttrs systems;
  in {
    # `nix fmt` entry point (alejandra, the Nix formatter).
    formatter = forAllSystems (system: (import nixpkgs {inherit system;}).alejandra);

    packages = forAllSystems (
      system: let
        pkgs = import nixpkgs {inherit system;};
        lib = pkgs.lib;
        # The relay itself is built from ./default.nix via callPackage.
        parrhesia = pkgs.callPackage ./default.nix {};
      in
        {
          default = parrhesia;
          inherit parrhesia;
        }
        # The Docker image is only buildable on Linux hosts.
        // lib.optionalAttrs pkgs.stdenv.hostPlatform.isLinux {
          dockerImage = pkgs.dockerTools.buildLayeredImage {
            name = "parrhesia";
            tag = "latest";
            contents = [
              parrhesia
              pkgs.bash
              pkgs.cacert
              pkgs.coreutils
              # fakeNss provides /etc/passwd entries so the unprivileged
              # "nobody" user (65534) resolves inside the container.
              pkgs.fakeNss
            ];
            # World-writable /tmp (sticky bit) for the release's runtime files.
            extraCommands = ''
              mkdir -p tmp
              chmod 1777 tmp
            '';
            config = {
              Entrypoint = ["${parrhesia}/bin/parrhesia"];
              Cmd = ["foreground"];
              ExposedPorts = {
                "4413/tcp" = {};
              };
              WorkingDir = "/";
              # Run as nobody:nogroup — no root inside the container.
              User = "65534:65534";
              Env = [
                "HOME=/tmp"
                "LANG=C.UTF-8"
                "LC_ALL=C.UTF-8"
                "MIX_ENV=prod"
                "PORT=4413"
                # Single-node release; disable Erlang distribution.
                "RELEASE_DISTRIBUTION=none"
                "SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt"
              ];
            };
          };
        }
    );
  };
}

View File

@@ -13,6 +13,7 @@ defmodule Parrhesia.Application do
Parrhesia.Auth.Supervisor,
Parrhesia.Policy.Supervisor,
Parrhesia.Web.Endpoint,
Parrhesia.Web.MetricsEndpoint,
Parrhesia.Tasks.Supervisor
]

View File

@@ -7,10 +7,17 @@ defmodule Parrhesia.Negentropy.Sessions do
@type session_key :: {pid(), String.t()}
@default_max_payload_bytes 4096
@default_max_sessions_per_owner 8
@default_max_total_sessions 10_000
@default_max_idle_seconds 60
@default_sweep_interval_seconds 10
@sweep_idle_sessions :sweep_idle_sessions
@spec start_link(keyword()) :: GenServer.on_start()
def start_link(opts \\ []) do
name = Keyword.get(opts, :name, __MODULE__)
GenServer.start_link(__MODULE__, :ok, name: name)
GenServer.start_link(__MODULE__, opts, name: name)
end
@spec open(GenServer.server(), pid(), String.t(), map()) :: {:ok, map()} | {:error, term()}
@@ -32,18 +39,51 @@ defmodule Parrhesia.Negentropy.Sessions do
end
@impl true
def init(:ok) do
{:ok, %{sessions: %{}, monitors: %{}}}
def init(opts) do
max_idle_ms =
normalize_positive_integer(Keyword.get(opts, :max_idle_seconds), max_idle_seconds()) * 1000
sweep_interval_ms =
normalize_positive_integer(
Keyword.get(opts, :sweep_interval_seconds),
sweep_interval_seconds()
) *
1000
state = %{
sessions: %{},
monitors: %{},
max_payload_bytes:
normalize_positive_integer(Keyword.get(opts, :max_payload_bytes), max_payload_bytes()),
max_sessions_per_owner:
normalize_positive_integer(
Keyword.get(opts, :max_sessions_per_owner),
max_sessions_per_owner()
),
max_total_sessions:
normalize_positive_integer(Keyword.get(opts, :max_total_sessions), max_total_sessions()),
max_idle_ms: max_idle_ms,
sweep_interval_ms: sweep_interval_ms
}
:ok = schedule_idle_sweep(sweep_interval_ms)
{:ok, state}
end
@impl true
def handle_call({:open, owner_pid, subscription_id, params}, _from, state) do
key = {owner_pid, subscription_id}
with :ok <- validate_payload_size(params, state.max_payload_bytes),
:ok <- enforce_session_limits(state, owner_pid, key) do
now_ms = System.monotonic_time(:millisecond)
session = %{
cursor: 0,
params: params,
opened_at: System.system_time(:second)
opened_at: System.system_time(:second),
last_active_at_ms: now_ms
}
state =
@@ -52,6 +92,10 @@ defmodule Parrhesia.Negentropy.Sessions do
|> put_in([:sessions, key], session)
{:reply, {:ok, %{"status" => "open", "cursor" => 0}}, state}
else
{:error, reason} ->
{:reply, {:error, reason}, state}
end
end
def handle_call({:message, owner_pid, subscription_id, payload}, _from, state) do
@@ -62,22 +106,68 @@ defmodule Parrhesia.Negentropy.Sessions do
{:reply, {:error, :unknown_session}, state}
session ->
case validate_payload_size(payload, state.max_payload_bytes) do
:ok ->
cursor = session.cursor + 1
next_session = %{session | cursor: cursor, params: Map.merge(session.params, payload)}
next_session = %{
session
| cursor: cursor,
last_active_at_ms: System.monotonic_time(:millisecond)
}
state = put_in(state, [:sessions, key], next_session)
{:reply, {:ok, %{"status" => "ack", "cursor" => cursor}}, state}
{:error, reason} ->
{:reply, {:error, reason}, state}
end
end
end
def handle_call({:close, owner_pid, subscription_id}, _from, state) do
key = {owner_pid, subscription_id}
state = update_in(state.sessions, &Map.delete(&1, key))
state =
state
|> update_in([:sessions], &Map.delete(&1, key))
|> maybe_remove_monitor_if_owner_has_no_sessions(owner_pid)
{:reply, :ok, state}
end
@impl true
def handle_info(@sweep_idle_sessions, state) do
now_ms = System.monotonic_time(:millisecond)
sessions =
Enum.reduce(state.sessions, %{}, fn {key, session}, acc ->
idle_ms = now_ms - Map.get(session, :last_active_at_ms, now_ms)
if idle_ms >= state.max_idle_ms do
acc
else
Map.put(acc, key, session)
end
end)
owner_pids =
sessions
|> Map.keys()
|> Enum.map(fn {owner_pid, _subscription_id} -> owner_pid end)
|> MapSet.new()
state =
state
|> Map.put(:sessions, sessions)
|> clear_monitors_without_sessions(owner_pids)
:ok = schedule_idle_sweep(state.sweep_interval_ms)
{:noreply, state}
end
def handle_info({:DOWN, monitor_ref, :process, owner_pid, _reason}, state) do
case Map.get(state.monitors, owner_pid) do
^monitor_ref ->
@@ -95,6 +185,16 @@ defmodule Parrhesia.Negentropy.Sessions do
def handle_info(_message, state), do: {:noreply, state}
defp clear_monitors_without_sessions(state, owner_pids) do
Enum.reduce(Map.keys(state.monitors), state, fn owner_pid, acc ->
if MapSet.member?(owner_pids, owner_pid) do
acc
else
maybe_remove_monitor(acc, owner_pid)
end
end)
end
defp remove_owner_sessions(state, owner_pid) do
update_in(state.sessions, fn sessions ->
sessions
@@ -103,6 +203,39 @@ defmodule Parrhesia.Negentropy.Sessions do
end)
end
defp validate_payload_size(payload, max_payload_bytes) do
if :erlang.external_size(payload) <= max_payload_bytes do
:ok
else
{:error, :payload_too_large}
end
end
defp enforce_session_limits(state, owner_pid, key) do
if Map.has_key?(state.sessions, key) do
:ok
else
total_sessions = map_size(state.sessions)
cond do
total_sessions >= state.max_total_sessions ->
{:error, :session_limit_reached}
owner_session_count(state.sessions, owner_pid) >= state.max_sessions_per_owner ->
{:error, :owner_session_limit_reached}
true ->
:ok
end
end
end
defp owner_session_count(sessions, owner_pid) do
Enum.count(sessions, fn {{session_owner, _subscription_id}, _session} ->
session_owner == owner_pid
end)
end
defp ensure_monitor(state, owner_pid) do
case Map.has_key?(state.monitors, owner_pid) do
true -> state
@@ -110,6 +243,14 @@ defmodule Parrhesia.Negentropy.Sessions do
end
end
defp maybe_remove_monitor_if_owner_has_no_sessions(state, owner_pid) do
if owner_session_count(state.sessions, owner_pid) == 0 do
maybe_remove_monitor(state, owner_pid)
else
state
end
end
defp maybe_remove_monitor(state, owner_pid) do
{monitor_ref, monitors} = Map.pop(state.monitors, owner_pid)
@@ -119,4 +260,44 @@ defmodule Parrhesia.Negentropy.Sessions do
Map.put(state, :monitors, monitors)
end
defp schedule_idle_sweep(sweep_interval_ms) do
_timer_ref = Process.send_after(self(), @sweep_idle_sessions, sweep_interval_ms)
:ok
end
defp max_payload_bytes do
:parrhesia
|> Application.get_env(:limits, [])
|> Keyword.get(:max_negentropy_payload_bytes, @default_max_payload_bytes)
end
defp max_sessions_per_owner do
:parrhesia
|> Application.get_env(:limits, [])
|> Keyword.get(:max_negentropy_sessions_per_connection, @default_max_sessions_per_owner)
end
defp max_total_sessions do
:parrhesia
|> Application.get_env(:limits, [])
|> Keyword.get(:max_negentropy_total_sessions, @default_max_total_sessions)
end
defp max_idle_seconds do
:parrhesia
|> Application.get_env(:limits, [])
|> Keyword.get(:negentropy_session_idle_timeout_seconds, @default_max_idle_seconds)
end
defp sweep_interval_seconds do
:parrhesia
|> Application.get_env(:limits, [])
|> Keyword.get(:negentropy_session_sweep_interval_seconds, @default_sweep_interval_seconds)
end
defp normalize_positive_integer(value, _default) when is_integer(value) and value > 0,
do: value
defp normalize_positive_integer(_value, default), do: default
end

View File

@@ -0,0 +1 @@
# Defines Parrhesia.PostgresTypes, the Postgrex type module used by the Repo.
# Passing `json: JSON` makes json/jsonb columns encode/decode through the
# Elixir standard-library JSON module instead of a third-party codec.
Postgrex.Types.define(Parrhesia.PostgresTypes, [], json: JSON)

View File

@@ -21,6 +21,7 @@ defmodule Parrhesia.Protocol.EventValidator do
| :invalid_content
| :invalid_sig
| :invalid_id_hash
| :invalid_signature
| :invalid_marmot_keypackage_content
| :missing_marmot_encoding_tag
| :invalid_marmot_encoding_tag
@@ -54,7 +55,8 @@ defmodule Parrhesia.Protocol.EventValidator do
:ok <- validate_tags(event["tags"]),
:ok <- validate_content(event["content"]),
:ok <- validate_sig(event["sig"]),
:ok <- validate_id_hash(event) do
:ok <- validate_id_hash(event),
:ok <- validate_signature(event) do
validate_kind_specific(event)
end
end
@@ -89,6 +91,7 @@ defmodule Parrhesia.Protocol.EventValidator do
invalid_content: "invalid: content must be a string",
invalid_sig: "invalid: sig must be 64-byte lowercase hex",
invalid_id_hash: "invalid: event id does not match serialized event",
invalid_signature: "invalid: event signature is invalid",
invalid_marmot_keypackage_content: "invalid: kind 443 content must be non-empty base64",
missing_marmot_encoding_tag: "invalid: kind 443 must include [\"encoding\", \"base64\"]",
invalid_marmot_encoding_tag: "invalid: kind 443 must include [\"encoding\", \"base64\"]",
@@ -193,6 +196,29 @@ defmodule Parrhesia.Protocol.EventValidator do
end
end
defp validate_signature(event) do
if verify_event_signatures?() do
verify_signature(event)
else
:ok
end
end
defp verify_signature(%{"id" => id, "pubkey" => pubkey, "sig" => sig}) do
with {:ok, id_bin} <- Base.decode16(id, case: :lower),
{:ok, pubkey_bin} <- Base.decode16(pubkey, case: :lower),
{:ok, sig_bin} <- Base.decode16(sig, case: :lower),
true <- Secp256k1.schnorr_valid?(sig_bin, id_bin, pubkey_bin) do
:ok
else
_other -> {:error, :invalid_signature}
end
rescue
_error -> {:error, :invalid_signature}
end
defp verify_signature(_event), do: {:error, :invalid_signature}
defp valid_tag?(tag) when is_list(tag) do
tag != [] and Enum.all?(tag, &is_binary/1)
end
@@ -473,6 +499,12 @@ defmodule Parrhesia.Protocol.EventValidator do
match?({:ok, _decoded}, Base.decode16(value, case: :lower))
end
defp verify_event_signatures? do
:parrhesia
|> Application.get_env(:features, [])
|> Keyword.get(:verify_event_signatures, true)
end
defp max_event_future_skew_seconds do
:parrhesia
|> Application.get_env(:limits, [])

35
lib/parrhesia/release.ex Normal file
View File

@@ -0,0 +1,35 @@
defmodule Parrhesia.Release do
  @moduledoc """
  Helpers for running Ecto tasks from a production release.

  Releases have no Mix, so migrations are driven directly through
  `Ecto.Migrator` after loading the application's configuration.
  """

  @app :parrhesia

  @doc """
  Runs all pending migrations (`:up`, `all: true`) for every configured repo.
  """
  def migrate do
    load_app()

    for repo <- repos() do
      migrator = fn started_repo -> Ecto.Migrator.run(started_repo, :up, all: true) end
      {:ok, _, _} = Ecto.Migrator.with_repo(repo, migrator)
    end
  end

  @doc """
  Rolls `repo` back down to the given migration `version`.
  """
  def rollback(repo, version) when is_atom(repo) and is_integer(version) do
    load_app()

    {:ok, _, _} =
      Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :down, to: version))
  end

  # Loads the app so its config (notably :ecto_repos) is readable
  # without starting the whole supervision tree.
  defp load_app, do: Application.load(@app)

  # Every repo declared under the :ecto_repos key.
  defp repos, do: Application.fetch_env!(@app, :ecto_repos)
end

View File

@@ -64,21 +64,49 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
@impl true
def delete_by_request(_context, event) do
delete_ids =
deleter_pubkey = Map.get(event, "pubkey")
delete_event_ids =
event
|> Map.get("tags", [])
|> Enum.flat_map(fn
["e", event_id | _rest] -> [event_id]
["e", event_id | _rest] when is_binary(event_id) -> [event_id]
_tag -> []
end)
delete_coordinates =
event
|> Map.get("tags", [])
|> Enum.flat_map(fn
["a", coordinate | _rest] when is_binary(coordinate) ->
case parse_delete_coordinate(coordinate) do
{:ok, parsed_coordinate} -> [parsed_coordinate]
{:error, _reason} -> []
end
_tag ->
[]
end)
coordinate_delete_ids =
Store.get(fn state ->
state.events
|> Map.values()
|> Enum.filter(fn candidate ->
matches_delete_coordinate?(candidate, delete_coordinates, deleter_pubkey)
end)
|> Enum.map(& &1["id"])
end)
all_delete_ids = Enum.uniq(delete_event_ids ++ coordinate_delete_ids)
Store.update(fn state ->
Enum.reduce(delete_ids, state, fn event_id, acc ->
Enum.reduce(all_delete_ids, state, fn event_id, acc ->
update_in(acc.deleted, &MapSet.put(&1, event_id))
end)
end)
{:ok, length(delete_ids)}
{:ok, length(all_delete_ids)}
end
@impl true
@@ -105,6 +133,47 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
@impl true
def purge_expired(_opts), do: {:ok, 0}
defp parse_delete_coordinate(coordinate) do
case String.split(coordinate, ":", parts: 3) do
[kind_part, pubkey, d_tag] ->
case Integer.parse(kind_part) do
{kind, ""} when kind >= 0 -> {:ok, %{kind: kind, pubkey: pubkey, d_tag: d_tag}}
_other -> {:error, :invalid_coordinate}
end
_other ->
{:error, :invalid_coordinate}
end
end
defp matches_delete_coordinate?(candidate, delete_coordinates, deleter_pubkey) do
Enum.any?(delete_coordinates, fn coordinate ->
coordinate.pubkey == deleter_pubkey and
candidate["pubkey"] == deleter_pubkey and
candidate["kind"] == coordinate.kind and
coordinate_match_for_kind?(candidate, coordinate)
end)
end
defp coordinate_match_for_kind?(candidate, coordinate) do
if addressable_kind?(coordinate.kind) do
candidate_d_tag =
candidate
|> Map.get("tags", [])
|> Enum.find_value("", fn
["d", value | _rest] -> value
_tag -> nil
end)
candidate_d_tag == coordinate.d_tag
else
replaceable_kind?(coordinate.kind)
end
end
defp replaceable_kind?(kind), do: kind in [0, 3] or (kind >= 10_000 and kind < 20_000)
defp addressable_kind?(kind), do: kind >= 30_000 and kind < 40_000
defp giftwrap_visible_to_requester?(%{"kind" => 1059} = event, requester_pubkeys) do
requester_pubkeys != [] and
event_targets_any_recipient?(event, requester_pubkeys)

View File

@@ -56,6 +56,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
pubkey: event.pubkey,
created_at: event.created_at,
kind: event.kind,
tags: event.tags,
content: event.content,
sig: event.sig
}
@@ -66,13 +67,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
{:ok, nil}
persisted_event ->
tags = load_tags([{persisted_event.created_at, persisted_event.id}])
{:ok,
to_nostr_event(
persisted_event,
Map.get(tags, {persisted_event.created_at, persisted_event.id}, [])
)}
{:ok, to_nostr_event(persisted_event)}
end
end
end
@@ -93,15 +88,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|> sort_persisted_events()
|> maybe_apply_query_limit(opts)
event_keys = Enum.map(persisted_events, fn event -> {event.created_at, event.id} end)
tags_by_event = load_tags(event_keys)
nostr_events =
Enum.map(persisted_events, fn event ->
to_nostr_event(event, Map.get(tags_by_event, {event.created_at, event.id}, []))
end)
{:ok, nostr_events}
{:ok, Enum.map(persisted_events, &to_nostr_event/1)}
end
end
@@ -114,13 +101,12 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
total_count =
filters
|> Enum.flat_map(fn filter ->
filter
|> event_id_query_for_filter(now, opts)
|> Repo.all()
|> event_id_union_query_for_filters(now, opts)
|> subquery()
|> then(fn union_query ->
from(event in union_query, select: count(event.id, :distinct))
end)
|> MapSet.new()
|> MapSet.size()
|> Repo.one()
{:ok, total_count}
end
@@ -131,7 +117,26 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
@impl true
def delete_by_request(_context, event) do
with {:ok, deleter_pubkey} <- decode_hex(Map.get(event, "pubkey"), 32, :invalid_pubkey),
{:ok, delete_ids} <- extract_delete_event_ids(event) do
{:ok, delete_targets} <- extract_delete_targets(event) do
deleted_at = System.system_time(:second)
deleted_by_id_count =
delete_targets
|> Map.get(:event_ids, [])
|> delete_events_by_ids(deleter_pubkey, deleted_at)
deleted_by_coordinate_count =
delete_targets
|> Map.get(:coordinates, [])
|> delete_events_by_coordinates(deleter_pubkey, deleted_at)
{:ok, deleted_by_id_count + deleted_by_coordinate_count}
end
end
defp delete_events_by_ids([], _deleter_pubkey, _deleted_at), do: 0
defp delete_events_by_ids(delete_ids, deleter_pubkey, deleted_at) do
query =
from(stored_event in "events",
where:
@@ -140,9 +145,55 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
is_nil(stored_event.deleted_at)
)
deleted_at = System.system_time(:second)
{count, _result} = Repo.update_all(query, set: [deleted_at: deleted_at])
{:ok, count}
count
end
defp delete_events_by_coordinates([], _deleter_pubkey, _deleted_at), do: 0
defp delete_events_by_coordinates(coordinates, deleter_pubkey, deleted_at) do
relevant_coordinates =
Enum.filter(coordinates, fn coordinate ->
coordinate.pubkey == deleter_pubkey and
(replaceable_kind?(coordinate.kind) or addressable_kind?(coordinate.kind))
end)
if relevant_coordinates == [] do
0
else
dynamic_conditions =
Enum.reduce(relevant_coordinates, dynamic(false), fn coordinate, acc ->
coordinate_condition =
coordinate_delete_condition(coordinate, deleter_pubkey)
dynamic([stored_event], ^acc or ^coordinate_condition)
end)
query =
from(stored_event in "events",
where: is_nil(stored_event.deleted_at)
)
|> where(^dynamic_conditions)
{count, _result} = Repo.update_all(query, set: [deleted_at: deleted_at])
count
end
end
defp coordinate_delete_condition(coordinate, deleter_pubkey) do
if addressable_kind?(coordinate.kind) do
dynamic(
[stored_event],
stored_event.kind == ^coordinate.kind and
stored_event.pubkey == ^deleter_pubkey and
stored_event.d_tag == ^coordinate.d_tag
)
else
dynamic(
[stored_event],
stored_event.kind == ^coordinate.kind and
stored_event.pubkey == ^deleter_pubkey
)
end
end
@@ -545,6 +596,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
pubkey: normalized_event.pubkey,
created_at: normalized_event.created_at,
kind: normalized_event.kind,
tags: normalized_event.tags,
content: normalized_event.content,
sig: normalized_event.sig,
d_tag: normalized_event.d_tag,
@@ -564,6 +616,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
pubkey: event.pubkey,
created_at: event.created_at,
kind: event.kind,
tags: event.tags,
content: event.content,
sig: event.sig
}
@@ -598,6 +651,20 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|> maybe_restrict_giftwrap_access(filter, opts)
end
defp event_id_union_query_for_filters([], now, _opts) do
from(event in "events",
where: event.created_at > ^now and event.created_at < ^now,
select: event.id
)
end
defp event_id_union_query_for_filters([first_filter | rest_filters], now, opts) do
Enum.reduce(rest_filters, event_id_query_for_filter(first_filter, now, opts), fn filter,
acc ->
union_all(acc, ^event_id_query_for_filter(filter, now, opts))
end)
end
defp maybe_filter_ids(query, nil), do: query
defp maybe_filter_ids(query, ids) do
@@ -624,11 +691,19 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
defp maybe_filter_search(query, nil), do: query
defp maybe_filter_search(query, search) when is_binary(search) and search != "" do
where(query, [event], ilike(event.content, ^"%#{search}%"))
escaped_search = escape_like_pattern(search)
where(query, [event], ilike(event.content, ^"%#{escaped_search}%"))
end
defp maybe_filter_search(query, _search), do: query
defp escape_like_pattern(search) do
search
|> String.replace("\\", "\\\\")
|> String.replace("%", "\\%")
|> String.replace("_", "\\_")
end
defp filter_by_tags(query, filter) do
filter
|> tag_filters()
@@ -734,44 +809,21 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
end
end
defp load_tags([]), do: %{}
defp load_tags(event_keys) when is_list(event_keys) do
created_at_values = Enum.map(event_keys, fn {created_at, _event_id} -> created_at end)
event_id_values = Enum.map(event_keys, fn {_created_at, event_id} -> event_id end)
query =
from(tag in "event_tags",
where: tag.event_created_at in ^created_at_values and tag.event_id in ^event_id_values,
order_by: [asc: tag.idx],
select: %{
event_created_at: tag.event_created_at,
event_id: tag.event_id,
name: tag.name,
value: tag.value
}
)
query
|> Repo.all()
|> Enum.group_by(
fn tag -> {tag.event_created_at, tag.event_id} end,
fn tag -> [tag.name, tag.value] end
)
end
defp to_nostr_event(persisted_event, tags) do
defp to_nostr_event(persisted_event) do
%{
"id" => Base.encode16(persisted_event.id, case: :lower),
"pubkey" => Base.encode16(persisted_event.pubkey, case: :lower),
"created_at" => persisted_event.created_at,
"kind" => persisted_event.kind,
"tags" => tags,
"tags" => normalize_persisted_tags(persisted_event.tags),
"content" => persisted_event.content,
"sig" => Base.encode16(persisted_event.sig, case: :lower)
}
end
defp normalize_persisted_tags(tags) when is_list(tags), do: tags
defp normalize_persisted_tags(_tags), do: []
defp decode_hex(value, bytes, reason) when is_binary(value) do
if byte_size(value) == bytes * 2 do
case Base.decode16(value, case: :mixed) do
@@ -818,23 +870,69 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
end)
end
defp extract_delete_event_ids(event) do
delete_ids =
event
|> Map.get("tags", [])
|> Enum.reduce([], fn
["e", event_id | _rest], acc when is_binary(event_id) -> [event_id | acc]
_tag, acc -> acc
end)
|> Enum.uniq()
defp extract_delete_targets(event) do
with {:ok, targets} <- parse_delete_targets(Map.get(event, "tags", [])) do
event_ids = targets.event_ids |> Enum.uniq()
coordinates = targets.coordinates |> Enum.uniq()
if delete_ids == [] do
if event_ids == [] and coordinates == [] do
{:error, :no_delete_targets}
else
{:ok, Enum.map(delete_ids, &Base.decode16!(&1, case: :mixed))}
{:ok, %{event_ids: event_ids, coordinates: coordinates}}
end
end
end
defp parse_delete_targets(tags) when is_list(tags) do
Enum.reduce_while(tags, {:ok, %{event_ids: [], coordinates: []}}, fn tag, {:ok, acc} ->
case parse_delete_target(tag) do
{:ok, {:event_id, event_id}} ->
{:cont, {:ok, %{acc | event_ids: [event_id | acc.event_ids]}}}
{:ok, {:coordinate, coordinate}} ->
{:cont, {:ok, %{acc | coordinates: [coordinate | acc.coordinates]}}}
:ignore ->
{:cont, {:ok, acc}}
{:error, _reason} = error ->
{:halt, error}
end
end)
end
defp parse_delete_targets(_tags), do: {:error, :invalid_delete_target}
defp parse_delete_target(["e", event_id | _rest]) when is_binary(event_id) do
case decode_hex(event_id, 32, :invalid_delete_target) do
{:ok, decoded_event_id} -> {:ok, {:event_id, decoded_event_id}}
{:error, _reason} -> {:error, :invalid_delete_target}
end
end
defp parse_delete_target(["a", coordinate | _rest]) when is_binary(coordinate) do
case parse_address_coordinate(coordinate) do
{:ok, parsed_coordinate} -> {:ok, {:coordinate, parsed_coordinate}}
{:error, _reason} -> {:error, :invalid_delete_target}
end
end
defp parse_delete_target(_tag), do: :ignore
defp parse_address_coordinate(coordinate) do
case String.split(coordinate, ":", parts: 3) do
[kind_part, pubkey_hex, d_tag] ->
with {kind, ""} <- Integer.parse(kind_part),
true <- kind >= 0,
{:ok, pubkey} <- decode_hex(pubkey_hex, 32, :invalid_delete_target) do
{:ok, %{kind: kind, pubkey: pubkey, d_tag: d_tag}}
else
_other -> {:error, :invalid_delete_target}
end
_other ->
{:error, :invalid_delete_target}
end
rescue
ArgumentError -> {:error, :invalid_delete_target}
end
defp extract_expiration(tags) do

View File

@@ -9,87 +9,111 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
@behaviour Parrhesia.Storage.Moderation
@cache_table :parrhesia_moderation_cache
@cache_scope_sources %{
banned_pubkeys: {"banned_pubkeys", :pubkey},
allowed_pubkeys: {"allowed_pubkeys", :pubkey},
banned_events: {"banned_events", :event_id},
blocked_ips: {"blocked_ips", :ip}
}
@impl true
def ban_pubkey(_context, pubkey) do
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey) do
upsert_presence_table("banned_pubkeys", :pubkey, normalized_pubkey)
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey),
:ok <- upsert_presence_table("banned_pubkeys", :pubkey, normalized_pubkey) do
cache_put(:banned_pubkeys, normalized_pubkey)
:ok
end
end
@impl true
def unban_pubkey(_context, pubkey) do
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey) do
delete_from_table("banned_pubkeys", :pubkey, normalized_pubkey)
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey),
:ok <- delete_from_table("banned_pubkeys", :pubkey, normalized_pubkey) do
cache_delete(:banned_pubkeys, normalized_pubkey)
:ok
end
end
@impl true
def pubkey_banned?(_context, pubkey) do
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey) do
{:ok, exists_in_table?("banned_pubkeys", :pubkey, normalized_pubkey)}
{:ok, exists_in_scope?(:banned_pubkeys, normalized_pubkey)}
end
end
@impl true
def allow_pubkey(_context, pubkey) do
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey) do
upsert_presence_table("allowed_pubkeys", :pubkey, normalized_pubkey)
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey),
:ok <- upsert_presence_table("allowed_pubkeys", :pubkey, normalized_pubkey) do
cache_put(:allowed_pubkeys, normalized_pubkey)
:ok
end
end
@impl true
def disallow_pubkey(_context, pubkey) do
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey) do
delete_from_table("allowed_pubkeys", :pubkey, normalized_pubkey)
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey),
:ok <- delete_from_table("allowed_pubkeys", :pubkey, normalized_pubkey) do
cache_delete(:allowed_pubkeys, normalized_pubkey)
:ok
end
end
@impl true
def pubkey_allowed?(_context, pubkey) do
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey) do
{:ok, exists_in_table?("allowed_pubkeys", :pubkey, normalized_pubkey)}
{:ok, exists_in_scope?(:allowed_pubkeys, normalized_pubkey)}
end
end
@impl true
def ban_event(_context, event_id) do
with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id) do
upsert_presence_table("banned_events", :event_id, normalized_event_id)
with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id),
:ok <- upsert_presence_table("banned_events", :event_id, normalized_event_id) do
cache_put(:banned_events, normalized_event_id)
:ok
end
end
@impl true
def unban_event(_context, event_id) do
with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id) do
delete_from_table("banned_events", :event_id, normalized_event_id)
with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id),
:ok <- delete_from_table("banned_events", :event_id, normalized_event_id) do
cache_delete(:banned_events, normalized_event_id)
:ok
end
end
@impl true
def event_banned?(_context, event_id) do
with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id) do
{:ok, exists_in_table?("banned_events", :event_id, normalized_event_id)}
{:ok, exists_in_scope?(:banned_events, normalized_event_id)}
end
end
@impl true
def block_ip(_context, ip_address) do
with {:ok, normalized_ip} <- normalize_ip(ip_address) do
upsert_presence_table("blocked_ips", :ip, normalized_ip)
with {:ok, normalized_ip} <- normalize_ip(ip_address),
:ok <- upsert_presence_table("blocked_ips", :ip, normalized_ip) do
cache_put(:blocked_ips, normalized_ip)
:ok
end
end
@impl true
def unblock_ip(_context, ip_address) do
with {:ok, normalized_ip} <- normalize_ip(ip_address) do
delete_from_table("blocked_ips", :ip, normalized_ip)
with {:ok, normalized_ip} <- normalize_ip(ip_address),
:ok <- delete_from_table("blocked_ips", :ip, normalized_ip) do
cache_delete(:blocked_ips, normalized_ip)
:ok
end
end
@impl true
def ip_blocked?(_context, ip_address) do
with {:ok, normalized_ip} <- normalize_ip(ip_address) do
{:ok, exists_in_table?("blocked_ips", :ip, normalized_ip)}
{:ok, exists_in_scope?(:blocked_ips, normalized_ip)}
end
end
@@ -122,7 +146,96 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
:ok
end
defp exists_in_table?(table, field, value) do
defp exists_in_scope?(scope, value) do
{table, field} = cache_scope_source!(scope)
if moderation_cache_enabled?() do
case cache_table_ref() do
:undefined ->
exists_in_table_db?(table, field, value)
cache_table ->
ensure_cache_scope_loaded(scope, cache_table)
:ets.member(cache_table, cache_member_key(scope, value))
end
else
exists_in_table_db?(table, field, value)
end
end
defp ensure_cache_scope_loaded(scope, table) do
loaded_key = cache_loaded_key(scope)
if :ets.member(table, loaded_key) do
:ok
else
{db_table, db_field} = cache_scope_source!(scope)
values = load_scope_values(db_table, db_field)
entries = Enum.map(values, &{cache_member_key(scope, &1), true})
if entries != [] do
true = :ets.insert(table, entries)
end
true = :ets.insert(table, {loaded_key, true})
:ok
end
end
defp load_scope_values(table, field) do
query =
from(record in table,
select: field(record, ^field)
)
Repo.all(query)
end
defp cache_put(scope, value) do
if moderation_cache_enabled?() do
case cache_table_ref() do
:undefined -> :ok
cache_table -> true = :ets.insert(cache_table, {cache_member_key(scope, value), true})
end
end
:ok
end
defp cache_delete(scope, value) do
if moderation_cache_enabled?() do
case cache_table_ref() do
:undefined -> :ok
cache_table -> true = :ets.delete(cache_table, cache_member_key(scope, value))
end
end
:ok
end
defp cache_scope_source!(scope), do: Map.fetch!(@cache_scope_sources, scope)
defp cache_loaded_key(scope), do: {:loaded, scope}
defp cache_member_key(scope, value), do: {:member, scope, value}
defp cache_table_ref do
case :ets.whereis(@cache_table) do
:undefined -> :undefined
_table_ref -> @cache_table
end
end
defp moderation_cache_enabled? do
case Application.get_env(:parrhesia, :moderation_cache_enabled, true) do
true -> true
false -> false
_other -> true
end
end
defp exists_in_table_db?(table, field, value) do
query =
from(record in table,
where: field(record, ^field) == ^value,

View File

@@ -0,0 +1,28 @@
defmodule Parrhesia.Storage.Adapters.Postgres.ModerationCache do
  @moduledoc """
  ETS owner process for moderation cache tables.

  The GenServer holds no state of its own; it exists solely so the named,
  public ETS table survives as long as the supervision tree keeps it alive.
  """

  use GenServer

  @cache_table :parrhesia_moderation_cache

  @spec start_link(keyword()) :: GenServer.on_start()
  def start_link(opts \\ []) do
    GenServer.start_link(__MODULE__, :ok, opts)
  end

  @impl true
  def init(:ok) do
    # Public table with concurrency hints: readers/writers access it
    # directly without going through this process.
    table_options = [
      :named_table,
      :set,
      :public,
      read_concurrency: true,
      write_concurrency: true
    ]

    _table = :ets.new(@cache_table, table_options)

    {:ok, %{}}
  end
end

View File

@@ -1,34 +0,0 @@
defmodule Parrhesia.Storage.Archiver do
  @moduledoc """
  Partition-aware archival helpers for Postgres event partitions.
  """

  import Ecto.Query

  alias Parrhesia.Repo

  # Strict SQL identifier shape used to guard string-interpolated table names.
  @identifier_pattern ~r/^[a-zA-Z_][a-zA-Z0-9_]*$/

  @doc """
  Lists all `events_*` partitions excluding the default partition.
  """
  @spec list_partitions() :: [String.t()]
  def list_partitions do
    query =
      from(table in "pg_tables",
        where: table.schemaname == "public",
        where: like(table.tablename, "events_%"),
        where: table.tablename != "events_default",
        select: table.tablename,
        order_by: [asc: table.tablename]
      )

    Repo.all(query)
  end

  @doc """
  Generates an archive SQL statement for the given partition.

  Raises `ArgumentError` when either name is not a plain SQL identifier —
  table names cannot be bound as query parameters, so interpolating an
  unvalidated name here would be a SQL injection vector.
  """
  @spec archive_sql(String.t(), String.t()) :: String.t()
  def archive_sql(partition_name, archive_table_name) do
    validate_identifier!(partition_name)
    validate_identifier!(archive_table_name)

    "INSERT INTO #{archive_table_name} SELECT * FROM #{partition_name};"
  end

  # Rejects anything that is not a bare [A-Za-z_][A-Za-z0-9_]* identifier.
  defp validate_identifier!(name) when is_binary(name) do
    if Regex.match?(@identifier_pattern, name) do
      :ok
    else
      raise ArgumentError, "invalid SQL identifier: #{inspect(name)}"
    end
  end

  defp validate_identifier!(name) do
    raise ArgumentError, "invalid SQL identifier: #{inspect(name)}"
  end
end

View File

@@ -0,0 +1,310 @@
defmodule Parrhesia.Storage.Partitions do
  @moduledoc """
  Partition lifecycle helpers for Postgres `events` and `event_tags` monthly partitions.

  Monthly partitions follow `events_YYYY_MM` / `event_tags_YYYY_MM` naming and
  are range-partitioned on unix timestamps; the two tables' partitions are
  created and dropped together to keep their lifecycles aligned.
  """

  import Ecto.Query

  alias Parrhesia.Repo

  # Allow-list for identifiers interpolated into DDL statements; anything else raises.
  @identifier_pattern ~r/^[a-zA-Z_][a-zA-Z0-9_]*$/
  # Monthly partitions follow the `events_YYYY_MM` naming convention.
  @monthly_partition_pattern ~r/^events_(\d{4})_(\d{2})$/
  @events_partition_prefix "events"
  @event_tags_partition_prefix "event_tags"
  # Number of future months to pre-create partitions for by default.
  @default_months_ahead 2

  @type monthly_partition :: %{
          name: String.t(),
          year: pos_integer(),
          month: pos_integer(),
          month_start_unix: non_neg_integer(),
          month_end_unix: non_neg_integer()
        }

  @doc """
  Lists all `events_*` partitions excluding the default partition.
  """
  @spec list_partitions() :: [String.t()]
  def list_partitions do
    query =
      from(table in "pg_tables",
        where: table.schemaname == "public",
        where: like(table.tablename, "events_%"),
        where: table.tablename != "events_default",
        select: table.tablename,
        order_by: [asc: table.tablename]
      )

    Repo.all(query)
  end

  @doc """
  Lists monthly event partitions that match `events_YYYY_MM` naming.

  Non-monthly `events_*` tables are filtered out; results are sorted
  chronologically by `{year, month}`.
  """
  @spec list_monthly_partitions() :: [monthly_partition()]
  def list_monthly_partitions do
    list_partitions()
    |> Enum.map(&parse_monthly_partition/1)
    |> Enum.reject(&is_nil/1)
    |> Enum.sort_by(&{&1.year, &1.month})
  end

  @doc """
  Ensures monthly partitions exist for the current month and `months_ahead` future months.

  Options:
    * `:months_ahead` - non-negative integer (defaults to #{@default_months_ahead})
    * `:reference_date` - `Date` to anchor "current month" (defaults to today, UTC)
  """
  @spec ensure_monthly_partitions(keyword()) :: :ok | {:error, term()}
  def ensure_monthly_partitions(opts \\ []) when is_list(opts) do
    months_ahead =
      opts
      |> Keyword.get(:months_ahead, @default_months_ahead)
      |> normalize_non_negative_integer(@default_months_ahead)

    reference_date =
      opts
      |> Keyword.get(:reference_date, Date.utc_today())
      |> normalize_reference_date()

    reference_month = month_start(reference_date)

    offsets =
      if months_ahead == 0 do
        [0]
      else
        Enum.to_list(0..months_ahead)
      end

    # Stop at the first failed month; later months would likely fail the same way.
    Enum.reduce_while(offsets, :ok, fn offset, :ok ->
      target_month = shift_month(reference_month, offset)

      case create_monthly_partitions(target_month) do
        :ok -> {:cont, :ok}
        {:error, reason} -> {:halt, {:error, reason}}
      end
    end)
  end

  @doc """
  Returns the current database size in bytes.
  """
  @spec database_size_bytes() :: {:ok, non_neg_integer()} | {:error, term()}
  def database_size_bytes do
    case Repo.query("SELECT pg_database_size(current_database())") do
      {:ok, %{rows: [[size]]}} when is_integer(size) and size >= 0 -> {:ok, size}
      {:ok, _result} -> {:error, :unexpected_result}
      {:error, reason} -> {:error, reason}
    end
  end

  @doc """
  Drops an event partition table by name.

  For monthly `events_YYYY_MM` partitions, the matching `event_tags_YYYY_MM`
  partition is dropped first to keep partition lifecycle aligned.

  Returns `{:error, :protected_partition}` for the parent tables and the
  default partitions, which must never be dropped.
  """
  @spec drop_partition(String.t()) :: :ok | {:error, term()}
  def drop_partition(partition_name) when is_binary(partition_name) do
    if protected_partition?(partition_name) do
      {:error, :protected_partition}
    else
      drop_partition_tables(partition_name)
    end
  end

  @doc """
  Returns the monthly `events` partition name for a date.
  """
  @spec month_partition_name(Date.t()) :: String.t()
  def month_partition_name(%Date{} = date) do
    monthly_partition_name(@events_partition_prefix, date)
  end

  @doc """
  Returns the monthly `event_tags` partition name for a date.
  """
  @spec event_tags_month_partition_name(Date.t()) :: String.t()
  def event_tags_month_partition_name(%Date{} = date) do
    monthly_partition_name(@event_tags_partition_prefix, date)
  end

  # Builds `<prefix>_YYYY_MM` with a zero-padded month.
  defp monthly_partition_name(prefix, %Date{} = date) do
    month_suffix = date.month |> Integer.to_string() |> String.pad_leading(2, "0")
    "#{prefix}_#{date.year}_#{month_suffix}"
  end

  # Creates the events partition first, then the matching event_tags partition
  # for the same month range; aborts on the first error.
  defp create_monthly_partitions(%Date{} = month_date) do
    {start_unix, end_unix} = month_bounds_unix(month_date.year, month_date.month)

    case create_monthly_partition(
           month_partition_name(month_date),
           @events_partition_prefix,
           start_unix,
           end_unix
         ) do
      :ok ->
        create_monthly_partition(
          event_tags_month_partition_name(month_date),
          @event_tags_partition_prefix,
          start_unix,
          end_unix
        )

      {:error, reason} ->
        {:error, reason}
    end
  end

  # Identifiers are validated/quoted; the range bounds are integers produced by
  # our own month math, so interpolating them is safe. IF NOT EXISTS makes the
  # call idempotent across repeated maintenance runs.
  defp create_monthly_partition(partition_name, parent_table_name, start_unix, end_unix) do
    quoted_partition_name = quote_identifier!(partition_name)
    quoted_parent_table_name = quote_identifier!(parent_table_name)

    sql =
      """
      CREATE TABLE IF NOT EXISTS #{quoted_partition_name}
      PARTITION OF #{quoted_parent_table_name}
      FOR VALUES FROM (#{start_unix}) TO (#{end_unix})
      """

    case Repo.query(sql) do
      {:ok, _result} -> :ok
      {:error, reason} -> {:error, reason}
    end
  end

  # Non-monthly names are dropped directly; monthly names get the paired
  # event_tags drop and detach handling.
  defp drop_partition_tables(partition_name) do
    case parse_monthly_partition(partition_name) do
      nil -> drop_table(partition_name)
      monthly_partition -> drop_monthly_partition(partition_name, monthly_partition)
    end
  end

  # Detach the events partition first, then drop the tags partition, then the
  # events table itself; any failed step short-circuits with its error.
  defp drop_monthly_partition(partition_name, %{year: year, month: month}) do
    month_date = Date.new!(year, month, 1)
    tags_partition_name = monthly_partition_name(@event_tags_partition_prefix, month_date)

    with :ok <- maybe_detach_events_partition(partition_name),
         :ok <- drop_table(tags_partition_name) do
      drop_table(partition_name)
    end
  end

  # Detaches only when the table is still attached to the `events` parent, so
  # a retried drop after a partial failure does not error on DETACH.
  defp maybe_detach_events_partition(partition_name) do
    if attached_partition?(partition_name, @events_partition_prefix) do
      quoted_parent_table_name = quote_identifier!(@events_partition_prefix)
      quoted_partition_name = quote_identifier!(partition_name)

      case Repo.query(
             "ALTER TABLE #{quoted_parent_table_name} DETACH PARTITION #{quoted_partition_name}"
           ) do
        {:ok, _result} -> :ok
        {:error, reason} -> {:error, reason}
      end
    else
      :ok
    end
  end

  # Checks pg_inherits for an attachment between child and parent in `public`.
  # NOTE(review): a query error is treated as "not attached", so the detach is
  # skipped and the subsequent DROP proceeds anyway — confirm this best-effort
  # behavior is intended.
  defp attached_partition?(partition_name, parent_table_name) do
    query =
      """
      SELECT 1
      FROM pg_inherits AS inheritance
      JOIN pg_class AS child ON child.oid = inheritance.inhrelid
      JOIN pg_namespace AS child_ns ON child_ns.oid = child.relnamespace
      JOIN pg_class AS parent ON parent.oid = inheritance.inhparent
      JOIN pg_namespace AS parent_ns ON parent_ns.oid = parent.relnamespace
      WHERE child_ns.nspname = 'public'
        AND parent_ns.nspname = 'public'
        AND child.relname = $1
        AND parent.relname = $2
      LIMIT 1
      """

    case Repo.query(query, [partition_name, parent_table_name]) do
      {:ok, %{rows: [[1]]}} -> true
      {:ok, %{rows: []}} -> false
      {:ok, _result} -> false
      {:error, _reason} -> false
    end
  end

  # IF EXISTS keeps drops idempotent across retried maintenance runs.
  defp drop_table(table_name) do
    quoted_table_name = quote_identifier!(table_name)

    case Repo.query("DROP TABLE IF EXISTS #{quoted_table_name}") do
      {:ok, _result} -> :ok
      {:error, reason} -> {:error, reason}
    end
  end

  # Parent tables and default partitions must never be dropped.
  defp protected_partition?(partition_name) do
    partition_name in ["events", "events_default", "event_tags", "event_tags_default"]
  end

  # Returns a monthly_partition map for `events_YYYY_MM` names, or nil for
  # anything else (including out-of-range months like `events_2024_13`).
  defp parse_monthly_partition(partition_name) do
    case Regex.run(@monthly_partition_pattern, partition_name, capture: :all_but_first) do
      [year_text, month_text] ->
        # The regex guarantees all-digit captures, so parse cannot leave a rest.
        {year, ""} = Integer.parse(year_text)
        {month, ""} = Integer.parse(month_text)

        if month in 1..12 do
          {month_start_unix, month_end_unix} = month_bounds_unix(year, month)

          %{
            name: partition_name,
            year: year,
            month: month,
            month_start_unix: month_start_unix,
            month_end_unix: month_end_unix
          }
        else
          nil
        end

      _other ->
        nil
    end
  end

  # Returns {start, end} unix timestamps; end is exclusive (start of next month).
  defp month_bounds_unix(year, month) do
    month_date = Date.new!(year, month, 1)
    next_month_date = shift_month(month_date, 1)
    {date_to_unix(month_date), date_to_unix(next_month_date)}
  end

  # Midnight UTC of the given date as a unix timestamp.
  defp date_to_unix(%Date{} = date) do
    date
    |> DateTime.new!(~T[00:00:00], "Etc/UTC")
    |> DateTime.to_unix()
  end

  defp month_start(%Date{} = date), do: Date.new!(date.year, date.month, 1)

  # Shifts by whole months using a linear month index.
  # NOTE(review): div/rem truncate toward zero, so this assumes the resulting
  # index stays non-negative (true for all AD dates with the deltas used here).
  defp shift_month(%Date{} = date, month_delta) when is_integer(month_delta) do
    month_index = date.year * 12 + date.month - 1 + month_delta
    shifted_year = div(month_index, 12)
    shifted_month = rem(month_index, 12) + 1
    Date.new!(shifted_year, shifted_month, 1)
  end

  # Invalid reference dates silently fall back to today (UTC).
  defp normalize_reference_date(%Date{} = date), do: date
  defp normalize_reference_date(_other), do: Date.utc_today()

  defp normalize_non_negative_integer(value, _default) when is_integer(value) and value >= 0,
    do: value

  defp normalize_non_negative_integer(_value, default), do: default

  # Quotes a validated identifier for safe interpolation into DDL; raises on
  # anything outside the strict identifier pattern.
  defp quote_identifier!(identifier) when is_binary(identifier) do
    if Regex.match?(@identifier_pattern, identifier) do
      ~s("#{identifier}")
    else
      raise ArgumentError, "invalid SQL identifier: #{inspect(identifier)}"
    end
  end

  defp quote_identifier!(identifier) do
    raise ArgumentError, "invalid SQL identifier: #{inspect(identifier)}"
  end
end

View File

@@ -12,6 +12,8 @@ defmodule Parrhesia.Storage.Supervisor do
@impl true
def init(_init_arg) do
children = [
{Parrhesia.Storage.Adapters.Postgres.ModerationCache,
name: Parrhesia.Storage.Adapters.Postgres.ModerationCache},
Parrhesia.Repo
]

View File

@@ -11,6 +11,13 @@ defmodule Parrhesia.Subscriptions.Index do
alias Parrhesia.Protocol.Filter
@wildcard_key :all
@subscriptions_table_name :parrhesia_subscriptions_table
@kind_index_table_name :parrhesia_subscription_kind_index
@author_index_table_name :parrhesia_subscription_author_index
@tag_index_table_name :parrhesia_subscription_tag_index
@kind_wildcard_table_name :parrhesia_subscription_kind_wildcard_index
@author_wildcard_table_name :parrhesia_subscription_author_wildcard_index
@tag_wildcard_table_name :parrhesia_subscription_tag_wildcard_index
@type subscription_id :: String.t()
@type owner :: pid()
@@ -20,11 +27,12 @@ defmodule Parrhesia.Subscriptions.Index do
@spec start_link(keyword()) :: GenServer.on_start()
def start_link(opts \\ []) do
name = Keyword.get(opts, :name)
init_arg = %{named_tables?: name == __MODULE__}
if is_nil(name) do
GenServer.start_link(__MODULE__, :ok)
GenServer.start_link(__MODULE__, init_arg)
else
GenServer.start_link(__MODULE__, :ok, name: name)
GenServer.start_link(__MODULE__, init_arg, name: name)
end
end
@@ -65,6 +73,13 @@ defmodule Parrhesia.Subscriptions.Index do
end
@spec candidate_subscription_keys(GenServer.server(), map()) :: [subscription_key()]
def candidate_subscription_keys(__MODULE__, event) do
case named_tables() do
{:ok, tables} -> candidate_subscription_keys_for_tables(tables, event)
:error -> GenServer.call(__MODULE__, {:candidate_subscription_keys, event})
end
end
def candidate_subscription_keys(server, event) do
GenServer.call(server, {:candidate_subscription_keys, event})
end
@@ -76,20 +91,15 @@ defmodule Parrhesia.Subscriptions.Index do
end
@impl true
def init(:ok) do
def init(%{named_tables?: named_tables?}) do
tables = create_tables(named_tables?)
{:ok,
%{
subscriptions_table: :ets.new(:subscriptions_table, [:set, :protected]),
kind_index_table: :ets.new(:subscription_kind_index, [:bag, :protected]),
author_index_table: :ets.new(:subscription_author_index, [:bag, :protected]),
tag_index_table: :ets.new(:subscription_tag_index, [:bag, :protected]),
kind_wildcard_table: :ets.new(:subscription_kind_wildcard_index, [:bag, :protected]),
author_wildcard_table: :ets.new(:subscription_author_wildcard_index, [:bag, :protected]),
tag_wildcard_table: :ets.new(:subscription_tag_wildcard_index, [:bag, :protected]),
Map.merge(tables, %{
owner_subscriptions: %{},
owner_monitors: %{},
monitor_owners: %{}
}}
})}
end
@impl true
@@ -128,14 +138,7 @@ defmodule Parrhesia.Subscriptions.Index do
end
def handle_call({:candidate_subscription_keys, event}, _from, state) do
candidates =
state
|> kind_candidates(event)
|> MapSet.intersection(author_candidates(state, event))
|> MapSet.intersection(tag_candidates(state, event))
|> MapSet.to_list()
{:reply, candidates, state}
{:reply, candidate_subscription_keys_for_tables(state, event), state}
end
def handle_call({:fetch_filters, owner_pid, subscription_id}, _from, state) do
@@ -371,28 +374,110 @@ defmodule Parrhesia.Subscriptions.Index do
|> update_in([:owner_subscriptions], &Map.delete(&1, owner_pid))
end
defp kind_candidates(state, event) do
# Builds the named, protected ETS tables used when the index runs under its
# canonical registered name; read_concurrency lets other processes read the
# named tables without a GenServer round-trip.
defp create_tables(true) do
  set_options = [:set, :protected, :named_table, read_concurrency: true]
  bag_options = [:bag, :protected, :named_table, read_concurrency: true]

  %{
    subscriptions_table: :ets.new(@subscriptions_table_name, set_options),
    kind_index_table: :ets.new(@kind_index_table_name, bag_options),
    author_index_table: :ets.new(@author_index_table_name, bag_options),
    tag_index_table: :ets.new(@tag_index_table_name, bag_options),
    kind_wildcard_table: :ets.new(@kind_wildcard_table_name, bag_options),
    author_wildcard_table: :ets.new(@author_wildcard_table_name, bag_options),
    tag_wildcard_table: :ets.new(@tag_wildcard_table_name, bag_options)
  }
end
# Builds anonymous (unnamed) tables for servers started without the canonical
# registered name, so multiple instances can coexist (e.g. in tests).
defp create_tables(false) do
  new_bag = fn first_atom -> :ets.new(first_atom, [:bag, :protected]) end

  %{
    subscriptions_table: :ets.new(:subscriptions_table, [:set, :protected]),
    kind_index_table: new_bag.(:subscription_kind_index),
    author_index_table: new_bag.(:subscription_author_index),
    tag_index_table: new_bag.(:subscription_tag_index),
    kind_wildcard_table: new_bag.(:subscription_kind_wildcard_index),
    author_wildcard_table: new_bag.(:subscription_author_wildcard_index),
    tag_wildcard_table: new_bag.(:subscription_tag_wildcard_index)
  }
end
# Resolves the named ETS tables to references; returns :error when any table
# is missing (owner process not started), signalling callers to fall back to
# the GenServer call path.
defp named_tables do
  table_names = %{
    subscriptions_table: @subscriptions_table_name,
    kind_index_table: @kind_index_table_name,
    author_index_table: @author_index_table_name,
    tag_index_table: @tag_index_table_name,
    kind_wildcard_table: @kind_wildcard_table_name,
    author_wildcard_table: @author_wildcard_table_name,
    tag_wildcard_table: @tag_wildcard_table_name
  }

  resolved = Map.new(table_names, fn {key, table_name} -> {key, :ets.whereis(table_name)} end)

  if Enum.any?(Map.values(resolved), &(&1 == :undefined)) do
    :error
  else
    {:ok, resolved}
  end
end
# Intersects the kind, author, and tag candidate sets: a subscription is a
# candidate only when every dimension (or its wildcard) matches the event.
defp candidate_subscription_keys_for_tables(tables, event) do
  kind_set = kind_candidates(tables, event)
  author_set = author_candidates(tables, event)
  tag_set = tag_candidates(tables, event)

  kind_set
  |> MapSet.intersection(author_set)
  |> MapSet.intersection(tag_set)
  |> MapSet.to_list()
end
defp kind_candidates(tables, event) do
event
|> Map.get("kind")
|> index_candidates_for_value(state.kind_index_table, state.kind_wildcard_table)
|> index_candidates_for_value(tables.kind_index_table, tables.kind_wildcard_table)
end
defp author_candidates(state, event) do
defp author_candidates(tables, event) do
event
|> Map.get("pubkey")
|> index_candidates_for_value(state.author_index_table, state.author_wildcard_table)
|> index_candidates_for_value(tables.author_index_table, tables.author_wildcard_table)
end
defp tag_candidates(state, event) do
defp tag_candidates(tables, event) do
tag_pairs = event_tag_pairs(Map.get(event, "tags"))
wildcard_candidates = lookup_candidates(state.tag_wildcard_table, @wildcard_key)
wildcard_candidates = lookup_candidates(tables.tag_wildcard_table, @wildcard_key)
if MapSet.size(tag_pairs) == 0 do
wildcard_candidates
else
matched_candidates =
Enum.reduce(tag_pairs, MapSet.new(), fn {tag_name, value}, acc ->
MapSet.union(acc, lookup_candidates(state.tag_index_table, {tag_name, value}))
MapSet.union(acc, lookup_candidates(tables.tag_index_table, {tag_name, value}))
end)
MapSet.union(matched_candidates, wildcard_candidates)

View File

@@ -11,12 +11,26 @@ defmodule Parrhesia.Subscriptions.Supervisor do
@impl true
def init(_init_arg) do
children = [
{Parrhesia.Subscriptions.Index, name: Parrhesia.Subscriptions.Index},
{Parrhesia.Negentropy.Sessions, name: Parrhesia.Negentropy.Sessions},
{Parrhesia.Fanout.MultiNode, name: Parrhesia.Fanout.MultiNode}
]
children =
[
{Parrhesia.Subscriptions.Index, name: Parrhesia.Subscriptions.Index}
] ++
negentropy_children() ++ [{Parrhesia.Fanout.MultiNode, name: Parrhesia.Fanout.MultiNode}]
Supervisor.init(children, strategy: :one_for_one)
end
# Child spec list for the NIP-77 negentropy session store; empty when the
# feature flag disables it, so the process is never started.
defp negentropy_children do
  if negentropy_enabled?() do
    [{Parrhesia.Negentropy.Sessions, name: Parrhesia.Negentropy.Sessions}]
  else
    []
  end
end
# Reads the `:nip_77_negentropy` flag from the `:features` app env;
# defaults to enabled when unset.
defp negentropy_enabled? do
  features = Application.get_env(:parrhesia, :features, [])
  Keyword.get(features, :nip_77_negentropy, true)
end
end

View File

@@ -0,0 +1,280 @@
defmodule Parrhesia.Tasks.PartitionRetentionWorker do
  @moduledoc """
  Periodic worker that ensures monthly event partitions and applies retention pruning.

  On each tick it (1) pre-creates monthly partitions via the partition ops
  module and (2) drops up to `max_partitions_to_drop_per_run` of the oldest
  completed monthly partitions when either the age limit or database-size
  limit is exceeded. The partition ops module is injectable via `:partition_ops`
  for testing.
  """

  use GenServer

  alias Parrhesia.Storage.Partitions
  alias Parrhesia.Telemetry

  @default_check_interval_hours 24
  @default_months_ahead 2
  @default_max_partitions_to_drop_per_run 1
  @bytes_per_gib 1_073_741_824

  @type monthly_partition :: Partitions.monthly_partition()

  @spec start_link(keyword()) :: GenServer.on_start()
  def start_link(opts \\ []) do
    name = Keyword.get(opts, :name, __MODULE__)
    GenServer.start_link(__MODULE__, opts, name: name)
  end

  @impl true
  def init(opts) do
    # Explicit opts take precedence over the `:retention` app env config.
    retention_config = Application.get_env(:parrhesia, :retention, [])

    state = %{
      partition_ops: Keyword.get(opts, :partition_ops, Partitions),
      interval_ms: interval_ms(opts, retention_config),
      months_ahead: months_ahead(opts, retention_config),
      max_db_gib: max_db_gib(opts, retention_config),
      max_months_to_keep: max_months_to_keep(opts, retention_config),
      max_partitions_to_drop_per_run: max_partitions_to_drop_per_run(opts, retention_config),
      today_fun: today_fun(opts)
    }

    # First maintenance pass runs immediately after startup.
    schedule_tick(0)
    {:ok, state}
  end

  @impl true
  def handle_info(:tick, state) do
    started_at = System.monotonic_time()

    {dropped_count, status} =
      case run_maintenance(state) do
        {:ok, count} -> {count, :ok}
        {:error, _reason} -> {0, :error}
      end

    Telemetry.emit(
      [:parrhesia, :maintenance, :partition_retention, :stop],
      %{
        duration: System.monotonic_time() - started_at,
        dropped_partitions: dropped_count
      },
      %{status: status}
    )

    # Re-arm the timer regardless of outcome so a failed run retries later.
    schedule_tick(state.interval_ms)
    {:noreply, state}
  end

  # Drain any stray messages so the mailbox cannot grow unbounded.
  def handle_info(_message, state), do: {:noreply, state}

  # Ensures upcoming partitions exist before considering any drops.
  defp run_maintenance(state) do
    case state.partition_ops.ensure_monthly_partitions(months_ahead: state.months_ahead) do
      :ok -> maybe_drop_oldest_partitions(state)
      {:error, reason} -> {:error, reason}
    end
  end

  defp maybe_drop_oldest_partitions(%{max_partitions_to_drop_per_run: max_drops})
       when max_drops <= 0,
       do: {:ok, 0}

  # Drops at most `max_partitions_to_drop_per_run` partitions, stopping early
  # when no partition qualifies or a drop fails.
  defp maybe_drop_oldest_partitions(state) do
    1..state.max_partitions_to_drop_per_run
    |> Enum.reduce_while({:ok, 0}, fn _attempt, {:ok, dropped_count} ->
      drop_oldest_partition_once(state, dropped_count)
    end)
  end

  defp drop_oldest_partition_once(state, dropped_count) do
    case next_partition_to_drop(state) do
      {:ok, partition} -> apply_partition_drop(state, partition, dropped_count)
      {:error, reason} -> {:halt, {:error, reason}}
    end
  end

  # nil means no partition currently exceeds any limit: stop dropping.
  defp apply_partition_drop(_state, nil, dropped_count), do: {:halt, {:ok, dropped_count}}

  defp apply_partition_drop(state, partition, dropped_count) do
    case state.partition_ops.drop_partition(partition.name) do
      :ok -> {:cont, {:ok, dropped_count + 1}}
      {:error, reason} -> {:halt, {:error, reason}}
    end
  end

  # Picks the oldest partition that violates either the age limit or the
  # database-size limit; returns {:ok, nil} when neither limit is exceeded.
  defp next_partition_to_drop(state) do
    partitions = state.partition_ops.list_monthly_partitions()
    current_month_index = current_month_index(state.today_fun)

    month_limit_candidate =
      oldest_partition_exceeding_month_limit(
        partitions,
        state.max_months_to_keep,
        current_month_index
      )

    with {:ok, size_limit_candidate} <-
           oldest_partition_exceeding_size_limit(
             partitions,
             state.max_db_gib,
             current_month_index,
             state.partition_ops
           ) do
      {:ok, pick_oldest_partition(month_limit_candidate, size_limit_candidate)}
    end
  end

  defp oldest_partition_exceeding_month_limit(_partitions, :infinity, _current_month_index),
    do: nil

  # The current month is always kept; only partitions older than the retention
  # window are candidates.
  defp oldest_partition_exceeding_month_limit(partitions, max_months_to_keep, current_month_index)
       when is_integer(max_months_to_keep) and max_months_to_keep > 0 do
    oldest_month_to_keep_index = current_month_index - (max_months_to_keep - 1)

    partitions
    |> Enum.filter(fn partition ->
      month_index(partition) < current_month_index and
        month_index(partition) < oldest_month_to_keep_index
    end)
    |> Enum.min_by(&month_index/1, fn -> nil end)
  end

  defp oldest_partition_exceeding_month_limit(
         _partitions,
         _max_months_to_keep,
         _current_month_index
       ),
       do: nil

  defp oldest_partition_exceeding_size_limit(
         _partitions,
         :infinity,
         _current_month_index,
         _archiver
       ),
       do: {:ok, nil}

  # When the database exceeds the size budget, the oldest completed (i.e. not
  # current-month) partition becomes a drop candidate.
  # NOTE(review): the configured value comes from the `:max_db_bytes` key but is
  # multiplied by @bytes_per_gib here, i.e. treated as GiB — confirm whether the
  # key should be `:max_db_gib` or the conversion dropped.
  defp oldest_partition_exceeding_size_limit(
         partitions,
         max_db_gib,
         current_month_index,
         archiver
       )
       when is_integer(max_db_gib) and max_db_gib > 0 do
    with {:ok, current_size_bytes} <- archiver.database_size_bytes() do
      max_size_bytes = max_db_gib * @bytes_per_gib

      if current_size_bytes > max_size_bytes do
        {:ok, oldest_completed_partition(partitions, current_month_index)}
      else
        {:ok, nil}
      end
    end
  end

  defp oldest_partition_exceeding_size_limit(
         _partitions,
         _max_db_gib,
         _current_month_index,
         _archiver
       ),
       do: {:ok, nil}

  defp oldest_completed_partition(partitions, current_month_index) do
    partitions
    |> Enum.filter(&(month_index(&1) < current_month_index))
    |> Enum.min_by(&month_index/1, fn -> nil end)
  end

  # Merges the two candidate sources, preferring the chronologically older one.
  defp pick_oldest_partition(nil, nil), do: nil
  defp pick_oldest_partition(partition, nil), do: partition
  defp pick_oldest_partition(nil, partition), do: partition

  defp pick_oldest_partition(left, right) do
    if month_index(left) <= month_index(right) do
      left
    else
      right
    end
  end

  # Linearizes {year, month} so months compare with plain integer ordering.
  defp month_index(%{year: year, month: month}) when is_integer(year) and is_integer(month) do
    year * 12 + month
  end

  defp current_month_index(today_fun) do
    today = today_fun.()
    today.year * 12 + today.month
  end

  # Explicit :interval_ms opt wins; otherwise `:check_interval_hours` config.
  defp interval_ms(opts, retention_config) do
    case Keyword.get(opts, :interval_ms) do
      value when is_integer(value) and value > 0 ->
        value

      _other ->
        retention_config
        |> Keyword.get(:check_interval_hours, @default_check_interval_hours)
        |> normalize_positive_integer(@default_check_interval_hours)
        |> hours_to_ms()
    end
  end

  defp months_ahead(opts, retention_config) do
    opts
    |> Keyword.get(
      :months_ahead,
      Keyword.get(retention_config, :months_ahead, @default_months_ahead)
    )
    |> normalize_non_negative_integer(@default_months_ahead)
  end

  defp max_db_gib(opts, retention_config) do
    opts
    |> Keyword.get(:max_db_bytes, Keyword.get(retention_config, :max_db_bytes, :infinity))
    |> normalize_limit()
  end

  defp max_months_to_keep(opts, retention_config) do
    opts
    |> Keyword.get(
      :max_months_to_keep,
      Keyword.get(retention_config, :max_months_to_keep, :infinity)
    )
    |> normalize_limit()
  end

  defp max_partitions_to_drop_per_run(opts, retention_config) do
    opts
    |> Keyword.get(
      :max_partitions_to_drop_per_run,
      Keyword.get(
        retention_config,
        :max_partitions_to_drop_per_run,
        @default_max_partitions_to_drop_per_run
      )
    )
    |> normalize_non_negative_integer(@default_max_partitions_to_drop_per_run)
  end

  # Injectable clock (zero-arity Date fun) so tests can control "today".
  defp today_fun(opts) do
    case Keyword.get(opts, :today_fun, &Date.utc_today/0) do
      function when is_function(function, 0) -> function
      _other -> &Date.utc_today/0
    end
  end

  # Limits default to :infinity (disabled) for any invalid value.
  defp normalize_limit(:infinity), do: :infinity
  defp normalize_limit(value) when is_integer(value) and value > 0, do: value
  defp normalize_limit(_value), do: :infinity

  defp normalize_positive_integer(value, _default) when is_integer(value) and value > 0, do: value
  defp normalize_positive_integer(_value, default), do: default

  defp normalize_non_negative_integer(value, _default) when is_integer(value) and value >= 0,
    do: value

  defp normalize_non_negative_integer(_value, default), do: default

  defp hours_to_ms(hours), do: hours * 60 * 60 * 1000

  defp schedule_tick(interval_ms) do
    Process.send_after(self(), :tick, interval_ms)
  end
end

View File

@@ -11,13 +11,22 @@ defmodule Parrhesia.Tasks.Supervisor do
@impl true
def init(_init_arg) do
children =
children = expiration_children() ++ partition_retention_children()
Supervisor.init(children, strategy: :one_for_one)
end
# Child spec list for the event-expiration worker; gated behind the
# `:enable_expiration_worker` app env flag (defaults to enabled).
defp expiration_children do
  if Application.get_env(:parrhesia, :enable_expiration_worker, true) do
    [{Parrhesia.Tasks.ExpirationWorker, name: Parrhesia.Tasks.ExpirationWorker}]
  else
    []
  end
end
Supervisor.init(children, strategy: :one_for_one)
# Always-on partition maintenance worker; unlike expiration, there is no
# feature flag to disable it.
defp partition_retention_children do
  [{Parrhesia.Tasks.PartitionRetentionWorker, name: Parrhesia.Tasks.PartitionRetentionWorker}]
end
end

View File

@@ -0,0 +1,52 @@
defmodule Parrhesia.TestSupport.PartitionRetentionStubPartitions do
  @moduledoc false

  use Agent

  @spec start_link(keyword()) :: Agent.on_start()
  def start_link(opts \\ []) do
    # Partitions/size are fixed at startup; `:test_pid` (when set) receives a
    # message for each stub call so tests can assert on interactions.
    initial_state = %{
      partitions: Keyword.get(opts, :partitions, []),
      db_size_bytes: Keyword.get(opts, :db_size_bytes, 0),
      test_pid: Keyword.get(opts, :test_pid)
    }

    Agent.start_link(fn -> initial_state end, name: Keyword.get(opts, :name, __MODULE__))
  end

  @spec ensure_monthly_partitions(keyword()) :: :ok
  def ensure_monthly_partitions(opts \\ []) do
    notify({:ensure_monthly_partitions, opts})
    :ok
  end

  @spec list_monthly_partitions() :: [map()]
  def list_monthly_partitions do
    Agent.get(__MODULE__, fn state -> state.partitions end)
  end

  @spec database_size_bytes() :: {:ok, non_neg_integer()}
  def database_size_bytes do
    notify(:database_size_bytes)
    {:ok, Agent.get(__MODULE__, fn state -> state.db_size_bytes end)}
  end

  @spec drop_partition(String.t()) :: :ok
  def drop_partition(partition_name) when is_binary(partition_name) do
    Agent.update(__MODULE__, fn state ->
      remaining = Enum.reject(state.partitions, fn partition -> partition.name == partition_name end)
      %{state | partitions: remaining}
    end)

    notify({:drop_partition, partition_name})
    :ok
  end

  # Forwards a message to the registered test pid, if one was configured.
  defp notify(message) do
    test_pid = Agent.get(__MODULE__, fn state -> state.test_pid end)

    if is_pid(test_pid) do
      send(test_pid, message)
    else
      :ok
    end
  end
end

View File

@@ -20,7 +20,13 @@ defmodule Parrhesia.Web.Connection do
@default_max_outbound_queue 256
@default_outbound_drain_batch_size 64
@default_outbound_overflow_strategy :close
@default_max_frame_bytes 1_048_576
@default_max_event_bytes 262_144
@default_event_ingest_rate_limit 120
@default_event_ingest_window_seconds 1
@default_auth_max_age_seconds 600
@drain_outbound_queue :drain_outbound_queue
@post_ack_ingest :post_ack_ingest
@outbound_queue_pressure_threshold 0.75
@marmot_kinds MapSet.new([
@@ -42,13 +48,21 @@ defmodule Parrhesia.Web.Connection do
subscription_index: Index,
auth_challenges: Challenges,
auth_challenge: nil,
relay_url: nil,
negentropy_sessions: Sessions,
outbound_queue: :queue.new(),
outbound_queue_size: 0,
max_outbound_queue: @default_max_outbound_queue,
outbound_overflow_strategy: @default_outbound_overflow_strategy,
outbound_drain_batch_size: @default_outbound_drain_batch_size,
drain_scheduled?: false
drain_scheduled?: false,
max_frame_bytes: @default_max_frame_bytes,
max_event_bytes: @default_max_event_bytes,
max_event_ingest_per_window: @default_event_ingest_rate_limit,
event_ingest_window_seconds: @default_event_ingest_window_seconds,
event_ingest_window_started_at_ms: 0,
event_ingest_count: 0,
auth_max_age_seconds: @default_auth_max_age_seconds
@type overflow_strategy :: :close | :drop_oldest | :drop_newest
@@ -64,13 +78,21 @@ defmodule Parrhesia.Web.Connection do
subscription_index: GenServer.server() | nil,
auth_challenges: GenServer.server() | nil,
auth_challenge: String.t() | nil,
relay_url: String.t() | nil,
negentropy_sessions: GenServer.server() | nil,
outbound_queue: :queue.queue({String.t(), map()}),
outbound_queue_size: non_neg_integer(),
max_outbound_queue: pos_integer(),
outbound_overflow_strategy: overflow_strategy(),
outbound_drain_batch_size: pos_integer(),
drain_scheduled?: boolean()
drain_scheduled?: boolean(),
max_frame_bytes: pos_integer(),
max_event_bytes: pos_integer(),
max_event_ingest_per_window: pos_integer(),
event_ingest_window_seconds: pos_integer(),
event_ingest_window_started_at_ms: integer(),
event_ingest_count: non_neg_integer(),
auth_max_age_seconds: pos_integer()
}
@impl true
@@ -82,10 +104,17 @@ defmodule Parrhesia.Web.Connection do
subscription_index: subscription_index(opts),
auth_challenges: auth_challenges,
auth_challenge: maybe_issue_auth_challenge(auth_challenges),
relay_url: relay_url(opts),
negentropy_sessions: negentropy_sessions(opts),
max_outbound_queue: max_outbound_queue(opts),
outbound_overflow_strategy: outbound_overflow_strategy(opts),
outbound_drain_batch_size: outbound_drain_batch_size(opts)
outbound_drain_batch_size: outbound_drain_batch_size(opts),
max_frame_bytes: max_frame_bytes(opts),
max_event_bytes: max_event_bytes(opts),
max_event_ingest_per_window: max_event_ingest_per_window(opts),
event_ingest_window_seconds: event_ingest_window_seconds(opts),
event_ingest_window_started_at_ms: System.monotonic_time(:millisecond),
auth_max_age_seconds: auth_max_age_seconds(opts)
}
{:ok, state}
@@ -93,6 +122,15 @@ defmodule Parrhesia.Web.Connection do
@impl true
def handle_in({payload, [opcode: :text]}, %__MODULE__{} = state) do
if byte_size(payload) > state.max_frame_bytes do
response =
Protocol.encode_relay({
:notice,
"invalid: websocket frame exceeds max frame size"
})
{:push, {:text, response}, state}
else
case Protocol.decode_client(payload) do
{:ok, decoded_message} ->
handle_decoded_message(decoded_message, state)
@@ -102,6 +140,7 @@ defmodule Parrhesia.Web.Connection do
{:push, {:text, response}, state}
end
end
end
@impl true
def handle_in({_payload, [opcode: :binary]}, %__MODULE__{} = state) do
@@ -155,6 +194,12 @@ defmodule Parrhesia.Web.Connection do
handle_fanout_events(state, fanout_events)
end
def handle_info({@post_ack_ingest, event}, %__MODULE__{} = state) when is_map(event) do
fanout_event(event)
maybe_publish_multi_node(event)
{:ok, state}
end
def handle_info(@drain_outbound_queue, %__MODULE__{} = state) do
{frames, next_state} = drain_outbound_frames(state)
@@ -180,23 +225,53 @@ defmodule Parrhesia.Web.Connection do
started_at = System.monotonic_time()
event_id = Map.get(event, "id", "")
with :ok <- Protocol.validate_event(event),
:ok <- EventPolicy.authorize_write(event, state.authenticated_pubkeys),
case maybe_allow_event_ingest(state) do
{:ok, next_state} ->
result =
with :ok <- validate_event_payload_size(event, next_state.max_event_bytes),
:ok <- Protocol.validate_event(event),
:ok <- EventPolicy.authorize_write(event, next_state.authenticated_pubkeys),
:ok <- maybe_process_group_event(event),
{:ok, _result, message} <- persist_event(event) do
{:ok, message}
end
handle_event_ingest_result(result, next_state, event, event_id, started_at)
{:error, reason} ->
ingest_error_response(state, event_id, reason)
end
end
defp handle_event_ingest_result(
{:ok, message},
%__MODULE__{} = state,
event,
event_id,
started_at
) do
Telemetry.emit(
[:parrhesia, :ingest, :stop],
%{duration: System.monotonic_time() - started_at},
telemetry_metadata_for_event(event)
)
fanout_event(event)
maybe_publish_multi_node(event)
send(self(), {@post_ack_ingest, event})
response = Protocol.encode_relay({:ok, event_id, true, message})
{:push, {:text, response}, state}
else
{:error, reason} ->
end
defp handle_event_ingest_result(
{:error, reason},
%__MODULE__{} = state,
_event,
event_id,
_started_at
),
do: ingest_error_response(state, event_id, reason)
defp ingest_error_response(%__MODULE__{} = state, event_id, reason) do
message = error_message_for_ingest_failure(reason)
response = Protocol.encode_relay({:ok, event_id, false, message})
@@ -206,7 +281,6 @@ defmodule Parrhesia.Web.Connection do
{:push, {:text, response}, state}
end
end
end
defp handle_req(%__MODULE__{} = state, subscription_id, filters) do
started_at = System.monotonic_time()
@@ -353,7 +427,7 @@ defmodule Parrhesia.Web.Connection do
event_id = Map.get(auth_event, "id", "")
with :ok <- Protocol.validate_event(auth_event),
:ok <- validate_auth_event(auth_event),
:ok <- validate_auth_event(state, auth_event),
:ok <- validate_auth_challenge(state, auth_event) do
pubkey = Map.get(auth_event, "pubkey")
@@ -412,29 +486,55 @@ defmodule Parrhesia.Web.Connection do
end
defp persist_event(event) do
case Map.get(event, "kind") do
5 ->
kind = Map.get(event, "kind")
cond do
kind in [5, 62] -> persist_control_event(kind, event)
ephemeral_kind?(kind) -> persist_ephemeral_event()
true -> persist_regular_event(event)
end
end
# Kind 5 (deletion request): delegates to the storage adapter's
# delete_by_request and reports the number of deleted events.
defp persist_control_event(5, event) do
  with {:ok, deleted_count} <- Storage.events().delete_by_request(%{}, event) do
    {:ok, deleted_count, "ok: deletion request processed"}
  end
end
62 ->
# Kind 62 (vanish request): delegates to the storage adapter's vanish
# operation and reports the number of removed events.
defp persist_control_event(62, event) do
  with {:ok, deleted_count} <- Storage.events().vanish(%{}, event) do
    {:ok, deleted_count, "ok: vanish request processed"}
  end
end
_other ->
# Ephemeral events are acknowledged without being persisted; when the feature
# is disabled the write is rejected outright.
defp persist_ephemeral_event do
  accepted? = accept_ephemeral_events?()

  if accepted?,
    do: {:ok, :ephemeral, "ok: ephemeral event accepted"},
    else: {:error, :ephemeral_events_disabled}
end
# Stores a regular event through the configured storage adapter; duplicates
# surface as a distinct error so the OK message can say "duplicate:".
defp persist_regular_event(event) do
  case Storage.events().put_event(%{}, event) do
    {:ok, persisted_event} -> {:ok, persisted_event, "ok: event stored"}
    {:error, :duplicate_event} -> {:error, :duplicate_event}
    {:error, reason} -> {:error, reason}
  end
end
end
# Translates internal ingest failure reasons into client-facing OK-message
# strings with machine-readable prefixes ("duplicate:", "rate-limited:",
# "invalid:", "blocked:").
defp error_message_for_ingest_failure(:duplicate_event),
  do: "duplicate: event already stored"

defp error_message_for_ingest_failure(:event_rate_limited),
  do: "rate-limited: too many EVENT messages"

defp error_message_for_ingest_failure(:event_too_large),
  do: "invalid: event exceeds max event size"

defp error_message_for_ingest_failure(:ephemeral_events_disabled),
  do: "blocked: ephemeral events are disabled"
defp error_message_for_ingest_failure(reason)
when reason in [
:auth_required,
@@ -564,7 +664,7 @@ defmodule Parrhesia.Web.Connection do
with_auth_challenge_frame(state, {:push, {:text, response}, state})
end
defp validate_auth_event(%{"kind" => 22_242} = auth_event) do
defp validate_auth_event(%__MODULE__{} = state, %{"kind" => 22_242} = auth_event) do
tags = Map.get(auth_event, "tags", [])
challenge_tag? =
@@ -573,10 +673,13 @@ defmodule Parrhesia.Web.Connection do
_tag -> false
end)
if challenge_tag?, do: :ok, else: {:error, :missing_challenge_tag}
with :ok <- maybe_validate(challenge_tag?, :missing_challenge_tag),
:ok <- validate_auth_relay_tag(state, tags) do
validate_auth_created_at_freshness(auth_event, state.auth_max_age_seconds)
end
end
defp validate_auth_event(_auth_event), do: {:error, :invalid_auth_kind}
defp validate_auth_event(_state, _auth_event), do: {:error, :invalid_auth_kind}
defp validate_auth_challenge(%__MODULE__{auth_challenge: nil}, _auth_event),
do: {:error, :missing_challenge}
@@ -593,8 +696,45 @@ defmodule Parrhesia.Web.Connection do
if challenge_tag_matches?, do: :ok, else: {:error, :challenge_mismatch}
end
# NIP-42: the AUTH event must carry a "relay" tag whose value matches the URL
# this connection was served on. A relay with no configured/derived URL cannot
# validate the tag and rejects AUTH outright.
defp validate_auth_relay_tag(%__MODULE__{relay_url: relay_url}, tags)
     when is_binary(relay_url) do
  matches? = Enum.any?(tags, fn tag -> match?(["relay", ^relay_url | _rest], tag) end)

  if matches? do
    :ok
  else
    {:error, :invalid_relay_tag}
  end
end

defp validate_auth_relay_tag(%__MODULE__{relay_url: nil}, _tags),
  do: {:error, :missing_relay_configuration}
# Rejects AUTH events whose `created_at` is older than `max_age_seconds`
# relative to the current wall clock. A missing `created_at` defaults to -1
# and is therefore always stale.
#
# NOTE(review): future-dated `created_at` values pass this check unbounded;
# confirm whether an upper clock-skew bound should also be enforced, since
# NIP-42 expects the AUTH timestamp to be "recent".
defp validate_auth_created_at_freshness(auth_event, max_age_seconds)
     when is_integer(max_age_seconds) and max_age_seconds > 0 do
  created_at = Map.get(auth_event, "created_at", -1)
  now = System.system_time(:second)

  if created_at >= now - max_age_seconds do
    :ok
  else
    {:error, :auth_event_too_old}
  end
end

# Freshness checking is effectively disabled when no positive max age applies.
defp validate_auth_created_at_freshness(_auth_event, _max_age_seconds), do: :ok
# Turns a boolean check into the :ok / {:error, reason} contract used by the
# `with` pipelines above. Non-boolean input still raises (no catch-all clause).
defp maybe_validate(valid?, reason) when is_boolean(valid?) do
  if valid?, do: :ok, else: {:error, reason}
end
# Maps AUTH validation failure atoms to the client-facing error strings;
# already-formatted binary reasons are passed through verbatim.
defp auth_error_message(:invalid_auth_kind) do
  "invalid: AUTH event kind must be 22242"
end

defp auth_error_message(:missing_challenge_tag) do
  "invalid: AUTH event missing challenge tag"
end

defp auth_error_message(:invalid_relay_tag) do
  "invalid: AUTH relay tag mismatch"
end

defp auth_error_message(:missing_relay_configuration) do
  "invalid: relay URL is not configured"
end

defp auth_error_message(:auth_event_too_old) do
  "invalid: AUTH event is too old"
end

defp auth_error_message(:challenge_mismatch) do
  "invalid: AUTH challenge mismatch"
end

defp auth_error_message(:missing_challenge) do
  "invalid: AUTH challenge unavailable"
end

defp auth_error_message(reason) when is_binary(reason) do
  reason
end
@@ -1015,17 +1155,31 @@ defmodule Parrhesia.Web.Connection do
defp negentropy_sessions(opts) when is_list(opts) do
opts
|> Keyword.get(:negentropy_sessions, Sessions)
|> Keyword.get(:negentropy_sessions, configured_negentropy_sessions())
|> normalize_server_ref()
end
defp negentropy_sessions(opts) when is_map(opts) do
opts
|> Map.get(:negentropy_sessions, Sessions)
|> Map.get(:negentropy_sessions, configured_negentropy_sessions())
|> normalize_server_ref()
end
defp negentropy_sessions(_opts), do: Sessions
defp negentropy_sessions(_opts), do: configured_negentropy_sessions()
# Default NEG session server: the Sessions process when NIP-77 is enabled,
# nil (feature disabled) otherwise.
defp configured_negentropy_sessions do
  if negentropy_enabled?(), do: Sessions, else: nil
end
# NIP-77 (negentropy sync) feature flag; enabled unless configured off.
defp negentropy_enabled? do
  features = Application.get_env(:parrhesia, :features, [])
  Keyword.get(features, :nip_77_negentropy, true)
end
defp normalize_server_ref(server_ref) when is_pid(server_ref) or is_atom(server_ref),
do: server_ref
@@ -1131,4 +1285,200 @@ defmodule Parrhesia.Web.Connection do
|> Application.get_env(:limits, [])
|> Keyword.get(:outbound_overflow_strategy, @default_outbound_overflow_strategy)
end
# Resolves the relay URL used for NIP-42 relay-tag validation: an explicit
# per-connection option wins, falling back to application configuration.
defp relay_url(opts) when is_list(opts) do
  opts |> Keyword.get(:relay_url) |> normalize_relay_url() |> maybe_default_relay_url()
end

defp relay_url(opts) when is_map(opts) do
  opts |> Map.get(:relay_url) |> normalize_relay_url() |> maybe_default_relay_url()
end

defp relay_url(_opts), do: configured_relay_url()

# Only a non-empty binary counts as a usable relay URL; anything else is nil.
defp normalize_relay_url(url) when is_binary(url) and url != "", do: url
defp normalize_relay_url(_url), do: nil

defp maybe_default_relay_url(nil), do: configured_relay_url()
defp maybe_default_relay_url(url), do: url

defp configured_relay_url do
  normalize_relay_url(Application.get_env(:parrhesia, :relay_url))
end
# Maximum accepted websocket frame size: per-connection option first,
# then application config, then the module default.
defp max_frame_bytes(opts) when is_list(opts),
  do: normalize_max_frame_bytes(Keyword.get(opts, :max_frame_bytes))

defp max_frame_bytes(opts) when is_map(opts),
  do: normalize_max_frame_bytes(Map.get(opts, :max_frame_bytes))

defp max_frame_bytes(_opts), do: configured_max_frame_bytes()

defp normalize_max_frame_bytes(value) when is_integer(value) and value > 0, do: value
defp normalize_max_frame_bytes(_value), do: configured_max_frame_bytes()

defp configured_max_frame_bytes do
  limits = Application.get_env(:parrhesia, :limits, [])
  Keyword.get(limits, :max_frame_bytes, @default_max_frame_bytes)
end
# Maximum accepted serialized event size: per-connection option first,
# then application config, then the module default.
defp max_event_bytes(opts) when is_list(opts),
  do: normalize_max_event_bytes(Keyword.get(opts, :max_event_bytes))

defp max_event_bytes(opts) when is_map(opts),
  do: normalize_max_event_bytes(Map.get(opts, :max_event_bytes))

defp max_event_bytes(_opts), do: configured_max_event_bytes()

defp normalize_max_event_bytes(value) when is_integer(value) and value > 0, do: value
defp normalize_max_event_bytes(_value), do: configured_max_event_bytes()

defp configured_max_event_bytes do
  limits = Application.get_env(:parrhesia, :limits, [])
  Keyword.get(limits, :max_event_bytes, @default_max_event_bytes)
end
# Per-window EVENT ingest quota: per-connection option first,
# then application config, then the module default.
defp max_event_ingest_per_window(opts) when is_list(opts),
  do: normalize_max_event_ingest_per_window(Keyword.get(opts, :max_event_ingest_per_window))

defp max_event_ingest_per_window(opts) when is_map(opts),
  do: normalize_max_event_ingest_per_window(Map.get(opts, :max_event_ingest_per_window))

defp max_event_ingest_per_window(_opts), do: configured_max_event_ingest_per_window()

defp normalize_max_event_ingest_per_window(value) when is_integer(value) and value > 0,
  do: value

defp normalize_max_event_ingest_per_window(_value),
  do: configured_max_event_ingest_per_window()

defp configured_max_event_ingest_per_window do
  limits = Application.get_env(:parrhesia, :limits, [])
  Keyword.get(limits, :max_event_ingest_per_window, @default_event_ingest_rate_limit)
end
# Length of the EVENT rate-limit window in seconds: per-connection option
# first, then application config, then the module default.
defp event_ingest_window_seconds(opts) when is_list(opts),
  do: normalize_event_ingest_window_seconds(Keyword.get(opts, :event_ingest_window_seconds))

defp event_ingest_window_seconds(opts) when is_map(opts),
  do: normalize_event_ingest_window_seconds(Map.get(opts, :event_ingest_window_seconds))

defp event_ingest_window_seconds(_opts), do: configured_event_ingest_window_seconds()

defp normalize_event_ingest_window_seconds(value) when is_integer(value) and value > 0,
  do: value

defp normalize_event_ingest_window_seconds(_value),
  do: configured_event_ingest_window_seconds()

defp configured_event_ingest_window_seconds do
  limits = Application.get_env(:parrhesia, :limits, [])
  Keyword.get(limits, :event_ingest_window_seconds, @default_event_ingest_window_seconds)
end
# Maximum accepted age of an AUTH event's created_at: per-connection option
# first, then application config, then the module default.
defp auth_max_age_seconds(opts) when is_list(opts),
  do: normalize_auth_max_age_seconds(Keyword.get(opts, :auth_max_age_seconds))

defp auth_max_age_seconds(opts) when is_map(opts),
  do: normalize_auth_max_age_seconds(Map.get(opts, :auth_max_age_seconds))

defp auth_max_age_seconds(_opts), do: configured_auth_max_age_seconds()

defp normalize_auth_max_age_seconds(value) when is_integer(value) and value > 0, do: value
defp normalize_auth_max_age_seconds(_value), do: configured_auth_max_age_seconds()

defp configured_auth_max_age_seconds do
  limits = Application.get_env(:parrhesia, :limits, [])
  Keyword.get(limits, :auth_max_age_seconds, @default_auth_max_age_seconds)
end
# Fixed-window rate limiter for EVENT ingestion. Admits the event and returns
# the updated counters, or rejects it with :event_rate_limited once the
# current window's quota is spent.
defp maybe_allow_event_ingest(%__MODULE__{} = state) do
  now_ms = System.monotonic_time(:millisecond)
  window_ms = state.event_ingest_window_seconds * 1000
  elapsed_ms = now_ms - state.event_ingest_window_started_at_ms

  cond do
    elapsed_ms >= window_ms ->
      # The window has expired: open a fresh one that counts this event.
      {:ok,
       %__MODULE__{
         state
         | event_ingest_window_started_at_ms: now_ms,
           event_ingest_count: 1
       }}

    state.event_ingest_count < state.max_event_ingest_per_window ->
      {:ok, %__MODULE__{state | event_ingest_count: state.event_ingest_count + 1}}

    true ->
      {:error, :event_rate_limited}
  end
end
# Rejects events whose canonical JSON encoding exceeds the configured limit.
# An unset/invalid limit disables the check entirely.
defp validate_event_payload_size(event, max_bytes)
     when is_map(event) and is_integer(max_bytes) and max_bytes > 0 do
  encoded = JSON.encode!(event)

  if byte_size(encoded) > max_bytes do
    {:error, :event_too_large}
  else
    :ok
  end
end

defp validate_event_payload_size(_event, _max_bytes), do: :ok
# NIP-01: kinds 20000..29999 are ephemeral (never persisted).
defp ephemeral_kind?(kind) when is_integer(kind) and kind in 20_000..29_999, do: true
defp ephemeral_kind?(_kind), do: false
# Policy flag: whether ephemeral events are accepted for fanout (default on).
defp accept_ephemeral_events? do
  policies = Application.get_env(:parrhesia, :policies, [])
  Keyword.get(policies, :accept_ephemeral_events, true)
end
end

View File

@@ -0,0 +1,28 @@
defmodule Parrhesia.Web.Metrics do
  @moduledoc false

  import Plug.Conn

  alias Parrhesia.Telemetry
  alias Parrhesia.Web.MetricsAccess

  # Whether GET /metrics should be served by the main HTTP endpoint
  # (a dedicated listener may be used instead; defaults to enabled).
  @spec enabled_on_main_endpoint?() :: boolean()
  def enabled_on_main_endpoint? do
    metrics_config = Application.get_env(:parrhesia, :metrics, [])
    Keyword.get(metrics_config, :enabled_on_main_endpoint, true)
  end

  # Serves the Prometheus scrape when the caller passes the MetricsAccess
  # checks; otherwise replies 403 without exposing any metrics data.
  @spec handle(Plug.Conn.t()) :: Plug.Conn.t()
  def handle(conn) do
    case MetricsAccess.allowed?(conn) do
      true ->
        payload = TelemetryMetricsPrometheus.Core.scrape(Telemetry.prometheus_reporter())

        conn
        |> put_resp_content_type("text/plain")
        |> send_resp(200, payload)

      false ->
        send_resp(conn, 403, "forbidden")
    end
  end
end

View File

@@ -0,0 +1,138 @@
defmodule Parrhesia.Web.MetricsAccess do
  @moduledoc false

  import Plug.Conn
  import Bitwise

  # Loopback, RFC 1918 private, link-local ranges and their IPv6 equivalents.
  # Used as the implicit allowlist when `private_networks_only` is enabled
  # and no explicit `allowed_cidrs` list is configured.
  @private_cidrs [
    "127.0.0.0/8",
    "10.0.0.0/8",
    "172.16.0.0/12",
    "192.168.0.0/16",
    "169.254.0.0/16",
    "::1/128",
    "fc00::/7",
    "fe80::/10"
  ]

  @doc """
  Returns `true` when `conn` is allowed to scrape the metrics endpoint.

  Metrics configured as `public: true` short-circuit all checks; otherwise
  both the optional bearer-token check and the network (CIDR) check must pass.
  """
  @spec allowed?(Plug.Conn.t()) :: boolean()
  def allowed?(conn) do
    if metrics_public?() do
      true
    else
      token_allowed?(conn) and network_allowed?(conn)
    end
  end

  # No configured token makes this check a no-op. When a token is configured,
  # the comparison is constant-time (Plug.Crypto.secure_compare/2) so the
  # secret cannot be recovered byte-by-byte through response timing; a
  # missing/malformed header fails closed instead of crashing the compare.
  defp token_allowed?(conn) do
    case configured_auth_token() do
      nil ->
        true

      token ->
        case provided_token(conn) do
          provided when is_binary(provided) -> Plug.Crypto.secure_compare(provided, token)
          _missing -> false
        end
    end
  end

  # Extracts the bearer token from the first Authorization header; a header
  # without the "Bearer " prefix is treated as the raw token value.
  defp provided_token(conn) do
    conn
    |> get_req_header("authorization")
    |> List.first()
    |> normalize_authorization_header()
  end

  defp normalize_authorization_header("Bearer " <> token), do: token
  defp normalize_authorization_header(token) when is_binary(token), do: token
  defp normalize_authorization_header(_header), do: nil

  # An explicit `allowed_cidrs` list takes precedence; otherwise fall back to
  # the built-in private-network ranges unless that policy is disabled.
  defp network_allowed?(conn) do
    remote_ip = conn.remote_ip

    cond do
      configured_allowed_cidrs() != [] ->
        Enum.any?(configured_allowed_cidrs(), &ip_in_cidr?(remote_ip, &1))

      metrics_private_networks_only?() ->
        Enum.any?(@private_cidrs, &ip_in_cidr?(remote_ip, &1))

      true ->
        true
    end
  end

  # True when `ip` (an :inet address tuple) falls inside `cidr`. Any parse
  # failure or IPv4/IPv6 family mismatch yields false (deny).
  defp ip_in_cidr?(ip, cidr) do
    with {network, prefix_len} <- parse_cidr(cidr),
         {:ok, ip_size, ip_value} <- ip_to_int(ip),
         {:ok, network_size, network_value} <- ip_to_int(network),
         true <- ip_size == network_size,
         true <- prefix_len >= 0,
         true <- prefix_len <= ip_size do
      mask = network_mask(ip_size, prefix_len)
      (ip_value &&& mask) == (network_value &&& mask)
    else
      _other -> false
    end
  end

  # Parses "address/len" into {inet_tuple, len}; :error on malformed input.
  defp parse_cidr(cidr) when is_binary(cidr) do
    case String.split(cidr, "/", parts: 2) do
      [address, prefix_str] ->
        with {prefix_len, ""} <- Integer.parse(prefix_str),
             {:ok, ip} <- :inet.parse_address(String.to_charlist(address)) do
          {ip, prefix_len}
        else
          _other -> :error
        end

      _other ->
        :error
    end
  end

  defp parse_cidr(_cidr), do: :error

  # Packs an :inet address tuple into {:ok, bit_width, integer_value}.
  defp ip_to_int({a, b, c, d}) do
    {:ok, 32, (a <<< 24) + (b <<< 16) + (c <<< 8) + d}
  end

  defp ip_to_int({a, b, c, d, e, f, g, h}) do
    {:ok, 128,
     (a <<< 112) + (b <<< 96) + (c <<< 80) + (d <<< 64) + (e <<< 48) + (f <<< 32) + (g <<< 16) +
       h}
  end

  defp ip_to_int(_ip), do: :error

  # A /0 prefix masks nothing: every address of the family matches.
  defp network_mask(_size, 0), do: 0

  defp network_mask(size, prefix_len) do
    all_ones = (1 <<< size) - 1
    all_ones <<< (size - prefix_len)
  end

  defp configured_allowed_cidrs do
    :parrhesia
    |> Application.get_env(:metrics, [])
    |> Keyword.get(:allowed_cidrs, [])
    |> Enum.filter(&is_binary/1)
  end

  # Only a non-empty binary counts as a configured auth token.
  defp configured_auth_token do
    case :parrhesia |> Application.get_env(:metrics, []) |> Keyword.get(:auth_token) do
      token when is_binary(token) and token != "" -> token
      _other -> nil
    end
  end

  defp metrics_public? do
    :parrhesia
    |> Application.get_env(:metrics, [])
    |> Keyword.get(:public, false)
  end

  defp metrics_private_networks_only? do
    :parrhesia
    |> Application.get_env(:metrics, [])
    |> Keyword.get(:private_networks_only, true)
  end
end

View File

@@ -0,0 +1,34 @@
defmodule Parrhesia.Web.MetricsEndpoint do
  @moduledoc """
  Optional dedicated HTTP listener for Prometheus metrics scraping.
  """

  use Supervisor

  # Starts the supervisor; it runs zero children unless the listener is
  # enabled in configuration (or via `init_arg` overrides).
  def start_link(init_arg \\ []) do
    Supervisor.start_link(__MODULE__, init_arg, name: __MODULE__)
  end

  @impl true
  def init(init_arg) do
    init_arg
    |> bandit_options()
    |> listener_children()
    |> Supervisor.init(strategy: :one_for_one)
  end

  # A single Bandit child when `:enabled` is set; the flag itself is stripped
  # before handing the options to Bandit.
  defp listener_children(options) do
    if Keyword.get(options, :enabled, false) do
      [{Bandit, Keyword.delete(options, :enabled)}]
    else
      []
    end
  end

  # Application config merged with runtime overrides; scheme and plug get
  # sensible defaults unless explicitly configured.
  defp bandit_options(overrides) do
    :parrhesia
    |> Application.get_env(__MODULE__, [])
    |> Keyword.merge(overrides)
    |> Keyword.put_new(:scheme, :http)
    |> Keyword.put_new(:plug, Parrhesia.Web.MetricsRouter)
  end
end

View File

@@ -0,0 +1,18 @@
defmodule Parrhesia.Web.MetricsRouter do
  # Minimal router served by the optional dedicated metrics listener
  # (Parrhesia.Web.MetricsEndpoint); it exposes only GET /metrics.
  @moduledoc false

  use Plug.Router

  alias Parrhesia.Web.Metrics

  plug(:match)
  plug(:dispatch)

  # Delegates to Metrics.handle/1, which enforces the MetricsAccess rules.
  get "/metrics" do
    Metrics.handle(conn)
  end

  # Everything else on this listener is a 404.
  match _ do
    send_resp(conn, 404, "not found")
  end
end

View File

@@ -5,10 +5,24 @@ defmodule Parrhesia.Web.Readiness do
def ready? do
process_ready?(Parrhesia.Subscriptions.Index) and
process_ready?(Parrhesia.Auth.Challenges) and
process_ready?(Parrhesia.Negentropy.Sessions) and
negentropy_ready?() and
process_ready?(Parrhesia.Repo)
end
defp negentropy_ready? do
if negentropy_enabled?() do
process_ready?(Parrhesia.Negentropy.Sessions)
else
true
end
end
defp negentropy_enabled? do
:parrhesia
|> Application.get_env(:features, [])
|> Keyword.get(:nip_77_negentropy, true)
end
defp process_ready?(name) do
case Process.whereis(name) do
pid when is_pid(pid) -> true

View File

@@ -17,26 +17,16 @@ defmodule Parrhesia.Web.RelayInfo do
end
defp supported_nips do
[
1,
9,
11,
13,
17,
40,
42,
43,
44,
45,
50,
59,
62,
66,
70,
77,
86,
98
]
base = [1, 9, 11, 13, 17, 40, 42, 43, 44, 45, 50, 59, 62, 66, 70]
with_negentropy =
if negentropy_enabled?() do
base ++ [77]
else
base
end
with_negentropy ++ [86, 98]
end
defp limitations do
@@ -48,4 +38,10 @@ defmodule Parrhesia.Web.RelayInfo do
"auth_required" => Parrhesia.Config.get([:policies, :auth_required_for_reads], false)
}
end
defp negentropy_enabled? do
:parrhesia
|> Application.get_env(:features, [])
|> Keyword.get(:nip_77_negentropy, true)
end
end

View File

@@ -3,8 +3,8 @@ defmodule Parrhesia.Web.Router do
use Plug.Router
alias Parrhesia.Telemetry
alias Parrhesia.Web.Management
alias Parrhesia.Web.Metrics
alias Parrhesia.Web.Readiness
alias Parrhesia.Web.RelayInfo
@@ -30,11 +30,11 @@ defmodule Parrhesia.Web.Router do
end
get "/metrics" do
body = TelemetryMetricsPrometheus.Core.scrape(Telemetry.prometheus_reporter())
conn
|> put_resp_content_type("text/plain")
|> send_resp(200, body)
if Metrics.enabled_on_main_endpoint?() do
Metrics.handle(conn)
else
send_resp(conn, 404, "not found")
end
end
post "/management" do
@@ -50,7 +50,12 @@ defmodule Parrhesia.Web.Router do
|> send_resp(200, body)
else
conn
|> WebSockAdapter.upgrade(Parrhesia.Web.Connection, %{}, timeout: 60_000)
|> WebSockAdapter.upgrade(
Parrhesia.Web.Connection,
%{relay_url: relay_url(conn)},
timeout: 60_000,
max_frame_size: max_frame_bytes()
)
|> halt()
end
end
@@ -64,4 +69,25 @@ defmodule Parrhesia.Web.Router do
|> get_req_header("accept")
|> Enum.any?(&String.contains?(&1, "application/nostr+json"))
end
defp relay_url(conn) do
ws_scheme = if conn.scheme == :https, do: "wss", else: "ws"
port_segment =
if default_http_port?(conn.scheme, conn.port) do
""
else
":#{conn.port}"
end
"#{ws_scheme}://#{conn.host}#{port_segment}#{conn.request_path}"
end
defp default_http_port?(:http, 80), do: true
defp default_http_port?(:https, 443), do: true
defp default_http_port?(_scheme, _port), do: false
defp max_frame_bytes do
Parrhesia.Config.get([:limits, :max_frame_bytes], 1_048_576)
end
end

View File

@@ -4,8 +4,8 @@ defmodule Parrhesia.MixProject do
def project do
[
app: :parrhesia,
version: "0.2.0",
elixir: "~> 1.19",
version: "0.4.0",
elixir: "~> 1.18",
start_permanent: Mix.env() == :prod,
deps: deps(),
aliases: aliases()
@@ -31,6 +31,7 @@ defmodule Parrhesia.MixProject do
{:bandit, "~> 1.5"},
{:plug, "~> 1.15"},
{:websock_adapter, "~> 0.5"},
{:lib_secp256k1, "~> 0.7"},
# Runtime: storage adapter (Postgres first)
{:ecto_sql, "~> 3.12"},
@@ -43,8 +44,6 @@ defmodule Parrhesia.MixProject do
# Test tooling
{:stream_data, "~> 1.0", only: :test},
{:mox, "~> 1.1", only: :test},
{:bypass, "~> 2.1", only: :test},
{:websockex, "~> 0.4", only: :test},
# Project tooling
@@ -71,7 +70,7 @@ defmodule Parrhesia.MixProject do
"credo --strict --all",
"deps.unlock --unused",
"test",
"test.nak_e2e",
# "test.nak_e2e",
"test.marmot_e2e"
]
]

View File

@@ -1,7 +1,6 @@
%{
"bandit": {:hex, :bandit, "1.10.3", "1e5d168fa79ec8de2860d1b4d878d97d4fbbe2fdbe7b0a7d9315a4359d1d4bb9", [:mix], [{:hpax, "~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}, {:plug, "~> 1.18", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:thousand_island, "~> 1.0", [hex: :thousand_island, repo: "hexpm", optional: false]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "99a52d909c48db65ca598e1962797659e3c0f1d06e825a50c3d75b74a5e2db18"},
"bunt": {:hex, :bunt, "1.0.0", "081c2c665f086849e6d57900292b3a161727ab40431219529f13c4ddcf3e7a44", [:mix], [], "hexpm", "dc5f86aa08a5f6fa6b8096f0735c4e76d54ae5c9fa2c143e5a1fc7c1cd9bb6b5"},
"bypass": {:hex, :bypass, "2.1.0", "909782781bf8e20ee86a9cabde36b259d44af8b9f38756173e8f5e2e1fabb9b1", [:mix], [{:plug, "~> 1.7", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.0", [hex: :plug_cowboy, repo: "hexpm", optional: false]}, {:ranch, "~> 1.3", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "d9b5df8fa5b7a6efa08384e9bbecfe4ce61c77d28a4282f79e02f1ef78d96b80"},
"cowboy": {:hex, :cowboy, "2.14.2", "4008be1df6ade45e4f2a4e9e2d22b36d0b5aba4e20b0a0d7049e28d124e34847", [:make, :rebar3], [{:cowlib, ">= 2.16.0 and < 3.0.0", [hex: :cowlib, repo: "hexpm", optional: false]}, {:ranch, ">= 1.8.0 and < 3.0.0", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "569081da046e7b41b5df36aa359be71a0c8874e5b9cff6f747073fc57baf1ab9"},
"cowboy_telemetry": {:hex, :cowboy_telemetry, "0.4.0", "f239f68b588efa7707abce16a84d0d2acf3a0f50571f8bb7f56a15865aae820c", [:rebar3], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7d98bac1ee4565d31b62d59f8823dfd8356a169e7fcbb83831b8a5397404c9de"},
"cowlib": {:hex, :cowlib, "2.16.0", "54592074ebbbb92ee4746c8a8846e5605052f29309d3a873468d76cdf932076f", [:make, :rebar3], [], "hexpm", "7f478d80d66b747344f0ea7708c187645cfcc08b11aa424632f78e25bf05db51"},
@@ -11,24 +10,24 @@
"deps_changelog": {:hex, :deps_changelog, "0.3.5", "65981997d9bc893b8027a0c03da093a4083328c00b17f562df269c2b61d44073", [:mix], [], "hexpm", "298fcd7794395d8e61dba8d29ce8fcee09f1df4d48adb273a41e8f4a1736491e"},
"ecto": {:hex, :ecto, "3.13.5", "9d4a69700183f33bf97208294768e561f5c7f1ecf417e0fa1006e4a91713a834", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "df9efebf70cf94142739ba357499661ef5dbb559ef902b68ea1f3c1fabce36de"},
"ecto_sql": {:hex, :ecto_sql, "3.13.5", "2f8282b2ad97bf0f0d3217ea0a6fff320ead9e2f8770f810141189d182dc304e", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.13.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.7", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.19 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "aa36751f4e6a2b56ae79efb0e088042e010ff4935fc8684e74c23b1f49e25fdc"},
"elixir_make": {:hex, :elixir_make, "0.9.0", "6484b3cd8c0cee58f09f05ecaf1a140a8c97670671a6a0e7ab4dc326c3109726", [:mix], [], "hexpm", "db23d4fd8b757462ad02f8aa73431a426fe6671c80b200d9710caf3d1dd0ffdb"},
"file_system": {:hex, :file_system, "1.1.1", "31864f4685b0148f25bd3fbef2b1228457c0c89024ad67f7a81a3ffbc0bbad3a", [:mix], [], "hexpm", "7a15ff97dfe526aeefb090a7a9d3d03aa907e100e262a0f8f7746b78f8f87a5d"},
"finch": {:hex, :finch, "0.21.0", "b1c3b2d48af02d0c66d2a9ebfb5622be5c5ecd62937cf79a88a7f98d48a8290c", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.6.2 or ~> 1.7", [hex: :mint, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.4 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 1.1", [hex: :nimble_pool, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "87dc6e169794cb2570f75841a19da99cfde834249568f2a5b121b809588a4377"},
"glob_ex": {:hex, :glob_ex, "0.1.11", "cb50d3f1ef53f6ca04d6252c7fde09fd7a1cf63387714fe96f340a1349e62c93", [:mix], [], "hexpm", "342729363056e3145e61766b416769984c329e4378f1d558b63e341020525de4"},
"hpax": {:hex, :hpax, "1.0.3", "ed67ef51ad4df91e75cc6a1494f851850c0bd98ebc0be6e81b026e765ee535aa", [:mix], [], "hexpm", "8eab6e1cfa8d5918c2ce4ba43588e894af35dbd8e91e6e55c817bca5847df34a"},
"igniter": {:hex, :igniter, "0.7.4", "b5f9dd512eb1e672f1c141b523142b5b4602fcca231df5b4e362999df4b88e14", [:mix], [{:glob_ex, "~> 0.1.7", [hex: :glob_ex, repo: "hexpm", optional: false]}, {:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:owl, "~> 0.11", [hex: :owl, repo: "hexpm", optional: false]}, {:phx_new, "~> 1.7", [hex: :phx_new, repo: "hexpm", optional: true]}, {:req, "~> 0.5", [hex: :req, repo: "hexpm", optional: false]}, {:rewrite, ">= 1.1.1 and < 2.0.0-0", [hex: :rewrite, repo: "hexpm", optional: false]}, {:sourceror, "~> 1.4", [hex: :sourceror, repo: "hexpm", optional: false]}, {:spitfire, ">= 0.1.3 and < 1.0.0-0", [hex: :spitfire, repo: "hexpm", optional: false]}], "hexpm", "971b240ee916a06b1af56381a262d9eeaff9610eddc299d61a213cd7a9d79efd"},
"jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"},
"lib_secp256k1": {:hex, :lib_secp256k1, "0.7.1", "53cad778b8da3a29e453a7a477517d99fb5f13f615c8050eb2db8fd1dce7a1db", [:make, :mix], [{:elixir_make, "~> 0.9", [hex: :elixir_make, repo: "hexpm", optional: false]}], "hexpm", "78bdd3661a17448aff5aeec5ca74c8ddbc09b01f0ecfa3ba1aba3e8ae47ab2b3"},
"mime": {:hex, :mime, "2.0.7", "b8d739037be7cd402aee1ba0306edfdef982687ee7e9859bee6198c1e7e2f128", [:mix], [], "hexpm", "6171188e399ee16023ffc5b76ce445eb6d9672e2e241d2df6050f3c771e80ccd"},
"mint": {:hex, :mint, "1.7.1", "113fdb2b2f3b59e47c7955971854641c61f378549d73e829e1768de90fc1abf1", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:hpax, "~> 0.1.1 or ~> 0.2.0 or ~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}], "hexpm", "fceba0a4d0f24301ddee3024ae116df1c3f4bb7a563a731f45fdfeb9d39a231b"},
"mox": {:hex, :mox, "1.2.0", "a2cd96b4b80a3883e3100a221e8adc1b98e4c3a332a8fc434c39526babafd5b3", [:mix], [{:nimble_ownership, "~> 1.0", [hex: :nimble_ownership, repo: "hexpm", optional: false]}], "hexpm", "c7b92b3cc69ee24a7eeeaf944cd7be22013c52fcb580c1f33f50845ec821089a"},
"nimble_options": {:hex, :nimble_options, "1.1.1", "e3a492d54d85fc3fd7c5baf411d9d2852922f66e69476317787a7b2bb000a61b", [:mix], [], "hexpm", "821b2470ca9442c4b6984882fe9bb0389371b8ddec4d45a9504f00a66f650b44"},
"nimble_ownership": {:hex, :nimble_ownership, "1.0.2", "fa8a6f2d8c592ad4d79b2ca617473c6aefd5869abfa02563a77682038bf916cf", [:mix], [], "hexpm", "098af64e1f6f8609c6672127cfe9e9590a5d3fcdd82bc17a377b8692fd81a879"},
"nimble_pool": {:hex, :nimble_pool, "1.1.0", "bf9c29fbdcba3564a8b800d1eeb5a3c58f36e1e11d7b7fb2e084a643f645f06b", [:mix], [], "hexpm", "af2e4e6b34197db81f7aad230c1118eac993acc0dae6bc83bac0126d4ae0813a"},
"owl": {:hex, :owl, "0.13.0", "26010e066d5992774268f3163506972ddac0a7e77bfe57fa42a250f24d6b876e", [:mix], [{:ucwidth, "~> 0.2", [hex: :ucwidth, repo: "hexpm", optional: true]}], "hexpm", "59bf9d11ce37a4db98f57cb68fbfd61593bf419ec4ed302852b6683d3d2f7475"},
"plug": {:hex, :plug, "1.19.1", "09bac17ae7a001a68ae393658aa23c7e38782be5c5c00c80be82901262c394c0", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "560a0017a8f6d5d30146916862aaf9300b7280063651dd7e532b8be168511e62"},
"plug_cowboy": {:hex, :plug_cowboy, "2.8.0", "07789e9c03539ee51bb14a07839cc95aa96999fd8846ebfd28c97f0b50c7b612", [:mix], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:cowboy_telemetry, "~> 0.3", [hex: :cowboy_telemetry, repo: "hexpm", optional: false]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "9cbfaaf17463334ca31aed38ea7e08a68ee37cabc077b1e9be6d2fb68e0171d0"},
"plug_crypto": {:hex, :plug_crypto, "2.1.1", "19bda8184399cb24afa10be734f84a16ea0a2bc65054e23a62bb10f06bc89491", [:mix], [], "hexpm", "6470bce6ffe41c8bd497612ffde1a7e4af67f36a15eea5f921af71cf3e11247c"},
"postgrex": {:hex, :postgrex, "0.22.0", "fb027b58b6eab1f6de5396a2abcdaaeb168f9ed4eccbb594e6ac393b02078cbd", [:mix], [{:db_connection, "~> 2.9", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "a68c4261e299597909e03e6f8ff5a13876f5caadaddd0d23af0d0a61afcc5d84"},
"ranch": {:hex, :ranch, "1.8.1", "208169e65292ac5d333d6cdbad49388c1ae198136e4697ae2f474697140f201c", [:make, :rebar3], [], "hexpm", "aed58910f4e21deea992a67bf51632b6d60114895eb03bb392bb733064594dd0"},
"ranch": {:hex, :ranch, "2.2.0", "25528f82bc8d7c6152c57666ca99ec716510fe0925cb188172f41ce93117b1b0", [:make, :rebar3], [], "hexpm", "fa0b99a1780c80218a4197a59ea8d3bdae32fbff7e88527d7d8a4787eff4f8e7"},
"req": {:hex, :req, "0.5.17", "0096ddd5b0ed6f576a03dde4b158a0c727215b15d2795e59e0916c6971066ede", [:mix], [{:brotli, "~> 0.3.1", [hex: :brotli, repo: "hexpm", optional: true]}, {:ezstd, "~> 1.0", [hex: :ezstd, repo: "hexpm", optional: true]}, {:finch, "~> 0.17", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:mime, "~> 2.0.6 or ~> 2.1", [hex: :mime, repo: "hexpm", optional: false]}, {:nimble_csv, "~> 1.0", [hex: :nimble_csv, repo: "hexpm", optional: true]}, {:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "0b8bc6ffdfebbc07968e59d3ff96d52f2202d0536f10fef4dc11dc02a2a43e39"},
"rewrite": {:hex, :rewrite, "1.3.0", "67448ba7975690b35ba7e7f35717efcce317dbd5963cb0577aa7325c1923121a", [:mix], [{:glob_ex, "~> 0.1", [hex: :glob_ex, repo: "hexpm", optional: false]}, {:sourceror, "~> 1.0", [hex: :sourceror, repo: "hexpm", optional: false]}, {:text_diff, "~> 0.1", [hex: :text_diff, repo: "hexpm", optional: false]}], "hexpm", "d111ac7ff3a58a802ef4f193bbd1831e00a9c57b33276e5068e8390a212714a5"},
"sourceror": {:hex, :sourceror, "1.12.0", "da354c5f35aad3cc1132f5d5b0d8437d865e2661c263260480bab51b5eedb437", [:mix], [], "hexpm", "755703683bd014ebcd5de9acc24b68fb874a660a568d1d63f8f98cd8a6ef9cd0"},

View File

@@ -30,7 +30,10 @@ defmodule Parrhesia.Repo.Migrations.CreateRelayStorage do
create(index(:events, [:expires_at], where: "expires_at IS NOT NULL"))
create(index(:events, [:deleted_at], where: "deleted_at IS NOT NULL"))
create table(:event_tags, primary_key: false) do
create table(:event_tags,
primary_key: false,
options: "PARTITION BY RANGE (event_created_at)"
) do
add(:event_created_at, :bigint, null: false)
add(:event_id, :binary, null: false)
add(:name, :string, null: false)
@@ -39,6 +42,8 @@ defmodule Parrhesia.Repo.Migrations.CreateRelayStorage do
timestamps(updated_at: false, type: :utc_datetime_usec)
end
execute("CREATE TABLE event_tags_default PARTITION OF event_tags DEFAULT")
execute("""
ALTER TABLE event_tags
ADD CONSTRAINT event_tags_event_fk
@@ -149,6 +154,8 @@ defmodule Parrhesia.Repo.Migrations.CreateRelayStorage do
drop(table(:banned_pubkeys))
drop(table(:addressable_event_state))
drop(table(:replaceable_event_state))
execute("DROP TABLE event_tags_default")
drop(table(:event_tags))
execute("DROP TABLE events_default")

View File

@@ -0,0 +1,24 @@
defmodule Parrhesia.Repo.Migrations.AddEventsTagsJsonb do
  use Ecto.Migration

  # Adds a denormalized `tags` jsonb column to `events` and backfills it from
  # the normalized `event_tags` rows.
  def up do
    execute("ALTER TABLE events ADD COLUMN tags jsonb NOT NULL DEFAULT '[]'::jsonb")

    # Backfill: aggregate each event's tags (ordered by their original idx)
    # into a jsonb array of [name, value] pairs; events with no tags keep an
    # empty array via COALESCE. Tags are correlated by (created_at, id), the
    # composite key shared by both partitioned tables.
    # NOTE(review): jsonb_build_array(tag.name, tag.value) emits exactly two
    # elements per tag — confirm event_tags never carries additional tag
    # elements, otherwise this backfill silently drops them.
    execute("""
    UPDATE events AS event
    SET tags = COALESCE(
      (
        SELECT jsonb_agg(jsonb_build_array(tag.name, tag.value) ORDER BY tag.idx)
        FROM event_tags AS tag
        WHERE tag.event_created_at = event.created_at
          AND tag.event_id = event.id
      ),
      '[]'::jsonb
    )
    """)
  end

  # Rollback drops only the denormalized column; the source rows in
  # event_tags are untouched.
  def down do
    execute("ALTER TABLE events DROP COLUMN tags")
  end
end

View File

@@ -10,9 +10,9 @@ usage:
./scripts/run_bench_compare.sh
Runs the same nostr-bench suite against:
1) Parrhesia (temporary test relay via run_e2e_suite.sh)
2) strfry (ephemeral instance)
3) nostr-rs-relay (ephemeral sqlite instance)
1) Parrhesia (temporary prod relay via run_e2e_suite.sh)
2) strfry (ephemeral instance) — optional, skipped if not in PATH
3) nostr-rs-relay (ephemeral sqlite instance) — optional, skipped if not in PATH
Environment:
PARRHESIA_BENCH_RUNS Number of comparison runs (default: 2)
@@ -50,19 +50,28 @@ if ! command -v nostr-bench >/dev/null 2>&1; then
exit 1
fi
if ! command -v strfry >/dev/null 2>&1; then
echo "strfry not found in PATH. Enter devenv shell first." >&2
exit 1
# port_listening PORT — cross-platform check (Darwin: lsof, Linux: ss)
# port_listening PORT — succeeds when something is listening on TCP PORT.
# Prefers `ss` (Linux); falls back to `lsof` (Darwin/BSD, where ss is absent).
port_listening() {
  local port="$1"
  if command -v ss >/dev/null 2>&1; then
    # NOTE(review): greps for ":PORT " in `ss -ltn` output; assumes a column
    # follows the local address and could in principle match a port substring
    # in another column — confirm this is acceptable for readiness polling.
    ss -ltn | grep -q ":${port} "
  else
    lsof -iTCP:"${port}" -sTCP:LISTEN -P -n >/dev/null 2>&1
  fi
}
HAS_STRFRY=0
if command -v strfry >/dev/null 2>&1; then
HAS_STRFRY=1
else
echo "strfry not found in PATH — skipping strfry benchmarks"
fi
if ! command -v nostr-rs-relay >/dev/null 2>&1; then
echo "nostr-rs-relay not found in PATH. Enter devenv shell first." >&2
exit 1
fi
if ! command -v ss >/dev/null 2>&1; then
echo "ss command not found; cannot detect strfry readiness." >&2
exit 1
HAS_NOSTR_RS=0
if command -v nostr-rs-relay >/dev/null 2>&1; then
HAS_NOSTR_RS=1
else
echo "nostr-rs-relay not found in PATH — skipping nostr-rs-relay benchmarks"
fi
RUNS="${PARRHESIA_BENCH_RUNS:-2}"
@@ -98,8 +107,16 @@ resolve_strfry_version() {
printf '%s\n' "$cli_version"
}
STRFRY_VERSION="$(resolve_strfry_version)"
NOSTR_RS_RELAY_VERSION="$(nostr-rs-relay --version 2>/dev/null | head -n 1 | tr -d '\r')"
STRFRY_VERSION=""
if (( HAS_STRFRY )); then
STRFRY_VERSION="$(resolve_strfry_version)"
fi
NOSTR_RS_RELAY_VERSION=""
if (( HAS_NOSTR_RS )); then
NOSTR_RS_RELAY_VERSION="$(nostr-rs-relay --version 2>/dev/null | head -n 1 | tr -d '\r')"
fi
NOSTR_BENCH_VERSION="$(nostr-bench --version 2>/dev/null | head -n 1 | tr -d '\r')"
export PARRHESIA_BENCH_CONNECT_COUNT="${PARRHESIA_BENCH_CONNECT_COUNT:-200}"
@@ -158,7 +175,7 @@ EOF
STRFRY_PID=$!
for _ in {1..100}; do
if ss -ltn | grep -q ":${port} "; then
if port_listening "${port}"; then
return 0
fi
sleep 0.1
@@ -198,7 +215,7 @@ EOF
NOSTR_RS_PID=$!
for _ in {1..100}; do
if ss -ltn | grep -q ":${port} "; then
if port_listening "${port}"; then
return 0
fi
sleep 0.1
@@ -220,8 +237,12 @@ stop_nostr_rs_relay() {
echo "Running ${RUNS} comparison run(s)..."
echo "Versions:"
echo " parrhesia ${PARRHESIA_VERSION}"
echo " ${STRFRY_VERSION}"
echo " ${NOSTR_RS_RELAY_VERSION}"
if (( HAS_STRFRY )); then
echo " ${STRFRY_VERSION}"
fi
if (( HAS_NOSTR_RS )); then
echo " ${NOSTR_RS_RELAY_VERSION}"
fi
echo " ${NOSTR_BENCH_VERSION}"
echo
@@ -234,6 +255,7 @@ for run in $(seq 1 "$RUNS"); do
exit 1
fi
if (( HAS_STRFRY )); then
echo "[run ${run}/${RUNS}] strfry"
strfry_log="$WORK_DIR/strfry_${run}.log"
strfry_port=$((49000 + run))
@@ -246,7 +268,9 @@ for run in $(seq 1 "$RUNS"); do
exit 1
fi
stop_strfry
fi
if (( HAS_NOSTR_RS )); then
echo "[run ${run}/${RUNS}] nostr-rs-relay"
nostr_rs_log="$WORK_DIR/nostr_rs_relay_${run}.log"
nostr_rs_port=$((50000 + run))
@@ -259,17 +283,20 @@ for run in $(seq 1 "$RUNS"); do
exit 1
fi
stop_nostr_rs_relay
fi
echo
done
node - "$WORK_DIR" "$RUNS" <<'NODE'
node - "$WORK_DIR" "$RUNS" "$HAS_STRFRY" "$HAS_NOSTR_RS" <<'NODE'
const fs = require("node:fs");
const path = require("node:path");
const workDir = process.argv[2];
const runs = Number(process.argv[3]);
const hasStrfry = process.argv[4] === "1";
const hasNostrRs = process.argv[5] === "1";
function parseLog(filePath) {
const content = fs.readFileSync(filePath, "utf8");
@@ -337,124 +364,61 @@ function loadRuns(prefix) {
}
const parrhesiaRuns = loadRuns("parrhesia");
const strfryRuns = loadRuns("strfry");
const nostrRsRuns = loadRuns("nostr_rs_relay");
const strfryRuns = hasStrfry ? loadRuns("strfry") : [];
const nostrRsRuns = hasNostrRs ? loadRuns("nostr_rs_relay") : [];
const summary = {
parrhesia: {
connectAvgMs: mean(parrhesiaRuns.map((m) => m.connectAvgMs)),
connectMaxMs: mean(parrhesiaRuns.map((m) => m.connectMaxMs)),
echoTps: mean(parrhesiaRuns.map((m) => m.echoTps)),
echoSizeMiBS: mean(parrhesiaRuns.map((m) => m.echoSizeMiBS)),
eventTps: mean(parrhesiaRuns.map((m) => m.eventTps)),
eventSizeMiBS: mean(parrhesiaRuns.map((m) => m.eventSizeMiBS)),
reqTps: mean(parrhesiaRuns.map((m) => m.reqTps)),
reqSizeMiBS: mean(parrhesiaRuns.map((m) => m.reqSizeMiBS)),
},
strfry: {
connectAvgMs: mean(strfryRuns.map((m) => m.connectAvgMs)),
connectMaxMs: mean(strfryRuns.map((m) => m.connectMaxMs)),
echoTps: mean(strfryRuns.map((m) => m.echoTps)),
echoSizeMiBS: mean(strfryRuns.map((m) => m.echoSizeMiBS)),
eventTps: mean(strfryRuns.map((m) => m.eventTps)),
eventSizeMiBS: mean(strfryRuns.map((m) => m.eventSizeMiBS)),
reqTps: mean(strfryRuns.map((m) => m.reqTps)),
reqSizeMiBS: mean(strfryRuns.map((m) => m.reqSizeMiBS)),
},
nostrRsRelay: {
connectAvgMs: mean(nostrRsRuns.map((m) => m.connectAvgMs)),
connectMaxMs: mean(nostrRsRuns.map((m) => m.connectMaxMs)),
echoTps: mean(nostrRsRuns.map((m) => m.echoTps)),
echoSizeMiBS: mean(nostrRsRuns.map((m) => m.echoSizeMiBS)),
eventTps: mean(nostrRsRuns.map((m) => m.eventTps)),
eventSizeMiBS: mean(nostrRsRuns.map((m) => m.eventSizeMiBS)),
reqTps: mean(nostrRsRuns.map((m) => m.reqTps)),
reqSizeMiBS: mean(nostrRsRuns.map((m) => m.reqSizeMiBS)),
},
};
const metrics = [
"connectAvgMs", "connectMaxMs",
"echoTps", "echoSizeMiBS",
"eventTps", "eventSizeMiBS",
"reqTps", "reqSizeMiBS",
];
// Averages every entry of the module-level `metrics` list across a server's
// parsed benchmark runs. Returns an object keyed by metric name; values are
// the per-metric means produced by `mean` over all runs.
function summarise(allRuns) {
  const out = {};
  for (const m of metrics) {
    out[m] = mean(allRuns.map((r) => r[m]));
  }
  return out;
}
const summary = { parrhesia: summarise(parrhesiaRuns) };
if (hasStrfry) summary.strfry = summarise(strfryRuns);
if (hasNostrRs) summary.nostrRsRelay = summarise(nostrRsRuns);
function ratioVsParrhesia(serverKey, metric) {
const p = summary.parrhesia[metric];
const other = summary[serverKey][metric];
const other = summary[serverKey]?.[metric];
if (!Number.isFinite(p) || !Number.isFinite(other) || p === 0) return "n/a";
return `${(other / p).toFixed(2)}x`;
}
const rows = [
[
"connect avg latency (ms) ↓",
toFixed(summary.parrhesia.connectAvgMs),
toFixed(summary.strfry.connectAvgMs),
toFixed(summary.nostrRsRelay.connectAvgMs),
ratioVsParrhesia("strfry", "connectAvgMs"),
ratioVsParrhesia("nostrRsRelay", "connectAvgMs"),
],
[
"connect max latency (ms) ↓",
toFixed(summary.parrhesia.connectMaxMs),
toFixed(summary.strfry.connectMaxMs),
toFixed(summary.nostrRsRelay.connectMaxMs),
ratioVsParrhesia("strfry", "connectMaxMs"),
ratioVsParrhesia("nostrRsRelay", "connectMaxMs"),
],
[
"echo throughput (TPS) ↑",
toFixed(summary.parrhesia.echoTps),
toFixed(summary.strfry.echoTps),
toFixed(summary.nostrRsRelay.echoTps),
ratioVsParrhesia("strfry", "echoTps"),
ratioVsParrhesia("nostrRsRelay", "echoTps"),
],
[
"echo throughput (MiB/s) ↑",
toFixed(summary.parrhesia.echoSizeMiBS),
toFixed(summary.strfry.echoSizeMiBS),
toFixed(summary.nostrRsRelay.echoSizeMiBS),
ratioVsParrhesia("strfry", "echoSizeMiBS"),
ratioVsParrhesia("nostrRsRelay", "echoSizeMiBS"),
],
[
"event throughput (TPS) ↑",
toFixed(summary.parrhesia.eventTps),
toFixed(summary.strfry.eventTps),
toFixed(summary.nostrRsRelay.eventTps),
ratioVsParrhesia("strfry", "eventTps"),
ratioVsParrhesia("nostrRsRelay", "eventTps"),
],
[
"event throughput (MiB/s) ↑",
toFixed(summary.parrhesia.eventSizeMiBS),
toFixed(summary.strfry.eventSizeMiBS),
toFixed(summary.nostrRsRelay.eventSizeMiBS),
ratioVsParrhesia("strfry", "eventSizeMiBS"),
ratioVsParrhesia("nostrRsRelay", "eventSizeMiBS"),
],
[
"req throughput (TPS) ↑",
toFixed(summary.parrhesia.reqTps),
toFixed(summary.strfry.reqTps),
toFixed(summary.nostrRsRelay.reqTps),
ratioVsParrhesia("strfry", "reqTps"),
ratioVsParrhesia("nostrRsRelay", "reqTps"),
],
[
"req throughput (MiB/s) ↑",
toFixed(summary.parrhesia.reqSizeMiBS),
toFixed(summary.strfry.reqSizeMiBS),
toFixed(summary.nostrRsRelay.reqSizeMiBS),
ratioVsParrhesia("strfry", "reqSizeMiBS"),
ratioVsParrhesia("nostrRsRelay", "reqSizeMiBS"),
],
const metricLabels = [
["connect avg latency (ms) ↓", "connectAvgMs"],
["connect max latency (ms) ↓", "connectMaxMs"],
["echo throughput (TPS) ↑", "echoTps"],
["echo throughput (MiB/s) ↑", "echoSizeMiBS"],
["event throughput (TPS) ↑", "eventTps"],
["event throughput (MiB/s) ↑", "eventSizeMiBS"],
["req throughput (TPS) ↑", "reqTps"],
["req throughput (MiB/s) ↑", "reqSizeMiBS"],
];
const headers = [
"metric",
"parrhesia",
"strfry",
"nostr-rs-relay",
"strfry/parrhesia",
"nostr-rs/parrhesia",
];
const headers = ["metric", "parrhesia"];
if (hasStrfry) headers.push("strfry");
if (hasNostrRs) headers.push("nostr-rs-relay");
if (hasStrfry) headers.push("strfry/parrhesia");
if (hasNostrRs) headers.push("nostr-rs/parrhesia");
const rows = metricLabels.map(([label, key]) => {
const row = [label, toFixed(summary.parrhesia[key])];
if (hasStrfry) row.push(toFixed(summary.strfry[key]));
if (hasNostrRs) row.push(toFixed(summary.nostrRsRelay[key]));
if (hasStrfry) row.push(ratioVsParrhesia("strfry", key));
if (hasNostrRs) row.push(ratioVsParrhesia("nostrRsRelay", key));
return row;
});
const widths = headers.map((h, i) => Math.max(h.length, ...rows.map((r) => r[i].length)));
function fmtRow(cols) {
@@ -471,18 +435,25 @@ for (const row of rows) {
}
console.log("\nLegend: ↑ higher is better, ↓ lower is better.");
console.log("Ratio columns are server/parrhesia (for ↓ metrics, <1.00x means that server is faster).\n");
if (hasStrfry || hasNostrRs) {
console.log("Ratio columns are server/parrhesia (for ↓ metrics, <1.00x means that server is faster).\n");
} else {
console.log("");
}
console.log("Run details:");
for (let i = 0; i < runs; i += 1) {
const p = parrhesiaRuns[i];
let line = ` run ${i + 1}: ` +
`parrhesia(echo_tps=${toFixed(p.echoTps, 0)}, event_tps=${toFixed(p.eventTps, 0)}, req_tps=${toFixed(p.reqTps, 0)}, connect_avg_ms=${toFixed(p.connectAvgMs, 0)})`;
if (hasStrfry) {
const s = strfryRuns[i];
line += ` | strfry(echo_tps=${toFixed(s.echoTps, 0)}, event_tps=${toFixed(s.eventTps, 0)}, req_tps=${toFixed(s.reqTps, 0)}, connect_avg_ms=${toFixed(s.connectAvgMs, 0)})`;
}
if (hasNostrRs) {
const n = nostrRsRuns[i];
console.log(
` run ${i + 1}: ` +
`parrhesia(echo_tps=${toFixed(p.echoTps, 0)}, event_tps=${toFixed(p.eventTps, 0)}, req_tps=${toFixed(p.reqTps, 0)}, connect_avg_ms=${toFixed(p.connectAvgMs, 0)}) | ` +
`strfry(echo_tps=${toFixed(s.echoTps, 0)}, event_tps=${toFixed(s.eventTps, 0)}, req_tps=${toFixed(s.reqTps, 0)}, connect_avg_ms=${toFixed(s.connectAvgMs, 0)}) | ` +
`nostr-rs-relay(echo_tps=${toFixed(n.echoTps, 0)}, event_tps=${toFixed(n.eventTps, 0)}, req_tps=${toFixed(n.reqTps, 0)}, connect_avg_ms=${toFixed(n.connectAvgMs, 0)})`
);
line += ` | nostr-rs-relay(echo_tps=${toFixed(n.echoTps, 0)}, event_tps=${toFixed(n.eventTps, 0)}, req_tps=${toFixed(n.reqTps, 0)}, connect_avg_ms=${toFixed(n.connectAvgMs, 0)})`;
}
console.log(line);
}
NODE

View File

@@ -12,7 +12,12 @@ shift
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$ROOT_DIR"
export MIX_ENV=test
MIX_ENV="${PARRHESIA_E2E_MIX_ENV:-test}"
if [[ "$MIX_ENV" != "test" && "$MIX_ENV" != "prod" ]]; then
echo "PARRHESIA_E2E_MIX_ENV must be test or prod, got: $MIX_ENV" >&2
exit 1
fi
export MIX_ENV
SUITE_SLUG="$(printf '%s' "$SUITE_NAME" | tr '[:upper:]' '[:lower:]' | tr -c 'a-z0-9' '_')"
SUITE_UPPER="$(printf '%s' "$SUITE_SLUG" | tr '[:lower:]' '[:upper:]')"
@@ -26,12 +31,40 @@ printf -v "$PORT_ENV_VAR" '%s' "$TEST_HTTP_PORT"
export "$PORT_ENV_VAR"
if [[ -z "${PGDATABASE:-}" ]]; then
export PGDATABASE="parrhesia_${SUITE_SLUG}_test"
export PGDATABASE="parrhesia_${SUITE_SLUG}_${MIX_ENV}"
fi
PARRHESIA_TEST_HTTP_PORT=0 mix ecto.drop --quiet || true
PARRHESIA_TEST_HTTP_PORT=0 mix ecto.create --quiet
PARRHESIA_TEST_HTTP_PORT=0 mix ecto.migrate --quiet
if [[ -z "${DATABASE_URL:-}" ]]; then
PGUSER_EFFECTIVE="${PGUSER:-${USER:-agent}}"
PGHOST_EFFECTIVE="${PGHOST:-localhost}"
PGPORT_EFFECTIVE="${PGPORT:-5432}"
# Ecto requires a URL host to be present. For unix sockets we keep a dummy
# TCP host and pass the socket directory as query option.
if [[ "$PGHOST_EFFECTIVE" == /* ]]; then
if [[ -n "${PGPASSWORD:-}" ]]; then
export DATABASE_URL="ecto://${PGUSER_EFFECTIVE}:${PGPASSWORD}@localhost/${PGDATABASE}?socket_dir=${PGHOST_EFFECTIVE}&port=${PGPORT_EFFECTIVE}"
else
export DATABASE_URL="ecto://${PGUSER_EFFECTIVE}@localhost/${PGDATABASE}?socket_dir=${PGHOST_EFFECTIVE}&port=${PGPORT_EFFECTIVE}"
fi
else
if [[ -n "${PGPASSWORD:-}" ]]; then
export DATABASE_URL="ecto://${PGUSER_EFFECTIVE}:${PGPASSWORD}@${PGHOST_EFFECTIVE}:${PGPORT_EFFECTIVE}/${PGDATABASE}"
else
export DATABASE_URL="ecto://${PGUSER_EFFECTIVE}@${PGHOST_EFFECTIVE}:${PGPORT_EFFECTIVE}/${PGDATABASE}"
fi
fi
fi
if [[ "$MIX_ENV" == "test" ]]; then
PARRHESIA_TEST_HTTP_PORT=0 mix ecto.drop --quiet --force || true
PARRHESIA_TEST_HTTP_PORT=0 mix ecto.create --quiet
PARRHESIA_TEST_HTTP_PORT=0 mix ecto.migrate --quiet
else
mix ecto.drop --quiet --force || true
mix ecto.create --quiet
mix ecto.migrate --quiet
fi
SERVER_LOG="${ROOT_DIR}/.${SUITE_SLUG}-e2e-server.log"
: > "$SERVER_LOG"
@@ -41,16 +74,45 @@ cleanup() {
kill "$SERVER_PID" 2>/dev/null || true
wait "$SERVER_PID" 2>/dev/null || true
fi
if [[ "${PARRHESIA_E2E_DROP_DB_ON_EXIT:-0}" == "1" ]]; then
if [[ "$MIX_ENV" == "test" ]]; then
PARRHESIA_TEST_HTTP_PORT=0 mix ecto.drop --quiet --force || true
else
mix ecto.drop --quiet --force || true
fi
fi
}
trap cleanup EXIT INT TERM
if ss -ltn "( sport = :${TEST_HTTP_PORT} )" | tail -n +2 | grep -q .; then
port_in_use() {
  # Single argument: the TCP port to check for an existing listener.
  local port="$1"
  if command -v ss >/dev/null 2>&1; then
    # Linux path: filter listeners on the exact source port; `tail -n +2`
    # drops the header row so grep only sees real socket lines.
    ss -ltn "( sport = :${port} )" | tail -n +2 | grep -q .
    return
  fi
  if command -v lsof >/dev/null 2>&1; then
    # Darwin fallback: success exit status means a LISTEN socket exists.
    lsof -nP -iTCP:"${port}" -sTCP:LISTEN >/dev/null 2>&1
    return
  fi
  # Hard failure (exits the whole script, not just the function) when no
  # detection tool is available — callers cannot safely continue.
  echo "Neither ss nor lsof is available for checking whether port ${port} is already in use." >&2
  exit 1
}
if port_in_use "$TEST_HTTP_PORT"; then
echo "Port ${TEST_HTTP_PORT} is already in use. Set ${PORT_ENV_VAR} to a free port." >&2
exit 1
fi
PARRHESIA_TEST_HTTP_PORT="$TEST_HTTP_PORT" mix run --no-halt >"$SERVER_LOG" 2>&1 &
if [[ "$MIX_ENV" == "test" ]]; then
PARRHESIA_TEST_HTTP_PORT="$TEST_HTTP_PORT" mix run --no-halt >"$SERVER_LOG" 2>&1 &
else
PORT="$TEST_HTTP_PORT" mix run --no-halt >"$SERVER_LOG" 2>&1 &
fi
SERVER_PID=$!
READY=0
@@ -68,4 +130,8 @@ if [[ "$READY" -ne 1 ]]; then
exit 1
fi
PARRHESIA_TEST_HTTP_PORT=0 "$@"
if [[ "$MIX_ENV" == "test" ]]; then
PARRHESIA_TEST_HTTP_PORT=0 "$@"
else
"$@"
fi

View File

@@ -2,6 +2,24 @@
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
# Check if the marmot-ts submodule is initialised
if [[ ! -f "$ROOT_DIR/marmot-ts/package.json" ]]; then
echo "marmot-ts submodule is not initialised." >&2
if [[ -t 0 ]]; then
read -rp "Initialise it now? [y/N] " answer
if [[ "$answer" =~ ^[Yy]$ ]]; then
git -C "$ROOT_DIR" submodule update --init marmot-ts
else
echo "Skipping marmot e2e tests."
exit 0
fi
else
echo "Run 'git submodule update --init marmot-ts' to initialise it." >&2
exit 1
fi
fi
cd "$ROOT_DIR/marmot-ts"
if [[ ! -d node_modules ]]; then

View File

@@ -10,9 +10,18 @@ usage:
./scripts/run_nostr_bench.sh [all]
./scripts/run_nostr_bench.sh <connect|echo|event|req> [nostr-bench options...]
Runs nostr-bench against a temporary Parrhesia test server started via
Runs nostr-bench against a temporary Parrhesia prod server started via
./scripts/run_e2e_suite.sh.
Pool tuning:
POOL_SIZE optional override for prod pool size
DB_QUEUE_TARGET_MS optional Repo queue target override
DB_QUEUE_INTERVAL_MS optional Repo queue interval override
Database lifecycle:
PGDATABASE optional override (auto-generated by default)
PARRHESIA_E2E_DROP_DB_ON_EXIT=1 drop benchmark DB on exit (default: 1)
Examples:
./scripts/run_nostr_bench.sh
./scripts/run_nostr_bench.sh connect -c 500 -r 100
@@ -54,6 +63,13 @@ if [[ "$MODE" == "all" && $# -gt 0 ]]; then
exit 1
fi
if [[ -z "${PGDATABASE:-}" ]]; then
export PGDATABASE="parrhesia_bench_prod_$(date +%s)_$RANDOM"
fi
export PARRHESIA_E2E_DROP_DB_ON_EXIT="${PARRHESIA_E2E_DROP_DB_ON_EXIT:-1}"
PARRHESIA_E2E_MIX_ENV="prod" \
exec ./scripts/run_e2e_suite.sh \
bench \
bash -lc '

View File

@@ -350,7 +350,7 @@ async function requestGiftWrapsWithAuth({ relayUrl, relayHttpUrl, signer, recipi
created_at: unixNow(),
tags: [
["challenge", challenge],
["relay", relayHttpUrl],
["relay", relayUrl],
],
content: "",
});

View File

@@ -10,6 +10,7 @@ defmodule Parrhesia.ApplicationTest do
assert is_pid(Process.whereis(Parrhesia.Auth.Supervisor))
assert is_pid(Process.whereis(Parrhesia.Policy.Supervisor))
assert is_pid(Process.whereis(Parrhesia.Web.Endpoint))
assert is_pid(Process.whereis(Parrhesia.Web.MetricsEndpoint))
assert is_pid(Process.whereis(Parrhesia.Tasks.Supervisor))
assert Enum.any?(Supervisor.which_children(Parrhesia.Web.Endpoint), fn {_id, pid, _type,
@@ -18,6 +19,15 @@ defmodule Parrhesia.ApplicationTest do
end)
assert is_pid(Process.whereis(Parrhesia.Auth.Challenges))
if negentropy_enabled?() do
assert is_pid(Process.whereis(Parrhesia.Negentropy.Sessions))
end
end
defp negentropy_enabled? do
:parrhesia
|> Application.get_env(:features, [])
|> Keyword.get(:nip_77_negentropy, true)
end
end

View File

@@ -5,12 +5,17 @@ defmodule Parrhesia.ConfigTest do
assert Parrhesia.Config.get([:limits, :max_frame_bytes]) == 1_048_576
assert Parrhesia.Config.get([:limits, :max_event_bytes]) == 262_144
assert Parrhesia.Config.get([:limits, :max_event_future_skew_seconds]) == 900
assert Parrhesia.Config.get([:limits, :max_event_ingest_per_window]) == 120
assert Parrhesia.Config.get([:limits, :event_ingest_window_seconds]) == 1
assert Parrhesia.Config.get([:limits, :auth_max_age_seconds]) == 600
assert Parrhesia.Config.get([:limits, :max_outbound_queue]) == 256
assert Parrhesia.Config.get([:limits, :max_filter_limit]) == 500
assert Parrhesia.Config.get([:relay_url]) == "ws://localhost:4413/relay"
assert Parrhesia.Config.get([:policies, :auth_required_for_writes]) == false
assert Parrhesia.Config.get([:policies, :marmot_media_max_imeta_tags_per_event]) == 8
assert Parrhesia.Config.get([:policies, :marmot_media_reject_mip04_v1]) == true
assert Parrhesia.Config.get([:policies, :marmot_push_max_trigger_age_seconds]) == 120
assert Parrhesia.Config.get([:features, :verify_event_signatures]) == false
assert Parrhesia.Config.get([:features, :nip_50_search]) == true
assert Parrhesia.Config.get([:features, :marmot_push_notifications]) == false
end

View File

@@ -43,7 +43,7 @@ defmodule Parrhesia.FaultInjectionGroupFlowTest do
payload = JSON.encode!(["EVENT", group_event])
assert {:push, {:text, error_response}, ^state} =
assert {:push, {:text, error_response}, _next_state} =
Connection.handle_in({payload, [opcode: :text]}, state)
assert JSON.decode!(error_response) == ["OK", group_event["id"], false, "error: :db_down"]
@@ -54,7 +54,7 @@ defmodule Parrhesia.FaultInjectionGroupFlowTest do
previous_storage |> Keyword.put(:moderation, PermissiveModeration)
)
assert {:push, {:text, ok_response}, ^state} =
assert {:push, {:text, ok_response}, _next_state} =
Connection.handle_in({payload, [opcode: :text]}, state)
assert JSON.decode!(ok_response) == ["OK", group_event["id"], true, "ok: event stored"]
@@ -87,7 +87,7 @@ defmodule Parrhesia.FaultInjectionGroupFlowTest do
"content" => Base.encode64("newer")
})
assert {:push, {:text, outage_response}, ^state} =
assert {:push, {:text, outage_response}, _next_state} =
Connection.handle_in(
{JSON.encode!(["EVENT", older_event]), [opcode: :text]},
state
@@ -101,7 +101,7 @@ defmodule Parrhesia.FaultInjectionGroupFlowTest do
previous_storage |> Keyword.put(:moderation, PermissiveModeration)
)
assert {:push, {:text, newer_response}, ^state} =
assert {:push, {:text, newer_response}, _next_state} =
Connection.handle_in(
{JSON.encode!(["EVENT", newer_event]), [opcode: :text]},
state
@@ -109,7 +109,7 @@ defmodule Parrhesia.FaultInjectionGroupFlowTest do
assert JSON.decode!(newer_response) == ["OK", newer_event["id"], true, "ok: event stored"]
assert {:push, {:text, older_response}, ^state} =
assert {:push, {:text, older_response}, _next_state} =
Connection.handle_in(
{JSON.encode!(["EVENT", older_event]), [opcode: :text]},
state

View File

@@ -29,7 +29,7 @@ defmodule Parrhesia.FaultInjectionTest do
{:ok, state} = Connection.init(subscription_index: nil)
event = valid_event()
assert {:push, {:text, response}, ^state} =
assert {:push, {:text, response}, _next_state} =
Connection.handle_in({JSON.encode!(["EVENT", event]), [opcode: :text]}, state)
assert JSON.decode!(response) == ["OK", event["id"], false, "error: :db_down"]

View File

@@ -15,4 +15,39 @@ defmodule Parrhesia.Negentropy.SessionsTest do
assert :ok = Sessions.close(server, self(), "sub-neg")
assert {:error, :unknown_session} = Sessions.message(server, self(), "sub-neg", %{})
end
test "rejects oversized NEG payloads" do
server =
start_supervised!(
{Sessions,
name: nil,
max_payload_bytes: 32,
max_sessions_per_owner: 8,
max_total_sessions: 16,
max_idle_seconds: 60,
sweep_interval_seconds: 60}
)
assert {:error, :payload_too_large} =
Sessions.open(server, self(), "sub-neg", %{"delta" => String.duplicate("a", 256)})
end
test "enforces per-owner session limits" do
server =
start_supervised!(
{Sessions,
name: nil,
max_payload_bytes: 1024,
max_sessions_per_owner: 1,
max_total_sessions: 16,
max_idle_seconds: 60,
sweep_interval_seconds: 60}
)
assert {:ok, %{"status" => "open", "cursor" => 0}} =
Sessions.open(server, self(), "sub-1", %{})
assert {:error, :owner_session_limit_reached} =
Sessions.open(server, self(), "sub-2", %{})
end
end

View File

@@ -0,0 +1,63 @@
# Exercises EventValidator's Schnorr signature checking behind the
# :verify_event_signatures feature flag.
defmodule Parrhesia.Protocol.EventValidatorSignatureTest do
  # async: false — both tests mutate global application env (:parrhesia
  # :features) and must not interleave with other env-sensitive tests.
  use ExUnit.Case, async: false

  alias Parrhesia.Protocol.EventValidator

  test "accepts valid Schnorr signatures when verification is enabled" do
    # Flip verification on, restoring the previous feature config on exit.
    previous_features = Application.get_env(:parrhesia, :features, [])

    Application.put_env(
      :parrhesia,
      :features,
      Keyword.put(previous_features, :verify_event_signatures, true)
    )

    on_exit(fn ->
      Application.put_env(:parrhesia, :features, previous_features)
    end)

    # A freshly keypair-signed event must validate cleanly.
    event = signed_event()

    assert :ok = EventValidator.validate(event)
  end

  test "rejects invalid Schnorr signatures when verification is enabled" do
    previous_features = Application.get_env(:parrhesia, :features, [])

    Application.put_env(
      :parrhesia,
      :features,
      Keyword.put(previous_features, :verify_event_signatures, true)
    )

    on_exit(fn ->
      Application.put_env(:parrhesia, :features, previous_features)
    end)

    # Replace the real signature with 128 zero hex chars (right length,
    # wrong value) so only the signature check can fail.
    event =
      signed_event()
      |> Map.put("sig", String.duplicate("0", 128))

    assert {:error, :invalid_signature} = EventValidator.validate(event)
  end

  # Builds a minimal kind-1 event, computes its canonical id via
  # EventValidator.compute_id/1, and Schnorr-signs that id with a fresh
  # x-only secp256k1 keypair. Hex fields are lowercase per Nostr convention.
  defp signed_event do
    {seckey, pubkey} = Secp256k1.keypair(:xonly)

    event = %{
      "pubkey" => Base.encode16(pubkey, case: :lower),
      "created_at" => System.system_time(:second),
      "kind" => 1,
      "tags" => [["e", String.duplicate("a", 64), "wss://relay.example", "reply"]],
      "content" => "signed"
    }

    id = EventValidator.compute_id(event)
    {:ok, id_bin} = Base.decode16(id, case: :lower)
    sig = Secp256k1.schnorr_sign(id_bin, seckey)

    event
    |> Map.put("id", id)
    |> Map.put("sig", Base.encode16(sig, case: :lower))
  end
end

View File

@@ -11,6 +11,24 @@ defmodule Parrhesia.Storage.Adapters.Postgres.EventsLifecycleTest do
:ok
end
test "event tags round-trip without truncation" do
tagged_event =
event(%{
"kind" => 1,
"tags" => [
["e", String.duplicate("a", 64), "wss://relay.example", "reply"],
["-"],
["p", String.duplicate("b", 64), "wss://hint.example"]
],
"content" => "tag-roundtrip"
})
assert {:ok, _event} = Events.put_event(%{}, tagged_event)
assert {:ok, persisted_tagged_event} = Events.get_event(%{}, tagged_event["id"])
assert persisted_tagged_event["tags"] == tagged_event["tags"]
end
test "delete_by_request tombstones owned target events" do
target = event(%{"kind" => 1, "content" => "target"})
assert {:ok, _event} = Events.put_event(%{}, target)
@@ -26,6 +44,31 @@ defmodule Parrhesia.Storage.Adapters.Postgres.EventsLifecycleTest do
assert {:ok, nil} = Events.get_event(%{}, target["id"])
end
test "delete_by_request tombstones addressable targets referenced via a tags" do
author = String.duplicate("4", 64)
target =
event(%{
"pubkey" => author,
"kind" => 30_023,
"tags" => [["d", "topic"]],
"content" => "addressable-target"
})
assert {:ok, _event} = Events.put_event(%{}, target)
delete_request =
event(%{
"pubkey" => author,
"kind" => 5,
"tags" => [["a", "30023:#{author}:topic"]],
"content" => "delete-addressable"
})
assert {:ok, 1} = Events.delete_by_request(%{}, delete_request)
assert {:ok, nil} = Events.get_event(%{}, target["id"])
end
test "vanish hard-deletes events authored by pubkey" do
author = String.duplicate("3", 64)

View File

@@ -248,6 +248,26 @@ defmodule Parrhesia.Storage.Adapters.Postgres.EventsQueryCountTest do
assert {:ok, 0} = Events.count(%{}, filters, requester_pubkeys: [])
end
test "search treats % and _ as literals" do
matching =
persist_event(%{
"kind" => 1,
"content" => "literal 100%_match value"
})
_other =
persist_event(%{
"kind" => 1,
"content" => "literal 100Xmatch value"
})
filters = [%{"kinds" => [1], "search" => "100%_match"}]
assert {:ok, [result]} = Events.query(%{}, filters, [])
assert result["id"] == matching["id"]
assert {:ok, 1} = Events.count(%{}, filters, [])
end
test "query/3 combines search and media metadata tag filters" do
media_hash = String.duplicate("a", 64)

View File

@@ -1,5 +1,5 @@
defmodule Parrhesia.Storage.Adapters.Postgres.EventsTest do
use ExUnit.Case, async: true
use ExUnit.Case, async: false
alias Parrhesia.Protocol.EventValidator
alias Parrhesia.Storage.Adapters.Postgres.Events

View File

@@ -64,7 +64,8 @@ defmodule Parrhesia.Storage.Adapters.Postgres.QueryPlanRegressionTest do
)
plan = Enum.map_join(explain.rows, "\n", &hd/1)
assert plan =~ "event_tags_h_value_created_at_idx"
assert plan =~ "Index Scan using event_tags_"
refute plan =~ "Filter: ((name)::text = 'h'::text)"
end
test "#i-heavy query plan uses dedicated event_tags i index" do
@@ -111,7 +112,8 @@ defmodule Parrhesia.Storage.Adapters.Postgres.QueryPlanRegressionTest do
)
plan = Enum.map_join(explain.rows, "\n", &hd/1)
assert plan =~ "event_tags_i_value_created_at_idx"
assert plan =~ "Index Scan using event_tags_"
refute plan =~ "Filter: ((name)::text = 'i'::text)"
end
defp persist_event(overrides) do

View File

@@ -1,22 +0,0 @@
# NOTE(review): deleted-file hunk — this module was removed when the archiver
# was renamed to Partitions and the archive SQL helper was dropped (see the
# replacement suite Parrhesia.Storage.PartitionsTest).
defmodule Parrhesia.Storage.ArchiverTest do
  # async: false — touches the shared Repo via the SQL sandbox.
  use ExUnit.Case, async: false

  alias Ecto.Adapters.SQL.Sandbox
  alias Parrhesia.Repo
  alias Parrhesia.Storage.Archiver

  setup do
    # Check out a sandboxed connection so queries are isolated per test.
    :ok = Sandbox.checkout(Repo)
    :ok
  end

  test "list_partitions returns partition tables" do
    partitions = Archiver.list_partitions()

    assert is_list(partitions)
  end

  test "archive_sql builds insert-select statement" do
    # Pure string construction — no database interaction.
    assert Archiver.archive_sql("events_2026_03", "events_archive") ==
             "INSERT INTO events_archive SELECT * FROM events_2026_03;"
  end
end

View File

@@ -0,0 +1,66 @@
# Integration tests for the monthly partition lifecycle (create, protect,
# drop) and database-size reporting in Parrhesia.Storage.Partitions.
defmodule Parrhesia.Storage.PartitionsTest do
  # async: false — the tests create and drop real partition tables (DDL)
  # against the shared database.
  use ExUnit.Case, async: false

  alias Ecto.Adapters.SQL.Sandbox
  alias Parrhesia.Repo
  alias Parrhesia.Storage.Partitions

  setup do
    # Check out a sandboxed connection so each test runs isolated.
    :ok = Sandbox.checkout(Repo)
    :ok
  end

  test "list_partitions returns partition tables" do
    partitions = Partitions.list_partitions()

    assert is_list(partitions)
  end

  test "ensure_monthly_partitions creates aligned monthly partitions for events and event_tags" do
    # months_ahead: 1 — expect the reference month (2026-06) plus the next
    # month (2026-07), with matching events_* and event_tags_* tables.
    assert :ok =
             Partitions.ensure_monthly_partitions(reference_date: ~D[2026-06-14], months_ahead: 1)

    monthly_partition_names =
      Partitions.list_monthly_partitions()
      |> Enum.map(& &1.name)

    assert "events_2026_06" in monthly_partition_names
    assert "events_2026_07" in monthly_partition_names
    assert table_exists?("event_tags_2026_06")
    assert table_exists?("event_tags_2026_07")
  end

  test "drop_partition returns an error for protected partitions" do
    # Default/parent tables must never be droppable via the public API.
    assert {:error, :protected_partition} = Partitions.drop_partition("events_default")
    assert {:error, :protected_partition} = Partitions.drop_partition("events")
    assert {:error, :protected_partition} = Partitions.drop_partition("event_tags_default")
    assert {:error, :protected_partition} = Partitions.drop_partition("event_tags")
  end

  test "drop_partition removes aligned event_tags partition for monthly event partition" do
    assert :ok =
             Partitions.ensure_monthly_partitions(reference_date: ~D[2026-08-14], months_ahead: 0)

    assert table_exists?("events_2026_08")
    assert table_exists?("event_tags_2026_08")

    # Dropping the events partition must cascade to its event_tags sibling.
    assert :ok = Partitions.drop_partition("events_2026_08")

    refute table_exists?("events_2026_08")
    refute table_exists?("event_tags_2026_08")
  end

  test "database_size_bytes returns the current database size" do
    assert {:ok, size} = Partitions.database_size_bytes()
    assert is_integer(size)
    assert size >= 0
  end

  # Returns true when `public.<table_name>` resolves to a relation.
  # to_regclass yields NULL (a [[nil]] row) for missing tables; any query
  # failure is conservatively treated as "does not exist".
  defp table_exists?(table_name) when is_binary(table_name) do
    case Repo.query("SELECT to_regclass($1)", ["public." <> table_name]) do
      {:ok, %{rows: [[nil]]}} -> false
      {:ok, %{rows: [[_relation_name]]}} -> true
      _other -> false
    end
  end
end

View File

@@ -0,0 +1,124 @@
# Unit tests for PartitionRetentionWorker's pruning policy, using a stub
# partition-ops module that reports calls back to the test process so the
# worker's decisions can be observed via assert_receive/refute_receive.
defmodule Parrhesia.Tasks.PartitionRetentionWorkerTest do
  use ExUnit.Case, async: false

  alias Parrhesia.Tasks.PartitionRetentionWorker
  alias Parrhesia.TestSupport.PartitionRetentionStubPartitions

  # 1 GiB in bytes, used to express stub database sizes.
  @bytes_per_gib 1_073_741_824

  test "drops oldest partition when max_months_to_keep is exceeded" do
    # Five monthly partitions exist; the policy keeps 3 months and may drop
    # at most one partition per run, so only 2026_01 should go.
    start_supervised!(
      {PartitionRetentionStubPartitions,
       partitions: [
         partition(2026, 1),
         partition(2026, 2),
         partition(2026, 3),
         partition(2026, 4),
         partition(2026, 5)
       ],
       db_size_bytes: 2 * @bytes_per_gib,
       test_pid: self()}
    )

    worker =
      start_supervised!(
        {PartitionRetentionWorker,
         name: nil,
         partition_ops: PartitionRetentionStubPartitions,
         interval_ms: :timer.hours(24),
         months_ahead: 0,
         max_db_bytes: :infinity,
         max_months_to_keep: 3,
         max_partitions_to_drop_per_run: 1,
         today_fun: fn -> ~D[2026-06-15] end}
      )

    assert is_pid(worker)

    assert_receive {:ensure_monthly_partitions, [months_ahead: 0]}
    assert_receive {:drop_partition, "events_2026_01"}
    # Per-run drop cap honoured, and with max_db_bytes: :infinity the size
    # check must not even be consulted.
    refute_receive {:drop_partition, _partition_name}, 20
    refute_receive :database_size_bytes, 20
  end

  test "drops oldest completed partition when size exceeds max_db_bytes" do
    # Stub reports 12 GiB against a 10-byte budget, so the size-based path
    # must query the size and then drop the oldest partition (2026_03).
    start_supervised!(
      {PartitionRetentionStubPartitions,
       partitions: [partition(2026, 3), partition(2026, 4), partition(2026, 5)],
       db_size_bytes: 12 * @bytes_per_gib,
       test_pid: self()}
    )

    worker =
      start_supervised!(
        {PartitionRetentionWorker,
         name: nil,
         partition_ops: PartitionRetentionStubPartitions,
         interval_ms: :timer.hours(24),
         months_ahead: 0,
         max_db_bytes: 10,
         max_months_to_keep: :infinity,
         max_partitions_to_drop_per_run: 1,
         today_fun: fn -> ~D[2026-06-15] end}
      )

    assert is_pid(worker)

    assert_receive {:ensure_monthly_partitions, [months_ahead: 0]}
    assert_receive :database_size_bytes
    assert_receive {:drop_partition, "events_2026_03"}
  end

  test "does not drop partitions when both limits are infinity" do
    # Retention fully disabled: neither the size probe nor any drop should
    # happen regardless of how large the stubbed database is.
    start_supervised!(
      {PartitionRetentionStubPartitions,
       partitions: [partition(2026, 1), partition(2026, 2), partition(2026, 3)],
       db_size_bytes: 50 * @bytes_per_gib,
       test_pid: self()}
    )

    worker =
      start_supervised!(
        {PartitionRetentionWorker,
         name: nil,
         partition_ops: PartitionRetentionStubPartitions,
         interval_ms: :timer.hours(24),
         months_ahead: 0,
         max_db_bytes: :infinity,
         max_months_to_keep: :infinity,
         max_partitions_to_drop_per_run: 1,
         today_fun: fn -> ~D[2026-06-15] end}
      )

    assert is_pid(worker)

    assert_receive {:ensure_monthly_partitions, [months_ahead: 0]}
    refute_receive :database_size_bytes, 20
    refute_receive {:drop_partition, _partition_name}, 20
  end

  # Builds the stub's partition descriptor for a given month: zero-padded
  # table name plus the month's [start, end) bounds as unix timestamps.
  defp partition(year, month) when is_integer(year) and is_integer(month) do
    month_name = month |> Integer.to_string() |> String.pad_leading(2, "0")
    month_start = Date.new!(year, month, 1)
    next_month_start = shift_month(month_start, 1)

    %{
      name: "events_#{year}_#{month_name}",
      year: year,
      month: month,
      month_start_unix: date_to_unix(month_start),
      month_end_unix: date_to_unix(next_month_start)
    }
  end

  # Shifts a date by whole months (day reset to 1), handling year rollover
  # via a flat month index.
  defp shift_month(%Date{} = date, month_delta) when is_integer(month_delta) do
    month_index = date.year * 12 + date.month - 1 + month_delta
    shifted_year = div(month_index, 12)
    shifted_month = rem(month_index, 12) + 1
    Date.new!(shifted_year, shifted_month, 1)
  end

  # Converts a date to the unix timestamp of midnight UTC on that day.
  defp date_to_unix(%Date{} = date) do
    date
    |> DateTime.new!(~T[00:00:00], "Etc/UTC")
    |> DateTime.to_unix()
  end
end

View File

@@ -37,7 +37,7 @@ defmodule Parrhesia.Web.ConformanceTest do
event = valid_event()
assert {:push, {:text, frame}, ^state} =
assert {:push, {:text, frame}, _next_state} =
Connection.handle_in({JSON.encode!(["EVENT", event]), [opcode: :text]}, state)
assert JSON.decode!(frame) == ["OK", event["id"], true, "ok: event stored"]
@@ -54,7 +54,7 @@ defmodule Parrhesia.Web.ConformanceTest do
"content" => "encrypted-welcome-payload"
})
assert {:push, {:text, ok_frame}, ^state} =
assert {:push, {:text, ok_frame}, _next_state} =
Connection.handle_in(
{JSON.encode!(["EVENT", wrapped_welcome]), [opcode: :text]},
state
@@ -64,7 +64,7 @@ defmodule Parrhesia.Web.ConformanceTest do
req_payload = JSON.encode!(["REQ", "sub-welcome", %{"kinds" => [1059], "#p" => [recipient]}])
assert {:push, restricted_frames, ^state} =
assert {:push, restricted_frames, _next_state} =
Connection.handle_in({req_payload, [opcode: :text]}, state)
decoded_restricted =
@@ -106,7 +106,7 @@ defmodule Parrhesia.Web.ConformanceTest do
"content" => Base.encode64("commit-envelope")
})
assert {:push, {:text, commit_ok_frame}, ^state} =
assert {:push, {:text, commit_ok_frame}, _next_state} =
Connection.handle_in(
{JSON.encode!(["EVENT", commit_event]), [opcode: :text]},
state
@@ -124,7 +124,7 @@ defmodule Parrhesia.Web.ConformanceTest do
"content" => "encrypted-welcome-payload"
})
assert {:push, {:text, welcome_ok_frame}, ^state} =
assert {:push, {:text, welcome_ok_frame}, _next_state} =
Connection.handle_in(
{JSON.encode!(["EVENT", wrapped_welcome]), [opcode: :text]},
state
@@ -187,7 +187,7 @@ defmodule Parrhesia.Web.ConformanceTest do
"content" => "encrypted-push"
})
assert {:push, {:text, relay_ok_frame}, ^state} =
assert {:push, {:text, relay_ok_frame}, _next_state} =
Connection.handle_in(
{JSON.encode!(["EVENT", relay_list_event]), [opcode: :text]},
state
@@ -200,7 +200,7 @@ defmodule Parrhesia.Web.ConformanceTest do
"ok: event stored"
]
assert {:push, {:text, trigger_ok_frame}, ^state} =
assert {:push, {:text, trigger_ok_frame}, _next_state} =
Connection.handle_in(
{JSON.encode!(["EVENT", push_trigger]), [opcode: :text]},
state
@@ -232,11 +232,13 @@ defmodule Parrhesia.Web.ConformanceTest do
end
defp valid_auth_event(challenge, pubkey) do
relay_url = Parrhesia.Config.get([:relay_url])
event = %{
"pubkey" => pubkey,
"created_at" => System.system_time(:second),
"kind" => 22_242,
"tags" => [["challenge", challenge]],
"tags" => [["challenge", challenge], ["relay", relay_url]],
"content" => "",
"sig" => String.duplicate("8", 128)
}

View File

@@ -34,7 +34,7 @@ defmodule Parrhesia.Web.ConnectionTest do
payload = JSON.encode!(["COUNT", "sub-count", %{"kinds" => [1]}])
assert {:push, {:text, response}, ^state} =
assert {:push, {:text, response}, _next_state} =
Connection.handle_in({payload, [opcode: :text]}, state)
assert ["COUNT", "sub-count", payload] = JSON.decode!(response)
@@ -62,7 +62,7 @@ defmodule Parrhesia.Web.ConnectionTest do
auth_event = valid_auth_event("wrong-challenge")
payload = JSON.encode!(["AUTH", auth_event])
assert {:push, frames, ^state} = Connection.handle_in({payload, [opcode: :text]}, state)
assert {:push, frames, _next_state} = Connection.handle_in({payload, [opcode: :text]}, state)
decoded = Enum.map(frames, fn {:text, frame} -> JSON.decode!(frame) end)
@@ -73,6 +73,38 @@ defmodule Parrhesia.Web.ConnectionTest do
end)
end
test "AUTH rejects relay tag mismatch" do
  # An AUTH event whose relay tag points at a foreign host must be refused.
  state = connection_state(relay_url: "ws://localhost:4413/relay")

  auth_event =
    valid_auth_event(state.auth_challenge, relay_url: "ws://attacker.example/relay")

  assert {:push, frames, _next_state} =
           Connection.handle_in({JSON.encode!(["AUTH", auth_event]), [opcode: :text]}, state)

  ok_frame =
    frames
    |> Enum.map(fn {:text, frame} -> JSON.decode!(frame) end)
    |> Enum.find(&(List.first(&1) == "OK"))

  assert ["OK", _, false, "invalid: AUTH relay tag mismatch"] = ok_frame
end
test "AUTH rejects stale events" do
  # created_at one second past the configured max age must be rejected.
  state = connection_state(auth_max_age_seconds: 600)
  too_old = System.system_time(:second) - 601
  stale_auth_event = valid_auth_event(state.auth_challenge, created_at: too_old)

  assert {:push, frames, _next_state} =
           Connection.handle_in(
             {JSON.encode!(["AUTH", stale_auth_event]), [opcode: :text]},
             state
           )

  ok_frame =
    frames
    |> Enum.map(fn {:text, frame} -> JSON.decode!(frame) end)
    |> Enum.find(&(List.first(&1) == "OK"))

  assert ["OK", _, false, "invalid: AUTH event is too old"] = ok_frame
end
test "protected event is rejected unless authenticated" do
state = connection_state()
@@ -83,7 +115,7 @@ defmodule Parrhesia.Web.ConnectionTest do
payload = JSON.encode!(["EVENT", event])
assert {:push, frames, ^state} = Connection.handle_in({payload, [opcode: :text]}, state)
assert {:push, frames, _next_state} = Connection.handle_in({payload, [opcode: :text]}, state)
decoded = Enum.map(frames, fn {:text, frame} -> JSON.decode!(frame) end)
@@ -98,7 +130,8 @@ defmodule Parrhesia.Web.ConnectionTest do
req_payload = JSON.encode!(["REQ", "sub-445", %{"kinds" => [445]}])
assert {:push, frames, ^state} = Connection.handle_in({req_payload, [opcode: :text]}, state)
assert {:push, frames, _next_state} =
Connection.handle_in({req_payload, [opcode: :text]}, state)
decoded = Enum.map(frames, fn {:text, frame} -> JSON.decode!(frame) end)
@@ -112,19 +145,99 @@ defmodule Parrhesia.Web.ConnectionTest do
event = valid_event()
payload = JSON.encode!(["EVENT", event])
assert {:push, {:text, response}, ^state} =
assert {:push, {:text, response}, _next_state} =
Connection.handle_in({payload, [opcode: :text]}, state)
assert JSON.decode!(response) == ["OK", event["id"], true, "ok: event stored"]
end
test "ephemeral events are accepted without persistence" do
  # Temporarily enable the ephemeral-events policy, restoring it afterwards.
  previous_policies = Application.get_env(:parrhesia, :policies, [])
  on_exit(fn -> Application.put_env(:parrhesia, :policies, previous_policies) end)

  Application.put_env(
    :parrhesia,
    :policies,
    Keyword.put(previous_policies, :accept_ephemeral_events, true)
  )

  # Kind 20001 falls in the ephemeral range; the id must be recomputed after the edit.
  event =
    valid_event()
    |> Map.put("kind", 20_001)
    |> recalculate_event_id()

  state = connection_state()

  assert {:push, {:text, response}, _next_state} =
           Connection.handle_in({JSON.encode!(["EVENT", event]), [opcode: :text]}, state)

  assert JSON.decode!(response) == ["OK", event["id"], true, "ok: ephemeral event accepted"]

  # Accepted, but never written to storage.
  assert {:ok, nil} = Parrhesia.Storage.events().get_event(%{}, event["id"])
end
test "EVENT ingest enforces per-connection rate limits" do
  # Window allows a single event; the second one in the same window is refused.
  state = connection_state(max_event_ingest_per_window: 1, event_ingest_window_seconds: 60)
  event_a = valid_event(%{"content" => "first"})
  event_b = valid_event(%{"content" => "second"})

  assert {:push, {:text, accepted}, next_state} =
           Connection.handle_in({JSON.encode!(["EVENT", event_a]), [opcode: :text]}, state)

  assert JSON.decode!(accepted) == ["OK", event_a["id"], true, "ok: event stored"]

  # The rejected event must not mutate connection state (pin on next_state).
  assert {:push, {:text, rejected}, ^next_state} =
           Connection.handle_in(
             {JSON.encode!(["EVENT", event_b]), [opcode: :text]},
             next_state
           )

  assert JSON.decode!(rejected) == [
           "OK",
           event_b["id"],
           false,
           "rate-limited: too many EVENT messages"
         ]
end
test "EVENT ingest enforces max event bytes" do
  # A 256-byte content easily exceeds the 128-byte cap configured here.
  state = connection_state(max_event_bytes: 128)

  oversized_event =
    %{"content" => String.duplicate("x", 256)}
    |> valid_event()
    |> recalculate_event_id()

  assert {:push, {:text, response}, _next_state} =
           Connection.handle_in(
             {JSON.encode!(["EVENT", oversized_event]), [opcode: :text]},
             state
           )

  assert JSON.decode!(response) == [
           "OK",
           oversized_event["id"],
           false,
           "invalid: event exceeds max event size"
         ]
end
test "text frame size is rejected before JSON decoding" do
  # 17 raw bytes against a 16-byte cap: refused without ever parsing JSON.
  state = connection_state(max_frame_bytes: 16)
  oversized_frame = String.duplicate("x", 17)

  assert {:push, {:text, response}, _next_state} =
           Connection.handle_in({oversized_frame, [opcode: :text]}, state)

  assert JSON.decode!(response) == [
           "NOTICE",
           "invalid: websocket frame exceeds max frame size"
         ]
end
test "invalid EVENT replies with OK false invalid prefix" do
state = connection_state()
event = valid_event() |> Map.put("sig", "nope")
payload = JSON.encode!(["EVENT", event])
assert {:push, {:text, response}, ^state} =
assert {:push, {:text, response}, _next_state} =
Connection.handle_in({payload, [opcode: :text]}, state)
assert JSON.decode!(response) == [
@@ -147,7 +260,7 @@ defmodule Parrhesia.Web.ConnectionTest do
payload = JSON.encode!(["EVENT", event])
assert {:push, {:text, response}, ^state} =
assert {:push, {:text, response}, _next_state} =
Connection.handle_in({payload, [opcode: :text]}, state)
assert JSON.decode!(response) == [
@@ -170,7 +283,7 @@ defmodule Parrhesia.Web.ConnectionTest do
payload = JSON.encode!(["EVENT", event])
assert {:push, {:text, response}, ^state} =
assert {:push, {:text, response}, _next_state} =
Connection.handle_in({payload, [opcode: :text]}, state)
assert JSON.decode!(response) == [
@@ -204,7 +317,7 @@ defmodule Parrhesia.Web.ConnectionTest do
payload = JSON.encode!(["EVENT", event])
assert {:push, {:text, response}, ^state} =
assert {:push, {:text, response}, _next_state} =
Connection.handle_in({payload, [opcode: :text]}, state)
assert JSON.decode!(response) == [
@@ -255,7 +368,7 @@ defmodule Parrhesia.Web.ConnectionTest do
payload = JSON.encode!(["EVENT", event])
assert {:push, {:text, response}, ^state} =
assert {:push, {:text, response}, _next_state} =
Connection.handle_in({payload, [opcode: :text]}, state)
assert JSON.decode!(response) == [
@@ -306,12 +419,12 @@ defmodule Parrhesia.Web.ConnectionTest do
payload = JSON.encode!(["EVENT", event])
assert {:push, {:text, first_response}, ^state} =
assert {:push, {:text, first_response}, _next_state} =
Connection.handle_in({payload, [opcode: :text]}, state)
assert JSON.decode!(first_response) == ["OK", event["id"], true, "ok: event stored"]
assert {:push, {:text, second_response}, ^state} =
assert {:push, {:text, second_response}, _next_state} =
Connection.handle_in({payload, [opcode: :text]}, state)
assert JSON.decode!(second_response) == [
@@ -327,7 +440,7 @@ defmodule Parrhesia.Web.ConnectionTest do
open_payload = JSON.encode!(["NEG-OPEN", "neg-1", %{"cursor" => 0}])
assert {:push, {:text, open_response}, ^state} =
assert {:push, {:text, open_response}, _next_state} =
Connection.handle_in({open_payload, [opcode: :text]}, state)
assert ["NEG-MSG", "neg-1", %{"status" => "open", "cursor" => 0}] =
@@ -335,7 +448,7 @@ defmodule Parrhesia.Web.ConnectionTest do
close_payload = JSON.encode!(["NEG-CLOSE", "neg-1"])
assert {:push, {:text, close_response}, ^state} =
assert {:push, {:text, close_response}, _next_state} =
Connection.handle_in({close_payload, [opcode: :text]}, state)
assert JSON.decode!(close_response) == ["NEG-MSG", "neg-1", %{"status" => "closed"}]
@@ -470,14 +583,15 @@ defmodule Parrhesia.Web.ConnectionTest do
}
end
defp valid_auth_event(challenge) do
now = System.system_time(:second)
defp valid_auth_event(challenge, opts \\ []) do
now = Keyword.get(opts, :created_at, System.system_time(:second))
relay_url = Keyword.get(opts, :relay_url, Parrhesia.Config.get([:relay_url]))
base = %{
"pubkey" => String.duplicate("9", 64),
"created_at" => now,
"kind" => 22_242,
"tags" => [["challenge", challenge]],
"tags" => [["challenge", challenge], ["relay", relay_url]],
"content" => "",
"sig" => String.duplicate("8", 128)
}
@@ -510,7 +624,7 @@ defmodule Parrhesia.Web.ConnectionTest do
end
end
defp valid_event do
defp valid_event(overrides \\ %{}) do
base_event = %{
"pubkey" => String.duplicate("1", 64),
"created_at" => System.system_time(:second),
@@ -520,6 +634,12 @@ defmodule Parrhesia.Web.ConnectionTest do
"sig" => String.duplicate("3", 128)
}
Map.put(base_event, "id", EventValidator.compute_id(base_event))
base_event
|> Map.merge(overrides)
|> recalculate_event_id()
end
# Re-derives the "id" field after any mutation so the event stays self-consistent.
defp recalculate_event_id(event) do
  fresh_id = EventValidator.compute_id(event)
  Map.put(event, "id", fresh_id)
end
end

View File

@@ -43,13 +43,68 @@ defmodule Parrhesia.Web.RouterTest do
assert 11 in body["supported_nips"]
end
test "GET /metrics returns prometheus payload" do
test "GET /metrics returns prometheus payload for private-network clients" do
  # Default test client IP counts as private, so the scrape succeeds.
  conn = Router.call(conn(:get, "/metrics"), [])

  assert conn.status == 200
  assert get_resp_header(conn, "content-type") == ["text/plain; charset=utf-8"]
end
test "GET /metrics denies public-network clients by default" do
  # 8.8.8.8 is a public address; the private-network guard must 403 it.
  public_conn = %{conn(:get, "/metrics") | remote_ip: {8, 8, 8, 8}}
  conn = Router.call(public_conn, [])

  assert conn.status == 403
  assert conn.resp_body == "forbidden"
end
test "GET /metrics can be disabled on the main endpoint" do
  # Flip the metrics flag off for this test only, restoring it on exit.
  previous_metrics = Application.get_env(:parrhesia, :metrics, [])
  on_exit(fn -> Application.put_env(:parrhesia, :metrics, previous_metrics) end)

  Application.put_env(
    :parrhesia,
    :metrics,
    Keyword.put(previous_metrics, :enabled_on_main_endpoint, false)
  )

  conn = Router.call(conn(:get, "/metrics"), [])

  assert conn.status == 404
  assert conn.resp_body == "not found"
end
test "GET /metrics accepts bearer auth when configured" do
  # Open the endpoint to any network but require a bearer token instead.
  previous_metrics = Application.get_env(:parrhesia, :metrics, [])
  on_exit(fn -> Application.put_env(:parrhesia, :metrics, previous_metrics) end)

  Application.put_env(
    :parrhesia,
    :metrics,
    previous_metrics
    |> Keyword.put(:private_networks_only, false)
    |> Keyword.put(:auth_token, "secret-token")
  )

  # Without the token the request is refused.
  denied_conn = Router.call(conn(:get, "/metrics"), [])
  assert denied_conn.status == 403

  # With the matching Authorization header the scrape goes through.
  allowed_conn =
    conn(:get, "/metrics")
    |> put_req_header("authorization", "Bearer secret-token")
    |> Router.call([])

  assert allowed_conn.status == 200
end
test "POST /management requires authorization" do
conn =
conn(:post, "/management", JSON.encode!(%{"method" => "ping", "params" => %{}}))