Compare commits
39 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| e557eba437 | |||
| a652bf3448 | |||
| f518a25cab | |||
| 02f2584757 | |||
| e8fd6c7328 | |||
| 1f608ee2bd | |||
| 5f4f086d28 | |||
| 9be3b6ca52 | |||
| 769177a63e | |||
| 987415d80c | |||
| d119d21d99 | |||
| 5d4d181d00 | |||
| fd17026c32 | |||
| 14fb0f7ffb | |||
| 186d0f98ee | |||
| 39dbc069a7 | |||
| 4c2c93deb3 | |||
| b628770517 | |||
| 705971cbc4 | |||
| b86b5db78c | |||
| 5577445e80 | |||
| 1a4572013d | |||
| 5c2fadc28e | |||
| 7faf8c84c8 | |||
| 889d630c12 | |||
| 19664ac56c | |||
| 708e26e4f4 | |||
| 8c8d5a8abb | |||
| 0fbd7008a1 | |||
| bfdb06b203 | |||
| 36365710a8 | |||
| e12085af2f | |||
| 18e429e05a | |||
| c7a9f152f9 | |||
| 238b44ff03 | |||
| 680a73ee33 | |||
| 63d3e7d55f | |||
| 54a54c026b | |||
| d348eab69e |
20
.env.example
Normal file
20
.env.example
Normal file
@@ -0,0 +1,20 @@
|
||||
PARRHESIA_IMAGE=parrhesia:latest
|
||||
PARRHESIA_HOST_PORT=4000
|
||||
|
||||
POSTGRES_DB=parrhesia
|
||||
POSTGRES_USER=parrhesia
|
||||
POSTGRES_PASSWORD=parrhesia
|
||||
|
||||
DATABASE_URL=ecto://parrhesia:parrhesia@db:5432/parrhesia
|
||||
POOL_SIZE=20
|
||||
|
||||
# Optional runtime overrides:
|
||||
# PARRHESIA_RELAY_URL=ws://localhost:4000/relay
|
||||
# PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES=false
|
||||
# PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_READS=false
|
||||
# PARRHESIA_POLICIES_MIN_POW_DIFFICULTY=0
|
||||
# PARRHESIA_FEATURES_VERIFY_EVENT_SIGNATURES=true
|
||||
# PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT=true
|
||||
# PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY=true
|
||||
# PARRHESIA_METRICS_AUTH_TOKEN=
|
||||
# PARRHESIA_EXTRA_CONFIG=/config/parrhesia.runtime.exs
|
||||
121
.github/workflows/ci.yaml
vendored
Normal file
121
.github/workflows/ci.yaml
vendored
Normal file
@@ -0,0 +1,121 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: ["**"]
|
||||
pull_request:
|
||||
branches: ["**"]
|
||||
|
||||
env:
|
||||
MIX_ENV: test
|
||||
MIX_OS_DEPS_COMPILE_PARTITION_COUNT: 8
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
test:
|
||||
name: ${{ matrix.name }}
|
||||
runs-on: ubuntu-24.04
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- name: Test (OTP 27.2 / Elixir 1.18.2)
|
||||
otp: "27.2"
|
||||
elixir: "1.18.2"
|
||||
main: false
|
||||
- name: Test (OTP 28.4 / Elixir 1.19.4 + Marmot E2E)
|
||||
otp: "28.4"
|
||||
elixir: "1.19.4"
|
||||
main: true
|
||||
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:16-alpine
|
||||
env:
|
||||
POSTGRES_USER: postgres
|
||||
POSTGRES_PASSWORD: postgres
|
||||
POSTGRES_DB: app_test
|
||||
ports:
|
||||
- 5432:5432
|
||||
options: >-
|
||||
--health-cmd "pg_isready -U postgres"
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 5
|
||||
|
||||
env:
|
||||
PGHOST: localhost
|
||||
PGPORT: 5432
|
||||
PGUSER: postgres
|
||||
PGPASSWORD: postgres
|
||||
PGDATABASE: app_test
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Set up Elixir + OTP
|
||||
uses: erlef/setup-beam@v1
|
||||
with:
|
||||
otp-version: ${{ matrix.otp }}
|
||||
elixir-version: ${{ matrix.elixir }}
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 24
|
||||
|
||||
# Cache deps/ directory — keyed on mix.lock
|
||||
- name: Cache Mix deps
|
||||
uses: actions/cache@v4
|
||||
id: deps-cache
|
||||
with:
|
||||
path: deps
|
||||
key: ${{ runner.os }}-mix-deps-${{ hashFiles('mix.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-mix-deps-
|
||||
|
||||
# Cache _build/ — keyed on mix.lock + OTP/Elixir versions
|
||||
- name: Cache _build
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: _build
|
||||
key: ${{ runner.os }}-mix-build-${{ matrix.otp }}-${{ matrix.elixir }}-${{ hashFiles('mix.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-mix-build-${{ matrix.otp }}-${{ matrix.elixir }}-
|
||||
|
||||
- name: Install Mix dependencies
|
||||
if: steps.deps-cache.outputs.cache-hit != 'true'
|
||||
run: mix deps.get
|
||||
|
||||
- name: Compile (warnings as errors)
|
||||
if: ${{ matrix.main }}
|
||||
run: mix compile --warnings-as-errors
|
||||
|
||||
- name: Check formatting
|
||||
if: ${{ matrix.main }}
|
||||
run: mix format --check-formatted
|
||||
|
||||
- name: Credo
|
||||
if: ${{ matrix.main }}
|
||||
run: mix credo --strict --all
|
||||
|
||||
- name: Check for unused locked deps
|
||||
if: ${{ matrix.main }}
|
||||
run: |
|
||||
mix deps.unlock --unused
|
||||
git diff --exit-code -- mix.lock
|
||||
|
||||
- name: Run tests
|
||||
run: mix test --color
|
||||
|
||||
- name: Run Node Sync E2E tests
|
||||
if: ${{ matrix.main }}
|
||||
run: mix test.node_sync_e2e
|
||||
|
||||
- name: Run Marmot E2E tests
|
||||
if: ${{ matrix.main }}
|
||||
run: mix test.marmot_e2e
|
||||
185
.github/workflows/release.yaml
vendored
Normal file
185
.github/workflows/release.yaml
vendored
Normal file
@@ -0,0 +1,185 @@
|
||||
name: Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "v*.*.*"
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
push:
|
||||
description: "Push image to GHCR?"
|
||||
required: false
|
||||
default: "true"
|
||||
type: choice
|
||||
options: ["true", "false"]
|
||||
|
||||
env:
|
||||
REGISTRY: ghcr.io
|
||||
IMAGE_NAME: ${{ github.repository }}
|
||||
FLAKE_OUTPUT: packages.x86_64-linux.dockerImage
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
|
||||
jobs:
|
||||
test:
|
||||
name: Release Gate
|
||||
runs-on: ubuntu-24.04
|
||||
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:16-alpine
|
||||
env:
|
||||
POSTGRES_USER: postgres
|
||||
POSTGRES_PASSWORD: postgres
|
||||
POSTGRES_DB: app_test
|
||||
ports:
|
||||
- 5432:5432
|
||||
options: >-
|
||||
--health-cmd "pg_isready -U postgres"
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 5
|
||||
|
||||
env:
|
||||
MIX_ENV: test
|
||||
PGHOST: localhost
|
||||
PGPORT: 5432
|
||||
PGUSER: postgres
|
||||
PGPASSWORD: postgres
|
||||
PGDATABASE: app_test
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Set up Elixir + OTP
|
||||
uses: erlef/setup-beam@v1
|
||||
with:
|
||||
otp-version: "28.4"
|
||||
elixir-version: "1.19.4"
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 24
|
||||
|
||||
- name: Cache Mix deps
|
||||
uses: actions/cache@v4
|
||||
id: deps-cache
|
||||
with:
|
||||
path: deps
|
||||
key: ${{ runner.os }}-mix-deps-${{ hashFiles('mix.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-mix-deps-
|
||||
|
||||
- name: Cache _build
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: _build
|
||||
key: ${{ runner.os }}-mix-build-28.4-1.19.4-${{ hashFiles('mix.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-mix-build-28.4-1.19.4-
|
||||
|
||||
- name: Install Mix dependencies
|
||||
if: steps.deps-cache.outputs.cache-hit != 'true'
|
||||
run: mix deps.get
|
||||
|
||||
- name: Check tag matches Mix version
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
|
||||
run: |
|
||||
TAG_VERSION="${GITHUB_REF_NAME#v}"
|
||||
MIX_VERSION="$(mix run --no-start -e 'IO.puts(Mix.Project.config()[:version])' | tail -n 1)"
|
||||
|
||||
if [ "$TAG_VERSION" != "$MIX_VERSION" ]; then
|
||||
echo "Tag version $TAG_VERSION does not match mix.exs version $MIX_VERSION"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Compile
|
||||
run: mix compile --warnings-as-errors
|
||||
|
||||
- name: Check formatting
|
||||
run: mix format --check-formatted
|
||||
|
||||
- name: Credo
|
||||
run: mix credo --strict --all
|
||||
|
||||
- name: Run tests
|
||||
run: mix test --color
|
||||
|
||||
- name: Run Node Sync E2E
|
||||
run: mix test.node_sync_e2e
|
||||
|
||||
- name: Run Marmot E2E
|
||||
run: mix test.marmot_e2e
|
||||
|
||||
- name: Check for unused locked deps
|
||||
run: |
|
||||
mix deps.unlock --unused
|
||||
git diff --exit-code -- mix.lock
|
||||
|
||||
build-and-push:
|
||||
name: Build and publish image
|
||||
runs-on: ubuntu-24.04
|
||||
needs: test
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install Nix
|
||||
uses: DeterminateSystems/nix-installer-action@main
|
||||
with:
|
||||
extra-conf: |
|
||||
experimental-features = nix-command flakes
|
||||
substituters = https://cache.nixos.org
|
||||
trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=
|
||||
|
||||
- name: Magic Nix Cache
|
||||
uses: DeterminateSystems/magic-nix-cache-action@main
|
||||
|
||||
- name: Extract image metadata
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
tags: |
|
||||
type=semver,pattern={{version}}
|
||||
type=semver,pattern={{major}}.{{minor}}
|
||||
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/v') }}
|
||||
type=sha,prefix=sha-,format=short
|
||||
|
||||
- name: Build Docker image with Nix
|
||||
id: build
|
||||
run: |
|
||||
nix build .#${{ env.FLAKE_OUTPUT }} --out-link ./docker-image-result
|
||||
echo "archive_path=$(readlink -f ./docker-image-result)" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Push image to GHCR
|
||||
env:
|
||||
TAGS: ${{ steps.meta.outputs.tags }}
|
||||
SHOULD_PUSH: ${{ github.event.inputs.push != 'false' }}
|
||||
ARCHIVE_PATH: ${{ steps.build.outputs.archive_path }}
|
||||
run: |
|
||||
if [ "$SHOULD_PUSH" != "true" ]; then
|
||||
echo "Skipping push"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
IMAGE_ARCHIVE="docker-archive:${ARCHIVE_PATH}"
|
||||
|
||||
while IFS= read -r TAG; do
|
||||
if [ -n "$TAG" ]; then
|
||||
echo "Pushing $TAG"
|
||||
nix run nixpkgs#skopeo -- copy \
|
||||
--dest-creds "${{ github.actor }}:${{ secrets.GITHUB_TOKEN }}" \
|
||||
"$IMAGE_ARCHIVE" \
|
||||
"docker://$TAG"
|
||||
fi
|
||||
done <<< "$TAGS"
|
||||
22
BENCHMARK.md
22
BENCHMARK.md
@@ -1,6 +1,6 @@
|
||||
Running 2 comparison run(s)...
|
||||
Versions:
|
||||
parrhesia 0.2.0
|
||||
parrhesia 0.4.0
|
||||
strfry 1.0.4 (nixpkgs)
|
||||
nostr-rs-relay 0.9.0
|
||||
nostr-bench 0.4.0
|
||||
@@ -16,18 +16,18 @@ Versions:
|
||||
=== Bench comparison (averages) ===
|
||||
metric parrhesia strfry nostr-rs-relay strfry/parrhesia nostr-rs/parrhesia
|
||||
-------------------------- --------- -------- -------------- ---------------- ------------------
|
||||
connect avg latency (ms) ↓ 10.00 3.00 2.50 0.30x 0.25x
|
||||
connect max latency (ms) ↓ 18.50 5.00 4.00 0.27x 0.22x
|
||||
echo throughput (TPS) ↑ 76972.00 68204.50 158779.00 0.89x 2.06x
|
||||
echo throughput (MiB/s) ↑ 42.15 38.15 86.95 0.91x 2.06x
|
||||
event throughput (TPS) ↑ 1749.00 3560.00 787.50 2.04x 0.45x
|
||||
event throughput (MiB/s) ↑ 1.15 2.30 0.50 2.00x 0.43x
|
||||
req throughput (TPS) ↑ 2463.00 1808.00 822.00 0.73x 0.33x
|
||||
req throughput (MiB/s) ↑ 13.00 11.70 2.25 0.90x 0.17x
|
||||
connect avg latency (ms) ↓ 10.50 4.00 3.00 0.38x 0.29x
|
||||
connect max latency (ms) ↓ 19.50 7.50 4.00 0.38x 0.21x
|
||||
echo throughput (TPS) ↑ 78520.00 60353.00 164420.50 0.77x 2.09x
|
||||
echo throughput (MiB/s) ↑ 43.00 33.75 90.05 0.78x 2.09x
|
||||
event throughput (TPS) ↑ 1919.50 3520.50 781.00 1.83x 0.41x
|
||||
event throughput (MiB/s) ↑ 1.25 2.25 0.50 1.80x 0.40x
|
||||
req throughput (TPS) ↑ 4608.50 1809.50 875.50 0.39x 0.19x
|
||||
req throughput (MiB/s) ↑ 26.20 11.75 2.40 0.45x 0.09x
|
||||
|
||||
Legend: ↑ higher is better, ↓ lower is better.
|
||||
Ratio columns are server/parrhesia (for ↓ metrics, <1.00x means that server is faster).
|
||||
|
||||
Run details:
|
||||
run 1: parrhesia(echo_tps=78336, event_tps=1796, req_tps=2493, connect_avg_ms=9) | strfry(echo_tps=70189, event_tps=3567, req_tps=1809, connect_avg_ms=3) | nostr-rs-relay(echo_tps=149317, event_tps=786, req_tps=854, connect_avg_ms=2)
|
||||
run 2: parrhesia(echo_tps=75608, event_tps=1702, req_tps=2433, connect_avg_ms=11) | strfry(echo_tps=66220, event_tps=3553, req_tps=1807, connect_avg_ms=3) | nostr-rs-relay(echo_tps=168241, event_tps=789, req_tps=790, connect_avg_ms=3)
|
||||
run 1: parrhesia(echo_tps=78892, event_tps=1955, req_tps=4671, connect_avg_ms=10) | strfry(echo_tps=59132, event_tps=3462, req_tps=1806, connect_avg_ms=4) | nostr-rs-relay(echo_tps=159714, event_tps=785, req_tps=873, connect_avg_ms=3)
|
||||
run 2: parrhesia(echo_tps=78148, event_tps=1884, req_tps=4546, connect_avg_ms=11) | strfry(echo_tps=61574, event_tps=3579, req_tps=1813, connect_avg_ms=4) | nostr-rs-relay(echo_tps=169127, event_tps=777, req_tps=878, connect_avg_ms=3)
|
||||
|
||||
86
PROGRESS.md
86
PROGRESS.md
@@ -1,86 +0,0 @@
|
||||
# PROGRESS (ephemeral)
|
||||
|
||||
Implementation checklist for Parrhesia relay.
|
||||
|
||||
## Phase 0 — foundation
|
||||
|
||||
- [x] Confirm architecture doc with final NIP scope (`docs/ARCH.md`)
|
||||
- [x] Add core deps (websocket/http server, ecto_sql/postgrex, telemetry, test tooling)
|
||||
- [x] Establish application config structure (limits, policies, feature flags)
|
||||
- [x] Wire initial supervision tree skeleton
|
||||
|
||||
## Phase 1 — protocol core (NIP-01)
|
||||
|
||||
- [x] Implement websocket endpoint + per-connection process
|
||||
- [x] Implement message decode/encode for `EVENT`, `REQ`, `CLOSE`
|
||||
- [x] Implement strict event validation (`id`, `sig`, shape, timestamps)
|
||||
- [x] Implement filter evaluation engine (AND/OR semantics)
|
||||
- [x] Implement subscription lifecycle + `EOSE` behavior
|
||||
- [x] Implement canonical `OK`, `NOTICE`, `CLOSED` responses + prefixes
|
||||
|
||||
## Phase 2 — storage boundary + postgres adapter
|
||||
|
||||
- [x] Define `Parrhesia.Storage.*` behaviors (events/moderation/groups/admin)
|
||||
- [x] Implement Postgres adapter modules behind behaviors
|
||||
- [x] Create migrations for events, tags, moderation, membership
|
||||
- [x] Implement replaceable/addressable semantics at storage layer
|
||||
- [x] Add adapter contract test suite
|
||||
|
||||
## Phase 3 — fanout + performance primitives
|
||||
|
||||
- [x] Build ETS-backed subscription index
|
||||
- [x] Implement candidate narrowing by kind/author/tag
|
||||
- [x] Add bounded outbound queues/backpressure per connection
|
||||
- [x] Add telemetry for ingest/query/fanout latency + queue depth
|
||||
|
||||
## Phase 4 — relay metadata and auth
|
||||
|
||||
- [x] NIP-11 endpoint (`application/nostr+json`)
|
||||
- [x] NIP-42 challenge/auth flow
|
||||
- [x] Enforce NIP-70 protected events (default reject, auth override)
|
||||
- [x] Add auth-required/restricted response paths for writes and reqs
|
||||
|
||||
## Phase 5 — lifecycle and moderation features
|
||||
|
||||
- [x] NIP-09 deletion requests
|
||||
- [x] NIP-40 expiration handling + purge worker
|
||||
- [x] NIP-62 vanish requests (hard delete semantics)
|
||||
- [x] NIP-13 PoW gate (configurable minimum)
|
||||
- [x] Moderation tables + policy hooks (ban/allow/event/ip)
|
||||
|
||||
## Phase 6 — query extensions
|
||||
|
||||
- [x] NIP-45 `COUNT` (exact)
|
||||
- [x] Optional HLL response support
|
||||
- [x] NIP-50 search (`search` filter + ranking)
|
||||
- [x] NIP-77 negentropy (`NEG-OPEN/MSG/CLOSE`)
|
||||
|
||||
## Phase 7 — private messaging, groups, and MLS
|
||||
|
||||
- [x] NIP-17/59 recipient-protected giftwrap read path (`kind:1059`)
|
||||
- [x] NIP-29 group event policy + relay metadata events
|
||||
- [x] NIP-43 membership request flow (`28934/28935/28936`, `8000/8001`, `13534`)
|
||||
- [x] Marmot MIP relay surface: `443`, `445`, `10051` handling
|
||||
- [x] MLS retention policy + tests for commit race edge cases
|
||||
|
||||
## Phase 8 — management API + operations
|
||||
|
||||
- [x] NIP-86 HTTP management endpoint
|
||||
- [x] NIP-98 auth validation for management calls
|
||||
- [x] Implement supported management methods + audit logging
|
||||
- [x] Build health/readiness and Prometheus-compatible `/metrics` endpoints
|
||||
|
||||
## Phase 9 — full test + hardening pass
|
||||
|
||||
- [x] Unit + integration + property test coverage for all critical modules
|
||||
- [x] End-to-end websocket conformance scenarios
|
||||
- [x] Load/soak tests with target p95 latency budgets
|
||||
- [x] Fault-injection tests (DB outages, high churn, restart recovery)
|
||||
- [x] Final precommit run and fix all issues
|
||||
|
||||
## Nice-to-have / backlog
|
||||
|
||||
- [x] Multi-node fanout via PG LISTEN/NOTIFY or external bus
|
||||
- [x] Partitioned event storage + archival strategy
|
||||
- [x] Alternate storage adapter prototype (non-Postgres)
|
||||
- [x] Compatibility mode for Marmot protocol transition (not required per user)
|
||||
@@ -1,61 +0,0 @@
|
||||
# PROGRESS_MARMOT (ephemeral)
|
||||
|
||||
Marmot-specific implementation checklist for Parrhesia relay interoperability.
|
||||
|
||||
Spec source: `~/marmot/README.md` + MIP-00..05.
|
||||
|
||||
## M0 — spec lock + interoperability profile
|
||||
|
||||
- [ ] Freeze target profile to MIP-00..03 (mandatory)
|
||||
- [ ] Keep MIP-04 and MIP-05 behind feature flags (optional)
|
||||
- [ ] Document that legacy NIP-EE is superseded and no dedicated transition compatibility mode is planned
|
||||
- [ ] Publish operator-facing compatibility statement in docs
|
||||
|
||||
## M1 — MIP-00 (credentials + keypackages)
|
||||
|
||||
- [x] Enforce kind `443` required tags and encoding (`encoding=base64`)
|
||||
- [x] Validate `mls_protocol_version`, `mls_ciphersuite`, `mls_extensions`, `relays`, and `i` tag shape
|
||||
- [x] Add efficient `#i` query/index path for KeyPackageRef lookup
|
||||
- [x] Keep replaceable behavior for kind `10051` relay-list events
|
||||
- [x] Add conformance tests for valid/invalid KeyPackage envelopes
|
||||
|
||||
## M2 — MIP-01 (group construction data expectations)
|
||||
|
||||
- [x] Enforce relay-side routing prerequisites for Marmot groups (`#h` query path)
|
||||
- [x] Keep deterministic ordering for group-event catch-up (`created_at` + `id` tie-break)
|
||||
- [x] Add guardrails for group metadata traffic volume and filter windows
|
||||
- [x] Add tests for `#h` routing and ordering invariants
|
||||
|
||||
## M3 — MIP-02 (welcome events)
|
||||
|
||||
- [x] Support wrapped Welcome delivery via NIP-59 (`1059`) recipient-gated reads
|
||||
- [x] Validate relay behavior for unsigned inner Welcome semantics (kind `444` envelope expectations)
|
||||
- [x] Ensure durability/ack semantics support Commit-then-Welcome sequencing requirements
|
||||
- [x] Add negative tests for malformed wrapped Welcome payloads
|
||||
|
||||
## M4 — MIP-03 (group events)
|
||||
|
||||
- [x] Enforce kind `445` envelope validation (`#h` tag presence/shape, base64 content shape)
|
||||
- [x] Keep relay MLS-agnostic (no MLS decrypt/inspect in relay hot path)
|
||||
- [x] Add configurable retention policy for kind `445` traffic
|
||||
- [x] Add tests for high-volume fanout behavior and deterministic query results
|
||||
|
||||
## M5 — optional MIP-04 (encrypted media)
|
||||
|
||||
- [x] Accept/store MIP-04 metadata-bearing events as regular Nostr events
|
||||
- [x] Add policy hooks for media metadata limits and abuse controls
|
||||
- [x] Add tests for search/filter interactions with media metadata tags
|
||||
|
||||
## M6 — optional MIP-05 (push notifications)
|
||||
|
||||
- [x] Accept/store notification coordination events required by enabled profile
|
||||
- [x] Add policy/rate-limit controls for push-related event traffic
|
||||
- [x] Add abuse and replay protection tests for notification trigger paths
|
||||
|
||||
## M7 — hardening + operations
|
||||
|
||||
- [x] Add Marmot-focused telemetry breakdowns (ingest/query/fanout, queue pressure)
|
||||
- [x] Add query-plan regression checks for `#h` and `#i` heavy workloads
|
||||
- [x] Add fault-injection scenarios for relay outage/reordering behavior in group flows
|
||||
- [x] Add docs for operator limits tuned for Marmot traffic patterns
|
||||
- [x] Final `mix precommit` before merge
|
||||
390
README.md
390
README.md
@@ -1,12 +1,30 @@
|
||||
# Parrhesia
|
||||
|
||||
<img alt="Parrhesia Logo" src="./docs/logo.svg" width="150" align="right">
|
||||
|
||||
Parrhesia is a Nostr relay server written in Elixir/OTP with PostgreSQL storage.
|
||||
|
||||
**ALPHA STATUS – BREAKING CHANGES MAY OCCUR!**
|
||||
|
||||
- Advanced Querying: Full-text search (NIP-50) and COUNT queries (NIP-45).
|
||||
- Secure Messaging: First-class support for Marmot MLS-encrypted groups and NIP-17/44/59 gift-wrapped DMs.
|
||||
- Identity & Auth: NIP-42 authentication flows and NIP-86 management API with NIP-98 HTTP auth.
|
||||
- Data Integrity: Negentropy-based synchronization and NIP-62 vanish flows.
|
||||
|
||||
It exposes:
|
||||
- a WebSocket relay endpoint at `/relay`
|
||||
|
||||
- listener-configurable WS/HTTP ingress, with a default `public` listener on port `4413`
|
||||
- a WebSocket relay endpoint at `/relay` on listeners that enable the `nostr` feature
|
||||
- NIP-11 relay info on `GET /relay` with `Accept: application/nostr+json`
|
||||
- operational HTTP endpoints (`/health`, `/ready`, `/metrics`)
|
||||
- a NIP-86-style management API at `POST /management` (NIP-98 auth)
|
||||
- operational HTTP endpoints such as `/health`, `/ready`, and `/metrics` on listeners that enable them
|
||||
- a NIP-86-style management API at `POST /management` on listeners that enable the `admin` feature
|
||||
|
||||
Listeners can run in plain HTTP, HTTPS, mutual TLS, or proxy-terminated TLS modes. The current TLS implementation supports:
|
||||
|
||||
- server TLS on listener sockets
|
||||
- optional client certificate admission with listener-side client pin checks
|
||||
- proxy-asserted client TLS identity on trusted proxy hops
|
||||
- admin-triggered certificate reload by restarting an individual listener from disk
|
||||
|
||||
## Supported NIPs
|
||||
|
||||
@@ -19,6 +37,7 @@ Current `supported_nips` list:
|
||||
- Elixir `~> 1.19`
|
||||
- Erlang/OTP 28
|
||||
- PostgreSQL (18 used in the dev environment; 16+ recommended)
|
||||
- Docker or Podman plus Docker Compose support if you want to run the published container image
|
||||
|
||||
---
|
||||
|
||||
@@ -44,85 +63,232 @@ mix setup
|
||||
mix run --no-halt
|
||||
```
|
||||
|
||||
Server listens on `http://localhost:4000` by default.
|
||||
The default `public` listener binds to `http://localhost:4413`.
|
||||
|
||||
WebSocket clients should connect to:
|
||||
|
||||
```text
|
||||
ws://localhost:4000/relay
|
||||
ws://localhost:4413/relay
|
||||
```
|
||||
|
||||
### Useful endpoints
|
||||
|
||||
- `GET /health` -> `ok`
|
||||
- `GET /ready` -> readiness status
|
||||
- `GET /metrics` -> Prometheus metrics
|
||||
- `GET /metrics` -> Prometheus metrics (private/loopback source IPs by default)
|
||||
- `GET /relay` + `Accept: application/nostr+json` -> NIP-11 document
|
||||
- `POST /management` -> management API (requires NIP-98 auth)
|
||||
|
||||
---
|
||||
|
||||
## Test suites
|
||||
|
||||
Primary test entrypoints:
|
||||
|
||||
- `mix test` for the ExUnit suite
|
||||
- `mix test.marmot_e2e` for the Marmot client end-to-end suite
|
||||
- `mix test.node_sync_e2e` for the two-node relay sync end-to-end suite
|
||||
- `mix test.node_sync_docker_e2e` for the release-image Docker two-node relay sync suite
|
||||
|
||||
The node-sync harnesses are driven by:
|
||||
|
||||
- [`scripts/run_node_sync_e2e.sh`](./scripts/run_node_sync_e2e.sh)
|
||||
- [`scripts/run_node_sync_docker_e2e.sh`](./scripts/run_node_sync_docker_e2e.sh)
|
||||
- [`scripts/node_sync_e2e.exs`](./scripts/node_sync_e2e.exs)
|
||||
- [`compose.node-sync-e2e.yaml`](./compose.node-sync-e2e.yaml)
|
||||
|
||||
`mix test.node_sync_e2e` runs two real Parrhesia nodes against separate PostgreSQL databases, verifies catch-up and live sync, restarts one node, and verifies persisted resume behavior. `mix test.node_sync_docker_e2e` runs the same scenario against the release Docker image.
|
||||
|
||||
GitHub CI currently runs the non-Docker node-sync e2e on the main Linux matrix job. The Docker node-sync e2e remains an explicit/manual check because it depends on release-image build/runtime fidelity and a working Docker host.
|
||||
|
||||
---
|
||||
|
||||
## Production configuration
|
||||
|
||||
### Minimal setup
|
||||
|
||||
Before a Nostr client can publish its first event successfully, make sure these pieces are in place:
|
||||
|
||||
1. PostgreSQL is reachable from Parrhesia.
|
||||
Set `DATABASE_URL` and create/migrate the database with `Parrhesia.Release.migrate()` or `mix ecto.migrate`.
|
||||
|
||||
2. Parrhesia listeners are configured for your deployment.
|
||||
The default config exposes a `public` listener on plain HTTP port `4413`, and a reverse proxy can terminate TLS and forward WebSocket traffic to `/relay`. Additional listeners can be defined in `config/*.exs`.
|
||||
|
||||
3. `:relay_url` matches the public relay URL clients should use.
|
||||
Set `PARRHESIA_RELAY_URL` to the public relay URL exposed by the reverse proxy.
|
||||
In the normal deployment model, this should be your public `wss://.../relay` URL.
|
||||
|
||||
4. The database schema is migrated before starting normal traffic.
|
||||
The app image does not auto-run migrations on boot.
|
||||
|
||||
That is the actual minimum. With default policy settings, writes do not require auth, event signatures are verified, and no extra Nostr-specific bootstrap step is needed before posting ordinary events.
|
||||
|
||||
In `prod`, these environment variables are used:
|
||||
|
||||
- `DATABASE_URL` (**required**), e.g. `ecto://USER:PASS@HOST/parrhesia_prod`
|
||||
- `POOL_SIZE` (optional, default `10`)
|
||||
- `PORT` (optional, default `4000`)
|
||||
- `POOL_SIZE` (optional, default `32`)
|
||||
- `PORT` (optional, default `4413`)
|
||||
- `PARRHESIA_*` runtime overrides for relay config, limits, policies, listener-related metrics helpers, and features
|
||||
- `PARRHESIA_EXTRA_CONFIG` (optional path to an extra runtime config file)
|
||||
|
||||
`config/runtime.exs` reads these values at runtime in production releases.
|
||||
|
||||
### Typical relay config
|
||||
### Runtime env naming
|
||||
|
||||
Add/override in config files (for example in `config/prod.exs` or a `config/runtime.exs`):
|
||||
For runtime overrides, use the `PARRHESIA_...` prefix:
|
||||
|
||||
```elixir
|
||||
config :parrhesia, Parrhesia.Web.Endpoint,
|
||||
ip: {0, 0, 0, 0},
|
||||
port: 4000
|
||||
- `PARRHESIA_RELAY_URL`
|
||||
- `PARRHESIA_TRUSTED_PROXIES`
|
||||
- `PARRHESIA_MODERATION_CACHE_ENABLED`
|
||||
- `PARRHESIA_ENABLE_EXPIRATION_WORKER`
|
||||
- `PARRHESIA_LIMITS_*`
|
||||
- `PARRHESIA_POLICIES_*`
|
||||
- `PARRHESIA_METRICS_*`
|
||||
- `PARRHESIA_RETENTION_*`
|
||||
- `PARRHESIA_FEATURES_*`
|
||||
- `PARRHESIA_METRICS_ENDPOINT_*`
|
||||
|
||||
config :parrhesia,
|
||||
limits: [
|
||||
max_frame_bytes: 1_048_576,
|
||||
max_event_bytes: 262_144,
|
||||
max_filters_per_req: 16,
|
||||
max_filter_limit: 500,
|
||||
max_subscriptions_per_connection: 32,
|
||||
max_event_future_skew_seconds: 900,
|
||||
max_outbound_queue: 256,
|
||||
outbound_drain_batch_size: 64,
|
||||
outbound_overflow_strategy: :close
|
||||
],
|
||||
policies: [
|
||||
auth_required_for_writes: false,
|
||||
auth_required_for_reads: false,
|
||||
min_pow_difficulty: 0,
|
||||
accept_ephemeral_events: true,
|
||||
mls_group_event_ttl_seconds: 300,
|
||||
marmot_require_h_for_group_queries: true,
|
||||
marmot_group_max_h_values_per_filter: 32,
|
||||
marmot_group_max_query_window_seconds: 2_592_000,
|
||||
marmot_media_max_imeta_tags_per_event: 8,
|
||||
marmot_media_max_field_value_bytes: 1024,
|
||||
marmot_media_max_url_bytes: 2048,
|
||||
marmot_media_allowed_mime_prefixes: [],
|
||||
marmot_media_reject_mip04_v1: true,
|
||||
marmot_push_server_pubkeys: [],
|
||||
marmot_push_max_relay_tags: 16,
|
||||
marmot_push_max_payload_bytes: 65_536,
|
||||
marmot_push_max_trigger_age_seconds: 120,
|
||||
marmot_push_require_expiration: true,
|
||||
marmot_push_max_expiration_window_seconds: 120,
|
||||
marmot_push_max_server_recipients: 1
|
||||
],
|
||||
features: [
|
||||
nip_45_count: true,
|
||||
nip_50_search: true,
|
||||
nip_77_negentropy: true,
|
||||
marmot_push_notifications: false
|
||||
]
|
||||
Examples:
|
||||
|
||||
```bash
|
||||
export PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES=true
|
||||
export PARRHESIA_FEATURES_VERIFY_EVENT_SIGNATURES=true
|
||||
export PARRHESIA_METRICS_ALLOWED_CIDRS="10.0.0.0/8,192.168.0.0/16"
|
||||
export PARRHESIA_LIMITS_OUTBOUND_OVERFLOW_STRATEGY=drop_oldest
|
||||
```
|
||||
|
||||
Listeners themselves are primarily configured under `config :parrhesia, :listeners, ...`. The current runtime env helpers tune the default public listener and the optional dedicated metrics listener.
|
||||
|
||||
For settings that are awkward to express as env vars, mount an extra config file and set `PARRHESIA_EXTRA_CONFIG` to its path inside the container.
|
||||
|
||||
### Config reference
|
||||
|
||||
CSV env vars use comma-separated values. Boolean env vars accept `1/0`, `true/false`, `yes/no`, or `on/off`.
|
||||
|
||||
#### Top-level `:parrhesia`
|
||||
|
||||
| Atom key | ENV | Default | Notes |
|
||||
| --- | --- | --- | --- |
|
||||
| `:relay_url` | `PARRHESIA_RELAY_URL` | `ws://localhost:4413/relay` | Advertised relay URL and auth relay tag target |
|
||||
| `:moderation_cache_enabled` | `PARRHESIA_MODERATION_CACHE_ENABLED` | `true` | Toggle moderation cache |
|
||||
| `:enable_expiration_worker` | `PARRHESIA_ENABLE_EXPIRATION_WORKER` | `true` | Toggle background expiration worker |
|
||||
| `:limits` | `PARRHESIA_LIMITS_*` | see table below | Runtime override group |
|
||||
| `:policies` | `PARRHESIA_POLICIES_*` | see table below | Runtime override group |
|
||||
| `:listeners` | config-file driven | see notes below | Ingress listeners with bind, transport, feature, auth, network, and baseline ACL settings |
|
||||
| `:retention` | `PARRHESIA_RETENTION_*` | see table below | Partition lifecycle and pruning policy |
|
||||
| `:features` | `PARRHESIA_FEATURES_*` | see table below | Runtime override group |
|
||||
| `:storage.events` | `-` | `Parrhesia.Storage.Adapters.Postgres.Events` | Config-file override only |
|
||||
| `:storage.moderation` | `-` | `Parrhesia.Storage.Adapters.Postgres.Moderation` | Config-file override only |
|
||||
| `:storage.groups` | `-` | `Parrhesia.Storage.Adapters.Postgres.Groups` | Config-file override only |
|
||||
| `:storage.admin` | `-` | `Parrhesia.Storage.Adapters.Postgres.Admin` | Config-file override only |
|
||||
|
||||
#### `Parrhesia.Repo`
|
||||
|
||||
| Atom key | ENV | Default | Notes |
|
||||
| --- | --- | --- | --- |
|
||||
| `:url` | `DATABASE_URL` | required | Example: `ecto://USER:PASS@HOST/DATABASE` |
|
||||
| `:pool_size` | `POOL_SIZE` | `32` | DB connection pool size |
|
||||
| `:queue_target` | `DB_QUEUE_TARGET_MS` | `1000` | Ecto queue target in ms |
|
||||
| `:queue_interval` | `DB_QUEUE_INTERVAL_MS` | `5000` | Ecto queue interval in ms |
|
||||
| `:types` | `-` | `Parrhesia.PostgresTypes` | Internal config-file setting |
|
||||
|
||||
#### `:listeners`
|
||||
|
||||
| Atom key | ENV | Default | Notes |
|
||||
| --- | --- | --- | --- |
|
||||
| `:public.bind.port` | `PORT` | `4413` | Default public listener port |
|
||||
| `:public.proxy.trusted_cidrs` | `PARRHESIA_TRUSTED_PROXIES` | `[]` | Trusted reverse proxies for forwarded IP handling |
|
||||
| `:public.features.metrics.*` | `PARRHESIA_METRICS_*` | see below | Convenience runtime overrides for metrics on the public listener |
|
||||
| `:metrics.bind.port` | `PARRHESIA_METRICS_ENDPOINT_PORT` | `9568` | Optional dedicated metrics listener port |
|
||||
| `:metrics.enabled` | `PARRHESIA_METRICS_ENDPOINT_ENABLED` | `false` | Enables the optional dedicated metrics listener |
|
||||
|
||||
Listener `transport.tls` supports `:disabled`, `:server`, `:mutual`, and `:proxy_terminated`. For TLS-enabled listeners, the main config-file fields are `certfile`, `keyfile`, optional `cacertfile`, optional `cipher_suite`, optional `client_pins`, and `proxy_headers` for proxy-terminated identity.
|
||||
|
||||
#### `:limits`
|
||||
|
||||
| Atom key | ENV | Default |
|
||||
| --- | --- | --- |
|
||||
| `:max_frame_bytes` | `PARRHESIA_LIMITS_MAX_FRAME_BYTES` | `1048576` |
|
||||
| `:max_event_bytes` | `PARRHESIA_LIMITS_MAX_EVENT_BYTES` | `262144` |
|
||||
| `:max_filters_per_req` | `PARRHESIA_LIMITS_MAX_FILTERS_PER_REQ` | `16` |
|
||||
| `:max_filter_limit` | `PARRHESIA_LIMITS_MAX_FILTER_LIMIT` | `500` |
|
||||
| `:max_subscriptions_per_connection` | `PARRHESIA_LIMITS_MAX_SUBSCRIPTIONS_PER_CONNECTION` | `32` |
|
||||
| `:max_event_future_skew_seconds` | `PARRHESIA_LIMITS_MAX_EVENT_FUTURE_SKEW_SECONDS` | `900` |
|
||||
| `:max_event_ingest_per_window` | `PARRHESIA_LIMITS_MAX_EVENT_INGEST_PER_WINDOW` | `120` |
|
||||
| `:event_ingest_window_seconds` | `PARRHESIA_LIMITS_EVENT_INGEST_WINDOW_SECONDS` | `1` |
|
||||
| `:auth_max_age_seconds` | `PARRHESIA_LIMITS_AUTH_MAX_AGE_SECONDS` | `600` |
|
||||
| `:max_outbound_queue` | `PARRHESIA_LIMITS_MAX_OUTBOUND_QUEUE` | `256` |
|
||||
| `:outbound_drain_batch_size` | `PARRHESIA_LIMITS_OUTBOUND_DRAIN_BATCH_SIZE` | `64` |
|
||||
| `:outbound_overflow_strategy` | `PARRHESIA_LIMITS_OUTBOUND_OVERFLOW_STRATEGY` | `:close` |
|
||||
| `:max_negentropy_payload_bytes` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_PAYLOAD_BYTES` | `4096` |
|
||||
| `:max_negentropy_sessions_per_connection` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_SESSIONS_PER_CONNECTION` | `8` |
|
||||
| `:max_negentropy_total_sessions` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_TOTAL_SESSIONS` | `10000` |
|
||||
| `:negentropy_session_idle_timeout_seconds` | `PARRHESIA_LIMITS_NEGENTROPY_SESSION_IDLE_TIMEOUT_SECONDS` | `60` |
|
||||
| `:negentropy_session_sweep_interval_seconds` | `PARRHESIA_LIMITS_NEGENTROPY_SESSION_SWEEP_INTERVAL_SECONDS` | `10` |
|
||||
|
||||
#### `:policies`
|
||||
|
||||
| Atom key | ENV | Default |
|
||||
| --- | --- | --- |
|
||||
| `:auth_required_for_writes` | `PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES` | `false` |
|
||||
| `:auth_required_for_reads` | `PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_READS` | `false` |
|
||||
| `:min_pow_difficulty` | `PARRHESIA_POLICIES_MIN_POW_DIFFICULTY` | `0` |
|
||||
| `:accept_ephemeral_events` | `PARRHESIA_POLICIES_ACCEPT_EPHEMERAL_EVENTS` | `true` |
|
||||
| `:mls_group_event_ttl_seconds` | `PARRHESIA_POLICIES_MLS_GROUP_EVENT_TTL_SECONDS` | `300` |
|
||||
| `:marmot_require_h_for_group_queries` | `PARRHESIA_POLICIES_MARMOT_REQUIRE_H_FOR_GROUP_QUERIES` | `true` |
|
||||
| `:marmot_group_max_h_values_per_filter` | `PARRHESIA_POLICIES_MARMOT_GROUP_MAX_H_VALUES_PER_FILTER` | `32` |
|
||||
| `:marmot_group_max_query_window_seconds` | `PARRHESIA_POLICIES_MARMOT_GROUP_MAX_QUERY_WINDOW_SECONDS` | `2592000` |
|
||||
| `:marmot_media_max_imeta_tags_per_event` | `PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_IMETA_TAGS_PER_EVENT` | `8` |
|
||||
| `:marmot_media_max_field_value_bytes` | `PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_FIELD_VALUE_BYTES` | `1024` |
|
||||
| `:marmot_media_max_url_bytes` | `PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_URL_BYTES` | `2048` |
|
||||
| `:marmot_media_allowed_mime_prefixes` | `PARRHESIA_POLICIES_MARMOT_MEDIA_ALLOWED_MIME_PREFIXES` | `[]` |
|
||||
| `:marmot_media_reject_mip04_v1` | `PARRHESIA_POLICIES_MARMOT_MEDIA_REJECT_MIP04_V1` | `true` |
|
||||
| `:marmot_push_server_pubkeys` | `PARRHESIA_POLICIES_MARMOT_PUSH_SERVER_PUBKEYS` | `[]` |
|
||||
| `:marmot_push_max_relay_tags` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_RELAY_TAGS` | `16` |
|
||||
| `:marmot_push_max_payload_bytes` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_PAYLOAD_BYTES` | `65536` |
|
||||
| `:marmot_push_max_trigger_age_seconds` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_TRIGGER_AGE_SECONDS` | `120` |
|
||||
| `:marmot_push_require_expiration` | `PARRHESIA_POLICIES_MARMOT_PUSH_REQUIRE_EXPIRATION` | `true` |
|
||||
| `:marmot_push_max_expiration_window_seconds` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_EXPIRATION_WINDOW_SECONDS` | `120` |
|
||||
| `:marmot_push_max_server_recipients` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_SERVER_RECIPIENTS` | `1` |
|
||||
| `:management_auth_required` | `PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED` | `true` |
|
||||
|
||||
#### Listener-related Metrics Helpers
|
||||
|
||||
| Atom key | ENV | Default |
|
||||
| --- | --- | --- |
|
||||
| `:public.features.metrics.enabled` | `PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT` | `true` |
|
||||
| `:public` | `PARRHESIA_METRICS_PUBLIC` | `false` |
|
||||
| `:private_networks_only` | `PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY` | `true` |
|
||||
| `:allowed_cidrs` | `PARRHESIA_METRICS_ALLOWED_CIDRS` | `[]` |
|
||||
| `:auth_token` | `PARRHESIA_METRICS_AUTH_TOKEN` | `nil` |
|
||||
|
||||
#### `:retention`
|
||||
|
||||
| Atom key | ENV | Default | Notes |
|
||||
| --- | --- | --- | --- |
|
||||
| `:check_interval_hours` | `PARRHESIA_RETENTION_CHECK_INTERVAL_HOURS` | `24` | Partition maintenance + pruning cadence |
|
||||
| `:months_ahead` | `PARRHESIA_RETENTION_MONTHS_AHEAD` | `2` | Pre-create current month plus N future monthly partitions for `events` and `event_tags` |
|
||||
| `:max_db_bytes` | `PARRHESIA_RETENTION_MAX_DB_BYTES` | `:infinity` | Despite the `bytes` name, the value is interpreted as a GiB threshold; accepts an integer or `infinity` |
|
||||
| `:max_months_to_keep` | `PARRHESIA_RETENTION_MAX_MONTHS_TO_KEEP` | `:infinity` | Keep at most N months (including current month); accepts integer or `infinity` |
|
||||
| `:max_partitions_to_drop_per_run` | `PARRHESIA_RETENTION_MAX_PARTITIONS_TO_DROP_PER_RUN` | `1` | Safety cap for each maintenance run |
|
||||
|
||||
#### `:features`
|
||||
|
||||
| Atom key | ENV | Default |
|
||||
| --- | --- | --- |
|
||||
| `:verify_event_signatures` | `PARRHESIA_FEATURES_VERIFY_EVENT_SIGNATURES` | `true` |
|
||||
| `:nip_45_count` | `PARRHESIA_FEATURES_NIP_45_COUNT` | `true` |
|
||||
| `:nip_50_search` | `PARRHESIA_FEATURES_NIP_50_SEARCH` | `true` |
|
||||
| `:nip_77_negentropy` | `PARRHESIA_FEATURES_NIP_77_NEGENTROPY` | `true` |
|
||||
| `:marmot_push_notifications` | `PARRHESIA_FEATURES_MARMOT_PUSH_NOTIFICATIONS` | `false` |
|
||||
|
||||
#### Extra runtime config
|
||||
|
||||
| Atom key | ENV | Default | Notes |
|
||||
| --- | --- | --- | --- |
|
||||
| extra runtime config file | `PARRHESIA_EXTRA_CONFIG` | unset | Imports an additional runtime `.exs` file |
|
||||
|
||||
---
|
||||
|
||||
## Deploy
|
||||
@@ -136,15 +302,15 @@ export POOL_SIZE=20
|
||||
|
||||
mix deps.get --only prod
|
||||
mix compile
|
||||
mix ecto.migrate
|
||||
mix release
|
||||
|
||||
_build/prod/rel/parrhesia/bin/parrhesia foreground
|
||||
_build/prod/rel/parrhesia/bin/parrhesia eval "Parrhesia.Release.migrate()"
|
||||
_build/prod/rel/parrhesia/bin/parrhesia start
|
||||
```
|
||||
|
||||
For systemd/process managers, run the release command in foreground mode.
|
||||
For systemd/process managers, run the release command with `start`.
|
||||
|
||||
### Option B: Nix package (`default.nix`)
|
||||
### Option B: Nix release package (`default.nix`)
|
||||
|
||||
Build:
|
||||
|
||||
@@ -154,6 +320,110 @@ nix-build
|
||||
|
||||
Run the built release from `./result/bin/parrhesia` (release command interface).
|
||||
|
||||
### Option C: Docker image via Nix flake
|
||||
|
||||
Build the image tarball:
|
||||
|
||||
```bash
|
||||
nix build .#dockerImage
|
||||
# or with explicit build target:
|
||||
nix build .#packages.x86_64-linux.dockerImage
|
||||
```
|
||||
|
||||
Load it into Docker:
|
||||
|
||||
```bash
|
||||
docker load < result
|
||||
```
|
||||
|
||||
Run database migrations:
|
||||
|
||||
```bash
|
||||
docker run --rm \
|
||||
-e DATABASE_URL="ecto://USER:PASS@HOST/parrhesia_prod" \
|
||||
parrhesia:latest \
|
||||
eval "Parrhesia.Release.migrate()"
|
||||
```
|
||||
|
||||
Start the relay:
|
||||
|
||||
```bash
|
||||
docker run --rm \
|
||||
-p 4413:4413 \
|
||||
-e DATABASE_URL="ecto://USER:PASS@HOST/parrhesia_prod" \
|
||||
-e POOL_SIZE=20 \
|
||||
parrhesia:latest
|
||||
```
|
||||
|
||||
### Option D: Docker Compose with PostgreSQL
|
||||
|
||||
The repo includes [`compose.yaml`](./compose.yaml) and [`.env.example`](./.env.example) so Docker users can run Postgres and Parrhesia together.
|
||||
|
||||
Set up the environment file:
|
||||
|
||||
```bash
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
If you are building locally from source, build and load the image first:
|
||||
|
||||
```bash
|
||||
nix build .#dockerImage
|
||||
docker load < result
|
||||
```
|
||||
|
||||
Then start the stack:
|
||||
|
||||
```bash
|
||||
docker compose up -d db
|
||||
docker compose run --rm migrate
|
||||
docker compose up -d parrhesia
|
||||
```
|
||||
|
||||
The relay will be available on:
|
||||
|
||||
```text
|
||||
ws://localhost:4413/relay
|
||||
```
|
||||
|
||||
Notes:
|
||||
|
||||
- `compose.yaml` keeps PostgreSQL in a separate container; the Parrhesia image only runs the app release.
|
||||
- The container listens on port `4413`; use `PARRHESIA_HOST_PORT` if you want a different published host port.
|
||||
- Migrations are run explicitly through the one-shot `migrate` service instead of on every app boot.
|
||||
- Common runtime overrides can go straight into `.env`; see [`.env.example`](./.env.example) for examples.
|
||||
- For more specialized overrides, mount a file and set `PARRHESIA_EXTRA_CONFIG=/path/in/container/runtime.exs`.
|
||||
- When a GHCR image is published, set `PARRHESIA_IMAGE=ghcr.io/<owner>/parrhesia:<tag>` in `.env` and reuse the same compose flow.
|
||||
|
||||
---
|
||||
|
||||
## Benchmark
|
||||
|
||||
The benchmark compares Parrhesia against [`strfry`](https://github.com/hoytech/strfry) and [`nostr-rs-relay`](https://sr.ht/~gheartsfield/nostr-rs-relay/) using [`nostr-bench`](https://github.com/rnostr/nostr-bench).
|
||||
|
||||
Run it with:
|
||||
|
||||
```bash
|
||||
mix bench
|
||||
```
|
||||
|
||||
Current comparison results from [BENCHMARK.md](./BENCHMARK.md):
|
||||
|
||||
| metric | parrhesia | strfry | nostr-rs-relay | strfry/parrhesia | nostr-rs/parrhesia |
|
||||
| --- | ---: | ---: | ---: | ---: | ---: |
|
||||
| connect avg latency (ms) ↓ | 13.50 | 3.00 | 2.00 | **0.22x** | **0.15x** |
|
||||
| connect max latency (ms) ↓ | 22.50 | 5.50 | 3.00 | **0.24x** | **0.13x** |
|
||||
| echo throughput (TPS) ↑ | 80385.00 | 61673.00 | 164516.00 | 0.77x | **2.05x** |
|
||||
| echo throughput (MiB/s) ↑ | 44.00 | 34.45 | 90.10 | 0.78x | **2.05x** |
|
||||
| event throughput (TPS) ↑ | 2000.00 | 3404.50 | 788.00 | **1.70x** | 0.39x |
|
||||
| event throughput (MiB/s) ↑ | 1.30 | 2.20 | 0.50 | **1.69x** | 0.38x |
|
||||
| req throughput (TPS) ↑ | 3664.00 | 1808.50 | 877.50 | 0.49x | 0.24x |
|
||||
| req throughput (MiB/s) ↑ | 20.75 | 11.75 | 2.45 | 0.57x | 0.12x |
|
||||
|
||||
Higher is better for `↑` metrics. Lower is better for `↓` metrics.
|
||||
|
||||
(Results from a Linux container on a 6-core Intel i5-8400T with NVMe drive, PostgreSQL 18)
|
||||
|
||||
---
|
||||
|
||||
## Development quality checks
|
||||
@@ -164,13 +434,13 @@ Before opening a PR:
|
||||
mix precommit
|
||||
```
|
||||
|
||||
For external CLI end-to-end checks with `nak`:
|
||||
Additional external CLI end-to-end checks with `nak`:
|
||||
|
||||
```bash
|
||||
mix test.nak_e2e
|
||||
```
|
||||
|
||||
For Marmot client end-to-end checks (TypeScript/Node suite using `marmot-ts`):
|
||||
For Marmot client end-to-end checks (TypeScript/Node suite using `marmot-ts`, included in `precommit`):
|
||||
|
||||
```bash
|
||||
mix test.marmot_e2e
|
||||
|
||||
92
compose.node-sync-e2e.yaml
Normal file
92
compose.node-sync-e2e.yaml
Normal file
@@ -0,0 +1,92 @@
|
||||
services:
  # Two independent Postgres instances, one per relay node (A and B), so the
  # sync end-to-end test exercises cross-node replication with separate stores.
  db-a:
    image: postgres:17
    restart: unless-stopped
    environment:
      POSTGRES_DB: parrhesia_a
      POSTGRES_USER: parrhesia
      POSTGRES_PASSWORD: parrhesia
    healthcheck:
      # $$ escapes compose interpolation so pg_isready sees the container env.
      test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
      interval: 5s
      timeout: 5s
      retries: 12
    volumes:
      - postgres-a-data:/var/lib/postgresql/data

  db-b:
    image: postgres:17
    restart: unless-stopped
    environment:
      POSTGRES_DB: parrhesia_b
      POSTGRES_USER: parrhesia
      POSTGRES_PASSWORD: parrhesia
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
      interval: 5s
      timeout: 5s
      retries: 12
    volumes:
      - postgres-b-data:/var/lib/postgresql/data

  # One-shot migration runners, one per node database.
  migrate-a:
    image: ${PARRHESIA_IMAGE:-parrhesia:latest}
    restart: "no"
    depends_on:
      db-a:
        condition: service_healthy
    environment:
      DATABASE_URL: ecto://parrhesia:parrhesia@db-a:5432/parrhesia_a
      POOL_SIZE: ${POOL_SIZE:-20}
      # NOTE(review): unset host vars interpolate to "" here — relies on the
      # app's env parsing treating empty values as unset.
      PARRHESIA_ACL_PROTECTED_FILTERS: ${PARRHESIA_ACL_PROTECTED_FILTERS}
    command: ["eval", "Parrhesia.Release.migrate()"]

  migrate-b:
    image: ${PARRHESIA_IMAGE:-parrhesia:latest}
    restart: "no"
    depends_on:
      db-b:
        condition: service_healthy
    environment:
      DATABASE_URL: ecto://parrhesia:parrhesia@db-b:5432/parrhesia_b
      POOL_SIZE: ${POOL_SIZE:-20}
      PARRHESIA_ACL_PROTECTED_FILTERS: ${PARRHESIA_ACL_PROTECTED_FILTERS}
    command: ["eval", "Parrhesia.Release.migrate()"]

  # Relay node A; container listens on 4413, published on host port 45131 by
  # default. Identity/sync state lives under /tmp so each run starts fresh.
  parrhesia-a:
    image: ${PARRHESIA_IMAGE:-parrhesia:latest}
    restart: unless-stopped
    depends_on:
      db-a:
        condition: service_healthy
    environment:
      DATABASE_URL: ecto://parrhesia:parrhesia@db-a:5432/parrhesia_a
      POOL_SIZE: ${POOL_SIZE:-20}
      PORT: 4413
      PARRHESIA_RELAY_URL: ${PARRHESIA_NODE_A_RELAY_URL:-ws://parrhesia-a:4413/relay}
      PARRHESIA_ACL_PROTECTED_FILTERS: ${PARRHESIA_ACL_PROTECTED_FILTERS}
      PARRHESIA_IDENTITY_PATH: /tmp/parrhesia-a/server_identity.json
      PARRHESIA_SYNC_PATH: /tmp/parrhesia-a/sync_servers.json
    ports:
      - "${PARRHESIA_NODE_A_HOST_PORT:-45131}:4413"

  # Relay node B; mirror of node A on host port 45132 by default.
  parrhesia-b:
    image: ${PARRHESIA_IMAGE:-parrhesia:latest}
    restart: unless-stopped
    depends_on:
      db-b:
        condition: service_healthy
    environment:
      DATABASE_URL: ecto://parrhesia:parrhesia@db-b:5432/parrhesia_b
      POOL_SIZE: ${POOL_SIZE:-20}
      PORT: 4413
      PARRHESIA_RELAY_URL: ${PARRHESIA_NODE_B_RELAY_URL:-ws://parrhesia-b:4413/relay}
      PARRHESIA_ACL_PROTECTED_FILTERS: ${PARRHESIA_ACL_PROTECTED_FILTERS}
      PARRHESIA_IDENTITY_PATH: /tmp/parrhesia-b/server_identity.json
      PARRHESIA_SYNC_PATH: /tmp/parrhesia-b/sync_servers.json
    ports:
      - "${PARRHESIA_NODE_B_HOST_PORT:-45132}:4413"

volumes:
  postgres-a-data:
  postgres-b-data:
|
||||
42
compose.yaml
Normal file
42
compose.yaml
Normal file
@@ -0,0 +1,42 @@
|
||||
services:
  # PostgreSQL backing store for the relay; kept in its own container so the
  # Parrhesia image only ships the app release.
  db:
    image: postgres:17
    restart: unless-stopped
    environment:
      POSTGRES_DB: ${POSTGRES_DB:-parrhesia}
      POSTGRES_USER: ${POSTGRES_USER:-parrhesia}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-parrhesia}
    healthcheck:
      # $$ escapes compose interpolation so pg_isready sees the container env.
      test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
      interval: 5s
      timeout: 5s
      retries: 12
    volumes:
      - postgres-data:/var/lib/postgresql/data

  # One-shot migration runner (profile "tools" keeps it out of `up`); invoked
  # explicitly via `docker compose run --rm migrate` rather than on app boot.
  migrate:
    image: ${PARRHESIA_IMAGE:-parrhesia:latest}
    profiles: ["tools"]
    restart: "no"
    depends_on:
      db:
        condition: service_healthy
    environment:
      DATABASE_URL: ${DATABASE_URL:-ecto://parrhesia:parrhesia@db:5432/parrhesia}
      POOL_SIZE: ${POOL_SIZE:-20}
    command: ["eval", "Parrhesia.Release.migrate()"]

  # The relay itself; container listens on 4413, published host port is
  # configurable via PARRHESIA_HOST_PORT.
  parrhesia:
    image: ${PARRHESIA_IMAGE:-parrhesia:latest}
    restart: unless-stopped
    depends_on:
      db:
        condition: service_healthy
    environment:
      DATABASE_URL: ${DATABASE_URL:-ecto://parrhesia:parrhesia@db:5432/parrhesia}
      POOL_SIZE: ${POOL_SIZE:-20}
    ports:
      - "${PARRHESIA_HOST_PORT:-4413}:4413"

volumes:
  postgres-data:
|
||||
@@ -1,6 +1,18 @@
|
||||
import Config
|
||||
|
||||
config :postgrex, :json_library, JSON
|
||||
|
||||
config :parrhesia,
|
||||
moderation_cache_enabled: true,
|
||||
relay_url: "ws://localhost:4413/relay",
|
||||
identity: [
|
||||
path: nil,
|
||||
private_key: nil
|
||||
],
|
||||
sync: [
|
||||
path: nil,
|
||||
start_workers?: true
|
||||
],
|
||||
limits: [
|
||||
max_frame_bytes: 1_048_576,
|
||||
max_event_bytes: 262_144,
|
||||
@@ -8,9 +20,19 @@ config :parrhesia,
|
||||
max_filter_limit: 500,
|
||||
max_subscriptions_per_connection: 32,
|
||||
max_event_future_skew_seconds: 900,
|
||||
max_event_ingest_per_window: 120,
|
||||
event_ingest_window_seconds: 1,
|
||||
auth_max_age_seconds: 600,
|
||||
max_outbound_queue: 256,
|
||||
outbound_drain_batch_size: 64,
|
||||
outbound_overflow_strategy: :close
|
||||
outbound_overflow_strategy: :close,
|
||||
max_negentropy_payload_bytes: 4096,
|
||||
max_negentropy_sessions_per_connection: 8,
|
||||
max_negentropy_total_sessions: 10_000,
|
||||
max_negentropy_items_per_session: 50_000,
|
||||
negentropy_id_list_threshold: 32,
|
||||
negentropy_session_idle_timeout_seconds: 60,
|
||||
negentropy_session_sweep_interval_seconds: 10
|
||||
],
|
||||
policies: [
|
||||
auth_required_for_writes: false,
|
||||
@@ -35,7 +57,35 @@ config :parrhesia,
|
||||
marmot_push_max_server_recipients: 1,
|
||||
management_auth_required: true
|
||||
],
|
||||
listeners: %{
|
||||
public: %{
|
||||
enabled: true,
|
||||
bind: %{ip: {0, 0, 0, 0}, port: 4413},
|
||||
transport: %{scheme: :http, tls: %{mode: :disabled}},
|
||||
proxy: %{trusted_cidrs: [], honor_x_forwarded_for: true},
|
||||
network: %{allow_all: true},
|
||||
features: %{
|
||||
nostr: %{enabled: true},
|
||||
admin: %{enabled: true},
|
||||
metrics: %{
|
||||
enabled: true,
|
||||
access: %{private_networks_only: true},
|
||||
auth_token: nil
|
||||
}
|
||||
},
|
||||
auth: %{nip42_required: false, nip98_required_for_admin: true},
|
||||
baseline_acl: %{read: [], write: []}
|
||||
}
|
||||
},
|
||||
retention: [
|
||||
check_interval_hours: 24,
|
||||
months_ahead: 2,
|
||||
max_db_bytes: :infinity,
|
||||
max_months_to_keep: :infinity,
|
||||
max_partitions_to_drop_per_run: 1
|
||||
],
|
||||
features: [
|
||||
verify_event_signatures: true,
|
||||
nip_45_count: true,
|
||||
nip_50_search: true,
|
||||
nip_77_negentropy: true,
|
||||
@@ -48,7 +98,7 @@ config :parrhesia,
|
||||
admin: Parrhesia.Storage.Adapters.Postgres.Admin
|
||||
]
|
||||
|
||||
config :parrhesia, Parrhesia.Web.Endpoint, port: 4000
|
||||
config :parrhesia, Parrhesia.Repo, types: Parrhesia.PostgresTypes
|
||||
|
||||
config :parrhesia, ecto_repos: [Parrhesia.Repo]
|
||||
|
||||
|
||||
@@ -1,3 +1,8 @@
|
||||
import Config
|
||||
|
||||
config :parrhesia, Parrhesia.Repo,
|
||||
pool_size: 32,
|
||||
queue_target: 1_000,
|
||||
queue_interval: 5_000
|
||||
|
||||
# Production runtime configuration lives in config/runtime.exs.
|
||||
|
||||
@@ -1,14 +1,625 @@
|
||||
import Config
|
||||
|
||||
# Reads env var `name` as a string; an unset variable or an empty value
# falls back to `default`.
string_env = fn name, default ->
  value = System.get_env(name)
  if value in [nil, ""], do: default, else: value
end
|
||||
|
||||
# Reads env var `name` as an integer. Unset or empty values fall back to
# `default`, matching `string_env`/`json_env` (compose files interpolate
# unset variables as empty strings, which previously crashed here).
# Non-integer values raise a descriptive error instead of leaking a bare
# ArgumentError from String.to_integer/1.
int_env = fn name, default ->
  case System.get_env(name) do
    nil ->
      default

    "" ->
      default

    value ->
      case Integer.parse(String.trim(value)) do
        {parsed, ""} -> parsed
        _other -> raise "environment variable #{name} must be an integer"
      end
  end
end
|
||||
|
||||
# Reads env var `name` as a boolean. Accepts 1/0, true/false, yes/no,
# on/off (case-insensitive). Unset or empty values fall back to `default`
# for consistency with the other *_env helpers (previously "" raised).
# Any other value raises a descriptive error.
bool_env = fn name, default ->
  case System.get_env(name) do
    nil ->
      default

    "" ->
      default

    value ->
      case String.downcase(value) do
        "1" -> true
        "true" -> true
        "yes" -> true
        "on" -> true
        "0" -> false
        "false" -> false
        "no" -> false
        "off" -> false
        _other -> raise "environment variable #{name} must be a boolean value"
      end
  end
end
|
||||
|
||||
# Reads env var `name` as a comma-separated list, trimming entries and
# dropping empties. Unset or empty values fall back to `default` for
# consistency with the other *_env helpers (previously "" returned []
# even when `default` was non-empty).
csv_env = fn name, default ->
  case System.get_env(name) do
    nil ->
      default

    "" ->
      default

    value ->
      value
      |> String.split(",", trim: true)
      |> Enum.map(&String.trim/1)
      |> Enum.reject(&(&1 == ""))
  end
end
|
||||
|
||||
# Reads env var `name` and decodes it as JSON. Unset or empty values fall
# back to `default`; invalid JSON raises a descriptive error.
json_env = fn name, default ->
  raw = System.get_env(name)

  if raw in [nil, ""] do
    default
  else
    case JSON.decode(raw) do
      {:ok, decoded} ->
        decoded

      {:error, reason} ->
        raise "environment variable #{name} must contain valid JSON: #{inspect(reason)}"
    end
  end
end
|
||||
|
||||
# Reads env var `name` as either the atom :infinity (case-insensitive
# "infinity") or an integer. Unset or empty values fall back to `default`.
# Fix: the integer branch previously called String.to_integer/1 on the
# *untrimmed* raw value, so inputs like " 5 " crashed even though the
# infinity check trimmed; both branches now use the normalized value.
infinity_or_int_env = fn name, default ->
  case System.get_env(name) do
    nil ->
      default

    value ->
      normalized = value |> String.trim() |> String.downcase()

      cond do
        normalized == "" -> default
        normalized == "infinity" -> :infinity
        true -> String.to_integer(normalized)
      end
  end
end
|
||||
|
||||
# Reads env var `name` as one of the outbound-queue overflow strategies.
# Unset or empty values fall back to `default`. Fix: matching is now
# trimmed and case-insensitive ("CLOSE" previously raised), consistent
# with how bool_env normalizes its input.
outbound_overflow_strategy_env = fn name, default ->
  case System.get_env(name) do
    nil ->
      default

    "" ->
      default

    value ->
      case value |> String.trim() |> String.downcase() do
        "close" ->
          :close

        "drop_oldest" ->
          :drop_oldest

        "drop_newest" ->
          :drop_newest

        _other ->
          raise "environment variable #{name} must be one of: close, drop_oldest, drop_newest"
      end
  end
end
|
||||
|
||||
# Reads env var `name` as an IPv4 address and returns it as a 4-tuple
# (e.g. "0.0.0.0" -> {0, 0, 0, 0}). Unset or empty values fall back to
# `default`. Fix: non-numeric octets (e.g. "1.2.3.x") previously leaked a
# bare ArgumentError from String.to_integer/1 instead of the intended
# descriptive message; all malformed inputs now raise the same error.
ipv4_env = fn name, default ->
  case System.get_env(name) do
    nil ->
      default

    "" ->
      default

    value ->
      invalid = fn -> raise "environment variable #{name} must be a valid IPv4 address" end

      case String.split(value, ".", parts: 4) do
        [_, _, _, _] = parts ->
          parts
          |> Enum.map(fn part ->
            # Strict parse: reject trailing garbage and out-of-range octets.
            case Integer.parse(part) do
              {octet, ""} when octet >= 0 and octet <= 255 -> octet
              _other -> invalid.()
            end
          end)
          |> List.to_tuple()

        _other ->
          invalid.()
      end
  end
end
|
||||
|
||||
if config_env() == :prod do
|
||||
database_url =
|
||||
System.get_env("DATABASE_URL") ||
|
||||
raise "environment variable DATABASE_URL is missing. Example: ecto://USER:PASS@HOST/DATABASE"
|
||||
|
||||
repo_defaults = Application.get_env(:parrhesia, Parrhesia.Repo, [])
|
||||
relay_url_default = Application.get_env(:parrhesia, :relay_url)
|
||||
|
||||
moderation_cache_enabled_default =
|
||||
Application.get_env(:parrhesia, :moderation_cache_enabled, true)
|
||||
|
||||
enable_expiration_worker_default =
|
||||
Application.get_env(:parrhesia, :enable_expiration_worker, true)
|
||||
|
||||
limits_defaults = Application.get_env(:parrhesia, :limits, [])
|
||||
policies_defaults = Application.get_env(:parrhesia, :policies, [])
|
||||
listeners_defaults = Application.get_env(:parrhesia, :listeners, %{})
|
||||
retention_defaults = Application.get_env(:parrhesia, :retention, [])
|
||||
features_defaults = Application.get_env(:parrhesia, :features, [])
|
||||
acl_defaults = Application.get_env(:parrhesia, :acl, [])
|
||||
|
||||
default_pool_size = Keyword.get(repo_defaults, :pool_size, 32)
|
||||
default_queue_target = Keyword.get(repo_defaults, :queue_target, 1_000)
|
||||
default_queue_interval = Keyword.get(repo_defaults, :queue_interval, 5_000)
|
||||
|
||||
pool_size = int_env.("POOL_SIZE", default_pool_size)
|
||||
queue_target = int_env.("DB_QUEUE_TARGET_MS", default_queue_target)
|
||||
queue_interval = int_env.("DB_QUEUE_INTERVAL_MS", default_queue_interval)
|
||||
|
||||
limits = [
|
||||
max_frame_bytes:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_FRAME_BYTES",
|
||||
Keyword.get(limits_defaults, :max_frame_bytes, 1_048_576)
|
||||
),
|
||||
max_event_bytes:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_EVENT_BYTES",
|
||||
Keyword.get(limits_defaults, :max_event_bytes, 262_144)
|
||||
),
|
||||
max_filters_per_req:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_FILTERS_PER_REQ",
|
||||
Keyword.get(limits_defaults, :max_filters_per_req, 16)
|
||||
),
|
||||
max_filter_limit:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_FILTER_LIMIT",
|
||||
Keyword.get(limits_defaults, :max_filter_limit, 500)
|
||||
),
|
||||
max_subscriptions_per_connection:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_SUBSCRIPTIONS_PER_CONNECTION",
|
||||
Keyword.get(limits_defaults, :max_subscriptions_per_connection, 32)
|
||||
),
|
||||
max_event_future_skew_seconds:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_EVENT_FUTURE_SKEW_SECONDS",
|
||||
Keyword.get(limits_defaults, :max_event_future_skew_seconds, 900)
|
||||
),
|
||||
max_event_ingest_per_window:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_EVENT_INGEST_PER_WINDOW",
|
||||
Keyword.get(limits_defaults, :max_event_ingest_per_window, 120)
|
||||
),
|
||||
event_ingest_window_seconds:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_EVENT_INGEST_WINDOW_SECONDS",
|
||||
Keyword.get(limits_defaults, :event_ingest_window_seconds, 1)
|
||||
),
|
||||
auth_max_age_seconds:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_AUTH_MAX_AGE_SECONDS",
|
||||
Keyword.get(limits_defaults, :auth_max_age_seconds, 600)
|
||||
),
|
||||
max_outbound_queue:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_OUTBOUND_QUEUE",
|
||||
Keyword.get(limits_defaults, :max_outbound_queue, 256)
|
||||
),
|
||||
outbound_drain_batch_size:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_OUTBOUND_DRAIN_BATCH_SIZE",
|
||||
Keyword.get(limits_defaults, :outbound_drain_batch_size, 64)
|
||||
),
|
||||
outbound_overflow_strategy:
|
||||
outbound_overflow_strategy_env.(
|
||||
"PARRHESIA_LIMITS_OUTBOUND_OVERFLOW_STRATEGY",
|
||||
Keyword.get(limits_defaults, :outbound_overflow_strategy, :close)
|
||||
),
|
||||
max_negentropy_payload_bytes:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_NEGENTROPY_PAYLOAD_BYTES",
|
||||
Keyword.get(limits_defaults, :max_negentropy_payload_bytes, 4096)
|
||||
),
|
||||
max_negentropy_sessions_per_connection:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_NEGENTROPY_SESSIONS_PER_CONNECTION",
|
||||
Keyword.get(limits_defaults, :max_negentropy_sessions_per_connection, 8)
|
||||
),
|
||||
max_negentropy_total_sessions:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_NEGENTROPY_TOTAL_SESSIONS",
|
||||
Keyword.get(limits_defaults, :max_negentropy_total_sessions, 10_000)
|
||||
),
|
||||
max_negentropy_items_per_session:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_NEGENTROPY_ITEMS_PER_SESSION",
|
||||
Keyword.get(limits_defaults, :max_negentropy_items_per_session, 50_000)
|
||||
),
|
||||
negentropy_id_list_threshold:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_NEGENTROPY_ID_LIST_THRESHOLD",
|
||||
Keyword.get(limits_defaults, :negentropy_id_list_threshold, 32)
|
||||
),
|
||||
negentropy_session_idle_timeout_seconds:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_NEGENTROPY_SESSION_IDLE_TIMEOUT_SECONDS",
|
||||
Keyword.get(limits_defaults, :negentropy_session_idle_timeout_seconds, 60)
|
||||
),
|
||||
negentropy_session_sweep_interval_seconds:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_NEGENTROPY_SESSION_SWEEP_INTERVAL_SECONDS",
|
||||
Keyword.get(limits_defaults, :negentropy_session_sweep_interval_seconds, 10)
|
||||
)
|
||||
]
|
||||
|
||||
policies = [
|
||||
auth_required_for_writes:
|
||||
bool_env.(
|
||||
"PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES",
|
||||
Keyword.get(policies_defaults, :auth_required_for_writes, false)
|
||||
),
|
||||
auth_required_for_reads:
|
||||
bool_env.(
|
||||
"PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_READS",
|
||||
Keyword.get(policies_defaults, :auth_required_for_reads, false)
|
||||
),
|
||||
min_pow_difficulty:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MIN_POW_DIFFICULTY",
|
||||
Keyword.get(policies_defaults, :min_pow_difficulty, 0)
|
||||
),
|
||||
accept_ephemeral_events:
|
||||
bool_env.(
|
||||
"PARRHESIA_POLICIES_ACCEPT_EPHEMERAL_EVENTS",
|
||||
Keyword.get(policies_defaults, :accept_ephemeral_events, true)
|
||||
),
|
||||
mls_group_event_ttl_seconds:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MLS_GROUP_EVENT_TTL_SECONDS",
|
||||
Keyword.get(policies_defaults, :mls_group_event_ttl_seconds, 300)
|
||||
),
|
||||
marmot_require_h_for_group_queries:
|
||||
bool_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_REQUIRE_H_FOR_GROUP_QUERIES",
|
||||
Keyword.get(policies_defaults, :marmot_require_h_for_group_queries, true)
|
||||
),
|
||||
marmot_group_max_h_values_per_filter:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_GROUP_MAX_H_VALUES_PER_FILTER",
|
||||
Keyword.get(policies_defaults, :marmot_group_max_h_values_per_filter, 32)
|
||||
),
|
||||
marmot_group_max_query_window_seconds:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_GROUP_MAX_QUERY_WINDOW_SECONDS",
|
||||
Keyword.get(policies_defaults, :marmot_group_max_query_window_seconds, 2_592_000)
|
||||
),
|
||||
marmot_media_max_imeta_tags_per_event:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_IMETA_TAGS_PER_EVENT",
|
||||
Keyword.get(policies_defaults, :marmot_media_max_imeta_tags_per_event, 8)
|
||||
),
|
||||
marmot_media_max_field_value_bytes:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_FIELD_VALUE_BYTES",
|
||||
Keyword.get(policies_defaults, :marmot_media_max_field_value_bytes, 1024)
|
||||
),
|
||||
marmot_media_max_url_bytes:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_URL_BYTES",
|
||||
Keyword.get(policies_defaults, :marmot_media_max_url_bytes, 2048)
|
||||
),
|
||||
marmot_media_allowed_mime_prefixes:
|
||||
csv_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_MEDIA_ALLOWED_MIME_PREFIXES",
|
||||
Keyword.get(policies_defaults, :marmot_media_allowed_mime_prefixes, [])
|
||||
),
|
||||
marmot_media_reject_mip04_v1:
|
||||
bool_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_MEDIA_REJECT_MIP04_V1",
|
||||
Keyword.get(policies_defaults, :marmot_media_reject_mip04_v1, true)
|
||||
),
|
||||
marmot_push_server_pubkeys:
|
||||
csv_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_PUSH_SERVER_PUBKEYS",
|
||||
Keyword.get(policies_defaults, :marmot_push_server_pubkeys, [])
|
||||
),
|
||||
marmot_push_max_relay_tags:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_PUSH_MAX_RELAY_TAGS",
|
||||
Keyword.get(policies_defaults, :marmot_push_max_relay_tags, 16)
|
||||
),
|
||||
marmot_push_max_payload_bytes:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_PUSH_MAX_PAYLOAD_BYTES",
|
||||
Keyword.get(policies_defaults, :marmot_push_max_payload_bytes, 65_536)
|
||||
),
|
||||
marmot_push_max_trigger_age_seconds:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_PUSH_MAX_TRIGGER_AGE_SECONDS",
|
||||
Keyword.get(policies_defaults, :marmot_push_max_trigger_age_seconds, 120)
|
||||
),
|
||||
marmot_push_require_expiration:
|
||||
bool_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_PUSH_REQUIRE_EXPIRATION",
|
||||
Keyword.get(policies_defaults, :marmot_push_require_expiration, true)
|
||||
),
|
||||
marmot_push_max_expiration_window_seconds:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_PUSH_MAX_EXPIRATION_WINDOW_SECONDS",
|
||||
Keyword.get(policies_defaults, :marmot_push_max_expiration_window_seconds, 120)
|
||||
),
|
||||
marmot_push_max_server_recipients:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_PUSH_MAX_SERVER_RECIPIENTS",
|
||||
Keyword.get(policies_defaults, :marmot_push_max_server_recipients, 1)
|
||||
),
|
||||
management_auth_required:
|
||||
bool_env.(
|
||||
"PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED",
|
||||
Keyword.get(policies_defaults, :management_auth_required, true)
|
||||
)
|
||||
]
|
||||
|
||||
public_listener_defaults = Map.get(listeners_defaults, :public, %{})
|
||||
public_bind_defaults = Map.get(public_listener_defaults, :bind, %{})
|
||||
public_transport_defaults = Map.get(public_listener_defaults, :transport, %{})
|
||||
public_proxy_defaults = Map.get(public_listener_defaults, :proxy, %{})
|
||||
public_network_defaults = Map.get(public_listener_defaults, :network, %{})
|
||||
public_features_defaults = Map.get(public_listener_defaults, :features, %{})
|
||||
public_auth_defaults = Map.get(public_listener_defaults, :auth, %{})
|
||||
public_metrics_defaults = Map.get(public_features_defaults, :metrics, %{})
|
||||
public_metrics_access_defaults = Map.get(public_metrics_defaults, :access, %{})
|
||||
|
||||
metrics_listener_defaults = Map.get(listeners_defaults, :metrics, %{})
|
||||
metrics_listener_bind_defaults = Map.get(metrics_listener_defaults, :bind, %{})
|
||||
metrics_listener_transport_defaults = Map.get(metrics_listener_defaults, :transport, %{})
|
||||
metrics_listener_network_defaults = Map.get(metrics_listener_defaults, :network, %{})
|
||||
|
||||
metrics_listener_metrics_defaults =
|
||||
metrics_listener_defaults
|
||||
|> Map.get(:features, %{})
|
||||
|> Map.get(:metrics, %{})
|
||||
|
||||
metrics_listener_metrics_access_defaults =
|
||||
Map.get(metrics_listener_metrics_defaults, :access, %{})
|
||||
|
||||
public_listener = %{
|
||||
enabled: Map.get(public_listener_defaults, :enabled, true),
|
||||
bind: %{
|
||||
ip: Map.get(public_bind_defaults, :ip, {0, 0, 0, 0}),
|
||||
port: int_env.("PORT", Map.get(public_bind_defaults, :port, 4413))
|
||||
},
|
||||
transport: %{
|
||||
scheme: Map.get(public_transport_defaults, :scheme, :http),
|
||||
tls: Map.get(public_transport_defaults, :tls, %{mode: :disabled})
|
||||
},
|
||||
proxy: %{
|
||||
trusted_cidrs:
|
||||
csv_env.(
|
||||
"PARRHESIA_TRUSTED_PROXIES",
|
||||
Map.get(public_proxy_defaults, :trusted_cidrs, [])
|
||||
),
|
||||
honor_x_forwarded_for: Map.get(public_proxy_defaults, :honor_x_forwarded_for, true)
|
||||
},
|
||||
network: %{
|
||||
allow_cidrs: Map.get(public_network_defaults, :allow_cidrs, []),
|
||||
private_networks_only: Map.get(public_network_defaults, :private_networks_only, false),
|
||||
public: Map.get(public_network_defaults, :public, false),
|
||||
allow_all: Map.get(public_network_defaults, :allow_all, true)
|
||||
},
|
||||
features: %{
|
||||
nostr: %{
|
||||
enabled: public_features_defaults |> Map.get(:nostr, %{}) |> Map.get(:enabled, true)
|
||||
},
|
||||
admin: %{
|
||||
enabled: public_features_defaults |> Map.get(:admin, %{}) |> Map.get(:enabled, true)
|
||||
},
|
||||
metrics: %{
|
||||
enabled:
|
||||
bool_env.(
|
||||
"PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT",
|
||||
Map.get(public_metrics_defaults, :enabled, true)
|
||||
),
|
||||
auth_token:
|
||||
string_env.(
|
||||
"PARRHESIA_METRICS_AUTH_TOKEN",
|
||||
Map.get(public_metrics_defaults, :auth_token)
|
||||
),
|
||||
access: %{
|
||||
public:
|
||||
bool_env.(
|
||||
"PARRHESIA_METRICS_PUBLIC",
|
||||
Map.get(public_metrics_access_defaults, :public, false)
|
||||
),
|
||||
private_networks_only:
|
||||
bool_env.(
|
||||
"PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY",
|
||||
Map.get(public_metrics_access_defaults, :private_networks_only, true)
|
||||
),
|
||||
allow_cidrs:
|
||||
csv_env.(
|
||||
"PARRHESIA_METRICS_ALLOWED_CIDRS",
|
||||
Map.get(public_metrics_access_defaults, :allow_cidrs, [])
|
||||
),
|
||||
allow_all: Map.get(public_metrics_access_defaults, :allow_all, true)
|
||||
}
|
||||
}
|
||||
},
|
||||
auth: %{
|
||||
nip42_required: Map.get(public_auth_defaults, :nip42_required, false),
|
||||
nip98_required_for_admin:
|
||||
bool_env.(
|
||||
"PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED",
|
||||
Map.get(public_auth_defaults, :nip98_required_for_admin, true)
|
||||
)
|
||||
},
|
||||
baseline_acl: Map.get(public_listener_defaults, :baseline_acl, %{read: [], write: []})
|
||||
}
|
||||
|
||||
listeners =
|
||||
if Map.get(metrics_listener_defaults, :enabled, false) or
|
||||
bool_env.("PARRHESIA_METRICS_ENDPOINT_ENABLED", false) do
|
||||
Map.put(
|
||||
%{public: public_listener},
|
||||
:metrics,
|
||||
%{
|
||||
enabled: true,
|
||||
bind: %{
|
||||
ip: Map.get(metrics_listener_bind_defaults, :ip, {127, 0, 0, 1}),
|
||||
port:
|
||||
int_env.(
|
||||
"PARRHESIA_METRICS_ENDPOINT_PORT",
|
||||
Map.get(metrics_listener_bind_defaults, :port, 9568)
|
||||
)
|
||||
},
|
||||
transport: %{
|
||||
scheme: Map.get(metrics_listener_transport_defaults, :scheme, :http),
|
||||
tls: Map.get(metrics_listener_transport_defaults, :tls, %{mode: :disabled})
|
||||
},
|
||||
network: %{
|
||||
allow_cidrs: Map.get(metrics_listener_network_defaults, :allow_cidrs, []),
|
||||
private_networks_only:
|
||||
Map.get(metrics_listener_network_defaults, :private_networks_only, false),
|
||||
public: Map.get(metrics_listener_network_defaults, :public, false),
|
||||
allow_all: Map.get(metrics_listener_network_defaults, :allow_all, true)
|
||||
},
|
||||
features: %{
|
||||
nostr: %{enabled: false},
|
||||
admin: %{enabled: false},
|
||||
metrics: %{
|
||||
enabled: true,
|
||||
auth_token:
|
||||
string_env.(
|
||||
"PARRHESIA_METRICS_AUTH_TOKEN",
|
||||
Map.get(metrics_listener_metrics_defaults, :auth_token)
|
||||
),
|
||||
access: %{
|
||||
public:
|
||||
bool_env.(
|
||||
"PARRHESIA_METRICS_PUBLIC",
|
||||
Map.get(metrics_listener_metrics_access_defaults, :public, false)
|
||||
),
|
||||
private_networks_only:
|
||||
bool_env.(
|
||||
"PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY",
|
||||
Map.get(
|
||||
metrics_listener_metrics_access_defaults,
|
||||
:private_networks_only,
|
||||
true
|
||||
)
|
||||
),
|
||||
allow_cidrs:
|
||||
csv_env.(
|
||||
"PARRHESIA_METRICS_ALLOWED_CIDRS",
|
||||
Map.get(metrics_listener_metrics_access_defaults, :allow_cidrs, [])
|
||||
),
|
||||
allow_all: Map.get(metrics_listener_metrics_access_defaults, :allow_all, true)
|
||||
}
|
||||
}
|
||||
},
|
||||
auth: %{nip42_required: false, nip98_required_for_admin: true},
|
||||
baseline_acl: %{read: [], write: []}
|
||||
}
|
||||
)
|
||||
else
|
||||
%{public: public_listener}
|
||||
end
|
||||
|
||||
retention = [
|
||||
check_interval_hours:
|
||||
int_env.(
|
||||
"PARRHESIA_RETENTION_CHECK_INTERVAL_HOURS",
|
||||
Keyword.get(retention_defaults, :check_interval_hours, 24)
|
||||
),
|
||||
months_ahead:
|
||||
int_env.(
|
||||
"PARRHESIA_RETENTION_MONTHS_AHEAD",
|
||||
Keyword.get(retention_defaults, :months_ahead, 2)
|
||||
),
|
||||
max_db_bytes:
|
||||
infinity_or_int_env.(
|
||||
"PARRHESIA_RETENTION_MAX_DB_BYTES",
|
||||
Keyword.get(retention_defaults, :max_db_bytes, :infinity)
|
||||
),
|
||||
max_months_to_keep:
|
||||
infinity_or_int_env.(
|
||||
"PARRHESIA_RETENTION_MAX_MONTHS_TO_KEEP",
|
||||
Keyword.get(retention_defaults, :max_months_to_keep, :infinity)
|
||||
),
|
||||
max_partitions_to_drop_per_run:
|
||||
int_env.(
|
||||
"PARRHESIA_RETENTION_MAX_PARTITIONS_TO_DROP_PER_RUN",
|
||||
Keyword.get(retention_defaults, :max_partitions_to_drop_per_run, 1)
|
||||
)
|
||||
]
|
||||
|
||||
features = [
|
||||
verify_event_signatures:
|
||||
bool_env.(
|
||||
"PARRHESIA_FEATURES_VERIFY_EVENT_SIGNATURES",
|
||||
Keyword.get(features_defaults, :verify_event_signatures, true)
|
||||
),
|
||||
nip_45_count:
|
||||
bool_env.(
|
||||
"PARRHESIA_FEATURES_NIP_45_COUNT",
|
||||
Keyword.get(features_defaults, :nip_45_count, true)
|
||||
),
|
||||
nip_50_search:
|
||||
bool_env.(
|
||||
"PARRHESIA_FEATURES_NIP_50_SEARCH",
|
||||
Keyword.get(features_defaults, :nip_50_search, true)
|
||||
),
|
||||
nip_77_negentropy:
|
||||
bool_env.(
|
||||
"PARRHESIA_FEATURES_NIP_77_NEGENTROPY",
|
||||
Keyword.get(features_defaults, :nip_77_negentropy, true)
|
||||
),
|
||||
marmot_push_notifications:
|
||||
bool_env.(
|
||||
"PARRHESIA_FEATURES_MARMOT_PUSH_NOTIFICATIONS",
|
||||
Keyword.get(features_defaults, :marmot_push_notifications, false)
|
||||
)
|
||||
]
|
||||
|
||||
config :parrhesia, Parrhesia.Repo,
|
||||
url: database_url,
|
||||
pool_size: String.to_integer(System.get_env("POOL_SIZE") || "10")
|
||||
pool_size: pool_size,
|
||||
queue_target: queue_target,
|
||||
queue_interval: queue_interval
|
||||
|
||||
config :parrhesia, Parrhesia.Web.Endpoint,
|
||||
port: String.to_integer(System.get_env("PORT") || "4000")
|
||||
config :parrhesia,
|
||||
relay_url: string_env.("PARRHESIA_RELAY_URL", relay_url_default),
|
||||
acl: [
|
||||
protected_filters:
|
||||
json_env.(
|
||||
"PARRHESIA_ACL_PROTECTED_FILTERS",
|
||||
Keyword.get(acl_defaults, :protected_filters, [])
|
||||
)
|
||||
],
|
||||
identity: [
|
||||
path: string_env.("PARRHESIA_IDENTITY_PATH", nil),
|
||||
private_key: string_env.("PARRHESIA_IDENTITY_PRIVATE_KEY", nil)
|
||||
],
|
||||
sync: [
|
||||
path: string_env.("PARRHESIA_SYNC_PATH", nil),
|
||||
start_workers?:
|
||||
bool_env.(
|
||||
"PARRHESIA_SYNC_START_WORKERS",
|
||||
Keyword.get(Application.get_env(:parrhesia, :sync, []), :start_workers?, true)
|
||||
)
|
||||
],
|
||||
moderation_cache_enabled:
|
||||
bool_env.("PARRHESIA_MODERATION_CACHE_ENABLED", moderation_cache_enabled_default),
|
||||
enable_expiration_worker:
|
||||
bool_env.("PARRHESIA_ENABLE_EXPIRATION_WORKER", enable_expiration_worker_default),
|
||||
listeners: listeners,
|
||||
limits: limits,
|
||||
policies: policies,
|
||||
retention: retention,
|
||||
features: features
|
||||
|
||||
case System.get_env("PARRHESIA_EXTRA_CONFIG") do
|
||||
nil -> :ok
|
||||
"" -> :ok
|
||||
path -> import_config path
|
||||
end
|
||||
end
|
||||
|
||||
@@ -8,11 +8,34 @@ test_endpoint_port =
|
||||
value -> String.to_integer(value)
|
||||
end
|
||||
|
||||
config :parrhesia, Parrhesia.Web.Endpoint,
|
||||
port: test_endpoint_port,
|
||||
ip: {127, 0, 0, 1}
|
||||
config :parrhesia, :listeners,
|
||||
public: %{
|
||||
enabled: true,
|
||||
bind: %{ip: {127, 0, 0, 1}, port: test_endpoint_port},
|
||||
transport: %{scheme: :http, tls: %{mode: :disabled}},
|
||||
proxy: %{trusted_cidrs: [], honor_x_forwarded_for: true},
|
||||
network: %{allow_all: true},
|
||||
features: %{
|
||||
nostr: %{enabled: true},
|
||||
admin: %{enabled: true},
|
||||
metrics: %{enabled: true, access: %{private_networks_only: true}, auth_token: nil}
|
||||
},
|
||||
auth: %{nip42_required: false, nip98_required_for_admin: true},
|
||||
baseline_acl: %{read: [], write: []}
|
||||
}
|
||||
|
||||
config :parrhesia, enable_expiration_worker: false
|
||||
config :parrhesia,
|
||||
enable_expiration_worker: false,
|
||||
moderation_cache_enabled: false,
|
||||
identity: [
|
||||
path: Path.join(System.tmp_dir!(), "parrhesia_test_identity.json"),
|
||||
private_key: nil
|
||||
],
|
||||
sync: [
|
||||
path: Path.join(System.tmp_dir!(), "parrhesia_test_sync.json"),
|
||||
start_workers?: false
|
||||
],
|
||||
features: [verify_event_signatures: false]
|
||||
|
||||
pg_host = System.get_env("PGHOST")
|
||||
|
||||
|
||||
66
default.nix
66
default.nix
@@ -1,11 +1,16 @@
|
||||
{
|
||||
lib,
|
||||
beam,
|
||||
fetchFromGitHub,
|
||||
runCommand,
|
||||
autoconf,
|
||||
automake,
|
||||
libtool,
|
||||
pkg-config,
|
||||
vips,
|
||||
}: let
|
||||
pname = "parrhesia";
|
||||
version = "0.1.0";
|
||||
version = "0.5.0";
|
||||
|
||||
beamPackages = beam.packages.erlang_28.extend (
|
||||
final: _prev: {
|
||||
@@ -43,16 +48,69 @@
|
||||
beamPackages.fetchMixDeps {
|
||||
pname = "${pname}-mix-deps";
|
||||
inherit version src;
|
||||
hash = "sha256-1v2+Q1MHbu09r5OBaLehiR+JfMP0Q5OHaWuwrQDzZJU=";
|
||||
hash = "sha256-D69wuFnIChQzm1PmpIW+X/1sPpsIcDHe4V5fKmFeJ3k=";
|
||||
}
|
||||
else null;
|
||||
|
||||
# lib_secp256k1 is a :make dep and may not be present in fetchMixDeps output.
|
||||
# Inject the Hex package explicitly, then vendor upstream bitcoin-core/secp256k1
|
||||
# sources to avoid build-time network access.
|
||||
libSecp256k1Hex = beamPackages.fetchHex {
|
||||
pkg = "lib_secp256k1";
|
||||
version = "0.7.1";
|
||||
sha256 = "sha256-eL3TZhoXRIr/Wu7FynTI3bwJsB8Oz6O6Gro+iuR6srM=";
|
||||
};
|
||||
|
||||
elixirMakeHex = beamPackages.fetchHex {
|
||||
pkg = "elixir_make";
|
||||
version = "0.9.0";
|
||||
sha256 = "sha256-2yPU/Yt1dGKtAviqc0MaQm/mZxyAsgDZcQyvPR3Q/9s=";
|
||||
};
|
||||
|
||||
secp256k1Src = fetchFromGitHub {
|
||||
owner = "bitcoin-core";
|
||||
repo = "secp256k1";
|
||||
rev = "v0.7.1";
|
||||
hash = "sha256-DnBgetf+98n7B1JGtyTdxyc+yQ51A3+ueTIPPSWCm4E=";
|
||||
};
|
||||
|
||||
patchedMixFodDeps =
|
||||
if mixFodDeps == null
|
||||
then null
|
||||
else
|
||||
runCommand mixFodDeps.name {} ''
|
||||
mkdir -p $out
|
||||
cp -r --no-preserve=mode ${mixFodDeps}/. $out
|
||||
chmod -R u+w $out
|
||||
|
||||
rm -rf $out/lib_secp256k1
|
||||
cp -r ${libSecp256k1Hex} $out/lib_secp256k1
|
||||
chmod -R u+w $out/lib_secp256k1
|
||||
|
||||
rm -rf $out/elixir_make
|
||||
cp -r ${elixirMakeHex} $out/elixir_make
|
||||
|
||||
rm -rf $out/lib_secp256k1/c_src/secp256k1
|
||||
mkdir -p $out/lib_secp256k1/c_src/secp256k1
|
||||
cp -r ${secp256k1Src}/. $out/lib_secp256k1/c_src/secp256k1/
|
||||
chmod -R u+w $out/lib_secp256k1/c_src/secp256k1
|
||||
|
||||
# mixRelease may copy deps without preserving +x bits, so avoid relying
|
||||
# on executable mode for autogen.sh.
|
||||
substituteInPlace $out/lib_secp256k1/Makefile \
|
||||
--replace-fail "./autogen.sh" "sh ./autogen.sh"
|
||||
|
||||
touch $out/lib_secp256k1/c_src/secp256k1/.fetched
|
||||
'';
|
||||
in
|
||||
beamPackages.mixRelease {
|
||||
inherit pname version src mixFodDeps;
|
||||
inherit pname version src;
|
||||
|
||||
mixFodDeps = patchedMixFodDeps;
|
||||
|
||||
mixEnv = "prod";
|
||||
removeCookie = false;
|
||||
nativeBuildInputs = [pkg-config];
|
||||
nativeBuildInputs = [pkg-config autoconf automake libtool];
|
||||
buildInputs = [vips];
|
||||
|
||||
preConfigure = ''
|
||||
|
||||
81
devenv.nix
81
devenv.nix
@@ -75,33 +75,36 @@ in {
|
||||
});
|
||||
nostr-bench = pkgs.callPackage ./nix/nostr-bench.nix {};
|
||||
in
|
||||
with pkgs; [
|
||||
just
|
||||
git
|
||||
# Nix code formatter
|
||||
alejandra
|
||||
# i18n
|
||||
icu
|
||||
# PostgreSQL client utilities
|
||||
postgresql
|
||||
# image processing library
|
||||
vips-mozjpeg
|
||||
# Mermaid diagram generator
|
||||
mermaid-cli
|
||||
# Nostr CLI client
|
||||
nak
|
||||
# Nostr relay benchmark client
|
||||
nostr-bench
|
||||
# Nostr reference servers
|
||||
nostr-rs-relay
|
||||
strfry
|
||||
];
|
||||
|
||||
# https://devenv.sh/tests/
|
||||
# enterTest = ''
|
||||
# echo "Running tests"
|
||||
# git --version | grep "2.42.0"
|
||||
# '';
|
||||
with pkgs;
|
||||
[
|
||||
just
|
||||
gcc
|
||||
git
|
||||
gnumake
|
||||
autoconf
|
||||
automake
|
||||
libtool
|
||||
pkg-config
|
||||
# Nix code formatter
|
||||
alejandra
|
||||
# i18n
|
||||
icu
|
||||
# PostgreSQL client utilities
|
||||
postgresql
|
||||
# image processing library
|
||||
vips-mozjpeg
|
||||
# Mermaid diagram generator
|
||||
mermaid-cli
|
||||
# Nostr CLI client
|
||||
nak
|
||||
# Nostr relay benchmark client
|
||||
nostr-bench
|
||||
# Nostr reference servers
|
||||
nostr-rs-relay
|
||||
]
|
||||
++ lib.optionals pkgs.stdenv.hostPlatform.isx86_64 [
|
||||
strfry
|
||||
];
|
||||
|
||||
# https://devenv.sh/languages/
|
||||
languages = {
|
||||
@@ -121,15 +124,33 @@ in {
|
||||
services.postgres = {
|
||||
enable = true;
|
||||
package = pkgs.postgresql_18;
|
||||
|
||||
# Some tuning for the benchmark - doesn't seem to do much
|
||||
settings = {
|
||||
max_connections = 300;
|
||||
shared_buffers = "1GB";
|
||||
effective_cache_size = "3GB";
|
||||
work_mem = "16MB";
|
||||
maintenance_work_mem = "256MB";
|
||||
wal_compression = "on";
|
||||
checkpoint_timeout = "15min";
|
||||
checkpoint_completion_target = 0.9;
|
||||
min_wal_size = "1GB";
|
||||
max_wal_size = "4GB";
|
||||
random_page_cost = 1.1;
|
||||
effective_io_concurrency = 200;
|
||||
};
|
||||
|
||||
initialDatabases = [{name = "parrhesia_dev";} {name = "parrhesia_test";}];
|
||||
initialScript = ''
|
||||
CREATE ROLE dev WITH LOGIN PASSWORD 'dev' SUPERUSER;
|
||||
|
||||
-- Make sure we get the right collation
|
||||
ALTER database template1 is_template=false;
|
||||
|
||||
DROP database template1;
|
||||
|
||||
CREATE DATABASE template1 WITH OWNER = agent
|
||||
CREATE DATABASE template1
|
||||
ENCODING = 'UTF8'
|
||||
TABLESPACE = pg_default
|
||||
LC_COLLATE = 'de_DE.UTF-8'
|
||||
@@ -141,12 +162,10 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
# https://devenv.sh/pre-commit-hooks/
|
||||
# pre-commit.hooks.shellcheck.enable = true;
|
||||
|
||||
dotenv.enable = true;
|
||||
devenv.warnOnNewVersion = false;
|
||||
|
||||
# https://devenv.sh/pre-commit-hooks/
|
||||
git-hooks.hooks = {
|
||||
alejandra.enable = true;
|
||||
check-added-large-files = {
|
||||
|
||||
15
docs/ARCH.md
15
docs/ARCH.md
@@ -68,10 +68,10 @@ Notes:
|
||||
## 3) System architecture (high level)
|
||||
|
||||
```text
|
||||
WS/HTTP Edge (Bandit/Plug)
|
||||
Configured WS/HTTP Listeners (Bandit/Plug)
|
||||
-> Protocol Decoder/Encoder
|
||||
-> Command Router (EVENT/REQ/CLOSE/AUTH/COUNT/NEG-*)
|
||||
-> Policy Pipeline (validation, auth, ACL, PoW, NIP-70)
|
||||
-> Policy Pipeline (listener baseline, validation, auth, ACL, PoW, NIP-70)
|
||||
-> Event Service / Query Service
|
||||
-> Storage Port (behavior)
|
||||
-> Postgres Adapter (Ecto)
|
||||
@@ -90,15 +90,24 @@ WS/HTTP Edge (Bandit/Plug)
|
||||
4. `Parrhesia.Subscriptions.Supervisor` – subscription index + fanout workers
|
||||
5. `Parrhesia.Auth.Supervisor` – AUTH challenge/session tracking
|
||||
6. `Parrhesia.Policy.Supervisor` – rate limiters / ACL caches
|
||||
7. `Parrhesia.Web.Endpoint` – WS + HTTP ingress
|
||||
7. `Parrhesia.Web.Endpoint` – supervises configured WS + HTTP listeners
|
||||
8. `Parrhesia.Tasks.Supervisor` – background jobs (expiry purge, maintenance)
|
||||
|
||||
Failure model:
|
||||
|
||||
- Connection failures are isolated per socket process.
|
||||
- Listener failures are isolated per Bandit child and restarted independently.
|
||||
- Storage outages degrade with explicit `OK/CLOSED` error prefixes (`error:`) per NIP-01.
|
||||
- Non-critical workers are `:transient`; core infra is `:permanent`.
|
||||
|
||||
Ingress model:
|
||||
|
||||
- Ingress is defined through `config :parrhesia, :listeners, ...`.
|
||||
- Each listener has its own bind/transport settings, TLS mode, proxy trust, network allowlist, enabled features (`nostr`, `admin`, `metrics`), auth requirements, and baseline read/write ACL.
|
||||
- Listeners can therefore expose different security postures, for example a public relay listener and a VPN-only sync-capable listener.
|
||||
- TLS-capable listeners support direct server TLS, mutual TLS with optional client pin checks, and proxy-terminated TLS identity on explicitly trusted proxy hops.
|
||||
- Certificate reload is currently implemented as admin-triggered listener restart from disk rather than background file watching.
|
||||
|
||||
## 5) Core runtime components
|
||||
|
||||
### 5.1 Connection process
|
||||
|
||||
234
docs/CLUSTER.md
Normal file
234
docs/CLUSTER.md
Normal file
@@ -0,0 +1,234 @@
|
||||
# Parrhesia clustering and distributed fanout
|
||||
|
||||
This document describes:
|
||||
|
||||
1. the **current** distributed fanout behavior implemented today, and
|
||||
2. a practical evolution path to a more production-grade clustered relay.
|
||||
|
||||
---
|
||||
|
||||
## 1) Current state (implemented today)
|
||||
|
||||
### 1.1 What exists right now
|
||||
|
||||
Parrhesia currently includes a lightweight multi-node live fanout path (untested!):
|
||||
|
||||
- `Parrhesia.Fanout.MultiNode` (`lib/parrhesia/fanout/multi_node.ex`)
|
||||
- GenServer that joins a `:pg` process group.
|
||||
- Receives locally-published events and forwards them to other group members.
|
||||
- Receives remote events and performs local fanout lookup.
|
||||
- `Parrhesia.Web.Connection` (`lib/parrhesia/web/connection.ex`)
|
||||
- On successful ingest, after ACK scheduling, it does:
|
||||
1. local fanout (`fanout_event/1`), then
|
||||
2. cross-node publish (`maybe_publish_multi_node/1`).
|
||||
- `Parrhesia.Subscriptions.Supervisor` (`lib/parrhesia/subscriptions/supervisor.ex`)
|
||||
- Starts `Parrhesia.Fanout.MultiNode` unconditionally.
|
||||
|
||||
In other words: **if BEAM nodes are connected, live events are fanned out cross-node**.
|
||||
|
||||
### 1.2 What is not included yet
|
||||
|
||||
- No automatic cluster formation/discovery (no `libcluster`, DNS polling, gossip, etc.).
|
||||
- No durable inter-node event transport.
|
||||
- No replay/recovery of missed cross-node live events.
|
||||
- No explicit per-node delivery ACK between relay nodes.
|
||||
|
||||
---
|
||||
|
||||
## 2) Current runtime behavior in detail
|
||||
|
||||
### 2.1 Local ingest flow and publish ordering
|
||||
|
||||
For an accepted event in `Parrhesia.Web.Connection`:
|
||||
|
||||
1. validate/policy/persist path runs.
|
||||
2. Client receives `OK` reply.
|
||||
3. A post-ACK message triggers:
|
||||
- local fanout (`Index.candidate_subscription_keys/1` + send `{:fanout_event, ...}`),
|
||||
- multi-node publish (`MultiNode.publish/1`).
|
||||
|
||||
Important semantics:
|
||||
|
||||
- Regular persisted events: ACK implies DB persistence succeeded.
|
||||
- Ephemeral events: ACK implies accepted by policy, but no DB durability.
|
||||
- Cross-node fanout happens **after** ACK path is scheduled.
|
||||
|
||||
### 2.2 Multi-node transport mechanics
|
||||
|
||||
`Parrhesia.Fanout.MultiNode` uses `:pg` membership:
|
||||
|
||||
- On init:
|
||||
- ensures `:pg` is started,
|
||||
- joins group `Parrhesia.Fanout.MultiNode`.
|
||||
- On publish:
|
||||
- gets all group members,
|
||||
- excludes itself,
|
||||
- sends `{:remote_fanout_event, event}` to each member pid.
|
||||
- On remote receive:
|
||||
- runs local subscription candidate narrowing via `Parrhesia.Subscriptions.Index`,
|
||||
- forwards matching candidates to local connection owners as `{:fanout_event, sub_id, event}`.
|
||||
|
||||
No republish on remote receive, so this path does not create fanout loops.
|
||||
|
||||
### 2.3 Subscription index locality
|
||||
|
||||
The subscription index is local ETS state per node (`Parrhesia.Subscriptions.Index`).
|
||||
|
||||
- Each node only tracks subscriptions of its local websocket processes.
|
||||
- Each node independently decides which local subscribers match a remote event.
|
||||
- There is no global cross-node subscription registry.
|
||||
|
||||
### 2.4 Delivery model and guarantees (current)
|
||||
|
||||
Current model is **best-effort live propagation** among connected nodes.
|
||||
|
||||
- If nodes are connected and healthy, remote live subscribers should receive events quickly.
|
||||
- If there is a netsplit or temporary disconnection:
|
||||
- remote live subscribers may miss events,
|
||||
- persisted events can still be recovered by normal `REQ`/history query,
|
||||
- ephemeral events are not recoverable.
|
||||
|
||||
### 2.5 Cluster preconditions
|
||||
|
||||
For cross-node fanout to work, operators must provide distributed BEAM connectivity:
|
||||
|
||||
- consistent Erlang cookie,
|
||||
- named nodes (`--name`/`--sname`),
|
||||
- network reachability for Erlang distribution ports,
|
||||
- explicit node connections (or external discovery tooling).
|
||||
|
||||
Parrhesia currently does not automate these steps.
|
||||
|
||||
---
|
||||
|
||||
## 3) Operational characteristics of current design
|
||||
|
||||
### 3.1 Performance shape
|
||||
|
||||
For each accepted event on one node:
|
||||
|
||||
- one local fanout lookup + local sends,
|
||||
- one cluster publish that sends to `N - 1` remote bus members,
|
||||
- on each remote node: one local fanout lookup + local sends.
|
||||
|
||||
So inter-node traffic scales roughly linearly with node count per event (full-cluster broadcast).
|
||||
|
||||
This is simple and low-latency for small-to-medium clusters, but can become expensive as node count grows.
|
||||
|
||||
### 3.2 Failure behavior
|
||||
|
||||
- Remote node down: send attempts to that member stop once membership updates; no replay.
|
||||
- Netsplit: live propagation gap during split.
|
||||
- Recovery: local clients can catch up via DB-backed queries (except ephemeral kinds).
|
||||
|
||||
### 3.3 Consistency expectations
|
||||
|
||||
- No global total-ordering guarantee for live delivery across nodes.
|
||||
- Per-connection ordering is preserved by each connection process queue/drain behavior.
|
||||
- Duplicate suppression for ingestion uses storage semantics (`duplicate_event`), but transport itself is not exactly-once.
|
||||
|
||||
### 3.4 Observability today
|
||||
|
||||
Relevant metrics exist for fanout/queue pressure (see `Parrhesia.Telemetry`), e.g.:
|
||||
|
||||
- `parrhesia.fanout.duration.ms`
|
||||
- `parrhesia.connection.outbound_queue.depth`
|
||||
- `parrhesia.connection.outbound_queue.pressure`
|
||||
- `parrhesia.connection.outbound_queue.overflow.count`
|
||||
|
||||
These are useful but do not yet fully separate local-vs-remote fanout pipeline stages.
|
||||
|
||||
---
|
||||
|
||||
## 4) Practical extension path to a fully-fledged clustered system
|
||||
|
||||
A realistic path is incremental. Suggested phases:
|
||||
|
||||
### Phase A — hardened BEAM cluster control plane
|
||||
|
||||
1. Add cluster discovery/formation (e.g. `libcluster`) with environment-specific topology:
|
||||
- Kubernetes DNS,
|
||||
- static nodes,
|
||||
- cloud VM discovery.
|
||||
2. Add clear node liveness/partition telemetry and alerts.
|
||||
3. Provide operator docs for cookie, node naming, and network requirements.
|
||||
|
||||
Outcome: simpler and safer cluster operations, same data plane semantics.
|
||||
|
||||
### Phase B — resilient distributed fanout data plane
|
||||
|
||||
Introduce a durable fanout stream for persisted events.
|
||||
|
||||
Recommended pattern:
|
||||
|
||||
1. On successful DB commit of event, append to a monotonic fanout log (or use DB sequence-based stream view).
|
||||
2. Each relay node runs a consumer with a stored cursor.
|
||||
3. On restart/partition recovery, node resumes from cursor and replays missed events.
|
||||
4. Local fanout remains same (subscription index + per-connection queues).
|
||||
|
||||
Semantics target:
|
||||
|
||||
- **at-least-once** node-to-node propagation,
|
||||
- replay after downtime,
|
||||
- idempotent handling keyed by event id.
|
||||
|
||||
Notes:
|
||||
|
||||
- Ephemeral events can remain best-effort (or have a separate short-lived transport), since no storage source exists for replay.
|
||||
|
||||
### Phase C — scale and efficiency improvements
|
||||
|
||||
As cluster size grows, avoid naive full broadcast where possible:
|
||||
|
||||
1. Optional node-level subscription summaries (coarse bloom/bitset or keyed summaries) to reduce unnecessary remote sends.
|
||||
2. Shard fanout workers for CPU locality and mailbox control.
|
||||
3. Batch remote delivery payloads.
|
||||
4. Separate traffic classes (e.g. Marmot-heavy streams vs generic) with independent queues.
|
||||
|
||||
Outcome: higher throughput per node and lower inter-node amplification.
|
||||
|
||||
### Phase D — stronger observability and SLOs
|
||||
|
||||
Add explicit distributed pipeline metrics:
|
||||
|
||||
- publish enqueue/dequeue latency,
|
||||
- cross-node delivery lag (commit -> remote fanout enqueue),
|
||||
- replay backlog depth,
|
||||
- per-node dropped/expired transport messages,
|
||||
- partition detection counters.
|
||||
|
||||
Define cluster SLO examples:
|
||||
|
||||
- p95 commit->remote-live enqueue under nominal load,
|
||||
- max replay catch-up time after node restart,
|
||||
- bounded message loss for best-effort channels.
|
||||
|
||||
---
|
||||
|
||||
## 5) How a fully-fledged system would behave in practice
|
||||
|
||||
With Phases A-D implemented, expected behavior:
|
||||
|
||||
- **Normal operation:**
|
||||
- low-latency local fanout,
|
||||
- remote nodes receive events via stream consumers quickly,
|
||||
- consistent operational visibility of end-to-end lag.
|
||||
- **Node restart:**
|
||||
- node reconnects and replays from stored cursor,
|
||||
- local subscribers begin receiving new + missed persisted events.
|
||||
- **Transient partition:**
|
||||
- live best-effort path may degrade,
|
||||
- persisted events converge after partition heals via replay.
|
||||
- **High fanout bursts:**
|
||||
- batching + sharding keeps queue pressure bounded,
|
||||
- overflow policies remain connection-local and measurable.
|
||||
|
||||
This approach gives a good trade-off between Nostr relay latency and distributed robustness without requiring strict exactly-once semantics.
|
||||
|
||||
---
|
||||
|
||||
## 6) Current status summary
|
||||
|
||||
Today, Parrhesia already supports **lightweight distributed live fanout** when BEAM nodes are connected.
|
||||
|
||||
It is intentionally simple and fast for smaller clusters, and provides a solid base for a more durable, observable cluster architecture as relay scale and availability requirements grow.
|
||||
140 docs/KHATRU.md (new file)
@@ -0,0 +1,140 @@
|
||||
# Khatru-Inspired Runtime Improvements
|
||||
|
||||
This document collects refactoring and extension ideas learned from studying Khatru-style relay design.
|
||||
|
||||
It is intentionally **not** about the new public API surface or the sync ACL model. Those live in `docs/slop/LOCAL_API.md` and `docs/SYNC.md`.
|
||||
|
||||
The focus here is runtime shape, protocol behavior, and operator-visible relay features.
|
||||
|
||||
---
|
||||
|
||||
## 1. Why This Matters
|
||||
|
||||
Khatru appears mature mainly because it exposes clearer relay pipeline stages.
|
||||
|
||||
That gives three practical benefits:
|
||||
|
||||
- less policy drift between storage, websocket, and management code,
|
||||
- easier feature addition without hard-coding more branches into one connection module,
|
||||
- better composability for relay profiles with different trust and traffic models.
|
||||
|
||||
Parrhesia should borrow that clarity without copying Khatru's code-first hook model wholesale.
|
||||
|
||||
---
|
||||
|
||||
## 2. Proposed Runtime Refactors
|
||||
|
||||
### 2.1 Staged policy pipeline
|
||||
|
||||
Parrhesia should stop treating policy as one coarse `EventPolicy` module plus scattered special cases.
|
||||
|
||||
Recommended internal stages:
|
||||
|
||||
1. connection admission
|
||||
2. authentication challenge and validation
|
||||
3. publish/write authorization
|
||||
4. query/count authorization
|
||||
5. stream subscription authorization
|
||||
6. negentropy authorization
|
||||
7. response shaping
|
||||
8. broadcast/fanout suppression
|
||||
|
||||
This is an internal runtime refactor. It does not imply a new public API.
|
||||
|
||||
### 2.2 Richer internal request context
|
||||
|
||||
The runtime should carry a structured request context through all stages.
|
||||
|
||||
Useful fields:
|
||||
|
||||
- authenticated pubkeys
|
||||
- caller kind
|
||||
- remote IP
|
||||
- subscription id
|
||||
- peer id
|
||||
- negentropy session flag
|
||||
- internal-call flag
|
||||
|
||||
This reduces ad-hoc branching and makes audit/telemetry more coherent.
|
||||
|
||||
### 2.3 Separate policy from storage presence tables
|
||||
|
||||
Moderation state should remain data.
|
||||
|
||||
Runtime enforcement should be a first-class layer that consumes that data, not a side effect of whether a table exists.
|
||||
|
||||
This is especially important for:
|
||||
|
||||
- blocked IP enforcement,
|
||||
- pubkey allowlists,
|
||||
- future kind- or tag-scoped restrictions.
|
||||
|
||||
---
|
||||
|
||||
## 3. Protocol and Relay Features
|
||||
|
||||
### 3.1 Real COUNT sketches
|
||||
|
||||
Parrhesia currently returns a synthetic `hll` payload for NIP-45-style count responses.
|
||||
|
||||
If approximate count exchange matters, implement a real reusable HLL sketch path instead of hashing `filters + count`.
|
||||
|
||||
### 3.2 Relay identity in NIP-11
|
||||
|
||||
Once Parrhesia owns a stable server identity, NIP-11 should expose the relay pubkey instead of returning `nil`.
|
||||
|
||||
This is useful beyond sync:
|
||||
|
||||
- operator visibility,
|
||||
- relay fingerprinting,
|
||||
- future trust tooling.
|
||||
|
||||
### 3.3 Connection-level IP enforcement
|
||||
|
||||
Blocked IP support should be enforced on actual connection admission, not only stored in management tables.
|
||||
|
||||
This should happen early, before expensive protocol handling.
|
||||
|
||||
### 3.4 Better response shaping
|
||||
|
||||
Introduce a narrow internal response shaping layer for cases where returned events or counts need controlled rewriting or suppression.
|
||||
|
||||
Examples:
|
||||
|
||||
- hide fields for specific relay profiles,
|
||||
- suppress rebroadcast of locally-ingested remote sync traffic,
|
||||
- shape relay notices consistently.
|
||||
|
||||
This should stay narrow and deterministic. It should not become arbitrary app semantics.
|
||||
|
||||
---
|
||||
|
||||
## 4. Suggested Extension Points
|
||||
|
||||
These should be internal runtime seams, not necessarily public interfaces:
|
||||
|
||||
- `ConnectionPolicy`
|
||||
- `AuthPolicy`
|
||||
- `ReadPolicy`
|
||||
- `WritePolicy`
|
||||
- `NegentropyPolicy`
|
||||
- `ResponsePolicy`
|
||||
- `BroadcastPolicy`
|
||||
|
||||
They may initially be plain modules with well-defined callbacks or functions.
|
||||
|
||||
The point is not pluggability for its own sake. The point is to make policy stages explicit and testable.
|
||||
|
||||
---
|
||||
|
||||
## 5. Near-Term Priority
|
||||
|
||||
Recommended order:
|
||||
|
||||
1. enforce blocked IPs and any future connection-gating on the real connection path
|
||||
2. split the current websocket flow into explicit read/write/negentropy policy stages
|
||||
3. enrich runtime request context and telemetry metadata
|
||||
4. expose relay pubkey in NIP-11 once identity lands
|
||||
5. replace fake HLL payloads with a real approximate-count implementation if NIP-45 support matters operationally
|
||||
|
||||
This keeps the runtime improvements incremental and independent from the ongoing API and ACL implementation.
|
||||
@@ -1,69 +0,0 @@
|
||||
# Marmot operations guide (relay operator tuning)
|
||||
|
||||
This document captures practical limits and operational defaults for Marmot-heavy traffic (`443`, `445`, `10051`, wrapped `1059`, optional media/push flows).
|
||||
|
||||
## 1) Recommended baseline limits
|
||||
|
||||
Use these as a starting point and tune from production telemetry.
|
||||
|
||||
```elixir
|
||||
config :parrhesia,
|
||||
limits: [
|
||||
max_filter_limit: 500,
|
||||
max_filters_per_req: 16,
|
||||
max_outbound_queue: 256,
|
||||
outbound_drain_batch_size: 64
|
||||
],
|
||||
policies: [
|
||||
# Marmot group routing/query guards
|
||||
marmot_require_h_for_group_queries: true,
|
||||
marmot_group_max_h_values_per_filter: 32,
|
||||
marmot_group_max_query_window_seconds: 2_592_000,
|
||||
|
||||
# Kind 445 retention
|
||||
mls_group_event_ttl_seconds: 300,
|
||||
|
||||
# MIP-04 metadata controls
|
||||
marmot_media_max_imeta_tags_per_event: 8,
|
||||
marmot_media_max_field_value_bytes: 1024,
|
||||
marmot_media_max_url_bytes: 2048,
|
||||
marmot_media_allowed_mime_prefixes: [],
|
||||
marmot_media_reject_mip04_v1: true,
|
||||
|
||||
# MIP-05 push controls (optional)
|
||||
marmot_push_server_pubkeys: [],
|
||||
marmot_push_max_relay_tags: 16,
|
||||
marmot_push_max_payload_bytes: 65_536,
|
||||
marmot_push_max_trigger_age_seconds: 120,
|
||||
marmot_push_require_expiration: true,
|
||||
marmot_push_max_expiration_window_seconds: 120,
|
||||
marmot_push_max_server_recipients: 1
|
||||
]
|
||||
```
|
||||
|
||||
## 2) Index expectations for Marmot workloads
|
||||
|
||||
The Postgres adapter relies on dedicated partial tag indexes for hot Marmot selectors:
|
||||
|
||||
- `event_tags_h_value_created_at_idx` for `#h` group routing
|
||||
- `event_tags_i_value_created_at_idx` for `#i` keypackage reference lookups
|
||||
|
||||
Query-plan regression tests assert these paths remain usable for heavy workloads.
|
||||
|
||||
## 3) Telemetry to watch
|
||||
|
||||
Key metrics for Marmot traffic and pressure:
|
||||
|
||||
- `parrhesia.ingest.duration.ms{traffic_class="marmot|generic"}`
|
||||
- `parrhesia.query.duration.ms{traffic_class="marmot|generic"}`
|
||||
- `parrhesia.fanout.duration.ms{traffic_class="marmot|generic"}`
|
||||
- `parrhesia.connection.outbound_queue.depth{traffic_class=...}`
|
||||
- `parrhesia.connection.outbound_queue.pressure{traffic_class=...}`
|
||||
- `parrhesia.connection.outbound_queue.pressure_events.count{traffic_class=...}`
|
||||
- `parrhesia.connection.outbound_queue.overflow.count{traffic_class=...}`
|
||||
|
||||
Operational target: keep queue pressure below sustained 0.75 and avoid overflow spikes during `445` bursts.
|
||||
|
||||
## 4) Fault and recovery expectations
|
||||
|
||||
During storage outages, Marmot group-flow writes must fail with explicit `OK false` errors. After recovery, reordered group events should still be queryable deterministically, ordered by `created_at DESC, id ASC`.
|
||||
354 docs/NIP-DBSYNC.md (new file)
@@ -0,0 +1,354 @@
|
||||
# NIP-DBSYNC — Minimal Mutation Events over Nostr
|
||||
|
||||
`draft` `optional`
|
||||
|
||||
Defines a minimal event format for publishing immutable application mutation events over Nostr.
|
||||
|
||||
This draft intentionally standardizes only the wire format for mutation transport. It does **not** standardize database replication strategy, conflict resolution, relay retention, or key derivation.
|
||||
|
||||
---
|
||||
|
||||
## Abstract
|
||||
|
||||
This NIP defines one regular event kind, **5000**, for signed mutation events.
|
||||
|
||||
A mutation event identifies:
|
||||
|
||||
- the object namespace being mutated,
|
||||
- the object identifier within that namespace,
|
||||
- the mutation operation,
|
||||
- an optional parent mutation event,
|
||||
- an application-defined payload.
|
||||
|
||||
The purpose of this NIP is to make signed mutation logs portable across Nostr clients and relays without requiring relays to implement database-specific behavior.
|
||||
|
||||
---
|
||||
|
||||
## Motivation
|
||||
|
||||
Many applications need a way to distribute signed state changes across multiple publishers, consumers, or services.
|
||||
|
||||
Today this can be done with private event kinds, but private schemas make cross-implementation interoperability harder than necessary. This NIP defines a small shared envelope for mutation events while leaving application-specific state semantics in the payload.
|
||||
|
||||
This NIP is intended for use cases such as:
|
||||
|
||||
- synchronizing object changes between cooperating services,
|
||||
- publishing auditable mutation logs,
|
||||
- replaying application events from ordinary Nostr relays,
|
||||
- bridging non-Nostr systems into a Nostr-based event stream.
|
||||
|
||||
This NIP is **not** a consensus protocol. It does not provide:
|
||||
|
||||
- total ordering,
|
||||
- transactional guarantees,
|
||||
- global conflict resolution,
|
||||
- authorization rules,
|
||||
- guaranteed relay retention.
|
||||
|
||||
Applications that require those properties MUST define them separately.
|
||||
|
||||
---
|
||||
|
||||
## Specification
|
||||
|
||||
### Event Kind
|
||||
|
||||
| Kind | Category | Name |
|
||||
|------|----------|------|
|
||||
| 5000 | Regular | Mutation |
|
||||
|
||||
Kind `5000` is a regular event. Relays that support this NIP MAY store it like any other regular event. Note that kind `5000` falls inside the `5000`–`5999` range used by NIP-90 job request events; deployments sharing relays with DVM traffic should account for this potential kind collision.
|
||||
|
||||
This NIP does **not** require relays to:
|
||||
|
||||
- retain all historical events,
|
||||
- index any specific tag beyond normal NIP-01 behavior,
|
||||
- deliver events in causal or chronological order,
|
||||
- detect or resolve conflicts.
|
||||
|
||||
Applications that depend on durable replay or custom indexing MUST choose relays whose policies satisfy those needs.
|
||||
|
||||
### Event Structure
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "<32-byte lowercase hex>",
|
||||
"pubkey": "<32-byte lowercase hex>",
|
||||
"created_at": "<unix timestamp, seconds>",
|
||||
"kind": 5000,
|
||||
"tags": [
|
||||
["r", "<resource namespace>"],
|
||||
["i", "<object identifier>"],
|
||||
["op", "<mutation operation>"],
|
||||
["e", "<parent mutation event id>"]
|
||||
],
|
||||
"content": "<JSON-encoded application payload>",
|
||||
"sig": "<64-byte lowercase hex>"
|
||||
}
|
||||
```
|
||||
|
||||
The `content` field is a JSON-encoded string. Its structure is defined below.
|
||||
|
||||
---
|
||||
|
||||
## Tags
|
||||
|
||||
| Tag | Required | Description |
|
||||
|-----|----------|-------------|
|
||||
| `r` | Yes | Stable resource namespace for the mutated object type. Reverse-DNS style names are RECOMMENDED, for example `com.example.accounts.user`. |
|
||||
| `i` | Yes | Opaque object identifier, unique within the `r` namespace. Consumers MUST treat this as a string. |
|
||||
| `op` | Yes | Mutation operation. This NIP defines only `upsert` and `delete`. |
|
||||
| `e` | No | Parent mutation event id, if the publisher wants to express ancestry. At most one `e` tag SHOULD be included in this version of the protocol. |
|
||||
| `v` | No | Application payload schema version as a string. RECOMMENDED when the payload format may evolve over time. |
|
||||
|
||||
### Tag Rules
|
||||
|
||||
Publishers:
|
||||
|
||||
- MUST include exactly one `r` tag.
|
||||
- MUST include exactly one `i` tag.
|
||||
- MUST include exactly one `op` tag.
|
||||
- MUST set `op` to either `upsert` or `delete`.
|
||||
- SHOULD include at most one `e` tag.
|
||||
- MAY include one `v` tag.
|
||||
|
||||
Consumers:
|
||||
|
||||
- MUST ignore unknown tags.
|
||||
- MUST NOT assume tag ordering.
|
||||
- MUST treat the `e` tag as an ancestry hint, not as proof of global ordering.
|
||||
|
||||
### Resource Namespaces
|
||||
|
||||
The `r` tag identifies an application-level object type.
|
||||
|
||||
This NIP does not define a global registry of resource namespaces. To reduce collisions, publishers SHOULD use a stable namespace they control, such as reverse-DNS notation.
|
||||
|
||||
Examples:
|
||||
|
||||
- `com.example.accounts.user`
|
||||
- `org.example.inventory.item`
|
||||
- `net.example.billing.invoice`
|
||||
|
||||
Publishers MUST document the payload schema associated with each resource namespace they use.
|
||||
|
||||
---
|
||||
|
||||
## Content Payload
|
||||
|
||||
The `content` field MUST be a JSON-encoded object.
|
||||
|
||||
```json
|
||||
{
|
||||
"value": {},
|
||||
"patch": "merge"
|
||||
}
|
||||
```
|
||||
|
||||
| Field | Required | Description |
|
||||
|-------|----------|-------------|
|
||||
| `value` | Yes | Application-defined mutation payload. For `upsert`, this is the state fragment or full post-mutation state being published. For `delete`, this MAY be an empty object or a small reason object. |
|
||||
| `patch` | No | How `value` should be interpreted. This NIP defines `merge` and `replace`. If omitted, consumers MUST treat the interpretation as application-defined. |
|
||||
|
||||
### Payload Rules
|
||||
|
||||
For `op = upsert`:
|
||||
|
||||
- `value` MUST be a JSON object.
|
||||
- Publishers SHOULD publish either:
|
||||
- a partial object intended to be merged, or
|
||||
- a full post-mutation object intended to replace prior state.
|
||||
- If the interpretation is important for interoperability, publishers SHOULD set `patch` to `merge` or `replace`.
|
||||
|
||||
For `op = delete`:
|
||||
|
||||
- `value` MAY be `{}`.
|
||||
- Consumers MUST treat `delete` as an application-level tombstone signal.
|
||||
- This NIP does not define whether deletion means hard delete, soft delete, archival, or hiding. Applications MUST define that separately.
|
||||
|
||||
### Serialization
|
||||
|
||||
All payload values MUST be JSON-serializable.
|
||||
|
||||
The following representations are RECOMMENDED:
|
||||
|
||||
| Type | Representation |
|
||||
|------|----------------|
|
||||
| Timestamp / datetime | ISO 8601 string |
|
||||
| Decimal | String |
|
||||
| Binary | Base64 string |
|
||||
| Null | JSON `null` |
|
||||
|
||||
Publishers MAY define additional type mappings, but those mappings are application-specific and MUST be documented outside this NIP.
|
||||
|
||||
---
|
||||
|
||||
## Ancestry and Replay
|
||||
|
||||
The optional `e` tag allows a publisher to indicate which prior mutation event it considered the parent when creating a new mutation.
|
||||
|
||||
This supports applications that want ancestry hints for:
|
||||
|
||||
- local conflict detection,
|
||||
- replay ordering,
|
||||
- branch inspection,
|
||||
- audit tooling.
|
||||
|
||||
However:
|
||||
|
||||
- the `e` tag does **not** create a global ordering guarantee,
|
||||
- relays are not required to deliver parents before children,
|
||||
- consumers MUST be prepared to receive out-of-order events,
|
||||
- consumers MAY buffer, defer, ignore, or immediately apply parent-missing events according to local policy.
|
||||
|
||||
This NIP does not define a merge event format.
|
||||
|
||||
This NIP does not define conflict resolution. If two valid mutation events for the same `(r, i)` object are concurrent or incompatible, consumers MUST resolve them using application-specific rules.
|
||||
|
||||
---
|
||||
|
||||
## Authorization
|
||||
|
||||
This NIP does not define who is authorized to publish mutation events for a given resource or object.
|
||||
|
||||
Authorization is application-specific.
|
||||
|
||||
Consumers MUST NOT assume that a valid Nostr signature alone authorizes a mutation. Consumers MUST apply their own trust policy, which MAY include:
|
||||
|
||||
- explicit pubkey allowlists,
|
||||
- per-resource ACLs,
|
||||
- external capability documents,
|
||||
- relay-level write restrictions,
|
||||
- application-specific verification.
|
||||
|
||||
This NIP does not define custodial keys, deterministic key derivation, shared cluster secrets, or delegation schemes.
|
||||
|
||||
---
|
||||
|
||||
## Relay Behavior
|
||||
|
||||
A relay implementing only NIP-01 remains compatible with this NIP.
|
||||
|
||||
No new relay messages are required beyond `REQ`, `EVENT`, and `CLOSE`.
|
||||
|
||||
Relays:
|
||||
|
||||
- MAY index the `r` and `i` tags using existing single-letter tag indexing conventions.
|
||||
- MAY apply normal retention, rate-limit, and access-control policies.
|
||||
- MAY reject events that are too large or otherwise violate local policy.
|
||||
- MUST NOT be expected to validate application payload semantics.
|
||||
|
||||
Applications that require stronger guarantees, such as durable retention or strict admission control, MUST obtain those guarantees from relay policy or from a separate protocol profile.
|
||||
|
||||
---
|
||||
|
||||
## Subscription Filters
|
||||
|
||||
This NIP works with ordinary NIP-01 filters.
|
||||
|
||||
### All mutations for one resource
|
||||
|
||||
```json
|
||||
{
|
||||
"kinds": [5000],
|
||||
"#r": ["com.example.accounts.user"]
|
||||
}
|
||||
```
|
||||
|
||||
### Mutation history for one object
|
||||
|
||||
```json
|
||||
{
|
||||
"kinds": [5000],
|
||||
"#r": ["com.example.accounts.user"],
|
||||
"#i": ["550e8400-e29b-41d4-a716-446655440000"]
|
||||
}
|
||||
```
|
||||
|
||||
### Mutations from trusted authors
|
||||
|
||||
```json
|
||||
{
|
||||
"kinds": [5000],
|
||||
"authors": [
|
||||
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
|
||||
"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Applications SHOULD prefer narrow subscriptions over broad network-wide firehoses.
|
||||
|
||||
---
|
||||
|
||||
## Examples
|
||||
|
||||
### Upsert with parent
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "1111111111111111111111111111111111111111111111111111111111111111",
|
||||
"pubkey": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
|
||||
"created_at": 1710500300,
|
||||
"kind": 5000,
|
||||
"tags": [
|
||||
["r", "com.example.accounts.user"],
|
||||
["i", "550e8400-e29b-41d4-a716-446655440000"],
|
||||
["op", "upsert"],
|
||||
["e", "0000000000000000000000000000000000000000000000000000000000000000"],
|
||||
["v", "1"]
|
||||
],
|
||||
"content": "{\"value\":{\"email\":\"jane.doe@newdomain.com\",\"updated_at\":\"2025-03-15T14:35:00Z\"},\"patch\":\"merge\"}",
|
||||
"sig": "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"
|
||||
}
|
||||
```
|
||||
|
||||
### Delete tombstone
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "2222222222222222222222222222222222222222222222222222222222222222",
|
||||
"pubkey": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||
"created_at": 1710500600,
|
||||
"kind": 5000,
|
||||
"tags": [
|
||||
["r", "com.example.accounts.user"],
|
||||
["i", "550e8400-e29b-41d4-a716-446655440000"],
|
||||
["op", "delete"],
|
||||
["e", "1111111111111111111111111111111111111111111111111111111111111111"],
|
||||
["v", "1"]
|
||||
],
|
||||
"content": "{\"value\":{\"reason\":\"user_requested\"}}",
|
||||
"sig": "dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Security Considerations
|
||||
|
||||
- **Unauthorized writes:** A valid signature proves authorship, not authorization. Consumers MUST enforce their own trust policy.
|
||||
- **Replay:** Old valid events may be redelivered by relays or attackers. Consumers SHOULD deduplicate by event id and apply local replay policy.
|
||||
- **Reordering:** Events may arrive out of order. Consumers MUST NOT treat `created_at` or `e` as a guaranteed total order.
|
||||
- **Conflict flooding:** Multiple valid mutations may target the same object. Consumers SHOULD rate-limit, bound buffering, and define local conflict policy.
|
||||
- **Sensitive data exposure:** Nostr events are typically widely replicable. Publishers SHOULD NOT put secrets or regulated data in mutation payloads unless they provide application-layer encryption.
|
||||
- **Relay retention variance:** Some relays will prune history. Applications that depend on full replay MUST choose relays accordingly or maintain an external archive.
|
||||
|
||||
---
|
||||
|
||||
## Extension Points
|
||||
|
||||
Future drafts or companion NIPs may define:
|
||||
|
||||
- snapshot events for faster bootstrap,
|
||||
- object-head or checkpoint events,
|
||||
- capability or delegation profiles for authorized writers,
|
||||
- standardized conflict-resolution profiles for specific application classes.
|
||||
|
||||
Such extensions SHOULD remain optional and MUST NOT change the meaning of kind `5000` mutation events defined here.
|
||||
|
||||
---
|
||||
|
||||
## References
|
||||
|
||||
- [NIP-01](https://github.com/nostr-protocol/nips/blob/master/01.md) — Basic protocol flow description
|
||||
417 docs/SYNC.md (new file)
@@ -0,0 +1,417 @@
|
||||
# Parrhesia Relay Sync
|
||||
|
||||
## 1. Purpose
|
||||
|
||||
This document defines the Parrhesia proposal for **relay-to-relay event synchronization**.
|
||||
|
||||
It is intentionally transport-focused:
|
||||
|
||||
- manage remote relay peers,
|
||||
- catch up on matching events,
|
||||
- keep a live stream open,
|
||||
- expose health and basic stats.
|
||||
|
||||
It does **not** define application data semantics.
|
||||
|
||||
Parrhesia syncs Nostr events. Callers decide which events matter and how to apply them.
|
||||
|
||||
---
|
||||
|
||||
## 2. Boundary
|
||||
|
||||
### Parrhesia is responsible for
|
||||
|
||||
- storing and validating events,
|
||||
- querying and streaming events,
|
||||
- running outbound sync workers against remote relays,
|
||||
- tracking peer configuration, worker health, and sync counters,
|
||||
- exposing peer management through `Parrhesia.API.Sync`.
|
||||
|
||||
### Parrhesia is not responsible for
|
||||
|
||||
- resource mapping,
|
||||
- trusted node allowlists for an app profile,
|
||||
- mutation payload validation beyond normal event validation,
|
||||
- conflict resolution,
|
||||
- replay winner selection,
|
||||
- database upsert/delete semantics.
|
||||
|
||||
For Tribes, those remain in `TRIBES-NOSTRSYNC` and `AshNostrSync`.
|
||||
|
||||
---
|
||||
|
||||
## 3. Security Foundation
|
||||
|
||||
### Default posture
|
||||
|
||||
The baseline posture for sync traffic is:
|
||||
|
||||
- no access to sync events by default,
|
||||
- no implicit trust from ordinary relay usage,
|
||||
- no reliance on plaintext confidentiality from public relays.
|
||||
|
||||
For the first implementation, Parrhesia should protect sync data primarily with:
|
||||
|
||||
- authenticated server identities,
|
||||
- ACL-gated read and write access,
|
||||
- TLS with certificate pinning for outbound peers.
|
||||
|
||||
### Server identity
|
||||
|
||||
Parrhesia owns a low-level server identity used for relay-to-relay authentication.
|
||||
|
||||
This identity is separate from:
|
||||
|
||||
- TLS endpoint identity,
|
||||
- application event author pubkeys.
|
||||
|
||||
Recommended model:
|
||||
|
||||
- Parrhesia has one local server-auth pubkey,
|
||||
- sync peers authenticate as server-auth pubkeys,
|
||||
- ACL grants are bound to those authenticated server-auth pubkeys,
|
||||
- application-level writer trust remains outside Parrhesia.
|
||||
|
||||
Identity lifecycle:
|
||||
|
||||
1. use configured/imported key if provided,
|
||||
2. otherwise use persisted local identity,
|
||||
3. otherwise generate once during initial startup and persist it.
|
||||
|
||||
Private key export should not be supported.
|
||||
|
||||
### ACLs
|
||||
|
||||
Sync traffic should use a real ACL layer, not moderation allowlists.
|
||||
|
||||
Current implementation note:
|
||||
|
||||
- Parrhesia already has storage-backed moderation state such as `allowed_pubkeys` and `blocked_ips`,
|
||||
- that is not the sync ACL model,
|
||||
- sync protection must be enforced in the active websocket/query/count/negentropy/write path, not inferred from management tables alone.
|
||||
|
||||
Initial ACL model:
|
||||
|
||||
- principal: authenticated pubkey,
|
||||
- capabilities: `sync_read`, `sync_write`,
|
||||
- match: event/filter shape such as `kinds: [5000]` and namespace tags.
|
||||
|
||||
This is enough for now. We do **not** yet need separate user-level and server-level ACL models.
|
||||
|
||||
A sync peer is simply an authenticated principal with sync capabilities.
|
||||
|
||||
### TLS pinning
|
||||
|
||||
Each outbound sync peer must include pinned TLS material.
|
||||
|
||||
Recommended pin type:
|
||||
|
||||
- SPKI SHA-256 pins
|
||||
|
||||
Multiple pins should be allowed to support certificate rotation.
|
||||
|
||||
---
|
||||
|
||||
## 4. Sync Model
|
||||
|
||||
Each configured sync server represents one outbound worker managed by Parrhesia.
|
||||
|
||||
Implementation note:
|
||||
|
||||
- Khatru-style relay designs benefit from explicit runtime stages,
|
||||
- Parrhesia sync should therefore plug into clear internal phases for connection admission, auth, query/count, subscription, negentropy, publish, and fanout,
|
||||
- this should stay a runtime refactor, not become extra sync semantics.
|
||||
|
||||
Minimum behavior:
|
||||
|
||||
1. connect to the remote relay,
|
||||
2. run an initial catch-up query for the configured filters,
|
||||
3. ingest received events into the local relay through the normal API path,
|
||||
4. switch to a live subscription for the same filters,
|
||||
5. reconnect with backoff when disconnected.
|
||||
|
||||
The worker treats filters as opaque Nostr filters. It does not interpret app payloads.
|
||||
|
||||
### Initial implementation mode
|
||||
|
||||
Initial implementation should use ordinary NIP-01 behavior:
|
||||
|
||||
- catch-up via `REQ`-style query,
|
||||
- live updates via `REQ` subscription.
|
||||
|
||||
This is enough for Tribes and keeps the first version simple.
|
||||
|
||||
### NIP-77
|
||||
|
||||
Parrhesia now has a real reusable relay-side NIP-77 engine:
|
||||
|
||||
- proper `NEG-OPEN` / `NEG-MSG` / `NEG-CLOSE` / `NEG-ERR` framing,
|
||||
- a reusable negentropy codec and reconciliation engine,
|
||||
- bounded local `(created_at, id)` snapshot enumeration for matching filters,
|
||||
- connection/session integration with policy checks and resource limits.
|
||||
|
||||
That means NIP-77 can be used for bandwidth-efficient catch-up between trusted nodes.
|
||||
|
||||
The first sync worker implementation may still default to ordinary NIP-01 catch-up plus live replay, because that path is operationally simpler and already matches the current Tribes sync profile. `:negentropy` can now be introduced as an optimization mode rather than a future prerequisite.
|
||||
|
||||
---
|
||||
|
||||
## 5. API Surface
|
||||
|
||||
Primary control plane:
|
||||
|
||||
- `Parrhesia.API.Identity.get/1`
|
||||
- `Parrhesia.API.Identity.ensure/1`
|
||||
- `Parrhesia.API.Identity.import/2`
|
||||
- `Parrhesia.API.Identity.rotate/1`
|
||||
- `Parrhesia.API.ACL.grant/2`
|
||||
- `Parrhesia.API.ACL.revoke/2`
|
||||
- `Parrhesia.API.ACL.list/1`
|
||||
- `Parrhesia.API.Sync.put_server/2`
|
||||
- `Parrhesia.API.Sync.remove_server/2`
|
||||
- `Parrhesia.API.Sync.get_server/2`
|
||||
- `Parrhesia.API.Sync.list_servers/1`
|
||||
- `Parrhesia.API.Sync.start_server/2`
|
||||
- `Parrhesia.API.Sync.stop_server/2`
|
||||
- `Parrhesia.API.Sync.sync_now/2`
|
||||
- `Parrhesia.API.Sync.server_stats/2`
|
||||
- `Parrhesia.API.Sync.sync_stats/1`
|
||||
- `Parrhesia.API.Sync.sync_health/1`
|
||||
|
||||
These APIs are in-process. HTTP management may expose them through `Parrhesia.API.Admin` or direct routing to `Parrhesia.API.Sync`.
|
||||
|
||||
---
|
||||
|
||||
## 6. Server Specification
|
||||
|
||||
`put_server/2` is an upsert.
|
||||
|
||||
Suggested server shape:
|
||||
|
||||
```elixir
|
||||
%{
|
||||
id: "tribes-primary",
|
||||
url: "wss://relay-a.example/relay",
|
||||
enabled?: true,
|
||||
auth_pubkey: "<remote-server-auth-pubkey>",
|
||||
mode: :req_stream,
|
||||
filters: [
|
||||
%{
|
||||
"kinds" => [5000],
|
||||
"authors" => ["<trusted-node-pubkey-a>", "<trusted-node-pubkey-b>"],
|
||||
"#r" => ["tribes.accounts.user", "tribes.chat.tribe"]
|
||||
}
|
||||
],
|
||||
overlap_window_seconds: 300,
|
||||
auth: %{
|
||||
type: :nip42
|
||||
},
|
||||
tls: %{
|
||||
mode: :required,
|
||||
hostname: "relay-a.example",
|
||||
pins: [
|
||||
%{type: :spki_sha256, value: "<pin-a>"},
|
||||
%{type: :spki_sha256, value: "<pin-b>"}
|
||||
]
|
||||
},
|
||||
metadata: %{}
|
||||
}
|
||||
```
|
||||
|
||||
Required fields:
|
||||
|
||||
- `id`
|
||||
- `url`
|
||||
- `auth_pubkey`
|
||||
- `filters`
|
||||
- `tls`
|
||||
|
||||
Recommended fields:
|
||||
|
||||
- `enabled?`
|
||||
- `mode`
|
||||
- `overlap_window_seconds`
|
||||
- `auth`
|
||||
- `metadata`
|
||||
|
||||
Rules:
|
||||
|
||||
- `id` must be stable and unique locally.
|
||||
- `url` is the remote relay websocket URL.
|
||||
- `auth_pubkey` is the expected remote server-auth pubkey.
|
||||
- `filters` must be valid NIP-01 filters.
|
||||
- filters are owned by the caller; Parrhesia only validates filter shape.
|
||||
- `mode` defaults to `:req_stream`.
|
||||
- `tls.mode` defaults to `:required`.
|
||||
- `tls.pins` must be non-empty for synced peers.
|
||||
|
||||
---
|
||||
|
||||
## 7. Runtime State
|
||||
|
||||
Each server should have both configuration and runtime status.
|
||||
|
||||
Suggested runtime fields:
|
||||
|
||||
```elixir
|
||||
%{
|
||||
server_id: "tribes-primary",
|
||||
state: :running,
|
||||
connected?: true,
|
||||
last_connected_at: ~U[2026-03-16 10:00:00Z],
|
||||
last_disconnected_at: nil,
|
||||
last_sync_started_at: ~U[2026-03-16 10:00:00Z],
|
||||
last_sync_completed_at: ~U[2026-03-16 10:00:02Z],
|
||||
last_event_received_at: ~U[2026-03-16 10:12:45Z],
|
||||
last_eose_at: ~U[2026-03-16 10:00:02Z],
|
||||
reconnect_attempts: 0,
|
||||
last_error: nil
|
||||
}
|
||||
```
|
||||
|
||||
Parrhesia should keep this state generic. It is about relay sync health, not app state convergence.
|
||||
|
||||
---
|
||||
|
||||
## 8. Stats and Health
|
||||
|
||||
### Per-server stats
|
||||
|
||||
`server_stats/2` should return basic counters such as:
|
||||
|
||||
- `events_received`
|
||||
- `events_accepted`
|
||||
- `events_duplicate`
|
||||
- `events_rejected`
|
||||
- `query_runs`
|
||||
- `subscription_restarts`
|
||||
- `reconnects`
|
||||
- `last_remote_eose_at`
|
||||
- `last_error`
|
||||
|
||||
### Aggregate sync stats
|
||||
|
||||
`sync_stats/1` should summarize:
|
||||
|
||||
- total configured servers,
|
||||
- enabled servers,
|
||||
- running servers,
|
||||
- connected servers,
|
||||
- aggregate event counters,
|
||||
- aggregate reconnect count.
|
||||
|
||||
### Health
|
||||
|
||||
`sync_health/1` should be operator-oriented, for example:
|
||||
|
||||
```elixir
|
||||
%{
|
||||
"status" => "degraded",
|
||||
"servers_total" => 3,
|
||||
"servers_connected" => 2,
|
||||
"servers_failing" => [
|
||||
%{"id" => "tribes-secondary", "reason" => "connection_refused"}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
This is intentionally simple. It should answer “is sync working?” without pretending to prove application convergence.
|
||||
|
||||
---
|
||||
|
||||
## 9. Event Ingest Path
|
||||
|
||||
Events received from a remote sync worker should enter Parrhesia through the same ingest path as any other accepted event.
|
||||
|
||||
That means:
|
||||
|
||||
1. validate the event,
|
||||
2. run normal write policy,
|
||||
3. persist or reject,
|
||||
4. fan out locally,
|
||||
5. rely on duplicate-event behavior for idempotency.
|
||||
|
||||
This avoids a second ingest path with divergent behavior.
|
||||
|
||||
Before normal event acceptance, the sync worker should enforce:
|
||||
|
||||
1. pinned TLS validation for the remote endpoint,
|
||||
2. remote server-auth identity match,
|
||||
3. local ACL grant permitting the peer to perform sync reads and/or writes.
|
||||
|
||||
The sync worker may attach request-context metadata such as:
|
||||
|
||||
```elixir
|
||||
%Parrhesia.API.RequestContext{
|
||||
caller: :sync,
|
||||
peer_id: "tribes-primary",
|
||||
metadata: %{sync_server_id: "tribes-primary"}
|
||||
}
|
||||
```
|
||||
|
||||
Recommended additional context when available:
|
||||
|
||||
- `remote_ip`
|
||||
- `subscription_id`
|
||||
|
||||
This context is for telemetry, policy, and audit only. It must not be used to carry app-level sync semantics.
|
||||
|
||||
---
|
||||
|
||||
## 10. Persistence
|
||||
|
||||
Parrhesia should persist enough sync control-plane state to survive restart:
|
||||
|
||||
- local server identity reference,
|
||||
- configured ACL rules for sync principals,
|
||||
- configured servers,
|
||||
- whether a server is enabled,
|
||||
- optional catch-up cursor or watermark per server,
|
||||
- basic last-error and last-success markers.
|
||||
|
||||
Parrhesia does not need to persist application replay heads or winner state. That remains in the embedding application.
|
||||
|
||||
---
|
||||
|
||||
## 11. Relationship to Current Features
|
||||
|
||||
### BEAM cluster fanout
|
||||
|
||||
`Parrhesia.Fanout.MultiNode` is a separate feature.
|
||||
|
||||
It provides best-effort live fanout between connected BEAM nodes. It is not remote relay sync and is not a substitute for `Parrhesia.API.Sync`.
|
||||
|
||||
### Management stats
|
||||
|
||||
Current admin `stats` is relay-global and minimal.
|
||||
|
||||
Sync adds a new dimension:
|
||||
|
||||
- peer config,
|
||||
- worker state,
|
||||
- per-peer counters,
|
||||
- sync health summary.
|
||||
|
||||
That should be exposed without coupling it to app-specific sync semantics.
|
||||
|
||||
---
|
||||
|
||||
## 12. Tribes Usage
|
||||
|
||||
For Tribes, `AshNostrSync` should be able to:
|
||||
|
||||
1. rely on Parrhesia’s local server identity,
|
||||
2. register one or more remote relays with `Parrhesia.API.Sync.put_server/2`,
|
||||
3. grant sync ACLs for trusted server-auth pubkeys,
|
||||
4. provide narrow Nostr filters for `kind: 5000`,
|
||||
5. observe sync health and counters,
|
||||
6. consume events via the normal local Parrhesia ingest/query/stream surface.
|
||||
|
||||
Tribes should not need Parrhesia to know:
|
||||
|
||||
- what a resource namespace means,
|
||||
- which node pubkeys are trusted for Tribes,
|
||||
- how to resolve conflicts,
|
||||
- how to apply an upsert or delete.
|
||||
|
||||
That is the key boundary.
|
||||
BIN
docs/logo.afdesign
Normal file
BIN
docs/logo.afdesign
Normal file
Binary file not shown.
1
docs/logo.svg
Normal file
1
docs/logo.svg
Normal file
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 37 KiB |
279
docs/slop/HARDEN.md
Normal file
279
docs/slop/HARDEN.md
Normal file
@@ -0,0 +1,279 @@
|
||||
# Hardening Review: Parrhesia Nostr Relay
|
||||
|
||||
You are a security engineer specialising in real-time WebSocket servers, Erlang/OTP systems, and protocol-level abuse. You are reviewing **Parrhesia**, a Nostr relay (NIP-01 compliant) written in Elixir, for hardening opportunities — with a primary focus on **denial-of-service resilience** and a secondary focus on the full attack surface.
|
||||
|
||||
Produce a prioritised list of **specific, actionable recommendations** with rationale. For each recommendation, state:
|
||||
1. The attack or failure mode it mitigates
|
||||
2. Suggested implementation (config change, code change, or architectural change)
|
||||
3. Severity estimate (critical / high / medium / low)
|
||||
|
||||
---
|
||||
|
||||
## 1. Architecture Overview
|
||||
|
||||
| Component | Technology | Notes |
|
||||
|---|---|---|
|
||||
| Runtime | Elixir/OTP 27, BEAM VM | Each WS connection is a separate process |
|
||||
| HTTP server | Bandit (pure Elixir) | HTTP/1.1 only, no HTTP/2 |
|
||||
| WebSocket | `websock_adapter` | Text frames only; binary rejected |
|
||||
| Database | PostgreSQL via Ecto | Range-partitioned `events` table by `created_at` |
|
||||
| Caching | ETS | Config snapshot + moderation ban/allow lists |
|
||||
| Multi-node | Erlang `:pg` groups | Fanout across BEAM cluster nodes |
|
||||
| Metrics | Prometheus (Telemetry) | `/metrics` endpoint |
|
||||
| TLS termination | **Out of scope** | Handled by reverse proxy (nginx/Caddy) |
|
||||
|
||||
### Supervision Tree
|
||||
|
||||
```
|
||||
Parrhesia.Supervisor
|
||||
├─ Telemetry (Prometheus exporter)
|
||||
├─ Config (ETS snapshot of runtime config)
|
||||
├─ Storage.Supervisor (Ecto repo + moderation cache)
|
||||
├─ Subscriptions.Supervisor (ETS subscription index for fanout)
|
||||
├─ Auth.Supervisor (NIP-42 challenge GenServer)
|
||||
├─ Policy.Supervisor (policy enforcement)
|
||||
├─ Web.Endpoint (Bandit listener)
|
||||
└─ Tasks.Supervisor (ExpirationWorker, 30s GC loop)
|
||||
```
|
||||
|
||||
### Data Flow
|
||||
|
||||
1. Client connects via WebSocket at `/relay`
|
||||
2. NIP-42 AUTH challenge issued immediately (16-byte random, base64url)
|
||||
3. Inbound text frames are: size-checked → JSON-decoded → rate-limited → protocol-dispatched
|
||||
4. EVENT messages: validated → policy-checked → stored in Postgres → ACK → async fanout to matching subscriptions
|
||||
5. REQ messages: filters validated → Postgres query → results streamed → EOSE → live subscription registered
|
||||
6. Fanout: post-ingest, subscription index (ETS) is traversed; matching connection processes receive events via `send/2`
|
||||
|
||||
---
|
||||
|
||||
## 2. Current Defences Inventory
|
||||
|
||||
### Connection Layer
|
||||
|
||||
| Defence | Value | Enforcement Point |
|
||||
|---|---|---|
|
||||
| Max WebSocket frame size | **1,048,576 bytes (1 MiB)** | Checked in `handle_in` *before* JSON decode, and at Bandit upgrade (`max_frame_size`) |
|
||||
| WebSocket upgrade timeout | **60,000 ms** | Passed to `WebSockAdapter.upgrade` |
|
||||
| Binary frame rejection | Returns NOTICE, connection stays open | `handle_in` opcode check |
|
||||
| Outbound queue limit | **256 events** per connection | Overflow strategy: **`:close`** (WS 1008) |
|
||||
| Outbound drain batch | **64 events** | Async drain via `send(self(), :drain_outbound_queue)` |
|
||||
| Outbound pressure telemetry | Threshold at **75%** of queue | Emits telemetry event only, no enforcement |
|
||||
| IP blocking | Via moderation cache (ETS) | Management API can add blocked IPs |
|
||||
|
||||
### Protocol Layer
|
||||
|
||||
| Defence | Value | Notes |
|
||||
|---|---|---|
|
||||
| Max event JSON size | **262,144 bytes (256 KiB)** | Re-serialises decoded event and checks byte size |
|
||||
| Max filters per REQ | **16** | Rejected at filter validation |
|
||||
| Max filter `limit` | **500** | `min(client_limit, 500)` applied at query time |
|
||||
| Max subscriptions per connection | **32** | Existing sub IDs updated without counting toward limit |
|
||||
| Subscription ID max length | **64 characters** | Must be non-empty |
|
||||
| Event kind range | **0–65,535** | Integer range check |
|
||||
| Max future event skew | **900 seconds (15 min)** | Events with `created_at > now + 900` rejected |
|
||||
| Unknown filter keys | **Rejected** | Allowed: `ids`, `authors`, `kinds`, `since`, `until`, `limit`, `search`, `#<letter>` |
|
||||
|
||||
### Event Validation Pipeline
|
||||
|
||||
Strict order:
|
||||
1. Required fields present (`id`, `pubkey`, `created_at`, `kind`, `tags`, `content`, `sig`)
|
||||
2. `id` — 64-char lowercase hex
|
||||
3. `pubkey` — 64-char lowercase hex
|
||||
4. `created_at` — non-negative integer, max 900s future skew
|
||||
5. `kind` — integer in [0, 65535]
|
||||
6. `tags` — list of non-empty string arrays (**no length limit on tags array or individual tag values**)
|
||||
7. `content` — any binary string
|
||||
8. `sig` — 128-char lowercase hex
|
||||
9. ID hash recomputation and comparison
|
||||
10. Schnorr signature verification via `lib_secp256k1` (gated by `verify_event_signatures` flag, default `true`)
|
||||
|
||||
### Rate Limiting
|
||||
|
||||
| Defence | Value | Notes |
|
||||
|---|---|---|
|
||||
| Event ingest rate | **120 events per window** | Per-connection sliding window |
|
||||
| Ingest window | **1 second** | Resets on first event after expiry |
|
||||
| No per-IP connection rate limiting | — | Must be handled at reverse proxy |
|
||||
| No global connection count ceiling | — | BEAM handles thousands but no configured limit |
|
||||
|
||||
### Authentication (NIP-42)
|
||||
|
||||
- Challenge issued to **all** connections on connect (optional escalation model)
|
||||
- AUTH event must: pass full NIP-01 validation, be kind `22242`, contain matching `challenge` tag, contain matching `relay` tag
|
||||
- `created_at` freshness: must be `>= now - 600s` (10 min)
|
||||
- On success: pubkey added to `authenticated_pubkeys` MapSet; challenge rotated
|
||||
- Supports multiple authenticated pubkeys per connection
|
||||
|
||||
### Authentication (NIP-98 HTTP)
|
||||
|
||||
- Management endpoint (`POST /management`) requires NIP-98 header
|
||||
- Auth event must be kind `27235`, `created_at` within **60 seconds** of now
|
||||
- Must include `method` and `u` tags matching request exactly
|
||||
|
||||
### Access Control
|
||||
|
||||
- `auth_required_for_writes`: default **false** (configurable)
|
||||
- `auth_required_for_reads`: default **false** (configurable)
|
||||
- Protected events (NIP-70, tagged `["-"]`): require auth + pubkey match
|
||||
- Giftwrap (kind 1059): unauthenticated REQ → CLOSED; authenticated REQ must include `#p` containing own pubkey
|
||||
|
||||
### Database
|
||||
|
||||
- All queries use Ecto parameterised bindings — no raw string interpolation
|
||||
- LIKE search patterns escaped (`%`, `_`, `\` characters)
|
||||
- Deletion enforces `pubkey == deleter_pubkey` in WHERE clause
|
||||
- Soft-delete via `deleted_at`; hard-delete only via vanish (NIP-62) or expiration purge
|
||||
- DB pool: **32 connections** (prod), queue target 1s, interval 5s
|
||||
|
||||
### Moderation
|
||||
|
||||
- Banned pubkeys, allowed pubkeys, banned events, blocked IPs stored in ETS cache
|
||||
- Management API (NIP-98 authed) for CRUD on moderation lists
|
||||
- Cache invalidated atomically on writes
|
||||
|
||||
---
|
||||
|
||||
## 3. Known Gaps and Areas of Concern
|
||||
|
||||
The following are areas where the current implementation may be vulnerable or where defences could be strengthened. **Please evaluate each and provide recommendations.**
|
||||
|
||||
### 3.1 Connection Exhaustion
|
||||
|
||||
- There is **no global limit on concurrent WebSocket connections**. Each connection is an Elixir process (~2–3 KiB base), but subscriptions, auth state, and outbound queues add per-connection memory.
|
||||
- There is **no per-IP connection rate limiting at the application layer**. IP blocking exists but is reactive (management API), not automatic.
|
||||
- There is **no idle timeout** after the WebSocket upgrade completes. A connection can remain open indefinitely without sending or receiving messages.
|
||||
|
||||
**Questions:**
|
||||
- What connection limits should be configured at the Bandit/BEAM level?
|
||||
- Should an idle timeout be implemented? If so, what value balances real-time subscription use against resource waste?
|
||||
- Should per-IP connection counting be implemented at the application layer, or is this strictly a reverse proxy concern?
|
||||
|
||||
### 3.2 Subscription Abuse
|
||||
|
||||
- A single connection can hold **32 subscriptions**, each with up to **16 filters**. That's 512 filter predicates per connection being evaluated on every fanout.
|
||||
- Filter arrays (`ids`, `authors`, `kinds`, tag values) have **no element count limits**. A filter could contain thousands of author pubkeys.
|
||||
- There is no cost accounting for "expensive" subscriptions (e.g., wide open filters matching all events).
|
||||
|
||||
**Questions:**
|
||||
- Should filter array element counts be bounded? If so, what limits per field?
|
||||
- Should there be a per-connection "filter complexity" budget?
|
||||
- How expensive is the current ETS subscription index traversal at scale (e.g., 10K concurrent connections × 32 subs each)?
|
||||
|
||||
### 3.3 Tag Array Size
|
||||
|
||||
- Event validation does **not limit the number of tags** or the length of individual tag values beyond the 256 KiB total event size cap.
|
||||
- A maximally-tagged event could contain thousands of short tags, causing amplification in `event_tags` table inserts (one row per tag).
|
||||
|
||||
**Questions:**
|
||||
- Should a max tag count be enforced? What is a reasonable limit?
|
||||
- What is the insert cost of storing e.g. 1,000 tags per event? Could this be used for write amplification?
|
||||
- Should individual tag value lengths be bounded?
|
||||
|
||||
### 3.4 AUTH Timing
|
||||
|
||||
- AUTH event `created_at` freshness only checks the **lower bound** (`>= now - 600`). An AUTH event with `created_at` far in the future passes validation.
|
||||
- Regular events have a future skew cap of 900s, but AUTH events do not.
|
||||
|
||||
**Questions:**
|
||||
- Should AUTH events also enforce a future `created_at` bound?
|
||||
- Is a 600-second AUTH window too wide? Could it be reduced?
|
||||
|
||||
### 3.5 Outbound Amplification
|
||||
|
||||
- A single inbound EVENT can fan out to an unbounded number of matching subscriptions across all connections.
|
||||
- The outbound queue (256 events, `:close` strategy) protects individual connections but does not limit total fanout work per event.
|
||||
- The fanout traverses the ETS subscription index synchronously in the ingesting connection's process.
|
||||
|
||||
**Questions:**
|
||||
- Should fanout be bounded per event (e.g., max N recipients before yielding)?
|
||||
- Should fanout happen in a separate process pool rather than inline?
|
||||
- Is the `:close` overflow strategy optimal, or would `:drop_oldest` be better for well-behaved clients with temporary backpressure?
|
||||
|
||||
### 3.6 Query Amplification
|
||||
|
||||
- A single REQ with 16 filters, each with `limit: 500`, could trigger 16 separate Postgres queries returning up to 8,000 events total.
|
||||
- COUNT requests also execute per-filter queries (now deduplicated via UNION ALL).
|
||||
- `search` filters use `ILIKE %pattern%` which cannot use B-tree indexes.
|
||||
|
||||
**Questions:**
|
||||
- Should there be a per-REQ total result cap (across all filters)?
|
||||
- Should `search` queries be rate-limited or require a minimum pattern length?
|
||||
- Should COUNT be disabled or rate-limited separately?
|
||||
- Are there missing indexes that would help common query patterns?
|
||||
|
||||
### 3.7 Multi-Node Trust
|
||||
|
||||
- Events received via `:remote_fanout_event` from peer BEAM nodes **skip all validation and policy checks** and go directly to the subscription index.
|
||||
- This assumes all cluster peers are trusted.
|
||||
|
||||
**Questions:**
|
||||
- If cluster membership is dynamic or spans trust boundaries, should remote events be re-validated?
|
||||
- Should there be a shared secret or HMAC on inter-node messages?
|
||||
|
||||
### 3.8 Metrics Endpoint
|
||||
|
||||
- `/metrics` (Prometheus) is **unauthenticated**.
|
||||
- Exposes internal telemetry: connection counts, event throughput, queue depths, database timing.
|
||||
|
||||
**Questions:**
|
||||
- Should `/metrics` require authentication or be restricted to internal networks?
|
||||
- Could metrics data be used to profile the relay's capacity and craft targeted attacks?
|
||||
|
||||
### 3.9 Negentropy Stub
|
||||
|
||||
- NEG-OPEN, NEG-MSG, NEG-CLOSE messages are accepted and acknowledged but the reconciliation logic is a stub (cursor counter only).
|
||||
- Are there resource implications of accepting negentropy sessions without real implementation?
|
||||
|
||||
### 3.10 Event Re-Serialisation Cost
|
||||
|
||||
- To enforce the 256 KiB event size limit, the relay calls `JSON.encode!(event)` on the already-decoded event map. This re-serialisation happens on every inbound EVENT.
|
||||
- Could this be replaced with a byte-length check on the raw frame payload (already available)?
|
||||
|
||||
---
|
||||
|
||||
## 4. Specific Review Requests
|
||||
|
||||
Beyond the gaps above, please also evaluate:
|
||||
|
||||
1. **Bandit configuration**: Are there Bandit-level options (max connections, header limits, request timeouts, keepalive settings) that should be tuned for a public-facing relay?
|
||||
|
||||
2. **BEAM VM flags**: Are there any Erlang VM flags (`+P`, `+Q`, `+S`, memory limits) that should be set for production hardening?
|
||||
|
||||
3. **Ecto pool exhaustion**: With 32 DB connections and potentially thousands of concurrent REQ queries, what happens under pool exhaustion? Is the 1s queue target + 5s interval appropriate?
|
||||
|
||||
4. **ETS table sizing**: The subscription index and moderation cache use ETS. Are there memory limits or table options (`read_concurrency`, `write_concurrency`, `compressed`) that should be tuned?
|
||||
|
||||
5. **Process mailbox overflow**: Connection processes receive events via `send/2` during fanout. If a process is slow to consume, its mailbox grows. The outbound queue mechanism is application-level — but is the BEAM-level mailbox also protected?
|
||||
|
||||
6. **Reverse proxy recommendations**: What nginx/Caddy configuration should complement the relay's defences? (Rate limiting, connection limits, WebSocket-specific settings, request body size.)
|
||||
|
||||
7. **Monitoring and alerting**: What telemetry signals should trigger alerts? (Connection count spikes, queue overflow rates, DB pool saturation, error rates.)
|
||||
|
||||
---
|
||||
|
||||
## 5. Out of Scope
|
||||
|
||||
The following are **not** in scope for this review:
|
||||
- TLS configuration (handled by reverse proxy)
|
||||
- DNS and network-level DDoS mitigation
|
||||
- Operating system hardening
|
||||
- Key management for the relay identity
|
||||
- Client-side security
|
||||
- Nostr protocol design flaws (we implement the spec as-is)
|
||||
|
||||
---
|
||||
|
||||
## 6. Response Format
|
||||
|
||||
For each recommendation, use this format:
|
||||
|
||||
### [Severity] Title
|
||||
|
||||
**Attack/failure mode:** What goes wrong without this mitigation.
|
||||
|
||||
**Current state:** What exists today (or doesn't).
|
||||
|
||||
**Recommendation:** Specific change — config value, code change, or architectural decision.
|
||||
|
||||
**Trade-offs:** Any impact on legitimate users or operational complexity.
|
||||
424
docs/slop/LOCAL_API.md
Normal file
424
docs/slop/LOCAL_API.md
Normal file
@@ -0,0 +1,424 @@
|
||||
# Parrhesia Shared API
|
||||
|
||||
## 1. Goal
|
||||
|
||||
Expose a stable in-process API that:
|
||||
|
||||
- is used by WebSocket, HTTP management, local callers, and sync workers,
|
||||
- keeps protocol and storage behavior in one place,
|
||||
- stays neutral about application-level replication semantics.
|
||||
|
||||
This document defines the Parrhesia contract. It does **not** define Tribes or Ash sync behavior.
|
||||
|
||||
---
|
||||
|
||||
## 2. Scope
|
||||
|
||||
### In scope
|
||||
|
||||
- event ingest/query/count parity with WebSocket behavior,
|
||||
- local subscription APIs,
|
||||
- NIP-98 validation helpers,
|
||||
- management/admin helpers,
|
||||
- remote relay sync worker control and health reporting.
|
||||
|
||||
### Out of scope
|
||||
|
||||
- resource registration,
|
||||
- trusted app writers,
|
||||
- mutation payload semantics,
|
||||
- conflict resolution,
|
||||
- replay winner selection,
|
||||
- Ash action mapping.
|
||||
|
||||
Those belong in app profiles such as `TRIBES-NOSTRSYNC`, not in Parrhesia.
|
||||
|
||||
---
|
||||
|
||||
## 3. Layering
|
||||
|
||||
```text
|
||||
Transport / embedding / background workers
|
||||
- Parrhesia.Web.Connection
|
||||
- Parrhesia.Web.Management
|
||||
- Parrhesia.Local.*
|
||||
- Parrhesia.Sync.*
|
||||
|
||||
Shared API
|
||||
- Parrhesia.API.Auth
|
||||
- Parrhesia.API.Events
|
||||
- Parrhesia.API.Stream
|
||||
- Parrhesia.API.Admin
|
||||
- Parrhesia.API.Identity
|
||||
- Parrhesia.API.ACL
|
||||
- Parrhesia.API.Sync
|
||||
|
||||
Runtime internals
|
||||
- Parrhesia.Policy.EventPolicy
|
||||
- Parrhesia.Storage.*
|
||||
- Parrhesia.Groups.Flow
|
||||
- Parrhesia.Subscriptions.Index
|
||||
- Parrhesia.Fanout.MultiNode
|
||||
- Parrhesia.Telemetry
|
||||
```
|
||||
|
||||
Rule: transport framing stays at the edge. Business decisions happen in `Parrhesia.API.*`.
|
||||
|
||||
Implementation note:
|
||||
|
||||
- the runtime beneath `Parrhesia.API.*` should expose clearer internal policy stages than it does today,
|
||||
- at minimum: connection/auth, publish, query/count, stream subscription, negentropy, response shaping, and broadcast/fanout,
|
||||
- these are internal runtime seams, not additional public APIs.
|
||||
|
||||
---
|
||||
|
||||
## 4. Core Context
|
||||
|
||||
```elixir
|
||||
defmodule Parrhesia.API.RequestContext do
|
||||
defstruct authenticated_pubkeys: MapSet.new(),
|
||||
actor: nil,
|
||||
caller: :local,
|
||||
remote_ip: nil,
|
||||
subscription_id: nil,
|
||||
peer_id: nil,
|
||||
metadata: %{}
|
||||
end
|
||||
```
|
||||
|
||||
`caller` is for telemetry and policy parity, for example `:websocket`, `:http`, `:local`, or `:sync`.
|
||||
|
||||
Recommended usage:
|
||||
|
||||
- `remote_ip` for connection-level policy and audit,
|
||||
- `subscription_id` for query/stream/negentropy context,
|
||||
- `peer_id` for trusted sync peer identity when applicable,
|
||||
- `metadata` for transport-specific details that should not become API fields.
|
||||
|
||||
---
|
||||
|
||||
## 5. Public Modules
|
||||
|
||||
### 5.1 `Parrhesia.API.Auth`
|
||||
|
||||
Purpose:
|
||||
|
||||
- event validation helpers,
|
||||
- NIP-98 verification,
|
||||
- optional embedding account resolution.
|
||||
|
||||
```elixir
|
||||
@spec validate_event(map()) :: :ok | {:error, term()}
|
||||
@spec compute_event_id(map()) :: String.t()
|
||||
|
||||
@spec validate_nip98(String.t() | nil, String.t(), String.t()) ::
|
||||
{:ok, Parrhesia.API.Auth.Context.t()} | {:error, term()}
|
||||
|
||||
@spec validate_nip98(String.t() | nil, String.t(), String.t(), keyword()) ::
|
||||
{:ok, Parrhesia.API.Auth.Context.t()} | {:error, term()}
|
||||
```
|
||||
|
||||
### 5.2 `Parrhesia.API.Events`
|
||||
|
||||
Purpose:
|
||||
|
||||
- canonical ingest/query/count path used by WS, HTTP, local callers, and sync workers.
|
||||
|
||||
```elixir
|
||||
@spec publish(map(), keyword()) ::
|
||||
{:ok, Parrhesia.API.Events.PublishResult.t()} | {:error, term()}
|
||||
|
||||
@spec query([map()], keyword()) ::
|
||||
{:ok, [map()]} | {:error, term()}
|
||||
|
||||
@spec count([map()], keyword()) ::
|
||||
{:ok, non_neg_integer() | map()} | {:error, term()}
|
||||
```
|
||||
|
||||
Required options:
|
||||
|
||||
- `:context` - `%Parrhesia.API.RequestContext{}`
|
||||
|
||||
`publish/2` must preserve current `EVENT` semantics:
|
||||
|
||||
1. size checks,
|
||||
2. `Protocol.validate_event/1`,
|
||||
3. `EventPolicy.authorize_write/2`,
|
||||
4. group handling,
|
||||
5. persistence or control-event path,
|
||||
6. local plus multi-node fanout,
|
||||
7. telemetry.
|
||||
|
||||
Return shape mirrors `OK`:
|
||||
|
||||
```elixir
|
||||
{:ok, %PublishResult{event_id: id, accepted: true, message: "ok: event stored"}}
|
||||
{:ok, %PublishResult{event_id: id, accepted: false, message: "blocked: ..."}}
|
||||
```
|
||||
|
||||
`query/2` and `count/2` must preserve current `REQ` and `COUNT` behavior, including giftwrap restrictions and server-side filter validation.
|
||||
|
||||
### 5.3 `Parrhesia.API.Stream`
|
||||
|
||||
Purpose:
|
||||
|
||||
- in-process subscription surface with the same semantics as a WebSocket `REQ`.
|
||||
|
||||
This is **required** for embedding and sync consumers.
|
||||
|
||||
```elixir
|
||||
@spec subscribe(pid(), String.t(), [map()], keyword()) ::
|
||||
{:ok, reference()} | {:error, term()}
|
||||
|
||||
@spec unsubscribe(reference()) :: :ok
|
||||
```
|
||||
|
||||
Required options:
|
||||
|
||||
- `:context` - `%Parrhesia.API.RequestContext{}`
|
||||
|
||||
Subscriber contract:
|
||||
|
||||
```elixir
|
||||
{:parrhesia, :event, ref, subscription_id, event}
|
||||
{:parrhesia, :eose, ref, subscription_id}
|
||||
{:parrhesia, :closed, ref, subscription_id, reason}
|
||||
```
|
||||
|
||||
`subscribe/4` must:
|
||||
|
||||
1. validate filters,
|
||||
2. apply read policy,
|
||||
3. emit initial catch-up events in the same order as `REQ`,
|
||||
4. emit exactly one `:eose`,
|
||||
5. register for live fanout until `unsubscribe/1`.
|
||||
|
||||
This module does **not** know why a caller wants the stream.
|
||||
|
||||
### 5.4 `Parrhesia.API.Admin`
|
||||
|
||||
Purpose:
|
||||
|
||||
- stable in-process facade for management operations already exposed over HTTP.
|
||||
|
||||
```elixir
|
||||
@spec execute(String.t() | atom(), map(), keyword()) :: {:ok, map()} | {:error, term()}
|
||||
@spec stats(keyword()) :: {:ok, map()} | {:error, term()}
|
||||
@spec health(keyword()) :: {:ok, map()} | {:error, term()}
|
||||
@spec list_audit_logs(keyword()) :: {:ok, [map()]} | {:error, term()}
|
||||
```
|
||||
|
||||
Baseline methods:
|
||||
|
||||
- `ping`
|
||||
- `stats`
|
||||
- `health`
|
||||
- moderation methods already supported by the storage admin adapter
|
||||
|
||||
`stats/1` is relay-level and cheap. `health/1` is liveness/readiness-oriented and may include worker state.
|
||||
|
||||
`API.Admin` is the operator-facing umbrella for management. It may delegate domain-specific work to `API.Identity`, `API.ACL`, and `API.Sync`.
|
||||
|
||||
### 5.5 `Parrhesia.API.Identity`
|
||||
|
||||
Purpose:
|
||||
|
||||
- manage Parrhesia-owned server identity,
|
||||
- expose public identity metadata,
|
||||
- support explicit import and rotation,
|
||||
- keep private key material internal.
|
||||
|
||||
Parrhesia owns a low-level server identity used for relay-to-relay auth and other transport-local security features.
|
||||
|
||||
```elixir
|
||||
@spec get(keyword()) :: {:ok, map()} | {:error, term()}
|
||||
@spec ensure(keyword()) :: {:ok, map()} | {:error, term()}
|
||||
@spec import(map(), keyword()) :: {:ok, map()} | {:error, term()}
|
||||
@spec rotate(keyword()) :: {:ok, map()} | {:error, term()}
|
||||
@spec sign_event(map(), keyword()) :: {:ok, map()} | {:error, term()}
|
||||
```
|
||||
|
||||
Rules:
|
||||
|
||||
- private key material must never be returned by API,
|
||||
- production deployments should be able to import a configured key,
|
||||
- local/dev deployments may generate on first init if none exists,
|
||||
- identity creation should be eager and deterministic, not lazy on first sync use.
|
||||
|
||||
Recommended boot order:
|
||||
|
||||
1. configured/imported key,
|
||||
2. persisted local identity,
|
||||
3. generate once and persist.
|
||||
|
||||
### 5.6 `Parrhesia.API.ACL`
|
||||
|
||||
Purpose:
|
||||
|
||||
- enforce event/filter ACLs for authenticated principals,
|
||||
- support default-deny sync visibility,
|
||||
- allow dynamic grants for trusted sync peers.
|
||||
|
||||
This is a real authorization layer, not a reuse of moderation allowlists.
|
||||
|
||||
Current implementation note:
|
||||
|
||||
- Parrhesia already has storage-backed moderation presence tables such as `allowed_pubkeys` and `blocked_ips`,
|
||||
- those are not sufficient for sync ACLs,
|
||||
- the new ACL layer must be enforced directly in the active read/write/query/negentropy path, not only through management tables.
|
||||
|
||||
```elixir
|
||||
@spec grant(map(), keyword()) :: :ok | {:error, term()}
|
||||
@spec revoke(map(), keyword()) :: :ok | {:error, term()}
|
||||
@spec list(keyword()) :: {:ok, [map()]} | {:error, term()}
|
||||
@spec check(atom(), map(), keyword()) :: :ok | {:error, term()}
|
||||
```
|
||||
|
||||
Suggested rule shape:
|
||||
|
||||
```elixir
|
||||
%{
|
||||
principal_type: :pubkey,
|
||||
principal: "<server-auth-pubkey>",
|
||||
capability: :sync_read,
|
||||
match: %{
|
||||
"kinds" => [5000],
|
||||
"#r" => ["tribes.accounts.user", "tribes.chat.tribe"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
For the first implementation, principals should be authenticated pubkeys only.
|
||||
|
||||
We do **not** need a separate user-vs-server ACL model yet. A sync peer is simply a principal with sync capabilities.
|
||||
|
||||
Initial required capabilities:
|
||||
|
||||
- `:sync_read`
|
||||
- `:sync_write`
|
||||
|
||||
Recommended baseline:
|
||||
|
||||
- ordinary events follow existing relay behavior,
|
||||
- sync traffic is default-deny,
|
||||
- access is lifted only by explicit ACL grants for authenticated server pubkeys.
|
||||
|
||||
### 5.7 `Parrhesia.API.Sync`
|
||||
|
||||
Purpose:
|
||||
|
||||
- manage remote relay sync workers without embedding app-specific replication semantics.
|
||||
|
||||
Parrhesia syncs **events**, not records.
|
||||
|
||||
```elixir
|
||||
@spec put_server(map(), keyword()) ::
|
||||
{:ok, Parrhesia.API.Sync.Server.t()} | {:error, term()}
|
||||
|
||||
@spec remove_server(String.t(), keyword()) :: :ok | {:error, term()}
|
||||
@spec get_server(String.t(), keyword()) ::
|
||||
{:ok, Parrhesia.API.Sync.Server.t()} | :error | {:error, term()}
|
||||
|
||||
@spec list_servers(keyword()) ::
|
||||
{:ok, [Parrhesia.API.Sync.Server.t()]} | {:error, term()}
|
||||
|
||||
@spec start_server(String.t(), keyword()) :: :ok | {:error, term()}
|
||||
@spec stop_server(String.t(), keyword()) :: :ok | {:error, term()}
|
||||
@spec sync_now(String.t(), keyword()) :: :ok | {:error, term()}
|
||||
|
||||
@spec server_stats(String.t(), keyword()) ::
|
||||
{:ok, map()} | :error | {:error, term()}
|
||||
|
||||
@spec sync_stats(keyword()) :: {:ok, map()} | {:error, term()}
|
||||
@spec sync_health(keyword()) :: {:ok, map()} | {:error, term()}
|
||||
```
|
||||
|
||||
`put_server/2` is upsert-style. It covers both add and update.
|
||||
|
||||
Minimum server shape:
|
||||
|
||||
```elixir
|
||||
%{
|
||||
id: "tribes-a",
|
||||
url: "wss://relay-a.example/relay",
|
||||
enabled?: true,
|
||||
auth_pubkey: "<remote-server-auth-pubkey>",
|
||||
filters: [%{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}],
|
||||
mode: :req_stream,
|
||||
auth: %{type: :nip42},
|
||||
tls: %{
|
||||
mode: :required,
|
||||
hostname: "relay-a.example",
|
||||
pins: [
|
||||
%{type: :spki_sha256, value: "<base64-sha256-spki-pin>"}
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Important constraints:
|
||||
|
||||
- filters are caller-provided and opaque to Parrhesia,
|
||||
- Parrhesia does not inspect `kind: 5000` payload semantics,
|
||||
- Parrhesia may persist peer config and runtime counters,
|
||||
- Parrhesia may reconnect and resume catch-up using generic event cursors,
|
||||
- Parrhesia must expose worker health and basic counters,
|
||||
- remote relay TLS pinning is required,
|
||||
- sync peer auth is bound to a server-auth pubkey, not inferred from event author pubkeys,
|
||||
- sync enforcement should reuse the same runtime policy stages as ordinary websocket traffic rather than inventing a parallel trust path.
|
||||
|
||||
Server identity model:
|
||||
|
||||
- Parrhesia owns its local server-auth identity via `API.Identity`,
|
||||
- peer config declares the expected remote server-auth pubkey,
|
||||
- ACL grants are bound to authenticated server-auth pubkeys,
|
||||
- event author pubkeys remain a separate application concern.
|
||||
|
||||
Initial mode should be `:req_stream`:
|
||||
|
||||
1. run catch-up with `API.Events.query/2`-equivalent client behavior against the remote relay,
|
||||
2. switch to a live subscription,
|
||||
3. ingest received events through local `API.Events.publish/2`.
|
||||
|
||||
Future optimization:
|
||||
|
||||
- `:negentropy` may be added as an optimization mode on top of the simpler `:req_stream` baseline.
|
||||
- Parrhesia now has a reusable NIP-77 engine, but a sync worker does not need to depend on it for the first implementation.
|
||||
|
||||
---
|
||||
|
||||
## 6. Server Integration
|
||||
|
||||
### WebSocket
|
||||
|
||||
- `EVENT` -> `Parrhesia.API.Events.publish/2`
|
||||
- `REQ` -> `Parrhesia.API.Stream.subscribe/4`
|
||||
- `COUNT` -> `Parrhesia.API.Events.count/2`
|
||||
- `AUTH` stays connection-specific, but validation helpers may move to `API.Auth`
|
||||
- `NEG-*` maps to the reusable NIP-77 engine and remains exposed through the websocket transport boundary
|
||||
|
||||
### HTTP management
|
||||
|
||||
- NIP-98 validation via `Parrhesia.API.Auth.validate_nip98/3`
|
||||
- management methods via `Parrhesia.API.Admin`
|
||||
- sync peer CRUD and health endpoints may delegate to `Parrhesia.API.Sync`
|
||||
- identity and ACL management may delegate to `API.Identity` and `API.ACL`
|
||||
|
||||
### Local wrappers
|
||||
|
||||
`Parrhesia.Local.*` remain thin delegates over `Parrhesia.API.*`.
|
||||
|
||||
---
|
||||
|
||||
## 7. Relationship to Sync Profiles
|
||||
|
||||
This document is intentionally lower-level than `TRIBES-NOSTRSYNC` and `SYNC_DB.md`.
|
||||
|
||||
Those documents may require:
|
||||
|
||||
- `Parrhesia.API.Events.publish/2`
|
||||
- `Parrhesia.API.Events.query/2`
|
||||
- `Parrhesia.API.Stream.subscribe/4`
|
||||
- `Parrhesia.API.Sync.*`
|
||||
|
||||
But they must not move application conflict rules or payload semantics into Parrhesia.
|
||||
670
docs/slop/REVIEW.md
Normal file
670
docs/slop/REVIEW.md
Normal file
@@ -0,0 +1,670 @@
|
||||
# Parrhesia Relay — Technical Review
|
||||
|
||||
**Reviewer:** Case, Senior Systems & Protocol Engineer
|
||||
**Date:** 2026-03-14
|
||||
**Commit:** `63d3e7d` (master)
|
||||
**Scope:** Full codebase review against Nostr NIPs, MARMOT specs, and production readiness criteria
|
||||
|
||||
---
|
||||
|
||||
# Executive Summary
|
||||
|
||||
Parrhesia is a well-structured Nostr relay built on Elixir/OTP with PostgreSQL storage. The architecture is clean — clear separation between web, protocol, policy, and storage layers with a pluggable adapter pattern. Code quality is above average: consistent error handling, good use of `with` chains, comprehensive policy enforcement for MARMOT-specific concerns, and thoughtful outbound backpressure management. The developer clearly understands both the BEAM and the Nostr protocol.
|
||||
|
||||
However, the relay has **two critical defects** that make it unsuitable for any deployment beyond trusted local development: (1) **no Schnorr signature verification** — any client can forge events with arbitrary pubkeys, and (2) **lossy tag storage** — events returned from queries have truncated tags, violating NIP-01's data integrity guarantees. Several additional high-severity issues (no ephemeral event handling, missing NIP-42 relay tag validation, SQL LIKE injection vector, no ingest rate limiting) compound the risk.
|
||||
|
||||
**Overall risk rating: Critical**
|
||||
|
||||
This relay is **not production-ready** for any public deployment. It is suitable for local development and internal testing with trusted clients. With the critical and high findings addressed, it could serve as a solid private relay. Public internet deployment requires significant additional hardening.
|
||||
|
||||
---
|
||||
|
||||
# Top Findings
|
||||
|
||||
## [Critical] No Schnorr Signature Verification
|
||||
|
||||
**Area:** protocol correctness, security
|
||||
|
||||
**Why it matters:**
|
||||
NIP-01 mandates that relays MUST verify event signatures using Schnorr signatures over secp256k1. Without signature verification, any client can publish events with any pubkey. This completely breaks the identity and trust model of the Nostr protocol. Authentication (NIP-42), protected events (NIP-70), deletion (NIP-09), replaceable events — all rely on pubkey authenticity.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/protocol/event_validator.ex` validates the event ID hash (`validate_id_hash/1` at line 188) but never verifies the `sig` field against the `pubkey` using Schnorr/secp256k1. A `grep` for `schnorr`, `secp256k1`, `verify`, and `:crypto.verify` across the entire `lib/` directory returns zero results. The `validate_sig/1` function (line 182) only checks that `sig` is 128 lowercase hex characters (a 64-byte value) — a format check, not a cryptographic verification.
|
||||
|
||||
**Spec reference:**
|
||||
NIP-01: "Each user has a keypair. Signatures, public key, and encodings are done according to the Schnorr signatures standard for the curve secp256k1." The relay is expected to verify signatures to ensure event integrity.
|
||||
|
||||
**Attack scenario:**
|
||||
An unauthenticated client connects and publishes `["EVENT", {"id": "<valid-hash>", "pubkey": "<victim-pubkey>", "sig": "<any-64-byte-hex>", ...}]`. The relay stores and fans out the forged event as if the victim authored it. This enables impersonation, reputation attacks, and poisoning of replaceable events (kind 0 profile, kind 3 contacts, kind 10002 relay lists).
|
||||
|
||||
**Recommended fix:**
|
||||
Add a secp256k1 library dependency (e.g., `ex_secp256k1`, a NIF-based library implementing BIP-340 Schnorr verification — note that OTP's built-in `:crypto` does not support BIP-340 Schnorr over secp256k1) and add signature verification to `EventValidator.validate/1` after `validate_id_hash/1`. This is the single most important fix.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [Critical] Lossy Tag Storage — Events Returned With Truncated Tags
|
||||
|
||||
**Area:** protocol correctness, database
|
||||
|
||||
**Why it matters:**
|
||||
NIP-01 events have tags with arbitrary numbers of elements (e.g., `["e", "<event-id>", "<relay-url>", "<marker>"]`, `["p", "<pubkey>", "<relay-url>"]`, `["a", "<kind>:<pubkey>:<d-tag>", "<relay-url>"]`). The relay only stores the first two elements (`name` and `value`) of each tag in the `event_tags` table, and single-element tags (like `["-"]` for NIP-70 protected events) are dropped entirely. When events are queried back, the reconstructed tags are truncated.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/storage/adapters/postgres/events.ex`:
|
||||
- `insert_tags!/2` (line 266): pattern matches `[name, value | _rest]` — discards `_rest`, ignores tags with fewer than 2 elements.
|
||||
- `load_tags/1` (line 739): reconstructs tags as `[tag.name, tag.value]` — only 2 elements.
|
||||
- `to_nostr_event/2` (line 763): uses the truncated tags directly.
|
||||
|
||||
The `events` table itself does not store the full tag array. The full tags exist only in the original JSON during ingest, then are lost.
|
||||
|
||||
**Spec reference:**
|
||||
NIP-01: Tags are arrays of arbitrary strings. Relay implementations MUST return events with their complete, unmodified tags. Relay hints in `e`/`p` tags, markers, and other metadata are essential for client operation.
|
||||
|
||||
**Attack/failure scenario:**
|
||||
1. Client publishes event with `["e", "<id>", "wss://relay.example.com", "reply"]`.
|
||||
2. Another client queries and receives `["e", "<id>"]` — relay hint and marker lost.
|
||||
3. Client cannot follow the event reference to the correct relay.
|
||||
4. Protected events with `["-"]` tag lose their protection marker on retrieval, breaking NIP-70 semantics.
|
||||
|
||||
**Recommended fix:**
|
||||
Either (a) store the full tag JSON array in the events table (e.g., a `tags` JSONB column), using `event_tags` only as a query index, or (b) add additional columns to `event_tags` to preserve all elements (e.g., a `rest` text array column or store the full tag as a JSONB column). Option (a) is simpler and more correct.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [High] No Ephemeral Event Handling (Kind 20000–29999)
|
||||
|
||||
**Area:** protocol correctness, performance
|
||||
|
||||
**Why it matters:**
|
||||
NIP-01 defines kinds 20000–29999 as ephemeral events that relays are NOT expected to store. They should be fanned out to matching subscribers but never persisted. The relay currently persists all events regardless of kind, which wastes storage and violates client expectations.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/storage/adapters/postgres/events.ex`:
|
||||
- `replaceable_kind?/1` (line 515): handles kinds 0, 3, 10000–19999.
|
||||
- `addressable_kind?/1` (line 517): handles kinds 30000–39999.
|
||||
- No function checks for ephemeral kinds (20000–29999).
|
||||
- `put_event/2` persists all non-deletion, non-vanish events unconditionally.
|
||||
|
||||
`lib/parrhesia/web/connection.ex`:
|
||||
- `persist_event/1` (line 420): routes kind 5 to deletion, kind 62 to vanish, everything else to `put_event`. No ephemeral bypass.
|
||||
|
||||
The config has `accept_ephemeral_events: true` but it's never checked anywhere.
|
||||
|
||||
**Spec reference:**
|
||||
NIP-01: "Upon receiving an ephemeral event, a relay is NOT expected to store it and SHOULD send it directly to the clients that have matching filters open."
|
||||
|
||||
**Recommended fix:**
|
||||
In `persist_event/1`, check if the event kind is in the ephemeral range. If so, skip DB persistence and only fan out. The `accept_ephemeral_events` config should gate whether ephemeral events are accepted at all.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [High] NIP-42 AUTH Missing Relay Tag Validation
|
||||
|
||||
**Area:** protocol correctness, security
|
||||
|
||||
**Why it matters:**
|
||||
NIP-42 requires AUTH events to include a `["relay", "<relay-url>"]` tag that matches the relay's URL. Without this check, an AUTH event created for relay A can be replayed against relay B, enabling cross-relay authentication bypass.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/web/connection.ex`:
|
||||
- `validate_auth_event/1` (line 573): checks kind 22242 and presence of `challenge` tag.
|
||||
- `validate_auth_challenge/2` (line 590): checks challenge value matches.
|
||||
- **No validation of `relay` tag** anywhere in the auth flow.
|
||||
|
||||
**Spec reference:**
|
||||
NIP-42: AUTH event "MUST include `['relay', '<relay-url>']` tag". The relay MUST verify this tag matches its own URL.
|
||||
|
||||
**Attack scenario:**
|
||||
Attacker obtains an AUTH event from user for relay A (which may be the attacker's relay). Attacker replays this AUTH event against Parrhesia, which accepts it because the challenge is the only thing checked. If the challenge can be predicted or leaked, authentication is fully bypassed.
|
||||
|
||||
**Recommended fix:**
|
||||
Add relay URL validation to `validate_auth_event/1`. The relay should know its own canonical URL (from config or NIP-11 document) and verify the `relay` tag matches.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [High] SQL LIKE Pattern Injection in NIP-50 Search
|
||||
|
||||
**Area:** security, performance
|
||||
|
||||
**Why it matters:**
|
||||
The NIP-50 search implementation uses PostgreSQL `ILIKE` with unsanitized user input interpolated into the pattern. While not traditional SQL injection (the value is parameterized), LIKE metacharacters (`%`, `_`) in the search string alter the matching semantics and can cause catastrophic performance.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/storage/adapters/postgres/events.ex` line 627:
|
||||
```elixir
|
||||
where(query, [event], ilike(event.content, ^"%#{search}%"))
|
||||
```
|
||||
|
||||
The `search` variable is directly interpolated into the LIKE pattern. User-supplied values like `%a%b%c%d%e%f%g%h%i%j%` create pathological patterns that force PostgreSQL's backtracking LIKE matcher into expensive super-linear scans against the full `content` column of every matching row.
|
||||
|
||||
**Attack scenario:**
|
||||
Client sends `["REQ", "sub1", {"search": "%a%b%c%d%e%f%g%h%i%j%k%l%m%n%o%p%q%r%", "kinds": [1]}]`. PostgreSQL executes an expensive sequential scan with pathologically slow backtracking LIKE pattern matching. A handful of concurrent requests with adversarial patterns can saturate the DB connection pool and CPU.
|
||||
|
||||
**Recommended fix:**
|
||||
1. Escape LIKE metacharacters in user search input before interpolation, escaping the escape character itself first: `search |> String.replace("\\", "\\\\") |> String.replace("%", "\\%") |> String.replace("_", "\\_")`.
|
||||
2. Consider PostgreSQL full-text search (`tsvector`/`tsquery`) instead of ILIKE for better performance and correct semantics.
|
||||
3. Add a minimum search term length (e.g., 3 characters).
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [High] No Per-Connection or Per-IP Rate Limiting on Event Ingestion
|
||||
|
||||
**Area:** security, robustness
|
||||
|
||||
**Why it matters:**
|
||||
There is no rate limiting on EVENT submissions. A single client can flood the relay with events at wire speed, consuming DB connections, CPU (for validation), and disk I/O. The outbound queue has backpressure, but the ingest path is completely unbounded.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/web/connection.ex`:
|
||||
- `handle_event_ingest/2` (line 186): processes every EVENT message immediately with no throttle.
|
||||
- No token bucket, sliding window, or any rate-limiting mechanism anywhere in the codebase.
|
||||
- `grep` for `rate.limit`, `throttle`, `rate_limit` across `lib/` returns only error message strings, not actual rate-limiting logic.
|
||||
|
||||
**Attack scenario:**
|
||||
A single WebSocket connection sends 10,000 EVENT messages per second. Each triggers validation, policy checks, and a DB transaction. The DB connection pool (default 32) saturates within milliseconds. All other clients experience timeouts.
|
||||
|
||||
**Recommended fix:**
|
||||
Implement per-connection rate limiting in the WebSocket handler (token bucket per connection state). Consider also per-pubkey and per-IP rate limiting as a separate layer. Start with a simple `{count, window_start}` in connection state.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [High] max_frame_bytes and max_event_bytes Not Enforced
|
||||
|
||||
**Area:** security, robustness
|
||||
|
||||
**Why it matters:**
|
||||
The configuration defines `max_frame_bytes: 1_048_576` and `max_event_bytes: 262_144` but neither value is actually used to limit incoming data. The max_frame_bytes is only reported in the NIP-11 document. An attacker can send arbitrarily large WebSocket frames and events.
|
||||
|
||||
**Evidence:**
|
||||
- `grep` for `max_frame_bytes` in `lib/`: only found in `relay_info.ex` for NIP-11 output.
|
||||
- `grep` for `max_event_bytes` in `lib/`: no results at all.
|
||||
- The Bandit WebSocket upgrade in `router.ex` line 53 passes `timeout: 60_000` but no `max_frame_size` option.
|
||||
- No payload size check in `handle_in/2` before JSON decoding.
|
||||
|
||||
**Attack scenario:**
|
||||
Client sends a 100MB WebSocket frame containing a single event with a massive `content` field or millions of tags. The relay attempts to JSON-decode the entire payload in memory, potentially causing OOM or extreme GC pressure.
|
||||
|
||||
**Recommended fix:**
|
||||
1. Pass `max_frame_size` to Bandit's WebSocket upgrade options.
|
||||
2. Check `byte_size(payload)` in `handle_in/2` before calling `Protocol.decode_client/1`.
|
||||
3. Optionally check individual event size after JSON decoding.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [Medium] NIP-09 Deletion Missing "a" Tag Support for Addressable Events
|
||||
|
||||
**Area:** protocol correctness
|
||||
|
||||
**Why it matters:**
|
||||
NIP-09 specifies that deletion events (kind 5) can reference addressable/replaceable events via `"a"` tags (format: `"<kind>:<pubkey>:<d-tag>"`). The current implementation only handles `"e"` tags.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/storage/adapters/postgres/events.ex`:
|
||||
- `extract_delete_event_ids/1` (line 821): only extracts `["e", event_id | _rest]` tags.
|
||||
- No handling of `["a", ...]` tags.
|
||||
- No query against addressable_event_state or events by kind+pubkey+d_tag.
|
||||
|
||||
**Spec reference:**
|
||||
NIP-09: "The deletion event MAY contain `a` tags pointing to the replaceable/addressable events to be deleted."
|
||||
|
||||
**Recommended fix:**
|
||||
Extract `"a"` tags from the deletion event, parse the `kind:pubkey:d_tag` format, and soft-delete matching events from the addressable/replaceable state tables, ensuring the deleter's pubkey matches.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [Medium] Subscription Index GenServer Is a Single-Point Bottleneck
|
||||
|
||||
**Area:** performance, OTP/design
|
||||
|
||||
**Why it matters:**
|
||||
Every event fanout goes through `Index.candidate_subscription_keys/1`, which is a synchronous `GenServer.call` to a single process. Under load with many connections and high event throughput, this process becomes the serialization point for all fanout operations.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/subscriptions/index.ex`:
|
||||
- `candidate_subscription_keys/2` (line 68): `GenServer.call(server, {:candidate_subscription_keys, event})`
|
||||
- This is called from every connection process for every ingested event (via `fanout_event/1` in `connection.ex` line 688).
|
||||
- The ETS tables are `:protected`, meaning only the owning GenServer can write but any process can read.
|
||||
|
||||
**Recommended fix:**
|
||||
Since the ETS tables are already `:protected` (readable by all processes), make `candidate_subscription_keys/1` read directly from ETS without going through the GenServer. Only mutations (upsert/remove) need to go through the GenServer. This eliminates the serialization bottleneck entirely.
|
||||
|
||||
Because the tables are `:protected`, direct reads from arbitrary caller processes are already safe; only writes must remain serialized through the owning GenServer.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [Medium] Moderation Cache ETS Table Creation Race Condition
|
||||
|
||||
**Area:** robustness, OTP/design
|
||||
|
||||
**Why it matters:**
|
||||
The moderation cache ETS table is lazily created on first access via `cache_table_ref/0`. If two processes simultaneously call a moderation function before the table exists, both will attempt `ets.new(:parrhesia_moderation_cache, [:named_table, ...])` — one will succeed and one will hit the rescue clause. While the rescue catches the `ArgumentError`, this is a race-prone pattern.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/storage/adapters/postgres/moderation.ex` lines 211–231:
|
||||
```elixir
|
||||
defp cache_table_ref do
|
||||
case :ets.whereis(@cache_table) do
|
||||
:undefined ->
|
||||
try do
|
||||
:ets.new(@cache_table, [...])
|
||||
rescue
|
||||
ArgumentError -> @cache_table
|
||||
end
|
||||
@cache_table
|
||||
_table_ref ->
|
||||
@cache_table
|
||||
end
|
||||
end
|
||||
```
|
||||
|
||||
Additionally, `ensure_cache_scope_loaded/1` has a TOCTOU race: it checks `ets.member(table, loaded_key)`, then loads from DB and inserts — two processes could both load and insert simultaneously, though this is less harmful (just redundant work).
|
||||
|
||||
**Recommended fix:**
|
||||
Create the ETS table in a supervised process (e.g., in `Parrhesia.Policy.Supervisor` or `Parrhesia.Storage.Supervisor`) at startup, not lazily. This eliminates the race entirely.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [Medium] Archiver SQL Injection
|
||||
|
||||
**Area:** security
|
||||
|
||||
**Why it matters:**
|
||||
The `Parrhesia.Storage.Archiver.archive_sql/2` function directly interpolates arguments into a SQL string without any sanitization or quoting.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/storage/archiver.ex` line 32:
|
||||
```elixir
|
||||
def archive_sql(partition_name, archive_table_name) do
|
||||
"INSERT INTO #{archive_table_name} SELECT * FROM #{partition_name};"
|
||||
end
|
||||
```
|
||||
|
||||
If either argument is derived from user input or external configuration, this is a SQL injection vector.
|
||||
|
||||
**Attack scenario:**
|
||||
If the management API or any admin tool passes user-controlled input to this function (e.g., a partition name from a web request), an attacker could inject: `archive_sql("events_default; DROP TABLE events; --", "archive")`.
|
||||
|
||||
**Recommended fix:**
|
||||
Quote identifiers with double quotes and escape any embedded double quotes (naive interpolation such as `~s("#{identifier}")` is still injectable), or better, validate that inputs match expected partition name patterns (e.g., `events_YYYYMM`) before they ever reach SQL construction.
|
||||
|
||||
**Confidence:** Medium (depends on whether this function is exposed to external input)
|
||||
|
||||
---
|
||||
|
||||
## [Medium] Count Query Materialises All Matching Event IDs in Memory
|
||||
|
||||
**Area:** performance
|
||||
|
||||
**Why it matters:**
|
||||
The COUNT implementation fetches all matching event IDs into Elixir memory, deduplicates them with `MapSet.new()`, then counts. For large result sets, this is orders of magnitude slower and more memory-intensive than a SQL `COUNT(DISTINCT id)`.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/storage/adapters/postgres/events.ex` lines 111–127:
|
||||
```elixir
|
||||
def count(_context, filters, opts) when is_list(opts) do
|
||||
...
|
||||
total_count =
|
||||
filters
|
||||
|> Enum.flat_map(fn filter ->
|
||||
filter
|
||||
|> event_id_query_for_filter(now, opts)
|
||||
|> Repo.all() # fetches ALL matching IDs
|
||||
end)
|
||||
|> MapSet.new() # deduplicates in memory
|
||||
|> MapSet.size()
|
||||
...
|
||||
end
|
||||
```
|
||||
|
||||
**Attack scenario:**
|
||||
Client sends `["COUNT", "c1", {"kinds": [1]}]` on a relay with 10 million kind-1 events. The relay fetches 10 million binary IDs into memory, builds a MapSet, then counts. This could use hundreds of megabytes of RAM per request.
|
||||
|
||||
**Recommended fix:**
|
||||
For single-filter counts, use `SELECT COUNT(*)` or `SELECT COUNT(DISTINCT id)` directly in SQL. For multi-filter counts where deduplication is needed, use `UNION` in SQL rather than materialising in Elixir.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [Medium] NIP-42 AUTH Does Not Validate created_at Freshness
|
||||
|
||||
**Area:** protocol correctness, security
|
||||
|
||||
**Why it matters:**
|
||||
NIP-42 suggests AUTH events should have a `created_at` close to current time (within ~10 minutes). The relay's AUTH handler validates the event (which includes a future-skew check of 15 minutes) but does not check if the event is too old. An AUTH event from days ago with a matching challenge could be replayed.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/web/connection.ex`:
|
||||
- `handle_auth/2` calls `Protocol.validate_event(auth_event)` which checks future skew but not past staleness.
|
||||
- `validate_auth_event/1` (line 573) only checks kind and challenge tag.
|
||||
- No `created_at` freshness check for AUTH events.
|
||||
|
||||
The NIP-98 implementation (`auth/nip98.ex`) DOES have a 60-second freshness check, but the WebSocket AUTH path does not.
|
||||
|
||||
**Recommended fix:**
|
||||
Add a staleness check: reject AUTH events where `created_at` is more than N seconds in the past (e.g., 600 seconds matching NIP-42 suggestion).
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [Low] NIP-11 Missing CORS Headers
|
||||
|
||||
**Area:** protocol correctness
|
||||
|
||||
**Why it matters:**
|
||||
NIP-11 states relays MUST accept CORS requests by sending appropriate headers. The relay info endpoint does not set CORS headers.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/web/router.ex` line 44–55: the `/relay` GET handler returns NIP-11 JSON but does not set `Access-Control-Allow-Origin`, `Access-Control-Allow-Headers`, or `Access-Control-Allow-Methods` headers. No CORS plug is configured in the router.
|
||||
|
||||
**Recommended fix:**
|
||||
Add CORS headers to the NIP-11 response, at minimum `Access-Control-Allow-Origin: *`.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [Low] Event Query Deduplication Done in Elixir Instead of SQL
|
||||
|
||||
**Area:** performance
|
||||
|
||||
**Why it matters:**
|
||||
When a REQ has multiple filters, each filter runs a separate DB query, results are merged and deduplicated in Elixir using `Map.put_new/3`. This means the relay may fetch duplicate events from the DB and transfer them over the wire from PostgreSQL, only to discard them.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/storage/adapters/postgres/events.ex` lines 85–95:
|
||||
```elixir
|
||||
persisted_events =
|
||||
filters
|
||||
|> Enum.flat_map(fn filter ->
|
||||
filter |> event_query_for_filter(now, opts) |> Repo.all()
|
||||
end)
|
||||
|> deduplicate_events()
|
||||
|> sort_persisted_events()
|
||||
```
|
||||
|
||||
**Recommended fix:**
|
||||
For multiple filters, consider using SQL `UNION` or `UNION ALL` with a final `DISTINCT ON` to push deduplication to the database. Alternatively, for the common case of a single filter (which is the majority of REQ messages), this is fine as-is.
|
||||
|
||||
**Confidence:** Medium
|
||||
|
||||
---
|
||||
|
||||
## [Low] No Validation of Subscription ID Content
|
||||
|
||||
**Area:** robustness
|
||||
|
||||
**Why it matters:**
|
||||
Subscription IDs are validated for non-emptiness and max length (64 chars) but not for content. NIP-01 says subscription IDs are "arbitrary" strings, but allowing control characters, null bytes, or extremely long Unicode sequences could cause issues with logging, telemetry, or downstream systems.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/protocol.ex` line 218:
|
||||
```elixir
|
||||
defp valid_subscription_id?(subscription_id) do
|
||||
subscription_id != "" and String.length(subscription_id) <= 64
|
||||
end
|
||||
```
|
||||
|
||||
`String.length/1` counts Unicode graphemes, not bytes. A subscription ID of 64 emoji characters could be hundreds of bytes.
|
||||
|
||||
**Recommended fix:**
|
||||
Consider validating that subscription IDs contain only printable ASCII, or at least limit by byte size rather than grapheme count.
|
||||
|
||||
**Confidence:** Medium
|
||||
|
||||
---
|
||||
|
||||
# Protocol Compliance Review
|
||||
|
||||
## NIPs Implemented
|
||||
- **NIP-01**: Core protocol — substantially implemented. Critical gaps: no signature verification, lossy tags, no ephemeral handling.
|
||||
- **NIP-09**: Event deletion — partially implemented (kind 5 with `e` tags only, missing `a` tag deletion).
|
||||
- **NIP-11**: Relay information — implemented, missing CORS headers.
|
||||
- **NIP-22**: Event `created_at` limits — implemented (future skew check, configurable).
|
||||
- **NIP-40**: Expiration — implemented (storage, query filtering, periodic cleanup). Does not reject already-expired events on publish (SHOULD per spec).
|
||||
- **NIP-42**: Authentication — implemented with challenge-response. Missing relay tag validation, AUTH event staleness check.
|
||||
- **NIP-45**: COUNT — implemented with basic and HLL support. Performance concern with in-memory deduplication.
|
||||
- **NIP-50**: Search — implemented via ILIKE. SQL injection concern. No full-text search.
|
||||
- **NIP-70**: Protected events — implemented (tag check, pubkey match). Note: protected tag `["-"]` is lost on retrieval due to single-element tag storage bug.
|
||||
- **NIP-77**: Negentropy — stub implementation (session tracking only, no actual reconciliation logic).
|
||||
- **NIP-86**: Relay management — implemented with NIP-98 auth and audit logging.
|
||||
- **NIP-98**: HTTP auth — implemented with freshness check.
|
||||
- **MARMOT**: Kinds 443–449, 1059, 10050–10051 — validation and policy enforcement implemented.
|
||||
|
||||
## Non-Compliant Behaviours
|
||||
1. **No signature verification** — violates NIP-01 MUST.
|
||||
2. **Lossy tag storage** — violates NIP-01 data integrity.
|
||||
3. **Ephemeral events persisted** — violates NIP-01 SHOULD NOT store.
|
||||
4. **AUTH missing relay tag check** — violates NIP-42 MUST.
|
||||
5. **NIP-09 missing `a` tag deletion** — partial implementation.
|
||||
6. **NIP-40: expired events accepted on publish** — violates SHOULD reject.
|
||||
7. **NIP-11 missing CORS** — violates MUST.
|
||||
|
||||
## Ambiguous Areas
|
||||
- **NIP-01 replaceable event tie-breaking**: implemented correctly (lowest ID wins).
|
||||
- **Deletion event storage**: kind 5 events are stored (correct — relay SHOULD continue publishing deletion requests).
|
||||
- **NIP-45 HLL**: the HLL payload generation is a placeholder (hash of filter+count), not actual HyperLogLog registers. Clients expecting real HLL data will get nonsense.
|
||||
|
||||
---
|
||||
|
||||
# Robustness Review
|
||||
|
||||
The relay handles several failure modes well:
|
||||
- WebSocket binary frames are rejected with a clear notice.
|
||||
- Invalid JSON returns a structured NOTICE.
|
||||
- GenServer exits are caught with `catch :exit` patterns throughout the connection handler.
|
||||
- Outbound queue has configurable backpressure (close, drop_oldest, drop_newest).
|
||||
- Subscription limits are enforced per connection.
|
||||
- Process monitors clean up subscription index entries when connections die.
|
||||
|
||||
**Key resilience gaps:**
|
||||
1. **No ingest rate limiting** — one client can monopolise the relay.
|
||||
2. **No payload size enforcement** — oversized frames/events are processed.
|
||||
3. **Unbounded tag count** — an event with 100,000 tags will generate 100,000 DB inserts in a single transaction.
|
||||
4. **No filter complexity limits** — a filter with hundreds of tag values generates large `ANY(...)` queries.
|
||||
5. **COUNT query memory explosion** — large counts materialise all IDs in memory.
|
||||
6. **No timeout on DB queries** — a slow query (e.g., adversarial search pattern) blocks the connection process indefinitely.
|
||||
7. **Single-GenServer bottleneck** — Subscription Index serialises all fanout lookups.
|
||||
|
||||
**Can one bad client destabilise the relay?** Yes. Through event spam (no rate limit), adversarial search patterns (LIKE injection), or large COUNT queries (memory exhaustion).
|
||||
|
||||
---
|
||||
|
||||
# Security Review
|
||||
|
||||
**Primary Attack Surfaces:**
|
||||
1. **WebSocket ingress** — unauthenticated by default, no rate limiting, no payload size enforcement.
|
||||
2. **NIP-50 search** — LIKE pattern injection enables CPU/IO exhaustion.
|
||||
3. **NIP-86 management API** — properly gated by NIP-98, but `management_auth_required` is a config flag that defaults to `true`. If misconfigured, management API is open.
|
||||
4. **Event forgery** — no signature verification means complete trust of client-provided pubkeys.
|
||||
|
||||
**DoS Vectors (ranked by impact):**
|
||||
1. Event spam flood (unbounded ingest rate).
|
||||
2. Adversarial ILIKE search patterns (DB CPU exhaustion).
|
||||
3. Large COUNT queries (memory exhaustion).
|
||||
4. Many concurrent subscriptions with broad filters (fanout amplification).
|
||||
5. Oversized events with thousands of tags (transaction bloat).
|
||||
6. Rapid REQ/CLOSE cycling (subscription index churn through single GenServer).
|
||||
|
||||
**Authentication/Authorization:**
|
||||
- NIP-42 AUTH flow works but is weakened by missing relay tag validation.
|
||||
- Protected event enforcement is correct (pubkey match required).
|
||||
- Giftwrap (kind 1059) access control is properly implemented.
|
||||
- Management API NIP-98 auth is solid with freshness check.
|
||||
|
||||
**No dynamic atom creation risks found.** Method names in admin are handled as strings. No `String.to_atom` or unsafe deserialization patterns detected.
|
||||
|
||||
**Information leakage:** Error messages in some paths use `inspect(reason)` which could leak internal Elixir terms to clients (e.g., `connection.ex` line 297, line 353, line 389). Consider sanitising.
|
||||
|
||||
---
|
||||
|
||||
# Performance Review
|
||||
|
||||
**Likely Hotspots:**
|
||||
1. **Event ingest path**: validation → policy check → DB transaction (3 inserts + possible state table upsert). The transaction is the bottleneck — each event requires at minimum 2 DB round-trips (event_ids + events insert), plus tag inserts.
|
||||
2. **Subscription fanout**: `Index.candidate_subscription_keys/1` through GenServer.call — serialisation point.
|
||||
3. **Query path**: per-filter DB queries without UNION, Elixir-side deduplication and sorting.
|
||||
4. **COUNT path**: materialises all matching IDs in memory.
|
||||
5. **Search (ILIKE)**: sequential scan without text search index.
|
||||
|
||||
**Missing Indexes:**
|
||||
- No index on `events.content` for search (NIP-50). ILIKE requires sequential scan.
|
||||
- No composite index on `events (pubkey, kind, created_at)` for replaceable event queries.
|
||||
- The `event_tags` index on `(name, value, event_created_at)` is good for tag queries.
|
||||
|
||||
**Scaling Ceiling:**
|
||||
- **DB-bound** at moderate load (event ingest transactions).
|
||||
- **CPU-bound** at high event rates if signature verification is added.
|
||||
- **Memory-bound** if adversarial COUNT queries are submitted.
|
||||
- **GenServer-bound** on fanout at high subscription counts.
|
||||
|
||||
**Top 3 Performance Improvements by Impact:**
|
||||
1. **Make subscription index reads lock-free** — read ETS directly instead of through GenServer (effort: S, impact: High).
|
||||
2. **Push COUNT to SQL** — `SELECT COUNT(DISTINCT id)` instead of materialising (effort: S, impact: High).
|
||||
3. **Add full-text search index** — `GIN` index on `tsvector` column for NIP-50, replacing ILIKE (effort: M, impact: High).
|
||||
|
||||
---
|
||||
|
||||
# Database and Schema Review
|
||||
|
||||
**Strengths:**
|
||||
- Range partitioning on `events.created_at` — good for time-based queries and partition pruning.
|
||||
- Composite primary key `(created_at, id)` enables partition pruning on most queries.
|
||||
- `event_ids` table for deduplication with `on_conflict: :nothing` (SQL `ON CONFLICT DO NOTHING`) — clean idempotency.
|
||||
- State tables for replaceable/addressable events — correct approach with proper upsert/retire logic.
|
||||
- Partial indexes on `expires_at` and `deleted_at` — avoids indexing NULLs.
|
||||
- FK cascade from `event_tags` to `events` — ensures tag cleanup on delete.
|
||||
|
||||
**Weaknesses:**
|
||||
1. **No unique index on `events.id`** — only a non-unique index. Two events with the same ID but different `created_at` could theoretically exist (the `event_ids` table prevents this at the application level, but there's no DB-level constraint on the events table).
|
||||
2. **`event_tags` stores only name+value** — data loss for multi-element tags (Critical finding above).
|
||||
3. **No `content` index for search** — ILIKE without index = sequential scan.
|
||||
4. **`events.kind` is `integer` (4 bytes)** — NIP-01 allows kinds 0–65535, so `smallint` (2 bytes) would suffice and save space.
|
||||
5. **No retention/partitioning strategy documented** — the default partition catches everything. No automated partition creation or cleanup.
|
||||
6. **`d_tag` column in events table** — redundant with tag storage (but useful for addressable event queries). Not indexed, so no direct benefit. The addressable_event_state table handles this.
|
||||
7. **No index on `events (id, created_at)` for deletion queries** — `delete_by_request` queries by `id` and `pubkey` but the `id` index doesn't include `pubkey`.
|
||||
|
||||
**Missing DB-Level Invariants:**
|
||||
- Events table should have a unique constraint on `id` (across partitions, which is tricky with range partitioning — the `event_ids` table compensates).
|
||||
- No CHECK constraint on `kind >= 0`.
|
||||
- No CHECK constraint on `created_at >= 0`.
|
||||
|
||||
---
|
||||
|
||||
# Test Review
|
||||
|
||||
**Well-Covered Areas:**
|
||||
- Protocol encode/decode (`protocol_test.exs`)
|
||||
- Filter validation and matching, including property-based tests (`filter_test.exs`, `filter_property_test.exs`)
|
||||
- Event validation including MARMOT-specific kinds (`event_validator_marmot_test.exs`)
|
||||
- Policy enforcement (`event_policy_test.exs`)
|
||||
- Storage adapter contract compliance (`adapter_contract_test.exs`, `behaviour_contracts_test.exs`)
|
||||
- PostgreSQL event lifecycle (put, query, delete, replace) (`events_lifecycle_test.exs`)
|
||||
- WebSocket connection lifecycle (`connection_test.exs`)
|
||||
- Auth challenges (`challenges_test.exs`)
|
||||
- NIP-98 HTTP auth (`nip98_test.exs`)
|
||||
- Fault injection (`fault_injection_test.exs`)
|
||||
- Query plan regression (`query_plan_regression_test.exs`) — excellent practice
|
||||
|
||||
**Missing Critical Tests:**
|
||||
1. **No signature verification tests** (because the feature doesn't exist).
|
||||
2. **No test for tag data integrity** — round-trip test that verifies events with multi-element tags are returned unchanged.
|
||||
3. **No ephemeral event test** — verifying kind 20000+ events are not persisted.
|
||||
4. **No NIP-09 `a` tag deletion test**.
|
||||
5. **No adversarial input tests** — LIKE injection patterns, oversized payloads, events with extreme tag counts.
|
||||
6. **No concurrent write tests** — multiple processes writing the same replaceable event simultaneously.
|
||||
7. **No AUTH relay tag validation test**.
|
||||
8. **No test for expired event rejection on publish** (NIP-40).
|
||||
|
||||
**5 Most Valuable Tests to Add:**
|
||||
1. Round-trip tag integrity: publish event with multi-element tags, query back, verify tags are identical.
|
||||
2. Signature verification: publish event with wrong signature, verify rejection.
|
||||
3. Concurrent replaceable event upsert: 10 processes writing same pubkey+kind, verify only one winner.
|
||||
4. Adversarial search pattern: verify ILIKE with `%` metacharacters doesn't cause excessive query time.
|
||||
5. Ingest rate limiting under load: verify relay remains responsive under event flood.
|
||||
|
||||
---
|
||||
|
||||
# Quick Wins
|
||||
|
||||
| Change | Impact | Effort |
|
||||
|--------|--------|--------|
|
||||
| Add Schnorr signature verification | Critical | M |
|
||||
| Store full tags (add `tags` JSONB column to events) | Critical | M |
|
||||
| Escape LIKE metacharacters in search | High | S |
|
||||
| Read subscription index ETS directly (bypass GenServer for reads) | High | S |
|
||||
| Push COUNT to SQL `COUNT(DISTINCT)` | High | S |
|
||||
| Add `max_frame_size` to Bandit WebSocket options | High | S |
|
||||
| Add AUTH relay tag validation | High | S |
|
||||
| Skip persistence for ephemeral events | High | S |
|
||||
| Add payload size check before JSON decode | High | S |
|
||||
| Add CORS headers to NIP-11 endpoint | Low | S |
|
||||
| Create ETS moderation cache table in supervisor | Medium | S |
|
||||
| Add `created_at` staleness check to AUTH handler | Medium | S |
|
||||
|
||||
---
|
||||
|
||||
# Deep Refactor Opportunities
|
||||
|
||||
1. **Full-text search for NIP-50**: Replace ILIKE with PostgreSQL `tsvector`/`tsquery` and a GIN index. This eliminates the LIKE injection vector and dramatically improves search performance. Effort: M. Worth it if search is a used feature.
|
||||
|
||||
2. **SQL UNION for multi-filter queries**: Instead of running N queries and deduplicating in Elixir, build a single SQL query with UNION ALL and DISTINCT. Reduces DB round-trips and pushes deduplication to the engine. Effort: M.
|
||||
|
||||
3. **Per-connection rate limiter**: Add a token-bucket rate limiter to the connection state that throttles EVENT submissions. Consider a pluggable rate-limiting behaviour for flexibility. Effort: M.
|
||||
|
||||
4. **Event partitioning strategy**: Automate partition creation (monthly or weekly) and implement partition detach/archive for old data. The current default partition will accumulate all data forever. Effort: L.
|
||||
|
||||
5. **Batched tag insertion**: Instead of `Repo.insert_all` for tags within the transaction, accumulate tags and use a single multi-row insert with explicit conflict handling. Reduces round-trips for events with many tags. Effort: S.
|
||||
|
||||
---
|
||||
|
||||
# Final Verdict
|
||||
|
||||
**Would I trust this relay:**
|
||||
- **For local development:** Yes, with awareness of the signature bypass.
|
||||
- **For a small private relay (trusted clients):** Conditionally, after fixing lossy tags. The signature gap is tolerable only if all clients are trusted.
|
||||
- **For a medium public relay:** No. Missing rate limiting, signature verification, and the LIKE injection vector make it unsafe.
|
||||
- **For a hostile public internet deployment:** Absolutely not.
|
||||
|
||||
---
|
||||
|
||||
**Ship now?** No.
|
||||
|
||||
**Top blockers before deployment:**
|
||||
|
||||
1. **Add Schnorr signature verification** — without this, the relay has no identity security.
|
||||
2. **Fix lossy tag storage** — store full tag arrays so events survive round-trips intact.
|
||||
3. **Handle ephemeral events** — don't persist kinds 20000–29999.
|
||||
4. **Escape LIKE metacharacters in search** — prevent DoS via adversarial patterns.
|
||||
5. **Enforce payload size limits** — pass `max_frame_size` to Bandit, check payload size before decode.
|
||||
6. **Add basic ingest rate limiting** — per-connection token bucket at minimum.
|
||||
7. **Add AUTH relay tag validation** — prevent cross-relay AUTH replay.
|
||||
|
||||
After these seven fixes, the relay would be suitable for a private deployment with moderate trust. Public deployment would additionally require:
|
||||
- Per-IP rate limiting
|
||||
- Full-text search index (replacing ILIKE)
|
||||
- SQL-based COUNT
|
||||
- Lock-free subscription index reads
|
||||
- NIP-09 `a` tag deletion support
|
||||
- Comprehensive adversarial input testing
|
||||
27
flake.lock
generated
Normal file
27
flake.lock
generated
Normal file
@@ -0,0 +1,27 @@
|
||||
{
|
||||
"nodes": {
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1773389992,
|
||||
"narHash": "sha256-wvfdLLWJ2I9oEpDd9PfMA8osfIZicoQ5MT1jIwNs9Tk=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "c06b4ae3d6599a672a6210b7021d699c351eebda",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"nixpkgs": "nixpkgs"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
||||
68
flake.nix
Normal file
68
flake.nix
Normal file
@@ -0,0 +1,68 @@
|
||||
{
|
||||
description = "Parrhesia Nostr relay";
|
||||
|
||||
inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
|
||||
|
||||
outputs = {nixpkgs, ...}: let
|
||||
systems = [
|
||||
"x86_64-linux"
|
||||
"aarch64-linux"
|
||||
"x86_64-darwin"
|
||||
"aarch64-darwin"
|
||||
];
|
||||
|
||||
forAllSystems = nixpkgs.lib.genAttrs systems;
|
||||
in {
|
||||
formatter = forAllSystems (system: (import nixpkgs {inherit system;}).alejandra);
|
||||
|
||||
packages = forAllSystems (
|
||||
system: let
|
||||
pkgs = import nixpkgs {inherit system;};
|
||||
lib = pkgs.lib;
|
||||
parrhesia = pkgs.callPackage ./default.nix {};
|
||||
in
|
||||
{
|
||||
default = parrhesia;
|
||||
inherit parrhesia;
|
||||
}
|
||||
// lib.optionalAttrs pkgs.stdenv.hostPlatform.isLinux {
|
||||
dockerImage = pkgs.dockerTools.buildLayeredImage {
|
||||
name = "parrhesia";
|
||||
tag = "latest";
|
||||
|
||||
contents = [
|
||||
parrhesia
|
||||
pkgs.bash
|
||||
pkgs.cacert
|
||||
pkgs.coreutils
|
||||
pkgs.fakeNss
|
||||
];
|
||||
|
||||
extraCommands = ''
|
||||
mkdir -p tmp
|
||||
chmod 1777 tmp
|
||||
'';
|
||||
|
||||
config = {
|
||||
Entrypoint = ["${parrhesia}/bin/parrhesia"];
|
||||
Cmd = ["start"];
|
||||
ExposedPorts = {
|
||||
"4413/tcp" = {};
|
||||
};
|
||||
WorkingDir = "/";
|
||||
User = "65534:65534";
|
||||
Env = [
|
||||
"HOME=/tmp"
|
||||
"LANG=C.UTF-8"
|
||||
"LC_ALL=C.UTF-8"
|
||||
"MIX_ENV=prod"
|
||||
"PORT=4413"
|
||||
"RELEASE_DISTRIBUTION=none"
|
||||
"SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt"
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
||||
);
|
||||
};
|
||||
}
|
||||
250
lib/parrhesia/api/acl.ex
Normal file
250
lib/parrhesia/api/acl.ex
Normal file
@@ -0,0 +1,250 @@
|
||||
defmodule Parrhesia.API.ACL do
|
||||
@moduledoc """
|
||||
Public ACL API and rule matching for protected sync traffic.
|
||||
"""
|
||||
|
||||
alias Parrhesia.API.RequestContext
|
||||
alias Parrhesia.Protocol.Filter
|
||||
alias Parrhesia.Storage
|
||||
|
||||
@spec grant(map(), keyword()) :: :ok | {:error, term()}
|
||||
def grant(rule, _opts \\ []) do
|
||||
with {:ok, _stored_rule} <- Storage.acl().put_rule(%{}, normalize_rule(rule)) do
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@spec revoke(map(), keyword()) :: :ok | {:error, term()}
|
||||
def revoke(rule, _opts \\ []) do
|
||||
Storage.acl().delete_rule(%{}, normalize_delete_selector(rule))
|
||||
end
|
||||
|
||||
@spec list(keyword()) :: {:ok, [map()]} | {:error, term()}
|
||||
def list(opts \\ []) do
|
||||
Storage.acl().list_rules(%{}, normalize_list_opts(opts))
|
||||
end
|
||||
|
||||
@spec check(atom(), map(), keyword()) :: :ok | {:error, term()}
|
||||
def check(capability, subject, opts \\ [])
|
||||
|
||||
def check(capability, subject, opts)
|
||||
when capability in [:sync_read, :sync_write] and is_map(subject) do
|
||||
context = Keyword.get(opts, :context, %RequestContext{})
|
||||
|
||||
with {:ok, normalized_capability} <- normalize_capability(capability),
|
||||
{:ok, normalized_context} <- normalize_context(context),
|
||||
{:ok, protected_filters} <- protected_filters() do
|
||||
if protected_subject?(normalized_capability, subject, protected_filters) do
|
||||
authorize_subject(normalized_capability, subject, normalized_context)
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def check(_capability, _subject, _opts), do: {:error, :invalid_acl_capability}
|
||||
|
||||
@spec protected_read?(map()) :: boolean()
|
||||
def protected_read?(filter) when is_map(filter) do
|
||||
case protected_filters() do
|
||||
{:ok, protected_filters} ->
|
||||
protected_subject?(:sync_read, filter, protected_filters)
|
||||
|
||||
{:error, _reason} ->
|
||||
false
|
||||
end
|
||||
end
|
||||
|
||||
def protected_read?(_filter), do: false
|
||||
|
||||
@spec protected_write?(map()) :: boolean()
|
||||
def protected_write?(event) when is_map(event) do
|
||||
case protected_filters() do
|
||||
{:ok, protected_filters} ->
|
||||
protected_subject?(:sync_write, event, protected_filters)
|
||||
|
||||
{:error, _reason} ->
|
||||
false
|
||||
end
|
||||
end
|
||||
|
||||
def protected_write?(_event), do: false
|
||||
|
||||
defp authorize_subject(capability, subject, %RequestContext{} = context) do
|
||||
if MapSet.size(context.authenticated_pubkeys) == 0 do
|
||||
{:error, :auth_required}
|
||||
else
|
||||
capability
|
||||
|> list_rules_for_capability()
|
||||
|> authorize_against_rules(capability, context.authenticated_pubkeys, subject)
|
||||
end
|
||||
end
|
||||
|
||||
defp list_rules_for_capability(capability) do
|
||||
Storage.acl().list_rules(%{}, principal_type: :pubkey, capability: capability)
|
||||
end
|
||||
|
||||
defp authorize_against_rules({:ok, rules}, capability, authenticated_pubkeys, subject) do
|
||||
if Enum.any?(authenticated_pubkeys, &principal_authorized?(&1, subject, rules)) do
|
||||
:ok
|
||||
else
|
||||
{:error, denial_reason(capability)}
|
||||
end
|
||||
end
|
||||
|
||||
defp authorize_against_rules({:error, reason}, _capability, _authenticated_pubkeys, _subject),
|
||||
do: {:error, reason}
|
||||
|
||||
defp principal_authorized?(authenticated_pubkey, subject, rules) do
|
||||
Enum.any?(rules, fn rule ->
|
||||
rule.principal == authenticated_pubkey and
|
||||
rule_covers_subject?(rule.capability, rule.match, subject)
|
||||
end)
|
||||
end
|
||||
|
||||
defp rule_covers_subject?(:sync_read, rule_match, filter),
|
||||
do: filter_within_rule?(filter, rule_match)
|
||||
|
||||
defp rule_covers_subject?(:sync_write, rule_match, event),
|
||||
do: Filter.matches_filter?(event, rule_match)
|
||||
|
||||
defp protected_subject?(:sync_read, filter, protected_filters) do
|
||||
Enum.any?(protected_filters, &filters_overlap?(filter, &1))
|
||||
end
|
||||
|
||||
defp protected_subject?(:sync_write, event, protected_filters) do
|
||||
Enum.any?(protected_filters, &Filter.matches_filter?(event, &1))
|
||||
end
|
||||
|
||||
defp filters_overlap?(left, right) when is_map(left) and is_map(right) do
|
||||
comparable_keys =
|
||||
left
|
||||
|> comparable_filter_keys(right)
|
||||
|> Enum.reject(&(&1 in ["limit", "search", "since", "until"]))
|
||||
|
||||
Enum.all?(
|
||||
comparable_keys,
|
||||
&filter_constraint_compatible?(Map.get(left, &1), Map.get(right, &1), &1)
|
||||
) and
|
||||
filter_ranges_overlap?(left, right)
|
||||
end
|
||||
|
||||
defp filter_constraint_compatible?(nil, _right, _key), do: true
|
||||
defp filter_constraint_compatible?(_left, nil, _key), do: true
|
||||
|
||||
defp filter_constraint_compatible?(left, right, _key) when is_list(left) and is_list(right) do
|
||||
MapSet.disjoint?(MapSet.new(left), MapSet.new(right)) == false
|
||||
end
|
||||
|
||||
defp filter_constraint_compatible?(left, right, _key), do: left == right
|
||||
|
||||
defp filter_within_rule?(filter, rule_match) when is_map(filter) and is_map(rule_match) do
|
||||
Enum.reject(rule_match, fn {key, _value} -> key in ["since", "until", "limit", "search"] end)
|
||||
|> Enum.all?(fn {key, rule_value} ->
|
||||
requested_value = Map.get(filter, key)
|
||||
requested_constraint_within_rule?(requested_value, rule_value, key)
|
||||
end) and filter_range_within_rule?(filter, rule_match)
|
||||
end
|
||||
|
||||
defp requested_constraint_within_rule?(nil, _rule_value, _key), do: false
|
||||
|
||||
defp requested_constraint_within_rule?(requested_values, rule_values, _key)
|
||||
when is_list(requested_values) and is_list(rule_values) do
|
||||
requested_values
|
||||
|> MapSet.new()
|
||||
|> MapSet.subset?(MapSet.new(rule_values))
|
||||
end
|
||||
|
||||
defp requested_constraint_within_rule?(requested_value, rule_value, _key),
|
||||
do: requested_value == rule_value
|
||||
|
||||
defp denial_reason(:sync_read), do: :sync_read_not_allowed
|
||||
defp denial_reason(:sync_write), do: :sync_write_not_allowed
|
||||
|
||||
defp normalize_context(%RequestContext{} = context), do: {:ok, normalize_pubkeys(context)}
|
||||
defp normalize_context(_context), do: {:error, :invalid_context}
|
||||
|
||||
defp normalize_pubkeys(%RequestContext{} = context) do
|
||||
normalized_pubkeys =
|
||||
context.authenticated_pubkeys
|
||||
|> Enum.map(&String.downcase/1)
|
||||
|> MapSet.new()
|
||||
|
||||
%RequestContext{context | authenticated_pubkeys: normalized_pubkeys}
|
||||
end
|
||||
|
||||
defp normalize_rule(rule) when is_map(rule), do: rule
|
||||
defp normalize_rule(_rule), do: %{}
|
||||
|
||||
defp normalize_delete_selector(selector) when is_map(selector), do: selector
|
||||
defp normalize_delete_selector(_selector), do: %{}
|
||||
|
||||
defp normalize_list_opts(opts) do
|
||||
[]
|
||||
|> maybe_put_opt(:principal_type, Keyword.get(opts, :principal_type))
|
||||
|> maybe_put_opt(:principal, normalize_list_principal(Keyword.get(opts, :principal)))
|
||||
|> maybe_put_opt(:capability, Keyword.get(opts, :capability))
|
||||
end
|
||||
|
||||
defp normalize_list_principal(nil), do: nil
|
||||
|
||||
defp normalize_list_principal(principal) when is_binary(principal),
|
||||
do: String.downcase(principal)
|
||||
|
||||
defp normalize_list_principal(principal), do: principal
|
||||
|
||||
defp maybe_put_opt(opts, _key, nil), do: opts
|
||||
defp maybe_put_opt(opts, key, value), do: Keyword.put(opts, key, value)
|
||||
|
||||
defp normalize_capability(capability) do
|
||||
case capability do
|
||||
:sync_read -> {:ok, :sync_read}
|
||||
:sync_write -> {:ok, :sync_write}
|
||||
_other -> {:error, :invalid_acl_capability}
|
||||
end
|
||||
end
|
||||
|
||||
defp protected_filters do
|
||||
filters =
|
||||
:parrhesia
|
||||
|> Application.get_env(:acl, [])
|
||||
|> Keyword.get(:protected_filters, [])
|
||||
|
||||
if is_list(filters) and
|
||||
Enum.all?(filters, &(match?(%{}, &1) and Filter.validate_filter(&1) == :ok)) do
|
||||
{:ok, filters}
|
||||
else
|
||||
{:error, :invalid_protected_filters}
|
||||
end
|
||||
end
|
||||
|
||||
defp comparable_filter_keys(left, right) do
|
||||
Map.keys(left)
|
||||
|> Kernel.++(Map.keys(right))
|
||||
|> Enum.uniq()
|
||||
end
|
||||
|
||||
defp filter_ranges_overlap?(left, right) do
|
||||
since = max(boundary_value(left, "since", :lower), boundary_value(right, "since", :lower))
|
||||
until = min(boundary_value(left, "until", :upper), boundary_value(right, "until", :upper))
|
||||
since <= until
|
||||
end
|
||||
|
||||
defp filter_range_within_rule?(filter, rule_match) do
|
||||
requested_since = Map.get(filter, "since")
|
||||
requested_until = Map.get(filter, "until")
|
||||
rule_since = Map.get(rule_match, "since")
|
||||
rule_until = Map.get(rule_match, "until")
|
||||
|
||||
lower_ok? =
|
||||
is_nil(rule_since) or (is_integer(requested_since) and requested_since >= rule_since)
|
||||
|
||||
upper_ok? =
|
||||
is_nil(rule_until) or (is_integer(requested_until) and requested_until <= rule_until)
|
||||
|
||||
lower_ok? and upper_ok?
|
||||
end
|
||||
|
||||
defp boundary_value(filter, key, :lower), do: Map.get(filter, key, 0)
|
||||
defp boundary_value(filter, key, :upper), do: Map.get(filter, key, 9_223_372_036_854_775_807)
|
||||
end
|
||||
286
lib/parrhesia/api/admin.ex
Normal file
286
lib/parrhesia/api/admin.ex
Normal file
@@ -0,0 +1,286 @@
|
||||
defmodule Parrhesia.API.Admin do
  @moduledoc """
  Public management API facade.

  Dispatches admin method calls by string name: methods this module knows
  about (`execute_builtin/3`) are handled locally; anything else is forwarded
  to the storage backend's admin executor.
  """

  alias Parrhesia.API.ACL
  alias Parrhesia.API.Identity
  alias Parrhesia.API.Sync
  alias Parrhesia.Storage
  alias Parrhesia.Web.Endpoint

  # Method names handled locally, grouped by subsystem. These are merged with
  # whatever the storage backend advertises in supported_methods/0.
  @supported_admin_methods ~w(health list_audit_logs stats)
  @supported_acl_methods ~w(acl_grant acl_revoke acl_list)
  @supported_identity_methods ~w(identity_ensure identity_get identity_import identity_rotate)
  @supported_listener_methods ~w(listener_reload)
  @supported_sync_methods ~w(
    sync_get_server
    sync_health
    sync_list_servers
    sync_put_server
    sync_remove_server
    sync_server_stats
    sync_start_server
    sync_stats
    sync_stop_server
    sync_sync_now
  )

  @spec execute(String.t() | atom(), map(), keyword()) :: {:ok, map()} | {:error, term()}
  def execute(method, params, opts \\ [])

  # Normalizes the method name, tries the builtin dispatch table, and falls
  # through to the storage backend when the method is not handled here.
  def execute(method, params, opts) when is_map(params) do
    method_name = normalize_method_name(method)

    case execute_builtin(method_name, params, opts) do
      {:continue, other_method} -> Storage.admin().execute(%{}, other_method, params)
      result -> result
    end
  end

  # Non-map params are rejected up front, before any dispatch.
  def execute(method, _params, _opts),
    do: {:error, {:unsupported_method, normalize_method_name(method)}}

  # Combined relay + sync statistics; the sync stats are nested under "sync".
  @spec stats(keyword()) :: {:ok, map()} | {:error, term()}
  def stats(opts \\ []) do
    with {:ok, relay_stats} <- relay_stats(),
         {:ok, sync_stats} <- Sync.sync_stats(opts) do
      {:ok, Map.put(relay_stats, "sync", sync_stats)}
    end
  end

  # Overall health report; the top-level "status" mirrors sync health (see
  # overall_health_status/1).
  @spec health(keyword()) :: {:ok, map()} | {:error, term()}
  def health(opts \\ []) do
    with {:ok, sync_health} <- Sync.sync_health(opts) do
      {:ok,
       %{
         "status" => overall_health_status(sync_health),
         "sync" => sync_health
       }}
    end
  end

  # Delegates audit-log listing straight to the storage backend.
  @spec list_audit_logs(keyword()) :: {:ok, [map()]} | {:error, term()}
  def list_audit_logs(opts \\ []) do
    Storage.admin().list_audit_logs(%{}, opts)
  end

  # -- ACL method handlers ---------------------------------------------------

  defp acl_grant(params) do
    with :ok <- ACL.grant(params) do
      {:ok, %{"ok" => true}}
    end
  end

  defp acl_revoke(params) do
    with :ok <- ACL.revoke(params) do
      {:ok, %{"ok" => true}}
    end
  end

  defp acl_list(params) do
    with {:ok, rules} <- ACL.list(acl_list_opts(params)) do
      {:ok, %{"rules" => rules}}
    end
  end

  # Builds the ACL listing options, omitting any param the caller left out.
  defp acl_list_opts(params) do
    []
    |> maybe_put_opt(:principal_type, fetch_value(params, :principal_type))
    |> maybe_put_opt(:principal, fetch_value(params, :principal))
    |> maybe_put_opt(:capability, fetch_value(params, :capability))
  end

  # Union of builtin method names and whatever the storage backend reports
  # (accepting either a bare list or a %{"methods" => [...]} shape).
  defp supported_methods do
    storage_supported =
      case Storage.admin().execute(%{}, :supportedmethods, %{}) do
        {:ok, methods} when is_list(methods) -> methods
        {:ok, %{"methods" => methods}} when is_list(methods) -> methods
        _other -> []
      end

    (storage_supported ++
       @supported_admin_methods ++
       @supported_acl_methods ++
       @supported_identity_methods ++ @supported_listener_methods ++ @supported_sync_methods)
    |> Enum.uniq()
    |> Enum.sort()
  end

  # -- Identity method handlers (params are ignored except for import) -------

  defp identity_get(_params), do: Identity.get()

  defp identity_ensure(_params), do: Identity.ensure()

  defp identity_rotate(_params), do: Identity.rotate()

  defp identity_import(params) do
    Identity.import(params)
  end

  # -- Admin method handlers --------------------------------------------------

  defp admin_stats(_params, opts), do: stats(opts)
  defp admin_health(_params, opts), do: health(opts)

  defp admin_list_audit_logs(params, _opts) do
    list_audit_logs(audit_log_opts(params))
  end

  # Reloads one endpoint listener by id, or all listeners when no :id param is
  # given (normalize_listener_id/1 maps nil to :all).
  defp listener_reload(params) do
    case normalize_listener_id(fetch_value(params, :id)) do
      :all ->
        Endpoint.reload_all()
        |> ok_result()

      {:ok, listener_id} ->
        listener_id
        |> Endpoint.reload_listener()
        |> ok_result()

      :error ->
        {:error, :not_found}
    end
  end

  # -- Sync method handlers ---------------------------------------------------
  # Each requires a non-empty string :id param where it targets one server.

  defp sync_put_server(params, opts), do: Sync.put_server(params, opts)

  defp sync_remove_server(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         :ok <- Sync.remove_server(server_id, opts) do
      {:ok, %{"ok" => true}}
    end
  end

  defp sync_get_server(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         {:ok, server} <- Sync.get_server(server_id, opts) do
      {:ok, server}
    else
      :error -> {:error, :not_found}
      other -> other
    end
  end

  defp sync_list_servers(_params, opts), do: Sync.list_servers(opts)

  defp sync_start_server(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         :ok <- Sync.start_server(server_id, opts) do
      {:ok, %{"ok" => true}}
    end
  end

  defp sync_stop_server(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         :ok <- Sync.stop_server(server_id, opts) do
      {:ok, %{"ok" => true}}
    end
  end

  defp sync_sync_now(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         :ok <- Sync.sync_now(server_id, opts) do
      {:ok, %{"ok" => true}}
    end
  end

  defp sync_server_stats(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         {:ok, stats} <- Sync.server_stats(server_id, opts) do
      {:ok, stats}
    else
      :error -> {:error, :not_found}
      other -> other
    end
  end

  defp sync_stats(_params, opts), do: Sync.sync_stats(opts)
  defp sync_health(_params, opts), do: Sync.sync_health(opts)

  # -- Builtin dispatch table --------------------------------------------------
  # One clause per locally-handled method name; the catch-all returns
  # {:continue, method} so execute/3 forwards to the storage backend.

  defp execute_builtin("stats", params, opts), do: admin_stats(params, opts)
  defp execute_builtin("health", params, opts), do: admin_health(params, opts)
  defp execute_builtin("list_audit_logs", params, opts), do: admin_list_audit_logs(params, opts)
  defp execute_builtin("acl_grant", params, _opts), do: acl_grant(params)
  defp execute_builtin("acl_revoke", params, _opts), do: acl_revoke(params)
  defp execute_builtin("acl_list", params, _opts), do: acl_list(params)
  defp execute_builtin("identity_get", params, _opts), do: identity_get(params)
  defp execute_builtin("identity_ensure", params, _opts), do: identity_ensure(params)
  defp execute_builtin("identity_import", params, _opts), do: identity_import(params)
  defp execute_builtin("identity_rotate", params, _opts), do: identity_rotate(params)
  defp execute_builtin("listener_reload", params, _opts), do: listener_reload(params)
  defp execute_builtin("sync_put_server", params, opts), do: sync_put_server(params, opts)
  defp execute_builtin("sync_remove_server", params, opts), do: sync_remove_server(params, opts)
  defp execute_builtin("sync_get_server", params, opts), do: sync_get_server(params, opts)
  defp execute_builtin("sync_list_servers", params, opts), do: sync_list_servers(params, opts)
  defp execute_builtin("sync_start_server", params, opts), do: sync_start_server(params, opts)
  defp execute_builtin("sync_stop_server", params, opts), do: sync_stop_server(params, opts)
  defp execute_builtin("sync_sync_now", params, opts), do: sync_sync_now(params, opts)
  defp execute_builtin("sync_server_stats", params, opts), do: sync_server_stats(params, opts)
  defp execute_builtin("sync_stats", params, opts), do: sync_stats(params, opts)
  defp execute_builtin("sync_health", params, opts), do: sync_health(params, opts)

  defp execute_builtin("supportedmethods", _params, _opts),
    do: {:ok, %{"methods" => supported_methods()}}

  defp execute_builtin(other_method, _params, _opts), do: {:continue, other_method}

  # Relay-level stats from the storage backend; a backend that does not
  # implement :stats is treated as contributing an empty map rather than an
  # error.
  defp relay_stats do
    case Storage.admin().execute(%{}, :stats, %{}) do
      {:ok, stats} when is_map(stats) -> {:ok, stats}
      {:error, {:unsupported_method, _method}} -> {:ok, %{}}
      other -> other
    end
  end

  # Overall status is "degraded" only when the sync report says so.
  defp overall_health_status(%{"status" => "degraded"}), do: "degraded"
  defp overall_health_status(_sync_health), do: "ok"

  # Builds audit-log listing options, omitting absent params.
  defp audit_log_opts(params) do
    []
    |> maybe_put_opt(:limit, fetch_value(params, :limit))
    |> maybe_put_opt(:method, fetch_value(params, :method))
    |> maybe_put_opt(:actor_pubkey, fetch_value(params, :actor_pubkey))
  end

  defp maybe_put_opt(opts, _key, nil), do: opts
  defp maybe_put_opt(opts, key, value), do: Keyword.put(opts, key, value)

  # Normalizes bare :ok side-effect results into the standard success payload;
  # errors and already-shaped results pass through untouched.
  defp ok_result(:ok), do: {:ok, %{"ok" => true}}
  defp ok_result({:error, _reason} = error), do: error
  defp ok_result(other), do: other

  # No id means "reload everything".
  defp normalize_listener_id(nil), do: :all

  defp normalize_listener_id(listener_id) when is_atom(listener_id) do
    {:ok, listener_id}
  end

  # A string id is resolved against the Endpoint supervisor's {:listener, id}
  # children; unknown ids yield :error (-> :not_found upstream).
  defp normalize_listener_id(listener_id) when is_binary(listener_id) do
    case Supervisor.which_children(Endpoint) do
      children when is_list(children) ->
        Enum.find_value(children, :error, &match_listener_child(&1, listener_id))

      _other ->
        :error
    end
  end

  defp normalize_listener_id(_listener_id), do: :error

  # Matches a supervisor child spec tagged {:listener, id} whose atom id
  # stringifies to the requested listener id.
  defp match_listener_child({{:listener, id}, _pid, _type, _modules}, listener_id) do
    normalized_id = Atom.to_string(id)
    if normalized_id == listener_id, do: {:ok, id}, else: false
  end

  defp match_listener_child(_child, _listener_id), do: false

  # Requires a present, non-empty string param under either the atom or the
  # string form of the key.
  defp fetch_required_string(map, key) do
    case fetch_value(map, key) do
      value when is_binary(value) and value != "" -> {:ok, value}
      _other -> {:error, {:missing_param, Atom.to_string(key)}}
    end
  end

  # Looks up a param by atom key first, then by its string form.
  defp fetch_value(map, key), do: Map.get(map, key) || Map.get(map, Atom.to_string(key))

  defp normalize_method_name(method) when is_atom(method), do: Atom.to_string(method)
  defp normalize_method_name(method) when is_binary(method), do: method
  defp normalize_method_name(method), do: inspect(method)
end
|
||||
48
lib/parrhesia/api/auth.ex
Normal file
48
lib/parrhesia/api/auth.ex
Normal file
@@ -0,0 +1,48 @@
|
||||
defmodule Parrhesia.API.Auth do
  @moduledoc """
  Shared auth and event validation helpers.
  """

  alias Parrhesia.API.Auth.Context
  alias Parrhesia.API.RequestContext
  alias Parrhesia.Auth.Nip98
  alias Parrhesia.Protocol.EventValidator

  # Thin pass-throughs to the protocol-level event validator.
  @spec validate_event(map()) :: :ok | {:error, term()}
  defdelegate validate_event(event), to: EventValidator, as: :validate

  @spec compute_event_id(map()) :: String.t()
  defdelegate compute_event_id(event), to: EventValidator, as: :compute_id

  @spec validate_nip98(String.t() | nil, String.t(), String.t()) ::
          {:ok, Context.t()} | {:error, term()}
  def validate_nip98(authorization, method, url) do
    validate_nip98(authorization, method, url, [])
  end

  @doc """
  Validates a NIP-98 `Authorization` header against the request method and
  URL, returning an authenticated `Context` on success.
  """
  @spec validate_nip98(String.t() | nil, String.t(), String.t(), keyword()) ::
          {:ok, Context.t()} | {:error, term()}
  def validate_nip98(authorization, method, url, opts)
      when is_binary(method) and is_binary(url) and is_list(opts) do
    with {:ok, auth_event} <-
           Nip98.validate_authorization_header(authorization, method, url, opts),
         pubkey when is_binary(pubkey) <- Map.get(auth_event, "pubkey") do
      {:ok, authenticated_context(auth_event, pubkey, method, url)}
    else
      # A validated auth event without a "pubkey" field is malformed.
      nil -> {:error, :invalid_event}
      {:error, reason} -> {:error, reason}
    end
  end

  # Assembles the Context for an HTTP caller authenticated by `pubkey`.
  defp authenticated_context(auth_event, pubkey, method, url) do
    %Context{
      auth_event: auth_event,
      pubkey: pubkey,
      request_context: %RequestContext{
        authenticated_pubkeys: MapSet.new([pubkey]),
        caller: :http
      },
      metadata: %{
        method: method,
        url: url
      }
    }
  end
end
|
||||
19
lib/parrhesia/api/auth/context.ex
Normal file
19
lib/parrhesia/api/auth/context.ex
Normal file
@@ -0,0 +1,19 @@
|
||||
defmodule Parrhesia.API.Auth.Context do
  @moduledoc """
  Authenticated request details returned by shared auth helpers.

  Carries the raw auth event, the authenticated pubkey, a prepared
  `RequestContext`, and any transport-specific metadata.
  """

  alias Parrhesia.API.RequestContext

  @type t :: %__MODULE__{
          auth_event: map() | nil,
          pubkey: String.t() | nil,
          request_context: RequestContext.t(),
          metadata: map()
        }

  # :auth_event and :pubkey default to nil until authentication succeeds.
  defstruct [:auth_event, :pubkey, request_context: %RequestContext{}, metadata: %{}]
end
|
||||
373
lib/parrhesia/api/events.ex
Normal file
373
lib/parrhesia/api/events.ex
Normal file
@@ -0,0 +1,373 @@
|
||||
defmodule Parrhesia.API.Events do
  @moduledoc """
  Canonical event publish, query, and count API.

  Every entry point expects a `Parrhesia.API.RequestContext` under the
  `:context` option; policy checks and storage scoping derive from the
  authenticated pubkeys it carries.
  """

  alias Parrhesia.API.Events.PublishResult
  alias Parrhesia.API.RequestContext
  alias Parrhesia.Fanout.MultiNode
  alias Parrhesia.Groups.Flow
  alias Parrhesia.Policy.EventPolicy
  alias Parrhesia.Protocol
  alias Parrhesia.Protocol.Filter
  alias Parrhesia.Storage
  alias Parrhesia.Subscriptions.Index
  alias Parrhesia.Telemetry

  # Fallback cap (256 KiB) on the JSON-encoded event size when neither the
  # caller nor the :limits application env provides one.
  @default_max_event_bytes 262_144

  # Event kinds classified as :marmot traffic for telemetry.
  @marmot_kinds MapSet.new([
                  443,
                  444,
                  445,
                  1059,
                  10_050,
                  10_051,
                  446,
                  447,
                  448,
                  449
                ])

  @doc """
  Validates, authorizes, persists, and fans out `event`.

  Returns `{:ok, %PublishResult{}}` for both accepted and rejected events
  (rejections carry `accepted: false` plus a relay-style message); only a
  missing/invalid `:context` yields `{:error, :invalid_context}`.
  """
  @spec publish(map(), keyword()) :: {:ok, PublishResult.t()} | {:error, term()}
  def publish(event, opts \\ [])

  def publish(event, opts) when is_map(event) and is_list(opts) do
    started_at = System.monotonic_time()
    event_id = Map.get(event, "id", "")

    with {:ok, context} <- fetch_context(opts),
         :ok <- validate_event_payload_size(event, max_event_bytes(opts)),
         :ok <- Protocol.validate_event(event),
         :ok <- EventPolicy.authorize_write(event, context.authenticated_pubkeys, context),
         :ok <- maybe_process_group_event(event),
         {:ok, _stored, message} <- persist_event(event) do
      Telemetry.emit(
        [:parrhesia, :ingest, :stop],
        %{duration: System.monotonic_time() - started_at},
        telemetry_metadata_for_event(event)
      )

      # Fanout and cross-node publication are best-effort and never fail the
      # publish (both swallow :exit from dead peers).
      fanout_event(event)
      maybe_publish_multi_node(event)

      {:ok,
       %PublishResult{
         event_id: event_id,
         accepted: true,
         message: message,
         reason: nil
       }}
    else
      {:error, :invalid_context} = error ->
        error

      {:error, reason} ->
        {:ok,
         %PublishResult{
           event_id: event_id,
           accepted: false,
           message: error_message_for_publish_failure(reason),
           reason: reason
         }}
    end
  end

  def publish(_event, _opts), do: {:error, :invalid_event}

  @doc """
  Runs `filters` against storage after optional filter validation and read
  authorization (both on by default; disable with `:validate_filters?` /
  `:authorize_read?`).
  """
  @spec query([map()], keyword()) :: {:ok, [map()]} | {:error, term()}
  def query(filters, opts \\ [])

  def query(filters, opts) when is_list(filters) and is_list(opts) do
    started_at = System.monotonic_time()

    with {:ok, context} <- fetch_context(opts),
         :ok <- maybe_validate_filters(filters, opts),
         :ok <- maybe_authorize_read(filters, context, opts),
         {:ok, events} <- Storage.events().query(%{}, filters, storage_query_opts(context, opts)) do
      Telemetry.emit(
        [:parrhesia, :query, :stop],
        %{duration: System.monotonic_time() - started_at},
        telemetry_metadata_for_filters(filters)
      )

      {:ok, events}
    end
  end

  def query(_filters, _opts), do: {:error, :invalid_filters}

  @doc """
  Counts events matching `filters`. Returns a bare integer, or a NIP-45 style
  payload map when the caller supplies an `:options` map (see
  `build_count_payload/3`).
  """
  @spec count([map()], keyword()) :: {:ok, non_neg_integer() | map()} | {:error, term()}
  def count(filters, opts \\ [])

  def count(filters, opts) when is_list(filters) and is_list(opts) do
    started_at = System.monotonic_time()

    with {:ok, context} <- fetch_context(opts),
         :ok <- maybe_validate_filters(filters, opts),
         :ok <- maybe_authorize_read(filters, context, opts),
         {:ok, count} <-
           Storage.events().count(%{}, filters, requester_pubkeys: requester_pubkeys(context)),
         {:ok, result} <- maybe_build_count_result(filters, count, Keyword.get(opts, :options)) do
      Telemetry.emit(
        [:parrhesia, :query, :stop],
        %{duration: System.monotonic_time() - started_at},
        telemetry_metadata_for_filters(filters)
      )

      {:ok, result}
    end
  end

  def count(_filters, _opts), do: {:error, :invalid_filters}

  defp maybe_validate_filters(filters, opts) do
    if Keyword.get(opts, :validate_filters?, true) do
      Filter.validate_filters(filters)
    else
      :ok
    end
  end

  defp maybe_authorize_read(filters, context, opts) do
    if Keyword.get(opts, :authorize_read?, true) do
      EventPolicy.authorize_read(filters, context.authenticated_pubkeys, context)
    else
      :ok
    end
  end

  defp storage_query_opts(context, opts) do
    [
      max_filter_limit:
        Keyword.get(opts, :max_filter_limit, Parrhesia.Config.get([:limits, :max_filter_limit])),
      requester_pubkeys: requester_pubkeys(context)
    ]
  end

  defp requester_pubkeys(%RequestContext{} = context),
    do: MapSet.to_list(context.authenticated_pubkeys)

  # An integer count plus an :options map produces the extended payload; in
  # every other case the raw count is returned as-is. (The previous version
  # carried two additional clauses that were behaviorally identical to the
  # catch-all; they have been removed.)
  defp maybe_build_count_result(filters, count, options)
       when is_integer(count) and is_map(options) do
    build_count_payload(filters, count, options)
  end

  defp maybe_build_count_result(_filters, count, _options), do: {:ok, count}

  # NIP-45 style payload. The "hll" digest is attached only when the caller
  # asked for it and the nip_45_count feature is enabled.
  defp build_count_payload(filters, count, options) do
    include_hll? =
      Map.get(options, "hll", false) and Parrhesia.Config.get([:features, :nip_45_count], true)

    payload = %{"count" => count, "approximate" => false}

    payload =
      if include_hll? do
        Map.put(payload, "hll", generate_hll_payload(filters, count))
      else
        payload
      end

    {:ok, payload}
  end

  # Deterministic digest over the filters and count (SHA-256, base64).
  defp generate_hll_payload(filters, count) do
    filters
    |> JSON.encode!()
    |> then(&"#{&1}:#{count}")
    |> then(&:crypto.hash(:sha256, &1))
    |> Base.encode64()
  end

  # Group-related kinds get routed through the groups flow before persistence.
  defp maybe_process_group_event(event) do
    if Flow.group_related_kind?(Map.get(event, "kind")) do
      Flow.handle_event(event)
    else
      :ok
    end
  end

  # Persistence strategy by kind: 5 (deletion) and 62 (vanish) are control
  # events, the 20000..29999 range is ephemeral, everything else is stored.
  defp persist_event(event) do
    kind = Map.get(event, "kind")

    cond do
      kind in [5, 62] -> persist_control_event(kind, event)
      ephemeral_kind?(kind) -> persist_ephemeral_event()
      true -> persist_regular_event(event)
    end
  end

  defp persist_control_event(5, event) do
    with {:ok, deleted_count} <- Storage.events().delete_by_request(%{}, event) do
      {:ok, deleted_count, "ok: deletion request processed"}
    end
  end

  defp persist_control_event(62, event) do
    with {:ok, deleted_count} <- Storage.events().vanish(%{}, event) do
      {:ok, deleted_count, "ok: vanish request processed"}
    end
  end

  defp persist_ephemeral_event do
    if accept_ephemeral_events?() do
      {:ok, :ephemeral, "ok: ephemeral event accepted"}
    else
      {:error, :ephemeral_events_disabled}
    end
  end

  defp persist_regular_event(event) do
    case Storage.events().put_event(%{}, event) do
      {:ok, persisted_event} -> {:ok, persisted_event, "ok: event stored"}
      {:error, :duplicate_event} -> {:error, :duplicate_event}
      {:error, reason} -> {:error, reason}
    end
  end

  # Sends the event to every live subscription owner that may match it; an
  # exit from the index (or a dead owner) is ignored.
  defp fanout_event(event) do
    case Index.candidate_subscription_keys(event) do
      candidates when is_list(candidates) ->
        Enum.each(candidates, fn {owner_pid, subscription_id} ->
          send(owner_pid, {:fanout_event, subscription_id, event})
        end)

      _other ->
        :ok
    end
  catch
    :exit, _reason -> :ok
  end

  # Best-effort cross-node publication.
  defp maybe_publish_multi_node(event) do
    MultiNode.publish(event)
    :ok
  catch
    :exit, _reason -> :ok
  end

  defp telemetry_metadata_for_event(event) do
    %{traffic_class: traffic_class_for_event(event)}
  end

  defp telemetry_metadata_for_filters(filters) do
    %{traffic_class: traffic_class_for_filters(filters)}
  end

  defp traffic_class_for_filters(filters) do
    if Enum.any?(filters, &marmot_filter?/1) do
      :marmot
    else
      :generic
    end
  end

  # A filter counts as marmot traffic if it names a marmot kind or uses the
  # "#h"/"#i" tag queries.
  defp marmot_filter?(filter) when is_map(filter) do
    has_marmot_kind? =
      case Map.get(filter, "kinds") do
        kinds when is_list(kinds) -> Enum.any?(kinds, &MapSet.member?(@marmot_kinds, &1))
        _other -> false
      end

    has_marmot_kind? or Map.has_key?(filter, "#h") or Map.has_key?(filter, "#i")
  end

  defp marmot_filter?(_filter), do: false

  defp traffic_class_for_event(event) when is_map(event) do
    if MapSet.member?(@marmot_kinds, Map.get(event, "kind")) do
      :marmot
    else
      :generic
    end
  end

  defp traffic_class_for_event(_event), do: :generic

  defp fetch_context(opts) do
    case Keyword.get(opts, :context) do
      %RequestContext{} = context -> {:ok, context}
      _other -> {:error, :invalid_context}
    end
  end

  # Relay-style human-readable rejection messages.
  defp error_message_for_publish_failure(:duplicate_event),
    do: "duplicate: event already stored"

  defp error_message_for_publish_failure(:event_too_large),
    do: "invalid: event exceeds max event size"

  defp error_message_for_publish_failure(:ephemeral_events_disabled),
    do: "blocked: ephemeral events are disabled"

  # Policy rejections delegate their wording to EventPolicy.
  defp error_message_for_publish_failure(reason)
       when reason in [
              :auth_required,
              :pubkey_not_allowed,
              :restricted_giftwrap,
              :sync_write_not_allowed,
              :protected_event_requires_auth,
              :protected_event_pubkey_mismatch,
              :pow_below_minimum,
              :pubkey_banned,
              :event_banned,
              :media_metadata_tags_exceeded,
              :media_metadata_tag_value_too_large,
              :media_metadata_url_too_long,
              :media_metadata_invalid_url,
              :media_metadata_invalid_hash,
              :media_metadata_invalid_mime,
              :media_metadata_mime_not_allowed,
              :media_metadata_unsupported_version,
              :push_notification_relay_tags_exceeded,
              :push_notification_payload_too_large,
              :push_notification_replay_window_exceeded,
              :push_notification_missing_expiration,
              :push_notification_expiration_too_far,
              :push_notification_server_recipients_exceeded
            ],
       do: EventPolicy.error_message(reason)

  defp error_message_for_publish_failure(reason) when is_binary(reason), do: reason
  defp error_message_for_publish_failure(reason), do: "error: #{inspect(reason)}"

  # Size check on the JSON encoding; a non-positive/non-integer limit disables
  # the check entirely (second clause).
  defp validate_event_payload_size(event, max_event_bytes)
       when is_map(event) and is_integer(max_event_bytes) and max_event_bytes > 0 do
    if byte_size(JSON.encode!(event)) <= max_event_bytes do
      :ok
    else
      {:error, :event_too_large}
    end
  end

  defp validate_event_payload_size(_event, _max_event_bytes), do: :ok

  defp max_event_bytes(opts) do
    opts
    |> Keyword.get(:max_event_bytes, configured_max_event_bytes())
    |> normalize_max_event_bytes()
  end

  defp normalize_max_event_bytes(value) when is_integer(value) and value > 0, do: value
  defp normalize_max_event_bytes(_value), do: configured_max_event_bytes()

  defp configured_max_event_bytes do
    :parrhesia
    |> Application.get_env(:limits, [])
    |> Keyword.get(:max_event_bytes, @default_max_event_bytes)
  end

  # NIP-01 ephemeral kind range.
  defp ephemeral_kind?(kind) when is_integer(kind), do: kind >= 20_000 and kind < 30_000
  defp ephemeral_kind?(_kind), do: false

  defp accept_ephemeral_events? do
    :parrhesia
    |> Application.get_env(:policies, [])
    |> Keyword.get(:accept_ephemeral_events, true)
  end
end
|
||||
14
lib/parrhesia/api/events/publish_result.ex
Normal file
14
lib/parrhesia/api/events/publish_result.ex
Normal file
@@ -0,0 +1,14 @@
|
||||
defmodule Parrhesia.API.Events.PublishResult do
  @moduledoc """
  Result shape for event publish attempts.

  `accepted` indicates whether the event was taken; `message` is the
  relay-style status string and `reason` carries the machine-readable cause
  on rejection.
  """

  @type t :: %__MODULE__{
          event_id: String.t(),
          accepted: boolean(),
          message: String.t(),
          reason: term()
        }

  defstruct event_id: nil, accepted: nil, message: nil, reason: nil
end
|
||||
243
lib/parrhesia/api/identity.ex
Normal file
243
lib/parrhesia/api/identity.ex
Normal file
@@ -0,0 +1,243 @@
|
||||
defmodule Parrhesia.API.Identity do
  @moduledoc """
  Server-auth identity management.

  Resolves the server's signing keypair from, in order of precedence:
  an explicitly configured private key, a JSON file persisted on disk, or a
  freshly generated key. Configured identities cannot be rotated.
  """

  alias Parrhesia.API.Auth

  # Public metadata shape: only the pubkey and where the key came from —
  # the secret key is never exposed through this API.
  @type identity_metadata :: %{
          pubkey: String.t(),
          source: :configured | :persisted | :generated | :imported
        }

  # Returns the existing identity's public metadata without creating one.
  @spec get(keyword()) :: {:ok, identity_metadata()} | {:error, term()}
  def get(opts \\ []) do
    with {:ok, identity} <- fetch_existing_identity(opts) do
      {:ok, public_identity(identity)}
    end
  end

  # Returns the identity, generating and persisting a new one if none exists.
  @spec ensure(keyword()) :: {:ok, identity_metadata()} | {:error, term()}
  def ensure(opts \\ []) do
    with {:ok, identity} <- ensure_identity(opts) do
      {:ok, public_identity(identity)}
    end
  end

  # Imports an identity from a map carrying a hex "secret_key"/:secret_key,
  # persisting it to disk with source :imported.
  # NOTE(review): `import/2` shares its name with the Elixir `import` special
  # form — confirm this compiles cleanly on the supported Elixir versions and
  # does not confuse callers.
  @spec import(map(), keyword()) :: {:ok, identity_metadata()} | {:error, term()}
  def import(identity, opts \\ [])

  def import(identity, opts) when is_map(identity) do
    with {:ok, secret_key} <- fetch_secret_key(identity),
         {:ok, normalized_identity} <- build_identity(secret_key, :imported),
         :ok <- persist_identity(normalized_identity, opts) do
      {:ok, public_identity(normalized_identity)}
    end
  end

  def import(_identity, _opts), do: {:error, :invalid_identity}

  # Replaces the persisted identity with a freshly generated keypair.
  # Refused when the key is supplied via configuration.
  @spec rotate(keyword()) :: {:ok, identity_metadata()} | {:error, term()}
  def rotate(opts \\ []) do
    with :ok <- ensure_rotation_allowed(opts),
         {:ok, identity} <- generate_identity(:generated),
         :ok <- persist_identity(identity, opts) do
      {:ok, public_identity(identity)}
    end
  end

  # Signs a partially built event (created_at/kind/tags/content) with the
  # server identity, filling in "pubkey", "id", and "sig".
  @spec sign_event(map(), keyword()) :: {:ok, map()} | {:error, term()}
  def sign_event(event, opts \\ [])

  def sign_event(event, opts) when is_map(event) and is_list(opts) do
    with :ok <- validate_signable_event(event),
         {:ok, identity} <- ensure_identity(opts),
         signed_event <- attach_signature(event, identity) do
      {:ok, signed_event}
    end
  end

  def sign_event(_event, _opts), do: {:error, :invalid_event}

  # Default on-disk location for the persisted identity file.
  def default_path do
    Path.join([default_data_dir(), "server_identity.json"])
  end

  # Fetches the identity, creating and persisting one only when the miss is
  # specifically :identity_not_found; other errors propagate.
  defp ensure_identity(opts) do
    case fetch_existing_identity(opts) do
      {:ok, identity} ->
        {:ok, identity}

      {:error, :identity_not_found} ->
        with {:ok, identity} <- generate_identity(:generated),
             :ok <- persist_identity(identity, opts) do
          {:ok, identity}
        end

      {:error, reason} ->
        {:error, reason}
    end
  end

  # A configured private key always wins over the persisted file.
  defp fetch_existing_identity(opts) do
    if configured_private_key = configured_private_key(opts) do
      build_identity(configured_private_key, :configured)
    else
      read_persisted_identity(opts)
    end
  end

  # Rotation is only meaningful for disk-persisted identities.
  defp ensure_rotation_allowed(opts) do
    if configured_private_key(opts) do
      {:error, :configured_identity_cannot_rotate}
    else
      :ok
    end
  end

  # An event is signable when it carries integer created_at/kind, a tag list,
  # and (if present) a binary content field.
  defp validate_signable_event(event) do
    signable =
      is_integer(Map.get(event, "created_at")) and
        is_integer(Map.get(event, "kind")) and
        is_list(Map.get(event, "tags")) and
        is_binary(Map.get(event, "content", ""))

    if signable, do: :ok, else: {:error, :invalid_event}
  end

  # Computes the canonical event id over the event with a placeholder "sig",
  # Schnorr-signs the raw id bytes, and attaches both id and signature.
  defp attach_signature(event, identity) do
    unsigned_event =
      event
      |> Map.put("pubkey", identity.pubkey)
      |> Map.put("sig", String.duplicate("0", 128))

    event_id =
      unsigned_event
      |> Auth.compute_event_id()

    signature =
      event_id
      |> Base.decode16!(case: :lower)
      |> Secp256k1.schnorr_sign(identity.secret_key)
      |> Base.encode16(case: :lower)

    unsigned_event
    |> Map.put("id", event_id)
    |> Map.put("sig", signature)
  end

  # Reads and decodes the persisted identity file; a missing file maps to
  # :identity_not_found so ensure_identity/1 can auto-generate.
  defp read_persisted_identity(opts) do
    path = identity_path(opts)

    case File.read(path) do
      {:ok, payload} ->
        with {:ok, decoded} <- JSON.decode(payload),
             {:ok, secret_key} <- fetch_secret_key(decoded),
             {:ok, identity} <- build_identity(secret_key, :persisted) do
          {:ok, identity}
        else
          {:error, reason} -> {:error, reason}
        end

      {:error, :enoent} ->
        {:error, :identity_not_found}

      {:error, reason} ->
        {:error, reason}
    end
  end

  # Atomic-ish write: serialize to a sibling .tmp file, then rename over the
  # target; the temp file is removed on any failure.
  # NOTE(review): the secret key is written to disk in cleartext — consider
  # restricting the file mode; confirm against deployment expectations.
  defp persist_identity(identity, opts) do
    path = identity_path(opts)
    temp_path = path <> ".tmp"

    with :ok <- File.mkdir_p(Path.dirname(path)),
         :ok <- File.write(temp_path, JSON.encode!(persisted_identity(identity))),
         :ok <- File.rename(temp_path, path) do
      :ok
    else
      {:error, reason} ->
        _ = File.rm(temp_path)
        {:error, reason}
    end
  end

  # On-disk JSON shape: lowercase-hex secret key plus the pubkey.
  defp persisted_identity(identity) do
    %{
      "secret_key" => Base.encode16(identity.secret_key, case: :lower),
      "pubkey" => identity.pubkey
    }
  end

  # Generates a fresh x-only keypair; any crash in the NIF is normalized to a
  # tagged error.
  defp generate_identity(source) do
    {secret_key, pubkey} = Secp256k1.keypair(:xonly)

    {:ok,
     %{
       secret_key: secret_key,
       pubkey: Base.encode16(pubkey, case: :lower),
       source: source
     }}
  rescue
    _error -> {:error, :identity_generation_failed}
  end

  # Builds the internal identity map from a hex secret key; crashes from key
  # derivation are normalized to {:error, :invalid_secret_key}.
  defp build_identity(secret_key_hex, source) when is_binary(secret_key_hex) do
    with {:ok, secret_key} <- decode_secret_key(secret_key_hex),
         pubkey <- Secp256k1.pubkey(secret_key, :xonly) do
      {:ok,
       %{
         secret_key: secret_key,
         pubkey: Base.encode16(pubkey, case: :lower),
         source: source
       }}
    end
  rescue
    _error -> {:error, :invalid_secret_key}
  end

  # Accepts a hex string of exactly 32 bytes (case-insensitive via downcase).
  defp decode_secret_key(secret_key_hex) when is_binary(secret_key_hex) do
    normalized = String.downcase(secret_key_hex)

    case Base.decode16(normalized, case: :lower) do
      {:ok, <<_::256>> = secret_key} -> {:ok, secret_key}
      _other -> {:error, :invalid_secret_key}
    end
  end

  # Reads the secret key from either the atom or the string map key.
  defp fetch_secret_key(identity) when is_map(identity) do
    case Map.get(identity, :secret_key) || Map.get(identity, "secret_key") do
      secret_key when is_binary(secret_key) -> {:ok, secret_key}
      _other -> {:error, :invalid_identity}
    end
  end

  # Caller opts take precedence over application config.
  defp configured_private_key(opts) do
    opts[:private_key] || opts[:configured_private_key] || config_value(:private_key)
  end

  defp identity_path(opts) do
    opts[:path] || config_value(:path) || default_path()
  end

  # Strips the secret key for external consumption.
  defp public_identity(identity) do
    %{
      pubkey: identity.pubkey,
      source: identity.source
    }
  end

  defp config_value(key) do
    :parrhesia
    |> Application.get_env(:identity, [])
    |> Keyword.get(key)
  end

  # XDG data dir with a ~/.local/share fallback, namespaced under "parrhesia".
  defp default_data_dir do
    base_dir =
      System.get_env("XDG_DATA_HOME") ||
        Path.join(System.user_home!(), ".local/share")

    Path.join(base_dir, "parrhesia")
  end
end
|
||||
25
lib/parrhesia/api/identity/manager.ex
Normal file
25
lib/parrhesia/api/identity/manager.ex
Normal file
@@ -0,0 +1,25 @@
|
||||
defmodule Parrhesia.API.Identity.Manager do
  @moduledoc false

  # Boot-time process that eagerly materializes the server identity so the
  # first caller does not pay the generation cost. A failure to build the
  # identity is logged but never prevents startup (best-effort by design).

  use GenServer

  alias Parrhesia.API.Identity

  require Logger

  def start_link(opts \\ []) do
    GenServer.start_link(__MODULE__, opts, name: __MODULE__)
  end

  @impl true
  def init(_opts) do
    ensure_identity()
    {:ok, %{}}
  end

  # Best-effort: an identity failure is surfaced in the logs only.
  defp ensure_identity do
    case Identity.ensure() do
      {:ok, _identity} ->
        :ok

      {:error, reason} ->
        Logger.error("failed to ensure server identity: #{inspect(reason)}")
    end
  end
end
|
||||
30
lib/parrhesia/api/request_context.ex
Normal file
30
lib/parrhesia/api/request_context.ex
Normal file
@@ -0,0 +1,30 @@
|
||||
defmodule Parrhesia.API.RequestContext do
  @moduledoc """
  Shared request context used across API and policy surfaces.

  Carries caller identity (authenticated pubkeys, actor), transport details
  (remote IP, peer id, transport identity) and free-form metadata.
  """

  defstruct authenticated_pubkeys: MapSet.new(),
            actor: nil,
            caller: :local,
            remote_ip: nil,
            subscription_id: nil,
            peer_id: nil,
            transport_identity: nil,
            metadata: %{}

  @type t :: %__MODULE__{
          authenticated_pubkeys: MapSet.t(String.t()),
          actor: term(),
          caller: atom(),
          remote_ip: String.t() | nil,
          subscription_id: String.t() | nil,
          peer_id: String.t() | nil,
          transport_identity: map() | nil,
          metadata: map()
        }

  @doc """
  Merges `metadata` into the context's metadata map.

  Keys already present in the context are overwritten by incoming values.
  """
  @spec put_metadata(t(), map()) :: t()
  def put_metadata(%__MODULE__{metadata: existing} = context, metadata) when is_map(metadata) do
    %__MODULE__{context | metadata: Map.merge(existing, metadata)}
  end
end
|
||||
97
lib/parrhesia/api/stream.ex
Normal file
97
lib/parrhesia/api/stream.ex
Normal file
@@ -0,0 +1,97 @@
|
||||
defmodule Parrhesia.API.Stream do
  @moduledoc """
  In-process subscription API with relay-equivalent catch-up and live fanout semantics.
  """

  alias Parrhesia.API.Events
  alias Parrhesia.API.RequestContext
  alias Parrhesia.API.Stream.Subscription
  alias Parrhesia.Policy.EventPolicy
  alias Parrhesia.Protocol.Filter

  @doc """
  Opens a subscription for `subscriber`.

  Validates the filters, authorizes the read against the request context
  (supplied as `opts[:context]`), spawns a subscription process, replays
  stored events and returns `{:ok, ref}` once the catch-up handshake is done.
  """
  @spec subscribe(pid(), String.t(), [map()], keyword()) :: {:ok, reference()} | {:error, term()}
  def subscribe(subscriber, subscription_id, filters, opts \\ [])

  def subscribe(subscriber, subscription_id, filters, opts)
      when is_pid(subscriber) and is_binary(subscription_id) and is_list(filters) and
             is_list(opts) do
    with {:ok, context} <- fetch_context(opts),
         :ok <- Filter.validate_filters(filters),
         scoped_context = stream_context(context, subscription_id),
         :ok <-
           EventPolicy.authorize_read(
             filters,
             context.authenticated_pubkeys,
             scoped_context
           ) do
      start_subscription(subscriber, subscription_id, filters, scoped_context)
    end
  end

  def subscribe(_subscriber, _subscription_id, _filters, _opts),
    do: {:error, :invalid_subscription}

  @doc """
  Closes the subscription identified by `ref`. Always returns `:ok`,
  including when the subscription is already gone.
  """
  @spec unsubscribe(reference()) :: :ok
  def unsubscribe(ref) when is_reference(ref) do
    Parrhesia.API.Stream.Registry
    |> Registry.lookup(ref)
    |> Enum.each(fn {pid, _value} -> stop_quietly(pid, :normal) end)

    :ok
  end

  def unsubscribe(_ref), do: :ok

  # Spawns the subscription process under the dynamic supervisor.
  defp start_subscription(subscriber, subscription_id, filters, context) do
    ref = make_ref()

    child =
      {Subscription,
       ref: ref, subscriber: subscriber, subscription_id: subscription_id, filters: filters}

    case DynamicSupervisor.start_child(Parrhesia.API.Stream.Supervisor, child) do
      {:ok, pid} -> finalize_subscription(pid, ref, filters, context)
      {:error, reason} -> {:error, reason}
    end
  end

  defp fetch_context(opts) do
    case Keyword.get(opts, :context) do
      %RequestContext{} = context -> {:ok, context}
      _other -> {:error, :invalid_context}
    end
  end

  # Replays matching stored events (filters/auth already checked upstream),
  # then marks the subscription live. On any failure the freshly-started
  # process is torn down again.
  defp finalize_subscription(pid, ref, filters, context) do
    query_opts = [context: context, validate_filters?: false, authorize_read?: false]

    with {:ok, initial_events} <- Events.query(filters, query_opts),
         :ok <- Subscription.deliver_initial(pid, initial_events) do
      {:ok, ref}
    else
      {:error, reason} ->
        stop_quietly(pid, :shutdown)
        {:error, reason}
    end
  end

  # Stops a subscription process, swallowing exits from already-dead targets.
  defp stop_quietly(pid, reason) do
    GenServer.stop(pid, reason)
    :ok
  catch
    :exit, _reason -> :ok
  end

  defp stream_context(%RequestContext{} = context, subscription_id) do
    %RequestContext{context | subscription_id: subscription_id}
  end
end
|
||||
178
lib/parrhesia/api/stream/subscription.ex
Normal file
178
lib/parrhesia/api/stream/subscription.ex
Normal file
@@ -0,0 +1,178 @@
|
||||
defmodule Parrhesia.API.Stream.Subscription do
  @moduledoc false

  # One GenServer per in-process subscription.
  #
  # Lifecycle:
  #   1. init/1 registers the subscription with the fanout index; live events
  #      arriving before the catch-up replay finishes are buffered.
  #   2. deliver_initial/2 replays stored events, emits :eose, flushes the
  #      buffer (oldest first) and switches to direct delivery.
  #   3. terminate/2 deregisters from the index; abnormal exits additionally
  #      notify the subscriber with a :closed message.

  use GenServer

  alias Parrhesia.Protocol.Filter
  alias Parrhesia.Subscriptions.Index

  defstruct [
    :ref,
    :subscriber,
    :subscriber_monitor_ref,
    :subscription_id,
    :filters,
    # The index this process registered with. Remembered so cleanup removes
    # the registration from the same index — previously terminate/2 always
    # removed from the global Index, leaving stale entries whenever a custom
    # :subscription_index was supplied via opts.
    subscription_index: Index,
    ready?: false,
    buffered_events: []
  ]

  @type t :: %__MODULE__{
          ref: reference(),
          subscriber: pid(),
          subscriber_monitor_ref: reference(),
          subscription_id: String.t(),
          filters: [map()],
          subscription_index: pid() | atom(),
          ready?: boolean(),
          buffered_events: [map()]
        }

  @spec start_link(keyword()) :: GenServer.on_start()
  def start_link(opts) when is_list(opts) do
    ref = Keyword.fetch!(opts, :ref)

    GenServer.start_link(__MODULE__, opts, name: via_tuple(ref))
  end

  # Pushes the catch-up events to the subscription; flips it into live mode.
  @spec deliver_initial(GenServer.server(), [map()]) :: :ok | {:error, term()}
  def deliver_initial(server, initial_events) when is_list(initial_events) do
    GenServer.call(server, {:deliver_initial, initial_events})
  end

  @impl true
  def init(opts) do
    subscription_index = subscription_index(opts)

    with {:ok, subscriber} <- fetch_subscriber(opts),
         {:ok, subscription_id} <- fetch_subscription_id(opts),
         {:ok, filters} <- fetch_filters(opts),
         :ok <-
           maybe_upsert_index_subscription(subscription_index, subscription_id, filters) do
      monitor_ref = Process.monitor(subscriber)

      state = %__MODULE__{
        ref: Keyword.fetch!(opts, :ref),
        subscriber: subscriber,
        subscriber_monitor_ref: monitor_ref,
        subscription_id: subscription_id,
        filters: filters,
        subscription_index: subscription_index,
        ready?: false,
        buffered_events: []
      }

      {:ok, state}
    else
      {:error, reason} -> {:stop, reason}
    end
  end

  @impl true
  def handle_call({:deliver_initial, initial_events}, _from, %__MODULE__{} = state) do
    send_initial_events(state, initial_events)

    # Flush live events buffered during catch-up, oldest first (the buffer is
    # prepend-ordered, so reverse before sending).
    Enum.each(Enum.reverse(state.buffered_events), fn event ->
      send(state.subscriber, {:parrhesia, :event, state.ref, state.subscription_id, event})
    end)

    {:reply, :ok, %__MODULE__{state | ready?: true, buffered_events: []}}
  end

  @impl true
  def handle_info({:fanout_event, subscription_id, event}, %__MODULE__{} = state)
      when is_binary(subscription_id) and is_map(event) do
    handle_fanout_event(state, subscription_id, event)
  end

  # Subscriber died: shut the subscription down quietly.
  def handle_info({:DOWN, monitor_ref, :process, subscriber, _reason}, %__MODULE__{} = state)
      when monitor_ref == state.subscriber_monitor_ref and subscriber == state.subscriber do
    {:stop, :normal, state}
  end

  def handle_info(_message, %__MODULE__{} = state), do: {:noreply, state}

  @impl true
  def terminate(reason, %__MODULE__{} = state) do
    # Bug fix: deregister from the index recorded at registration time rather
    # than unconditionally from the global Index.
    :ok = maybe_remove_index_subscription(state.subscription_index, state.subscription_id)

    if reason not in [:normal, :shutdown] do
      send(state.subscriber, {:parrhesia, :closed, state.ref, state.subscription_id, reason})
    end

    :ok
  end

  # Emits each catch-up event followed by the relay-style end-of-stored-events marker.
  defp send_initial_events(state, events) do
    Enum.each(events, fn event ->
      send(state.subscriber, {:parrhesia, :event, state.ref, state.subscription_id, event})
    end)

    send(state.subscriber, {:parrhesia, :eose, state.ref, state.subscription_id})
  end

  defp via_tuple(ref), do: {:via, Registry, {Parrhesia.API.Stream.Registry, ref}}

  defp fetch_subscriber(opts) do
    case Keyword.get(opts, :subscriber) do
      subscriber when is_pid(subscriber) -> {:ok, subscriber}
      _other -> {:error, :invalid_subscriber}
    end
  end

  defp fetch_subscription_id(opts) do
    case Keyword.get(opts, :subscription_id) do
      subscription_id when is_binary(subscription_id) -> {:ok, subscription_id}
      _other -> {:error, :invalid_subscription_id}
    end
  end

  defp fetch_filters(opts) do
    case Keyword.get(opts, :filters) do
      filters when is_list(filters) -> {:ok, filters}
      _other -> {:error, :invalid_filters}
    end
  end

  # Index override (pid or registered name); anything else is treated as absent.
  defp subscription_index(opts) do
    case Keyword.get(opts, :subscription_index, Index) do
      subscription_index when is_pid(subscription_index) or is_atom(subscription_index) ->
        subscription_index

      _other ->
        nil
    end
  end

  defp maybe_upsert_index_subscription(nil, _subscription_id, _filters),
    do: {:error, :subscription_index_unavailable}

  defp maybe_upsert_index_subscription(subscription_index, subscription_id, filters) do
    case Index.upsert(subscription_index, self(), subscription_id, filters) do
      :ok -> :ok
      {:error, reason} -> {:error, reason}
    end
  catch
    # Index process down or not started: treat as unavailable.
    :exit, _reason -> {:error, :subscription_index_unavailable}
  end

  defp maybe_remove_index_subscription(nil, _subscription_id), do: :ok

  defp maybe_remove_index_subscription(subscription_index, subscription_id) do
    :ok = Index.remove(subscription_index, self(), subscription_id)
    :ok
  catch
    # Index may already be gone during shutdown; removal is best-effort.
    :exit, _reason -> :ok
  end

  # Live event routing: ignore mismatched subscription ids and non-matching
  # filters; deliver directly once ready, otherwise buffer (newest first;
  # reversed on flush in handle_call/3).
  defp handle_fanout_event(%__MODULE__{} = state, subscription_id, event) do
    cond do
      subscription_id != state.subscription_id ->
        {:noreply, state}

      not Filter.matches_any?(event, state.filters) ->
        {:noreply, state}

      state.ready? ->
        send(state.subscriber, {:parrhesia, :event, state.ref, state.subscription_id, event})
        {:noreply, state}

      true ->
        {:noreply, %__MODULE__{state | buffered_events: [event | state.buffered_events]}}
    end
  end
end
|
||||
103
lib/parrhesia/api/sync.ex
Normal file
103
lib/parrhesia/api/sync.ex
Normal file
@@ -0,0 +1,103 @@
|
||||
defmodule Parrhesia.API.Sync do
  @moduledoc """
  Sync server control-plane API.

  Thin facade over `Parrhesia.API.Sync.Manager`: validates argument shapes
  and forwards to the manager process (overridable via the `:manager` /
  `:name` opts, e.g. in tests).
  """

  alias Parrhesia.API.Sync.Manager

  @type server :: map()

  @doc "Creates or updates a sync server definition."
  @spec put_server(map(), keyword()) :: {:ok, server()} | {:error, term()}
  def put_server(server, opts \\ [])

  def put_server(server, opts) when is_map(server) and is_list(opts),
    do: Manager.put_server(manager_name(opts), server)

  def put_server(_server, _opts), do: {:error, :invalid_server}

  @doc "Removes a sync server definition."
  @spec remove_server(String.t(), keyword()) :: :ok | {:error, term()}
  def remove_server(server_id, opts \\ [])

  def remove_server(server_id, opts) when is_binary(server_id) and is_list(opts),
    do: Manager.remove_server(manager_name(opts), server_id)

  def remove_server(_server_id, _opts), do: {:error, :invalid_server_id}

  @doc "Fetches a single sync server definition."
  @spec get_server(String.t(), keyword()) :: {:ok, server()} | :error | {:error, term()}
  def get_server(server_id, opts \\ [])

  def get_server(server_id, opts) when is_binary(server_id) and is_list(opts),
    do: Manager.get_server(manager_name(opts), server_id)

  def get_server(_server_id, _opts), do: {:error, :invalid_server_id}

  @doc "Lists all configured sync servers."
  @spec list_servers(keyword()) :: {:ok, [server()]} | {:error, term()}
  def list_servers(opts \\ []) when is_list(opts), do: Manager.list_servers(manager_name(opts))

  @doc "Marks a server as running (starts its worker when workers are enabled)."
  @spec start_server(String.t(), keyword()) :: :ok | {:error, term()}
  def start_server(server_id, opts \\ [])

  def start_server(server_id, opts) when is_binary(server_id) and is_list(opts),
    do: Manager.start_server(manager_name(opts), server_id)

  def start_server(_server_id, _opts), do: {:error, :invalid_server_id}

  @doc "Stops a server's worker and marks it stopped."
  @spec stop_server(String.t(), keyword()) :: :ok | {:error, term()}
  def stop_server(server_id, opts \\ [])

  def stop_server(server_id, opts) when is_binary(server_id) and is_list(opts),
    do: Manager.stop_server(manager_name(opts), server_id)

  def stop_server(_server_id, _opts), do: {:error, :invalid_server_id}

  @doc "Triggers an immediate sync pass for the server."
  @spec sync_now(String.t(), keyword()) :: :ok | {:error, term()}
  def sync_now(server_id, opts \\ [])

  def sync_now(server_id, opts) when is_binary(server_id) and is_list(opts),
    do: Manager.sync_now(manager_name(opts), server_id)

  def sync_now(_server_id, _opts), do: {:error, :invalid_server_id}

  @doc "Runtime statistics for a single server."
  @spec server_stats(String.t(), keyword()) :: {:ok, map()} | :error | {:error, term()}
  def server_stats(server_id, opts \\ [])

  def server_stats(server_id, opts) when is_binary(server_id) and is_list(opts),
    do: Manager.server_stats(manager_name(opts), server_id)

  def server_stats(_server_id, _opts), do: {:error, :invalid_server_id}

  @doc "Aggregated statistics across all servers."
  @spec sync_stats(keyword()) :: {:ok, map()} | {:error, term()}
  def sync_stats(opts \\ []) when is_list(opts), do: Manager.sync_stats(manager_name(opts))

  @doc "Health summary across all servers."
  @spec sync_health(keyword()) :: {:ok, map()} | {:error, term()}
  def sync_health(opts \\ []) when is_list(opts), do: Manager.sync_health(manager_name(opts))

  @doc "Default location of the persisted sync-server state file."
  def default_path do
    Path.join(default_data_dir(), "sync_servers.json")
  end

  # Manager process override, primarily for tests.
  defp manager_name(opts), do: opts[:manager] || opts[:name] || Manager

  # $XDG_DATA_HOME/parrhesia, falling back to ~/.local/share/parrhesia.
  defp default_data_dir do
    case System.get_env("XDG_DATA_HOME") do
      nil -> Path.join([System.user_home!(), ".local/share", "parrhesia"])
      xdg_data_home -> Path.join(xdg_data_home, "parrhesia")
    end
  end
end
|
||||
939
lib/parrhesia/api/sync/manager.ex
Normal file
939
lib/parrhesia/api/sync/manager.ex
Normal file
@@ -0,0 +1,939 @@
|
||||
defmodule Parrhesia.API.Sync.Manager do
|
||||
@moduledoc false
|
||||
|
||||
use GenServer
|
||||
|
||||
alias Parrhesia.API.Sync
|
||||
alias Parrhesia.Protocol.Filter
|
||||
alias Parrhesia.Sync.Transport.WebSockexClient
|
||||
alias Parrhesia.Sync.Worker
|
||||
|
||||
require Logger
|
||||
|
||||
@default_overlap_window_seconds 300
|
||||
@default_mode :req_stream
|
||||
@default_auth_type :nip42
|
||||
@default_tls_mode :required
|
||||
@hex64 ~r/\A[0-9a-f]{64}\z/
|
||||
|
||||
# --- Client API -----------------------------------------------------------

def start_link(opts \\ []) do
  GenServer.start_link(__MODULE__, opts, name: Keyword.get(opts, :name, __MODULE__))
end

# Server CRUD.
def put_server(name, server), do: GenServer.call(name, {:put_server, server})
def remove_server(name, server_id), do: GenServer.call(name, {:remove_server, server_id})
def get_server(name, server_id), do: GenServer.call(name, {:get_server, server_id})
def list_servers(name), do: GenServer.call(name, :list_servers)

# Lifecycle control.
def start_server(name, server_id), do: GenServer.call(name, {:start_server, server_id})
def stop_server(name, server_id), do: GenServer.call(name, {:stop_server, server_id})
def sync_now(name, server_id), do: GenServer.call(name, {:sync_now, server_id})

# Introspection.
def server_stats(name, server_id), do: GenServer.call(name, {:server_stats, server_id})
def sync_stats(name), do: GenServer.call(name, :sync_stats)
def sync_health(name), do: GenServer.call(name, :sync_health)

# Fire-and-forget runtime telemetry from workers (handled in handle_cast).
def runtime_event(name, server_id, kind, attrs \\ %{}) do
  GenServer.cast(name, {:runtime_event, server_id, kind, attrs})
end
|
||||
@impl true
def init(opts) do
  path = Keyword.get(opts, :path, config_path() || Sync.default_path())

  runtime_opts = %{
    start_workers?: Keyword.get(opts, :start_workers?, config_value(:start_workers?, true)),
    worker_supervisor: Keyword.get(opts, :worker_supervisor, Parrhesia.Sync.WorkerSupervisor),
    worker_registry: Keyword.get(opts, :worker_registry, Parrhesia.Sync.WorkerRegistry),
    transport_module: Keyword.get(opts, :transport_module, WebSockexClient),
    relay_info_opts: Keyword.get(opts, :relay_info_opts, []),
    transport_opts: Keyword.get(opts, :transport_opts, [])
  }

  # Worker startup is deferred to handle_continue so init stays fast.
  {:ok, Map.merge(load_state(path), runtime_opts), {:continue, :bootstrap}}
end

@impl true
def handle_continue(:bootstrap, state) do
  if state.start_workers? do
    bootstrapped =
      state.servers
      |> Map.keys()
      |> Enum.reduce(state, fn server_id, acc -> maybe_start_worker(acc, server_id) end)

    {:noreply, bootstrapped}
  else
    {:noreply, state}
  end
end
|
||||
@impl true
def handle_call({:put_server, server}, _from, state) do
  case normalize_server(server) do
    {:ok, normalized_server} ->
      next_state =
        state
        |> put_server_state(normalized_server)
        |> persist_and_reconcile!(normalized_server.id)

      {:reply, {:ok, merged_server(next_state, normalized_server.id)}, next_state}

    {:error, reason} ->
      {:reply, {:error, reason}, state}
  end
end

def handle_call({:remove_server, server_id}, _from, state) do
  if Map.has_key?(state.servers, server_id) do
    next_state =
      state
      |> stop_worker_if_running(server_id)
      |> Map.update!(:servers, &Map.delete(&1, server_id))
      |> Map.update!(:runtime, &Map.delete(&1, server_id))

    # Bug fix: a persist failure previously leaked a bare {:error, reason}
    # out of handle_call (an invalid GenServer return that crashed the manager).
    reply_after_persist(next_state)
  else
    {:reply, {:error, :not_found}, state}
  end
end

def handle_call({:get_server, server_id}, _from, state) do
  case Map.fetch(state.servers, server_id) do
    {:ok, _server} -> {:reply, {:ok, merged_server(state, server_id)}, state}
    :error -> {:reply, :error, state}
  end
end

def handle_call(:list_servers, _from, state) do
  servers =
    state.servers
    |> Map.keys()
    |> Enum.sort()
    |> Enum.map(&merged_server(state, &1))

  {:reply, {:ok, servers}, state}
end

def handle_call({:start_server, server_id}, _from, state) do
  case Map.fetch(state.runtime, server_id) do
    {:ok, runtime} ->
      next_state =
        state
        |> put_runtime(server_id, %{runtime | state: :running, last_error: nil})
        |> persist_and_reconcile!(server_id)

      {:reply, :ok, next_state}

    :error ->
      {:reply, {:error, :not_found}, state}
  end
end

def handle_call({:stop_server, server_id}, _from, state) do
  case Map.fetch(state.runtime, server_id) do
    {:ok, runtime} ->
      stopped_runtime =
        Map.merge(runtime, %{
          state: :stopped,
          connected?: false,
          last_disconnected_at: now()
        })

      next_state =
        state
        |> stop_worker_if_running(server_id)
        |> put_runtime(server_id, stopped_runtime)

      # Same bug fix as remove_server: report persist failures via the reply.
      reply_after_persist(next_state)

    :error ->
      {:reply, {:error, :not_found}, state}
  end
end

def handle_call({:sync_now, server_id}, _from, state) do
  worker_pid = lookup_worker(state, server_id)

  cond do
    not Map.has_key?(state.runtime, server_id) ->
      {:reply, {:error, :not_found}, state}

    state.start_workers? and is_pid(worker_pid) ->
      # Worker is up: delegate the sync pass to it.
      Worker.sync_now(worker_pid)
      {:reply, :ok, state}

    state.start_workers? ->
      # No worker yet: mark the server running and reconcile, which starts one.
      next_state =
        state
        |> put_in([:runtime, server_id, :state], :running)
        |> persist_and_reconcile!(server_id)

      {:reply, :ok, next_state}

    true ->
      # Workers disabled (e.g. tests): record a completed no-op sync pass.
      next_state =
        state
        |> apply_runtime_event(server_id, :sync_started, %{})
        |> apply_runtime_event(server_id, :sync_completed, %{})

      reply_after_persist(next_state)
  end
end

def handle_call({:server_stats, server_id}, _from, state) do
  case Map.fetch(state.runtime, server_id) do
    {:ok, runtime} -> {:reply, {:ok, runtime_stats(runtime)}, state}
    :error -> {:reply, :error, state}
  end
end

def handle_call(:sync_stats, _from, state), do: {:reply, {:ok, aggregate_stats(state)}, state}
def handle_call(:sync_health, _from, state), do: {:reply, {:ok, health_summary(state)}, state}

# Persists `next_state` and replies :ok, or surfaces the persistence error
# to the caller while keeping the updated in-memory state.
defp reply_after_persist(next_state) do
  case persist_state(next_state) do
    :ok -> {:reply, :ok, next_state}
    {:error, reason} -> {:reply, {:error, reason}, next_state}
  end
end
|
||||
@impl true
def handle_cast({:runtime_event, server_id, kind, attrs}, state) do
  next_state = apply_runtime_event(state, server_id, kind, attrs)
  {:noreply, persist_state_if_known_server(next_state, server_id)}
end

# Persists runtime counters for known servers; persistence failures are
# logged and otherwise ignored (counters survive in memory). Events for
# unknown server ids are dropped silently.
defp persist_state_if_known_server(state, server_id) do
  with true <- Map.has_key?(state.runtime, server_id),
       {:error, reason} <- persist_state(state) do
    Logger.warning("failed to persist sync runtime for #{server_id}: #{inspect(reason)}")
    state
  else
    _other -> state
  end
end
|
||||
# Inserts/updates a server definition, creating runtime bookkeeping on first
# sight and preserving existing runtime across config updates.
defp put_server_state(state, server) do
  runtime = Map.get(state.runtime, server.id) || default_runtime(server)

  %{
    state
    | servers: Map.put(state.servers, server.id, server),
      runtime: Map.put(state.runtime, server.id, runtime)
  }
end

defp put_runtime(state, server_id, runtime),
  do: %{state | runtime: Map.put(state.runtime, server_id, runtime)}

# Persistence failures here are fatal (the `:ok` match crashes the call),
# since the caller has already committed the new desired state.
defp persist_and_reconcile!(state, server_id) do
  :ok = persist_state(state)
  reconcile_worker(state, server_id)
end

# Aligns the actual worker process with the desired runtime state. A desired
# -running server is restarted from scratch so config changes take effect.
defp reconcile_worker(state, server_id) do
  cond do
    not state.start_workers? ->
      state

    desired_running?(state, server_id) ->
      state
      |> stop_worker_if_running(server_id)
      |> maybe_start_worker(server_id)

    true ->
      stop_worker_if_running(state, server_id)
  end
end
|
||||
# Starts a worker for `server_id` when workers are enabled, the server is
# desired running, and no worker is registered yet. Start failures are
# logged and otherwise ignored (the manager keeps running).
defp maybe_start_worker(state, server_id) do
  should_start? =
    state.start_workers? and desired_running?(state, server_id) and
      lookup_worker(state, server_id) == nil

  if should_start?, do: start_worker(state, server_id), else: state
end

# Launches the transient worker child under the configured supervisor.
defp start_worker(state, server_id) do
  child_spec = %{
    id: {:sync_worker, server_id},
    start: {Worker, :start_link, [worker_opts(state, server_id)]},
    restart: :transient
  }

  case DynamicSupervisor.start_child(state.worker_supervisor, child_spec) do
    {:ok, _pid} ->
      state

    {:error, {:already_started, _pid}} ->
      state

    {:error, reason} ->
      Logger.warning("failed to start sync worker #{server_id}: #{inspect(reason)}")
      state
  end
end

# Options passed to Worker.start_link/1 for a given server.
defp worker_opts(state, server_id) do
  [
    name: via_tuple(server_id, state.worker_registry),
    server: Map.fetch!(state.servers, server_id),
    runtime: Map.fetch!(state.runtime, server_id),
    manager: self(),
    transport_module: state.transport_module,
    relay_info_opts: state.relay_info_opts,
    transport_opts: state.transport_opts
  ]
end
|
||||
# Stops the registered worker, if any; always returns the (unchanged) state.
defp stop_worker_if_running(state, server_id) do
  case lookup_worker(state, server_id) do
    nil -> :noop
    worker_pid -> _ = Worker.stop(worker_pid)
  end

  state
end

defp desired_running?(state, server_id) do
  match?({:ok, %{state: :running}}, Map.fetch(state.runtime, server_id))
end

# Returns the worker pid or nil; exits from a missing/dead registry are
# normalized to nil.
defp lookup_worker(state, server_id) do
  case Registry.lookup(state.worker_registry, server_id) do
    [{pid, _value} | _rest] -> pid
    [] -> nil
  end
catch
  :exit, _reason -> nil
end

defp via_tuple(server_id, registry), do: {:via, Registry, {registry, server_id}}

# Server definition with its runtime bookkeeping attached, as returned by
# the read-side calls.
defp merged_server(state, server_id) do
  server = Map.fetch!(state.servers, server_id)
  Map.put(server, :runtime, Map.fetch!(state.runtime, server_id))
end
|
||||
# Flattens one runtime record into the string-keyed stats payload.
defp runtime_stats(runtime) do
  passthrough_keys = [
    :events_received,
    :events_accepted,
    :events_duplicate,
    :events_rejected,
    :query_runs,
    :subscription_restarts,
    :reconnects,
    :last_sync_started_at,
    :last_sync_completed_at,
    :last_remote_eose_at,
    :last_error,
    :cursor_created_at,
    :cursor_event_id
  ]

  passthrough_keys
  |> Map.new(fn key -> {Atom.to_string(key), Map.fetch!(runtime, key)} end)
  |> Map.merge(%{
    "server_id" => runtime.server_id,
    "state" => Atom.to_string(runtime.state),
    "connected" => runtime.connected?
  })
end

# Totals across all runtime records, plus server/connection counts.
defp aggregate_stats(state) do
  runtimes = Map.values(state.runtime)
  total = fn key -> Enum.reduce(runtimes, 0, &(Map.fetch!(&1, key) + &2)) end

  %{
    "servers_total" => map_size(state.servers),
    "servers_enabled" => Enum.count(state.servers, fn {_id, server} -> server.enabled? end),
    "servers_running" => Enum.count(runtimes, &(&1.state == :running)),
    "servers_connected" => Enum.count(runtimes, & &1.connected?),
    "events_received" => total.(:events_received),
    "events_accepted" => total.(:events_accepted),
    "events_duplicate" => total.(:events_duplicate),
    "events_rejected" => total.(:events_rejected),
    "query_runs" => total.(:query_runs),
    "subscription_restarts" => total.(:subscription_restarts),
    "reconnects" => total.(:reconnects)
  }
end

# "degraded" whenever any server carries a non-empty last_error string.
defp health_summary(state) do
  failing_servers =
    for {server_id, runtime} <- state.runtime,
        is_binary(runtime.last_error) and runtime.last_error != "" do
      %{"id" => server_id, "reason" => runtime.last_error}
    end

  %{
    "status" => if(failing_servers == [], do: "ok", else: "degraded"),
    "servers_total" => map_size(state.servers),
    "servers_connected" =>
      Enum.count(state.runtime, fn {_id, runtime} -> runtime.connected? end),
    "servers_failing" => failing_servers
  }
end
|
||||
defp apply_runtime_event(state, server_id, kind, attrs) do
|
||||
case Map.fetch(state.runtime, server_id) do
|
||||
{:ok, runtime} ->
|
||||
updated_runtime = update_runtime_for_event(runtime, kind, attrs)
|
||||
put_runtime(state, server_id, updated_runtime)
|
||||
|
||||
:error ->
|
||||
state
|
||||
end
|
||||
end
|
||||
|
||||
defp update_runtime_for_event(runtime, :connected, _attrs) do
|
||||
runtime
|
||||
|> Map.put(:state, :running)
|
||||
|> Map.put(:connected?, true)
|
||||
|> Map.put(:last_connected_at, now())
|
||||
|> Map.put(:last_error, nil)
|
||||
end
|
||||
|
||||
defp update_runtime_for_event(runtime, :disconnected, attrs) do
|
||||
reason = format_reason(Map.get(attrs, :reason))
|
||||
|
||||
runtime
|
||||
|> Map.put(:connected?, false)
|
||||
|> Map.put(:last_disconnected_at, now())
|
||||
|> Map.update!(:reconnects, &(&1 + 1))
|
||||
|> Map.put(:last_error, reason)
|
||||
end
|
||||
|
||||
defp update_runtime_for_event(runtime, :error, attrs) do
|
||||
Map.put(runtime, :last_error, format_reason(Map.get(attrs, :reason)))
|
||||
end
|
||||
|
||||
defp update_runtime_for_event(runtime, :sync_started, _attrs) do
|
||||
runtime
|
||||
|> Map.put(:last_sync_started_at, now())
|
||||
|> Map.update!(:query_runs, &(&1 + 1))
|
||||
end
|
||||
|
||||
defp update_runtime_for_event(runtime, :sync_completed, _attrs) do
|
||||
timestamp = now()
|
||||
|
||||
runtime
|
||||
|> Map.put(:last_sync_completed_at, timestamp)
|
||||
|> Map.put(:last_eose_at, timestamp)
|
||||
|> Map.put(:last_remote_eose_at, timestamp)
|
||||
end
|
||||
|
||||
defp update_runtime_for_event(runtime, :subscription_restart, _attrs) do
|
||||
Map.update!(runtime, :subscription_restarts, &(&1 + 1))
|
||||
end
|
||||
|
||||
defp update_runtime_for_event(runtime, :cursor_advanced, attrs) do
|
||||
runtime
|
||||
|> Map.put(:cursor_created_at, Map.get(attrs, :created_at))
|
||||
|> Map.put(:cursor_event_id, Map.get(attrs, :event_id))
|
||||
end
|
||||
|
||||
defp update_runtime_for_event(runtime, :event_result, attrs) do
|
||||
event = Map.get(attrs, :event, %{})
|
||||
result = Map.get(attrs, :result)
|
||||
|
||||
runtime
|
||||
|> Map.update!(:events_received, &(&1 + 1))
|
||||
|> Map.put(:last_event_received_at, now())
|
||||
|> increment_result_counter(result)
|
||||
|> maybe_put_last_error(attrs)
|
||||
|> maybe_advance_runtime_cursor(event, result)
|
||||
end
|
||||
|
||||
defp update_runtime_for_event(runtime, _kind, _attrs), do: runtime
|
||||
|
||||
defp increment_result_counter(runtime, :accepted),
|
||||
do: Map.update!(runtime, :events_accepted, &(&1 + 1))
|
||||
|
||||
defp increment_result_counter(runtime, :duplicate),
|
||||
do: Map.update!(runtime, :events_duplicate, &(&1 + 1))
|
||||
|
||||
defp increment_result_counter(runtime, :rejected),
|
||||
do: Map.update!(runtime, :events_rejected, &(&1 + 1))
|
||||
|
||||
defp increment_result_counter(runtime, _result), do: runtime
|
||||
|
||||
defp maybe_put_last_error(runtime, %{reason: nil}), do: runtime
|
||||
|
||||
defp maybe_put_last_error(runtime, attrs),
|
||||
do: Map.put(runtime, :last_error, format_reason(attrs[:reason]))
|
||||
|
||||
defp maybe_advance_runtime_cursor(runtime, event, result)
|
||||
when result in [:accepted, :duplicate] do
|
||||
created_at = Map.get(event, "created_at")
|
||||
event_id = Map.get(event, "id")
|
||||
|
||||
cond do
|
||||
not is_integer(created_at) or not is_binary(event_id) ->
|
||||
runtime
|
||||
|
||||
is_nil(runtime.cursor_created_at) ->
|
||||
runtime
|
||||
|> Map.put(:cursor_created_at, created_at)
|
||||
|> Map.put(:cursor_event_id, event_id)
|
||||
|
||||
created_at > runtime.cursor_created_at ->
|
||||
runtime
|
||||
|> Map.put(:cursor_created_at, created_at)
|
||||
|> Map.put(:cursor_event_id, event_id)
|
||||
|
||||
created_at == runtime.cursor_created_at and event_id > runtime.cursor_event_id ->
|
||||
runtime
|
||||
|> Map.put(:cursor_created_at, created_at)
|
||||
|> Map.put(:cursor_event_id, event_id)
|
||||
|
||||
true ->
|
||||
runtime
|
||||
end
|
||||
end
|
||||
|
||||
defp maybe_advance_runtime_cursor(runtime, _event, _result), do: runtime
|
||||
|
||||
# Renders an arbitrary failure reason as a string (binaries pass through,
# nil stays nil, everything else is inspected).
defp format_reason(reason) do
  cond do
    is_nil(reason) -> nil
    is_binary(reason) -> reason
    true -> inspect(reason)
  end
end
|
||||
|
||||
# Loads persisted sync state from `path`, falling back to an empty state
# when the file is missing, unreadable, or fails to decode. Decode and
# read failures are logged; a missing file (:enoent) is the normal
# first-boot case and stays silent.
defp load_state(path) do
  case File.read(path) do
    {:ok, payload} ->
      case decode_persisted_state(payload, path) do
        {:ok, state} ->
          state

        {:error, reason} ->
          # Corrupt or incompatible payload: start fresh rather than crash.
          Logger.warning("failed to load sync state from #{path}: #{inspect(reason)}")
          empty_state(path)
      end

    {:error, :enoent} ->
      # No state file yet — first run.
      empty_state(path)

    {:error, reason} ->
      Logger.warning("failed to read sync state from #{path}: #{inspect(reason)}")
      empty_state(path)
  end
end
|
||||
|
||||
# Decodes a persisted JSON payload into %{path:, servers:, runtime:}.
# Missing sections default to empty maps; any failing step's
# {:error, reason} falls out of the `with` and is returned unchanged.
defp decode_persisted_state(payload, path) do
  with {:ok, decoded} <- JSON.decode(payload),
       {:ok, servers} <- decode_servers(Map.get(decoded, "servers", %{})),
       {:ok, runtime} <- decode_runtime(Map.get(decoded, "runtime", %{}), servers) do
    {:ok, %{path: path, servers: servers, runtime: runtime}}
  end
end
|
||||
|
||||
# Validates and normalizes each persisted server entry, re-keying the
# result by the server's own id (the persisted map key is ignored).
# Stops at the first invalid entry and returns that entry's error.
defp decode_servers(servers) when is_map(servers) do
  Enum.reduce_while(servers, {:ok, %{}}, fn {_id, server_payload}, {:ok, acc} ->
    case normalize_server(server_payload) do
      {:ok, server} -> {:cont, {:ok, Map.put(acc, server.id, server)}}
      {:error, reason} -> {:halt, {:error, reason}}
    end
  end)
end

# A non-map "servers" section is corrupt.
defp decode_servers(_servers), do: {:error, :invalid_servers_state}
|
||||
|
||||
# Rebuilds the per-server runtime map from the persisted payload, keyed by
# the already-decoded server ids. Runtime entries for unknown servers are
# dropped; servers without a persisted runtime get defaults.
defp decode_runtime(runtime_payload, servers)
     when is_map(runtime_payload) and is_map(servers) do
  decoded =
    Map.new(servers, fn {server_id, server} ->
      {server_id, normalize_runtime(Map.get(runtime_payload, server_id), server)}
    end)

  {:ok, decoded}
end

defp decode_runtime(_runtime_payload, _servers), do: {:error, :invalid_runtime_state}
|
||||
|
||||
# No persisted runtime for this server: start from defaults.
defp normalize_runtime(nil, server), do: default_runtime(server)

# Rebuilds a runtime map from a persisted payload, coercing every field to
# a known-good value — unparseable fields fall back to 0 / nil / false
# instead of failing the whole load. Payload keys may be atoms or strings,
# hence the fetch_* helpers.
defp normalize_runtime(runtime, server) when is_map(runtime) do
  %{
    server_id: server.id,
    state: normalize_runtime_state(fetch_value(runtime, :state)),
    # fetch_boolean yields nil for absent/invalid values; default to false.
    connected?: fetch_boolean(runtime, :connected?) || false,
    last_connected_at: fetch_string_or_nil(runtime, :last_connected_at),
    last_disconnected_at: fetch_string_or_nil(runtime, :last_disconnected_at),
    last_sync_started_at: fetch_string_or_nil(runtime, :last_sync_started_at),
    last_sync_completed_at: fetch_string_or_nil(runtime, :last_sync_completed_at),
    last_event_received_at: fetch_string_or_nil(runtime, :last_event_received_at),
    last_eose_at: fetch_string_or_nil(runtime, :last_eose_at),
    reconnect_attempts: fetch_non_neg_integer(runtime, :reconnect_attempts),
    last_error: fetch_string_or_nil(runtime, :last_error),
    events_received: fetch_non_neg_integer(runtime, :events_received),
    events_accepted: fetch_non_neg_integer(runtime, :events_accepted),
    events_duplicate: fetch_non_neg_integer(runtime, :events_duplicate),
    events_rejected: fetch_non_neg_integer(runtime, :events_rejected),
    query_runs: fetch_non_neg_integer(runtime, :query_runs),
    subscription_restarts: fetch_non_neg_integer(runtime, :subscription_restarts),
    reconnects: fetch_non_neg_integer(runtime, :reconnects),
    last_remote_eose_at: fetch_string_or_nil(runtime, :last_remote_eose_at),
    # Sync cursor: last (created_at, event_id) applied from this server.
    cursor_created_at: fetch_optional_integer(runtime, :cursor_created_at),
    cursor_event_id: fetch_string_or_nil(runtime, :cursor_event_id)
  }
end

# Any non-map runtime payload is treated as absent.
defp normalize_runtime(_runtime, server), do: default_runtime(server)
|
||||
|
||||
# Writes the encoded state atomically: encode to "<path>.tmp", then rename
# over the target so readers never observe a partially written file.
# NOTE(review): the temp file is not fsync'ed before the rename, so a power
# loss can still leave a truncated file — confirm whether that matters here.
defp persist_state(%{path: path} = state) do
  temp_path = path <> ".tmp"

  with :ok <- File.mkdir_p(Path.dirname(path)),
       :ok <- File.write(temp_path, JSON.encode!(encode_state(state))),
       :ok <- File.rename(temp_path, path) do
    :ok
  else
    {:error, reason} ->
      # Best-effort cleanup of the orphaned temp file; its result is ignored.
      _ = File.rm(temp_path)
      {:error, reason}
  end
end
|
||||
|
||||
# Serializes in-memory state into the JSON-friendly shape written to disk.
# The "version" field guards against loading incompatible layouts later.
defp encode_state(state) do
  %{
    "version" => 2,
    "servers" =>
      Map.new(state.servers, fn {server_id, server} -> {server_id, encode_server(server)} end),
    "runtime" =>
      Map.new(state.runtime, fn {server_id, runtime} -> {server_id, encode_runtime(runtime)} end)
  }
end
|
||||
|
||||
# Serializes one server config for persistence; atom-valued fields (mode,
# auth type, TLS mode, pin type) become strings so the map is JSON-safe.
defp encode_server(server) do
  %{
    "id" => server.id,
    "url" => server.url,
    "enabled?" => server.enabled?,
    "auth_pubkey" => server.auth_pubkey,
    "filters" => server.filters,
    "mode" => Atom.to_string(server.mode),
    "overlap_window_seconds" => server.overlap_window_seconds,
    "auth" => %{"type" => Atom.to_string(server.auth.type)},
    "tls" => %{
      "mode" => Atom.to_string(server.tls.mode),
      "hostname" => server.tls.hostname,
      "pins" =>
        Enum.map(server.tls.pins, fn pin ->
          %{
            "type" => Atom.to_string(pin.type),
            "value" => pin.value
          }
        end)
    },
    "metadata" => server.metadata
  }
end
|
||||
|
||||
# Serializes per-server runtime bookkeeping for persistence. Timestamps
# are already ISO 8601 strings (see now/0) and counters are plain
# integers, so only the :state atom needs converting.
defp encode_runtime(runtime) do
  %{
    "server_id" => runtime.server_id,
    "state" => Atom.to_string(runtime.state),
    "connected?" => runtime.connected?,
    "last_connected_at" => runtime.last_connected_at,
    "last_disconnected_at" => runtime.last_disconnected_at,
    "last_sync_started_at" => runtime.last_sync_started_at,
    "last_sync_completed_at" => runtime.last_sync_completed_at,
    "last_event_received_at" => runtime.last_event_received_at,
    "last_eose_at" => runtime.last_eose_at,
    "reconnect_attempts" => runtime.reconnect_attempts,
    "last_error" => runtime.last_error,
    "events_received" => runtime.events_received,
    "events_accepted" => runtime.events_accepted,
    "events_duplicate" => runtime.events_duplicate,
    "events_rejected" => runtime.events_rejected,
    "query_runs" => runtime.query_runs,
    "subscription_restarts" => runtime.subscription_restarts,
    "reconnects" => runtime.reconnects,
    "last_remote_eose_at" => runtime.last_remote_eose_at,
    "cursor_created_at" => runtime.cursor_created_at,
    "cursor_event_id" => runtime.cursor_event_id
  }
end
|
||||
|
||||
# Blank state for `path`: no servers, no runtime bookkeeping.
defp empty_state(path), do: %{path: path, servers: %{}, runtime: %{}}
|
||||
|
||||
# Pristine runtime bookkeeping for a server: never connected, zeroed
# counters, no sync cursor. An enabled server starts in :running.
defp default_runtime(server) do
  %{
    server_id: server.id,
    state: if(server.enabled?, do: :running, else: :stopped),
    connected?: false,
    last_connected_at: nil,
    last_disconnected_at: nil,
    last_sync_started_at: nil,
    last_sync_completed_at: nil,
    last_event_received_at: nil,
    last_eose_at: nil,
    reconnect_attempts: 0,
    last_error: nil,
    events_received: 0,
    events_accepted: 0,
    events_duplicate: 0,
    events_rejected: 0,
    query_runs: 0,
    subscription_restarts: 0,
    reconnects: 0,
    last_remote_eose_at: nil,
    cursor_created_at: nil,
    cursor_event_id: nil
  }
end
|
||||
|
||||
# Validates and normalizes one server entry (atom- or string-keyed map)
# into the canonical internal shape. The first failing field short-circuits
# the `with` and its {:error, reason} is returned as-is. The URL's host and
# scheme feed the TLS normalization (hostname fallback, scheme/mode check).
defp normalize_server(server) when is_map(server) do
  with {:ok, id} <- normalize_non_empty_string(fetch_value(server, :id), :invalid_server_id),
       {:ok, {url, host, scheme}} <- normalize_url(fetch_value(server, :url)),
       {:ok, enabled?} <- normalize_boolean(fetch_value(server, :enabled?), true),
       {:ok, auth_pubkey} <- normalize_pubkey(fetch_value(server, :auth_pubkey)),
       {:ok, filters} <- normalize_filters(fetch_value(server, :filters)),
       {:ok, mode} <- normalize_mode(fetch_value(server, :mode)),
       {:ok, overlap_window_seconds} <-
         normalize_overlap_window(fetch_value(server, :overlap_window_seconds)),
       {:ok, auth} <- normalize_auth(fetch_value(server, :auth)),
       {:ok, tls} <- normalize_tls(fetch_value(server, :tls), host, scheme),
       {:ok, metadata} <- normalize_metadata(fetch_value(server, :metadata)) do
    {:ok,
     %{
       id: id,
       url: url,
       enabled?: enabled?,
       auth_pubkey: auth_pubkey,
       filters: filters,
       mode: mode,
       overlap_window_seconds: overlap_window_seconds,
       auth: auth,
       tls: tls,
       metadata: metadata
     }}
  end
end

# Non-map entries can't be server configs.
defp normalize_server(_server), do: {:error, :invalid_server}
|
||||
|
||||
# Accepts only ws:// or wss:// URLs with a non-empty host; returns the
# canonicalized URL string together with its host and scheme.
defp normalize_url(url) when is_binary(url) and url != "" do
  case URI.parse(url) do
    %URI{scheme: scheme, host: host} = uri
    when scheme in ["ws", "wss"] and is_binary(host) and host != "" ->
      {:ok, {URI.to_string(uri), host, scheme}}

    _invalid ->
      {:error, :invalid_url}
  end
end

defp normalize_url(_url), do: {:error, :invalid_url}
|
||||
|
||||
# Lowercases a hex pubkey and validates it against the @hex64 pattern.
defp normalize_pubkey(pubkey) when is_binary(pubkey) do
  normalized = String.downcase(pubkey)

  if Regex.match?(@hex64, normalized) do
    {:ok, normalized}
  else
    {:error, :invalid_auth_pubkey}
  end
end

defp normalize_pubkey(_pubkey), do: {:error, :invalid_auth_pubkey}
|
||||
|
||||
# Stringifies each filter's keys, then validates the whole set at once;
# a validation error is returned unchanged.
defp normalize_filters(filters) when is_list(filters) do
  normalized_filters = Enum.map(filters, &normalize_filter_map/1)

  case Filter.validate_filters(normalized_filters) do
    :ok -> {:ok, normalized_filters}
    error -> error
  end
end

defp normalize_filters(_filters), do: {:error, :invalid_filters}
|
||||
|
||||
# Sync mode; :req_stream (atom, or string from persisted JSON) is the only
# known mode today, absent means the configured default.
defp normalize_mode(nil), do: {:ok, @default_mode}
defp normalize_mode(mode) when mode in [:req_stream, "req_stream"], do: {:ok, :req_stream}
defp normalize_mode(_mode), do: {:error, :invalid_mode}
|
||||
|
||||
# Overlap window must be a non-negative integer; absent means the default.
defp normalize_overlap_window(seconds) do
  cond do
    is_nil(seconds) -> {:ok, @default_overlap_window_seconds}
    is_integer(seconds) and seconds >= 0 -> {:ok, seconds}
    true -> {:error, :invalid_overlap_window_seconds}
  end
end
|
||||
|
||||
# Auth config: absent means the default type; a map must carry a valid
# :type; anything else is rejected.
defp normalize_auth(auth) do
  case auth do
    nil ->
      {:ok, %{type: @default_auth_type}}

    %{} = auth_map ->
      case normalize_auth_type(fetch_value(auth_map, :type)) do
        {:ok, type} -> {:ok, %{type: type}}
        error -> error
      end

    _other ->
      {:error, :invalid_auth}
  end
end
|
||||
|
||||
# Only NIP-42 auth is supported; absent means the configured default.
defp normalize_auth_type(nil), do: {:ok, @default_auth_type}
defp normalize_auth_type(type) when type in [:nip42, "nip42"], do: {:ok, :nip42}
defp normalize_auth_type(_type), do: {:error, :invalid_auth_type}
|
||||
|
||||
# Normalizes a TLS config map. The hostname falls back to the URL host
# when not set explicitly, and the TLS mode must be consistent with the
# URL scheme (:required demands wss://). Pins are validated per mode.
defp normalize_tls(tls, host, scheme) when is_map(tls) do
  with {:ok, mode} <- normalize_tls_mode(fetch_value(tls, :mode)),
       :ok <- validate_tls_mode_against_scheme(mode, scheme),
       {:ok, hostname} <- normalize_hostname(fetch_value(tls, :hostname) || host),
       {:ok, pins} <- normalize_tls_pins(mode, fetch_value(tls, :pins)) do
    {:ok, %{mode: mode, hostname: hostname, pins: pins}}
  end
end

defp normalize_tls(_tls, _host, _scheme), do: {:error, :invalid_tls}
|
||||
|
||||
# TLS mode: :required or :disabled (atoms, or strings from persisted
# JSON); absent means the configured default.
defp normalize_tls_mode(nil), do: {:ok, @default_tls_mode}
defp normalize_tls_mode(mode) when mode in [:required, "required"], do: {:ok, :required}
defp normalize_tls_mode(mode) when mode in [:disabled, "disabled"], do: {:ok, :disabled}
defp normalize_tls_mode(_mode), do: {:error, :invalid_tls_mode}
|
||||
|
||||
# :required TLS only makes sense for wss:// URLs; :disabled accepts any
# scheme that normalize_url already admitted.
defp validate_tls_mode_against_scheme(mode, scheme) do
  case {mode, scheme} do
    {:required, "wss"} -> :ok
    {:required, _other} -> {:error, :invalid_url}
    {:disabled, _any} -> :ok
  end
end
|
||||
|
||||
# A TLS hostname must be a non-empty binary (explicit value or URL host).
defp normalize_hostname(hostname) do
  if is_binary(hostname) and hostname != "" do
    {:ok, hostname}
  else
    {:error, :invalid_tls_hostname}
  end
end
|
||||
|
||||
# Certificate pins: irrelevant when TLS is disabled, mandatory (and
# non-empty) when TLS is required.
#
# Fix: the :disabled clauses previously matched only `nil` or a list and
# crashed with FunctionClauseError on any other pins payload; since pins
# are ignored in that mode, any value now normalizes to [].
defp normalize_tls_pins(:disabled, _pins), do: {:ok, []}

defp normalize_tls_pins(:required, pins) when is_list(pins) and pins != [] do
  # Validate each pin, stopping at the first bad one; the accumulator is
  # built reversed and flipped once at the end.
  pins
  |> Enum.reduce_while({:ok, []}, fn pin, {:ok, acc} ->
    case normalize_tls_pin(pin) do
      {:ok, normalized_pin} -> {:cont, {:ok, [normalized_pin | acc]}}
      {:error, reason} -> {:halt, {:error, reason}}
    end
  end)
  |> case do
    {:ok, normalized_pins} -> {:ok, Enum.reverse(normalized_pins)}
    error -> error
  end
end

# :required with nil, a non-list, or an empty pin list is a config error.
defp normalize_tls_pins(:required, _pins), do: {:error, :invalid_tls_pins}
|
||||
|
||||
# A pin needs a supported type and a non-empty value; the type error wins
# when both fields are invalid (matching the original check order).
defp normalize_tls_pin(pin) when is_map(pin) do
  type_result = normalize_tls_pin_type(fetch_value(pin, :type))
  value_result = normalize_non_empty_string(fetch_value(pin, :value), :invalid_tls_pin)

  case {type_result, value_result} do
    {{:ok, type}, {:ok, value}} -> {:ok, %{type: type, value: value}}
    {{:error, reason}, _value} -> {:error, reason}
    {_type, {:error, reason}} -> {:error, reason}
  end
end

defp normalize_tls_pin(_pin), do: {:error, :invalid_tls_pin}
|
||||
|
||||
# Only SPKI SHA-256 pins are supported.
defp normalize_tls_pin_type(type) when type in [:spki_sha256, "spki_sha256"],
  do: {:ok, :spki_sha256}

defp normalize_tls_pin_type(_type), do: {:error, :invalid_tls_pin}
|
||||
|
||||
# Metadata is an optional free-form map; absent means empty.
defp normalize_metadata(metadata) do
  cond do
    is_nil(metadata) -> {:ok, %{}}
    is_map(metadata) -> {:ok, metadata}
    true -> {:error, :invalid_metadata}
  end
end
|
||||
|
||||
# Booleans pass through; nil takes the default; anything else is rejected.
defp normalize_boolean(value, default) do
  cond do
    is_nil(value) -> {:ok, default}
    is_boolean(value) -> {:ok, value}
    true -> {:error, :invalid_enabled_flag}
  end
end
|
||||
|
||||
# Requires a non-empty binary, otherwise fails with the caller's reason.
defp normalize_non_empty_string(value, reason) do
  if is_binary(value) and value != "" do
    {:ok, value}
  else
    {:error, reason}
  end
end
|
||||
|
||||
# Converts atom keys in a filter map to strings; non-map values pass
# through untouched (normalize_filters validates the result afterwards).
defp normalize_filter_map(%{} = filter) do
  Map.new(filter, fn {key, value} ->
    if is_atom(key) do
      {Atom.to_string(key), value}
    else
      {key, value}
    end
  end)
end

defp normalize_filter_map(filter), do: filter
|
||||
|
||||
# Only an explicit "running" marker resumes a server; anything else —
# including "stopped" and corrupt values — decodes to :stopped.
defp normalize_runtime_state(state) when state in [:running, "running"], do: :running
defp normalize_runtime_state(_state), do: :stopped
|
||||
|
||||
# Counter field: non-negative integers pass through, everything else is 0.
defp fetch_non_neg_integer(map, key) do
  value = fetch_value(map, key)

  if is_integer(value) and value >= 0, do: value, else: 0
end
|
||||
|
||||
# Optional integer field: non-negative integers pass through, everything
# else (including absence) is nil.
defp fetch_optional_integer(map, key) do
  value = fetch_value(map, key)

  if is_integer(value) and value >= 0, do: value, else: nil
end
|
||||
|
||||
# Boolean field: true/false pass through, everything else is nil.
defp fetch_boolean(map, key) do
  value = fetch_value(map, key)

  if is_boolean(value), do: value, else: nil
end
|
||||
|
||||
# String field: non-empty binaries pass through, everything else is nil.
defp fetch_string_or_nil(map, key) do
  value = fetch_value(map, key)

  if is_binary(value) and value != "", do: value, else: nil
end
|
||||
|
||||
# Reads `key` from a map whose keys may be atoms (in-memory state) or
# strings (state decoded from persisted JSON). The atom key wins whenever
# it holds a non-nil value; a nil (or absent) atom key falls through to
# the string key, as before.
#
# Fix: `Map.get(map, key) || Map.get(map, Atom.to_string(key))` treated a
# stored `false` under the atom key as absent and fell through to the
# string key; `false` is now returned as-is.
defp fetch_value(map, key) when is_map(map) do
  case Map.fetch(map, key) do
    {:ok, value} when not is_nil(value) -> value
    _missing_or_nil -> Map.get(map, Atom.to_string(key))
  end
end
|
||||
|
||||
# Filesystem location of the persisted sync state, from app config.
defp config_path, do: config_value(:path)
|
||||
|
||||
# Reads `key` from the :sync section of the :parrhesia app environment,
# at runtime so config changes are picked up without recompiling.
defp config_value(key, default \\ nil) do
  sync_config = Application.get_env(:parrhesia, :sync, [])
  Keyword.get(sync_config, key, default)
end
|
||||
|
||||
# Current UTC time as a second-precision ISO 8601 string (the format all
# persisted timestamps use).
defp now do
  DateTime.to_iso8601(DateTime.truncate(DateTime.utc_now(), :second))
end
|
||||
end
|
||||
@@ -11,6 +11,7 @@ defmodule Parrhesia.Application do
|
||||
Parrhesia.Storage.Supervisor,
|
||||
Parrhesia.Subscriptions.Supervisor,
|
||||
Parrhesia.Auth.Supervisor,
|
||||
Parrhesia.Sync.Supervisor,
|
||||
Parrhesia.Policy.Supervisor,
|
||||
Parrhesia.Web.Endpoint,
|
||||
Parrhesia.Tasks.Supervisor
|
||||
|
||||
@@ -9,13 +9,20 @@ defmodule Parrhesia.Auth.Nip98 do
|
||||
|
||||
@spec validate_authorization_header(String.t() | nil, String.t(), String.t()) ::
|
||||
{:ok, map()} | {:error, atom()}
|
||||
def validate_authorization_header(nil, _method, _url), do: {:error, :missing_authorization}
|
||||
def validate_authorization_header(authorization, method, url) do
|
||||
validate_authorization_header(authorization, method, url, [])
|
||||
end
|
||||
|
||||
def validate_authorization_header("Nostr " <> encoded_event, method, url)
|
||||
when is_binary(method) and is_binary(url) do
|
||||
@spec validate_authorization_header(String.t() | nil, String.t(), String.t(), keyword()) ::
|
||||
{:ok, map()} | {:error, atom()}
|
||||
def validate_authorization_header(nil, _method, _url, _opts),
|
||||
do: {:error, :missing_authorization}
|
||||
|
||||
def validate_authorization_header("Nostr " <> encoded_event, method, url, opts)
|
||||
when is_binary(method) and is_binary(url) and is_list(opts) do
|
||||
with {:ok, event_json} <- decode_base64(encoded_event),
|
||||
{:ok, event} <- JSON.decode(event_json),
|
||||
:ok <- validate_event_shape(event),
|
||||
:ok <- validate_event_shape(event, opts),
|
||||
:ok <- validate_http_binding(event, method, url) do
|
||||
{:ok, event}
|
||||
else
|
||||
@@ -24,7 +31,8 @@ defmodule Parrhesia.Auth.Nip98 do
|
||||
end
|
||||
end
|
||||
|
||||
def validate_authorization_header(_header, _method, _url), do: {:error, :invalid_authorization}
|
||||
def validate_authorization_header(_header, _method, _url, _opts),
|
||||
do: {:error, :invalid_authorization}
|
||||
|
||||
defp decode_base64(encoded_event) do
|
||||
case Base.decode64(encoded_event) do
|
||||
@@ -33,33 +41,35 @@ defmodule Parrhesia.Auth.Nip98 do
|
||||
end
|
||||
end
|
||||
|
||||
defp validate_event_shape(event) when is_map(event) do
|
||||
defp validate_event_shape(event, opts) when is_map(event) do
|
||||
with :ok <- EventValidator.validate(event),
|
||||
:ok <- validate_kind(event),
|
||||
:ok <- validate_fresh_created_at(event) do
|
||||
:ok <- validate_fresh_created_at(event, opts) do
|
||||
:ok
|
||||
else
|
||||
:ok -> :ok
|
||||
{:error, :stale_event} -> {:error, :stale_event}
|
||||
{:error, _reason} -> {:error, :invalid_event}
|
||||
end
|
||||
end
|
||||
|
||||
defp validate_event_shape(_event), do: {:error, :invalid_event}
|
||||
defp validate_event_shape(_event, _opts), do: {:error, :invalid_event}
|
||||
|
||||
defp validate_kind(%{"kind" => 27_235}), do: :ok
|
||||
defp validate_kind(_event), do: {:error, :invalid_event}
|
||||
|
||||
defp validate_fresh_created_at(%{"created_at" => created_at}) when is_integer(created_at) do
|
||||
defp validate_fresh_created_at(%{"created_at" => created_at}, opts)
|
||||
when is_integer(created_at) do
|
||||
now = System.system_time(:second)
|
||||
max_age_seconds = Keyword.get(opts, :max_age_seconds, @max_age_seconds)
|
||||
|
||||
if abs(now - created_at) <= @max_age_seconds do
|
||||
if abs(now - created_at) <= max_age_seconds do
|
||||
:ok
|
||||
else
|
||||
{:error, :stale_event}
|
||||
end
|
||||
end
|
||||
|
||||
defp validate_fresh_created_at(_event), do: {:error, :invalid_event}
|
||||
defp validate_fresh_created_at(_event, _opts), do: {:error, :invalid_event}
|
||||
|
||||
defp validate_http_binding(event, method, url) do
|
||||
tags = Map.get(event, "tags", [])
|
||||
|
||||
@@ -12,7 +12,8 @@ defmodule Parrhesia.Auth.Supervisor do
|
||||
@impl true
|
||||
def init(_init_arg) do
|
||||
children = [
|
||||
{Parrhesia.Auth.Challenges, name: Parrhesia.Auth.Challenges}
|
||||
{Parrhesia.Auth.Challenges, name: Parrhesia.Auth.Challenges},
|
||||
{Parrhesia.API.Identity.Manager, []}
|
||||
]
|
||||
|
||||
Supervisor.init(children, strategy: :one_for_one)
|
||||
|
||||
136
lib/parrhesia/negentropy/engine.ex
Normal file
136
lib/parrhesia/negentropy/engine.ex
Normal file
@@ -0,0 +1,136 @@
|
||||
defmodule Parrhesia.Negentropy.Engine do
  @moduledoc """
  Relay/client-agnostic negentropy reconciliation engine.

  Given the local item set and a peer's message, produces the response
  message that narrows down which ranges differ (see NIP-77). Ranges with
  few items are sent as explicit id lists; larger ranges are summarized by
  fingerprint and split on mismatch.
  """

  alias Parrhesia.Negentropy.Message

  # Ranges with at most this many items are sent as explicit id lists
  # instead of fingerprints (overridable via :id_list_threshold).
  @default_id_list_threshold 32

  @type item :: Message.item()

  # Builds the opening message: the whole (sorted) item set described as a
  # single range up to :infinity.
  @spec initial_message([item()], keyword()) :: binary()
  def initial_message(items, opts \\ []) when is_list(opts) do
    normalized_items = normalize_items(items)

    Message.encode([
      describe_range(normalized_items, :infinity, id_list_threshold(opts))
    ])
  end

  # Answers a peer's message against the local item set. An unsupported
  # protocol version is answered with our one-byte version advertisement.
  @spec answer([item()], binary(), keyword()) :: {:ok, binary()} | {:error, term()}
  def answer(items, incoming_message, opts \\ [])
      when is_binary(incoming_message) and is_list(opts) do
    normalized_items = normalize_items(items)
    threshold = id_list_threshold(opts)

    case Message.decode(incoming_message) do
      {:ok, ranges} ->
        response_ranges =
          respond_to_ranges(normalized_items, ranges, Message.initial_lower_bound(), threshold)

        {:ok, Message.encode(response_ranges)}

      {:unsupported_version, _supported_version} ->
        {:ok, Message.supported_version_message()}

      {:error, reason} ->
        {:error, reason}
    end
  end

  defp respond_to_ranges(_items, [], _lower_bound, _threshold), do: []

  # Walks the peer's ranges in order; each range's upper bound becomes the
  # next range's lower bound, so the intervals tile [lower_bound, ∞).
  defp respond_to_ranges(items, [range | rest], lower_bound, threshold) do
    upper_bound = Map.fetch!(range, :upper_bound)

    items_in_range =
      Enum.filter(items, fn item ->
        Message.item_in_range?(item, lower_bound, upper_bound)
      end)

    response =
      case range.mode do
        :skip ->
          # Peer declared this range settled: echo the skip.
          [%{upper_bound: upper_bound, mode: :skip, payload: nil}]

        :fingerprint ->
          respond_to_fingerprint_range(items_in_range, upper_bound, range.payload, threshold)

        :id_list ->
          respond_to_id_list_range(items_in_range, upper_bound, range.payload, threshold)
      end

    response ++ respond_to_ranges(items, rest, upper_bound, threshold)
  end

  # Fingerprint match means the range is identical on both sides.
  defp respond_to_fingerprint_range(items, upper_bound, remote_fingerprint, threshold) do
    if Message.fingerprint(items) == remote_fingerprint do
      [%{upper_bound: upper_bound, mode: :skip, payload: nil}]
    else
      mismatch_response(items, upper_bound, threshold)
    end
  end

  # Exact id-list match (same ids, same order) means the range is settled.
  defp respond_to_id_list_range(items, upper_bound, remote_ids, threshold) do
    if Enum.map(items, & &1.id) == remote_ids do
      [%{upper_bound: upper_bound, mode: :skip, payload: nil}]
    else
      mismatch_response(items, upper_bound, threshold)
    end
  end

  # On mismatch: small ranges are answered with explicit ids, larger ones
  # are split in half so the difference can be localized recursively.
  defp mismatch_response(items, upper_bound, threshold) do
    if length(items) <= threshold do
      [%{upper_bound: upper_bound, mode: :id_list, payload: Enum.map(items, & &1.id)}]
    else
      split_response(items, upper_bound, threshold)
    end
  end

  # Splits a mismatched range at its median item. Only reached when
  # length(items) > threshold >= 1, so both halves are non-empty.
  defp split_response(items, upper_bound, threshold) do
    midpoint = div(length(items), 2)
    left_items = Enum.take(items, midpoint)
    right_items = Enum.drop(items, midpoint)

    # Compact boundary strictly between the last left and first right item.
    boundary =
      left_items
      |> List.last()
      |> then(&Message.split_bound(&1, hd(right_items)))

    [
      describe_range(left_items, boundary, threshold),
      describe_range(right_items, upper_bound, threshold)
    ]
  end

  # Summarizes a range: explicit ids when small enough, fingerprint otherwise.
  defp describe_range(items, upper_bound, threshold) do
    if length(items) <= threshold do
      %{upper_bound: upper_bound, mode: :id_list, payload: Enum.map(items, & &1.id)}
    else
      %{upper_bound: upper_bound, mode: :fingerprint, payload: Message.fingerprint(items)}
    end
  end

  # Validates every item and sorts by (created_at, id) ascending, the order
  # all range/fingerprint operations assume.
  defp normalize_items(items) do
    items
    |> Enum.map(&normalize_item/1)
    |> Enum.sort(&(Message.compare_items(&1, &2) != :gt))
  end

  defp normalize_item(%{created_at: created_at, id: id})
       when is_integer(created_at) and created_at >= 0 and is_binary(id) and byte_size(id) == 32 do
    %{created_at: created_at, id: id}
  end

  # Items are produced by our own storage layer, so a bad one is a bug —
  # raise rather than return an error tuple.
  defp normalize_item(item) do
    raise ArgumentError, "invalid negentropy item: #{inspect(item)}"
  end

  # Reads :id_list_threshold from opts, silently falling back to the
  # default for missing or non-positive values.
  defp id_list_threshold(opts) do
    case Keyword.get(opts, :id_list_threshold, @default_id_list_threshold) do
      threshold when is_integer(threshold) and threshold > 0 -> threshold
      _other -> @default_id_list_threshold
    end
  end
end
|
||||
349
lib/parrhesia/negentropy/message.ex
Normal file
349
lib/parrhesia/negentropy/message.ex
Normal file
@@ -0,0 +1,349 @@
|
||||
defmodule Parrhesia.Negentropy.Message do
|
||||
@moduledoc """
|
||||
NIP-77 negentropy message codec and helpers.
|
||||
"""
|
||||
|
||||
import Bitwise
|
||||
|
||||
@protocol_version 0x61
|
||||
@id_size 32
|
||||
@fingerprint_size 16
|
||||
@u256_mod 1 <<< 256
|
||||
@zero_id <<0::size(256)>>
|
||||
|
||||
@type item :: %{created_at: non_neg_integer(), id: binary()}
|
||||
@type bound :: :infinity | {non_neg_integer(), binary()}
|
||||
@type range ::
|
||||
%{
|
||||
upper_bound: bound(),
|
||||
mode: :skip | :fingerprint | :id_list,
|
||||
payload: nil | binary() | [binary()]
|
||||
}
|
||||
|
||||
# Negentropy protocol version byte (0x61, per NIP-77).
@spec protocol_version() :: byte()
def protocol_version, do: @protocol_version

# One-byte message advertising the version we speak; sent when the peer
# opened with an unsupported version.
@spec supported_version_message() :: binary()
def supported_version_message, do: <<@protocol_version>>
|
||||
|
||||
@spec decode(binary()) :: {:ok, [range()]} | {:unsupported_version, byte()} | {:error, term()}
# Different leading version byte: tell the caller which version we support.
def decode(<<version, _rest::binary>>) when version != @protocol_version,
  do: {:unsupported_version, @protocol_version}

# Version matches: decode the range list that follows the version byte.
def decode(<<@protocol_version, rest::binary>>) do
  decode_ranges(rest, 0, initial_lower_bound(), [])
end

# Empty binary — not even a version byte.
def decode(_message), do: {:error, :invalid_message}
|
||||
|
||||
# Encodes a range list into a wire message: the version byte followed by
# each encoded range, threading a running timestamp through encode_range
# (bounds appear to be delta-encoded against it — see decode_bound).
# Trailing :skip ranges carry no information and are dropped first.
@spec encode([range()]) :: binary()
def encode(ranges) when is_list(ranges) do
  ranges
  |> drop_trailing_skip_ranges()
  |> Enum.reduce({[@protocol_version], 0}, fn range, {acc, previous_timestamp} ->
    {encoded_range, next_timestamp} = encode_range(range, previous_timestamp)
    # Accumulate as nested iodata; flattened once at the end.
    {[acc, encoded_range], next_timestamp}
  end)
  |> elem(0)
  |> IO.iodata_to_binary()
end
|
||||
|
||||
# Negentropy fingerprint of an item set: the 256-bit little-endian ids are
# summed mod 2^256, the item count is appended as a varint, and the
# SHA-256 of that payload is truncated to 16 bytes (per NIP-77).
@spec fingerprint([item()]) :: binary()
def fingerprint(items) when is_list(items) do
  sum =
    Enum.reduce(items, 0, fn %{id: id}, acc ->
      # Each id must be exactly 32 bytes for this match to succeed.
      <<id_integer::unsigned-little-size(256)>> = id
      rem(acc + id_integer, @u256_mod)
    end)

  payload = [<<sum::unsigned-little-size(256)>>, encode_varint(length(items))]

  payload
  |> IO.iodata_to_binary()
  |> then(&:crypto.hash(:sha256, &1))
  |> binary_part(0, @fingerprint_size)
end
|
||||
|
||||
# Total order over items: by created_at first, then by id bytes.
@spec compare_items(item(), item()) :: :lt | :eq | :gt
def compare_items(left, right) do
  left_key = {left.created_at, left.id}
  right_key = {right.created_at, right.id}

  cond do
    left_key < right_key -> :lt
    left_key > right_key -> :gt
    true -> :eq
  end
end
|
||||
|
||||
# Total order over bounds: :infinity is greater than every concrete
# {timestamp, id} pair; pairs compare by timestamp, then id bytes.
@spec compare_bound(bound(), bound()) :: :lt | :eq | :gt
def compare_bound(:infinity, :infinity), do: :eq
def compare_bound(:infinity, _other), do: :gt
def compare_bound(_other, :infinity), do: :lt

def compare_bound({_, _} = left, {_, _} = right) do
  # Elementwise tuple comparison matches (timestamp, then id) ordering.
  cond do
    left < right -> :lt
    left > right -> :gt
    true -> :eq
  end
end
|
||||
|
||||
# True when `item` lies in the half-open interval [lower_bound, upper_bound).
@spec item_in_range?(item(), bound(), bound()) :: boolean()
def item_in_range?(item, lower_bound, upper_bound) do
  compare_item_to_bound(item, lower_bound) != :lt and
    compare_item_to_bound(item, upper_bound) == :lt
end
|
||||
|
||||
# Lowest possible bound: timestamp 0 with the all-zero id.
@spec initial_lower_bound() :: bound()
def initial_lower_bound, do: {0, @zero_id}

# The all-zero 32-byte id.
@spec zero_id() :: binary()
def zero_id, do: @zero_id
|
||||
|
||||
# Picks a compact bound B with previous_item < B <= next_item, used as the
# split point between two adjacent sorted items. When timestamps differ,
# (next timestamp, zero id) suffices; on equal timestamps the next id is
# truncated to the shortest prefix that still separates the two items and
# zero-padded back to 32 bytes.
@spec split_bound(item(), item()) :: bound()
def split_bound(previous_item, next_item)
    when is_map(previous_item) and is_map(next_item) do
  cond do
    previous_item.created_at < next_item.created_at ->
      {next_item.created_at, @zero_id}

    previous_item.created_at == next_item.created_at ->
      # One byte past the shared id prefix is enough to tell them apart.
      prefix_length = shared_prefix_length(previous_item.id, next_item.id) + 1
      <<prefix::binary-size(prefix_length), _rest::binary>> = next_item.id
      {next_item.created_at, prefix <> :binary.copy(<<0>>, @id_size - prefix_length)}

    true ->
      # Caller must pass items in sorted order.
      raise ArgumentError, "split_bound/2 requires previous_item <= next_item"
  end
end
|
||||
|
||||
defp decode_ranges(<<>>, _previous_timestamp, _lower_bound, ranges),
|
||||
do: {:ok, Enum.reverse(ranges)}
|
||||
|
||||
defp decode_ranges(binary, previous_timestamp, lower_bound, ranges) do
|
||||
with {:ok, upper_bound, rest, next_timestamp} <- decode_bound(binary, previous_timestamp),
|
||||
:ok <- validate_upper_bound(lower_bound, upper_bound),
|
||||
{:ok, mode, payload, tail} <- decode_payload(rest) do
|
||||
next_ranges = [%{upper_bound: upper_bound, mode: mode, payload: payload} | ranges]
|
||||
|
||||
if upper_bound == :infinity and tail != <<>> do
|
||||
{:error, :invalid_message}
|
||||
else
|
||||
decode_ranges(tail, next_timestamp, upper_bound, next_ranges)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
defp validate_upper_bound(lower_bound, upper_bound) do
|
||||
if compare_bound(lower_bound, upper_bound) == :lt do
|
||||
:ok
|
||||
else
|
||||
{:error, :invalid_message}
|
||||
end
|
||||
end
|
||||
|
||||
defp decode_bound(binary, previous_timestamp) do
|
||||
with {:ok, encoded_timestamp, rest} <- decode_varint(binary),
|
||||
{:ok, length, tail} <- decode_varint(rest),
|
||||
:ok <- validate_bound_prefix_length(length),
|
||||
{:ok, prefix, remainder} <- decode_prefix(tail, length) do
|
||||
decode_bound_value(encoded_timestamp, length, prefix, remainder, previous_timestamp)
|
||||
end
|
||||
end
|
||||
|
||||
defp decode_payload(binary) do
|
||||
with {:ok, mode_value, rest} <- decode_varint(binary) do
|
||||
case mode_value do
|
||||
0 ->
|
||||
{:ok, :skip, nil, rest}
|
||||
|
||||
1 ->
|
||||
decode_fingerprint_payload(rest)
|
||||
|
||||
2 ->
|
||||
decode_id_list_payload(rest)
|
||||
|
||||
_other ->
|
||||
{:error, :invalid_message}
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
defp decode_varint(binary), do: decode_varint(binary, 0)
|
||||
|
||||
defp decode_varint(<<>>, _acc), do: {:error, :invalid_message}
|
||||
|
||||
defp decode_varint(<<byte, rest::binary>>, acc) do
|
||||
value = acc * 128 + band(byte, 0x7F)
|
||||
|
||||
if band(byte, 0x80) == 0 do
|
||||
{:ok, value, rest}
|
||||
else
|
||||
decode_varint(rest, value)
|
||||
end
|
||||
end
|
||||
|
||||
defp encode_range(range, previous_timestamp) do
|
||||
{encoded_bound, next_timestamp} = encode_bound(range.upper_bound, previous_timestamp)
|
||||
{mode, payload} = encode_payload(range)
|
||||
{[encoded_bound, mode, payload], next_timestamp}
|
||||
end
|
||||
|
||||
defp encode_bound(:infinity, previous_timestamp),
|
||||
do: {[encode_varint(0), encode_varint(0)], previous_timestamp}
|
||||
|
||||
defp encode_bound({timestamp, id}, previous_timestamp) do
|
||||
prefix_length = id_prefix_length(id)
|
||||
<<prefix::binary-size(prefix_length), _rest::binary>> = id
|
||||
|
||||
{
|
||||
[encode_varint(timestamp - previous_timestamp + 1), encode_varint(prefix_length), prefix],
|
||||
timestamp
|
||||
}
|
||||
end
|
||||
|
||||
defp encode_payload(%{mode: :skip}) do
|
||||
{encode_varint(0), <<>>}
|
||||
end
|
||||
|
||||
defp encode_payload(%{mode: :fingerprint, payload: fingerprint})
|
||||
when is_binary(fingerprint) and byte_size(fingerprint) == @fingerprint_size do
|
||||
{encode_varint(1), fingerprint}
|
||||
end
|
||||
|
||||
defp encode_payload(%{mode: :id_list, payload: ids}) when is_list(ids) do
|
||||
encoded_ids = Enum.map(ids, fn id -> validate_id!(id) end)
|
||||
{encode_varint(2), [encode_varint(length(encoded_ids)), encoded_ids]}
|
||||
end
|
||||
|
||||
defp encode_varint(value) when is_integer(value) and value >= 0 do
|
||||
digits = collect_base128_digits(value, [])
|
||||
last_index = length(digits) - 1
|
||||
|
||||
digits
|
||||
|> Enum.with_index()
|
||||
|> Enum.map(fn {digit, index} ->
|
||||
if index == last_index do
|
||||
digit
|
||||
else
|
||||
digit + 128
|
||||
end
|
||||
end)
|
||||
|> :erlang.list_to_binary()
|
||||
end
|
||||
|
||||
defp collect_base128_digits(value, acc) do
|
||||
quotient = div(value, 128)
|
||||
remainder = rem(value, 128)
|
||||
|
||||
if quotient == 0 do
|
||||
[remainder | acc]
|
||||
else
|
||||
collect_base128_digits(quotient, [remainder | acc])
|
||||
end
|
||||
end
|
||||
|
||||
defp unpack_ids(binary), do: unpack_ids(binary, [])
|
||||
|
||||
defp unpack_ids(<<>>, acc), do: Enum.reverse(acc)
|
||||
|
||||
defp unpack_ids(<<id::binary-size(@id_size), rest::binary>>, acc),
|
||||
do: unpack_ids(rest, [id | acc])
|
||||
|
||||
defp decode_prefix(binary, length) when byte_size(binary) >= length do
|
||||
<<prefix::binary-size(length), rest::binary>> = binary
|
||||
{:ok, prefix, rest}
|
||||
end
|
||||
|
||||
defp decode_prefix(_binary, _length), do: {:error, :invalid_message}
|
||||
|
||||
defp decode_bound_value(0, 0, _prefix, remainder, previous_timestamp),
|
||||
do: {:ok, :infinity, remainder, previous_timestamp}
|
||||
|
||||
defp decode_bound_value(0, _length, _prefix, _remainder, _previous_timestamp),
|
||||
do: {:error, :invalid_message}
|
||||
|
||||
defp decode_bound_value(encoded_timestamp, length, prefix, remainder, previous_timestamp) do
|
||||
timestamp = previous_timestamp + encoded_timestamp - 1
|
||||
id = prefix <> :binary.copy(<<0>>, @id_size - length)
|
||||
{:ok, {timestamp, id}, remainder, timestamp}
|
||||
end
|
||||
|
||||
defp decode_fingerprint_payload(<<fingerprint::binary-size(@fingerprint_size), tail::binary>>),
|
||||
do: {:ok, :fingerprint, fingerprint, tail}
|
||||
|
||||
defp decode_fingerprint_payload(_payload), do: {:error, :invalid_message}
|
||||
|
||||
defp decode_id_list_payload(rest) do
|
||||
with {:ok, count, tail} <- decode_varint(rest),
|
||||
{:ok, ids, remainder} <- decode_id_list_bytes(tail, count) do
|
||||
{:ok, :id_list, ids, remainder}
|
||||
end
|
||||
end
|
||||
|
||||
defp decode_id_list_bytes(tail, count) do
|
||||
expected_bytes = count * @id_size
|
||||
|
||||
if byte_size(tail) >= expected_bytes do
|
||||
<<ids::binary-size(expected_bytes), remainder::binary>> = tail
|
||||
{:ok, unpack_ids(ids), remainder}
|
||||
else
|
||||
{:error, :invalid_message}
|
||||
end
|
||||
end
|
||||
|
||||
defp validate_bound_prefix_length(length)
|
||||
when is_integer(length) and length >= 0 and length <= @id_size,
|
||||
do: :ok
|
||||
|
||||
defp validate_bound_prefix_length(_length), do: {:error, :invalid_message}
|
||||
|
||||
defp id_prefix_length(id) do
|
||||
id
|
||||
|> validate_id!()
|
||||
|> :binary.bin_to_list()
|
||||
|> Enum.reverse()
|
||||
|> Enum.drop_while(&(&1 == 0))
|
||||
|> length()
|
||||
end
|
||||
|
||||
defp shared_prefix_length(left_id, right_id) do
|
||||
left_id = validate_id!(left_id)
|
||||
right_id = validate_id!(right_id)
|
||||
|
||||
left_id
|
||||
|> :binary.bin_to_list()
|
||||
|> Enum.zip(:binary.bin_to_list(right_id))
|
||||
|> Enum.reduce_while(0, fn
|
||||
{left_byte, right_byte}, acc when left_byte == right_byte -> {:cont, acc + 1}
|
||||
_pair, acc -> {:halt, acc}
|
||||
end)
|
||||
end
|
||||
|
||||
defp drop_trailing_skip_ranges(ranges) do
|
||||
ranges
|
||||
|> Enum.reverse()
|
||||
|> Enum.drop_while(fn range -> range.mode == :skip end)
|
||||
|> Enum.reverse()
|
||||
end
|
||||
|
||||
defp compare_item_to_bound(_item, :infinity), do: :lt
|
||||
|
||||
defp compare_item_to_bound(item, {timestamp, id}) do
|
||||
cond do
|
||||
item.created_at < timestamp -> :lt
|
||||
item.created_at > timestamp -> :gt
|
||||
item.id < id -> :lt
|
||||
item.id > id -> :gt
|
||||
true -> :eq
|
||||
end
|
||||
end
|
||||
|
||||
defp validate_id!(id) when is_binary(id) and byte_size(id) == @id_size, do: id
|
||||
|
||||
defp validate_id!(_id) do
|
||||
raise ArgumentError, "negentropy ids must be 32-byte binaries"
|
||||
end
|
||||
end
|
||||
@@ -1,28 +1,43 @@
|
||||
defmodule Parrhesia.Negentropy.Sessions do
|
||||
@moduledoc """
|
||||
In-memory NEG-* session tracking.
|
||||
In-memory NIP-77 session tracking over bounded local event snapshots.
|
||||
"""
|
||||
|
||||
use GenServer
|
||||
|
||||
alias Parrhesia.Negentropy.Engine
|
||||
alias Parrhesia.Storage
|
||||
|
||||
@type session_key :: {pid(), String.t()}
|
||||
|
||||
@default_max_payload_bytes 4096
|
||||
@default_max_sessions_per_owner 8
|
||||
@default_max_total_sessions 10_000
|
||||
@default_max_idle_seconds 60
|
||||
@default_sweep_interval_seconds 10
|
||||
@default_max_items_per_session 50_000
|
||||
@default_id_list_threshold 32
|
||||
@sweep_idle_sessions :sweep_idle_sessions
|
||||
|
||||
@spec start_link(keyword()) :: GenServer.on_start()
|
||||
def start_link(opts \\ []) do
|
||||
name = Keyword.get(opts, :name, __MODULE__)
|
||||
GenServer.start_link(__MODULE__, :ok, name: name)
|
||||
GenServer.start_link(__MODULE__, opts, name: name)
|
||||
end
|
||||
|
||||
@spec open(GenServer.server(), pid(), String.t(), map()) :: {:ok, map()} | {:error, term()}
|
||||
def open(server \\ __MODULE__, owner_pid, subscription_id, params)
|
||||
when is_pid(owner_pid) and is_binary(subscription_id) and is_map(params) do
|
||||
GenServer.call(server, {:open, owner_pid, subscription_id, params})
|
||||
@spec open(GenServer.server(), pid(), String.t(), map(), binary(), keyword()) ::
|
||||
{:ok, binary()} | {:error, term()}
|
||||
def open(server \\ __MODULE__, owner_pid, subscription_id, filter, message, opts \\ [])
|
||||
when is_pid(owner_pid) and is_binary(subscription_id) and is_map(filter) and
|
||||
is_binary(message) and is_list(opts) do
|
||||
GenServer.call(server, {:open, owner_pid, subscription_id, filter, message, opts})
|
||||
end
|
||||
|
||||
@spec message(GenServer.server(), pid(), String.t(), map()) :: {:ok, map()} | {:error, term()}
|
||||
def message(server \\ __MODULE__, owner_pid, subscription_id, payload)
|
||||
when is_pid(owner_pid) and is_binary(subscription_id) and is_map(payload) do
|
||||
GenServer.call(server, {:message, owner_pid, subscription_id, payload})
|
||||
@spec message(GenServer.server(), pid(), String.t(), binary()) ::
|
||||
{:ok, binary()} | {:error, term()}
|
||||
def message(server \\ __MODULE__, owner_pid, subscription_id, message)
|
||||
when is_pid(owner_pid) and is_binary(subscription_id) and is_binary(message) do
|
||||
GenServer.call(server, {:message, owner_pid, subscription_id, message})
|
||||
end
|
||||
|
||||
@spec close(GenServer.server(), pid(), String.t()) :: :ok
|
||||
@@ -32,29 +47,79 @@ defmodule Parrhesia.Negentropy.Sessions do
|
||||
end
|
||||
|
||||
@impl true
|
||||
def init(:ok) do
|
||||
{:ok, %{sessions: %{}, monitors: %{}}}
|
||||
def init(opts) do
|
||||
max_idle_ms =
|
||||
normalize_positive_integer(Keyword.get(opts, :max_idle_seconds), max_idle_seconds()) * 1000
|
||||
|
||||
sweep_interval_ms =
|
||||
normalize_positive_integer(
|
||||
Keyword.get(opts, :sweep_interval_seconds),
|
||||
sweep_interval_seconds()
|
||||
) *
|
||||
1000
|
||||
|
||||
state = %{
|
||||
sessions: %{},
|
||||
monitors: %{},
|
||||
max_payload_bytes:
|
||||
normalize_positive_integer(Keyword.get(opts, :max_payload_bytes), max_payload_bytes()),
|
||||
max_sessions_per_owner:
|
||||
normalize_positive_integer(
|
||||
Keyword.get(opts, :max_sessions_per_owner),
|
||||
max_sessions_per_owner()
|
||||
),
|
||||
max_total_sessions:
|
||||
normalize_positive_integer(Keyword.get(opts, :max_total_sessions), max_total_sessions()),
|
||||
max_idle_ms: max_idle_ms,
|
||||
sweep_interval_ms: sweep_interval_ms,
|
||||
max_items_per_session:
|
||||
normalize_positive_integer(
|
||||
Keyword.get(opts, :max_items_per_session),
|
||||
max_items_per_session()
|
||||
),
|
||||
id_list_threshold:
|
||||
normalize_positive_integer(
|
||||
Keyword.get(opts, :id_list_threshold),
|
||||
id_list_threshold()
|
||||
)
|
||||
}
|
||||
|
||||
:ok = schedule_idle_sweep(sweep_interval_ms)
|
||||
|
||||
{:ok, state}
|
||||
end
|
||||
|
||||
@impl true
|
||||
def handle_call({:open, owner_pid, subscription_id, params}, _from, state) do
|
||||
def handle_call({:open, owner_pid, subscription_id, filter, message, opts}, _from, state) do
|
||||
key = {owner_pid, subscription_id}
|
||||
|
||||
session = %{
|
||||
cursor: 0,
|
||||
params: params,
|
||||
opened_at: System.system_time(:second)
|
||||
}
|
||||
with :ok <- validate_payload_size(filter, message, state.max_payload_bytes),
|
||||
:ok <- enforce_session_limits(state, owner_pid, key),
|
||||
{:ok, refs} <- fetch_event_refs(filter, opts, state.max_items_per_session),
|
||||
{:ok, response} <-
|
||||
Engine.answer(refs, message, id_list_threshold: state.id_list_threshold) do
|
||||
now_ms = System.monotonic_time(:millisecond)
|
||||
|
||||
state =
|
||||
state
|
||||
|> ensure_monitor(owner_pid)
|
||||
|> put_in([:sessions, key], session)
|
||||
session = %{
|
||||
filter: filter,
|
||||
refs: refs,
|
||||
opened_at: System.system_time(:second),
|
||||
last_active_at_ms: now_ms
|
||||
}
|
||||
|
||||
{:reply, {:ok, %{"status" => "open", "cursor" => 0}}, state}
|
||||
state =
|
||||
state
|
||||
|> ensure_monitor(owner_pid)
|
||||
|> put_in([:sessions, key], session)
|
||||
|
||||
{:reply, {:ok, response}, state}
|
||||
else
|
||||
{:error, reason} ->
|
||||
{:reply, {:error, reason}, state}
|
||||
end
|
||||
end
|
||||
|
||||
def handle_call({:message, owner_pid, subscription_id, payload}, _from, state) do
|
||||
def handle_call({:message, owner_pid, subscription_id, message}, _from, state) do
|
||||
key = {owner_pid, subscription_id}
|
||||
|
||||
case Map.get(state.sessions, key) do
|
||||
@@ -62,22 +127,66 @@ defmodule Parrhesia.Negentropy.Sessions do
|
||||
{:reply, {:error, :unknown_session}, state}
|
||||
|
||||
session ->
|
||||
cursor = session.cursor + 1
|
||||
with :ok <- validate_payload_size(session.filter, message, state.max_payload_bytes),
|
||||
{:ok, response} <-
|
||||
Engine.answer(session.refs, message, id_list_threshold: state.id_list_threshold) do
|
||||
next_session = %{
|
||||
session
|
||||
| last_active_at_ms: System.monotonic_time(:millisecond)
|
||||
}
|
||||
|
||||
next_session = %{session | cursor: cursor, params: Map.merge(session.params, payload)}
|
||||
state = put_in(state, [:sessions, key], next_session)
|
||||
state = put_in(state, [:sessions, key], next_session)
|
||||
|
||||
{:reply, {:ok, %{"status" => "ack", "cursor" => cursor}}, state}
|
||||
{:reply, {:ok, response}, state}
|
||||
else
|
||||
{:error, reason} ->
|
||||
{:reply, {:error, reason}, state}
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def handle_call({:close, owner_pid, subscription_id}, _from, state) do
|
||||
key = {owner_pid, subscription_id}
|
||||
state = update_in(state.sessions, &Map.delete(&1, key))
|
||||
|
||||
state =
|
||||
state
|
||||
|> update_in([:sessions], &Map.delete(&1, key))
|
||||
|> maybe_remove_monitor_if_owner_has_no_sessions(owner_pid)
|
||||
|
||||
{:reply, :ok, state}
|
||||
end
|
||||
|
||||
@impl true
|
||||
def handle_info(@sweep_idle_sessions, state) do
|
||||
now_ms = System.monotonic_time(:millisecond)
|
||||
|
||||
sessions =
|
||||
Enum.reduce(state.sessions, %{}, fn {key, session}, acc ->
|
||||
idle_ms = now_ms - Map.get(session, :last_active_at_ms, now_ms)
|
||||
|
||||
if idle_ms >= state.max_idle_ms do
|
||||
acc
|
||||
else
|
||||
Map.put(acc, key, session)
|
||||
end
|
||||
end)
|
||||
|
||||
owner_pids =
|
||||
sessions
|
||||
|> Map.keys()
|
||||
|> Enum.map(fn {owner_pid, _subscription_id} -> owner_pid end)
|
||||
|> MapSet.new()
|
||||
|
||||
state =
|
||||
state
|
||||
|> Map.put(:sessions, sessions)
|
||||
|> clear_monitors_without_sessions(owner_pids)
|
||||
|
||||
:ok = schedule_idle_sweep(state.sweep_interval_ms)
|
||||
|
||||
{:noreply, state}
|
||||
end
|
||||
|
||||
def handle_info({:DOWN, monitor_ref, :process, owner_pid, _reason}, state) do
|
||||
case Map.get(state.monitors, owner_pid) do
|
||||
^monitor_ref ->
|
||||
@@ -95,6 +204,31 @@ defmodule Parrhesia.Negentropy.Sessions do
|
||||
|
||||
def handle_info(_message, state), do: {:noreply, state}
|
||||
|
||||
defp fetch_event_refs(filter, opts, max_items_per_session) do
|
||||
query_opts =
|
||||
opts
|
||||
|> Keyword.take([:now, :requester_pubkeys])
|
||||
|> Keyword.put(:limit, max_items_per_session + 1)
|
||||
|
||||
with {:ok, refs} <- Storage.events().query_event_refs(%{}, [filter], query_opts) do
|
||||
if length(refs) > max_items_per_session do
|
||||
{:error, :query_too_big}
|
||||
else
|
||||
{:ok, refs}
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
defp clear_monitors_without_sessions(state, owner_pids) do
|
||||
Enum.reduce(Map.keys(state.monitors), state, fn owner_pid, acc ->
|
||||
if MapSet.member?(owner_pids, owner_pid) do
|
||||
acc
|
||||
else
|
||||
maybe_remove_monitor(acc, owner_pid)
|
||||
end
|
||||
end)
|
||||
end
|
||||
|
||||
defp remove_owner_sessions(state, owner_pid) do
|
||||
update_in(state.sessions, fn sessions ->
|
||||
sessions
|
||||
@@ -103,6 +237,39 @@ defmodule Parrhesia.Negentropy.Sessions do
|
||||
end)
|
||||
end
|
||||
|
||||
defp validate_payload_size(filter, message, max_payload_bytes) do
|
||||
if :erlang.external_size({filter, message}) <= max_payload_bytes do
|
||||
:ok
|
||||
else
|
||||
{:error, :payload_too_large}
|
||||
end
|
||||
end
|
||||
|
||||
defp enforce_session_limits(state, owner_pid, key) do
|
||||
if Map.has_key?(state.sessions, key) do
|
||||
:ok
|
||||
else
|
||||
total_sessions = map_size(state.sessions)
|
||||
|
||||
cond do
|
||||
total_sessions >= state.max_total_sessions ->
|
||||
{:error, :session_limit_reached}
|
||||
|
||||
owner_session_count(state.sessions, owner_pid) >= state.max_sessions_per_owner ->
|
||||
{:error, :owner_session_limit_reached}
|
||||
|
||||
true ->
|
||||
:ok
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
defp owner_session_count(sessions, owner_pid) do
|
||||
Enum.count(sessions, fn {{session_owner, _subscription_id}, _session} ->
|
||||
session_owner == owner_pid
|
||||
end)
|
||||
end
|
||||
|
||||
defp ensure_monitor(state, owner_pid) do
|
||||
case Map.has_key?(state.monitors, owner_pid) do
|
||||
true -> state
|
||||
@@ -110,6 +277,14 @@ defmodule Parrhesia.Negentropy.Sessions do
|
||||
end
|
||||
end
|
||||
|
||||
defp maybe_remove_monitor_if_owner_has_no_sessions(state, owner_pid) do
|
||||
if owner_session_count(state.sessions, owner_pid) == 0 do
|
||||
maybe_remove_monitor(state, owner_pid)
|
||||
else
|
||||
state
|
||||
end
|
||||
end
|
||||
|
||||
defp maybe_remove_monitor(state, owner_pid) do
|
||||
{monitor_ref, monitors} = Map.pop(state.monitors, owner_pid)
|
||||
|
||||
@@ -119,4 +294,56 @@ defmodule Parrhesia.Negentropy.Sessions do
|
||||
|
||||
Map.put(state, :monitors, monitors)
|
||||
end
|
||||
|
||||
defp schedule_idle_sweep(sweep_interval_ms) do
|
||||
_timer_ref = Process.send_after(self(), @sweep_idle_sessions, sweep_interval_ms)
|
||||
:ok
|
||||
end
|
||||
|
||||
defp max_payload_bytes do
|
||||
:parrhesia
|
||||
|> Application.get_env(:limits, [])
|
||||
|> Keyword.get(:max_negentropy_payload_bytes, @default_max_payload_bytes)
|
||||
end
|
||||
|
||||
defp max_sessions_per_owner do
|
||||
:parrhesia
|
||||
|> Application.get_env(:limits, [])
|
||||
|> Keyword.get(:max_negentropy_sessions_per_connection, @default_max_sessions_per_owner)
|
||||
end
|
||||
|
||||
defp max_total_sessions do
|
||||
:parrhesia
|
||||
|> Application.get_env(:limits, [])
|
||||
|> Keyword.get(:max_negentropy_total_sessions, @default_max_total_sessions)
|
||||
end
|
||||
|
||||
defp max_idle_seconds do
|
||||
:parrhesia
|
||||
|> Application.get_env(:limits, [])
|
||||
|> Keyword.get(:negentropy_session_idle_timeout_seconds, @default_max_idle_seconds)
|
||||
end
|
||||
|
||||
defp sweep_interval_seconds do
|
||||
:parrhesia
|
||||
|> Application.get_env(:limits, [])
|
||||
|> Keyword.get(:negentropy_session_sweep_interval_seconds, @default_sweep_interval_seconds)
|
||||
end
|
||||
|
||||
defp max_items_per_session do
|
||||
:parrhesia
|
||||
|> Application.get_env(:limits, [])
|
||||
|> Keyword.get(:max_negentropy_items_per_session, @default_max_items_per_session)
|
||||
end
|
||||
|
||||
defp id_list_threshold do
|
||||
:parrhesia
|
||||
|> Application.get_env(:limits, [])
|
||||
|> Keyword.get(:negentropy_id_list_threshold, @default_id_list_threshold)
|
||||
end
|
||||
|
||||
defp normalize_positive_integer(value, _default) when is_integer(value) and value > 0,
|
||||
do: value
|
||||
|
||||
defp normalize_positive_integer(_value, default), do: default
|
||||
end
|
||||
|
||||
68
lib/parrhesia/policy/connection_policy.ex
Normal file
68
lib/parrhesia/policy/connection_policy.ex
Normal file
@@ -0,0 +1,68 @@
|
||||
defmodule Parrhesia.Policy.ConnectionPolicy do
|
||||
@moduledoc """
|
||||
Connection/session-level policy checks shared by websocket and management entrypoints.
|
||||
"""
|
||||
|
||||
alias Parrhesia.Storage
|
||||
|
||||
@spec authorize_remote_ip(tuple() | String.t() | nil) :: :ok | {:error, :ip_blocked}
|
||||
def authorize_remote_ip(remote_ip) do
|
||||
case normalize_ip(remote_ip) do
|
||||
nil ->
|
||||
:ok
|
||||
|
||||
normalized_ip ->
|
||||
case Storage.moderation().ip_blocked?(%{}, normalized_ip) do
|
||||
{:ok, true} -> {:error, :ip_blocked}
|
||||
_other -> :ok
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
@spec authorize_authenticated_pubkey(String.t()) :: :ok | {:error, :pubkey_not_allowed}
|
||||
def authorize_authenticated_pubkey(pubkey) when is_binary(pubkey) do
|
||||
if allowlist_active?() do
|
||||
case Storage.moderation().pubkey_allowed?(%{}, pubkey) do
|
||||
{:ok, true} -> :ok
|
||||
_other -> {:error, :pubkey_not_allowed}
|
||||
end
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@spec authorize_authenticated_pubkeys(MapSet.t(String.t())) ::
|
||||
:ok | {:error, :auth_required | :pubkey_not_allowed}
|
||||
def authorize_authenticated_pubkeys(authenticated_pubkeys) do
|
||||
if allowlist_active?() do
|
||||
cond do
|
||||
MapSet.size(authenticated_pubkeys) == 0 ->
|
||||
{:error, :auth_required}
|
||||
|
||||
Enum.any?(authenticated_pubkeys, &(authorize_authenticated_pubkey(&1) == :ok)) ->
|
||||
:ok
|
||||
|
||||
true ->
|
||||
{:error, :pubkey_not_allowed}
|
||||
end
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
defp allowlist_active? do
|
||||
case Storage.moderation().has_allowed_pubkeys?(%{}) do
|
||||
{:ok, true} -> true
|
||||
_other -> false
|
||||
end
|
||||
end
|
||||
|
||||
defp normalize_ip(nil), do: nil
|
||||
defp normalize_ip({_, _, _, _} = remote_ip), do: :inet.ntoa(remote_ip) |> to_string()
|
||||
|
||||
defp normalize_ip({_, _, _, _, _, _, _, _} = remote_ip),
|
||||
do: :inet.ntoa(remote_ip) |> to_string()
|
||||
|
||||
defp normalize_ip(remote_ip) when is_binary(remote_ip), do: remote_ip
|
||||
defp normalize_ip(_remote_ip), do: nil
|
||||
end
|
||||
@@ -3,11 +3,17 @@ defmodule Parrhesia.Policy.EventPolicy do
|
||||
Write/read policy checks for relay operations.
|
||||
"""
|
||||
|
||||
alias Parrhesia.API.ACL
|
||||
alias Parrhesia.API.RequestContext
|
||||
alias Parrhesia.Policy.ConnectionPolicy
|
||||
alias Parrhesia.Storage
|
||||
|
||||
@type policy_error ::
|
||||
:auth_required
|
||||
| :pubkey_not_allowed
|
||||
| :restricted_giftwrap
|
||||
| :sync_read_not_allowed
|
||||
| :sync_write_not_allowed
|
||||
| :marmot_group_h_tag_required
|
||||
| :marmot_group_h_values_exceeded
|
||||
| :marmot_group_filter_window_too_wide
|
||||
@@ -33,15 +39,31 @@ defmodule Parrhesia.Policy.EventPolicy do
|
||||
|
||||
@spec authorize_read([map()], MapSet.t(String.t())) :: :ok | {:error, policy_error()}
|
||||
def authorize_read(filters, authenticated_pubkeys) when is_list(filters) do
|
||||
authorize_read(filters, authenticated_pubkeys, request_context(authenticated_pubkeys))
|
||||
end
|
||||
|
||||
@spec authorize_read([map()], MapSet.t(String.t()), RequestContext.t()) ::
|
||||
:ok | {:error, policy_error()}
|
||||
def authorize_read(filters, authenticated_pubkeys, %RequestContext{} = context)
|
||||
when is_list(filters) do
|
||||
auth_required? = config_bool([:policies, :auth_required_for_reads], false)
|
||||
|
||||
cond do
|
||||
match?(
|
||||
{:error, _reason},
|
||||
ConnectionPolicy.authorize_authenticated_pubkeys(authenticated_pubkeys)
|
||||
) ->
|
||||
ConnectionPolicy.authorize_authenticated_pubkeys(authenticated_pubkeys)
|
||||
|
||||
auth_required? and MapSet.size(authenticated_pubkeys) == 0 ->
|
||||
{:error, :auth_required}
|
||||
|
||||
giftwrap_restricted?(filters, authenticated_pubkeys) ->
|
||||
{:error, :restricted_giftwrap}
|
||||
|
||||
match?({:error, _reason}, authorize_sync_reads(filters, context)) ->
|
||||
authorize_sync_reads(filters, context)
|
||||
|
||||
true ->
|
||||
enforce_marmot_group_read_guardrails(filters)
|
||||
end
|
||||
@@ -49,8 +71,17 @@ defmodule Parrhesia.Policy.EventPolicy do
|
||||
|
||||
@spec authorize_write(map(), MapSet.t(String.t())) :: :ok | {:error, policy_error()}
|
||||
def authorize_write(event, authenticated_pubkeys) when is_map(event) do
|
||||
authorize_write(event, authenticated_pubkeys, request_context(authenticated_pubkeys))
|
||||
end
|
||||
|
||||
@spec authorize_write(map(), MapSet.t(String.t()), RequestContext.t()) ::
|
||||
:ok | {:error, policy_error()}
|
||||
def authorize_write(event, authenticated_pubkeys, %RequestContext{} = context)
|
||||
when is_map(event) do
|
||||
checks = [
|
||||
fn -> ConnectionPolicy.authorize_authenticated_pubkeys(authenticated_pubkeys) end,
|
||||
fn -> maybe_require_auth_for_write(authenticated_pubkeys) end,
|
||||
fn -> authorize_sync_write(event, context) end,
|
||||
fn -> reject_if_pubkey_banned(event) end,
|
||||
fn -> reject_if_event_banned(event) end,
|
||||
fn -> enforce_pow(event) end,
|
||||
@@ -69,10 +100,17 @@ defmodule Parrhesia.Policy.EventPolicy do
|
||||
|
||||
@spec error_message(policy_error()) :: String.t()
|
||||
def error_message(:auth_required), do: "auth-required: authentication required"
|
||||
def error_message(:pubkey_not_allowed), do: "restricted: authenticated pubkey is not allowed"
|
||||
|
||||
def error_message(:restricted_giftwrap),
|
||||
do: "restricted: giftwrap access requires recipient authentication"
|
||||
|
||||
def error_message(:sync_read_not_allowed),
|
||||
do: "restricted: sync read not allowed for authenticated pubkey"
|
||||
|
||||
def error_message(:sync_write_not_allowed),
|
||||
do: "restricted: sync write not allowed for authenticated pubkey"
|
||||
|
||||
def error_message(:marmot_group_h_tag_required),
|
||||
do: "restricted: kind 445 queries must include a #h tag"
|
||||
|
||||
@@ -143,6 +181,19 @@ defmodule Parrhesia.Policy.EventPolicy do
|
||||
end
|
||||
end
|
||||
|
||||
defp authorize_sync_reads(filters, %RequestContext{} = context) do
|
||||
Enum.reduce_while(filters, :ok, fn filter, :ok ->
|
||||
case ACL.check(:sync_read, filter, context: context) do
|
||||
:ok -> {:cont, :ok}
|
||||
{:error, reason} -> {:halt, {:error, reason}}
|
||||
end
|
||||
end)
|
||||
end
|
||||
|
||||
defp authorize_sync_write(event, %RequestContext{} = context) do
|
||||
ACL.check(:sync_write, event, context: context)
|
||||
end
|
||||
|
||||
defp giftwrap_restricted?(filters, authenticated_pubkeys) do
|
||||
if MapSet.size(authenticated_pubkeys) == 0 do
|
||||
any_filter_targets_giftwrap?(filters)
|
||||
@@ -672,4 +723,8 @@ defmodule Parrhesia.Policy.EventPolicy do
|
||||
default
|
||||
end
|
||||
end
|
||||
|
||||
defp request_context(authenticated_pubkeys) do
|
||||
%RequestContext{authenticated_pubkeys: authenticated_pubkeys}
|
||||
end
|
||||
end
|
||||
|
||||
1
lib/parrhesia/postgres_types.ex
Normal file
1
lib/parrhesia/postgres_types.ex
Normal file
@@ -0,0 +1 @@
|
||||
Postgrex.Types.define(Parrhesia.PostgresTypes, [], json: JSON)
|
||||
@@ -14,8 +14,8 @@ defmodule Parrhesia.Protocol do
|
||||
| {:close, String.t()}
|
||||
| {:auth, event()}
|
||||
| {:count, String.t(), [filter()], map()}
|
||||
| {:neg_open, String.t(), map()}
|
||||
| {:neg_msg, String.t(), map()}
|
||||
| {:neg_open, String.t(), filter(), binary()}
|
||||
| {:neg_msg, String.t(), binary()}
|
||||
| {:neg_close, String.t()}
|
||||
|
||||
@type relay_message ::
|
||||
@@ -26,7 +26,8 @@ defmodule Parrhesia.Protocol do
|
||||
| {:event, String.t(), event()}
|
||||
| {:auth, String.t()}
|
||||
| {:count, String.t(), map()}
|
||||
| {:neg_msg, String.t(), map()}
|
||||
| {:neg_msg, String.t(), String.t()}
|
||||
| {:neg_err, String.t(), String.t()}
|
||||
|
||||
@type decode_error ::
|
||||
:invalid_json
|
||||
@@ -122,21 +123,25 @@ defmodule Parrhesia.Protocol do
|
||||
|
||||
defp decode_message(["AUTH", _invalid]), do: {:error, :invalid_auth}
|
||||
|
||||
defp decode_message(["NEG-OPEN", subscription_id, payload])
|
||||
when is_binary(subscription_id) and is_map(payload) do
|
||||
if valid_subscription_id?(subscription_id) do
|
||||
{:ok, {:neg_open, subscription_id, payload}}
|
||||
defp decode_message(["NEG-OPEN", subscription_id, filter, initial_message])
|
||||
when is_binary(subscription_id) and is_map(filter) and is_binary(initial_message) do
|
||||
with true <- valid_subscription_id?(subscription_id),
|
||||
{:ok, decoded_message} <- decode_negentropy_hex(initial_message) do
|
||||
{:ok, {:neg_open, subscription_id, filter, decoded_message}}
|
||||
else
|
||||
{:error, :invalid_subscription_id}
|
||||
false -> {:error, :invalid_subscription_id}
|
||||
{:error, _reason} -> {:error, :invalid_negentropy}
|
||||
end
|
||||
end
|
||||
|
||||
defp decode_message(["NEG-MSG", subscription_id, payload])
|
||||
when is_binary(subscription_id) and is_map(payload) do
|
||||
if valid_subscription_id?(subscription_id) do
|
||||
{:ok, {:neg_msg, subscription_id, payload}}
|
||||
when is_binary(subscription_id) and is_binary(payload) do
|
||||
with true <- valid_subscription_id?(subscription_id),
|
||||
{:ok, decoded_payload} <- decode_negentropy_hex(payload) do
|
||||
{:ok, {:neg_msg, subscription_id, decoded_payload}}
|
||||
else
|
||||
{:error, :invalid_subscription_id}
|
||||
false -> {:error, :invalid_subscription_id}
|
||||
{:error, _reason} -> {:error, :invalid_negentropy}
|
||||
end
|
||||
end
|
||||
|
||||
@@ -215,7 +220,19 @@ defmodule Parrhesia.Protocol do
|
||||
defp relay_frame({:neg_msg, subscription_id, payload}),
|
||||
do: ["NEG-MSG", subscription_id, payload]
|
||||
|
||||
defp relay_frame({:neg_err, subscription_id, reason}),
|
||||
do: ["NEG-ERR", subscription_id, reason]
|
||||
|
||||
defp valid_subscription_id?(subscription_id) do
|
||||
subscription_id != "" and String.length(subscription_id) <= 64
|
||||
end
|
||||
|
||||
defp decode_negentropy_hex(payload) when is_binary(payload) and payload != "" do
|
||||
case Base.decode16(payload, case: :mixed) do
|
||||
{:ok, decoded} when decoded != <<>> -> {:ok, decoded}
|
||||
_other -> {:error, :invalid_negentropy}
|
||||
end
|
||||
end
|
||||
|
||||
defp decode_negentropy_hex(_payload), do: {:error, :invalid_negentropy}
|
||||
end
|
||||
|
||||
@@ -21,6 +21,7 @@ defmodule Parrhesia.Protocol.EventValidator do
|
||||
| :invalid_content
|
||||
| :invalid_sig
|
||||
| :invalid_id_hash
|
||||
| :invalid_signature
|
||||
| :invalid_marmot_keypackage_content
|
||||
| :missing_marmot_encoding_tag
|
||||
| :invalid_marmot_encoding_tag
|
||||
@@ -54,7 +55,8 @@ defmodule Parrhesia.Protocol.EventValidator do
|
||||
:ok <- validate_tags(event["tags"]),
|
||||
:ok <- validate_content(event["content"]),
|
||||
:ok <- validate_sig(event["sig"]),
|
||||
:ok <- validate_id_hash(event) do
|
||||
:ok <- validate_id_hash(event),
|
||||
:ok <- validate_signature(event) do
|
||||
validate_kind_specific(event)
|
||||
end
|
||||
end
|
||||
@@ -89,6 +91,7 @@ defmodule Parrhesia.Protocol.EventValidator do
|
||||
invalid_content: "invalid: content must be a string",
|
||||
invalid_sig: "invalid: sig must be 64-byte lowercase hex",
|
||||
invalid_id_hash: "invalid: event id does not match serialized event",
|
||||
invalid_signature: "invalid: event signature is invalid",
|
||||
invalid_marmot_keypackage_content: "invalid: kind 443 content must be non-empty base64",
|
||||
missing_marmot_encoding_tag: "invalid: kind 443 must include [\"encoding\", \"base64\"]",
|
||||
invalid_marmot_encoding_tag: "invalid: kind 443 must include [\"encoding\", \"base64\"]",
|
||||
@@ -193,6 +196,29 @@ defmodule Parrhesia.Protocol.EventValidator do
|
||||
end
|
||||
end
|
||||
|
||||
defp validate_signature(event) do
|
||||
if verify_event_signatures?() do
|
||||
verify_signature(event)
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
defp verify_signature(%{"id" => id, "pubkey" => pubkey, "sig" => sig}) do
|
||||
with {:ok, id_bin} <- Base.decode16(id, case: :lower),
|
||||
{:ok, pubkey_bin} <- Base.decode16(pubkey, case: :lower),
|
||||
{:ok, sig_bin} <- Base.decode16(sig, case: :lower),
|
||||
true <- Secp256k1.schnorr_valid?(sig_bin, id_bin, pubkey_bin) do
|
||||
:ok
|
||||
else
|
||||
_other -> {:error, :invalid_signature}
|
||||
end
|
||||
rescue
|
||||
_error -> {:error, :invalid_signature}
|
||||
end
|
||||
|
||||
defp verify_signature(_event), do: {:error, :invalid_signature}
|
||||
|
||||
defp valid_tag?(tag) when is_list(tag) do
|
||||
tag != [] and Enum.all?(tag, &is_binary/1)
|
||||
end
|
||||
@@ -473,6 +499,12 @@ defmodule Parrhesia.Protocol.EventValidator do
|
||||
match?({:ok, _decoded}, Base.decode16(value, case: :lower))
|
||||
end
|
||||
|
||||
defp verify_event_signatures? do
|
||||
:parrhesia
|
||||
|> Application.get_env(:features, [])
|
||||
|> Keyword.get(:verify_event_signatures, true)
|
||||
end
|
||||
|
||||
defp max_event_future_skew_seconds do
|
||||
:parrhesia
|
||||
|> Application.get_env(:limits, [])
|
||||
|
||||
35
lib/parrhesia/release.ex
Normal file
35
lib/parrhesia/release.ex
Normal file
@@ -0,0 +1,35 @@
|
||||
defmodule Parrhesia.Release do
|
||||
@moduledoc """
|
||||
Helpers for running Ecto tasks from a production release.
|
||||
"""
|
||||
|
||||
@app :parrhesia
|
||||
|
||||
def migrate do
|
||||
load_app()
|
||||
|
||||
for repo <- repos() do
|
||||
{:ok, _, _} =
|
||||
Ecto.Migrator.with_repo(repo, fn repo ->
|
||||
Ecto.Migrator.run(repo, :up, all: true)
|
||||
end)
|
||||
end
|
||||
end
|
||||
|
||||
def rollback(repo, version) when is_atom(repo) and is_integer(version) do
|
||||
load_app()
|
||||
|
||||
{:ok, _, _} =
|
||||
Ecto.Migrator.with_repo(repo, fn repo ->
|
||||
Ecto.Migrator.run(repo, :down, to: version)
|
||||
end)
|
||||
end
|
||||
|
||||
defp load_app do
|
||||
Application.load(@app)
|
||||
end
|
||||
|
||||
defp repos do
|
||||
Application.fetch_env!(@app, :ecto_repos)
|
||||
end
|
||||
end
|
||||
@@ -8,6 +8,7 @@ defmodule Parrhesia.Storage do
|
||||
|
||||
@default_modules [
|
||||
events: Parrhesia.Storage.Adapters.Postgres.Events,
|
||||
acl: Parrhesia.Storage.Adapters.Postgres.ACL,
|
||||
moderation: Parrhesia.Storage.Adapters.Postgres.Moderation,
|
||||
groups: Parrhesia.Storage.Adapters.Postgres.Groups,
|
||||
admin: Parrhesia.Storage.Adapters.Postgres.Admin
|
||||
@@ -19,6 +20,9 @@ defmodule Parrhesia.Storage do
|
||||
@spec moderation() :: module()
|
||||
def moderation, do: fetch_module!(:moderation, Parrhesia.Storage.Moderation)
|
||||
|
||||
@spec acl() :: module()
|
||||
def acl, do: fetch_module!(:acl, Parrhesia.Storage.ACL)
|
||||
|
||||
@spec groups() :: module()
|
||||
def groups, do: fetch_module!(:groups, Parrhesia.Storage.Groups)
|
||||
|
||||
|
||||
14
lib/parrhesia/storage/acl.ex
Normal file
14
lib/parrhesia/storage/acl.ex
Normal file
@@ -0,0 +1,14 @@
|
||||
defmodule Parrhesia.Storage.ACL do
|
||||
@moduledoc """
|
||||
Storage callbacks for persisted ACL rules.
|
||||
"""
|
||||
|
||||
@type context :: map()
|
||||
@type rule :: map()
|
||||
@type opts :: keyword()
|
||||
@type reason :: term()
|
||||
|
||||
@callback put_rule(context(), rule()) :: {:ok, rule()} | {:error, reason()}
|
||||
@callback delete_rule(context(), map()) :: :ok | {:error, reason()}
|
||||
@callback list_rules(context(), opts()) :: {:ok, [rule()]} | {:error, reason()}
|
||||
end
|
||||
157
lib/parrhesia/storage/adapters/memory/acl.ex
Normal file
157
lib/parrhesia/storage/adapters/memory/acl.ex
Normal file
@@ -0,0 +1,157 @@
|
||||
defmodule Parrhesia.Storage.Adapters.Memory.ACL do
|
||||
@moduledoc """
|
||||
In-memory prototype adapter for `Parrhesia.Storage.ACL`.
|
||||
"""
|
||||
|
||||
alias Parrhesia.Storage.Adapters.Memory.Store
|
||||
|
||||
@behaviour Parrhesia.Storage.ACL
|
||||
|
||||
@impl true
|
||||
def put_rule(_context, rule) when is_map(rule) do
|
||||
with {:ok, normalized_rule} <- normalize_rule(rule) do
|
||||
Store.get_and_update(fn state -> put_rule_in_state(state, normalized_rule) end)
|
||||
end
|
||||
end
|
||||
|
||||
def put_rule(_context, _rule), do: {:error, :invalid_acl_rule}
|
||||
|
||||
@impl true
|
||||
def delete_rule(_context, selector) when is_map(selector) do
|
||||
case normalize_delete_selector(selector) do
|
||||
{:ok, {:id, id}} ->
|
||||
Store.update(fn state ->
|
||||
%{state | acl_rules: Enum.reject(state.acl_rules, &(&1.id == id))}
|
||||
end)
|
||||
|
||||
:ok
|
||||
|
||||
{:ok, {:exact, rule}} ->
|
||||
Store.update(fn state ->
|
||||
%{state | acl_rules: Enum.reject(state.acl_rules, &same_rule?(&1, rule))}
|
||||
end)
|
||||
|
||||
:ok
|
||||
|
||||
{:error, reason} ->
|
||||
{:error, reason}
|
||||
end
|
||||
end
|
||||
|
||||
def delete_rule(_context, _selector), do: {:error, :invalid_acl_rule}
|
||||
|
||||
@impl true
|
||||
def list_rules(_context, opts) when is_list(opts) do
|
||||
rules =
|
||||
Store.get(fn state -> Enum.reverse(state.acl_rules) end)
|
||||
|> Enum.filter(fn rule ->
|
||||
matches_principal_type?(rule, Keyword.get(opts, :principal_type)) and
|
||||
matches_principal?(rule, Keyword.get(opts, :principal)) and
|
||||
matches_capability?(rule, Keyword.get(opts, :capability))
|
||||
end)
|
||||
|
||||
{:ok, rules}
|
||||
end
|
||||
|
||||
def list_rules(_context, _opts), do: {:error, :invalid_opts}
|
||||
|
||||
defp put_rule_in_state(state, normalized_rule) do
|
||||
case Enum.find(state.acl_rules, &same_rule?(&1, normalized_rule)) do
|
||||
nil ->
|
||||
next_id = state.next_acl_rule_id
|
||||
persisted_rule = Map.put(normalized_rule, :id, next_id)
|
||||
|
||||
{{:ok, persisted_rule},
|
||||
%{
|
||||
state
|
||||
| acl_rules: [persisted_rule | state.acl_rules],
|
||||
next_acl_rule_id: next_id + 1
|
||||
}}
|
||||
|
||||
existing_rule ->
|
||||
{{:ok, existing_rule}, state}
|
||||
end
|
||||
end
|
||||
|
||||
defp matches_principal_type?(_rule, nil), do: true
|
||||
defp matches_principal_type?(rule, principal_type), do: rule.principal_type == principal_type
|
||||
|
||||
defp matches_principal?(_rule, nil), do: true
|
||||
defp matches_principal?(rule, principal), do: rule.principal == principal
|
||||
|
||||
defp matches_capability?(_rule, nil), do: true
|
||||
defp matches_capability?(rule, capability), do: rule.capability == capability
|
||||
|
||||
defp same_rule?(left, right) do
|
||||
left.principal_type == right.principal_type and
|
||||
left.principal == right.principal and
|
||||
left.capability == right.capability and
|
||||
left.match == right.match
|
||||
end
|
||||
|
||||
defp normalize_delete_selector(%{"id" => id}), do: normalize_delete_selector(%{id: id})
|
||||
|
||||
defp normalize_delete_selector(%{id: id}) when is_integer(id) and id > 0,
|
||||
do: {:ok, {:id, id}}
|
||||
|
||||
defp normalize_delete_selector(selector) do
|
||||
case normalize_rule(selector) do
|
||||
{:ok, rule} -> {:ok, {:exact, rule}}
|
||||
{:error, reason} -> {:error, reason}
|
||||
end
|
||||
end
|
||||
|
||||
defp normalize_rule(rule) when is_map(rule) do
|
||||
with {:ok, principal_type} <- normalize_principal_type(fetch(rule, :principal_type)),
|
||||
{:ok, principal} <- normalize_principal(fetch(rule, :principal)),
|
||||
{:ok, capability} <- normalize_capability(fetch(rule, :capability)),
|
||||
{:ok, match} <- normalize_match(fetch(rule, :match)) do
|
||||
{:ok,
|
||||
%{
|
||||
principal_type: principal_type,
|
||||
principal: principal,
|
||||
capability: capability,
|
||||
match: match
|
||||
}}
|
||||
end
|
||||
end
|
||||
|
||||
defp normalize_rule(_rule), do: {:error, :invalid_acl_rule}
|
||||
|
||||
defp normalize_principal_type(:pubkey), do: {:ok, :pubkey}
|
||||
defp normalize_principal_type("pubkey"), do: {:ok, :pubkey}
|
||||
defp normalize_principal_type(_value), do: {:error, :invalid_acl_principal_type}
|
||||
|
||||
defp normalize_principal(value) when is_binary(value) and byte_size(value) == 64,
|
||||
do: {:ok, String.downcase(value)}
|
||||
|
||||
defp normalize_principal(_value), do: {:error, :invalid_acl_principal}
|
||||
|
||||
defp normalize_capability(:sync_read), do: {:ok, :sync_read}
|
||||
defp normalize_capability(:sync_write), do: {:ok, :sync_write}
|
||||
defp normalize_capability("sync_read"), do: {:ok, :sync_read}
|
||||
defp normalize_capability("sync_write"), do: {:ok, :sync_write}
|
||||
defp normalize_capability(_value), do: {:error, :invalid_acl_capability}
|
||||
|
||||
defp normalize_match(match) when is_map(match) do
|
||||
normalized_match =
|
||||
Enum.reduce(match, %{}, fn
|
||||
{key, values}, acc when is_binary(key) ->
|
||||
Map.put(acc, key, values)
|
||||
|
||||
{key, values}, acc when is_atom(key) ->
|
||||
Map.put(acc, Atom.to_string(key), values)
|
||||
|
||||
_entry, acc ->
|
||||
acc
|
||||
end)
|
||||
|
||||
{:ok, normalized_match}
|
||||
end
|
||||
|
||||
defp normalize_match(_match), do: {:error, :invalid_acl_match}
|
||||
|
||||
defp fetch(map, key) do
|
||||
Map.get(map, key) || Map.get(map, Atom.to_string(key))
|
||||
end
|
||||
end
|
||||
@@ -55,6 +55,24 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def query_event_refs(context, filters, opts) do
|
||||
with {:ok, events} <- query(context, filters, opts) do
|
||||
refs =
|
||||
events
|
||||
|> Enum.map(fn event ->
|
||||
%{
|
||||
created_at: Map.fetch!(event, "created_at"),
|
||||
id: Base.decode16!(Map.fetch!(event, "id"), case: :mixed)
|
||||
}
|
||||
end)
|
||||
|> Enum.sort(&(compare_event_refs(&1, &2) != :gt))
|
||||
|> maybe_limit_event_refs(opts)
|
||||
|
||||
{:ok, refs}
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def count(context, filters, opts) do
|
||||
with {:ok, events} <- query(context, filters, opts) do
|
||||
@@ -64,21 +82,49 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
|
||||
|
||||
@impl true
|
||||
def delete_by_request(_context, event) do
|
||||
delete_ids =
|
||||
deleter_pubkey = Map.get(event, "pubkey")
|
||||
|
||||
delete_event_ids =
|
||||
event
|
||||
|> Map.get("tags", [])
|
||||
|> Enum.flat_map(fn
|
||||
["e", event_id | _rest] -> [event_id]
|
||||
["e", event_id | _rest] when is_binary(event_id) -> [event_id]
|
||||
_tag -> []
|
||||
end)
|
||||
|
||||
delete_coordinates =
|
||||
event
|
||||
|> Map.get("tags", [])
|
||||
|> Enum.flat_map(fn
|
||||
["a", coordinate | _rest] when is_binary(coordinate) ->
|
||||
case parse_delete_coordinate(coordinate) do
|
||||
{:ok, parsed_coordinate} -> [parsed_coordinate]
|
||||
{:error, _reason} -> []
|
||||
end
|
||||
|
||||
_tag ->
|
||||
[]
|
||||
end)
|
||||
|
||||
coordinate_delete_ids =
|
||||
Store.get(fn state ->
|
||||
state.events
|
||||
|> Map.values()
|
||||
|> Enum.filter(fn candidate ->
|
||||
matches_delete_coordinate?(candidate, delete_coordinates, deleter_pubkey)
|
||||
end)
|
||||
|> Enum.map(& &1["id"])
|
||||
end)
|
||||
|
||||
all_delete_ids = Enum.uniq(delete_event_ids ++ coordinate_delete_ids)
|
||||
|
||||
Store.update(fn state ->
|
||||
Enum.reduce(delete_ids, state, fn event_id, acc ->
|
||||
Enum.reduce(all_delete_ids, state, fn event_id, acc ->
|
||||
update_in(acc.deleted, &MapSet.put(&1, event_id))
|
||||
end)
|
||||
end)
|
||||
|
||||
{:ok, length(delete_ids)}
|
||||
{:ok, length(all_delete_ids)}
|
||||
end
|
||||
|
||||
@impl true
|
||||
@@ -105,6 +151,47 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
|
||||
@impl true
|
||||
def purge_expired(_opts), do: {:ok, 0}
|
||||
|
||||
defp parse_delete_coordinate(coordinate) do
|
||||
case String.split(coordinate, ":", parts: 3) do
|
||||
[kind_part, pubkey, d_tag] ->
|
||||
case Integer.parse(kind_part) do
|
||||
{kind, ""} when kind >= 0 -> {:ok, %{kind: kind, pubkey: pubkey, d_tag: d_tag}}
|
||||
_other -> {:error, :invalid_coordinate}
|
||||
end
|
||||
|
||||
_other ->
|
||||
{:error, :invalid_coordinate}
|
||||
end
|
||||
end
|
||||
|
||||
defp matches_delete_coordinate?(candidate, delete_coordinates, deleter_pubkey) do
|
||||
Enum.any?(delete_coordinates, fn coordinate ->
|
||||
coordinate.pubkey == deleter_pubkey and
|
||||
candidate["pubkey"] == deleter_pubkey and
|
||||
candidate["kind"] == coordinate.kind and
|
||||
coordinate_match_for_kind?(candidate, coordinate)
|
||||
end)
|
||||
end
|
||||
|
||||
defp coordinate_match_for_kind?(candidate, coordinate) do
|
||||
if addressable_kind?(coordinate.kind) do
|
||||
candidate_d_tag =
|
||||
candidate
|
||||
|> Map.get("tags", [])
|
||||
|> Enum.find_value("", fn
|
||||
["d", value | _rest] -> value
|
||||
_tag -> nil
|
||||
end)
|
||||
|
||||
candidate_d_tag == coordinate.d_tag
|
||||
else
|
||||
replaceable_kind?(coordinate.kind)
|
||||
end
|
||||
end
|
||||
|
||||
defp replaceable_kind?(kind), do: kind in [0, 3] or (kind >= 10_000 and kind < 20_000)
|
||||
defp addressable_kind?(kind), do: kind >= 30_000 and kind < 40_000
|
||||
|
||||
defp giftwrap_visible_to_requester?(%{"kind" => 1059} = event, requester_pubkeys) do
|
||||
requester_pubkeys != [] and
|
||||
event_targets_any_recipient?(event, requester_pubkeys)
|
||||
@@ -120,4 +207,21 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
|
||||
_tag -> false
|
||||
end)
|
||||
end
|
||||
|
||||
defp compare_event_refs(left, right) do
|
||||
cond do
|
||||
left.created_at < right.created_at -> :lt
|
||||
left.created_at > right.created_at -> :gt
|
||||
left.id < right.id -> :lt
|
||||
left.id > right.id -> :gt
|
||||
true -> :eq
|
||||
end
|
||||
end
|
||||
|
||||
defp maybe_limit_event_refs(refs, opts) do
|
||||
case Keyword.get(opts, :limit) do
|
||||
limit when is_integer(limit) and limit > 0 -> Enum.take(refs, limit)
|
||||
_other -> refs
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
@@ -33,6 +33,11 @@ defmodule Parrhesia.Storage.Adapters.Memory.Moderation do
|
||||
{:ok, Store.get(fn state -> MapSet.member?(state.allowed_pubkeys, pubkey) end)}
|
||||
end
|
||||
|
||||
@impl true
|
||||
def has_allowed_pubkeys?(_context) do
|
||||
{:ok, Store.get(fn state -> MapSet.size(state.allowed_pubkeys) > 0 end)}
|
||||
end
|
||||
|
||||
@impl true
|
||||
def ban_event(_context, event_id), do: update_ban_set(:events, event_id, :add)
|
||||
|
||||
|
||||
@@ -10,6 +10,8 @@ defmodule Parrhesia.Storage.Adapters.Memory.Store do
|
||||
deleted: MapSet.new(),
|
||||
bans: %{pubkeys: MapSet.new(), events: MapSet.new(), ips: MapSet.new()},
|
||||
allowed_pubkeys: MapSet.new(),
|
||||
acl_rules: [],
|
||||
next_acl_rule_id: 1,
|
||||
groups: %{},
|
||||
roles: %{},
|
||||
audit_logs: []
|
||||
|
||||
273
lib/parrhesia/storage/adapters/postgres/acl.ex
Normal file
273
lib/parrhesia/storage/adapters/postgres/acl.ex
Normal file
@@ -0,0 +1,273 @@
|
||||
defmodule Parrhesia.Storage.Adapters.Postgres.ACL do
|
||||
@moduledoc """
|
||||
PostgreSQL-backed implementation for `Parrhesia.Storage.ACL`.
|
||||
"""
|
||||
|
||||
import Ecto.Query
|
||||
|
||||
alias Parrhesia.Repo
|
||||
|
||||
@behaviour Parrhesia.Storage.ACL
|
||||
|
||||
@impl true
|
||||
def put_rule(_context, rule) when is_map(rule) do
|
||||
with {:ok, normalized_rule} <- normalize_rule(rule) do
|
||||
normalized_rule
|
||||
|> find_matching_rule()
|
||||
|> maybe_insert_rule(normalized_rule)
|
||||
end
|
||||
end
|
||||
|
||||
def put_rule(_context, _rule), do: {:error, :invalid_acl_rule}
|
||||
|
||||
defp maybe_insert_rule(nil, normalized_rule), do: insert_rule(normalized_rule)
|
||||
defp maybe_insert_rule(existing_rule, _normalized_rule), do: {:ok, existing_rule}
|
||||
|
||||
@impl true
|
||||
def delete_rule(_context, selector) when is_map(selector) do
|
||||
case normalize_delete_selector(selector) do
|
||||
{:ok, {:id, id}} ->
|
||||
query = from(rule in "acl_rules", where: rule.id == ^id)
|
||||
{_deleted, _result} = Repo.delete_all(query)
|
||||
:ok
|
||||
|
||||
{:ok, {:exact, rule}} ->
|
||||
query =
|
||||
from(stored_rule in "acl_rules",
|
||||
where:
|
||||
stored_rule.principal_type == ^rule.principal_type and
|
||||
stored_rule.principal == ^rule.principal and
|
||||
stored_rule.capability == ^rule.capability and
|
||||
stored_rule.match == ^rule.match
|
||||
)
|
||||
|
||||
{_deleted, _result} = Repo.delete_all(query)
|
||||
:ok
|
||||
|
||||
{:error, reason} ->
|
||||
{:error, reason}
|
||||
end
|
||||
end
|
||||
|
||||
def delete_rule(_context, _selector), do: {:error, :invalid_acl_rule}
|
||||
|
||||
@impl true
|
||||
def list_rules(_context, opts) when is_list(opts) do
|
||||
query =
|
||||
from(rule in "acl_rules",
|
||||
order_by: [
|
||||
asc: rule.principal_type,
|
||||
asc: rule.principal,
|
||||
asc: rule.capability,
|
||||
asc: rule.id
|
||||
],
|
||||
select: %{
|
||||
id: rule.id,
|
||||
principal_type: rule.principal_type,
|
||||
principal: rule.principal,
|
||||
capability: rule.capability,
|
||||
match: rule.match,
|
||||
inserted_at: rule.inserted_at
|
||||
}
|
||||
)
|
||||
|> maybe_filter_principal_type(Keyword.get(opts, :principal_type))
|
||||
|> maybe_filter_principal(Keyword.get(opts, :principal))
|
||||
|> maybe_filter_capability(Keyword.get(opts, :capability))
|
||||
|
||||
{:ok, Enum.map(Repo.all(query), &normalize_persisted_rule/1)}
|
||||
end
|
||||
|
||||
def list_rules(_context, _opts), do: {:error, :invalid_opts}
|
||||
|
||||
defp maybe_filter_principal_type(query, nil), do: query
|
||||
|
||||
defp maybe_filter_principal_type(query, principal_type) when is_atom(principal_type) do
|
||||
maybe_filter_principal_type(query, Atom.to_string(principal_type))
|
||||
end
|
||||
|
||||
defp maybe_filter_principal_type(query, principal_type) when is_binary(principal_type) do
|
||||
where(query, [rule], rule.principal_type == ^principal_type)
|
||||
end
|
||||
|
||||
defp maybe_filter_principal_type(query, _principal_type), do: query
|
||||
|
||||
defp maybe_filter_principal(query, nil), do: query
|
||||
|
||||
defp maybe_filter_principal(query, principal) when is_binary(principal) do
|
||||
case decode_hex_or_binary(principal, 32, :invalid_acl_principal) do
|
||||
{:ok, decoded_principal} -> where(query, [rule], rule.principal == ^decoded_principal)
|
||||
{:error, _reason} -> where(query, [rule], false)
|
||||
end
|
||||
end
|
||||
|
||||
defp maybe_filter_principal(query, _principal), do: query
|
||||
|
||||
defp maybe_filter_capability(query, nil), do: query
|
||||
|
||||
defp maybe_filter_capability(query, capability) when is_atom(capability) do
|
||||
maybe_filter_capability(query, Atom.to_string(capability))
|
||||
end
|
||||
|
||||
defp maybe_filter_capability(query, capability) when is_binary(capability) do
|
||||
where(query, [rule], rule.capability == ^capability)
|
||||
end
|
||||
|
||||
defp maybe_filter_capability(query, _capability), do: query
|
||||
|
||||
defp find_matching_rule(normalized_rule) do
|
||||
query =
|
||||
from(stored_rule in "acl_rules",
|
||||
where:
|
||||
stored_rule.principal_type == ^normalized_rule.principal_type and
|
||||
stored_rule.principal == ^normalized_rule.principal and
|
||||
stored_rule.capability == ^normalized_rule.capability and
|
||||
stored_rule.match == ^normalized_rule.match,
|
||||
limit: 1,
|
||||
select: %{
|
||||
id: stored_rule.id,
|
||||
principal_type: stored_rule.principal_type,
|
||||
principal: stored_rule.principal,
|
||||
capability: stored_rule.capability,
|
||||
match: stored_rule.match,
|
||||
inserted_at: stored_rule.inserted_at
|
||||
}
|
||||
)
|
||||
|
||||
case Repo.one(query) do
|
||||
nil -> nil
|
||||
stored_rule -> normalize_persisted_rule(stored_rule)
|
||||
end
|
||||
end
|
||||
|
||||
defp insert_rule(normalized_rule) do
|
||||
now = DateTime.utc_now() |> DateTime.truncate(:microsecond)
|
||||
|
||||
row = %{
|
||||
principal_type: normalized_rule.principal_type,
|
||||
principal: normalized_rule.principal,
|
||||
capability: normalized_rule.capability,
|
||||
match: normalized_rule.match,
|
||||
inserted_at: now
|
||||
}
|
||||
|
||||
case Repo.insert_all("acl_rules", [row], returning: [:id, :inserted_at]) do
|
||||
{1, [inserted_row]} ->
|
||||
{:ok, normalize_persisted_rule(Map.merge(row, Map.new(inserted_row)))}
|
||||
|
||||
_other ->
|
||||
{:error, :acl_rule_insert_failed}
|
||||
end
|
||||
end
|
||||
|
||||
defp normalize_persisted_rule(rule) do
|
||||
%{
|
||||
id: rule.id,
|
||||
principal_type: normalize_principal_type(rule.principal_type),
|
||||
principal: Base.encode16(rule.principal, case: :lower),
|
||||
capability: normalize_capability(rule.capability),
|
||||
match: normalize_match(rule.match),
|
||||
inserted_at: rule.inserted_at
|
||||
}
|
||||
end
|
||||
|
||||
defp normalize_delete_selector(%{"id" => id}), do: normalize_delete_selector(%{id: id})
|
||||
|
||||
defp normalize_delete_selector(%{id: id}) when is_integer(id) and id > 0,
|
||||
do: {:ok, {:id, id}}
|
||||
|
||||
defp normalize_delete_selector(selector) do
|
||||
case normalize_rule(selector) do
|
||||
{:ok, normalized_rule} -> {:ok, {:exact, normalized_rule}}
|
||||
{:error, reason} -> {:error, reason}
|
||||
end
|
||||
end
|
||||
|
||||
defp normalize_rule(rule) when is_map(rule) do
|
||||
with {:ok, principal_type} <- normalize_principal_type_value(fetch(rule, :principal_type)),
|
||||
{:ok, principal} <-
|
||||
decode_hex_or_binary(fetch(rule, :principal), 32, :invalid_acl_principal),
|
||||
{:ok, capability} <- normalize_capability_value(fetch(rule, :capability)),
|
||||
{:ok, match} <- normalize_match_value(fetch(rule, :match)) do
|
||||
{:ok,
|
||||
%{
|
||||
principal_type: principal_type,
|
||||
principal: principal,
|
||||
capability: capability,
|
||||
match: match
|
||||
}}
|
||||
end
|
||||
end
|
||||
|
||||
defp normalize_rule(_rule), do: {:error, :invalid_acl_rule}
|
||||
|
||||
defp normalize_principal_type("pubkey"), do: :pubkey
|
||||
defp normalize_principal_type(principal_type), do: principal_type
|
||||
|
||||
defp normalize_capability("sync_read"), do: :sync_read
|
||||
defp normalize_capability("sync_write"), do: :sync_write
|
||||
defp normalize_capability(capability), do: capability
|
||||
|
||||
defp normalize_principal_type_value(:pubkey), do: {:ok, "pubkey"}
|
||||
defp normalize_principal_type_value("pubkey"), do: {:ok, "pubkey"}
|
||||
defp normalize_principal_type_value(_principal_type), do: {:error, :invalid_acl_principal_type}
|
||||
|
||||
defp normalize_capability_value(:sync_read), do: {:ok, "sync_read"}
|
||||
defp normalize_capability_value(:sync_write), do: {:ok, "sync_write"}
|
||||
defp normalize_capability_value("sync_read"), do: {:ok, "sync_read"}
|
||||
defp normalize_capability_value("sync_write"), do: {:ok, "sync_write"}
|
||||
defp normalize_capability_value(_capability), do: {:error, :invalid_acl_capability}
|
||||
|
||||
defp normalize_match_value(match) when is_map(match) do
|
||||
normalized_match =
|
||||
Enum.reduce(match, %{}, fn
|
||||
{key, values}, acc when is_binary(key) ->
|
||||
Map.put(acc, key, values)
|
||||
|
||||
{key, values}, acc when is_atom(key) ->
|
||||
Map.put(acc, Atom.to_string(key), values)
|
||||
|
||||
_entry, acc ->
|
||||
acc
|
||||
end)
|
||||
|
||||
{:ok, normalize_match(normalized_match)}
|
||||
end
|
||||
|
||||
defp normalize_match_value(_match), do: {:error, :invalid_acl_match}
|
||||
|
||||
defp normalize_match(match) when is_map(match) do
|
||||
Enum.reduce(match, %{}, fn
|
||||
{key, values}, acc when is_binary(key) and is_list(values) ->
|
||||
Map.put(acc, key, Enum.uniq(values))
|
||||
|
||||
{key, value}, acc when is_binary(key) ->
|
||||
Map.put(acc, key, value)
|
||||
|
||||
_entry, acc ->
|
||||
acc
|
||||
end)
|
||||
end
|
||||
|
||||
defp normalize_match(_match), do: %{}
|
||||
|
||||
defp fetch(map, key) do
|
||||
Map.get(map, key) || Map.get(map, Atom.to_string(key))
|
||||
end
|
||||
|
||||
defp decode_hex_or_binary(value, expected_bytes, _reason)
|
||||
when is_binary(value) and byte_size(value) == expected_bytes,
|
||||
do: {:ok, value}
|
||||
|
||||
defp decode_hex_or_binary(value, expected_bytes, reason) when is_binary(value) do
|
||||
if byte_size(value) == expected_bytes * 2 do
|
||||
case Base.decode16(value, case: :mixed) do
|
||||
{:ok, decoded} -> {:ok, decoded}
|
||||
:error -> {:error, reason}
|
||||
end
|
||||
else
|
||||
{:error, reason}
|
||||
end
|
||||
end
|
||||
|
||||
defp decode_hex_or_binary(_value, _expected_bytes, reason), do: {:error, reason}
|
||||
end
|
||||
@@ -20,6 +20,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Admin do
|
||||
case method_name do
|
||||
"ping" -> {:ok, %{"status" => "ok"}}
|
||||
"stats" -> {:ok, relay_stats()}
|
||||
"supportedmethods" -> {:ok, %{"methods" => supported_methods()}}
|
||||
"list_audit_logs" -> list_audit_logs(%{}, audit_list_opts(params))
|
||||
_other -> execute_moderation_method(moderation, method_name, params)
|
||||
end
|
||||
@@ -84,15 +85,36 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Admin do
|
||||
defp relay_stats do
|
||||
events_count = Repo.aggregate("events", :count, :id)
|
||||
banned_pubkeys = Repo.aggregate("banned_pubkeys", :count, :pubkey)
|
||||
allowed_pubkeys = Repo.aggregate("allowed_pubkeys", :count, :pubkey)
|
||||
blocked_ips = Repo.aggregate("blocked_ips", :count, :ip)
|
||||
acl_rules = Repo.aggregate("acl_rules", :count, :id)
|
||||
|
||||
%{
|
||||
"events" => events_count,
|
||||
"banned_pubkeys" => banned_pubkeys,
|
||||
"allowed_pubkeys" => allowed_pubkeys,
|
||||
"acl_rules" => acl_rules,
|
||||
"blocked_ips" => blocked_ips
|
||||
}
|
||||
end
|
||||
|
||||
defp supported_methods do
|
||||
[
|
||||
"allow_pubkey",
|
||||
"ban_event",
|
||||
"ban_pubkey",
|
||||
"block_ip",
|
||||
"disallow_pubkey",
|
||||
"list_audit_logs",
|
||||
"ping",
|
||||
"stats",
|
||||
"supportedmethods",
|
||||
"unban_event",
|
||||
"unban_pubkey",
|
||||
"unblock_ip"
|
||||
]
|
||||
end
|
||||
|
||||
defp execute_moderation_method(moderation, "ban_pubkey", params),
|
||||
do: execute_pubkey_method(fn ctx, value -> moderation.ban_pubkey(ctx, value) end, params)
|
||||
|
||||
|
||||
@@ -56,6 +56,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
pubkey: event.pubkey,
|
||||
created_at: event.created_at,
|
||||
kind: event.kind,
|
||||
tags: event.tags,
|
||||
content: event.content,
|
||||
sig: event.sig
|
||||
}
|
||||
@@ -66,13 +67,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
{:ok, nil}
|
||||
|
||||
persisted_event ->
|
||||
tags = load_tags([{persisted_event.created_at, persisted_event.id}])
|
||||
|
||||
{:ok,
|
||||
to_nostr_event(
|
||||
persisted_event,
|
||||
Map.get(tags, {persisted_event.created_at, persisted_event.id}, [])
|
||||
)}
|
||||
{:ok, to_nostr_event(persisted_event)}
|
||||
end
|
||||
end
|
||||
end
|
||||
@@ -93,36 +88,27 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
|> sort_persisted_events()
|
||||
|> maybe_apply_query_limit(opts)
|
||||
|
||||
event_keys = Enum.map(persisted_events, fn event -> {event.created_at, event.id} end)
|
||||
tags_by_event = load_tags(event_keys)
|
||||
|
||||
nostr_events =
|
||||
Enum.map(persisted_events, fn event ->
|
||||
to_nostr_event(event, Map.get(tags_by_event, {event.created_at, event.id}, []))
|
||||
end)
|
||||
|
||||
{:ok, nostr_events}
|
||||
{:ok, Enum.map(persisted_events, &to_nostr_event/1)}
|
||||
end
|
||||
end
|
||||
|
||||
def query(_context, _filters, _opts), do: {:error, :invalid_opts}
|
||||
|
||||
@impl true
|
||||
def query_event_refs(_context, filters, opts) when is_list(opts) do
|
||||
with :ok <- Filter.validate_filters(filters) do
|
||||
now = Keyword.get(opts, :now, System.system_time(:second))
|
||||
{:ok, fetch_event_refs(filters, now, opts)}
|
||||
end
|
||||
end
|
||||
|
||||
def query_event_refs(_context, _filters, _opts), do: {:error, :invalid_opts}
|
||||
|
||||
@impl true
|
||||
def count(_context, filters, opts) when is_list(opts) do
|
||||
with :ok <- Filter.validate_filters(filters) do
|
||||
now = Keyword.get(opts, :now, System.system_time(:second))
|
||||
|
||||
total_count =
|
||||
filters
|
||||
|> Enum.flat_map(fn filter ->
|
||||
filter
|
||||
|> event_id_query_for_filter(now, opts)
|
||||
|> Repo.all()
|
||||
end)
|
||||
|> MapSet.new()
|
||||
|> MapSet.size()
|
||||
|
||||
{:ok, total_count}
|
||||
{:ok, count_events(filters, now, opts)}
|
||||
end
|
||||
end
|
||||
|
||||
@@ -131,18 +117,83 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
@impl true
|
||||
def delete_by_request(_context, event) do
|
||||
with {:ok, deleter_pubkey} <- decode_hex(Map.get(event, "pubkey"), 32, :invalid_pubkey),
|
||||
{:ok, delete_ids} <- extract_delete_event_ids(event) do
|
||||
{:ok, delete_targets} <- extract_delete_targets(event) do
|
||||
deleted_at = System.system_time(:second)
|
||||
|
||||
deleted_by_id_count =
|
||||
delete_targets
|
||||
|> Map.get(:event_ids, [])
|
||||
|> delete_events_by_ids(deleter_pubkey, deleted_at)
|
||||
|
||||
deleted_by_coordinate_count =
|
||||
delete_targets
|
||||
|> Map.get(:coordinates, [])
|
||||
|> delete_events_by_coordinates(deleter_pubkey, deleted_at)
|
||||
|
||||
{:ok, deleted_by_id_count + deleted_by_coordinate_count}
|
||||
end
|
||||
end
|
||||
|
||||
defp delete_events_by_ids([], _deleter_pubkey, _deleted_at), do: 0
|
||||
|
||||
defp delete_events_by_ids(delete_ids, deleter_pubkey, deleted_at) do
|
||||
query =
|
||||
from(stored_event in "events",
|
||||
where:
|
||||
stored_event.id in ^delete_ids and
|
||||
stored_event.pubkey == ^deleter_pubkey and
|
||||
is_nil(stored_event.deleted_at)
|
||||
)
|
||||
|
||||
{count, _result} = Repo.update_all(query, set: [deleted_at: deleted_at])
|
||||
count
|
||||
end
|
||||
|
||||
defp delete_events_by_coordinates([], _deleter_pubkey, _deleted_at), do: 0
|
||||
|
||||
defp delete_events_by_coordinates(coordinates, deleter_pubkey, deleted_at) do
|
||||
relevant_coordinates =
|
||||
Enum.filter(coordinates, fn coordinate ->
|
||||
coordinate.pubkey == deleter_pubkey and
|
||||
(replaceable_kind?(coordinate.kind) or addressable_kind?(coordinate.kind))
|
||||
end)
|
||||
|
||||
if relevant_coordinates == [] do
|
||||
0
|
||||
else
|
||||
dynamic_conditions =
|
||||
Enum.reduce(relevant_coordinates, dynamic(false), fn coordinate, acc ->
|
||||
coordinate_condition =
|
||||
coordinate_delete_condition(coordinate, deleter_pubkey)
|
||||
|
||||
dynamic([stored_event], ^acc or ^coordinate_condition)
|
||||
end)
|
||||
|
||||
query =
|
||||
from(stored_event in "events",
|
||||
where:
|
||||
stored_event.id in ^delete_ids and
|
||||
stored_event.pubkey == ^deleter_pubkey and
|
||||
is_nil(stored_event.deleted_at)
|
||||
where: is_nil(stored_event.deleted_at)
|
||||
)
|
||||
|> where(^dynamic_conditions)
|
||||
|
||||
deleted_at = System.system_time(:second)
|
||||
{count, _result} = Repo.update_all(query, set: [deleted_at: deleted_at])
|
||||
{:ok, count}
|
||||
count
|
||||
end
|
||||
end
|
||||
|
||||
defp coordinate_delete_condition(coordinate, deleter_pubkey) do
|
||||
if addressable_kind?(coordinate.kind) do
|
||||
dynamic(
|
||||
[stored_event],
|
||||
stored_event.kind == ^coordinate.kind and
|
||||
stored_event.pubkey == ^deleter_pubkey and
|
||||
stored_event.d_tag == ^coordinate.d_tag
|
||||
)
|
||||
else
|
||||
dynamic(
|
||||
[stored_event],
|
||||
stored_event.kind == ^coordinate.kind and
|
||||
stored_event.pubkey == ^deleter_pubkey
|
||||
)
|
||||
end
|
||||
end
|
||||
|
||||
@@ -545,6 +596,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
pubkey: normalized_event.pubkey,
|
||||
created_at: normalized_event.created_at,
|
||||
kind: normalized_event.kind,
|
||||
tags: normalized_event.tags,
|
||||
content: normalized_event.content,
|
||||
sig: normalized_event.sig,
|
||||
d_tag: normalized_event.d_tag,
|
||||
@@ -555,87 +607,210 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
end
|
||||
|
||||
defp event_query_for_filter(filter, now, opts) do
|
||||
base_query =
|
||||
from(event in "events",
|
||||
where: is_nil(event.deleted_at) and (is_nil(event.expires_at) or event.expires_at > ^now),
|
||||
order_by: [desc: event.created_at, asc: event.id],
|
||||
select: %{
|
||||
id: event.id,
|
||||
pubkey: event.pubkey,
|
||||
created_at: event.created_at,
|
||||
kind: event.kind,
|
||||
content: event.content,
|
||||
sig: event.sig
|
||||
}
|
||||
)
|
||||
{base_query, remaining_tag_filters} = event_source_query(filter, now)
|
||||
|
||||
query =
|
||||
base_query
|
||||
|> maybe_filter_ids(Map.get(filter, "ids"))
|
||||
|> maybe_filter_authors(Map.get(filter, "authors"))
|
||||
|> maybe_filter_kinds(Map.get(filter, "kinds"))
|
||||
|> maybe_filter_since(Map.get(filter, "since"))
|
||||
|> maybe_filter_until(Map.get(filter, "until"))
|
||||
|> maybe_filter_search(Map.get(filter, "search"))
|
||||
|> filter_by_tags(filter)
|
||||
|> maybe_restrict_giftwrap_access(filter, opts)
|
||||
|
||||
maybe_limit_query(query, effective_filter_limit(filter, opts))
|
||||
base_query
|
||||
|> apply_common_event_filters(filter, remaining_tag_filters, opts)
|
||||
|> order_by([event: event], desc: event.created_at, asc: event.id)
|
||||
|> select([event: event], %{
|
||||
id: event.id,
|
||||
pubkey: event.pubkey,
|
||||
created_at: event.created_at,
|
||||
kind: event.kind,
|
||||
tags: event.tags,
|
||||
content: event.content,
|
||||
sig: event.sig
|
||||
})
|
||||
|> maybe_limit_query(effective_filter_limit(filter, opts))
|
||||
end
|
||||
|
||||
defp event_id_query_for_filter(filter, now, opts) do
|
||||
{base_query, remaining_tag_filters} = event_source_query(filter, now)
|
||||
|
||||
base_query
|
||||
|> apply_common_event_filters(filter, remaining_tag_filters, opts)
|
||||
|> select([event: event], event.id)
|
||||
end
|
||||
|
||||
defp event_id_distinct_union_query_for_filters([], now, _opts) do
|
||||
from(event in "events",
|
||||
where: is_nil(event.deleted_at) and (is_nil(event.expires_at) or event.expires_at > ^now),
|
||||
where: event.created_at > ^now and event.created_at < ^now,
|
||||
select: event.id
|
||||
)
|
||||
end
|
||||
|
||||
defp event_id_distinct_union_query_for_filters([first_filter | rest_filters], now, opts) do
|
||||
Enum.reduce(rest_filters, event_id_query_for_filter(first_filter, now, opts), fn filter,
|
||||
acc ->
|
||||
union(acc, ^event_id_query_for_filter(filter, now, opts))
|
||||
end)
|
||||
end
|
||||
|
||||
defp event_ref_query_for_filter(filter, now, opts) do
|
||||
{base_query, remaining_tag_filters} = event_source_query(filter, now)
|
||||
|
||||
base_query
|
||||
|> apply_common_event_filters(filter, remaining_tag_filters, opts)
|
||||
|> order_by([event: event], asc: event.created_at, asc: event.id)
|
||||
|> select([event: event], %{
|
||||
created_at: event.created_at,
|
||||
id: event.id
|
||||
})
|
||||
|> maybe_limit_query(effective_filter_limit(filter, opts))
|
||||
end
|
||||
|
||||
defp event_ref_union_query_for_filters([], now, _opts) do
|
||||
from(event in "events",
|
||||
where: event.created_at > ^now and event.created_at < ^now,
|
||||
select: %{created_at: event.created_at, id: event.id}
|
||||
)
|
||||
end
|
||||
|
||||
defp event_ref_union_query_for_filters([first_filter | rest_filters], now, opts) do
|
||||
Enum.reduce(rest_filters, event_ref_query_for_filter(first_filter, now, opts), fn filter,
|
||||
acc ->
|
||||
union_all(acc, ^event_ref_query_for_filter(filter, now, opts))
|
||||
end)
|
||||
end
|
||||
|
||||
defp fetch_event_refs([filter], now, opts) do
|
||||
filter
|
||||
|> event_ref_query_for_filter(now, opts)
|
||||
|> maybe_limit_query(Keyword.get(opts, :limit))
|
||||
|> Repo.all()
|
||||
end
|
||||
|
||||
defp fetch_event_refs(filters, now, opts) do
|
||||
filters
|
||||
|> event_ref_union_query_for_filters(now, opts)
|
||||
|> subquery()
|
||||
|> then(fn union_query ->
|
||||
from(ref in union_query,
|
||||
group_by: [ref.created_at, ref.id],
|
||||
order_by: [asc: ref.created_at, asc: ref.id],
|
||||
select: %{created_at: ref.created_at, id: ref.id}
|
||||
)
|
||||
end)
|
||||
|> maybe_limit_query(Keyword.get(opts, :limit))
|
||||
|> Repo.all()
|
||||
end
|
||||
|
||||
defp count_events([filter], now, opts) do
|
||||
filter
|
||||
|> event_id_query_for_filter(now, opts)
|
||||
|> subquery()
|
||||
|> then(fn query ->
|
||||
from(event in query, select: count())
|
||||
end)
|
||||
|> Repo.one()
|
||||
end
|
||||
|
||||
defp count_events(filters, now, opts) do
|
||||
filters
|
||||
|> event_id_distinct_union_query_for_filters(now, opts)
|
||||
|> subquery()
|
||||
|> then(fn union_query ->
|
||||
from(event in union_query, select: count())
|
||||
end)
|
||||
|> Repo.one()
|
||||
end
|
||||
|
||||
defp event_source_query(filter, now) do
|
||||
tag_filters = tag_filters(filter)
|
||||
|
||||
case primary_tag_filter(tag_filters) do
|
||||
nil ->
|
||||
{from(event in "events",
|
||||
as: :event,
|
||||
where:
|
||||
is_nil(event.deleted_at) and
|
||||
(is_nil(event.expires_at) or event.expires_at > ^now)
|
||||
), []}
|
||||
|
||||
{tag_name, values} = primary_tag_filter ->
|
||||
remaining_tag_filters = List.delete(tag_filters, primary_tag_filter)
|
||||
|
||||
{from(tag in "event_tags",
|
||||
as: :primary_tag,
|
||||
where: tag.name == ^tag_name and tag.value in ^values,
|
||||
join: event in "events",
|
||||
as: :event,
|
||||
on: event.created_at == tag.event_created_at and event.id == tag.event_id,
|
||||
where:
|
||||
is_nil(event.deleted_at) and
|
||||
(is_nil(event.expires_at) or event.expires_at > ^now),
|
||||
distinct: [event.created_at, event.id]
|
||||
), remaining_tag_filters}
|
||||
end
|
||||
end
|
||||
|
||||
defp apply_common_event_filters(query, filter, remaining_tag_filters, opts) do
|
||||
query
|
||||
|> maybe_filter_ids(Map.get(filter, "ids"))
|
||||
|> maybe_filter_authors(Map.get(filter, "authors"))
|
||||
|> maybe_filter_kinds(Map.get(filter, "kinds"))
|
||||
|> maybe_filter_since(Map.get(filter, "since"))
|
||||
|> maybe_filter_until(Map.get(filter, "until"))
|
||||
|> maybe_filter_search(Map.get(filter, "search"))
|
||||
|> filter_by_tags(filter)
|
||||
|> filter_by_tag_filters(remaining_tag_filters)
|
||||
|> maybe_restrict_giftwrap_access(filter, opts)
|
||||
end
|
||||
|
||||
defp primary_tag_filter([]), do: nil
|
||||
|
||||
defp primary_tag_filter(tag_filters) do
|
||||
Enum.find(tag_filters, fn {tag_name, _values} -> tag_name in ["h", "i"] end) ||
|
||||
List.first(tag_filters)
|
||||
end
|
||||
|
||||
defp maybe_filter_ids(query, nil), do: query
|
||||
|
||||
defp maybe_filter_ids(query, ids) do
|
||||
decoded_ids = decode_hex_list(ids, :lower)
|
||||
where(query, [event], event.id in ^decoded_ids)
|
||||
where(query, [event: event], event.id in ^decoded_ids)
|
||||
end
|
||||
|
||||
defp maybe_filter_authors(query, nil), do: query
|
||||
|
||||
defp maybe_filter_authors(query, authors) do
|
||||
decoded_authors = decode_hex_list(authors, :lower)
|
||||
where(query, [event], event.pubkey in ^decoded_authors)
|
||||
where(query, [event: event], event.pubkey in ^decoded_authors)
|
||||
end
|
||||
|
||||
defp maybe_filter_kinds(query, nil), do: query
|
||||
defp maybe_filter_kinds(query, kinds), do: where(query, [event], event.kind in ^kinds)
|
||||
defp maybe_filter_kinds(query, kinds), do: where(query, [event: event], event.kind in ^kinds)
|
||||
|
||||
defp maybe_filter_since(query, nil), do: query
|
||||
defp maybe_filter_since(query, since), do: where(query, [event], event.created_at >= ^since)
|
||||
|
||||
defp maybe_filter_since(query, since),
|
||||
do: where(query, [event: event], event.created_at >= ^since)
|
||||
|
||||
defp maybe_filter_until(query, nil), do: query
|
||||
defp maybe_filter_until(query, until), do: where(query, [event], event.created_at <= ^until)
|
||||
|
||||
defp maybe_filter_until(query, until),
|
||||
do: where(query, [event: event], event.created_at <= ^until)
|
||||
|
||||
defp maybe_filter_search(query, nil), do: query
|
||||
|
||||
defp maybe_filter_search(query, search) when is_binary(search) and search != "" do
|
||||
where(query, [event], ilike(event.content, ^"%#{search}%"))
|
||||
escaped_search = escape_like_pattern(search)
|
||||
where(query, [event: event], ilike(event.content, ^"%#{escaped_search}%"))
|
||||
end
|
||||
|
||||
defp maybe_filter_search(query, _search), do: query
|
||||
|
||||
defp filter_by_tags(query, filter) do
|
||||
filter
|
||||
|> tag_filters()
|
||||
|> Enum.reduce(query, fn {tag_name, values}, acc ->
|
||||
defp escape_like_pattern(search) do
|
||||
search
|
||||
|> String.replace("\\", "\\\\")
|
||||
|> String.replace("%", "\\%")
|
||||
|> String.replace("_", "\\_")
|
||||
end
|
||||
|
||||
defp filter_by_tag_filters(query, tag_filters) do
|
||||
Enum.reduce(tag_filters, query, fn {tag_name, values}, acc ->
|
||||
where(
|
||||
acc,
|
||||
[event],
|
||||
[event: event],
|
||||
fragment(
|
||||
"EXISTS (SELECT 1 FROM event_tags AS tag WHERE tag.event_created_at = ? AND tag.event_id = ? AND tag.name = ? AND tag.value = ANY(?))",
|
||||
event.created_at,
|
||||
@@ -665,7 +840,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
targets_giftwrap?(filter) and requester_pubkeys != [] ->
|
||||
where(
|
||||
query,
|
||||
[event],
|
||||
[event: event],
|
||||
fragment(
|
||||
"EXISTS (SELECT 1 FROM event_tags AS tag WHERE tag.event_created_at = ? AND tag.event_id = ? AND tag.name = 'p' AND tag.value = ANY(?))",
|
||||
event.created_at,
|
||||
@@ -675,7 +850,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
)
|
||||
|
||||
targets_giftwrap?(filter) ->
|
||||
where(query, [_event], false)
|
||||
where(query, [event: _event], false)
|
||||
|
||||
true ->
|
||||
query
|
||||
@@ -734,44 +909,21 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
end
|
||||
end
|
||||
|
||||
defp load_tags([]), do: %{}
|
||||
|
||||
defp load_tags(event_keys) when is_list(event_keys) do
|
||||
created_at_values = Enum.map(event_keys, fn {created_at, _event_id} -> created_at end)
|
||||
event_id_values = Enum.map(event_keys, fn {_created_at, event_id} -> event_id end)
|
||||
|
||||
query =
|
||||
from(tag in "event_tags",
|
||||
where: tag.event_created_at in ^created_at_values and tag.event_id in ^event_id_values,
|
||||
order_by: [asc: tag.idx],
|
||||
select: %{
|
||||
event_created_at: tag.event_created_at,
|
||||
event_id: tag.event_id,
|
||||
name: tag.name,
|
||||
value: tag.value
|
||||
}
|
||||
)
|
||||
|
||||
query
|
||||
|> Repo.all()
|
||||
|> Enum.group_by(
|
||||
fn tag -> {tag.event_created_at, tag.event_id} end,
|
||||
fn tag -> [tag.name, tag.value] end
|
||||
)
|
||||
end
|
||||
|
||||
defp to_nostr_event(persisted_event, tags) do
|
||||
defp to_nostr_event(persisted_event) do
|
||||
%{
|
||||
"id" => Base.encode16(persisted_event.id, case: :lower),
|
||||
"pubkey" => Base.encode16(persisted_event.pubkey, case: :lower),
|
||||
"created_at" => persisted_event.created_at,
|
||||
"kind" => persisted_event.kind,
|
||||
"tags" => tags,
|
||||
"tags" => normalize_persisted_tags(persisted_event.tags),
|
||||
"content" => persisted_event.content,
|
||||
"sig" => Base.encode16(persisted_event.sig, case: :lower)
|
||||
}
|
||||
end
|
||||
|
||||
defp normalize_persisted_tags(tags) when is_list(tags), do: tags
|
||||
defp normalize_persisted_tags(_tags), do: []
|
||||
|
||||
defp decode_hex(value, bytes, reason) when is_binary(value) do
|
||||
if byte_size(value) == bytes * 2 do
|
||||
case Base.decode16(value, case: :mixed) do
|
||||
@@ -818,23 +970,69 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
end)
|
||||
end
|
||||
|
||||
defp extract_delete_event_ids(event) do
|
||||
delete_ids =
|
||||
event
|
||||
|> Map.get("tags", [])
|
||||
|> Enum.reduce([], fn
|
||||
["e", event_id | _rest], acc when is_binary(event_id) -> [event_id | acc]
|
||||
_tag, acc -> acc
|
||||
end)
|
||||
|> Enum.uniq()
|
||||
defp extract_delete_targets(event) do
|
||||
with {:ok, targets} <- parse_delete_targets(Map.get(event, "tags", [])) do
|
||||
event_ids = targets.event_ids |> Enum.uniq()
|
||||
coordinates = targets.coordinates |> Enum.uniq()
|
||||
|
||||
if delete_ids == [] do
|
||||
{:error, :no_delete_targets}
|
||||
else
|
||||
{:ok, Enum.map(delete_ids, &Base.decode16!(&1, case: :mixed))}
|
||||
if event_ids == [] and coordinates == [] do
|
||||
{:error, :no_delete_targets}
|
||||
else
|
||||
{:ok, %{event_ids: event_ids, coordinates: coordinates}}
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
defp parse_delete_targets(tags) when is_list(tags) do
|
||||
Enum.reduce_while(tags, {:ok, %{event_ids: [], coordinates: []}}, fn tag, {:ok, acc} ->
|
||||
case parse_delete_target(tag) do
|
||||
{:ok, {:event_id, event_id}} ->
|
||||
{:cont, {:ok, %{acc | event_ids: [event_id | acc.event_ids]}}}
|
||||
|
||||
{:ok, {:coordinate, coordinate}} ->
|
||||
{:cont, {:ok, %{acc | coordinates: [coordinate | acc.coordinates]}}}
|
||||
|
||||
:ignore ->
|
||||
{:cont, {:ok, acc}}
|
||||
|
||||
{:error, _reason} = error ->
|
||||
{:halt, error}
|
||||
end
|
||||
end)
|
||||
end
|
||||
|
||||
defp parse_delete_targets(_tags), do: {:error, :invalid_delete_target}
|
||||
|
||||
defp parse_delete_target(["e", event_id | _rest]) when is_binary(event_id) do
|
||||
case decode_hex(event_id, 32, :invalid_delete_target) do
|
||||
{:ok, decoded_event_id} -> {:ok, {:event_id, decoded_event_id}}
|
||||
{:error, _reason} -> {:error, :invalid_delete_target}
|
||||
end
|
||||
end
|
||||
|
||||
defp parse_delete_target(["a", coordinate | _rest]) when is_binary(coordinate) do
|
||||
case parse_address_coordinate(coordinate) do
|
||||
{:ok, parsed_coordinate} -> {:ok, {:coordinate, parsed_coordinate}}
|
||||
{:error, _reason} -> {:error, :invalid_delete_target}
|
||||
end
|
||||
end
|
||||
|
||||
defp parse_delete_target(_tag), do: :ignore
|
||||
|
||||
defp parse_address_coordinate(coordinate) do
|
||||
case String.split(coordinate, ":", parts: 3) do
|
||||
[kind_part, pubkey_hex, d_tag] ->
|
||||
with {kind, ""} <- Integer.parse(kind_part),
|
||||
true <- kind >= 0,
|
||||
{:ok, pubkey} <- decode_hex(pubkey_hex, 32, :invalid_delete_target) do
|
||||
{:ok, %{kind: kind, pubkey: pubkey, d_tag: d_tag}}
|
||||
else
|
||||
_other -> {:error, :invalid_delete_target}
|
||||
end
|
||||
|
||||
_other ->
|
||||
{:error, :invalid_delete_target}
|
||||
end
|
||||
rescue
|
||||
ArgumentError -> {:error, :invalid_delete_target}
|
||||
end
|
||||
|
||||
defp extract_expiration(tags) do
|
||||
|
||||
@@ -9,87 +9,116 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
|
||||
|
||||
@behaviour Parrhesia.Storage.Moderation
|
||||
|
||||
@cache_table :parrhesia_moderation_cache
|
||||
@cache_scope_sources %{
|
||||
banned_pubkeys: {"banned_pubkeys", :pubkey},
|
||||
allowed_pubkeys: {"allowed_pubkeys", :pubkey},
|
||||
banned_events: {"banned_events", :event_id},
|
||||
blocked_ips: {"blocked_ips", :ip}
|
||||
}
|
||||
|
||||
@impl true
|
||||
def ban_pubkey(_context, pubkey) do
|
||||
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey) do
|
||||
upsert_presence_table("banned_pubkeys", :pubkey, normalized_pubkey)
|
||||
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey),
|
||||
:ok <- upsert_presence_table("banned_pubkeys", :pubkey, normalized_pubkey) do
|
||||
cache_put(:banned_pubkeys, normalized_pubkey)
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def unban_pubkey(_context, pubkey) do
|
||||
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey) do
|
||||
delete_from_table("banned_pubkeys", :pubkey, normalized_pubkey)
|
||||
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey),
|
||||
:ok <- delete_from_table("banned_pubkeys", :pubkey, normalized_pubkey) do
|
||||
cache_delete(:banned_pubkeys, normalized_pubkey)
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def pubkey_banned?(_context, pubkey) do
|
||||
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey) do
|
||||
{:ok, exists_in_table?("banned_pubkeys", :pubkey, normalized_pubkey)}
|
||||
{:ok, exists_in_scope?(:banned_pubkeys, normalized_pubkey)}
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def allow_pubkey(_context, pubkey) do
|
||||
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey) do
|
||||
upsert_presence_table("allowed_pubkeys", :pubkey, normalized_pubkey)
|
||||
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey),
|
||||
:ok <- upsert_presence_table("allowed_pubkeys", :pubkey, normalized_pubkey) do
|
||||
cache_put(:allowed_pubkeys, normalized_pubkey)
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def disallow_pubkey(_context, pubkey) do
|
||||
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey) do
|
||||
delete_from_table("allowed_pubkeys", :pubkey, normalized_pubkey)
|
||||
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey),
|
||||
:ok <- delete_from_table("allowed_pubkeys", :pubkey, normalized_pubkey) do
|
||||
cache_delete(:allowed_pubkeys, normalized_pubkey)
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def pubkey_allowed?(_context, pubkey) do
|
||||
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey) do
|
||||
{:ok, exists_in_table?("allowed_pubkeys", :pubkey, normalized_pubkey)}
|
||||
{:ok, exists_in_scope?(:allowed_pubkeys, normalized_pubkey)}
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def has_allowed_pubkeys?(_context) do
|
||||
{:ok, scope_populated?(:allowed_pubkeys)}
|
||||
end
|
||||
|
||||
@impl true
|
||||
def ban_event(_context, event_id) do
|
||||
with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id) do
|
||||
upsert_presence_table("banned_events", :event_id, normalized_event_id)
|
||||
with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id),
|
||||
:ok <- upsert_presence_table("banned_events", :event_id, normalized_event_id) do
|
||||
cache_put(:banned_events, normalized_event_id)
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def unban_event(_context, event_id) do
|
||||
with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id) do
|
||||
delete_from_table("banned_events", :event_id, normalized_event_id)
|
||||
with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id),
|
||||
:ok <- delete_from_table("banned_events", :event_id, normalized_event_id) do
|
||||
cache_delete(:banned_events, normalized_event_id)
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def event_banned?(_context, event_id) do
|
||||
with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id) do
|
||||
{:ok, exists_in_table?("banned_events", :event_id, normalized_event_id)}
|
||||
{:ok, exists_in_scope?(:banned_events, normalized_event_id)}
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def block_ip(_context, ip_address) do
|
||||
with {:ok, normalized_ip} <- normalize_ip(ip_address) do
|
||||
upsert_presence_table("blocked_ips", :ip, normalized_ip)
|
||||
with {:ok, normalized_ip} <- normalize_ip(ip_address),
|
||||
:ok <- upsert_presence_table("blocked_ips", :ip, normalized_ip) do
|
||||
cache_put(:blocked_ips, normalized_ip)
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def unblock_ip(_context, ip_address) do
|
||||
with {:ok, normalized_ip} <- normalize_ip(ip_address) do
|
||||
delete_from_table("blocked_ips", :ip, normalized_ip)
|
||||
with {:ok, normalized_ip} <- normalize_ip(ip_address),
|
||||
:ok <- delete_from_table("blocked_ips", :ip, normalized_ip) do
|
||||
cache_delete(:blocked_ips, normalized_ip)
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def ip_blocked?(_context, ip_address) do
|
||||
with {:ok, normalized_ip} <- normalize_ip(ip_address) do
|
||||
{:ok, exists_in_table?("blocked_ips", :ip, normalized_ip)}
|
||||
{:ok, exists_in_scope?(:blocked_ips, normalized_ip)}
|
||||
end
|
||||
end
|
||||
|
||||
@@ -122,7 +151,114 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
|
||||
:ok
|
||||
end
|
||||
|
||||
defp exists_in_table?(table, field, value) do
|
||||
defp exists_in_scope?(scope, value) do
|
||||
{table, field} = cache_scope_source!(scope)
|
||||
|
||||
if moderation_cache_enabled?() do
|
||||
case cache_table_ref() do
|
||||
:undefined ->
|
||||
exists_in_table_db?(table, field, value)
|
||||
|
||||
cache_table ->
|
||||
ensure_cache_scope_loaded(scope, cache_table)
|
||||
:ets.member(cache_table, cache_member_key(scope, value))
|
||||
end
|
||||
else
|
||||
exists_in_table_db?(table, field, value)
|
||||
end
|
||||
end
|
||||
|
||||
defp scope_populated?(scope) do
|
||||
{table, field} = cache_scope_source!(scope)
|
||||
|
||||
if moderation_cache_enabled?() do
|
||||
case cache_table_ref() do
|
||||
:undefined ->
|
||||
scope_populated_db?(table, field)
|
||||
|
||||
cache_table ->
|
||||
ensure_cache_scope_loaded(scope, cache_table)
|
||||
|
||||
:ets.select_count(cache_table, [{{{:member, scope, :_}, true}, [], [true]}]) > 0
|
||||
end
|
||||
else
|
||||
scope_populated_db?(table, field)
|
||||
end
|
||||
end
|
||||
|
||||
defp ensure_cache_scope_loaded(scope, table) do
|
||||
loaded_key = cache_loaded_key(scope)
|
||||
|
||||
if :ets.member(table, loaded_key) do
|
||||
:ok
|
||||
else
|
||||
{db_table, db_field} = cache_scope_source!(scope)
|
||||
values = load_scope_values(db_table, db_field)
|
||||
|
||||
entries = Enum.map(values, &{cache_member_key(scope, &1), true})
|
||||
|
||||
if entries != [] do
|
||||
true = :ets.insert(table, entries)
|
||||
end
|
||||
|
||||
true = :ets.insert(table, {loaded_key, true})
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
defp load_scope_values(table, field) do
|
||||
query =
|
||||
from(record in table,
|
||||
select: field(record, ^field)
|
||||
)
|
||||
|
||||
Repo.all(query)
|
||||
end
|
||||
|
||||
defp cache_put(scope, value) do
|
||||
if moderation_cache_enabled?() do
|
||||
case cache_table_ref() do
|
||||
:undefined -> :ok
|
||||
cache_table -> true = :ets.insert(cache_table, {cache_member_key(scope, value), true})
|
||||
end
|
||||
end
|
||||
|
||||
:ok
|
||||
end
|
||||
|
||||
defp cache_delete(scope, value) do
|
||||
if moderation_cache_enabled?() do
|
||||
case cache_table_ref() do
|
||||
:undefined -> :ok
|
||||
cache_table -> true = :ets.delete(cache_table, cache_member_key(scope, value))
|
||||
end
|
||||
end
|
||||
|
||||
:ok
|
||||
end
|
||||
|
||||
defp cache_scope_source!(scope), do: Map.fetch!(@cache_scope_sources, scope)
|
||||
|
||||
defp cache_loaded_key(scope), do: {:loaded, scope}
|
||||
|
||||
defp cache_member_key(scope, value), do: {:member, scope, value}
|
||||
|
||||
defp cache_table_ref do
|
||||
case :ets.whereis(@cache_table) do
|
||||
:undefined -> :undefined
|
||||
_table_ref -> @cache_table
|
||||
end
|
||||
end
|
||||
|
||||
defp moderation_cache_enabled? do
|
||||
case Application.get_env(:parrhesia, :moderation_cache_enabled, true) do
|
||||
true -> true
|
||||
false -> false
|
||||
_other -> true
|
||||
end
|
||||
end
|
||||
|
||||
defp exists_in_table_db?(table, field, value) do
|
||||
query =
|
||||
from(record in table,
|
||||
where: field(record, ^field) == ^value,
|
||||
@@ -133,6 +269,16 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
|
||||
Repo.one(query) == 1
|
||||
end
|
||||
|
||||
defp scope_populated_db?(table, field) do
|
||||
query =
|
||||
from(record in table,
|
||||
select: field(record, ^field),
|
||||
limit: 1
|
||||
)
|
||||
|
||||
not is_nil(Repo.one(query))
|
||||
end
|
||||
|
||||
defp normalize_hex_or_binary(value, expected_bytes, _reason)
|
||||
when is_binary(value) and byte_size(value) == expected_bytes,
|
||||
do: {:ok, value}
|
||||
|
||||
28
lib/parrhesia/storage/adapters/postgres/moderation_cache.ex
Normal file
28
lib/parrhesia/storage/adapters/postgres/moderation_cache.ex
Normal file
@@ -0,0 +1,28 @@
|
||||
defmodule Parrhesia.Storage.Adapters.Postgres.ModerationCache do
|
||||
@moduledoc """
|
||||
ETS owner process for moderation cache tables.
|
||||
"""
|
||||
|
||||
use GenServer
|
||||
|
||||
@cache_table :parrhesia_moderation_cache
|
||||
|
||||
@spec start_link(keyword()) :: GenServer.on_start()
|
||||
def start_link(opts \\ []) do
|
||||
GenServer.start_link(__MODULE__, :ok, opts)
|
||||
end
|
||||
|
||||
@impl true
|
||||
def init(:ok) do
|
||||
_table =
|
||||
:ets.new(@cache_table, [
|
||||
:named_table,
|
||||
:set,
|
||||
:public,
|
||||
read_concurrency: true,
|
||||
write_concurrency: true
|
||||
])
|
||||
|
||||
{:ok, %{}}
|
||||
end
|
||||
end
|
||||
@@ -1,34 +0,0 @@
|
||||
defmodule Parrhesia.Storage.Archiver do
|
||||
@moduledoc """
|
||||
Partition-aware archival helpers for Postgres event partitions.
|
||||
"""
|
||||
|
||||
import Ecto.Query
|
||||
|
||||
alias Parrhesia.Repo
|
||||
|
||||
@doc """
|
||||
Lists all `events_*` partitions excluding the default partition.
|
||||
"""
|
||||
@spec list_partitions() :: [String.t()]
|
||||
def list_partitions do
|
||||
query =
|
||||
from(table in "pg_tables",
|
||||
where: table.schemaname == "public",
|
||||
where: like(table.tablename, "events_%"),
|
||||
where: table.tablename != "events_default",
|
||||
select: table.tablename,
|
||||
order_by: [asc: table.tablename]
|
||||
)
|
||||
|
||||
Repo.all(query)
|
||||
end
|
||||
|
||||
@doc """
|
||||
Generates an archive SQL statement for the given partition.
|
||||
"""
|
||||
@spec archive_sql(String.t(), String.t()) :: String.t()
|
||||
def archive_sql(partition_name, archive_table_name) do
|
||||
"INSERT INTO #{archive_table_name} SELECT * FROM #{partition_name};"
|
||||
end
|
||||
end
|
||||
@@ -7,6 +7,7 @@ defmodule Parrhesia.Storage.Events do
|
||||
@type event_id :: binary()
|
||||
@type event :: map()
|
||||
@type filter :: map()
|
||||
@type event_ref :: %{created_at: non_neg_integer(), id: binary()}
|
||||
@type query_opts :: keyword()
|
||||
@type count_result :: non_neg_integer() | %{optional(atom()) => term()}
|
||||
@type reason :: term()
|
||||
@@ -14,6 +15,8 @@ defmodule Parrhesia.Storage.Events do
|
||||
@callback put_event(context(), event()) :: {:ok, event()} | {:error, reason()}
|
||||
@callback get_event(context(), event_id()) :: {:ok, event() | nil} | {:error, reason()}
|
||||
@callback query(context(), [filter()], query_opts()) :: {:ok, [event()]} | {:error, reason()}
|
||||
@callback query_event_refs(context(), [filter()], query_opts()) ::
|
||||
{:ok, [event_ref()]} | {:error, reason()}
|
||||
@callback count(context(), [filter()], query_opts()) ::
|
||||
{:ok, count_result()} | {:error, reason()}
|
||||
@callback delete_by_request(context(), event()) :: {:ok, non_neg_integer()} | {:error, reason()}
|
||||
|
||||
@@ -16,6 +16,7 @@ defmodule Parrhesia.Storage.Moderation do
|
||||
@callback allow_pubkey(context(), pubkey()) :: :ok | {:error, reason()}
|
||||
@callback disallow_pubkey(context(), pubkey()) :: :ok | {:error, reason()}
|
||||
@callback pubkey_allowed?(context(), pubkey()) :: {:ok, boolean()} | {:error, reason()}
|
||||
@callback has_allowed_pubkeys?(context()) :: {:ok, boolean()} | {:error, reason()}
|
||||
|
||||
@callback ban_event(context(), event_id()) :: :ok | {:error, reason()}
|
||||
@callback unban_event(context(), event_id()) :: :ok | {:error, reason()}
|
||||
|
||||
310
lib/parrhesia/storage/partitions.ex
Normal file
310
lib/parrhesia/storage/partitions.ex
Normal file
@@ -0,0 +1,310 @@
|
||||
defmodule Parrhesia.Storage.Partitions do
|
||||
@moduledoc """
|
||||
Partition lifecycle helpers for Postgres `events` and `event_tags` monthly partitions.
|
||||
"""
|
||||
|
||||
import Ecto.Query
|
||||
|
||||
alias Parrhesia.Repo
|
||||
|
||||
@identifier_pattern ~r/^[a-zA-Z_][a-zA-Z0-9_]*$/
|
||||
@monthly_partition_pattern ~r/^events_(\d{4})_(\d{2})$/
|
||||
@events_partition_prefix "events"
|
||||
@event_tags_partition_prefix "event_tags"
|
||||
@default_months_ahead 2
|
||||
|
||||
@type monthly_partition :: %{
|
||||
name: String.t(),
|
||||
year: pos_integer(),
|
||||
month: pos_integer(),
|
||||
month_start_unix: non_neg_integer(),
|
||||
month_end_unix: non_neg_integer()
|
||||
}
|
||||
|
||||
@doc """
|
||||
Lists all `events_*` partitions excluding the default partition.
|
||||
"""
|
||||
@spec list_partitions() :: [String.t()]
|
||||
def list_partitions do
|
||||
query =
|
||||
from(table in "pg_tables",
|
||||
where: table.schemaname == "public",
|
||||
where: like(table.tablename, "events_%"),
|
||||
where: table.tablename != "events_default",
|
||||
select: table.tablename,
|
||||
order_by: [asc: table.tablename]
|
||||
)
|
||||
|
||||
Repo.all(query)
|
||||
end
|
||||
|
||||
@doc """
|
||||
Lists monthly event partitions that match `events_YYYY_MM` naming.
|
||||
"""
|
||||
@spec list_monthly_partitions() :: [monthly_partition()]
|
||||
def list_monthly_partitions do
|
||||
list_partitions()
|
||||
|> Enum.map(&parse_monthly_partition/1)
|
||||
|> Enum.reject(&is_nil/1)
|
||||
|> Enum.sort_by(&{&1.year, &1.month})
|
||||
end
|
||||
|
||||
@doc """
|
||||
Ensures monthly partitions exist for the current month and `months_ahead` future months.
|
||||
"""
|
||||
@spec ensure_monthly_partitions(keyword()) :: :ok | {:error, term()}
|
||||
def ensure_monthly_partitions(opts \\ []) when is_list(opts) do
|
||||
months_ahead =
|
||||
opts
|
||||
|> Keyword.get(:months_ahead, @default_months_ahead)
|
||||
|> normalize_non_negative_integer(@default_months_ahead)
|
||||
|
||||
reference_date =
|
||||
opts
|
||||
|> Keyword.get(:reference_date, Date.utc_today())
|
||||
|> normalize_reference_date()
|
||||
|
||||
reference_month = month_start(reference_date)
|
||||
|
||||
offsets =
|
||||
if months_ahead == 0 do
|
||||
[0]
|
||||
else
|
||||
Enum.to_list(0..months_ahead)
|
||||
end
|
||||
|
||||
Enum.reduce_while(offsets, :ok, fn offset, :ok ->
|
||||
target_month = shift_month(reference_month, offset)
|
||||
|
||||
case create_monthly_partitions(target_month) do
|
||||
:ok -> {:cont, :ok}
|
||||
{:error, reason} -> {:halt, {:error, reason}}
|
||||
end
|
||||
end)
|
||||
end
|
||||
|
||||
@doc """
|
||||
Returns the current database size in bytes.
|
||||
"""
|
||||
@spec database_size_bytes() :: {:ok, non_neg_integer()} | {:error, term()}
|
||||
def database_size_bytes do
|
||||
case Repo.query("SELECT pg_database_size(current_database())") do
|
||||
{:ok, %{rows: [[size]]}} when is_integer(size) and size >= 0 -> {:ok, size}
|
||||
{:ok, _result} -> {:error, :unexpected_result}
|
||||
{:error, reason} -> {:error, reason}
|
||||
end
|
||||
end
|
||||
|
||||
@doc """
|
||||
Drops an event partition table by name.
|
||||
|
||||
For monthly `events_YYYY_MM` partitions, the matching `event_tags_YYYY_MM`
|
||||
partition is dropped first to keep partition lifecycle aligned.
|
||||
"""
|
||||
@spec drop_partition(String.t()) :: :ok | {:error, term()}
|
||||
def drop_partition(partition_name) when is_binary(partition_name) do
|
||||
if protected_partition?(partition_name) do
|
||||
{:error, :protected_partition}
|
||||
else
|
||||
drop_partition_tables(partition_name)
|
||||
end
|
||||
end
|
||||
|
||||
@doc """
|
||||
Returns the monthly `events` partition name for a date.
|
||||
"""
|
||||
@spec month_partition_name(Date.t()) :: String.t()
|
||||
def month_partition_name(%Date{} = date) do
|
||||
monthly_partition_name(@events_partition_prefix, date)
|
||||
end
|
||||
|
||||
@doc """
|
||||
Returns the monthly `event_tags` partition name for a date.
|
||||
"""
|
||||
@spec event_tags_month_partition_name(Date.t()) :: String.t()
|
||||
def event_tags_month_partition_name(%Date{} = date) do
|
||||
monthly_partition_name(@event_tags_partition_prefix, date)
|
||||
end
|
||||
|
||||
# Builds a "<prefix>_YYYY_MM" partition name with a zero-padded month.
defp monthly_partition_name(prefix, %Date{year: year, month: month}) do
  padded_month = month |> Integer.to_string() |> String.pad_leading(2, "0")
  Enum.join([prefix, Integer.to_string(year), padded_month], "_")
end
|
||||
|
||||
# Creates both the events and event_tags partitions covering the month of
# `month_date`. The first failure short-circuits and is returned as-is.
defp create_monthly_partitions(%Date{} = month_date) do
  {start_unix, end_unix} = month_bounds_unix(month_date.year, month_date.month)

  with :ok <-
         create_monthly_partition(
           month_partition_name(month_date),
           @events_partition_prefix,
           start_unix,
           end_unix
         ) do
    create_monthly_partition(
      event_tags_month_partition_name(month_date),
      @event_tags_partition_prefix,
      start_unix,
      end_unix
    )
  end
end
|
||||
|
||||
# Issues `CREATE TABLE ... PARTITION OF ... FOR VALUES FROM/TO` for one
# monthly range partition. Identifiers are validated and quoted via
# quote_identifier!/1, and the range bounds are integers, so the string
# interpolation here cannot inject SQL.
defp create_monthly_partition(partition_name, parent_table_name, start_unix, end_unix) do
  quoted_partition_name = quote_identifier!(partition_name)
  quoted_parent_table_name = quote_identifier!(parent_table_name)

  ddl = """
  CREATE TABLE IF NOT EXISTS #{quoted_partition_name}
  PARTITION OF #{quoted_parent_table_name}
  FOR VALUES FROM (#{start_unix}) TO (#{end_unix})
  """

  case Repo.query(ddl) do
    {:ok, _result} -> :ok
    {:error, _reason} = error -> error
  end
end
|
||||
|
||||
# Routes a drop request: recognized monthly partitions take the paired-drop
# path (tags first, then events); anything else is dropped directly.
defp drop_partition_tables(partition_name) do
  if monthly_partition = parse_monthly_partition(partition_name) do
    drop_monthly_partition(partition_name, monthly_partition)
  else
    drop_table(partition_name)
  end
end
|
||||
|
||||
# Drops a monthly events partition together with its paired event_tags
# partition: detach from the events parent first (when attached), then drop
# the tags partition, then the events partition itself.
defp drop_monthly_partition(partition_name, %{year: year, month: month}) do
  tags_partition_name =
    monthly_partition_name(@event_tags_partition_prefix, Date.new!(year, month, 1))

  with :ok <- maybe_detach_events_partition(partition_name),
       :ok <- drop_table(tags_partition_name) do
    drop_table(partition_name)
  end
end
|
||||
|
||||
# Detaches `partition_name` from the events parent when it is still
# attached; a no-op (:ok) otherwise, so dropping a detached partition works.
defp maybe_detach_events_partition(partition_name) do
  if attached_partition?(partition_name, @events_partition_prefix) do
    detach_sql =
      "ALTER TABLE #{quote_identifier!(@events_partition_prefix)} " <>
        "DETACH PARTITION #{quote_identifier!(partition_name)}"

    case Repo.query(detach_sql) do
      {:ok, _result} -> :ok
      {:error, _reason} = error -> error
    end
  else
    :ok
  end
end
|
||||
|
||||
# True when `partition_name` is currently attached as a partition of
# `parent_table_name` in the public schema (per pg_inherits). Any query
# failure or unexpected shape is treated as "not attached".
defp attached_partition?(partition_name, parent_table_name) do
  sql = """
  SELECT 1
  FROM pg_inherits AS inheritance
  JOIN pg_class AS child ON child.oid = inheritance.inhrelid
  JOIN pg_namespace AS child_ns ON child_ns.oid = child.relnamespace
  JOIN pg_class AS parent ON parent.oid = inheritance.inhparent
  JOIN pg_namespace AS parent_ns ON parent_ns.oid = parent.relnamespace
  WHERE child_ns.nspname = 'public'
  AND parent_ns.nspname = 'public'
  AND child.relname = $1
  AND parent.relname = $2
  LIMIT 1
  """

  match?(
    {:ok, %{rows: [[1]]}},
    Repo.query(sql, [partition_name, parent_table_name])
  )
end
|
||||
|
||||
# Drops a table if it exists; identifier is validated/quoted first.
defp drop_table(table_name) do
  "DROP TABLE IF EXISTS #{quote_identifier!(table_name)}"
  |> Repo.query()
  |> case do
    {:ok, _result} -> :ok
    {:error, _reason} = error -> error
  end
end
|
||||
|
||||
# Parent tables and their default partitions must never be dropped.
defp protected_partition?(partition_name) do
  Enum.member?(
    ["events", "events_default", "event_tags", "event_tags_default"],
    partition_name
  )
end
|
||||
|
||||
# Parses a partition name against @monthly_partition_pattern. Returns a map
# with the partition's year/month and its unix-second month bounds, or nil
# when the name is not a valid monthly partition.
defp parse_monthly_partition(partition_name) do
  case Regex.run(@monthly_partition_pattern, partition_name, capture: :all_but_first) do
    [year_text, month_text] ->
      # The `{n, ""}` matches assert the captures are pure digit runs; a
      # non-exhaustive parse would crash here rather than mis-parse.
      # (Assumes @monthly_partition_pattern captures digits only — the
      # pattern itself is defined outside this view.)
      {year, ""} = Integer.parse(year_text)
      {month, ""} = Integer.parse(month_text)

      # The regex can match a 2-digit month like "13"; reject out-of-range
      # months here rather than letting Date.new!/3 raise downstream.
      if month in 1..12 do
        {month_start_unix, month_end_unix} = month_bounds_unix(year, month)

        %{
          name: partition_name,
          year: year,
          month: month,
          month_start_unix: month_start_unix,
          month_end_unix: month_end_unix
        }
      else
        nil
      end

    # No match (or unexpected capture shape) -> not a monthly partition.
    _other ->
      nil
  end
end
|
||||
|
||||
# Half-open unix-second bounds [start, end) for a calendar month: midnight
# UTC on the 1st of the month up to midnight UTC on the 1st of the next.
defp month_bounds_unix(year, month) do
  first_day = Date.new!(year, month, 1)
  {date_to_unix(first_day), date_to_unix(shift_month(first_day, 1))}
end

# Unix timestamp (seconds) of midnight UTC on `date`.
defp date_to_unix(%Date{} = date) do
  DateTime.to_unix(DateTime.new!(date, ~T[00:00:00], "Etc/UTC"))
end

# First day of the month containing `date`.
defp month_start(%Date{} = date), do: Date.new!(date.year, date.month, 1)
|
||||
|
||||
# Shifts `date` by `month_delta` calendar months, returning the first day of
# the resulting month.
#
# Uses floored division/modulo instead of truncating div/rem: the two agree
# for non-negative month indices (all realistic dates), but div/rem round
# toward zero and would resolve a negative month index (large negative
# deltas from early years) into the wrong year/month, then crash in
# Date.new!/3 on month 0 or below.
defp shift_month(%Date{} = date, month_delta) when is_integer(month_delta) do
  month_index = date.year * 12 + (date.month - 1) + month_delta

  Date.new!(
    Integer.floor_div(month_index, 12),
    Integer.mod(month_index, 12) + 1,
    1
  )
end
|
||||
|
||||
# Anything other than a %Date{} falls back to "today" in UTC.
defp normalize_reference_date(%Date{} = date), do: date
defp normalize_reference_date(_other), do: Date.utc_today()

# Only non-negative integers are accepted; everything else yields `default`.
defp normalize_non_negative_integer(value, _default)
     when is_integer(value) and value >= 0 do
  value
end

defp normalize_non_negative_integer(_value, default), do: default
|
||||
|
||||
# Double-quotes a SQL identifier for interpolation into dynamically built
# DDL. Only identifiers matching @identifier_pattern are accepted — this
# whitelist is the guard against SQL injection for the string-built
# CREATE/DROP/ALTER statements in this module, so every dynamic identifier
# must pass through here. Raises ArgumentError otherwise.
# (The pattern itself is defined outside this view; presumably it forbids
# quotes and whitespace — verify before loosening it.)
defp quote_identifier!(identifier) when is_binary(identifier) do
  if Regex.match?(@identifier_pattern, identifier) do
    ~s("#{identifier}")
  else
    raise ArgumentError, "invalid SQL identifier: #{inspect(identifier)}"
  end
end

# Non-binary input is always rejected.
defp quote_identifier!(identifier) do
  raise ArgumentError, "invalid SQL identifier: #{inspect(identifier)}"
end
|
||||
end
|
||||
@@ -12,6 +12,8 @@ defmodule Parrhesia.Storage.Supervisor do
|
||||
@impl true
|
||||
def init(_init_arg) do
|
||||
children = [
|
||||
{Parrhesia.Storage.Adapters.Postgres.ModerationCache,
|
||||
name: Parrhesia.Storage.Adapters.Postgres.ModerationCache},
|
||||
Parrhesia.Repo
|
||||
]
|
||||
|
||||
|
||||
@@ -11,6 +11,13 @@ defmodule Parrhesia.Subscriptions.Index do
|
||||
alias Parrhesia.Protocol.Filter
|
||||
|
||||
@wildcard_key :all
|
||||
@subscriptions_table_name :parrhesia_subscriptions_table
|
||||
@kind_index_table_name :parrhesia_subscription_kind_index
|
||||
@author_index_table_name :parrhesia_subscription_author_index
|
||||
@tag_index_table_name :parrhesia_subscription_tag_index
|
||||
@kind_wildcard_table_name :parrhesia_subscription_kind_wildcard_index
|
||||
@author_wildcard_table_name :parrhesia_subscription_author_wildcard_index
|
||||
@tag_wildcard_table_name :parrhesia_subscription_tag_wildcard_index
|
||||
|
||||
@type subscription_id :: String.t()
|
||||
@type owner :: pid()
|
||||
@@ -20,11 +27,12 @@ defmodule Parrhesia.Subscriptions.Index do
|
||||
@spec start_link(keyword()) :: GenServer.on_start()
|
||||
def start_link(opts \\ []) do
|
||||
name = Keyword.get(opts, :name)
|
||||
init_arg = %{named_tables?: name == __MODULE__}
|
||||
|
||||
if is_nil(name) do
|
||||
GenServer.start_link(__MODULE__, :ok)
|
||||
GenServer.start_link(__MODULE__, init_arg)
|
||||
else
|
||||
GenServer.start_link(__MODULE__, :ok, name: name)
|
||||
GenServer.start_link(__MODULE__, init_arg, name: name)
|
||||
end
|
||||
end
|
||||
|
||||
@@ -65,6 +73,13 @@ defmodule Parrhesia.Subscriptions.Index do
|
||||
end
|
||||
|
||||
@spec candidate_subscription_keys(GenServer.server(), map()) :: [subscription_key()]
|
||||
def candidate_subscription_keys(__MODULE__, event) do
|
||||
case named_tables() do
|
||||
{:ok, tables} -> candidate_subscription_keys_for_tables(tables, event)
|
||||
:error -> GenServer.call(__MODULE__, {:candidate_subscription_keys, event})
|
||||
end
|
||||
end
|
||||
|
||||
def candidate_subscription_keys(server, event) do
|
||||
GenServer.call(server, {:candidate_subscription_keys, event})
|
||||
end
|
||||
@@ -76,20 +91,15 @@ defmodule Parrhesia.Subscriptions.Index do
|
||||
end
|
||||
|
||||
@impl true
|
||||
def init(:ok) do
|
||||
def init(%{named_tables?: named_tables?}) do
|
||||
tables = create_tables(named_tables?)
|
||||
|
||||
{:ok,
|
||||
%{
|
||||
subscriptions_table: :ets.new(:subscriptions_table, [:set, :protected]),
|
||||
kind_index_table: :ets.new(:subscription_kind_index, [:bag, :protected]),
|
||||
author_index_table: :ets.new(:subscription_author_index, [:bag, :protected]),
|
||||
tag_index_table: :ets.new(:subscription_tag_index, [:bag, :protected]),
|
||||
kind_wildcard_table: :ets.new(:subscription_kind_wildcard_index, [:bag, :protected]),
|
||||
author_wildcard_table: :ets.new(:subscription_author_wildcard_index, [:bag, :protected]),
|
||||
tag_wildcard_table: :ets.new(:subscription_tag_wildcard_index, [:bag, :protected]),
|
||||
Map.merge(tables, %{
|
||||
owner_subscriptions: %{},
|
||||
owner_monitors: %{},
|
||||
monitor_owners: %{}
|
||||
}}
|
||||
})}
|
||||
end
|
||||
|
||||
@impl true
|
||||
@@ -128,14 +138,7 @@ defmodule Parrhesia.Subscriptions.Index do
|
||||
end
|
||||
|
||||
def handle_call({:candidate_subscription_keys, event}, _from, state) do
|
||||
candidates =
|
||||
state
|
||||
|> kind_candidates(event)
|
||||
|> MapSet.intersection(author_candidates(state, event))
|
||||
|> MapSet.intersection(tag_candidates(state, event))
|
||||
|> MapSet.to_list()
|
||||
|
||||
{:reply, candidates, state}
|
||||
{:reply, candidate_subscription_keys_for_tables(state, event), state}
|
||||
end
|
||||
|
||||
def handle_call({:fetch_filters, owner_pid, subscription_id}, _from, state) do
|
||||
@@ -371,28 +374,110 @@ defmodule Parrhesia.Subscriptions.Index do
|
||||
|> update_in([:owner_subscriptions], &Map.delete(&1, owner_pid))
|
||||
end
|
||||
|
||||
defp kind_candidates(state, event) do
|
||||
defp create_tables(true) do
|
||||
%{
|
||||
subscriptions_table:
|
||||
:ets.new(@subscriptions_table_name, [
|
||||
:set,
|
||||
:protected,
|
||||
:named_table,
|
||||
read_concurrency: true
|
||||
]),
|
||||
kind_index_table:
|
||||
:ets.new(@kind_index_table_name, [:bag, :protected, :named_table, read_concurrency: true]),
|
||||
author_index_table:
|
||||
:ets.new(@author_index_table_name, [
|
||||
:bag,
|
||||
:protected,
|
||||
:named_table,
|
||||
read_concurrency: true
|
||||
]),
|
||||
tag_index_table:
|
||||
:ets.new(@tag_index_table_name, [:bag, :protected, :named_table, read_concurrency: true]),
|
||||
kind_wildcard_table:
|
||||
:ets.new(@kind_wildcard_table_name, [
|
||||
:bag,
|
||||
:protected,
|
||||
:named_table,
|
||||
read_concurrency: true
|
||||
]),
|
||||
author_wildcard_table:
|
||||
:ets.new(@author_wildcard_table_name, [
|
||||
:bag,
|
||||
:protected,
|
||||
:named_table,
|
||||
read_concurrency: true
|
||||
]),
|
||||
tag_wildcard_table:
|
||||
:ets.new(@tag_wildcard_table_name, [
|
||||
:bag,
|
||||
:protected,
|
||||
:named_table,
|
||||
read_concurrency: true
|
||||
])
|
||||
}
|
||||
end
|
||||
|
||||
defp create_tables(false) do
|
||||
%{
|
||||
subscriptions_table: :ets.new(:subscriptions_table, [:set, :protected]),
|
||||
kind_index_table: :ets.new(:subscription_kind_index, [:bag, :protected]),
|
||||
author_index_table: :ets.new(:subscription_author_index, [:bag, :protected]),
|
||||
tag_index_table: :ets.new(:subscription_tag_index, [:bag, :protected]),
|
||||
kind_wildcard_table: :ets.new(:subscription_kind_wildcard_index, [:bag, :protected]),
|
||||
author_wildcard_table: :ets.new(:subscription_author_wildcard_index, [:bag, :protected]),
|
||||
tag_wildcard_table: :ets.new(:subscription_tag_wildcard_index, [:bag, :protected])
|
||||
}
|
||||
end
|
||||
|
||||
defp named_tables do
|
||||
tables = %{
|
||||
subscriptions_table: :ets.whereis(@subscriptions_table_name),
|
||||
kind_index_table: :ets.whereis(@kind_index_table_name),
|
||||
author_index_table: :ets.whereis(@author_index_table_name),
|
||||
tag_index_table: :ets.whereis(@tag_index_table_name),
|
||||
kind_wildcard_table: :ets.whereis(@kind_wildcard_table_name),
|
||||
author_wildcard_table: :ets.whereis(@author_wildcard_table_name),
|
||||
tag_wildcard_table: :ets.whereis(@tag_wildcard_table_name)
|
||||
}
|
||||
|
||||
if Enum.any?(tables, fn {_key, table_ref} -> table_ref == :undefined end) do
|
||||
:error
|
||||
else
|
||||
{:ok, tables}
|
||||
end
|
||||
end
|
||||
|
||||
defp candidate_subscription_keys_for_tables(tables, event) do
|
||||
tables
|
||||
|> kind_candidates(event)
|
||||
|> MapSet.intersection(author_candidates(tables, event))
|
||||
|> MapSet.intersection(tag_candidates(tables, event))
|
||||
|> MapSet.to_list()
|
||||
end
|
||||
|
||||
defp kind_candidates(tables, event) do
|
||||
event
|
||||
|> Map.get("kind")
|
||||
|> index_candidates_for_value(state.kind_index_table, state.kind_wildcard_table)
|
||||
|> index_candidates_for_value(tables.kind_index_table, tables.kind_wildcard_table)
|
||||
end
|
||||
|
||||
defp author_candidates(state, event) do
|
||||
defp author_candidates(tables, event) do
|
||||
event
|
||||
|> Map.get("pubkey")
|
||||
|> index_candidates_for_value(state.author_index_table, state.author_wildcard_table)
|
||||
|> index_candidates_for_value(tables.author_index_table, tables.author_wildcard_table)
|
||||
end
|
||||
|
||||
defp tag_candidates(state, event) do
|
||||
defp tag_candidates(tables, event) do
|
||||
tag_pairs = event_tag_pairs(Map.get(event, "tags"))
|
||||
wildcard_candidates = lookup_candidates(state.tag_wildcard_table, @wildcard_key)
|
||||
wildcard_candidates = lookup_candidates(tables.tag_wildcard_table, @wildcard_key)
|
||||
|
||||
if MapSet.size(tag_pairs) == 0 do
|
||||
wildcard_candidates
|
||||
else
|
||||
matched_candidates =
|
||||
Enum.reduce(tag_pairs, MapSet.new(), fn {tag_name, value}, acc ->
|
||||
MapSet.union(acc, lookup_candidates(state.tag_index_table, {tag_name, value}))
|
||||
MapSet.union(acc, lookup_candidates(tables.tag_index_table, {tag_name, value}))
|
||||
end)
|
||||
|
||||
MapSet.union(matched_candidates, wildcard_candidates)
|
||||
|
||||
@@ -11,12 +11,28 @@ defmodule Parrhesia.Subscriptions.Supervisor do
|
||||
|
||||
@impl true
|
||||
def init(_init_arg) do
|
||||
children = [
|
||||
{Parrhesia.Subscriptions.Index, name: Parrhesia.Subscriptions.Index},
|
||||
{Parrhesia.Negentropy.Sessions, name: Parrhesia.Negentropy.Sessions},
|
||||
{Parrhesia.Fanout.MultiNode, name: Parrhesia.Fanout.MultiNode}
|
||||
]
|
||||
children =
|
||||
[
|
||||
{Parrhesia.Subscriptions.Index, name: Parrhesia.Subscriptions.Index},
|
||||
{Registry, keys: :unique, name: Parrhesia.API.Stream.Registry},
|
||||
{DynamicSupervisor, strategy: :one_for_one, name: Parrhesia.API.Stream.Supervisor}
|
||||
] ++
|
||||
negentropy_children() ++ [{Parrhesia.Fanout.MultiNode, name: Parrhesia.Fanout.MultiNode}]
|
||||
|
||||
Supervisor.init(children, strategy: :one_for_one)
|
||||
end
|
||||
|
||||
# NIP-77 session supervision is optional: return its child spec only when
# the feature flag is enabled, otherwise contribute no children.
defp negentropy_children do
  case negentropy_enabled?() do
    true -> [{Parrhesia.Negentropy.Sessions, name: Parrhesia.Negentropy.Sessions}]
    false -> []
  end
end
|
||||
|
||||
# Reads the :nip_77_negentropy feature flag at runtime; enabled by default
# when the flag (or the whole :features list) is absent.
defp negentropy_enabled? do
  features = Application.get_env(:parrhesia, :features, [])
  Keyword.get(features, :nip_77_negentropy, true)
end
|
||||
end
|
||||
|
||||
60
lib/parrhesia/sync/relay_info_client.ex
Normal file
60
lib/parrhesia/sync/relay_info_client.ex
Normal file
@@ -0,0 +1,60 @@
|
||||
defmodule Parrhesia.Sync.RelayInfoClient do
  @moduledoc false

  alias Parrhesia.Sync.TLS

  @doc """
  Fetches the remote relay's NIP-11 info document and verifies that its
  advertised `pubkey` matches `server.auth_pubkey`.

  Returns `:ok` on a match, `{:error, :remote_identity_mismatch}` when the
  identities differ, and propagates request/decoding errors otherwise.

  Options:

    * `:request_fun` - 2-arity `(url, connect_opts)` replacement for the
      default `Req`-based request (test seam).
  """
  @spec verify_remote_identity(map(), keyword()) :: :ok | {:error, term()}
  def verify_remote_identity(server, opts \\ []) do
    request_fun = Keyword.get(opts, :request_fun, &default_request/2)

    with {:ok, response} <- request_fun.(relay_info_url(server.url), request_opts(server)),
         {:ok, pubkey} <- extract_pubkey(response) do
      # extract_pubkey/1 downcases the remote key; downcase the configured
      # key too so hex-case differences in stored config cannot cause a
      # spurious identity mismatch.
      if pubkey == String.downcase(server.auth_pubkey) do
        :ok
      else
        {:error, :remote_identity_mismatch}
      end
    end
  end

  # Default NIP-11 request; body decoding is disabled so JSON parsing stays
  # under our control in extract_pubkey/1.
  defp default_request(url, opts) do
    case Req.get(
           url: url,
           headers: [{"accept", "application/nostr+json"}],
           decode_body: false,
           connect_options: opts
         ) do
      {:ok, response} -> {:ok, response}
      {:error, reason} -> {:error, reason}
    end
  end

  # Extracts a non-empty "pubkey" from a successful NIP-11 JSON body,
  # normalized to lowercase.
  defp extract_pubkey(%Req.Response{status: 200, body: body}) when is_binary(body) do
    with {:ok, payload} <- JSON.decode(body),
         pubkey when is_binary(pubkey) and pubkey != "" <- Map.get(payload, "pubkey") do
      {:ok, String.downcase(pubkey)}
    else
      nil -> {:error, :missing_remote_identity}
      {:error, reason} -> {:error, reason}
      _other -> {:error, :missing_remote_identity}
    end
  end

  defp extract_pubkey(%Req.Response{status: status}),
    do: {:error, {:relay_info_request_failed, status}}

  defp extract_pubkey(_response), do: {:error, :invalid_relay_info}

  defp request_opts(%{tls: %{mode: :disabled}}), do: []
  defp request_opts(%{tls: tls}), do: TLS.req_connect_options(tls)

  # Maps the relay's websocket URL onto the HTTP(S) URL that serves NIP-11.
  # The catch-all clause passes unknown schemes (e.g. an already-http URL)
  # through instead of raising FunctionClauseError, as the bare ws/wss
  # match used to.
  defp relay_info_url(relay_url) do
    relay_url
    |> URI.parse()
    |> Map.update!(:scheme, fn
      "wss" -> "https"
      "ws" -> "http"
      other -> other
    end)
    |> URI.to_string()
  end
end
|
||||
43
lib/parrhesia/sync/supervisor.ex
Normal file
43
lib/parrhesia/sync/supervisor.ex
Normal file
@@ -0,0 +1,43 @@
|
||||
defmodule Parrhesia.Sync.Supervisor do
  @moduledoc """
  Supervision entrypoint for sync control-plane processes.
  """

  use Supervisor

  def start_link(init_arg \\ []) do
    Supervisor.start_link(
      __MODULE__,
      init_arg,
      name: Keyword.get(init_arg, :name, __MODULE__)
    )
  end

  @impl true
  def init(init_arg) do
    # Process names default to the module-level singletons but can be
    # overridden per supervision tree (e.g. in tests).
    worker_registry = Keyword.get(init_arg, :worker_registry, Parrhesia.Sync.WorkerRegistry)
    worker_supervisor = Keyword.get(init_arg, :worker_supervisor, Parrhesia.Sync.WorkerSupervisor)
    manager_name = Keyword.get(init_arg, :manager, Parrhesia.API.Sync.Manager)

    Supervisor.init(
      [
        {Registry, keys: :unique, name: worker_registry},
        {DynamicSupervisor, strategy: :one_for_one, name: worker_supervisor},
        {Parrhesia.API.Sync.Manager,
         manager_opts(init_arg, manager_name, worker_registry, worker_supervisor)}
      ],
      strategy: :one_for_one
    )
  end

  # Base manager options plus whichever pass-through settings the caller
  # supplied in init_arg.
  defp manager_opts(init_arg, manager_name, worker_registry, worker_supervisor) do
    passthrough =
      Keyword.take(init_arg, [
        :path,
        :start_workers?,
        :transport_module,
        :relay_info_opts,
        :transport_opts
      ])

    [
      name: manager_name,
      worker_registry: worker_registry,
      worker_supervisor: worker_supervisor
    ] ++ passthrough
  end
end
|
||||
112
lib/parrhesia/sync/tls.ex
Normal file
112
lib/parrhesia/sync/tls.ex
Normal file
@@ -0,0 +1,112 @@
|
||||
defmodule Parrhesia.Sync.TLS do
  @moduledoc false

  # TLS policy for outbound sync connections: either disabled outright, or
  # required with full peer verification plus optional SPKI pinning.
  @type tls_config :: %{
          mode: :required | :disabled,
          hostname: String.t(),
          pins: [%{type: :spki_sha256, value: String.t()}]
        }

  # Options for WebSockex connections. `insecure: true` disables TLS
  # verification entirely (only for mode :disabled).
  @spec websocket_options(tls_config()) :: keyword()
  def websocket_options(%{mode: :disabled}), do: [insecure: true]

  def websocket_options(%{mode: :required} = tls) do
    [
      ssl_options: transport_opts(tls)
    ]
  end

  # Connect options for Req; empty when TLS checks are disabled.
  @spec req_connect_options(tls_config()) :: keyword()
  def req_connect_options(%{mode: :disabled}), do: []

  def req_connect_options(%{mode: :required} = tls) do
    [
      transport_opts: transport_opts(tls)
    ]
  end

  # :ssl transport options: verify the peer against the system CA store,
  # send SNI for `hostname`, use HTTPS-style hostname matching, and — when
  # pins are configured — install a verify_fun enforcing SPKI pinning.
  def transport_opts(%{hostname: hostname, pins: pins}) do
    [
      verify: :verify_peer,
      cacerts: system_cacerts(),
      server_name_indication: String.to_charlist(hostname),
      customize_hostname_check: [
        match_fun: :public_key.pkix_verify_hostname_match_fun(:https)
      ]
    ]
    |> maybe_put_verify_fun(pins)
  end

  # No pins configured -> standard verification only.
  defp maybe_put_verify_fun(options, []), do: options

  # The verify_fun state tracks whether ANY certificate seen so far in the
  # chain matched a pin; the final :valid_peer event acts as the gate.
  defp maybe_put_verify_fun(options, pins) do
    Keyword.put(
      options,
      :verify_fun,
      {&verify_certificate/3, %{pins: MapSet.new(Enum.map(pins, & &1.value)), matched?: false}}
    )
  end

  # End of chain: accept the peer only if some certificate matched a pin.
  defp verify_certificate(_cert, :valid_peer, %{matched?: true} = state), do: {:valid, state}
  defp verify_certificate(_cert, :valid_peer, _state), do: {:fail, :pin_mismatch}

  # Ordinary X.509 validation failures are propagated unchanged.
  defp verify_certificate(_cert, {:bad_cert, reason}, _state), do: {:fail, reason}

  # Intermediate chain events: record whether this certificate's SPKI hash
  # is pinned. Certificates may arrive as DER binaries or as decoded
  # records; any decoding error fails the handshake rather than silently
  # skipping the pin check.
  defp verify_certificate(cert, _event, state) when is_binary(cert) do
    matched? = MapSet.member?(state.pins, spki_pin_from_verify(cert))
    {:valid, %{state | matched?: state.matched? or matched?}}
  rescue
    _error -> {:fail, :invalid_certificate}
  end

  defp verify_certificate({:OTPCertificate, _tbs, _sig_alg, _sig} = cert, _event, state) do
    matched? = MapSet.member?(state.pins, spki_pin_from_verify(cert))
    {:valid, %{state | matched?: state.matched? or matched?}}
  rescue
    _error -> {:fail, :invalid_certificate}
  end

  defp verify_certificate({:Certificate, _tbs, _sig_alg, _sig} = cert, _event, state) do
    matched? = MapSet.member?(state.pins, spki_pin_from_verify(cert))
    {:valid, %{state | matched?: state.matched? or matched?}}
  rescue
    _error -> {:fail, :invalid_certificate}
  end

  # Any other event (e.g. extension events) passes through untouched.
  defp verify_certificate(_cert, _event, state), do: {:valid, state}

  # Pin = Base64(SHA-256(DER(SubjectPublicKeyInfo))).
  # With :plain decoding, elem(1) of the Certificate record is the
  # TBSCertificate record and elem(7) of that is its subjectPublicKeyInfo
  # field — NOTE(review): tied to OTP's record layout; confirm on OTP
  # upgrades.
  defp spki_pin(cert_der) do
    cert = :public_key.pkix_decode_cert(cert_der, :plain)
    spki = cert |> elem(1) |> elem(7)

    :public_key.der_encode(:SubjectPublicKeyInfo, spki)
    |> then(&:crypto.hash(:sha256, &1))
    |> Base.encode64()
  end

  # Normalizes whatever form the verify_fun handed us back to DER before
  # computing the pin.
  defp spki_pin_from_verify(cert) when is_binary(cert), do: spki_pin(cert)

  defp spki_pin_from_verify({:OTPCertificate, _tbs, _sig_alg, _sig} = cert) do
    cert
    |> then(&:public_key.pkix_encode(:OTPCertificate, &1, :otp))
    |> spki_pin()
  end

  defp spki_pin_from_verify({:Certificate, _tbs, _sig_alg, _sig} = cert) do
    cert
    |> then(&:public_key.der_encode(:Certificate, &1))
    |> spki_pin()
  end

  defp spki_pin_from_verify(_cert) do
    raise(ArgumentError, "invalid certificate")
  end

  # :public_key.cacerts_get/0 exists on OTP >= 25; older releases get an
  # empty CA list, so verification then fails closed rather than open.
  defp system_cacerts do
    if function_exported?(:public_key, :cacerts_get, 0) do
      :public_key.cacerts_get()
    else
      []
    end
  end
end
|
||||
7
lib/parrhesia/sync/transport.ex
Normal file
7
lib/parrhesia/sync/transport.ex
Normal file
@@ -0,0 +1,7 @@
|
||||
defmodule Parrhesia.Sync.Transport do
  @moduledoc false

  # Behaviour for sync transports (default implementation:
  # Parrhesia.Sync.Transport.WebSockexClient).
  #
  # connect(owner, server, opts): starts a connection process. `owner` is
  # the pid that will receive `{:sync_transport, transport_pid, event,
  # payload}` messages; `server` is the server config map; `opts` are
  # transport-specific options.
  @callback connect(pid(), map(), keyword()) :: {:ok, pid()} | {:error, term()}
  # send_json(pid, term): delivers `term` to the remote peer as JSON.
  @callback send_json(pid(), term()) :: :ok | {:error, term()}
  # close(pid): requests connection shutdown; always returns :ok.
  @callback close(pid()) :: :ok
end
|
||||
93
lib/parrhesia/sync/transport/websockex_client.ex
Normal file
93
lib/parrhesia/sync/transport/websockex_client.ex
Normal file
@@ -0,0 +1,93 @@
|
||||
defmodule Parrhesia.Sync.Transport.WebSockexClient do
  @moduledoc false

  # WebSockex-backed implementation of Parrhesia.Sync.Transport. All
  # transport events are forwarded to the owning process as
  # `{:sync_transport, self(), :connected | :frame | :disconnected, payload}`
  # messages.
  use WebSockex

  alias Parrhesia.Sync.TLS

  @behaviour Parrhesia.Sync.Transport

  # Starts an unlinked WebSockex process connected to `server.url`.
  # TLS options come from the server config; caller-supplied
  # :websocket_opts can override them (ssl_options are deep-merged).
  @impl true
  def connect(owner, server, opts \\ []) do
    state = %{
      owner: owner,
      server: server
    }

    transport_opts =
      server.tls
      |> TLS.websocket_options()
      |> merge_websocket_opts(Keyword.get(opts, :websocket_opts, []))
      # Report the very first connection failure via handle_disconnect
      # instead of failing start/4.
      |> Keyword.put(:handle_initial_conn_failure, true)

    WebSockex.start(server.url, __MODULE__, state, transport_opts)
  end

  # Fire-and-forget: the frame is encoded and sent from the WebSockex
  # process (see handle_cast/2).
  @impl true
  def send_json(pid, payload) do
    WebSockex.cast(pid, {:send_json, payload})
  end

  # Asynchronous close request; always returns :ok immediately.
  @impl true
  def close(pid) do
    WebSockex.cast(pid, :close)
    :ok
  end

  # Notifies the owner that the socket is up, including the HTTP upgrade
  # response headers.
  @impl true
  def handle_connect(conn, state) do
    send(state.owner, {:sync_transport, self(), :connected, %{resp_headers: conn.resp_headers}})
    {:ok, state}
  end

  # Text frames are JSON-decoded before forwarding; undecodable payloads
  # are forwarded as {:decode_error, reason, payload} so the owner decides
  # how to react.
  @impl true
  def handle_frame({:text, payload}, state) do
    message =
      case JSON.decode(payload) do
        {:ok, frame} -> frame
        {:error, reason} -> {:decode_error, reason, payload}
      end

    send(state.owner, {:sync_transport, self(), :frame, message})
    {:ok, state}
  end

  # Non-text frames (binary, ping/pong) are forwarded verbatim.
  def handle_frame(frame, state) do
    send(state.owner, {:sync_transport, self(), :frame, frame})
    {:ok, state}
  end

  # Encodes and replies with a text frame on the socket. JSON.encode!/1
  # raises on unencodable terms, crashing this transport process.
  @impl true
  def handle_cast({:send_json, payload}, state) do
    {:reply, {:text, JSON.encode!(payload)}, state}
  end

  # {:close, state} instructs WebSockex to close the connection.
  def handle_cast(:close, state) do
    {:close, state}
  end

  # Forwards the disconnect (with WebSockex's status map) to the owner and
  # does not attempt to reconnect — reconnection policy lives with the
  # owner.
  @impl true
  def handle_disconnect(status, state) do
    send(state.owner, {:sync_transport, self(), :disconnected, status})
    {:ok, state}
  end

  # Merges caller overrides into the base websocket options. :ssl_options
  # is merged key-by-key (rather than replaced wholesale) so an override
  # can tweak one TLS setting without discarding pinning/verification.
  defp merge_websocket_opts(base_opts, override_opts) do
    override_ssl_options = Keyword.get(override_opts, :ssl_options)

    merged_ssl_options =
      case {Keyword.get(base_opts, :ssl_options), override_ssl_options} do
        {nil, nil} -> nil
        {base_ssl, nil} -> base_ssl
        {nil, override_ssl} -> override_ssl
        {base_ssl, override_ssl} -> Keyword.merge(base_ssl, override_ssl)
      end

    base_opts
    |> Keyword.merge(Keyword.delete(override_opts, :ssl_options))
    |> maybe_put_ssl_options(merged_ssl_options)
  end

  defp maybe_put_ssl_options(opts, nil), do: opts
  defp maybe_put_ssl_options(opts, ssl_options), do: Keyword.put(opts, :ssl_options, ssl_options)
end
|
||||
367
lib/parrhesia/sync/worker.ex
Normal file
367
lib/parrhesia/sync/worker.ex
Normal file
@@ -0,0 +1,367 @@
|
||||
defmodule Parrhesia.Sync.Worker do
|
||||
@moduledoc false
|
||||
|
||||
use GenServer
|
||||
|
||||
alias Parrhesia.API.Events
|
||||
alias Parrhesia.API.Identity
|
||||
alias Parrhesia.API.RequestContext
|
||||
alias Parrhesia.API.Sync.Manager
|
||||
alias Parrhesia.Sync.RelayInfoClient
|
||||
alias Parrhesia.Sync.Transport.WebSockexClient
|
||||
|
||||
@initial_backoff_ms 1_000
|
||||
@max_backoff_ms 30_000
|
||||
@auth_kind 22_242
|
||||
|
||||
defstruct server: nil,
|
||||
manager: nil,
|
||||
transport_module: WebSockexClient,
|
||||
transport_pid: nil,
|
||||
phase: :idle,
|
||||
current_subscription_id: nil,
|
||||
backoff_ms: @initial_backoff_ms,
|
||||
authenticated?: false,
|
||||
auth_event_id: nil,
|
||||
resubscribe_after_auth?: false,
|
||||
cursor_created_at: nil,
|
||||
cursor_event_id: nil,
|
||||
relay_info_opts: [],
|
||||
transport_opts: []
|
||||
|
||||
@type t :: %__MODULE__{}
|
||||
|
||||
def child_spec(opts) do
|
||||
server = Keyword.fetch!(opts, :server)
|
||||
|
||||
%{
|
||||
id: {:sync_worker, server.id},
|
||||
start: {__MODULE__, :start_link, [opts]},
|
||||
restart: :transient
|
||||
}
|
||||
end
|
||||
|
||||
def start_link(opts) do
|
||||
name = Keyword.get(opts, :name)
|
||||
GenServer.start_link(__MODULE__, opts, name: name)
|
||||
end
|
||||
|
||||
def sync_now(worker), do: GenServer.cast(worker, :sync_now)
|
||||
def stop(worker), do: GenServer.stop(worker, :normal)
|
||||
|
||||
@impl true
|
||||
def init(opts) do
|
||||
server = Keyword.fetch!(opts, :server)
|
||||
runtime = Keyword.get(opts, :runtime, %{})
|
||||
|
||||
state = %__MODULE__{
|
||||
server: server,
|
||||
manager: Keyword.fetch!(opts, :manager),
|
||||
transport_module: Keyword.get(opts, :transport_module, WebSockexClient),
|
||||
cursor_created_at: Map.get(runtime, :cursor_created_at),
|
||||
cursor_event_id: Map.get(runtime, :cursor_event_id),
|
||||
relay_info_opts: Keyword.get(opts, :relay_info_opts, []),
|
||||
transport_opts: Keyword.get(opts, :transport_opts, [])
|
||||
}
|
||||
|
||||
send(self(), :connect)
|
||||
{:ok, state}
|
||||
end
|
||||
|
||||
@impl true
|
||||
def handle_cast(:sync_now, state) do
|
||||
Manager.runtime_event(state.manager, state.server.id, :subscription_restart)
|
||||
|
||||
next_state =
|
||||
state
|
||||
|> close_subscription()
|
||||
|> issue_subscription()
|
||||
|
||||
{:noreply, next_state}
|
||||
end
|
||||
|
||||
@impl true
|
||||
def handle_info(:connect, %__MODULE__{transport_pid: nil} = state) do
|
||||
case RelayInfoClient.verify_remote_identity(state.server, state.relay_info_opts) do
|
||||
:ok ->
|
||||
connect_transport(state)
|
||||
|
||||
{:error, reason} ->
|
||||
Manager.runtime_event(state.manager, state.server.id, :disconnected, %{reason: reason})
|
||||
{:noreply, schedule_reconnect(state)}
|
||||
end
|
||||
end
|
||||
|
||||
def handle_info(:connect, state), do: {:noreply, state}
|
||||
|
||||
# Transport reports a successful connection: reset backoff and auth
# bookkeeping for the fresh socket, then open a new subscription on it.
def handle_info({:sync_transport, transport_pid, :connected, _info}, state) do
  Manager.runtime_event(state.manager, state.server.id, :connected, %{})

  next_state =
    state
    |> Map.put(:transport_pid, transport_pid)
    # A successful connect resets the exponential backoff.
    |> Map.put(:backoff_ms, @initial_backoff_ms)
    |> Map.put(:authenticated?, false)
    |> Map.put(:auth_event_id, nil)
    |> Map.put(:resubscribe_after_auth?, false)
    |> issue_subscription()

  {:noreply, next_state}
end

# Inbound frame from the transport — decoded payload (or a decode error tuple)
# is dispatched to handle_transport_frame/2.
def handle_info({:sync_transport, _transport_pid, :frame, frame}, state) do
  {:noreply, handle_transport_frame(state, frame)}
end

# Transport dropped: clear all connection-scoped state (subscription, auth,
# phase) and schedule a reconnect with the current backoff.
def handle_info({:sync_transport, _transport_pid, :disconnected, status}, state) do
  Manager.runtime_event(state.manager, state.server.id, :disconnected, %{reason: status.reason})

  next_state =
    state
    |> Map.put(:transport_pid, nil)
    |> Map.put(:phase, :idle)
    |> Map.put(:authenticated?, false)
    |> Map.put(:auth_event_id, nil)
    |> Map.put(:resubscribe_after_auth?, false)
    |> Map.put(:current_subscription_id, nil)
    |> schedule_reconnect()

  {:noreply, next_state}
end

# Anything else (late messages from dead transports, etc.) is ignored.
def handle_info(_message, state), do: {:noreply, state}
|
||||
|
||||
# Dials the remote server via the configured transport implementation.
# On success the worker enters the :connecting phase and waits for the
# transport's :connected notification; on failure a reconnect is scheduled.
defp connect_transport(state) do
  connect_result = state.transport_module.connect(self(), state.server, state.transport_opts)

  case connect_result do
    {:ok, pid} ->
      {:noreply, %{state | transport_pid: pid, phase: :connecting}}

    {:error, reason} ->
      Manager.runtime_event(state.manager, state.server.id, :disconnected, %{reason: reason})
      {:noreply, schedule_reconnect(state)}
  end
end
|
||||
|
||||
# Relay issued an AUTH challenge: sign and send a response event, remember its
# id so the relay's OK frame can be correlated, and enter :authenticating.
defp handle_transport_frame(state, ["AUTH", challenge]) when is_binary(challenge) do
  case send_auth_event(state, challenge) do
    {:ok, auth_event_id} ->
      %{state | auth_event_id: auth_event_id, phase: :authenticating}

    {:error, reason} ->
      Manager.runtime_event(state.manager, state.server.id, :error, %{reason: reason})
      state
  end
end

# OK for our in-flight auth event, accepted: mark the connection authenticated
# and, if a subscription was deferred pending auth, open it now.
defp handle_transport_frame(state, ["OK", event_id, true, _message])
     when event_id == state.auth_event_id do
  next_state = %{state | authenticated?: true, auth_event_id: nil}

  if next_state.resubscribe_after_auth? do
    next_state
    |> Map.put(:resubscribe_after_auth?, false)
    |> issue_subscription()
  else
    next_state
  end
end

# OK for our auth event, rejected: surface the relay's message as an error and
# fall back to a backoff reconnect.
defp handle_transport_frame(state, ["OK", event_id, false, message])
     when event_id == state.auth_event_id do
  Manager.runtime_event(state.manager, state.server.id, :error, %{reason: message})
  schedule_reconnect(%{state | auth_event_id: nil, authenticated?: false})
end

# Event on the active subscription: hand it to local ingestion; events for
# stale subscription ids fall through to the catch-all clause below.
defp handle_transport_frame(state, ["EVENT", subscription_id, event])
     when subscription_id == state.current_subscription_id and is_map(event) do
  handle_remote_event(state, event)
end

# End of stored events for the active subscription: catch-up is done,
# switch to live streaming.
defp handle_transport_frame(state, ["EOSE", subscription_id])
     when subscription_id == state.current_subscription_id do
  Manager.runtime_event(state.manager, state.server.id, :sync_completed, %{})
  %{state | phase: :streaming}
end

# Relay closed our subscription. If the close message looks auth-related and we
# have not authenticated yet, defer a resubscribe until the AUTH handshake
# succeeds; otherwise treat the close as an error and reconnect.
defp handle_transport_frame(state, ["CLOSED", subscription_id, message])
     when subscription_id == state.current_subscription_id do
  # Heuristic: any "auth" substring (case-insensitive) counts as auth-required.
  auth_required? = is_binary(message) and String.contains?(String.downcase(message), "auth")

  next_state =
    state
    |> Map.put(:current_subscription_id, nil)
    |> Map.put(:phase, :idle)

  if auth_required? and not state.authenticated? do
    %{next_state | resubscribe_after_auth?: true}
  else
    Manager.runtime_event(state.manager, state.server.id, :error, %{reason: message})
    schedule_reconnect(next_state)
  end
end

# Transport could not decode an inbound payload: report it and keep running.
defp handle_transport_frame(state, {:decode_error, reason, _payload}) do
  Manager.runtime_event(state.manager, state.server.id, :error, %{reason: reason})
  state
end

# Unknown frames and frames for stale subscriptions are silently ignored.
defp handle_transport_frame(state, _frame), do: state
|
||||
|
||||
# Without a live transport there is nothing to subscribe on.
defp issue_subscription(%__MODULE__{transport_pid: nil} = state), do: state

# Sends a REQ with the configured filters (adjusted by the sync cursor) and
# records the new subscription id; the worker enters the :catchup phase until
# EOSE arrives.
defp issue_subscription(state) do
  sub_id = subscription_id(state.server.id)
  filters = sync_filters(state)

  :ok = state.transport_module.send_json(state.transport_pid, ["REQ", sub_id | filters])

  Manager.runtime_event(state.manager, state.server.id, :sync_started, %{})

  %{state | current_subscription_id: sub_id, phase: :catchup}
end
|
||||
|
||||
# Nothing to close without a transport or an active subscription.
defp close_subscription(%__MODULE__{transport_pid: nil} = state), do: state
defp close_subscription(%__MODULE__{current_subscription_id: nil} = state), do: state

# Sends a CLOSE for the active subscription and clears its id from the state.
defp close_subscription(state) do
  close_frame = ["CLOSE", state.current_subscription_id]
  :ok = state.transport_module.send_json(state.transport_pid, close_frame)

  %{state | current_subscription_id: nil}
end
|
||||
|
||||
# Builds, signs, and sends the response to a relay AUTH challenge.
# The event tags carry the relay's challenge and our record of the remote URL
# (NIP-42-style handshake, presumably — the kind comes from @auth_kind defined
# elsewhere in this module; confirm against its value).
# Returns {:ok, event_id} so the relay's OK frame can be correlated; a signing
# failure passes through Identity.sign_event/1's {:error, reason} unchanged.
defp send_auth_event(state, challenge) do
  event = %{
    "created_at" => System.system_time(:second),
    "kind" => @auth_kind,
    "tags" => [["challenge", challenge], ["relay", state.server.url]],
    "content" => ""
  }

  with {:ok, signed_event} <- Identity.sign_event(event) do
    :ok = state.transport_module.send_json(state.transport_pid, ["AUTH", signed_event])
    {:ok, signed_event["id"]}
  end
end
|
||||
|
||||
# Publishes a remote event into local storage and reports the outcome.
# Accepted and duplicate events advance the sync cursor (a duplicate still
# proves the remote stream has progressed past that point); rejected events
# and publish errors leave the cursor untouched.
#
# The four branches previously built near-identical runtime_event payloads
# inline; that construction is consolidated into emit_event_result/3,4.
defp handle_remote_event(state, event) do
  context = request_context(state)

  case Events.publish(event, context: context) do
    {:ok, %{accepted: true}} ->
      emit_event_result(state, :accepted, event)
      advance_cursor(state, event)

    {:ok, %{accepted: false, reason: :duplicate_event}} ->
      emit_event_result(state, :duplicate, event)
      advance_cursor(state, event)

    {:ok, %{accepted: false, reason: reason}} ->
      emit_event_result(state, :rejected, event, reason)
      state

    {:error, reason} ->
      emit_event_result(state, :rejected, event, reason)
      state
  end
end

# Reports an :event_result runtime event without a rejection reason.
defp emit_event_result(state, result, event) do
  Manager.runtime_event(state.manager, state.server.id, :event_result, %{
    result: result,
    event: event
  })
end

# Reports an :event_result runtime event carrying the rejection reason.
defp emit_event_result(state, result, event, reason) do
  Manager.runtime_event(state.manager, state.server.id, :event_result, %{
    result: result,
    event: event,
    reason: reason
  })
end
|
||||
|
||||
# Builds the ingestion context under which remote events are published locally.
# The remote server's auth pubkey is presented as the authenticated identity,
# the caller is tagged :sync, and metadata records provenance (server id and
# remote URL) for downstream consumers.
defp request_context(state) do
  %RequestContext{
    authenticated_pubkeys: MapSet.new([state.server.auth_pubkey]),
    caller: :sync,
    subscription_id: state.current_subscription_id,
    peer_id: state.server.id,
    metadata: %{
      sync_server_id: state.server.id,
      remote_url: state.server.url
    }
  }
end
|
||||
|
||||
# Moves the sync cursor forward to `event` when the event sorts after the
# current cursor position (see newer_cursor?/4); otherwise returns the state
# unchanged. Cursor moves are reported to the manager.
defp advance_cursor(state, event) do
  event_created_at = event["created_at"]
  event_id = event["id"]

  advance? =
    newer_cursor?(state.cursor_created_at, state.cursor_event_id, event_created_at, event_id)

  if advance? do
    Manager.runtime_event(state.manager, state.server.id, :cursor_advanced, %{
      created_at: event_created_at,
      event_id: event_id
    })

    %{state | cursor_created_at: event_created_at, cursor_event_id: event_id}
  else
    state
  end
end
|
||||
|
||||
# True when (created_at, event_id) sorts strictly after the current cursor.
# A candidate is only eligible when created_at is an integer and event_id a
# binary; with no cursor yet (nil created_at) any eligible candidate wins.
# Ties on created_at are broken lexicographically by event id, which requires
# a binary cursor event id to compare against.
defp newer_cursor?(cursor_created_at, cursor_event_id, created_at, event_id) do
  cond do
    not (is_integer(created_at) and is_binary(event_id)) ->
      false

    is_nil(cursor_created_at) ->
      true

    created_at > cursor_created_at ->
      true

    created_at < cursor_created_at ->
      false

    true ->
      is_binary(cursor_event_id) and event_id > cursor_event_id
  end
end
|
||||
|
||||
# Returns the server's configured filters, each narrowed with a "since" bound
# derived from the sync cursor when one is available.
defp sync_filters(state) do
  for filter <- state.server.filters do
    case since_value(state, filter) do
      nil -> filter
      since -> Map.put(filter, "since", since)
    end
  end
end
|
||||
|
||||
# No cursor yet: request the full backlog (no "since" bound).
defp since_value(%__MODULE__{cursor_created_at: nil}, _filter), do: nil

# Rewind the cursor by the configured overlap window so events near the cursor
# boundary are not missed; never goes below zero.
defp since_value(state, _filter) do
  rewound = state.cursor_created_at - state.server.overlap_window_seconds
  max(rewound, 0)
end
|
||||
|
||||
# Arms a reconnect timer using the current backoff, then doubles the backoff
# for the next attempt, capped at @max_backoff_ms. The backoff is reset to
# @initial_backoff_ms when a connection succeeds.
defp schedule_reconnect(state) do
  Process.send_after(self(), :connect, state.backoff_ms)
  %{state | backoff_ms: min(state.backoff_ms * 2, @max_backoff_ms)}
end
|
||||
|
||||
# Builds a process-unique, monotonically increasing subscription id scoped to
# the given server, e.g. "sync-42-17".
defp subscription_id(server_id) do
  unique = System.unique_integer([:positive, :monotonic])
  "sync-#{server_id}-#{unique}"
end
|
||||
end
|
||||
280
lib/parrhesia/tasks/partition_retention_worker.ex
Normal file
280
lib/parrhesia/tasks/partition_retention_worker.ex
Normal file
@@ -0,0 +1,280 @@
|
||||
defmodule Parrhesia.Tasks.PartitionRetentionWorker do
  @moduledoc """
  Periodic worker that ensures monthly event partitions and applies retention pruning.

  On every tick it (1) asks the partition backend to pre-create monthly
  partitions `months_ahead` into the future and (2) drops up to
  `max_partitions_to_drop_per_run` of the oldest completed partitions while
  either the age limit (`max_months_to_keep`) or the database size limit is
  exceeded. A telemetry stop event with the run duration, dropped-partition
  count, and status is emitted after each run.
  """

  use GenServer

  alias Parrhesia.Storage.Partitions
  alias Parrhesia.Telemetry

  # Fallbacks used when neither opts nor the :retention app config provide a value.
  @default_check_interval_hours 24
  @default_months_ahead 2
  @default_max_partitions_to_drop_per_run 1
  @bytes_per_gib 1_073_741_824

  @type monthly_partition :: Partitions.monthly_partition()

  @spec start_link(keyword()) :: GenServer.on_start()
  def start_link(opts \\ []) do
    name = Keyword.get(opts, :name, __MODULE__)
    GenServer.start_link(__MODULE__, opts, name: name)
  end

  @impl true
  def init(opts) do
    retention_config = Application.get_env(:parrhesia, :retention, [])

    # Options take precedence over app config; both fall back to module defaults.
    state = %{
      # Injectable partition backend (stubbed in tests).
      partition_ops: Keyword.get(opts, :partition_ops, Partitions),
      interval_ms: interval_ms(opts, retention_config),
      months_ahead: months_ahead(opts, retention_config),
      max_db_gib: max_db_gib(opts, retention_config),
      max_months_to_keep: max_months_to_keep(opts, retention_config),
      max_partitions_to_drop_per_run: max_partitions_to_drop_per_run(opts, retention_config),
      # Injectable clock so tests can pin "today".
      today_fun: today_fun(opts)
    }

    # First maintenance pass runs immediately after startup.
    schedule_tick(0)
    {:ok, state}
  end

  @impl true
  def handle_info(:tick, state) do
    started_at = System.monotonic_time()

    # A failed run is reported via the telemetry :status tag; the worker itself
    # keeps running and simply tries again on the next tick.
    {dropped_count, status} =
      case run_maintenance(state) do
        {:ok, count} -> {count, :ok}
        {:error, _reason} -> {0, :error}
      end

    Telemetry.emit(
      [:parrhesia, :maintenance, :partition_retention, :stop],
      %{
        duration: System.monotonic_time() - started_at,
        dropped_partitions: dropped_count
      },
      %{status: status}
    )

    schedule_tick(state.interval_ms)
    {:noreply, state}
  end

  def handle_info(_message, state), do: {:noreply, state}

  # Ensure future partitions exist before considering any drops.
  defp run_maintenance(state) do
    case state.partition_ops.ensure_monthly_partitions(months_ahead: state.months_ahead) do
      :ok -> maybe_drop_oldest_partitions(state)
      {:error, reason} -> {:error, reason}
    end
  end

  defp maybe_drop_oldest_partitions(%{max_partitions_to_drop_per_run: max_drops})
       when max_drops <= 0,
       do: {:ok, 0}

  # Drops at most max_partitions_to_drop_per_run partitions, re-evaluating the
  # limits after each drop and stopping early when nothing qualifies.
  defp maybe_drop_oldest_partitions(state) do
    1..state.max_partitions_to_drop_per_run
    |> Enum.reduce_while({:ok, 0}, fn _attempt, {:ok, dropped_count} ->
      drop_oldest_partition_once(state, dropped_count)
    end)
  end

  defp drop_oldest_partition_once(state, dropped_count) do
    case next_partition_to_drop(state) do
      {:ok, partition} -> apply_partition_drop(state, partition, dropped_count)
      {:error, reason} -> {:halt, {:error, reason}}
    end
  end

  # nil candidate: no partition exceeds any limit; stop with the current count.
  defp apply_partition_drop(_state, nil, dropped_count), do: {:halt, {:ok, dropped_count}}

  defp apply_partition_drop(state, partition, dropped_count) do
    case state.partition_ops.drop_partition(partition.name) do
      :ok -> {:cont, {:ok, dropped_count + 1}}
      {:error, reason} -> {:halt, {:error, reason}}
    end
  end

  # Picks the older of the age-limit candidate and the size-limit candidate,
  # or nil when neither retention limit is currently exceeded.
  defp next_partition_to_drop(state) do
    partitions = state.partition_ops.list_monthly_partitions()
    current_month_index = current_month_index(state.today_fun)

    month_limit_candidate =
      oldest_partition_exceeding_month_limit(
        partitions,
        state.max_months_to_keep,
        current_month_index
      )

    with {:ok, size_limit_candidate} <-
           oldest_partition_exceeding_size_limit(
             partitions,
             state.max_db_gib,
             current_month_index,
             state.partition_ops
           ) do
      {:ok, pick_oldest_partition(month_limit_candidate, size_limit_candidate)}
    end
  end

  defp oldest_partition_exceeding_month_limit(_partitions, :infinity, _current_month_index),
    do: nil

  defp oldest_partition_exceeding_month_limit(partitions, max_months_to_keep, current_month_index)
       when is_integer(max_months_to_keep) and max_months_to_keep > 0 do
    # The keep window is the current month plus (max_months_to_keep - 1) before it.
    oldest_month_to_keep_index = current_month_index - (max_months_to_keep - 1)

    # Only completed (pre-current) months outside the keep window qualify;
    # the oldest such partition is the drop candidate.
    partitions
    |> Enum.filter(fn partition ->
      month_index(partition) < current_month_index and
        month_index(partition) < oldest_month_to_keep_index
    end)
    |> Enum.min_by(&month_index/1, fn -> nil end)
  end

  # Any other limit value (zero, negative, malformed) disables age-based pruning.
  defp oldest_partition_exceeding_month_limit(
         _partitions,
         _max_months_to_keep,
         _current_month_index
       ),
       do: nil

  defp oldest_partition_exceeding_size_limit(
         _partitions,
         :infinity,
         _current_month_index,
         _archiver
       ),
       do: {:ok, nil}

  # NOTE(review): the limit is read from the config key :max_db_bytes (see
  # max_db_gib/2 below) but multiplied by @bytes_per_gib here, i.e. the value
  # is interpreted as GiB, not bytes. Confirm which unit is intended — a
  # byte-valued config would make the effective limit astronomically large.
  defp oldest_partition_exceeding_size_limit(
         partitions,
         max_db_gib,
         current_month_index,
         archiver
       )
       when is_integer(max_db_gib) and max_db_gib > 0 do
    with {:ok, current_size_bytes} <- archiver.database_size_bytes() do
      max_size_bytes = max_db_gib * @bytes_per_gib

      if current_size_bytes > max_size_bytes do
        {:ok, oldest_completed_partition(partitions, current_month_index)}
      else
        {:ok, nil}
      end
    end
  end

  defp oldest_partition_exceeding_size_limit(
         _partitions,
         _max_db_gib,
         _current_month_index,
         _archiver
       ),
       do: {:ok, nil}

  # Oldest partition strictly before the current month (the current month is
  # never a drop candidate), or nil when none exists.
  defp oldest_completed_partition(partitions, current_month_index) do
    partitions
    |> Enum.filter(&(month_index(&1) < current_month_index))
    |> Enum.min_by(&month_index/1, fn -> nil end)
  end

  defp pick_oldest_partition(nil, nil), do: nil
  defp pick_oldest_partition(partition, nil), do: partition
  defp pick_oldest_partition(nil, partition), do: partition

  defp pick_oldest_partition(left, right) do
    if month_index(left) <= month_index(right) do
      left
    else
      right
    end
  end

  # Collapses {year, month} to a single comparable ordinal (months since year 0).
  defp month_index(%{year: year, month: month}) when is_integer(year) and is_integer(month) do
    year * 12 + month
  end

  defp current_month_index(today_fun) do
    today = today_fun.()
    today.year * 12 + today.month
  end

  # An explicit positive :interval_ms opt wins; otherwise the configured hour
  # interval (normalized, defaulted) is converted to milliseconds.
  defp interval_ms(opts, retention_config) do
    case Keyword.get(opts, :interval_ms) do
      value when is_integer(value) and value > 0 ->
        value

      _other ->
        retention_config
        |> Keyword.get(:check_interval_hours, @default_check_interval_hours)
        |> normalize_positive_integer(@default_check_interval_hours)
        |> hours_to_ms()
    end
  end

  defp months_ahead(opts, retention_config) do
    opts
    |> Keyword.get(
      :months_ahead,
      Keyword.get(retention_config, :months_ahead, @default_months_ahead)
    )
    |> normalize_non_negative_integer(@default_months_ahead)
  end

  # NOTE(review): reads :max_db_bytes but the result feeds the GiB-denominated
  # size check above — see the unit-mismatch note there.
  defp max_db_gib(opts, retention_config) do
    opts
    |> Keyword.get(:max_db_bytes, Keyword.get(retention_config, :max_db_bytes, :infinity))
    |> normalize_limit()
  end

  defp max_months_to_keep(opts, retention_config) do
    opts
    |> Keyword.get(
      :max_months_to_keep,
      Keyword.get(retention_config, :max_months_to_keep, :infinity)
    )
    |> normalize_limit()
  end

  defp max_partitions_to_drop_per_run(opts, retention_config) do
    opts
    |> Keyword.get(
      :max_partitions_to_drop_per_run,
      Keyword.get(
        retention_config,
        :max_partitions_to_drop_per_run,
        @default_max_partitions_to_drop_per_run
      )
    )
    |> normalize_non_negative_integer(@default_max_partitions_to_drop_per_run)
  end

  # Injectable clock for tests; anything that is not a 0-arity function falls
  # back to Date.utc_today/0.
  defp today_fun(opts) do
    case Keyword.get(opts, :today_fun, &Date.utc_today/0) do
      function when is_function(function, 0) -> function
      _other -> &Date.utc_today/0
    end
  end

  # Limits accept :infinity or a positive integer; anything else means "no limit".
  defp normalize_limit(:infinity), do: :infinity
  defp normalize_limit(value) when is_integer(value) and value > 0, do: value
  defp normalize_limit(_value), do: :infinity

  defp normalize_positive_integer(value, _default) when is_integer(value) and value > 0, do: value
  defp normalize_positive_integer(_value, default), do: default

  defp normalize_non_negative_integer(value, _default) when is_integer(value) and value >= 0,
    do: value

  defp normalize_non_negative_integer(_value, default), do: default

  defp hours_to_ms(hours), do: hours * 60 * 60 * 1000

  defp schedule_tick(interval_ms) do
    Process.send_after(self(), :tick, interval_ms)
  end
end
|
||||
@@ -11,13 +11,22 @@ defmodule Parrhesia.Tasks.Supervisor do
|
||||
|
||||
@impl true
def init(_init_arg) do
  # The stale pre-refactor inline `children = if ...` computation (immediately
  # shadowed by this assignment) has been removed; the child list now comes
  # solely from the helpers below.
  children = expiration_children() ++ partition_retention_children()

  Supervisor.init(children, strategy: :one_for_one)
end

# The expiration worker can be disabled via the :enable_expiration_worker
# application flag (defaults to enabled).
defp expiration_children do
  if Application.get_env(:parrhesia, :enable_expiration_worker, true) do
    [{Parrhesia.Tasks.ExpirationWorker, name: Parrhesia.Tasks.ExpirationWorker}]
  else
    []
  end
end

# The partition retention worker is always supervised.
defp partition_retention_children do
  [
    {Parrhesia.Tasks.PartitionRetentionWorker, name: Parrhesia.Tasks.PartitionRetentionWorker}
  ]
end
|
||||
end
|
||||
|
||||
@@ -12,6 +12,9 @@ defmodule Parrhesia.TestSupport.ExpirationStubEvents do
|
||||
# Stub Events backend for expiration tests: every read-style callback
# succeeds with an empty result.
@impl true
def query(_context, _filters, _opts), do: {:ok, []}

@impl true
def query_event_refs(_context, _filters, _opts), do: {:ok, []}

@impl true
def count(_context, _filters, _opts), do: {:ok, 0}
|
||||
|
||||
|
||||
@@ -12,6 +12,9 @@ defmodule Parrhesia.TestSupport.FailingEvents do
|
||||
# Failing Events backend for error-path tests: every read-style callback
# reports the database as unavailable.
@impl true
def query(_context, _filters, _opts), do: {:error, :db_down}

@impl true
def query_event_refs(_context, _filters, _opts), do: {:error, :db_down}

@impl true
def count(_context, _filters, _opts), do: {:error, :db_down}
|
||||
|
||||
|
||||
@@ -0,0 +1,52 @@
|
||||
defmodule Parrhesia.TestSupport.PartitionRetentionStubPartitions do
  @moduledoc false
  # In-memory stand-in for the partition backend used by retention-worker
  # tests. Holds a partition list and a fake database size, and reports each
  # call by messaging the configured :test_pid.
  #
  # NOTE(review): start_link/1 accepts a :name option, but every accessor below
  # is hardcoded to the Agent registered as __MODULE__ — starting the stub
  # under a custom name leaves it unusable. Confirm whether :name should be
  # dropped or threaded through to the accessors.

  use Agent

  @spec start_link(keyword()) :: Agent.on_start()
  def start_link(opts \\ []) do
    name = Keyword.get(opts, :name, __MODULE__)

    initial_state = %{
      partitions: Keyword.get(opts, :partitions, []),
      db_size_bytes: Keyword.get(opts, :db_size_bytes, 0),
      # Test process that receives call notifications; nil disables reporting.
      test_pid: Keyword.get(opts, :test_pid)
    }

    Agent.start_link(fn -> initial_state end, name: name)
  end

  # Pretends partition creation always succeeds; only reports the call.
  @spec ensure_monthly_partitions(keyword()) :: :ok
  def ensure_monthly_partitions(opts \\ []) do
    notify({:ensure_monthly_partitions, opts})
    :ok
  end

  @spec list_monthly_partitions() :: [map()]
  def list_monthly_partitions do
    Agent.get(__MODULE__, & &1.partitions)
  end

  @spec database_size_bytes() :: {:ok, non_neg_integer()}
  def database_size_bytes do
    notify(:database_size_bytes)
    {:ok, Agent.get(__MODULE__, & &1.db_size_bytes)}
  end

  # Removes the named partition from the stub state, then reports the drop.
  @spec drop_partition(String.t()) :: :ok
  def drop_partition(partition_name) when is_binary(partition_name) do
    Agent.update(__MODULE__, fn state ->
      %{state | partitions: Enum.reject(state.partitions, &(&1.name == partition_name))}
    end)

    notify({:drop_partition, partition_name})
    :ok
  end

  # Best-effort call reporting: a no-op when no :test_pid was configured.
  defp notify(message) do
    case Agent.get(__MODULE__, & &1.test_pid) do
      pid when is_pid(pid) -> send(pid, message)
      _other -> :ok
    end
  end
end
|
||||
@@ -21,6 +21,9 @@ defmodule Parrhesia.TestSupport.PermissiveModeration do
|
||||
# Permissive moderation stub: every pubkey is allowed, no allow-list is
# configured, and banning an event is accepted as a no-op.
@impl true
def pubkey_allowed?(_context, _pubkey), do: {:ok, true}

@impl true
def has_allowed_pubkeys?(_context), do: {:ok, false}

@impl true
def ban_event(_context, _event_id), do: :ok
|
||||
|
||||
|
||||
49
lib/parrhesia/test_support/sync_fake_relay/plug.ex
Normal file
49
lib/parrhesia/test_support/sync_fake_relay/plug.ex
Normal file
@@ -0,0 +1,49 @@
|
||||
defmodule Parrhesia.TestSupport.SyncFakeRelay.Plug do
  @moduledoc false
  # HTTP entry point for the fake relay used in sync tests. "/relay" serves the
  # relay info document when the client asks for application/nostr+json and
  # otherwise upgrades to a WebSocket; every other path is a 404.

  import Plug.Conn

  alias Parrhesia.TestSupport.SyncFakeRelay.Server

  def init(opts), do: opts

  def call(conn, opts) do
    route(conn, Keyword.fetch!(opts, :server))
  end

  defp route(%{request_path: "/relay"} = conn, server) do
    if wants_nip11?(conn) do
      send_json(conn, 200, Server.document(server))
    else
      upgrade_to_socket(conn, server)
    end
  end

  defp route(conn, _server), do: send_resp(conn, 404, "not found")

  # Hands the connection to the WebSocket handler, carrying the server Agent
  # and the URL the client used (needed for AUTH relay-tag validation).
  defp upgrade_to_socket(conn, server) do
    socket_state = %{server: server, relay_url: relay_url(conn)}

    conn
    |> WebSockAdapter.upgrade(
      Parrhesia.TestSupport.SyncFakeRelay.Socket,
      socket_state,
      timeout: 60_000
    )
    |> halt()
  end

  # True when any Accept header mentions the relay-info media type.
  defp wants_nip11?(conn) do
    conn
    |> get_req_header("accept")
    |> Enum.any?(&String.contains?(&1, "application/nostr+json"))
  end

  defp send_json(conn, status, body) do
    conn
    |> put_resp_content_type("application/nostr+json")
    |> send_resp(status, JSON.encode!(body))
  end

  # Reconstructs the websocket URL the client connected with.
  defp relay_url(conn) do
    scheme = if conn.scheme == :https, do: "wss", else: "ws"
    "#{scheme}://#{conn.host}:#{conn.port}#{conn.request_path}"
  end
end
|
||||
65
lib/parrhesia/test_support/sync_fake_relay/server.ex
Normal file
65
lib/parrhesia/test_support/sync_fake_relay/server.ex
Normal file
@@ -0,0 +1,65 @@
|
||||
defmodule Parrhesia.TestSupport.SyncFakeRelay.Server do
  @moduledoc false
  # Agent-backed shared state for the fake relay: its identity document data,
  # the stored event list, and the set of live {pid, subscription_id} pairs
  # that should receive new events.

  use Agent

  def start_link(opts) do
    name = Keyword.fetch!(opts, :name)
    state = build_initial_state(opts)

    Agent.start_link(fn -> state end, name: name)
  end

  defp build_initial_state(opts) do
    %{
      pubkey: Keyword.fetch!(opts, :pubkey),
      expected_client_pubkey: Keyword.fetch!(opts, :expected_client_pubkey),
      initial_events: Keyword.get(opts, :initial_events, []),
      subscribers: %{}
    }
  end

  # Relay-info document served on the HTTP endpoint.
  def document(server) do
    Agent.get(server, fn state ->
      %{
        "name" => "Sync Fake Relay",
        "description" => "test relay",
        "pubkey" => state.pubkey,
        "supported_nips" => [1, 11, 42]
      }
    end)
  end

  def initial_events(server) do
    Agent.get(server, fn state -> state.initial_events end)
  end

  def expected_client_pubkey(server) do
    Agent.get(server, fn state -> state.expected_client_pubkey end)
  end

  def register_subscription(server, pid, subscription_id) do
    key = {pid, subscription_id}

    Agent.update(server, fn state ->
      %{state | subscribers: Map.put(state.subscribers, key, true)}
    end)
  end

  def unregister_subscription(server, pid, subscription_id) do
    key = {pid, subscription_id}

    Agent.update(server, fn state ->
      %{state | subscribers: Map.delete(state.subscribers, key)}
    end)
  end

  # Appends the event to the stored list and fans it out to every live
  # subscriber socket.
  def publish_live_event(server, event) do
    recipients =
      Agent.get_and_update(server, fn state ->
        next_state = %{state | initial_events: state.initial_events ++ [event]}
        {Map.keys(state.subscribers), next_state}
      end)

    for {pid, subscription_id} <- recipients do
      send(pid, {:sync_fake_relay_event, subscription_id, event})
    end

    :ok
  end
end
|
||||
118
lib/parrhesia/test_support/sync_fake_relay/socket.ex
Normal file
118
lib/parrhesia/test_support/sync_fake_relay/socket.ex
Normal file
@@ -0,0 +1,118 @@
|
||||
defmodule Parrhesia.TestSupport.SyncFakeRelay.Socket do
  @moduledoc false
  # WebSocket handler for the fake relay. Speaks a minimal subset of the relay
  # wire protocol: REQ/CLOSE subscriptions plus a challenge/response AUTH
  # handshake that gates all subscriptions.
  #
  # Fixes vs. the previous version:
  # * handle_auth/2 crashed with a KeyError (`state.challenge`) when an AUTH
  #   frame arrived before any REQ had set a challenge; it now fails validation
  #   gracefully via Map.get/2.
  # * The authenticated-REQ branch duplicated the EVENT/EOSE frame building of
  #   auth_success_frames/2; both paths now share subscription_frames/2.

  @behaviour WebSock

  alias Parrhesia.TestSupport.SyncFakeRelay.Server

  def init(state), do: {:ok, Map.put(state, :authenticated?, false)}

  def handle_in({payload, [opcode: :text]}, state) do
    case JSON.decode(payload) do
      {:ok, ["REQ", subscription_id | _filters]} ->
        maybe_authorize_req(state, subscription_id)

      {:ok, ["AUTH", auth_event]} when is_map(auth_event) ->
        handle_auth(auth_event, state)

      {:ok, ["CLOSE", subscription_id]} ->
        Server.unregister_subscription(state.server, self(), subscription_id)

        {:push, {:text, JSON.encode!(["CLOSED", subscription_id, "error: subscription closed"])},
         state}

      _other ->
        # Undecodable or unrecognized frames are ignored.
        {:ok, state}
    end
  end

  def handle_in(_frame, state), do: {:ok, state}

  # Live event fan-out from the Server Agent (see Server.publish_live_event/2).
  def handle_info({:sync_fake_relay_event, subscription_id, event}, state) do
    {:push, {:text, JSON.encode!(["EVENT", subscription_id, event])}, state}
  end

  def handle_info(_message, state), do: {:ok, state}

  # Drop all of this socket's subscriptions from the shared registry on close.
  def terminate(_reason, state) do
    Enum.each(Map.get(state, :subscriptions, []), fn subscription_id ->
      Server.unregister_subscription(state.server, self(), subscription_id)
    end)

    :ok
  end

  # Authenticated clients get the stored backlog plus EOSE immediately.
  defp maybe_authorize_req(%{authenticated?: true} = state, subscription_id) do
    Server.register_subscription(state.server, self(), subscription_id)

    next_state = Map.update(state, :subscriptions, [subscription_id], &[subscription_id | &1])

    {:push, subscription_frames(state, subscription_id), next_state}
  end

  # Unauthenticated REQ: issue a random challenge, remember the deferred
  # subscription, and close the subscription with an auth-required notice.
  defp maybe_authorize_req(state, subscription_id) do
    challenge = Base.encode16(:crypto.strong_rand_bytes(12), case: :lower)

    next_state =
      state
      |> Map.put(:challenge, challenge)
      |> Map.put(:pending_subscription_id, subscription_id)

    {:push,
     [
       {:text, JSON.encode!(["AUTH", challenge])},
       {:text,
        JSON.encode!(["CLOSED", subscription_id, "auth-required: sync access requires AUTH"])}
     ], next_state}
  end

  # Validates the client's AUTH event: the challenge tag must echo the one we
  # issued, the relay tag must match the URL the client connected with, and the
  # pubkey must be the expected client identity.
  defp handle_auth(auth_event, state) do
    # Map.get: no challenge was issued yet when AUTH arrives before any REQ;
    # validation then simply fails instead of raising a KeyError.
    challenge_ok? = has_tag?(auth_event, "challenge", Map.get(state, :challenge))
    relay_ok? = has_tag?(auth_event, "relay", state.relay_url)
    pubkey_ok? = Map.get(auth_event, "pubkey") == Server.expected_client_pubkey(state.server)

    if challenge_ok? and relay_ok? and pubkey_ok? do
      accept_auth(state, Map.get(auth_event, "id"))
    else
      {:push,
       {:text, JSON.encode!(["OK", Map.get(auth_event, "id"), false, "invalid: auth rejected"])},
       state}
    end
  end

  # Marks the socket authenticated and, when a REQ was deferred pending auth,
  # registers it and serves the backlog right behind the OK frame.
  defp accept_auth(state, auth_event_id) do
    accepted_state = %{state | authenticated?: true}
    ok_frame = {:text, JSON.encode!(["OK", auth_event_id, true, "ok: auth accepted"])}

    case Map.get(accepted_state, :pending_subscription_id) do
      nil ->
        {:push, ok_frame, accepted_state}

      subscription_id ->
        Server.register_subscription(state.server, self(), subscription_id)

        next_state =
          accepted_state
          |> Map.delete(:pending_subscription_id)
          |> Map.update(:subscriptions, [subscription_id], &[subscription_id | &1])

        {:push, [ok_frame | subscription_frames(accepted_state, subscription_id)], next_state}
    end
  end

  # Stored events for a fresh subscription, terminated by EOSE.
  defp subscription_frames(state, subscription_id) do
    event_frames =
      for event <- Server.initial_events(state.server) do
        {:text, JSON.encode!(["EVENT", subscription_id, event])}
      end

    event_frames ++ [{:text, JSON.encode!(["EOSE", subscription_id])}]
  end

  # True when any tag starts with [name, expected_value, ...].
  defp has_tag?(event, name, expected_value) do
    Enum.any?(Map.get(event, "tags", []), fn
      [^name, ^expected_value | _rest] -> true
      _other -> false
    end)
  end
end
|
||||
145
lib/parrhesia/test_support/tls_certs.ex
Normal file
145
lib/parrhesia/test_support/tls_certs.ex
Normal file
@@ -0,0 +1,145 @@
|
||||
defmodule Parrhesia.TestSupport.TLSCerts do
  @moduledoc false
  # Test helper that shells out to openssl to mint a throwaway CA and
  # short-lived (2-day) server/client certificates, and to compute the pin
  # values (SPKI SHA-256, whole-certificate SHA-256) used by pinning tests.
  #
  # Fix vs. the previous version: openssl was invoked via the hardcoded path
  # /usr/bin/openssl, which fails on systems where the binary lives elsewhere
  # (Homebrew, Nix, some containers). It is now resolved from PATH with the
  # old absolute path kept as a fallback.

  @doc "Creates a self-signed test CA under `dir`; returns %{keyfile:, certfile:}."
  @spec create_ca!(String.t(), String.t()) :: map()
  def create_ca!(dir, name) do
    keyfile = Path.join(dir, "#{name}-ca.key.pem")
    certfile = Path.join(dir, "#{name}-ca.cert.pem")

    openssl!([
      "req",
      "-x509",
      "-newkey",
      "rsa:2048",
      "-nodes",
      "-sha256",
      "-days",
      "2",
      "-subj",
      "/CN=#{name} Test CA",
      "-keyout",
      keyfile,
      "-out",
      certfile
    ])

    %{keyfile: keyfile, certfile: certfile}
  end

  @doc "Issues a localhost server certificate signed by `ca`."
  @spec issue_server_cert!(String.t(), map(), String.t()) :: map()
  def issue_server_cert!(dir, ca, name) do
    issue_cert!(
      dir,
      ca,
      name,
      "localhost",
      ["DNS:localhost", "IP:127.0.0.1"],
      "serverAuth"
    )
  end

  @doc "Issues a client-auth certificate (CN = `name`) signed by `ca`."
  @spec issue_client_cert!(String.t(), map(), String.t()) :: map()
  def issue_client_cert!(dir, ca, name) do
    issue_cert!(dir, ca, name, name, [], "clientAuth")
  end

  @doc "Base64 SHA-256 of the certificate's SubjectPublicKeyInfo (HPKP-style pin)."
  @spec spki_pin!(String.t()) :: String.t()
  def spki_pin!(certfile) do
    certfile
    |> der_cert!()
    |> spki_pin()
  end

  @doc "Base64 SHA-256 of the whole DER certificate."
  @spec cert_sha256!(String.t()) :: String.t()
  def cert_sha256!(certfile) do
    certfile
    |> der_cert!()
    |> then(&Base.encode64(:crypto.hash(:sha256, &1)))
  end

  # Generates a key + CSR, then signs it with the CA using a one-off extension
  # file carrying the SANs and extended key usage.
  defp issue_cert!(dir, ca, name, common_name, san_entries, extended_key_usage) do
    keyfile = Path.join(dir, "#{name}.key.pem")
    csrfile = Path.join(dir, "#{name}.csr.pem")
    certfile = Path.join(dir, "#{name}.cert.pem")
    extfile = Path.join(dir, "#{name}.ext.cnf")

    openssl!([
      "req",
      "-new",
      "-newkey",
      "rsa:2048",
      "-nodes",
      "-subj",
      "/CN=#{common_name}",
      "-keyout",
      keyfile,
      "-out",
      csrfile
    ])

    File.write!(extfile, extension_config(san_entries, extended_key_usage))

    openssl!([
      "x509",
      "-req",
      "-in",
      csrfile,
      "-CA",
      ca.certfile,
      "-CAkey",
      ca.keyfile,
      "-CAcreateserial",
      "-out",
      certfile,
      "-days",
      "2",
      "-sha256",
      "-extfile",
      extfile,
      "-extensions",
      "v3_req"
    ])

    %{keyfile: keyfile, certfile: certfile}
  end

  # Renders the v3_req extension section; the SAN line is omitted when no
  # entries are given (client certs).
  defp extension_config(san_entries, extended_key_usage) do
    san_block =
      case san_entries do
        [] -> ""
        entries -> "subjectAltName = #{Enum.join(entries, ",")}\n"
      end

    """
    [v3_req]
    basicConstraints = CA:FALSE
    keyUsage = digitalSignature,keyEncipherment
    extendedKeyUsage = #{extended_key_usage}
    #{san_block}
    """
  end

  # Reads the first PEM entry from the file and returns its DER bytes.
  defp der_cert!(certfile) do
    certfile
    |> File.read!()
    |> :public_key.pem_decode()
    |> List.first()
    |> elem(1)
  end

  # Extracts the SubjectPublicKeyInfo from the :plain-decoded certificate
  # record: elem(1) is the TBSCertificate record, whose element 7 is the SPKI
  # (record layout: tag, version, serialNumber, signature, issuer, validity,
  # subject, subjectPublicKeyInfo — confirm against the OTP :public_key
  # record definitions if the OTP version changes).
  defp spki_pin(cert_der) do
    cert = :public_key.pkix_decode_cert(cert_der, :plain)
    spki = cert |> elem(1) |> elem(7)

    :public_key.der_encode(:SubjectPublicKeyInfo, spki)
    |> then(&:crypto.hash(:sha256, &1))
    |> Base.encode64()
  end

  # Resolve openssl from PATH, keeping the old absolute path as a fallback so
  # existing environments keep working.
  defp openssl!(args) do
    executable = System.find_executable("openssl") || "/usr/bin/openssl"

    case System.cmd(executable, args, stderr_to_stdout: true) do
      {output, 0} -> output
      {output, status} -> raise "openssl failed with status #{status}: #{output}"
    end
  end
end
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,29 +1,74 @@
|
||||
defmodule Parrhesia.Web.Endpoint do
|
||||
@moduledoc """
|
||||
Supervision entrypoint for WS/HTTP ingress.
|
||||
Supervision entrypoint for configured ingress listeners.
|
||||
"""
|
||||
|
||||
use Supervisor
|
||||
|
||||
def start_link(init_arg \\ []) do
|
||||
Supervisor.start_link(__MODULE__, init_arg, name: __MODULE__)
|
||||
alias Parrhesia.Web.Listener
|
||||
|
||||
def start_link(opts \\ []) do
|
||||
name = Keyword.get(opts, :name, __MODULE__)
|
||||
listeners = Keyword.get(opts, :listeners, :configured)
|
||||
Supervisor.start_link(__MODULE__, listeners, name: name)
|
||||
end
|
||||
|
||||
@spec reload_listener(Supervisor.supervisor(), atom()) :: :ok | {:error, term()}
|
||||
def reload_listener(supervisor \\ __MODULE__, listener_id) when is_atom(listener_id) do
|
||||
with :ok <- Supervisor.terminate_child(supervisor, {:listener, listener_id}),
|
||||
{:ok, _pid} <- Supervisor.restart_child(supervisor, {:listener, listener_id}) do
|
||||
:ok
|
||||
else
|
||||
{:error, :not_found} = error -> error
|
||||
{:error, _reason} = error -> error
|
||||
other -> other
|
||||
end
|
||||
end
|
||||
|
||||
@spec reload_all(Supervisor.supervisor()) :: :ok | {:error, term()}
|
||||
def reload_all(supervisor \\ __MODULE__) do
|
||||
supervisor
|
||||
|> Supervisor.which_children()
|
||||
|> Enum.filter(fn {id, _pid, _type, _modules} ->
|
||||
match?({:listener, _listener_id}, id)
|
||||
end)
|
||||
|> Enum.reduce_while(:ok, fn {{:listener, listener_id}, _pid, _type, _modules}, :ok ->
|
||||
case reload_listener(supervisor, listener_id) do
|
||||
:ok -> {:cont, :ok}
|
||||
{:error, _reason} = error -> {:halt, error}
|
||||
end
|
||||
end)
|
||||
end
|
||||
|
||||
@impl true
|
||||
def init(init_arg) do
|
||||
children = [
|
||||
{Bandit, bandit_options(init_arg)}
|
||||
]
|
||||
def init(listeners) do
|
||||
children =
|
||||
listeners(listeners)
|
||||
|> Enum.map(fn listener ->
|
||||
%{
|
||||
id: {:listener, listener.id},
|
||||
start: {Bandit, :start_link, [Listener.bandit_options(listener)]}
|
||||
}
|
||||
end)
|
||||
|
||||
Supervisor.init(children, strategy: :one_for_one)
|
||||
end
|
||||
|
||||
defp bandit_options(overrides) do
|
||||
configured = Application.get_env(:parrhesia, __MODULE__, [])
|
||||
defp listeners(:configured), do: Listener.all()
|
||||
|
||||
configured
|
||||
|> Keyword.merge(overrides)
|
||||
|> Keyword.put_new(:scheme, :http)
|
||||
|> Keyword.put_new(:plug, Parrhesia.Web.Router)
|
||||
defp listeners(listeners) when is_list(listeners) do
|
||||
Enum.map(listeners, fn
|
||||
{id, listener} when is_atom(id) and is_map(listener) ->
|
||||
Listener.from_opts(listener: Map.put_new(listener, :id, id))
|
||||
|
||||
listener ->
|
||||
Listener.from_opts(listener: listener)
|
||||
end)
|
||||
end
|
||||
|
||||
defp listeners(listeners) when is_map(listeners) do
|
||||
listeners
|
||||
|> Enum.map(fn {id, listener} -> {id, listener} end)
|
||||
|> listeners()
|
||||
end
|
||||
end
|
||||
|
||||
662
lib/parrhesia/web/listener.ex
Normal file
662
lib/parrhesia/web/listener.ex
Normal file
@@ -0,0 +1,662 @@
|
||||
defmodule Parrhesia.Web.Listener do
|
||||
@moduledoc false
|
||||
|
||||
import Bitwise
|
||||
|
||||
alias Parrhesia.Protocol.Filter
|
||||
alias Parrhesia.Web.TLS
|
||||
|
||||
@private_cidrs [
|
||||
"127.0.0.0/8",
|
||||
"10.0.0.0/8",
|
||||
"172.16.0.0/12",
|
||||
"192.168.0.0/16",
|
||||
"169.254.0.0/16",
|
||||
"::1/128",
|
||||
"fc00::/7",
|
||||
"fe80::/10"
|
||||
]
|
||||
|
||||
@type t :: %{
|
||||
id: atom(),
|
||||
enabled: boolean(),
|
||||
bind: %{ip: tuple(), port: pos_integer()},
|
||||
transport: map(),
|
||||
proxy: map(),
|
||||
network: map(),
|
||||
features: map(),
|
||||
auth: map(),
|
||||
baseline_acl: map(),
|
||||
bandit_options: keyword()
|
||||
}
|
||||
|
||||
@spec all() :: [t()]
|
||||
def all do
|
||||
:parrhesia
|
||||
|> Application.get_env(:listeners, %{})
|
||||
|> normalize_listeners()
|
||||
|> Enum.filter(& &1.enabled)
|
||||
end
|
||||
|
||||
@spec from_opts(keyword() | map()) :: t()
|
||||
def from_opts(opts) when is_list(opts) do
|
||||
opts
|
||||
|> Keyword.get(:listener, default_listener())
|
||||
|> normalize_listener()
|
||||
end
|
||||
|
||||
def from_opts(opts) when is_map(opts) do
|
||||
opts
|
||||
|> Map.get(:listener, default_listener())
|
||||
|> normalize_listener()
|
||||
end
|
||||
|
||||
@spec from_conn(Plug.Conn.t()) :: t()
|
||||
def from_conn(conn) do
|
||||
conn.private
|
||||
|> Map.get(:parrhesia_listener, default_listener())
|
||||
|> normalize_listener()
|
||||
end
|
||||
|
||||
@spec put_conn(Plug.Conn.t(), keyword()) :: Plug.Conn.t()
|
||||
def put_conn(conn, opts) when is_list(opts) do
|
||||
Plug.Conn.put_private(conn, :parrhesia_listener, from_opts(opts))
|
||||
end
|
||||
|
||||
@spec feature_enabled?(t(), atom()) :: boolean()
|
||||
def feature_enabled?(listener, feature) when is_map(listener) and is_atom(feature) do
|
||||
listener
|
||||
|> Map.get(:features, %{})
|
||||
|> Map.get(feature, %{})
|
||||
|> Map.get(:enabled, false)
|
||||
end
|
||||
|
||||
@spec nip42_required?(t()) :: boolean()
|
||||
def nip42_required?(listener), do: listener.auth.nip42_required
|
||||
|
||||
@spec admin_auth_required?(t()) :: boolean()
|
||||
def admin_auth_required?(listener), do: listener.auth.nip98_required_for_admin
|
||||
|
||||
@spec trusted_proxies(t()) :: [String.t()]
|
||||
def trusted_proxies(listener) do
|
||||
listener.proxy.trusted_cidrs
|
||||
end
|
||||
|
||||
@spec trusted_proxy_request?(t(), Plug.Conn.t()) :: boolean()
|
||||
def trusted_proxy_request?(listener, conn) do
|
||||
TLS.trusted_proxy_request?(conn, trusted_proxies(listener))
|
||||
end
|
||||
|
||||
@spec remote_ip_allowed?(t(), tuple() | String.t() | nil) :: boolean()
|
||||
def remote_ip_allowed?(listener, remote_ip) do
|
||||
access_allowed?(listener.network, remote_ip)
|
||||
end
|
||||
|
||||
@spec authorize_transport_request(t(), Plug.Conn.t()) ::
|
||||
{:ok, map() | nil} | {:error, atom()}
|
||||
def authorize_transport_request(listener, conn) do
|
||||
TLS.authorize_request(listener.transport.tls, conn, trusted_proxy_request?(listener, conn))
|
||||
end
|
||||
|
||||
@spec request_scheme(t(), Plug.Conn.t()) :: :http | :https
|
||||
def request_scheme(listener, conn) do
|
||||
TLS.request_scheme(listener.transport.tls, conn, trusted_proxy_request?(listener, conn))
|
||||
end
|
||||
|
||||
@spec request_host(t(), Plug.Conn.t()) :: String.t()
|
||||
def request_host(listener, conn) do
|
||||
TLS.request_host(conn, trusted_proxy_request?(listener, conn))
|
||||
end
|
||||
|
||||
@spec request_port(t(), Plug.Conn.t()) :: non_neg_integer()
|
||||
def request_port(listener, conn) do
|
||||
scheme = request_scheme(listener, conn)
|
||||
TLS.request_port(listener.transport.tls, conn, trusted_proxy_request?(listener, conn), scheme)
|
||||
end
|
||||
|
||||
@spec metrics_allowed?(t(), Plug.Conn.t()) :: boolean()
|
||||
def metrics_allowed?(listener, conn) do
|
||||
metrics = Map.get(listener.features, :metrics, %{})
|
||||
|
||||
feature_enabled?(listener, :metrics) and
|
||||
access_allowed?(Map.get(metrics, :access, %{}), conn.remote_ip) and
|
||||
metrics_token_allowed?(metrics, conn)
|
||||
end
|
||||
|
||||
@spec relay_url(t(), Plug.Conn.t()) :: String.t()
|
||||
def relay_url(listener, conn) do
|
||||
scheme = request_scheme(listener, conn)
|
||||
host = request_host(listener, conn)
|
||||
port = request_port(listener, conn)
|
||||
ws_scheme = if scheme == :https, do: "wss", else: "ws"
|
||||
|
||||
port_segment =
|
||||
if default_http_port?(scheme, port) do
|
||||
""
|
||||
else
|
||||
":#{port}"
|
||||
end
|
||||
|
||||
"#{ws_scheme}://#{host}#{port_segment}#{conn.request_path}"
|
||||
end
|
||||
|
||||
@spec relay_auth_required?(t()) :: boolean()
|
||||
def relay_auth_required?(listener), do: listener.auth.nip42_required
|
||||
|
||||
@spec authorize_read(t(), [map()]) :: :ok | {:error, :listener_read_not_allowed}
|
||||
def authorize_read(listener, filters) when is_list(filters) do
|
||||
case evaluate_rules(listener.baseline_acl.read, filters, :read) do
|
||||
:allow -> :ok
|
||||
:deny -> {:error, :listener_read_not_allowed}
|
||||
end
|
||||
end
|
||||
|
||||
@spec authorize_write(t(), map()) :: :ok | {:error, :listener_write_not_allowed}
|
||||
def authorize_write(listener, event) when is_map(event) do
|
||||
case evaluate_rules(listener.baseline_acl.write, event, :write) do
|
||||
:allow -> :ok
|
||||
:deny -> {:error, :listener_write_not_allowed}
|
||||
end
|
||||
end
|
||||
|
||||
@spec bandit_options(t()) :: keyword()
|
||||
def bandit_options(listener) do
|
||||
scheme =
|
||||
case listener.transport.tls.mode do
|
||||
mode when mode in [:server, :mutual] -> :https
|
||||
_other -> listener.transport.scheme
|
||||
end
|
||||
|
||||
[
|
||||
ip: listener.bind.ip,
|
||||
port: listener.bind.port,
|
||||
scheme: scheme,
|
||||
plug: {Parrhesia.Web.ListenerPlug, listener: listener}
|
||||
] ++ TLS.bandit_options(listener.transport.tls) ++ listener.bandit_options
|
||||
end
|
||||
|
||||
defp normalize_listeners(listeners) when is_list(listeners) do
|
||||
Enum.map(listeners, fn
|
||||
{id, listener} when is_atom(id) and is_map(listener) ->
|
||||
normalize_listener(Map.put(listener, :id, id))
|
||||
|
||||
listener when is_map(listener) ->
|
||||
normalize_listener(listener)
|
||||
end)
|
||||
end
|
||||
|
||||
defp normalize_listeners(listeners) when is_map(listeners) do
|
||||
listeners
|
||||
|> Enum.map(fn {id, listener} -> normalize_listener(Map.put(listener, :id, id)) end)
|
||||
|> Enum.sort_by(& &1.id)
|
||||
end
|
||||
|
||||
defp normalize_listener(listener) when is_map(listener) do
|
||||
id = normalize_atom(fetch_value(listener, :id), :listener)
|
||||
enabled = normalize_boolean(fetch_value(listener, :enabled), true)
|
||||
bind = normalize_bind(fetch_value(listener, :bind), listener)
|
||||
transport = normalize_transport(fetch_value(listener, :transport))
|
||||
proxy = normalize_proxy(fetch_value(listener, :proxy))
|
||||
network = normalize_access(fetch_value(listener, :network), %{allow_all?: true})
|
||||
features = normalize_features(fetch_value(listener, :features))
|
||||
auth = normalize_auth(fetch_value(listener, :auth))
|
||||
baseline_acl = normalize_baseline_acl(fetch_value(listener, :baseline_acl))
|
||||
bandit_options = normalize_bandit_options(fetch_value(listener, :bandit_options))
|
||||
|
||||
%{
|
||||
id: id,
|
||||
enabled: enabled,
|
||||
bind: bind,
|
||||
transport: transport,
|
||||
proxy: proxy,
|
||||
network: network,
|
||||
features: features,
|
||||
auth: auth,
|
||||
baseline_acl: baseline_acl,
|
||||
bandit_options: bandit_options
|
||||
}
|
||||
end
|
||||
|
||||
defp normalize_listener(_listener), do: default_listener()
|
||||
|
||||
defp normalize_bind(bind, listener) when is_map(bind) do
|
||||
%{
|
||||
ip: normalize_ip(fetch_value(bind, :ip), default_bind_ip(listener)),
|
||||
port: normalize_port(fetch_value(bind, :port), 4413)
|
||||
}
|
||||
end
|
||||
|
||||
defp normalize_bind(_bind, listener) do
|
||||
%{
|
||||
ip: default_bind_ip(listener),
|
||||
port: normalize_port(fetch_value(listener, :port), 4413)
|
||||
}
|
||||
end
|
||||
|
||||
defp default_bind_ip(listener) do
|
||||
normalize_ip(fetch_value(listener, :ip), {0, 0, 0, 0})
|
||||
end
|
||||
|
||||
defp normalize_transport(transport) when is_map(transport) do
|
||||
scheme = normalize_scheme(fetch_value(transport, :scheme), :http)
|
||||
|
||||
%{
|
||||
scheme: scheme,
|
||||
tls: TLS.normalize_config(fetch_value(transport, :tls), scheme)
|
||||
}
|
||||
end
|
||||
|
||||
defp normalize_transport(_transport), do: %{scheme: :http, tls: TLS.default_config()}
|
||||
|
||||
defp normalize_proxy(proxy) when is_map(proxy) do
|
||||
%{
|
||||
trusted_cidrs: normalize_string_list(fetch_value(proxy, :trusted_cidrs)),
|
||||
honor_x_forwarded_for: normalize_boolean(fetch_value(proxy, :honor_x_forwarded_for), true)
|
||||
}
|
||||
end
|
||||
|
||||
defp normalize_proxy(_proxy), do: %{trusted_cidrs: [], honor_x_forwarded_for: true}
|
||||
|
||||
defp normalize_features(features) when is_map(features) do
|
||||
%{
|
||||
nostr: normalize_simple_feature(fetch_value(features, :nostr), true),
|
||||
admin: normalize_simple_feature(fetch_value(features, :admin), true),
|
||||
metrics: normalize_metrics_feature(fetch_value(features, :metrics))
|
||||
}
|
||||
end
|
||||
|
||||
defp normalize_features(_features) do
|
||||
%{
|
||||
nostr: %{enabled: true},
|
||||
admin: %{enabled: true},
|
||||
metrics: %{enabled: false, access: default_feature_access()}
|
||||
}
|
||||
end
|
||||
|
||||
defp normalize_simple_feature(feature, default_enabled) when is_map(feature) do
|
||||
%{enabled: normalize_boolean(fetch_value(feature, :enabled), default_enabled)}
|
||||
end
|
||||
|
||||
defp normalize_simple_feature(feature, _default_enabled) when is_boolean(feature) do
|
||||
%{enabled: feature}
|
||||
end
|
||||
|
||||
defp normalize_simple_feature(_feature, default_enabled), do: %{enabled: default_enabled}
|
||||
|
||||
defp normalize_metrics_feature(feature) when is_map(feature) do
|
||||
%{
|
||||
enabled: normalize_boolean(fetch_value(feature, :enabled), false),
|
||||
auth_token: normalize_optional_string(fetch_value(feature, :auth_token)),
|
||||
access:
|
||||
normalize_access(fetch_value(feature, :access), %{
|
||||
private_networks_only?: false,
|
||||
allow_all?: true
|
||||
})
|
||||
}
|
||||
end
|
||||
|
||||
defp normalize_metrics_feature(feature) when is_boolean(feature) do
|
||||
%{enabled: feature, auth_token: nil, access: default_feature_access()}
|
||||
end
|
||||
|
||||
defp normalize_metrics_feature(_feature),
|
||||
do: %{enabled: false, auth_token: nil, access: default_feature_access()}
|
||||
|
||||
defp default_feature_access do
|
||||
%{public?: false, private_networks_only?: false, allow_cidrs: [], allow_all?: true}
|
||||
end
|
||||
|
||||
defp normalize_auth(auth) when is_map(auth) do
|
||||
%{
|
||||
nip42_required: normalize_boolean(fetch_value(auth, :nip42_required), false),
|
||||
nip98_required_for_admin:
|
||||
normalize_boolean(fetch_value(auth, :nip98_required_for_admin), true)
|
||||
}
|
||||
end
|
||||
|
||||
defp normalize_auth(_auth), do: %{nip42_required: false, nip98_required_for_admin: true}
|
||||
|
||||
defp normalize_baseline_acl(acl) when is_map(acl) do
|
||||
%{
|
||||
read: normalize_baseline_rules(fetch_value(acl, :read)),
|
||||
write: normalize_baseline_rules(fetch_value(acl, :write))
|
||||
}
|
||||
end
|
||||
|
||||
defp normalize_baseline_acl(_acl), do: %{read: [], write: []}
|
||||
|
||||
defp normalize_baseline_rules(rules) when is_list(rules) do
|
||||
Enum.flat_map(rules, fn
|
||||
%{match: match} = rule when is_map(match) ->
|
||||
[
|
||||
%{
|
||||
action: normalize_rule_action(fetch_value(rule, :action)),
|
||||
match: normalize_filter_map(match)
|
||||
}
|
||||
]
|
||||
|
||||
_other ->
|
||||
[]
|
||||
end)
|
||||
end
|
||||
|
||||
defp normalize_baseline_rules(_rules), do: []
|
||||
|
||||
defp normalize_rule_action(:deny), do: :deny
|
||||
defp normalize_rule_action("deny"), do: :deny
|
||||
defp normalize_rule_action(_action), do: :allow
|
||||
|
||||
defp normalize_bandit_options(options) when is_list(options), do: options
|
||||
defp normalize_bandit_options(_options), do: []
|
||||
|
||||
defp normalize_access(access, defaults) when is_map(access) do
|
||||
%{
|
||||
public?:
|
||||
normalize_boolean(
|
||||
first_present(access, [:public, :public?]),
|
||||
Map.get(defaults, :public?, false)
|
||||
),
|
||||
private_networks_only?:
|
||||
normalize_boolean(
|
||||
first_present(access, [:private_networks_only, :private_networks_only?]),
|
||||
Map.get(defaults, :private_networks_only?, false)
|
||||
),
|
||||
allow_cidrs: normalize_string_list(fetch_value(access, :allow_cidrs)),
|
||||
allow_all?:
|
||||
normalize_boolean(
|
||||
first_present(access, [:allow_all, :allow_all?]),
|
||||
Map.get(defaults, :allow_all?, false)
|
||||
)
|
||||
}
|
||||
end
|
||||
|
||||
defp normalize_access(_access, defaults) do
|
||||
%{
|
||||
public?: Map.get(defaults, :public?, false),
|
||||
private_networks_only?: Map.get(defaults, :private_networks_only?, false),
|
||||
allow_cidrs: [],
|
||||
allow_all?: Map.get(defaults, :allow_all?, false)
|
||||
}
|
||||
end
|
||||
|
||||
# Decides whether `remote_ip` may reach an access-controlled surface.
#
# Clause order defines the precedence:
#   1. public? == true        -> always allowed
#   2. allow_cidrs non-empty  -> allowed only when the IP falls inside one
#      of the listed CIDRs (note: this overrides private_networks_only?)
#   3. private_networks_only? -> allowed only from loopback / RFC1918 /
#      link-local / ULA ranges (@private_cidrs)
#   4. otherwise              -> fall back to the allow_all? flag
defp access_allowed?(%{public?: true}, _remote_ip), do: true

defp access_allowed?(%{allow_cidrs: allow_cidrs}, remote_ip) when allow_cidrs != [] do
  Enum.any?(allow_cidrs, &ip_in_cidr?(remote_ip, &1))
end

defp access_allowed?(%{private_networks_only?: true}, remote_ip) do
  Enum.any?(@private_cidrs, &ip_in_cidr?(remote_ip, &1))
end

defp access_allowed?(%{allow_all?: allow_all?}, _remote_ip), do: allow_all?
|
||||
|
||||
# Checks the request's Authorization header against the listener's
# configured metrics auth token. No configured token means the check is
# disabled (always allowed).
defp metrics_token_allowed?(metrics, conn) do
  case metrics.auth_token do
    nil ->
      true

    token ->
      conn
      |> Plug.Conn.get_req_header("authorization")
      |> List.first()
      |> normalize_authorization_header()
      |> constant_time_equal?(token)
  end
end

# Strips an optional "Bearer " prefix; a bare token is accepted as-is.
defp normalize_authorization_header("Bearer " <> token), do: token
defp normalize_authorization_header(token) when is_binary(token), do: token
defp normalize_authorization_header(_header), do: nil

# Timing-safe equality for the secret token. Comparing binaries with ==
# short-circuits at the first differing byte, leaking the match length to
# an attacker who can measure response times. Hashing both sides first
# makes the comparison time independent of the inputs.
defp constant_time_equal?(provided, token) when is_binary(provided) and is_binary(token) do
  :crypto.hash(:sha256, provided) == :crypto.hash(:sha256, token)
end

# Missing/non-binary header never matches a configured token.
defp constant_time_equal?(_provided, _token), do: false
|
||||
|
||||
# Evaluates baseline ACL rules against a subject (a list of filters for
# :read, an event map for :write).
#
# Semantics:
#   * no rules at all                   -> :allow (open by default)
#   * first matching rule wins          -> its :allow/:deny action applies
#   * no match, but >=1 :allow rule     -> :deny  (allow-list behavior)
#   * no match and only :deny rules     -> :allow (deny-list behavior)
defp evaluate_rules([], _subject, _mode), do: :allow

defp evaluate_rules(rules, subject, mode) do
  has_allow_rules? = Enum.any?(rules, &(&1.action == :allow))

  case Enum.find(rules, &rule_matches?(&1, subject, mode)) do
    %{action: :deny} -> :deny
    %{action: :allow} -> :allow
    nil when has_allow_rules? -> :deny
    nil -> :allow
  end
end
|
||||
|
||||
defp rule_matches?(rule, filters, :read) when is_list(filters) do
|
||||
Enum.any?(filters, &filters_overlap?(&1, rule.match))
|
||||
end
|
||||
|
||||
defp rule_matches?(rule, event, :write) when is_map(event) do
|
||||
Filter.matches_filter?(event, rule.match)
|
||||
end
|
||||
|
||||
defp rule_matches?(_rule, _subject, _mode), do: false
|
||||
|
||||
defp filters_overlap?(left, right) when is_map(left) and is_map(right) do
|
||||
comparable_keys =
|
||||
left
|
||||
|> Map.keys()
|
||||
|> Kernel.++(Map.keys(right))
|
||||
|> Enum.uniq()
|
||||
|> Enum.reject(&(&1 in ["limit", "search", "since", "until"]))
|
||||
|
||||
Enum.all?(comparable_keys, fn key ->
|
||||
filter_constraint_compatible?(Map.get(left, key), Map.get(right, key))
|
||||
end) and filter_ranges_overlap?(left, right)
|
||||
end
|
||||
|
||||
defp filter_constraint_compatible?(nil, _right), do: true
|
||||
defp filter_constraint_compatible?(_left, nil), do: true
|
||||
|
||||
defp filter_constraint_compatible?(left, right) when is_list(left) and is_list(right) do
|
||||
not MapSet.disjoint?(MapSet.new(left), MapSet.new(right))
|
||||
end
|
||||
|
||||
defp filter_constraint_compatible?(left, right), do: left == right
|
||||
|
||||
# True when the [since, until] time windows of two filters intersect.
# Missing bounds default to the widest possible range (0 .. max int64).
defp filter_ranges_overlap?(left, right) do
  latest_since =
    [left, right]
    |> Enum.map(&Map.get(&1, "since", 0))
    |> Enum.max()

  earliest_until =
    [left, right]
    |> Enum.map(&Map.get(&1, "until", 9_223_372_036_854_775_807))
    |> Enum.min()

  latest_since <= earliest_until
end
|
||||
|
||||
defp default_listener do
|
||||
case configured_default_listener() do
|
||||
nil -> fallback_listener()
|
||||
listener -> normalize_listener(listener)
|
||||
end
|
||||
end
|
||||
|
||||
defp configured_default_listener do
|
||||
listeners = Application.get_env(:parrhesia, :listeners, %{})
|
||||
|
||||
case fetch_public_listener(listeners) do
|
||||
nil -> first_configured_listener(listeners)
|
||||
listener -> listener
|
||||
end
|
||||
end
|
||||
|
||||
defp fetch_public_listener(%{public: listener}) when is_map(listener),
|
||||
do: Map.put_new(listener, :id, :public)
|
||||
|
||||
defp fetch_public_listener(listeners) when is_list(listeners) do
|
||||
case Keyword.fetch(listeners, :public) do
|
||||
{:ok, listener} when is_map(listener) -> Map.put_new(listener, :id, :public)
|
||||
_other -> nil
|
||||
end
|
||||
end
|
||||
|
||||
defp fetch_public_listener(_listeners), do: nil
|
||||
|
||||
defp first_configured_listener(listeners) when is_list(listeners) do
|
||||
case listeners do
|
||||
[{id, listener} | _rest] when is_atom(id) and is_map(listener) ->
|
||||
Map.put_new(listener, :id, id)
|
||||
|
||||
_other ->
|
||||
nil
|
||||
end
|
||||
end
|
||||
|
||||
defp first_configured_listener(listeners) when is_map(listeners) and map_size(listeners) > 0 do
|
||||
{id, listener} = Enum.at(Enum.sort_by(listeners, fn {key, _value} -> key end), 0)
|
||||
Map.put_new(listener, :id, id)
|
||||
end
|
||||
|
||||
defp first_configured_listener(_listeners), do: nil
|
||||
|
||||
defp fallback_listener do
|
||||
%{
|
||||
id: :public,
|
||||
enabled: true,
|
||||
bind: %{ip: {0, 0, 0, 0}, port: 4413},
|
||||
transport: %{scheme: :http, tls: TLS.default_config()},
|
||||
proxy: %{trusted_cidrs: [], honor_x_forwarded_for: true},
|
||||
network: %{public?: false, private_networks_only?: false, allow_cidrs: [], allow_all?: true},
|
||||
features: %{
|
||||
nostr: %{enabled: true},
|
||||
admin: %{enabled: true},
|
||||
metrics: %{enabled: false, auth_token: nil, access: default_feature_access()}
|
||||
},
|
||||
auth: %{nip42_required: false, nip98_required_for_admin: true},
|
||||
baseline_acl: %{read: [], write: []},
|
||||
bandit_options: []
|
||||
}
|
||||
end
|
||||
|
||||
defp fetch_value(map, key) when is_map(map) do
|
||||
cond do
|
||||
Map.has_key?(map, key) ->
|
||||
Map.get(map, key)
|
||||
|
||||
is_atom(key) and Map.has_key?(map, Atom.to_string(key)) ->
|
||||
Map.get(map, Atom.to_string(key))
|
||||
|
||||
true ->
|
||||
nil
|
||||
end
|
||||
end
|
||||
|
||||
defp first_present(map, keys) do
|
||||
Enum.find_value(keys, fn key ->
|
||||
cond do
|
||||
Map.has_key?(map, key) ->
|
||||
{:present, Map.get(map, key)}
|
||||
|
||||
is_atom(key) and Map.has_key?(map, Atom.to_string(key)) ->
|
||||
{:present, Map.get(map, Atom.to_string(key))}
|
||||
|
||||
true ->
|
||||
nil
|
||||
end
|
||||
end)
|
||||
|> case do
|
||||
{:present, value} -> value
|
||||
nil -> nil
|
||||
end
|
||||
end
|
||||
|
||||
defp normalize_boolean(value, _default) when is_boolean(value), do: value
|
||||
defp normalize_boolean(nil, default), do: default
|
||||
defp normalize_boolean(_value, default), do: default
|
||||
|
||||
defp normalize_optional_string(value) when is_binary(value) and value != "", do: value
|
||||
defp normalize_optional_string(_value), do: nil
|
||||
|
||||
defp normalize_string_list(values) when is_list(values) do
|
||||
Enum.filter(values, &(is_binary(&1) and &1 != ""))
|
||||
end
|
||||
|
||||
defp normalize_string_list(_values), do: []
|
||||
|
||||
defp normalize_ip({_, _, _, _} = ip, _default), do: ip
|
||||
defp normalize_ip({_, _, _, _, _, _, _, _} = ip, _default), do: ip
|
||||
defp normalize_ip(_ip, default), do: default
|
||||
|
||||
defp normalize_port(port, _default) when is_integer(port) and port > 0, do: port
|
||||
defp normalize_port(0, _default), do: 0
|
||||
defp normalize_port(_port, default), do: default
|
||||
|
||||
defp normalize_scheme(:https, _default), do: :https
|
||||
defp normalize_scheme("https", _default), do: :https
|
||||
defp normalize_scheme(_scheme, default), do: default
|
||||
|
||||
defp normalize_atom(value, _default) when is_atom(value), do: value
|
||||
defp normalize_atom(_value, default), do: default
|
||||
|
||||
defp normalize_filter_map(filter) when is_map(filter) do
|
||||
Map.new(filter, fn
|
||||
{key, value} when is_atom(key) -> {Atom.to_string(key), value}
|
||||
{key, value} -> {key, value}
|
||||
end)
|
||||
end
|
||||
|
||||
defp normalize_filter_map(filter), do: filter
|
||||
|
||||
defp default_http_port?(:http, 80), do: true
|
||||
defp default_http_port?(:https, 443), do: true
|
||||
defp default_http_port?(_scheme, _port), do: false
|
||||
|
||||
# Returns true when `ip` (an :inet address tuple) falls inside `cidr`
# (an "address/prefix" string). Any parse failure, malformed input, or
# address-family mismatch yields false rather than raising, so this is
# safe to call on untrusted values.
defp ip_in_cidr?(ip, cidr) do
  with {network, prefix_len} <- parse_cidr(cidr),
       {:ok, ip_size, ip_value} <- ip_to_int(ip),
       {:ok, network_size, network_value} <- ip_to_int(network),
       # Families must match: 32-bit IPv4 vs 128-bit IPv6.
       true <- ip_size == network_size,
       true <- prefix_len >= 0,
       true <- prefix_len <= ip_size do
    mask = network_mask(ip_size, prefix_len)
    # Both addresses agree on every bit covered by the prefix mask.
    (ip_value &&& mask) == (network_value &&& mask)
  else
    _other -> false
  end
end
|
||||
|
||||
defp parse_cidr(cidr) when is_binary(cidr) do
|
||||
case String.split(cidr, "/", parts: 2) do
|
||||
[address, prefix_str] ->
|
||||
with {prefix_len, ""} <- Integer.parse(prefix_str),
|
||||
{:ok, ip} <- :inet.parse_address(String.to_charlist(address)) do
|
||||
{ip, prefix_len}
|
||||
else
|
||||
_other -> :error
|
||||
end
|
||||
|
||||
[address] ->
|
||||
case :inet.parse_address(String.to_charlist(address)) do
|
||||
{:ok, {_, _, _, _} = ip} -> {ip, 32}
|
||||
{:ok, {_, _, _, _, _, _, _, _} = ip} -> {ip, 128}
|
||||
_other -> :error
|
||||
end
|
||||
|
||||
_other ->
|
||||
:error
|
||||
end
|
||||
end
|
||||
|
||||
defp parse_cidr(_cidr), do: :error
|
||||
|
||||
# Converts an :inet address tuple into {:ok, bit_size, integer_value};
# anything that is not a 4- or 8-tuple yields :error.
defp ip_to_int({_, _, _, _} = ipv4) do
  # IPv4: fold four 8-bit octets into one 32-bit integer.
  value =
    ipv4
    |> Tuple.to_list()
    |> Enum.reduce(0, fn octet, acc -> (acc <<< 8) + octet end)

  {:ok, 32, value}
end

defp ip_to_int({_, _, _, _, _, _, _, _} = ipv6) do
  # IPv6: fold eight 16-bit segments into one 128-bit integer.
  value =
    ipv6
    |> Tuple.to_list()
    |> Enum.reduce(0, fn segment, acc -> (acc <<< 16) + segment end)

  {:ok, 128, value}
end

defp ip_to_int(_ip), do: :error
|
||||
|
||||
# Builds the netmask for a `prefix_len`-bit prefix in a `size`-bit address
# (32 for IPv4, 128 for IPv6): the top `prefix_len` bits set, all others 0.
defp network_mask(_size, 0), do: 0

defp network_mask(size, prefix_len) do
  all_ones = (1 <<< size) - 1
  # Truncate back to `size` bits: without the final &&& the left shift
  # leaves stray set bits above bit size-1 (e.g. a /8 over 32 bits became
  # a 56-bit value). Those bits happened to be harmless in the current
  # &&&-comparisons, but returning a genuine size-bit mask is correct for
  # any caller.
  (all_ones <<< (size - prefix_len)) &&& all_ones
end
|
||||
end
|
||||
14
lib/parrhesia/web/listener_plug.ex
Normal file
14
lib/parrhesia/web/listener_plug.ex
Normal file
@@ -0,0 +1,14 @@
|
||||
defmodule Parrhesia.Web.ListenerPlug do
  @moduledoc false

  alias Parrhesia.Web.Listener
  alias Parrhesia.Web.Router

  # Plug callback: options pass through untouched.
  def init(opts), do: opts

  # Plug callback: stamp the owning listener onto the conn's private
  # storage, then hand the request to the shared router.
  def call(conn, opts) do
    conn_with_listener = Listener.put_conn(conn, opts)
    Router.call(conn_with_listener, [])
  end
end
|
||||
@@ -5,19 +5,22 @@ defmodule Parrhesia.Web.Management do
|
||||
|
||||
import Plug.Conn
|
||||
|
||||
alias Parrhesia.Auth.Nip98
|
||||
alias Parrhesia.Storage
|
||||
alias Parrhesia.API.Admin
|
||||
alias Parrhesia.API.Auth
|
||||
alias Parrhesia.Web.Listener
|
||||
|
||||
@spec handle(Plug.Conn.t()) :: Plug.Conn.t()
|
||||
def handle(conn) do
|
||||
@spec handle(Plug.Conn.t(), keyword()) :: Plug.Conn.t()
|
||||
def handle(conn, opts \\ []) do
|
||||
full_url = full_request_url(conn)
|
||||
method = conn.method
|
||||
authorization = get_req_header(conn, "authorization") |> List.first()
|
||||
auth_required? = admin_auth_required?(opts)
|
||||
|
||||
with {:ok, auth_event} <- Nip98.validate_authorization_header(authorization, method, full_url),
|
||||
with {:ok, auth_context} <-
|
||||
maybe_validate_nip98(auth_required?, authorization, method, full_url),
|
||||
{:ok, payload} <- parse_payload(conn.body_params),
|
||||
{:ok, result} <- execute_method(payload),
|
||||
:ok <- append_audit_log(auth_event, payload, result) do
|
||||
{:ok, result} <- execute_method(payload, opts),
|
||||
:ok <- append_audit_log(auth_context, payload, result) do
|
||||
send_json(conn, 200, %{"ok" => true, "result" => result})
|
||||
else
|
||||
{:error, :missing_authorization} ->
|
||||
@@ -46,6 +49,14 @@ defmodule Parrhesia.Web.Management do
|
||||
end
|
||||
end
|
||||
|
||||
defp maybe_validate_nip98(true, authorization, method, url) do
|
||||
Auth.validate_nip98(authorization, method, url)
|
||||
end
|
||||
|
||||
defp maybe_validate_nip98(false, _authorization, _method, _url) do
|
||||
{:ok, %{pubkey: nil}}
|
||||
end
|
||||
|
||||
defp parse_payload(%{"method" => method} = payload) when is_binary(method) do
|
||||
params = Map.get(payload, "params", %{})
|
||||
|
||||
@@ -58,14 +69,14 @@ defmodule Parrhesia.Web.Management do
|
||||
|
||||
defp parse_payload(_payload), do: {:error, :invalid_payload}
|
||||
|
||||
defp execute_method(payload) do
|
||||
Storage.admin().execute(%{}, payload.method, payload.params)
|
||||
defp execute_method(payload, opts) do
|
||||
Admin.execute(payload.method, payload.params, opts)
|
||||
end
|
||||
|
||||
defp append_audit_log(auth_event, payload, result) do
|
||||
Storage.admin().append_audit_log(%{}, %{
|
||||
defp append_audit_log(auth_context, payload, result) do
|
||||
Parrhesia.Storage.admin().append_audit_log(%{}, %{
|
||||
method: payload.method,
|
||||
actor_pubkey: Map.get(auth_event, "pubkey"),
|
||||
actor_pubkey: auth_context.pubkey,
|
||||
params: payload.params,
|
||||
result: normalize_result(result)
|
||||
})
|
||||
@@ -84,14 +95,15 @@ defmodule Parrhesia.Web.Management do
|
||||
end
|
||||
|
||||
defp full_request_url(conn) do
|
||||
scheme = Atom.to_string(conn.scheme)
|
||||
host = conn.host
|
||||
port = conn.port
|
||||
listener = Listener.from_conn(conn)
|
||||
scheme = Listener.request_scheme(listener, conn)
|
||||
host = Listener.request_host(listener, conn)
|
||||
port = Listener.request_port(listener, conn)
|
||||
|
||||
port_suffix =
|
||||
cond do
|
||||
conn.scheme == :http and port == 80 -> ""
|
||||
conn.scheme == :https and port == 443 -> ""
|
||||
scheme == :http and port == 80 -> ""
|
||||
scheme == :https and port == 443 -> ""
|
||||
true -> ":#{port}"
|
||||
end
|
||||
|
||||
@@ -99,4 +111,13 @@ defmodule Parrhesia.Web.Management do
|
||||
|
||||
"#{scheme}://#{host}#{port_suffix}#{conn.request_path}#{query_suffix}"
|
||||
end
|
||||
|
||||
defp admin_auth_required?(opts) do
|
||||
opts
|
||||
|> Keyword.get(:listener)
|
||||
|> case do
|
||||
%{auth: %{nip98_required_for_admin: value}} -> value
|
||||
_other -> true
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
23
lib/parrhesia/web/metrics.ex
Normal file
23
lib/parrhesia/web/metrics.ex
Normal file
@@ -0,0 +1,23 @@
|
||||
defmodule Parrhesia.Web.Metrics do
  @moduledoc false

  import Plug.Conn

  alias Parrhesia.Telemetry
  alias Parrhesia.Web.Listener

  # Serves the Prometheus scrape when the listener's access policy allows
  # this request; otherwise replies 403.
  @spec handle(Plug.Conn.t()) :: Plug.Conn.t()
  def handle(conn) do
    allowed? =
      conn
      |> Listener.from_conn()
      |> Listener.metrics_allowed?(conn)

    if allowed? do
      serve_scrape(conn)
    else
      send_resp(conn, 403, "forbidden")
    end
  end

  # Renders the current Prometheus scrape as a plain-text 200 response.
  defp serve_scrape(conn) do
    scrape = TelemetryMetricsPrometheus.Core.scrape(Telemetry.prometheus_reporter())

    conn
    |> put_resp_content_type("text/plain")
    |> send_resp(200, scrape)
  end
end
|
||||
138
lib/parrhesia/web/metrics_access.ex
Normal file
138
lib/parrhesia/web/metrics_access.ex
Normal file
@@ -0,0 +1,138 @@
|
||||
defmodule Parrhesia.Web.MetricsAccess do
|
||||
@moduledoc false
|
||||
|
||||
import Plug.Conn
|
||||
import Bitwise
|
||||
|
||||
@private_cidrs [
|
||||
"127.0.0.0/8",
|
||||
"10.0.0.0/8",
|
||||
"172.16.0.0/12",
|
||||
"192.168.0.0/16",
|
||||
"169.254.0.0/16",
|
||||
"::1/128",
|
||||
"fc00::/7",
|
||||
"fe80::/10"
|
||||
]
|
||||
|
||||
@spec allowed?(Plug.Conn.t()) :: boolean()
|
||||
def allowed?(conn) do
|
||||
if metrics_public?() do
|
||||
true
|
||||
else
|
||||
token_allowed?(conn) and network_allowed?(conn)
|
||||
end
|
||||
end
|
||||
|
||||
defp token_allowed?(conn) do
|
||||
case configured_auth_token() do
|
||||
nil ->
|
||||
true
|
||||
|
||||
token ->
|
||||
provided_token(conn) == token
|
||||
end
|
||||
end
|
||||
|
||||
defp provided_token(conn) do
|
||||
conn
|
||||
|> get_req_header("authorization")
|
||||
|> List.first()
|
||||
|> normalize_authorization_header()
|
||||
end
|
||||
|
||||
defp normalize_authorization_header("Bearer " <> token), do: token
|
||||
defp normalize_authorization_header(token) when is_binary(token), do: token
|
||||
defp normalize_authorization_header(_header), do: nil
|
||||
|
||||
defp network_allowed?(conn) do
|
||||
remote_ip = conn.remote_ip
|
||||
|
||||
cond do
|
||||
configured_allowed_cidrs() != [] ->
|
||||
Enum.any?(configured_allowed_cidrs(), &ip_in_cidr?(remote_ip, &1))
|
||||
|
||||
metrics_private_networks_only?() ->
|
||||
Enum.any?(@private_cidrs, &ip_in_cidr?(remote_ip, &1))
|
||||
|
||||
true ->
|
||||
true
|
||||
end
|
||||
end
|
||||
|
||||
defp ip_in_cidr?(ip, cidr) do
|
||||
with {network, prefix_len} <- parse_cidr(cidr),
|
||||
{:ok, ip_size, ip_value} <- ip_to_int(ip),
|
||||
{:ok, network_size, network_value} <- ip_to_int(network),
|
||||
true <- ip_size == network_size,
|
||||
true <- prefix_len >= 0,
|
||||
true <- prefix_len <= ip_size do
|
||||
mask = network_mask(ip_size, prefix_len)
|
||||
(ip_value &&& mask) == (network_value &&& mask)
|
||||
else
|
||||
_other -> false
|
||||
end
|
||||
end
|
||||
|
||||
defp parse_cidr(cidr) when is_binary(cidr) do
|
||||
case String.split(cidr, "/", parts: 2) do
|
||||
[address, prefix_str] ->
|
||||
with {prefix_len, ""} <- Integer.parse(prefix_str),
|
||||
{:ok, ip} <- :inet.parse_address(String.to_charlist(address)) do
|
||||
{ip, prefix_len}
|
||||
else
|
||||
_other -> :error
|
||||
end
|
||||
|
||||
_other ->
|
||||
:error
|
||||
end
|
||||
end
|
||||
|
||||
defp parse_cidr(_cidr), do: :error
|
||||
|
||||
defp ip_to_int({a, b, c, d}) do
|
||||
{:ok, 32, (a <<< 24) + (b <<< 16) + (c <<< 8) + d}
|
||||
end
|
||||
|
||||
defp ip_to_int({a, b, c, d, e, f, g, h}) do
|
||||
{:ok, 128,
|
||||
(a <<< 112) + (b <<< 96) + (c <<< 80) + (d <<< 64) + (e <<< 48) + (f <<< 32) + (g <<< 16) +
|
||||
h}
|
||||
end
|
||||
|
||||
defp ip_to_int(_ip), do: :error
|
||||
|
||||
defp network_mask(_size, 0), do: 0
|
||||
|
||||
defp network_mask(size, prefix_len) do
|
||||
all_ones = (1 <<< size) - 1
|
||||
all_ones <<< (size - prefix_len)
|
||||
end
|
||||
|
||||
defp configured_allowed_cidrs do
|
||||
:parrhesia
|
||||
|> Application.get_env(:metrics, [])
|
||||
|> Keyword.get(:allowed_cidrs, [])
|
||||
|> Enum.filter(&is_binary/1)
|
||||
end
|
||||
|
||||
defp configured_auth_token do
|
||||
case :parrhesia |> Application.get_env(:metrics, []) |> Keyword.get(:auth_token) do
|
||||
token when is_binary(token) and token != "" -> token
|
||||
_other -> nil
|
||||
end
|
||||
end
|
||||
|
||||
defp metrics_public? do
|
||||
:parrhesia
|
||||
|> Application.get_env(:metrics, [])
|
||||
|> Keyword.get(:public, false)
|
||||
end
|
||||
|
||||
defp metrics_private_networks_only? do
|
||||
:parrhesia
|
||||
|> Application.get_env(:metrics, [])
|
||||
|> Keyword.get(:private_networks_only, true)
|
||||
end
|
||||
end
|
||||
34
lib/parrhesia/web/metrics_endpoint.ex
Normal file
34
lib/parrhesia/web/metrics_endpoint.ex
Normal file
@@ -0,0 +1,34 @@
|
||||
defmodule Parrhesia.Web.MetricsEndpoint do
|
||||
@moduledoc """
|
||||
Optional dedicated HTTP listener for Prometheus metrics scraping.
|
||||
"""
|
||||
|
||||
use Supervisor
|
||||
|
||||
def start_link(init_arg \\ []) do
|
||||
Supervisor.start_link(__MODULE__, init_arg, name: __MODULE__)
|
||||
end
|
||||
|
||||
@impl true
|
||||
def init(init_arg) do
|
||||
options = bandit_options(init_arg)
|
||||
|
||||
children =
|
||||
if Keyword.get(options, :enabled, false) do
|
||||
[{Bandit, Keyword.delete(options, :enabled)}]
|
||||
else
|
||||
[]
|
||||
end
|
||||
|
||||
Supervisor.init(children, strategy: :one_for_one)
|
||||
end
|
||||
|
||||
defp bandit_options(overrides) do
|
||||
configured = Application.get_env(:parrhesia, __MODULE__, [])
|
||||
|
||||
configured
|
||||
|> Keyword.merge(overrides)
|
||||
|> Keyword.put_new(:scheme, :http)
|
||||
|> Keyword.put_new(:plug, Parrhesia.Web.MetricsRouter)
|
||||
end
|
||||
end
|
||||
18
lib/parrhesia/web/metrics_router.ex
Normal file
18
lib/parrhesia/web/metrics_router.ex
Normal file
@@ -0,0 +1,18 @@
|
||||
defmodule Parrhesia.Web.MetricsRouter do
|
||||
@moduledoc false
|
||||
|
||||
use Plug.Router
|
||||
|
||||
alias Parrhesia.Web.Metrics
|
||||
|
||||
plug(:match)
|
||||
plug(:dispatch)
|
||||
|
||||
get "/metrics" do
|
||||
Metrics.handle(conn)
|
||||
end
|
||||
|
||||
match _ do
|
||||
send_resp(conn, 404, "not found")
|
||||
end
|
||||
end
|
||||
@@ -5,10 +5,24 @@ defmodule Parrhesia.Web.Readiness do
|
||||
def ready? do
|
||||
process_ready?(Parrhesia.Subscriptions.Index) and
|
||||
process_ready?(Parrhesia.Auth.Challenges) and
|
||||
process_ready?(Parrhesia.Negentropy.Sessions) and
|
||||
negentropy_ready?() and
|
||||
process_ready?(Parrhesia.Repo)
|
||||
end
|
||||
|
||||
defp negentropy_ready? do
|
||||
if negentropy_enabled?() do
|
||||
process_ready?(Parrhesia.Negentropy.Sessions)
|
||||
else
|
||||
true
|
||||
end
|
||||
end
|
||||
|
||||
defp negentropy_enabled? do
|
||||
:parrhesia
|
||||
|> Application.get_env(:features, [])
|
||||
|> Keyword.get(:nip_77_negentropy, true)
|
||||
end
|
||||
|
||||
defp process_ready?(name) do
|
||||
case Process.whereis(name) do
|
||||
pid when is_pid(pid) -> true
|
||||
|
||||
@@ -3,49 +3,55 @@ defmodule Parrhesia.Web.RelayInfo do
|
||||
NIP-11 relay information document.
|
||||
"""
|
||||
|
||||
@spec document() :: map()
|
||||
def document do
|
||||
alias Parrhesia.API.Identity
|
||||
alias Parrhesia.Web.Listener
|
||||
|
||||
@spec document(Listener.t()) :: map()
|
||||
def document(listener) do
|
||||
%{
|
||||
"name" => "Parrhesia",
|
||||
"description" => "Nostr/Marmot relay",
|
||||
"pubkey" => nil,
|
||||
"pubkey" => relay_pubkey(),
|
||||
"supported_nips" => supported_nips(),
|
||||
"software" => "https://git.teralink.net/self/parrhesia",
|
||||
"version" => Application.spec(:parrhesia, :vsn) |> to_string(),
|
||||
"limitation" => limitations()
|
||||
"limitation" => limitations(listener)
|
||||
}
|
||||
end
|
||||
|
||||
defp supported_nips do
|
||||
[
|
||||
1,
|
||||
9,
|
||||
11,
|
||||
13,
|
||||
17,
|
||||
40,
|
||||
42,
|
||||
43,
|
||||
44,
|
||||
45,
|
||||
50,
|
||||
59,
|
||||
62,
|
||||
66,
|
||||
70,
|
||||
77,
|
||||
86,
|
||||
98
|
||||
]
|
||||
base = [1, 9, 11, 13, 17, 40, 42, 43, 44, 45, 50, 59, 62, 66, 70]
|
||||
|
||||
with_negentropy =
|
||||
if negentropy_enabled?() do
|
||||
base ++ [77]
|
||||
else
|
||||
base
|
||||
end
|
||||
|
||||
with_negentropy ++ [86, 98]
|
||||
end
|
||||
|
||||
defp limitations do
|
||||
defp limitations(listener) do
|
||||
%{
|
||||
"max_message_length" => Parrhesia.Config.get([:limits, :max_frame_bytes], 1_048_576),
|
||||
"max_subscriptions" =>
|
||||
Parrhesia.Config.get([:limits, :max_subscriptions_per_connection], 32),
|
||||
"max_filters" => Parrhesia.Config.get([:limits, :max_filters_per_req], 16),
|
||||
"auth_required" => Parrhesia.Config.get([:policies, :auth_required_for_reads], false)
|
||||
"auth_required" => Listener.relay_auth_required?(listener)
|
||||
}
|
||||
end
|
||||
|
||||
defp negentropy_enabled? do
|
||||
:parrhesia
|
||||
|> Application.get_env(:features, [])
|
||||
|> Keyword.get(:nip_77_negentropy, true)
|
||||
end
|
||||
|
||||
defp relay_pubkey do
|
||||
case Identity.get() do
|
||||
{:ok, %{pubkey: pubkey}} -> pubkey
|
||||
{:error, _reason} -> nil
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
187
lib/parrhesia/web/remote_ip.ex
Normal file
187
lib/parrhesia/web/remote_ip.ex
Normal file
@@ -0,0 +1,187 @@
|
||||
defmodule Parrhesia.Web.RemoteIp do
|
||||
@moduledoc false
|
||||
|
||||
import Bitwise
|
||||
|
||||
alias Parrhesia.Web.Listener
|
||||
|
||||
@spec init(term()) :: term()
|
||||
def init(opts), do: opts
|
||||
|
||||
@spec call(Plug.Conn.t(), term()) :: Plug.Conn.t()
|
||||
def call(conn, _opts) do
|
||||
if trusted_proxy?(conn) and honor_x_forwarded_for?(conn) do
|
||||
case forwarded_ip(conn) do
|
||||
nil -> conn
|
||||
forwarded_ip -> %{conn | remote_ip: forwarded_ip}
|
||||
end
|
||||
else
|
||||
conn
|
||||
end
|
||||
end
|
||||
|
||||
defp forwarded_ip(conn) do
|
||||
conn
|
||||
|> x_forwarded_for_ip()
|
||||
|> fallback_forwarded_ip(conn)
|
||||
|> fallback_real_ip(conn)
|
||||
end
|
||||
|
||||
defp x_forwarded_for_ip(conn) do
|
||||
conn
|
||||
|> Plug.Conn.get_req_header("x-forwarded-for")
|
||||
|> List.first()
|
||||
|> parse_x_forwarded_for()
|
||||
end
|
||||
|
||||
defp fallback_forwarded_ip(nil, conn) do
|
||||
conn
|
||||
|> Plug.Conn.get_req_header("forwarded")
|
||||
|> List.first()
|
||||
|> parse_forwarded_header()
|
||||
end
|
||||
|
||||
defp fallback_forwarded_ip(ip, _conn), do: ip
|
||||
|
||||
defp fallback_real_ip(nil, conn) do
|
||||
conn
|
||||
|> Plug.Conn.get_req_header("x-real-ip")
|
||||
|> List.first()
|
||||
|> parse_ip_string()
|
||||
end
|
||||
|
||||
defp fallback_real_ip(ip, _conn), do: ip
|
||||
|
||||
defp trusted_proxy?(conn) do
|
||||
Enum.any?(trusted_proxies(conn), &ip_in_cidr?(conn.remote_ip, &1))
|
||||
end
|
||||
|
||||
defp trusted_proxies(conn) do
|
||||
listener = Listener.from_conn(conn)
|
||||
|
||||
case Listener.trusted_proxies(listener) do
|
||||
[] ->
|
||||
:parrhesia
|
||||
|> Application.get_env(:trusted_proxies, [])
|
||||
|> Enum.filter(&is_binary/1)
|
||||
|
||||
trusted_proxies ->
|
||||
trusted_proxies
|
||||
end
|
||||
end
|
||||
|
||||
defp honor_x_forwarded_for?(conn) do
|
||||
listener = Listener.from_conn(conn)
|
||||
listener.proxy.honor_x_forwarded_for
|
||||
end
|
||||
|
||||
defp parse_x_forwarded_for(value) when is_binary(value) do
|
||||
value
|
||||
|> String.split(",")
|
||||
|> Enum.map(&String.trim/1)
|
||||
|> Enum.find_value(&parse_ip_string/1)
|
||||
end
|
||||
|
||||
defp parse_x_forwarded_for(_value), do: nil
|
||||
|
||||
defp parse_forwarded_header(value) when is_binary(value) do
|
||||
value
|
||||
|> String.split(",")
|
||||
|> Enum.find_value(fn part ->
|
||||
part
|
||||
|> String.split(";")
|
||||
|> Enum.find_value(&forwarded_for_segment/1)
|
||||
end)
|
||||
end
|
||||
|
||||
defp parse_forwarded_header(_value), do: nil
|
||||
|
||||
defp forwarded_for_segment(segment) do
|
||||
case String.split(segment, "=", parts: 2) do
|
||||
[key, ip] ->
|
||||
if String.downcase(String.trim(key)) == "for" do
|
||||
ip
|
||||
|> String.trim()
|
||||
|> String.trim("\"")
|
||||
|> String.trim_leading("[")
|
||||
|> String.trim_trailing("]")
|
||||
|> parse_ip_string()
|
||||
end
|
||||
|
||||
_other ->
|
||||
nil
|
||||
end
|
||||
end
|
||||
|
||||
defp parse_ip_string(value) when is_binary(value) do
|
||||
value
|
||||
|> String.trim()
|
||||
|> String.split(":", parts: 2)
|
||||
|> List.first()
|
||||
|> then(fn ip ->
|
||||
case :inet.parse_address(String.to_charlist(ip)) do
|
||||
{:ok, parsed_ip} -> parsed_ip
|
||||
_other -> nil
|
||||
end
|
||||
end)
|
||||
end
|
||||
|
||||
defp parse_ip_string(_value), do: nil
|
||||
|
||||
defp ip_in_cidr?(ip, cidr) do
|
||||
with {network, prefix_len} <- parse_cidr(cidr),
|
||||
{:ok, ip_size, ip_value} <- ip_to_int(ip),
|
||||
{:ok, network_size, network_value} <- ip_to_int(network),
|
||||
true <- ip_size == network_size,
|
||||
true <- prefix_len >= 0,
|
||||
true <- prefix_len <= ip_size do
|
||||
mask = network_mask(ip_size, prefix_len)
|
||||
(ip_value &&& mask) == (network_value &&& mask)
|
||||
else
|
||||
_other -> false
|
||||
end
|
||||
end
|
||||
|
||||
defp parse_cidr(cidr) when is_binary(cidr) do
|
||||
case String.split(cidr, "/", parts: 2) do
|
||||
[address, prefix_str] ->
|
||||
with {prefix_len, ""} <- Integer.parse(prefix_str),
|
||||
{:ok, ip} <- :inet.parse_address(String.to_charlist(address)) do
|
||||
{ip, prefix_len}
|
||||
else
|
||||
_other -> :error
|
||||
end
|
||||
|
||||
[address] ->
|
||||
case :inet.parse_address(String.to_charlist(address)) do
|
||||
{:ok, {_, _, _, _} = ip} -> {ip, 32}
|
||||
{:ok, {_, _, _, _, _, _, _, _} = ip} -> {ip, 128}
|
||||
_other -> :error
|
||||
end
|
||||
|
||||
_other ->
|
||||
:error
|
||||
end
|
||||
end
|
||||
|
||||
defp parse_cidr(_cidr), do: :error
|
||||
|
||||
defp ip_to_int({a, b, c, d}) do
|
||||
{:ok, 32, (a <<< 24) + (b <<< 16) + (c <<< 8) + d}
|
||||
end
|
||||
|
||||
defp ip_to_int({a, b, c, d, e, f, g, h}) do
|
||||
{:ok, 128,
|
||||
(a <<< 112) + (b <<< 96) + (c <<< 80) + (d <<< 64) + (e <<< 48) + (f <<< 32) + (g <<< 16) +
|
||||
h}
|
||||
end
|
||||
|
||||
defp ip_to_int(_ip), do: :error
|
||||
|
||||
defp network_mask(_size, 0), do: 0
|
||||
|
||||
defp network_mask(size, prefix_len) do
|
||||
all_ones = (1 <<< size) - 1
|
||||
all_ones <<< (size - prefix_len)
|
||||
end
|
||||
end
|
||||
@@ -3,17 +3,22 @@ defmodule Parrhesia.Web.Router do
|
||||
|
||||
use Plug.Router
|
||||
|
||||
alias Parrhesia.Telemetry
|
||||
alias Parrhesia.Policy.ConnectionPolicy
|
||||
alias Parrhesia.Web.Listener
|
||||
alias Parrhesia.Web.Management
|
||||
alias Parrhesia.Web.Metrics
|
||||
alias Parrhesia.Web.Readiness
|
||||
alias Parrhesia.Web.RelayInfo
|
||||
|
||||
plug(:put_listener)
|
||||
|
||||
plug(Plug.Parsers,
|
||||
parsers: [:json],
|
||||
pass: ["application/json"],
|
||||
json_decoder: JSON
|
||||
)
|
||||
|
||||
plug(Parrhesia.Web.RemoteIp)
|
||||
plug(:match)
|
||||
plug(:dispatch)
|
||||
|
||||
@@ -30,28 +35,64 @@ defmodule Parrhesia.Web.Router do
|
||||
end
|
||||
|
||||
get "/metrics" do
|
||||
body = TelemetryMetricsPrometheus.Core.scrape(Telemetry.prometheus_reporter())
|
||||
listener = Listener.from_conn(conn)
|
||||
|
||||
conn
|
||||
|> put_resp_content_type("text/plain")
|
||||
|> send_resp(200, body)
|
||||
if Listener.feature_enabled?(listener, :metrics) do
|
||||
case authorize_listener_request(conn, listener) do
|
||||
{:ok, conn} -> Metrics.handle(conn)
|
||||
{:error, :forbidden} -> send_resp(conn, 403, "forbidden")
|
||||
end
|
||||
else
|
||||
send_resp(conn, 404, "not found")
|
||||
end
|
||||
end
|
||||
|
||||
post "/management" do
|
||||
Management.handle(conn)
|
||||
listener = Listener.from_conn(conn)
|
||||
|
||||
if Listener.feature_enabled?(listener, :admin) do
|
||||
case authorize_listener_request(conn, listener) do
|
||||
{:ok, conn} -> Management.handle(conn, listener: listener)
|
||||
{:error, :forbidden} -> send_resp(conn, 403, "forbidden")
|
||||
end
|
||||
else
|
||||
send_resp(conn, 404, "not found")
|
||||
end
|
||||
end
|
||||
|
||||
get "/relay" do
|
||||
if accepts_nip11?(conn) do
|
||||
body = JSON.encode!(RelayInfo.document())
|
||||
listener = Listener.from_conn(conn)
|
||||
|
||||
conn
|
||||
|> put_resp_content_type("application/nostr+json")
|
||||
|> send_resp(200, body)
|
||||
if Listener.feature_enabled?(listener, :nostr) do
|
||||
case authorize_listener_request(conn, listener) do
|
||||
{:ok, conn} ->
|
||||
if accepts_nip11?(conn) do
|
||||
body = JSON.encode!(RelayInfo.document(listener))
|
||||
|
||||
conn
|
||||
|> put_resp_content_type("application/nostr+json")
|
||||
|> send_resp(200, body)
|
||||
else
|
||||
conn
|
||||
|> WebSockAdapter.upgrade(
|
||||
Parrhesia.Web.Connection,
|
||||
%{
|
||||
listener: listener,
|
||||
relay_url: Listener.relay_url(listener, conn),
|
||||
remote_ip: remote_ip(conn),
|
||||
transport_identity: transport_identity(conn)
|
||||
},
|
||||
timeout: 60_000,
|
||||
max_frame_size: max_frame_bytes()
|
||||
)
|
||||
|> halt()
|
||||
end
|
||||
|
||||
{:error, :forbidden} ->
|
||||
send_resp(conn, 403, "forbidden")
|
||||
end
|
||||
else
|
||||
conn
|
||||
|> WebSockAdapter.upgrade(Parrhesia.Web.Connection, %{}, timeout: 60_000)
|
||||
|> halt()
|
||||
send_resp(conn, 404, "not found")
|
||||
end
|
||||
end
|
||||
|
||||
@@ -59,9 +100,54 @@ defmodule Parrhesia.Web.Router do
|
||||
send_resp(conn, 404, "not found")
|
||||
end
|
||||
|
||||
defp put_listener(conn, opts) do
|
||||
case conn.private do
|
||||
%{parrhesia_listener: _listener} -> conn
|
||||
_other -> Listener.put_conn(conn, opts)
|
||||
end
|
||||
end
|
||||
|
||||
defp accepts_nip11?(conn) do
|
||||
conn
|
||||
|> get_req_header("accept")
|
||||
|> Enum.any?(&String.contains?(&1, "application/nostr+json"))
|
||||
end
|
||||
|
||||
defp max_frame_bytes do
|
||||
Parrhesia.Config.get([:limits, :max_frame_bytes], 1_048_576)
|
||||
end
|
||||
|
||||
defp authorize_listener_request(conn, listener) do
|
||||
with :ok <- authorize_remote_ip(conn),
|
||||
true <- Listener.remote_ip_allowed?(listener, conn.remote_ip),
|
||||
{:ok, transport_identity} <- Listener.authorize_transport_request(listener, conn) do
|
||||
{:ok, maybe_put_transport_identity(conn, transport_identity)}
|
||||
else
|
||||
{:error, :ip_blocked} -> {:error, :forbidden}
|
||||
{:error, _reason} -> {:error, :forbidden}
|
||||
false -> {:error, :forbidden}
|
||||
end
|
||||
end
|
||||
|
||||
defp authorize_remote_ip(conn) do
|
||||
ConnectionPolicy.authorize_remote_ip(conn.remote_ip)
|
||||
end
|
||||
|
||||
defp remote_ip(conn) do
|
||||
case conn.remote_ip do
|
||||
{_, _, _, _} = remote_ip -> :inet.ntoa(remote_ip) |> to_string()
|
||||
{_, _, _, _, _, _, _, _} = remote_ip -> :inet.ntoa(remote_ip) |> to_string()
|
||||
_other -> nil
|
||||
end
|
||||
end
|
||||
|
||||
defp maybe_put_transport_identity(conn, nil), do: conn
|
||||
|
||||
defp maybe_put_transport_identity(conn, transport_identity) do
|
||||
Plug.Conn.put_private(conn, :parrhesia_transport_identity, transport_identity)
|
||||
end
|
||||
|
||||
defp transport_identity(conn) do
|
||||
Map.get(conn.private, :parrhesia_transport_identity)
|
||||
end
|
||||
end
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user