Compare commits
57 Commits
| SHA1 |
| --- |
| 3e5bf462e9 |
| fc3d121599 |
| 970cee2c0e |
| 7a43ebd395 |
| 4c40edfd83 |
| f60b8ba02a |
| 2225dfdc9e |
| 9014912e9d |
| c30449b318 |
| c377ed4b62 |
| dce473662f |
| a2bdf11139 |
| bc66dfcbbe |
| f732d9cf24 |
| f2856d000e |
| dc5f0c1e5d |
| b56925f413 |
| 05718d4b91 |
| 1fef184f50 |
| 57fdb4ed85 |
| 8dbf05b7fe |
| 7b2d92b714 |
| a19b7d97f0 |
| 65b47ec191 |
| e13c08fd5a |
| 101ded43cb |
| f4d94c9fcb |
| 35c8d50db0 |
| 4d169c23ae |
| a1a8b30d12 |
| e557eba437 |
| a652bf3448 |
| f518a25cab |
| 02f2584757 |
| e8fd6c7328 |
| 1f608ee2bd |
| 5f4f086d28 |
| 9be3b6ca52 |
| 769177a63e |
| 987415d80c |
| d119d21d99 |
| 5d4d181d00 |
| fd17026c32 |
| 14fb0f7ffb |
| 186d0f98ee |
| 39dbc069a7 |
| 4c2c93deb3 |
| b628770517 |
| 705971cbc4 |
| b86b5db78c |
| 5577445e80 |
| 1a4572013d |
| 5c2fadc28e |
| 7faf8c84c8 |
| 889d630c12 |
| 19664ac56c |
| 708e26e4f4 |

.env.example (new file, 20 lines)

@@ -0,0 +1,20 @@
PARRHESIA_IMAGE=parrhesia:latest
PARRHESIA_HOST_PORT=4000

POSTGRES_DB=parrhesia
POSTGRES_USER=parrhesia
POSTGRES_PASSWORD=parrhesia

DATABASE_URL=ecto://parrhesia:parrhesia@db:5432/parrhesia
POOL_SIZE=20

# Optional runtime overrides:
# PARRHESIA_RELAY_URL=ws://localhost:4000/relay
# PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES=false
# PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_READS=false
# PARRHESIA_POLICIES_MIN_POW_DIFFICULTY=0
# PARRHESIA_FEATURES_VERIFY_EVENT_SIGNATURES=true
# PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT=true
# PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY=true
# PARRHESIA_METRICS_AUTH_TOKEN=
# PARRHESIA_EXTRA_CONFIG=/config/parrhesia.runtime.exs

.github/workflows/ci.yaml (vendored, new file, 121 lines)

@@ -0,0 +1,121 @@
name: CI

on:
  push:
    branches: ["**"]
  pull_request:
    branches: ["**"]

env:
  MIX_ENV: test
  MIX_OS_DEPS_COMPILE_PARTITION_COUNT: 8

permissions:
  contents: read

jobs:
  test:
    name: ${{ matrix.name }}
    runs-on: ubuntu-24.04
    strategy:
      fail-fast: false
      matrix:
        include:
          - name: Test (OTP 27.2 / Elixir 1.18.2)
            otp: "27.2"
            elixir: "1.18.2"
            main: false
          - name: Test (OTP 28.4 / Elixir 1.19.4 + E2E)
            otp: "28.4"
            elixir: "1.19.4"
            main: true

    services:
      postgres:
        image: postgres:16-alpine
        env:
          POSTGRES_USER: postgres
          POSTGRES_PASSWORD: postgres
          POSTGRES_DB: app_test
        ports:
          - 5432:5432
        options: >-
          --health-cmd "pg_isready -U postgres"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

    env:
      PGHOST: localhost
      PGPORT: 5432
      PGUSER: postgres
      PGPASSWORD: postgres
      PGDATABASE: app_test

    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Set up Elixir + OTP
        uses: erlef/setup-beam@v1
        with:
          otp-version: ${{ matrix.otp }}
          elixir-version: ${{ matrix.elixir }}

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 24

      # Cache deps/ directory — keyed on mix.lock
      - name: Cache Mix deps
        uses: actions/cache@v4
        id: deps-cache
        with:
          path: deps
          key: ${{ runner.os }}-mix-deps-${{ hashFiles('mix.lock') }}
          restore-keys: |
            ${{ runner.os }}-mix-deps-

      # Cache _build/ — keyed on mix.lock + OTP/Elixir versions
      - name: Cache _build
        uses: actions/cache@v4
        with:
          path: _build
          key: ${{ runner.os }}-mix-build-${{ matrix.otp }}-${{ matrix.elixir }}-${{ hashFiles('mix.lock') }}
          restore-keys: |
            ${{ runner.os }}-mix-build-${{ matrix.otp }}-${{ matrix.elixir }}-

      - name: Install Mix dependencies
        if: steps.deps-cache.outputs.cache-hit != 'true'
        run: mix deps.get

      - name: Compile (warnings as errors)
        if: ${{ matrix.main }}
        run: mix compile --warnings-as-errors

      - name: Check formatting
        if: ${{ matrix.main }}
        run: mix format --check-formatted

      - name: Credo
        if: ${{ matrix.main }}
        run: mix credo --strict --all

      - name: Check for unused locked deps
        if: ${{ matrix.main }}
        run: |
          mix deps.unlock --unused
          git diff --exit-code -- mix.lock

      - name: Run tests
        run: mix test --color

      - name: Run Node Sync E2E tests
        if: ${{ matrix.main }}
        run: mix test.node_sync_e2e

      - name: Run Marmot E2E tests
        run: mix test.marmot_e2e

.github/workflows/release.yaml (vendored, new file, 185 lines)

@@ -0,0 +1,185 @@
name: Release

on:
  push:
    tags:
      - "v*.*.*"
  workflow_dispatch:
    inputs:
      push:
        description: "Push image to GHCR?"
        required: false
        default: "true"
        type: choice
        options: ["true", "false"]

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}
  FLAKE_OUTPUT: packages.x86_64-linux.dockerImage

permissions:
  contents: read
  packages: write
  id-token: write

jobs:
  test:
    name: Release Gate
    runs-on: ubuntu-24.04

    services:
      postgres:
        image: postgres:16-alpine
        env:
          POSTGRES_USER: postgres
          POSTGRES_PASSWORD: postgres
          POSTGRES_DB: app_test
        ports:
          - 5432:5432
        options: >-
          --health-cmd "pg_isready -U postgres"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

    env:
      MIX_ENV: test
      PGHOST: localhost
      PGPORT: 5432
      PGUSER: postgres
      PGPASSWORD: postgres
      PGDATABASE: app_test

    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Set up Elixir + OTP
        uses: erlef/setup-beam@v1
        with:
          otp-version: "28.4"
          elixir-version: "1.19.4"

      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 24

      - name: Cache Mix deps
        uses: actions/cache@v4
        id: deps-cache
        with:
          path: deps
          key: ${{ runner.os }}-mix-deps-${{ hashFiles('mix.lock') }}
          restore-keys: |
            ${{ runner.os }}-mix-deps-

      - name: Cache _build
        uses: actions/cache@v4
        with:
          path: _build
          key: ${{ runner.os }}-mix-build-28.4-1.19.4-${{ hashFiles('mix.lock') }}
          restore-keys: |
            ${{ runner.os }}-mix-build-28.4-1.19.4-

      - name: Install Mix dependencies
        if: steps.deps-cache.outputs.cache-hit != 'true'
        run: mix deps.get

      - name: Check tag matches Mix version
        if: ${{ startsWith(github.ref, 'refs/tags/v') }}
        run: |
          TAG_VERSION="${GITHUB_REF_NAME#v}"
          MIX_VERSION="$(mix run --no-start -e 'IO.puts(Mix.Project.config()[:version])' | tail -n 1)"

          if [ "$TAG_VERSION" != "$MIX_VERSION" ]; then
            echo "Tag version $TAG_VERSION does not match mix.exs version $MIX_VERSION"
            exit 1
          fi

      - name: Compile
        run: mix compile --warnings-as-errors

      - name: Check formatting
        run: mix format --check-formatted

      - name: Credo
        run: mix credo --strict --all

      - name: Run tests
        run: mix test --color

      - name: Run Node Sync E2E
        run: mix test.node_sync_e2e

      - name: Run Marmot E2E
        run: mix test.marmot_e2e

      - name: Check for unused locked deps
        run: |
          mix deps.unlock --unused
          git diff --exit-code -- mix.lock

  build-and-push:
    name: Build and publish image
    runs-on: ubuntu-24.04
    needs: test

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Install Nix
        uses: DeterminateSystems/nix-installer-action@main
        with:
          extra-conf: |
            experimental-features = nix-command flakes
            substituters = https://cache.nixos.org
            trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=

      - name: Magic Nix Cache
        uses: DeterminateSystems/magic-nix-cache-action@main

      - name: Extract image metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=latest,enable={{is_default_branch}}
            type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/v') }}
            type=sha,prefix=sha-,format=short

      - name: Build Docker image with Nix
        id: build
        run: |
          nix build .#${{ env.FLAKE_OUTPUT }} --out-link ./docker-image-result
          echo "archive_path=$(readlink -f ./docker-image-result)" >> "$GITHUB_OUTPUT"

      - name: Push image to GHCR
        env:
          TAGS: ${{ steps.meta.outputs.tags }}
          SHOULD_PUSH: ${{ github.event.inputs.push != 'false' }}
          ARCHIVE_PATH: ${{ steps.build.outputs.archive_path }}
        run: |
          if [ "$SHOULD_PUSH" != "true" ]; then
            echo "Skipping push"
            exit 0
          fi

          IMAGE_ARCHIVE="docker-archive:${ARCHIVE_PATH}"

          while IFS= read -r TAG; do
            if [ -n "$TAG" ]; then
              echo "Pushing $TAG"
              nix run nixpkgs#skopeo -- copy \
                --dest-creds "${{ github.actor }}:${{ secrets.GITHUB_TOKEN }}" \
                "$IMAGE_ARCHIVE" \
                "docker://$TAG"
            fi
          done <<< "$TAGS"

BENCHMARK.md (deleted, 33 lines)

@@ -1,33 +0,0 @@
Running 2 comparison run(s)...
Versions:
parrhesia 0.3.0
strfry 1.0.4 (nixpkgs)
nostr-rs-relay 0.9.0
nostr-bench 0.4.0

[run 1/2] Parrhesia
[run 1/2] strfry
[run 1/2] nostr-rs-relay

[run 2/2] Parrhesia
[run 2/2] strfry
[run 2/2] nostr-rs-relay

=== Bench comparison (averages) ===
metric                      parrhesia  strfry    nostr-rs-relay  strfry/parrhesia  nostr-rs/parrhesia
--------------------------  ---------  --------  --------------  ----------------  ------------------
connect avg latency (ms) ↓  13.50      3.00      2.00            0.22x             0.15x
connect max latency (ms) ↓  22.50      5.50      3.00            0.24x             0.13x
echo throughput (TPS) ↑     80385.00   61673.00  164516.00       0.77x             2.05x
echo throughput (MiB/s) ↑   44.00      34.45     90.10           0.78x             2.05x
event throughput (TPS) ↑    2000.00    3404.50   788.00          1.70x             0.39x
event throughput (MiB/s) ↑  1.30       2.20      0.50            1.69x             0.38x
req throughput (TPS) ↑      3664.00    1808.50   877.50          0.49x             0.24x
req throughput (MiB/s) ↑    20.75      11.75     2.45            0.57x             0.12x

Legend: ↑ higher is better, ↓ lower is better.
Ratio columns are server/parrhesia (for ↓ metrics, <1.00x means that server is faster).

Run details:
run 1: parrhesia(echo_tps=81402, event_tps=1979, req_tps=3639, connect_avg_ms=14) | strfry(echo_tps=61745, event_tps=3457, req_tps=1818, connect_avg_ms=3) | nostr-rs-relay(echo_tps=159974, event_tps=784, req_tps=905, connect_avg_ms=2)
run 2: parrhesia(echo_tps=79368, event_tps=2021, req_tps=3689, connect_avg_ms=13) | strfry(echo_tps=61601, event_tps=3352, req_tps=1799, connect_avg_ms=3) | nostr-rs-relay(echo_tps=169058, event_tps=792, req_tps=850, connect_avg_ms=2)

README.md (547 changed lines)

@@ -1,13 +1,35 @@
# Parrhesia

Parrhesia is a Nostr relay server written in Elixir/OTP with PostgreSQL storage.
<img alt="Parrhesia Logo" src="./docs/logo.svg" width="150" align="right">

Parrhesia is a Nostr relay server written in Elixir/OTP.

Supported storage backends:

- PostgreSQL, which is the primary and production-oriented backend
- in-memory storage, which is useful for tests, local experiments, and benchmarks

**ALPHA CONDITION – BREAKING CHANGES MIGHT HAPPEN!**

- Advanced Querying: Full-text search (NIP-50) and COUNT queries (NIP-45).
- Secure Messaging: First-class support for Marmot MLS-encrypted groups and NIP-17/44/59 gift-wrapped DMs.
- Identity & Auth: NIP-42 authentication flows and NIP-86 management API with NIP-98 HTTP auth.
- Data Integrity: Negentropy-based synchronization and NIP-62 vanish flows.

It exposes:
- a WebSocket relay endpoint at `/relay`

- listener-configurable WS/HTTP ingress, with a default `public` listener on port `4413`
- a WebSocket relay endpoint at `/relay` on listeners that enable the `nostr` feature
- NIP-11 relay info on `GET /relay` with `Accept: application/nostr+json`
- operational HTTP endpoints (`/health`, `/ready`, `/metrics`)
- `/metrics` is restricted by default to private/loopback source IPs
- a NIP-86-style management API at `POST /management` (NIP-98 auth)
- operational HTTP endpoints such as `/health`, `/ready`, and `/metrics` on listeners that enable them
- a NIP-86-style management API at `POST /management` on listeners that enable the `admin` feature

Listeners can run in plain HTTP, HTTPS, mutual TLS, or proxy-terminated TLS modes. The current TLS implementation supports:

- server TLS on listener sockets
- optional client certificate admission with listener-side client pin checks
- proxy-asserted client TLS identity on trusted proxy hops
- admin-triggered certificate reload by restarting an individual listener from disk

## Supported NIPs

@@ -15,11 +37,18 @@ Current `supported_nips` list:

`1, 9, 11, 13, 17, 40, 42, 43, 44, 45, 50, 59, 62, 66, 70, 77, 86, 98`

`43` is advertised when the built-in NIP-43 relay access flow is enabled. Parrhesia generates relay-signed `28935` invite responses on `REQ`, validates join and leave requests locally, and publishes the resulting signed `8000`, `8001`, and `13534` relay membership events into its own local event store.

`50` uses ranked PostgreSQL full-text search over event `content` by default. Parrhesia applies the filter `limit` after ordering by match quality, and falls back to trigram-backed substring matching for short or symbol-heavy queries such as search-as-you-type prefixes, domains, and punctuation-rich tokens.
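
For intuition, the rough shape of such a ranked query in Ecto is sketched below. The `"events"` table and `:content` column are generic placeholders rather than Parrhesia's actual schema, and the trigram fallback path is omitted:

```elixir
# Generic sketch of ranked full-text search with the limit applied after
# ranking. Placeholder schema; not Parrhesia's real query or fallback logic.
defmodule SearchSketch do
  import Ecto.Query

  def search(repo, term, limit) do
    repo.all(
      from e in "events",
        where:
          fragment(
            "to_tsvector('simple', ?) @@ plainto_tsquery('simple', ?)",
            e.content,
            ^term
          ),
        # Order by match quality first, then apply the filter limit.
        order_by: [
          desc:
            fragment(
              "ts_rank(to_tsvector('simple', ?), plainto_tsquery('simple', ?))",
              e.content,
              ^term
            )
        ],
        limit: ^limit,
        select: %{id: e.id, content: e.content}
    )
  end
end
```
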
`66` is advertised when the built-in NIP-66 publisher is enabled and has at least one relay target. The default config enables it for the `public` relay URL. Parrhesia probes those target relays, collects the resulting NIP-11 / websocket liveness data, and then publishes the signed `10166` and `30166` events locally on this relay.

## Requirements

- Elixir `~> 1.19`
- Elixir `~> 1.18`
- Erlang/OTP 28
- PostgreSQL (18 used in the dev environment; 16+ recommended)
- Docker or Podman plus Docker Compose support if you want to run the published container image

---

@@ -45,12 +74,12 @@ mix setup
mix run --no-halt
```

Server listens on `http://localhost:4000` by default.
The default `public` listener binds to `http://localhost:4413`.

WebSocket clients should connect to:

```text
ws://localhost:4000/relay
ws://localhost:4413/relay
```
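
To sanity-check a running instance, you can fetch its NIP-11 info document over plain HTTP. Below is a minimal sketch using OTP's built-in `:httpc` (no extra dependencies; the URL assumes the default local listener):

```elixir
# Fetch the NIP-11 relay info document: GET /relay with the
# application/nostr+json accept header, via OTP's bundled :inets/:httpc.
:inets.start()

{:ok, {{_http, 200, _ok}, _headers, body}} =
  :httpc.request(
    :get,
    {~c"http://localhost:4413/relay", [{~c"accept", ~c"application/nostr+json"}]},
    [],
    []
  )

IO.puts(body)
```

The same check works against a deployed relay by swapping in its public URL.
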
### Useful endpoints

@@ -63,80 +92,342 @@ ws://localhost:4000/relay

---

## Test suites

Primary test entrypoints:

- `mix test` for the ExUnit suite
- `mix test.marmot_e2e` for the Marmot client end-to-end suite
- `mix test.node_sync_e2e` for the two-node relay sync end-to-end suite
- `mix test.node_sync_docker_e2e` for the release-image Docker two-node relay sync suite

The node-sync harnesses are driven by:

- [`scripts/run_node_sync_e2e.sh`](./scripts/run_node_sync_e2e.sh)
- [`scripts/run_node_sync_docker_e2e.sh`](./scripts/run_node_sync_docker_e2e.sh)
- [`scripts/node_sync_e2e.exs`](./scripts/node_sync_e2e.exs)
- [`compose.node-sync-e2e.yaml`](./compose.node-sync-e2e.yaml)

`mix test.node_sync_e2e` runs two real Parrhesia nodes against separate PostgreSQL databases, verifies catch-up and live sync, restarts one node, and verifies persisted resume behavior. `mix test.node_sync_docker_e2e` runs the same scenario against the release Docker image.

GitHub CI currently runs the non-Docker node-sync e2e on the main Linux matrix job. The Docker node-sync e2e remains an explicit/manual check because it depends on release-image build/runtime fidelity and a working Docker host.

---

## Embedding in another Elixir app

Parrhesia is usable as an embedded OTP dependency, not just as a standalone relay process.
The intended in-process surface is `Parrhesia.API.*`, especially:

- `Parrhesia.API.Events` for publish, query, and count
- `Parrhesia.API.Stream` for local REQ-like subscriptions
- `Parrhesia.API.Admin` for management operations
- `Parrhesia.API.Identity`, `Parrhesia.API.ACL`, and `Parrhesia.API.Sync` for relay identity, protected sync ACLs, and outbound relay sync

Start with:

- [`docs/LOCAL_API.md`](./docs/LOCAL_API.md) for the embedding model and a minimal host setup
- generated ExDoc for the `Embedded API` module group when running `mix docs`

Important caveats for host applications:

- Parrhesia is still alpha; expect some public API and config churn.
- Parrhesia currently assumes a single runtime per BEAM node and uses globally registered process names.
- The defaults in this repo's `config/*.exs` are not imported automatically when Parrhesia is used as a dependency. A host app must set `config :parrhesia, ...` explicitly.
- The host app is responsible for migrating Parrhesia's schema, for example with `Parrhesia.Release.migrate()` or `mix ecto.migrate -r Parrhesia.Repo`.

If you only want the in-process API and not the HTTP/WebSocket edge, configure:

```elixir
config :parrhesia, :listeners, %{}
```
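
For orientation, a minimal sketch of what in-process calls might look like is shown below. The function names and shapes are assumptions, not the documented surface; consult [`docs/LOCAL_API.md`](./docs/LOCAL_API.md) and the generated ExDoc for the real signatures:

```elixir
# Illustrative only: these function names/arities are assumptions, not the
# documented Parrhesia.API surface.

# Publish a pre-signed Nostr event from the host app:
{:ok, _event_id} = Parrhesia.API.Events.publish(signed_event)

# Run a REQ-like query with ordinary Nostr filters:
events = Parrhesia.API.Events.query([%{"kinds" => [1], "limit" => 10}])

# Open a local REQ-like subscription that sends matches to this process:
{:ok, _sub} = Parrhesia.API.Stream.subscribe([%{"kinds" => [1]}], self())
```
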
The config reference below still applies when embedded. It is the primary reference for basic setup and runtime configuration changes.

---

## Production configuration

### Minimal setup

Before a Nostr client can publish its first event successfully, make sure these pieces are in place:

1. PostgreSQL is reachable from Parrhesia.
   Set `DATABASE_URL` and create/migrate the database with `Parrhesia.Release.migrate()` or `mix ecto.migrate`.

   PostgreSQL is the supported production datastore. The in-memory backend is intended for non-persistent runs such as tests and benchmarks.

2. Parrhesia listeners are configured for your deployment.
   The default config exposes a `public` listener on plain HTTP port `4413`, and a reverse proxy can terminate TLS and forward WebSocket traffic to `/relay`. Additional listeners can be defined in `config/*.exs`.

3. `:relay_url` matches the public relay URL clients should use.
   Set `PARRHESIA_RELAY_URL` to the public relay URL exposed by the reverse proxy.
   In the normal deployment model, this should be your public `wss://.../relay` URL.

4. The database schema is migrated before starting normal traffic.
   The app image does not auto-run migrations on boot.

That is the actual minimum. With default policy settings, writes do not require auth, event signatures are verified, and no extra Nostr-specific bootstrap step is needed before posting ordinary events.

In `prod`, these environment variables are used:

- `DATABASE_URL` (**required**), e.g. `ecto://USER:PASS@HOST/parrhesia_prod`
- `POOL_SIZE` (optional, default `10`)
- `PORT` (optional, default `4000`)
- `POOL_SIZE` (optional, default `32`)
- `PORT` (optional, default `4413`)
- `PARRHESIA_*` runtime overrides for relay config, metadata, identity, sync, ACL, limits, policies, listeners, retention, and features
- `PARRHESIA_EXTRA_CONFIG` (optional path to an extra runtime config file)

`config/runtime.exs` reads these values at runtime in production releases.

### Typical relay config
### Runtime env naming

Add/override in config files (for example in `config/prod.exs` or a `config/runtime.exs`):
For runtime overrides, use the `PARRHESIA_...` prefix:

```elixir
config :parrhesia, Parrhesia.Web.Endpoint,
  ip: {0, 0, 0, 0},
  port: 4000
- `PARRHESIA_RELAY_URL`
- `PARRHESIA_METADATA_HIDE_VERSION`
- `PARRHESIA_IDENTITY_*`
- `PARRHESIA_SYNC_*`
- `PARRHESIA_ACL_*`
- `PARRHESIA_TRUSTED_PROXIES`
- `PARRHESIA_PUBLIC_MAX_CONNECTIONS`
- `PARRHESIA_MODERATION_CACHE_ENABLED`
- `PARRHESIA_ENABLE_EXPIRATION_WORKER`
- `PARRHESIA_ENABLE_PARTITION_RETENTION_WORKER`
- `PARRHESIA_STORAGE_BACKEND`
- `PARRHESIA_LIMITS_*`
- `PARRHESIA_POLICIES_*`
- `PARRHESIA_METRICS_*`
- `PARRHESIA_METRICS_ENDPOINT_MAX_CONNECTIONS`
- `PARRHESIA_RETENTION_*`
- `PARRHESIA_FEATURES_*`
- `PARRHESIA_METRICS_ENDPOINT_*`

# Optional dedicated metrics listener (keep this internal)
config :parrhesia, Parrhesia.Web.MetricsEndpoint,
  enabled: true,
  ip: {127, 0, 0, 1},
  port: 9568
Examples:

config :parrhesia,
  metrics: [
    enabled_on_main_endpoint: false,
    public: false,
    private_networks_only: true,
    allowed_cidrs: [],
    auth_token: nil
  ],
  limits: [
    max_frame_bytes: 1_048_576,
    max_event_bytes: 262_144,
    max_filters_per_req: 16,
    max_filter_limit: 500,
    max_subscriptions_per_connection: 32,
    max_event_future_skew_seconds: 900,
    max_outbound_queue: 256,
    outbound_drain_batch_size: 64,
    outbound_overflow_strategy: :close
  ],
  policies: [
    auth_required_for_writes: false,
    auth_required_for_reads: false,
    min_pow_difficulty: 0,
    accept_ephemeral_events: true,
    mls_group_event_ttl_seconds: 300,
    marmot_require_h_for_group_queries: true,
    marmot_group_max_h_values_per_filter: 32,
    marmot_group_max_query_window_seconds: 2_592_000,
    marmot_media_max_imeta_tags_per_event: 8,
    marmot_media_max_field_value_bytes: 1024,
    marmot_media_max_url_bytes: 2048,
    marmot_media_allowed_mime_prefixes: [],
    marmot_media_reject_mip04_v1: true,
    marmot_push_server_pubkeys: [],
    marmot_push_max_relay_tags: 16,
    marmot_push_max_payload_bytes: 65_536,
    marmot_push_max_trigger_age_seconds: 120,
    marmot_push_require_expiration: true,
    marmot_push_max_expiration_window_seconds: 120,
    marmot_push_max_server_recipients: 1
  ],
  features: [
    nip_45_count: true,
    nip_50_search: true,
    nip_77_negentropy: true,
    marmot_push_notifications: false
  ]
```bash
export PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES=true
export PARRHESIA_METRICS_ALLOWED_CIDRS="10.0.0.0/8,192.168.0.0/16"
export PARRHESIA_LIMITS_OUTBOUND_OVERFLOW_STRATEGY=drop_oldest
```

Listeners themselves are primarily configured under `config :parrhesia, :listeners, ...`. The current runtime env helpers tune the default public listener and the optional dedicated metrics listener, including their connection ceilings.

For settings that are awkward to express as env vars, mount an extra config file and set `PARRHESIA_EXTRA_CONFIG` to its path inside the container.
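
Such a file is an ordinary runtime `Config` script. A minimal sketch, assuming it is mounted at the path referenced by `.env.example` (the override values are placeholders):

```elixir
# /config/parrhesia.runtime.exs: illustrative extra config file loaded via
# PARRHESIA_EXTRA_CONFIG; the keys mirror the config reference below.
import Config

config :parrhesia,
  policies: [auth_required_for_writes: true],
  limits: [max_subscriptions_per_connection: 16]
```
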
### Config reference

CSV env vars use comma-separated values. Boolean env vars accept `1/0`, `true/false`, `yes/no`, or `on/off`.

#### Top-level `:parrhesia`

| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:relay_url` | `PARRHESIA_RELAY_URL` | `ws://localhost:4413/relay` | Advertised relay URL and auth relay tag target |
| `:metadata.hide_version?` | `PARRHESIA_METADATA_HIDE_VERSION` | `true` | Hides the relay version from outbound `User-Agent` and NIP-11 when enabled |
| `:acl.protected_filters` | `PARRHESIA_ACL_PROTECTED_FILTERS` | `[]` | JSON-encoded protected filter list for sync ACL checks |
| `:identity.path` | `PARRHESIA_IDENTITY_PATH` | `nil` | Optional path for persisted relay identity material |
| `:identity.private_key` | `PARRHESIA_IDENTITY_PRIVATE_KEY` | `nil` | Optional inline relay private key |
| `:moderation_cache_enabled` | `PARRHESIA_MODERATION_CACHE_ENABLED` | `true` | Toggle moderation cache |
| `:enable_expiration_worker` | `PARRHESIA_ENABLE_EXPIRATION_WORKER` | `true` | Toggle background expiration worker |
| `:nip43` | config-file driven | see table below | Built-in NIP-43 relay access invite / membership flow |
| `:nip66` | config-file driven | see table below | Built-in NIP-66 discovery / monitor publisher |
| `:sync.path` | `PARRHESIA_SYNC_PATH` | `nil` | Optional path to sync peer config |
| `:sync.start_workers?` | `PARRHESIA_SYNC_START_WORKERS` | `true` | Start outbound sync workers on boot |
| `:limits` | `PARRHESIA_LIMITS_*` | see table below | Runtime override group |
| `:policies` | `PARRHESIA_POLICIES_*` | see table below | Runtime override group |
| `:listeners` | config-file driven | see notes below | Ingress listeners with bind, transport, feature, auth, network, and baseline ACL settings |
| `:retention` | `PARRHESIA_RETENTION_*` | see table below | Partition lifecycle and pruning policy |
| `:features` | `PARRHESIA_FEATURES_*` | see table below | Runtime override group |
| `:storage.events` | `-` | `Parrhesia.Storage.Adapters.Postgres.Events` | Config-file override only |
| `:storage.moderation` | `-` | `Parrhesia.Storage.Adapters.Postgres.Moderation` | Config-file override only |
| `:storage.groups` | `-` | `Parrhesia.Storage.Adapters.Postgres.Groups` | Config-file override only |
| `:storage.admin` | `-` | `Parrhesia.Storage.Adapters.Postgres.Admin` | Config-file override only |

#### `Parrhesia.Repo`

| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:url` | `DATABASE_URL` | required | Example: `ecto://USER:PASS@HOST/DATABASE` |
| `:pool_size` | `POOL_SIZE` | `32` | DB connection pool size |
| `:queue_target` | `DB_QUEUE_TARGET_MS` | `1000` | Ecto queue target in ms |
| `:queue_interval` | `DB_QUEUE_INTERVAL_MS` | `5000` | Ecto queue interval in ms |
| `:types` | `-` | `Parrhesia.PostgresTypes` | Internal config-file setting |

#### `Parrhesia.ReadRepo`

| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:url` | `DATABASE_URL` | required | Shares the primary DB URL with the write repo |
| `:pool_size` | `DB_READ_POOL_SIZE` | `32` | Read-only query pool size |
| `:queue_target` | `DB_READ_QUEUE_TARGET_MS` | `1000` | Read pool Ecto queue target in ms |
| `:queue_interval` | `DB_READ_QUEUE_INTERVAL_MS` | `5000` | Read pool Ecto queue interval in ms |
| `:types` | `-` | `Parrhesia.PostgresTypes` | Internal config-file setting |

#### `:listeners`

| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:public.bind.port` | `PORT` | `4413` | Default public listener port |
| `:public.max_connections` | `PARRHESIA_PUBLIC_MAX_CONNECTIONS` | `20000` | Target total connection ceiling for the public listener |
| `:public.proxy.trusted_cidrs` | `PARRHESIA_TRUSTED_PROXIES` | `[]` | Trusted reverse proxies for forwarded IP handling |
| `:public.features.metrics.*` | `PARRHESIA_METRICS_*` | see below | Convenience runtime overrides for metrics on the public listener |
| `:metrics.bind.port` | `PARRHESIA_METRICS_ENDPOINT_PORT` | `9568` | Optional dedicated metrics listener port |
| `:metrics.max_connections` | `PARRHESIA_METRICS_ENDPOINT_MAX_CONNECTIONS` | `1024` | Target total connection ceiling for the dedicated metrics listener |
| `:metrics.enabled` | `PARRHESIA_METRICS_ENDPOINT_ENABLED` | `false` | Enables the optional dedicated metrics listener |

Listener `max_connections` is a first-class config field. Parrhesia translates it to ThousandIsland's per-acceptor `num_connections` limit based on the active acceptor count. Raw `bandit_options[:thousand_island_options]` can still override that for advanced tuning.

Listener `transport.tls` supports `:disabled`, `:server`, `:mutual`, and `:proxy_terminated`. For TLS-enabled listeners, the main config-file fields are `certfile`, `keyfile`, optional `cacertfile`, optional `cipher_suite`, optional `client_pins`, and `proxy_headers` for proxy-terminated identity.
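
For orientation, an illustrative listener definition is sketched below. The exact map shape is an assumption inferred from the schema table that follows, not a copy of the shipped `config/*.exs`, and the certificate paths are placeholders:

```elixir
# Hypothetical listener config; key names follow the schema table below,
# but the precise nesting is an assumption and the file paths are placeholders.
config :parrhesia, :listeners, %{
  public: %{
    bind: %{ip: {0, 0, 0, 0}, port: 4413},
    max_connections: 20_000,
    transport: %{
      scheme: :https,
      tls: %{
        mode: :server,
        certfile: "/etc/parrhesia/tls/fullchain.pem",
        keyfile: "/etc/parrhesia/tls/privkey.pem"
      }
    },
    proxy: %{trusted_cidrs: [], honor_x_forwarded_for: true}
  }
}
```
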
Every listener supports this config-file schema:

| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:id` | `-` | listener key or `:listener` | Listener identifier |
| `:enabled` | public/metrics helpers only | `true` | Whether the listener is started |
| `:bind.ip` | `-` | `0.0.0.0` (`public`) / `127.0.0.1` (`metrics`) | Bind address |
| `:bind.port` | `PORT` / `PARRHESIA_METRICS_ENDPOINT_PORT` | `4413` / `9568` | Bind port |
| `:max_connections` | `PARRHESIA_PUBLIC_MAX_CONNECTIONS` / `PARRHESIA_METRICS_ENDPOINT_MAX_CONNECTIONS` | `20000` / `1024` | Target total listener connection ceiling; accepts integer or `:infinity` in config files |
| `:transport.scheme` | `-` | `:http` | Listener scheme |
| `:transport.tls` | `-` | `%{mode: :disabled}` | TLS mode and TLS-specific options |
| `:proxy.trusted_cidrs` | `PARRHESIA_TRUSTED_PROXIES` on `public` | `[]` | Trusted proxy CIDRs for forwarded identity / IP handling |
| `:proxy.honor_x_forwarded_for` | `-` | `true` | Respect `X-Forwarded-For` from trusted proxies |
| `:network.public` | `-` | `false` | Allow only public networks |
| `:network.private_networks_only` | `-` | `false` | Allow only RFC1918 / local networks |
| `:network.allow_cidrs` | `-` | `[]` | Explicit CIDR allowlist |
| `:network.allow_all` | `-` | `true` | Allow all source IPs |
| `:features.nostr.enabled` | `-` | `true` on `public`, `false` on metrics listener | Enables `/relay` |
| `:features.admin.enabled` | `-` | `true` on `public`, `false` on metrics listener | Enables `/management` |
| `:features.metrics.enabled` | `PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT` on `public` | `true` on `public`, `true` on metrics listener | Enables `/metrics` |
| `:features.metrics.auth_token` | `PARRHESIA_METRICS_AUTH_TOKEN` | `nil` | Optional bearer token for `/metrics` |
| `:features.metrics.access.public` | `PARRHESIA_METRICS_PUBLIC` | `false` | Allow public-network access to `/metrics` |
| `:features.metrics.access.private_networks_only` | `PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY` | `true` | Restrict `/metrics` to private networks |
| `:features.metrics.access.allow_cidrs` | `PARRHESIA_METRICS_ALLOWED_CIDRS` | `[]` | Additional CIDR allowlist for `/metrics` |
| `:features.metrics.access.allow_all` | `-` | `true` | Unconditional metrics access in config files |
| `:auth.nip42_required` | `-` | `false` | Require NIP-42 for relay reads / writes |
| `:auth.nip98_required_for_admin` | `PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED` on `public` | `true` | Require NIP-98 for management API calls |
| `:baseline_acl.read` | `-` | `[]` | Static read deny/allow rules |
| `:baseline_acl.write` | `-` | `[]` | Static write deny/allow rules |
| `:bandit_options` | `-` | `[]` | Advanced Bandit / ThousandIsland passthrough |

#### `:nip66`

| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:enabled` | `-` | `true` | Enables the built-in NIP-66 publisher worker |
| `:publish_interval_seconds` | `-` | `900` | Republish cadence for `10166` and `30166` events |
| `:publish_monitor_announcement?` | `-` | `true` | Publish a `10166` monitor announcement alongside discovery events |
| `:timeout_ms` | `-` | `5000` | Probe timeout for websocket and NIP-11 checks |
| `:checks` | `-` | `[:open, :read, :nip11]` | Checks advertised in `10166` and run against each target relay during probing |
| `:targets` | `-` | `[]` | Optional explicit relay targets to probe; when empty, Parrhesia uses `:relay_url` for the `public` listener |

NIP-66 targets are probe sources, not publish destinations. Parrhesia connects to each target relay, collects the configured liveness / discovery data, and stores the resulting signed `10166` / `30166` events in its own local event store so clients can query them here.
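
As an illustration, the publisher can be pointed at explicit probe targets from a config file. The key names come from the table above, but the exact config shape is an assumption and the target URL is a placeholder:

```elixir
# Hypothetical :nip66 override; keys from the table above, placeholder target.
config :parrhesia, :nip66,
  enabled: true,
  publish_interval_seconds: 900,
  timeout_ms: 5_000,
  targets: ["wss://relay.example.com"]
```
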
#### `:nip43`

| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:enabled` | `-` | `true` | Enables the built-in NIP-43 relay access flow and advertises `43` in NIP-11 |
| `:invite_ttl_seconds` | `-` | `900` | Expiration window for generated invite claim strings returned by `REQ` filters targeting kind `28935` |
| `:request_max_age_seconds` | `-` | `300` | Maximum allowed age for inbound join (`28934`) and leave (`28936`) requests |

Parrhesia treats NIP-43 invite requests as synthetic relay output, not stored client input. A `REQ` for kind `28935` causes the relay to generate a fresh relay-signed invite event on the fly. Clients then submit that claim back in a protected kind `28934` join request. When a join or leave request is accepted, Parrhesia updates its local relay membership state and publishes the corresponding relay-signed `8000` / `8001` delta plus the latest `13534` membership snapshot locally.
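
For reference, a config-file override for this flow might look like the sketch below; the key names come from the table above, while the exact config shape is an assumption:

```elixir
# Hypothetical :nip43 override; keys from the table above, values are the defaults.
config :parrhesia, :nip43,
  enabled: true,
  invite_ttl_seconds: 900,
  request_max_age_seconds: 300
```
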
#### `:limits`

| Atom key | ENV | Default |
| --- | --- | --- |
| `:max_frame_bytes` | `PARRHESIA_LIMITS_MAX_FRAME_BYTES` | `1048576` |
| `:max_event_bytes` | `PARRHESIA_LIMITS_MAX_EVENT_BYTES` | `262144` |
| `:max_filters_per_req` | `PARRHESIA_LIMITS_MAX_FILTERS_PER_REQ` | `16` |
| `:max_filter_limit` | `PARRHESIA_LIMITS_MAX_FILTER_LIMIT` | `500` |
| `:max_tags_per_event` | `PARRHESIA_LIMITS_MAX_TAGS_PER_EVENT` | `256` |
| `:max_tag_values_per_filter` | `PARRHESIA_LIMITS_MAX_TAG_VALUES_PER_FILTER` | `128` |
| `:ip_max_event_ingest_per_window` | `PARRHESIA_LIMITS_IP_MAX_EVENT_INGEST_PER_WINDOW` | `1000` |
| `:ip_event_ingest_window_seconds` | `PARRHESIA_LIMITS_IP_EVENT_INGEST_WINDOW_SECONDS` | `1` |
| `:relay_max_event_ingest_per_window` | `PARRHESIA_LIMITS_RELAY_MAX_EVENT_INGEST_PER_WINDOW` | `10000` |
| `:relay_event_ingest_window_seconds` | `PARRHESIA_LIMITS_RELAY_EVENT_INGEST_WINDOW_SECONDS` | `1` |
| `:max_subscriptions_per_connection` | `PARRHESIA_LIMITS_MAX_SUBSCRIPTIONS_PER_CONNECTION` | `32` |
| `:max_event_future_skew_seconds` | `PARRHESIA_LIMITS_MAX_EVENT_FUTURE_SKEW_SECONDS` | `900` |
| `:max_event_ingest_per_window` | `PARRHESIA_LIMITS_MAX_EVENT_INGEST_PER_WINDOW` | `120` |
| `:event_ingest_window_seconds` | `PARRHESIA_LIMITS_EVENT_INGEST_WINDOW_SECONDS` | `1` |
| `:auth_max_age_seconds` | `PARRHESIA_LIMITS_AUTH_MAX_AGE_SECONDS` | `600` |
| `:max_outbound_queue` | `PARRHESIA_LIMITS_MAX_OUTBOUND_QUEUE` | `256` |
| `:outbound_drain_batch_size` | `PARRHESIA_LIMITS_OUTBOUND_DRAIN_BATCH_SIZE` | `64` |
| `:outbound_overflow_strategy` | `PARRHESIA_LIMITS_OUTBOUND_OVERFLOW_STRATEGY` | `:close` |
| `:max_negentropy_payload_bytes` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_PAYLOAD_BYTES` | `4096` |
| `:max_negentropy_sessions_per_connection` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_SESSIONS_PER_CONNECTION` | `8` |
| `:max_negentropy_total_sessions` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_TOTAL_SESSIONS` | `10000` |
| `:max_negentropy_items_per_session` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_ITEMS_PER_SESSION` | `50000` |
| `:negentropy_id_list_threshold` | `PARRHESIA_LIMITS_NEGENTROPY_ID_LIST_THRESHOLD` | `32` |
| `:negentropy_session_idle_timeout_seconds` | `PARRHESIA_LIMITS_NEGENTROPY_SESSION_IDLE_TIMEOUT_SECONDS` | `60` |
| `:negentropy_session_sweep_interval_seconds` | `PARRHESIA_LIMITS_NEGENTROPY_SESSION_SWEEP_INTERVAL_SECONDS` | `10` |

#### `:policies`

| Atom key | ENV | Default |
| --- | --- | --- |
| `:auth_required_for_writes` | `PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES` | `false` |
| `:auth_required_for_reads` | `PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_READS` | `false` |
| `:min_pow_difficulty` | `PARRHESIA_POLICIES_MIN_POW_DIFFICULTY` | `0` |
| `:accept_ephemeral_events` | `PARRHESIA_POLICIES_ACCEPT_EPHEMERAL_EVENTS` | `true` |
| `:mls_group_event_ttl_seconds` | `PARRHESIA_POLICIES_MLS_GROUP_EVENT_TTL_SECONDS` | `300` |
| `:marmot_require_h_for_group_queries` | `PARRHESIA_POLICIES_MARMOT_REQUIRE_H_FOR_GROUP_QUERIES` | `true` |
| `:marmot_group_max_h_values_per_filter` | `PARRHESIA_POLICIES_MARMOT_GROUP_MAX_H_VALUES_PER_FILTER` | `32` |
| `:marmot_group_max_query_window_seconds` | `PARRHESIA_POLICIES_MARMOT_GROUP_MAX_QUERY_WINDOW_SECONDS` | `2592000` |
| `:marmot_media_max_imeta_tags_per_event` | `PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_IMETA_TAGS_PER_EVENT` | `8` |
| `:marmot_media_max_field_value_bytes` | `PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_FIELD_VALUE_BYTES` | `1024` |
| `:marmot_media_max_url_bytes` | `PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_URL_BYTES` | `2048` |
| `:marmot_media_allowed_mime_prefixes` | `PARRHESIA_POLICIES_MARMOT_MEDIA_ALLOWED_MIME_PREFIXES` | `[]` |
| `:marmot_media_reject_mip04_v1` | `PARRHESIA_POLICIES_MARMOT_MEDIA_REJECT_MIP04_V1` | `true` |
| `:marmot_push_server_pubkeys` | `PARRHESIA_POLICIES_MARMOT_PUSH_SERVER_PUBKEYS` | `[]` |
| `:marmot_push_max_relay_tags` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_RELAY_TAGS` | `16` |
| `:marmot_push_max_payload_bytes` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_PAYLOAD_BYTES` | `65536` |
| `:marmot_push_max_trigger_age_seconds` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_TRIGGER_AGE_SECONDS` | `120` |
| `:marmot_push_require_expiration` | `PARRHESIA_POLICIES_MARMOT_PUSH_REQUIRE_EXPIRATION` | `true` |
| `:marmot_push_max_expiration_window_seconds` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_EXPIRATION_WINDOW_SECONDS` | `120` |
| `:marmot_push_max_server_recipients` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_SERVER_RECIPIENTS` | `1` |
| `:management_auth_required` | `PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED` | `true` |

#### Listener-related metrics helpers

| Atom key | ENV | Default |
| --- | --- | --- |
| `:public.features.metrics.enabled` | `PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT` | `true` |
| `:public` | `PARRHESIA_METRICS_PUBLIC` | `false` |
| `:private_networks_only` | `PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY` | `true` |
| `:allowed_cidrs` | `PARRHESIA_METRICS_ALLOWED_CIDRS` | `[]` |
| `:auth_token` | `PARRHESIA_METRICS_AUTH_TOKEN` | `nil` |

#### `:retention`

| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:check_interval_hours` | `PARRHESIA_RETENTION_CHECK_INTERVAL_HOURS` | `24` | Partition maintenance + pruning cadence |
| `:months_ahead` | `PARRHESIA_RETENTION_MONTHS_AHEAD` | `2` | Pre-create current month plus N future monthly partitions for `events` and `event_tags` |
| `:max_db_bytes` | `PARRHESIA_RETENTION_MAX_DB_BYTES` | `:infinity` | Interpreted as GiB threshold; accepts integer or `infinity` |
| `:max_months_to_keep` | `PARRHESIA_RETENTION_MAX_MONTHS_TO_KEEP` | `:infinity` | Keep at most N months (including current month); accepts integer or `infinity` |
| `:max_partitions_to_drop_per_run` | `PARRHESIA_RETENTION_MAX_PARTITIONS_TO_DROP_PER_RUN` | `1` | Safety cap for each maintenance run |
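
As a worked example, a bounded retention policy might look like the sketch below; the key names come from the table above, while the exact config shape is an assumption:

```elixir
# Hypothetical retention override: pre-create two months of partitions ahead,
# keep at most six months (including the current one), and drop at most one
# partition per maintenance run.
config :parrhesia, :retention,
  check_interval_hours: 24,
  months_ahead: 2,
  max_months_to_keep: 6,
  max_partitions_to_drop_per_run: 1
```
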
#### `:features`

| Atom key | ENV | Default |
| --- | --- | --- |
| `:verify_event_signatures` | `-` | `true` |
| `:nip_45_count` | `PARRHESIA_FEATURES_NIP_45_COUNT` | `true` |
| `:nip_50_search` | `PARRHESIA_FEATURES_NIP_50_SEARCH` | `true` |
| `:nip_77_negentropy` | `PARRHESIA_FEATURES_NIP_77_NEGENTROPY` | `true` |
| `:marmot_push_notifications` | `PARRHESIA_FEATURES_MARMOT_PUSH_NOTIFICATIONS` | `false` |

`:verify_event_signatures` is config-file only. Production releases always verify event signatures.

#### Extra runtime config

| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| extra runtime config file | `PARRHESIA_EXTRA_CONFIG` | unset | Imports an additional runtime `.exs` file |

---

## Deploy

@@ -150,24 +441,130 @@ export POOL_SIZE=20

mix deps.get --only prod
mix compile
mix ecto.migrate
mix release

_build/prod/rel/parrhesia/bin/parrhesia foreground
_build/prod/rel/parrhesia/bin/parrhesia eval "Parrhesia.Release.migrate()"
_build/prod/rel/parrhesia/bin/parrhesia start
```

For systemd/process managers, run the release command in foreground mode.
For systemd/process managers, run the release command with `start`.

### Option B: Nix package (`default.nix`)
### Option B: Nix release package (`default.nix`)

Build:

```bash
nix-build
nix build
```

Run the built release from `./result/bin/parrhesia` (release command interface).

### Option C: Docker image via Nix flake

Build the image tarball:

```bash
nix build .#dockerImage
# or with explicit build target:
nix build .#packages.x86_64-linux.dockerImage
```

Load it into Docker:

```bash
docker load < result
```

Run database migrations:

```bash
docker run --rm \
  -e DATABASE_URL="ecto://USER:PASS@HOST/parrhesia_prod" \
  parrhesia:latest \
  eval "Parrhesia.Release.migrate()"
```

Start the relay:

```bash
docker run --rm \
  -p 4413:4413 \
  -e DATABASE_URL="ecto://USER:PASS@HOST/parrhesia_prod" \
  -e POOL_SIZE=20 \
  parrhesia:latest
```

### Option D: Docker Compose with PostgreSQL

The repo includes [`compose.yaml`](./compose.yaml) and [`.env.example`](./.env.example) so Docker users can run Postgres and Parrhesia together.

Set up the environment file:

```bash
cp .env.example .env
```

If you are building locally from source, build and load the image first:

```bash
nix build .#dockerImage
docker load < result
```

Then start the stack:

```bash
docker compose up -d db
docker compose run --rm migrate
docker compose up -d parrhesia
```

The relay will be available on:

```text
ws://localhost:4413/relay
```

Notes:

- `compose.yaml` keeps PostgreSQL in a separate container; the Parrhesia image only runs the app release.
- The container listens on port `4413`; use `PARRHESIA_HOST_PORT` if you want a different published host port.
- Migrations are run explicitly through the one-shot `migrate` service instead of on every app boot.
- Common runtime overrides can go straight into `.env`; see [`.env.example`](./.env.example) for examples.
- For more specialized overrides, mount a file and set `PARRHESIA_EXTRA_CONFIG=/path/in/container/runtime.exs`.
- When a GHCR image is published, set `PARRHESIA_IMAGE=ghcr.io/<owner>/parrhesia:<tag>` in `.env` and reuse the same compose flow.

---

## Benchmark

The benchmark compares two Parrhesia profiles, one backed by PostgreSQL and one backed by the in-memory adapter, against [`strfry`](https://github.com/hoytech/strfry) and [`nostr-rs-relay`](https://sr.ht/~gheartsfield/nostr-rs-relay/) using [`nostr-bench`](https://github.com/rnostr/nostr-bench). Benchmark runs also lift Parrhesia's relay-side limits by default so the benchmark client, not server guardrails, is the main bottleneck.

`mix bench` is a sequential mixed-workload benchmark, not an isolated per-endpoint microbenchmark. Each relay instance runs `connect`, then `echo`, then `event`, then `req` against the same live process, so later phases measure against state and load created by earlier phases.

Run it with:

```bash
mix bench
```

Current comparison results from [BENCHMARK.md](./BENCHMARK.md):

| metric | parrhesia-pg | parrhesia-mem | nostr-rs-relay | mem/pg | nostr-rs/pg |
| --- | ---: | ---: | ---: | ---: | ---: |
| connect avg latency (ms) ↓ | 9.33 | 7.67 | 7.00 | **0.82x** | **0.75x** |
| connect max latency (ms) ↓ | 12.33 | 9.67 | 10.33 | **0.78x** | **0.84x** |
| echo throughput (TPS) ↑ | 64030.33 | 93656.33 | 140767.00 | **1.46x** | **2.20x** |
| echo throughput (MiB/s) ↑ | 35.07 | 51.27 | 77.07 | **1.46x** | **2.20x** |
| event throughput (TPS) ↑ | 5015.33 | 1505.33 | 2293.67 | 0.30x | 0.46x |
| event throughput (MiB/s) ↑ | 3.40 | 1.00 | 1.50 | 0.29x | 0.44x |
| req throughput (TPS) ↑ | 6416.33 | 14566.67 | 3035.67 | **2.27x** | 0.47x |
| req throughput (MiB/s) ↑ | 42.43 | 94.23 | 19.23 | **2.22x** | 0.45x |

Higher is better for `↑` metrics. Lower is better for `↓` metrics.

(Results from a Linux container on a 6-core Intel i5-8400T with NVMe drive, PostgreSQL 18)

---

## Development quality checks

@@ -178,13 +575,13 @@ Before opening a PR:
mix precommit
```

For external CLI end-to-end checks with `nak`:
Additional external CLI end-to-end checks with `nak`:

```bash
mix test.nak_e2e
```

For Marmot client end-to-end checks (TypeScript/Node suite using `marmot-ts`):
For Marmot client end-to-end checks (TypeScript/Node suite using `marmot-ts`, included in `precommit`):

```bash
mix test.marmot_e2e

bench/chart.gnuplot (new file, 31 lines)

@@ -0,0 +1,31 @@
# bench/chart.gnuplot — multi-panel SVG showing relay performance over git tags.
#
# Invoked by scripts/run_bench_update.sh with:
#   gnuplot -e "data_dir='...'" -e "output_file='...'" bench/chart.gnuplot
#
# The data_dir contains per-metric TSV files and a plot_commands.gnuplot
# fragment generated by the data-prep step that defines the actual plot
# directives (handling variable server columns).

set terminal svg enhanced size 1200,900 font "sans,11"
set output output_file

set style data linespoints
set key outside right top
set grid ytics
set xtics rotate by -30
set datafile separator "\t"

# parrhesia-pg: blue solid, parrhesia-memory: green solid
# strfry: orange dashed, nostr-rs-relay: red dashed
set linetype 1 lc rgb "#2563eb" lw 2 pt 7 ps 1.0
set linetype 2 lc rgb "#16a34a" lw 2 pt 9 ps 1.0
set linetype 3 lc rgb "#ea580c" lw 1.5 pt 5 ps 0.8 dt 2
set linetype 4 lc rgb "#dc2626" lw 1.5 pt 4 ps 0.8 dt 2

set multiplot layout 2,2 title "Parrhesia Relay Benchmark History" font ",14"

# Load dynamically generated plot commands (handles variable column counts)
load data_dir."/plot_commands.gnuplot"

unset multiplot

bench/chart.svg (new file, 752 lines)

@@ -0,0 +1,752 @@
[Generated gnuplot 6.0 SVG (1200×900): "Parrhesia Relay Benchmark History" multi-panel chart; markup omitted.]
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M74.17,114.31 L368.73,114.31 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M74.17,114.31 L82.42,114.31 M368.73,114.31 L360.48,114.31 '/> <g transform="translate(66.48,117.89)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 5000</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M74.17,70.51 L368.73,70.51 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M74.17,70.51 L82.42,70.51 M368.73,70.51 L360.48,70.51 '/> <g transform="translate(66.48,74.09)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 5500</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M221.45,420.94 L221.45,412.69 M221.45,70.51 L221.45,78.76 '/> <g transform="translate(219.66,431.73) rotate(30.00)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="start">
|
||||
<text><tspan font-family="sans" >v0.5.0</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M74.17,70.51 L74.17,420.94 L368.73,420.94 L368.73,70.51 L74.17,70.51 Z '/></g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g id="gnuplot_plot_1a" ><title>parrhesia-pg</title>
|
||||
<g fill="none" color="white" stroke="black" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<g transform="translate(537.91,82.34)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" >parrhesia-pg</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='rgb( 37, 99, 235)' d='M545.60,78.76 L584.61,78.76 M221.45,112.97 '/> <use xlink:href='#gpPt6' transform='translate(221.45,112.97) scale(4.12)' color='rgb( 37, 99, 235)'/>
|
||||
<use xlink:href='#gpPt6' transform='translate(565.10,78.76) scale(4.12)' color='rgb( 37, 99, 235)'/>
|
||||
</g>
|
||||
</g>
|
||||
<g id="gnuplot_plot_2a" ><title>parrhesia-memory</title>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<g transform="translate(537.91,98.84)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" >parrhesia-memory</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='rgb( 22, 163, 74)' d='M545.60,95.26 L584.61,95.26 M221.45,420.47 '/> <use xlink:href='#gpPt8' transform='translate(221.45,420.47) scale(4.12)' color='rgb( 22, 163, 74)'/>
|
||||
<use xlink:href='#gpPt8' transform='translate(565.10,95.26) scale(4.12)' color='rgb( 22, 163, 74)'/>
|
||||
</g>
|
||||
</g>
|
||||
<g id="gnuplot_plot_3a" ><title>nostr-rs-relay (avg)</title>
|
||||
<g fill="none" color="white" stroke="rgb( 22, 163, 74)" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<g transform="translate(537.91,115.34)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" >nostr-rs-relay (avg)</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='rgb(234, 88, 12)' stroke-dasharray='3.8,6.0' d='M545.60,111.76 L584.61,111.76 M221.45,351.41 '/> <use xlink:href='#gpPt4' transform='translate(221.45,351.41) scale(3.30)' color='rgb(234, 88, 12)'/>
|
||||
<use xlink:href='#gpPt4' transform='translate(565.10,111.76) scale(3.30)' color='rgb(234, 88, 12)'/>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="white" stroke="rgb(234, 88, 12)" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M74.17,70.51 L74.17,420.94 L368.73,420.94 L368.73,70.51 L74.17,70.51 Z '/> <g transform="translate(17.58,245.73) rotate(270.00)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="middle">
|
||||
<text><tspan font-family="sans" >TPS</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<g transform="translate(221.45,49.34)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="middle">
|
||||
<text><tspan font-family="sans" >Event Throughput (TPS) — higher is better</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M681.86,420.94 L968.73,420.94 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M681.86,420.94 L690.11,420.94 M968.73,420.94 L960.48,420.94 '/> <g transform="translate(674.17,424.52)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 2000</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M681.86,370.88 L968.73,370.88 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M681.86,370.88 L690.11,370.88 M968.73,370.88 L960.48,370.88 '/> <g transform="translate(674.17,374.46)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 4000</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M681.86,320.82 L968.73,320.82 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M681.86,320.82 L690.11,320.82 M968.73,320.82 L960.48,320.82 '/> <g transform="translate(674.17,324.40)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 6000</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M681.86,270.76 L968.73,270.76 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M681.86,270.76 L690.11,270.76 M968.73,270.76 L960.48,270.76 '/> <g transform="translate(674.17,274.34)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 8000</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M681.86,220.69 L968.73,220.69 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M681.86,220.69 L690.11,220.69 M968.73,220.69 L960.48,220.69 '/> <g transform="translate(674.17,224.27)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 10000</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M681.86,170.63 L968.73,170.63 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M681.86,170.63 L690.11,170.63 M968.73,170.63 L960.48,170.63 '/> <g transform="translate(674.17,174.21)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 12000</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M681.86,120.57 L968.73,120.57 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M681.86,120.57 L690.11,120.57 M968.73,120.57 L960.48,120.57 '/> <g transform="translate(674.17,124.15)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 14000</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M681.86,70.51 L968.73,70.51 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M681.86,70.51 L690.11,70.51 M968.73,70.51 L960.48,70.51 '/> <g transform="translate(674.17,74.09)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 16000</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M825.30,420.94 L825.30,412.69 M825.30,70.51 L825.30,78.76 '/> <g transform="translate(823.51,431.73) rotate(30.00)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="start">
|
||||
<text><tspan font-family="sans" >v0.5.0</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M681.86,70.51 L681.86,420.94 L968.73,420.94 L968.73,70.51 L681.86,70.51 Z '/></g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g id="gnuplot_plot_1b" ><title>parrhesia-pg</title>
|
||||
<g fill="none" color="white" stroke="black" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<g transform="translate(1137.91,82.34)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" >parrhesia-pg</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='rgb( 37, 99, 235)' d='M1145.60,78.76 L1184.61,78.76 M825.30,310.40 '/> <use xlink:href='#gpPt6' transform='translate(825.30,310.40) scale(4.12)' color='rgb( 37, 99, 235)'/>
|
||||
<use xlink:href='#gpPt6' transform='translate(1165.10,78.76) scale(4.12)' color='rgb( 37, 99, 235)'/>
|
||||
</g>
|
||||
</g>
|
||||
<g id="gnuplot_plot_2b" ><title>parrhesia-memory</title>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<g transform="translate(1137.91,98.84)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" >parrhesia-memory</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='rgb( 22, 163, 74)' d='M1145.60,95.26 L1184.61,95.26 M825.30,106.39 '/> <use xlink:href='#gpPt8' transform='translate(825.30,106.39) scale(4.12)' color='rgb( 22, 163, 74)'/>
|
||||
<use xlink:href='#gpPt8' transform='translate(1165.10,95.26) scale(4.12)' color='rgb( 22, 163, 74)'/>
|
||||
</g>
|
||||
</g>
|
||||
<g id="gnuplot_plot_3b" ><title>nostr-rs-relay (avg)</title>
|
||||
<g fill="none" color="white" stroke="rgb( 22, 163, 74)" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<g transform="translate(1137.91,115.34)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" >nostr-rs-relay (avg)</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='rgb(234, 88, 12)' stroke-dasharray='3.8,6.0' d='M1145.60,111.76 L1184.61,111.76 M825.30,395.02 '/> <use xlink:href='#gpPt4' transform='translate(825.30,395.02) scale(3.30)' color='rgb(234, 88, 12)'/>
|
||||
<use xlink:href='#gpPt4' transform='translate(1165.10,111.76) scale(3.30)' color='rgb(234, 88, 12)'/>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="white" stroke="rgb(234, 88, 12)" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M681.86,70.51 L681.86,420.94 L968.73,420.94 L968.73,70.51 L681.86,70.51 Z '/> <g transform="translate(617.58,245.73) rotate(270.00)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="middle">
|
||||
<text><tspan font-family="sans" >TPS</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<g transform="translate(825.29,49.34)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="middle">
|
||||
<text><tspan font-family="sans" >Req Throughput (TPS) — higher is better</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M89.55,860.44 L368.73,860.44 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M89.55,860.44 L97.80,860.44 M368.73,860.44 L360.48,860.44 '/> <g transform="translate(81.86,864.02)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 60000</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M89.55,821.50 L368.73,821.50 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M89.55,821.50 L97.80,821.50 M368.73,821.50 L360.48,821.50 '/> <g transform="translate(81.86,825.08)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 70000</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M89.55,782.56 L368.73,782.56 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M89.55,782.56 L97.80,782.56 M368.73,782.56 L360.48,782.56 '/> <g transform="translate(81.86,786.14)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 80000</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M89.55,743.63 L368.73,743.63 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M89.55,743.63 L97.80,743.63 M368.73,743.63 L360.48,743.63 '/> <g transform="translate(81.86,747.21)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 90000</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M89.55,704.69 L368.73,704.69 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M89.55,704.69 L97.80,704.69 M368.73,704.69 L360.48,704.69 '/> <g transform="translate(81.86,708.27)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 100000</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M89.55,665.75 L368.73,665.75 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M89.55,665.75 L97.80,665.75 M368.73,665.75 L360.48,665.75 '/> <g transform="translate(81.86,669.33)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 110000</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M89.55,626.81 L368.73,626.81 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M89.55,626.81 L97.80,626.81 M368.73,626.81 L360.48,626.81 '/> <g transform="translate(81.86,630.39)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 120000</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M89.55,587.88 L368.73,587.88 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M89.55,587.88 L97.80,587.88 M368.73,587.88 L360.48,587.88 '/> <g transform="translate(81.86,591.46)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 130000</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M89.55,548.94 L368.73,548.94 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M89.55,548.94 L97.80,548.94 M368.73,548.94 L360.48,548.94 '/> <g transform="translate(81.86,552.52)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 140000</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M89.55,510.00 L368.73,510.00 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M89.55,510.00 L97.80,510.00 M368.73,510.00 L360.48,510.00 '/> <g transform="translate(81.86,513.58)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 150000</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M229.14,860.44 L229.14,852.19 M229.14,510.00 L229.14,518.25 '/> <g transform="translate(227.35,871.23) rotate(30.00)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="start">
|
||||
<text><tspan font-family="sans" >v0.5.0</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M89.55,510.00 L89.55,860.44 L368.73,860.44 L368.73,510.00 L89.55,510.00 Z '/></g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g id="gnuplot_plot_1c" ><title>parrhesia-pg</title>
|
||||
<g fill="none" color="white" stroke="black" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<g transform="translate(537.91,521.83)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" >parrhesia-pg</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='rgb( 37, 99, 235)' d='M545.60,518.25 L584.61,518.25 M229.14,844.75 '/> <use xlink:href='#gpPt6' transform='translate(229.14,844.75) scale(4.12)' color='rgb( 37, 99, 235)'/>
|
||||
<use xlink:href='#gpPt6' transform='translate(565.10,518.25) scale(4.12)' color='rgb( 37, 99, 235)'/>
|
||||
</g>
|
||||
</g>
|
||||
<g id="gnuplot_plot_2c" ><title>parrhesia-memory</title>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<g transform="translate(537.91,538.33)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" >parrhesia-memory</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='rgb( 22, 163, 74)' d='M545.60,534.75 L584.61,534.75 M229.14,729.39 '/> <use xlink:href='#gpPt8' transform='translate(229.14,729.39) scale(4.12)' color='rgb( 22, 163, 74)'/>
|
||||
<use xlink:href='#gpPt8' transform='translate(565.10,534.75) scale(4.12)' color='rgb( 22, 163, 74)'/>
|
||||
</g>
|
||||
</g>
|
||||
<g id="gnuplot_plot_3c" ><title>nostr-rs-relay (avg)</title>
|
||||
<g fill="none" color="white" stroke="rgb( 22, 163, 74)" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<g transform="translate(537.91,554.83)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" >nostr-rs-relay (avg)</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='rgb(234, 88, 12)' stroke-dasharray='3.8,6.0' d='M545.60,551.25 L584.61,551.25 M229.14,545.95 '/> <use xlink:href='#gpPt4' transform='translate(229.14,545.95) scale(3.30)' color='rgb(234, 88, 12)'/>
|
||||
<use xlink:href='#gpPt4' transform='translate(565.10,551.25) scale(3.30)' color='rgb(234, 88, 12)'/>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="white" stroke="rgb(234, 88, 12)" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M89.55,510.00 L89.55,860.44 L368.73,860.44 L368.73,510.00 L89.55,510.00 Z '/> <g transform="translate(17.58,685.22) rotate(270.00)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="middle">
|
||||
<text><tspan font-family="sans" >TPS</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<g transform="translate(229.14,488.83)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="middle">
|
||||
<text><tspan font-family="sans" >Echo Throughput (TPS) — higher is better</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M666.48,860.44 L968.73,860.44 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M666.48,860.44 L674.73,860.44 M968.73,860.44 L960.48,860.44 '/> <g transform="translate(658.79,864.02)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 7</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M666.48,790.35 L968.73,790.35 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M666.48,790.35 L674.73,790.35 M968.73,790.35 L960.48,790.35 '/> <g transform="translate(658.79,793.93)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 7.5</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M666.48,720.26 L968.73,720.26 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M666.48,720.26 L674.73,720.26 M968.73,720.26 L960.48,720.26 '/> <g transform="translate(658.79,723.84)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 8</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M666.48,650.18 L968.73,650.18 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M666.48,650.18 L674.73,650.18 M968.73,650.18 L960.48,650.18 '/> <g transform="translate(658.79,653.76)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 8.5</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M666.48,580.09 L968.73,580.09 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M666.48,580.09 L674.73,580.09 M968.73,580.09 L960.48,580.09 '/> <g transform="translate(658.79,583.67)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 9</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M666.48,510.00 L968.73,510.00 '/></g>
|
||||
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M666.48,510.00 L674.73,510.00 M968.73,510.00 L960.48,510.00 '/> <g transform="translate(658.79,513.58)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" > 9.5</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M817.61,860.44 L817.61,852.19 M817.61,510.00 L817.61,518.25 '/> <g transform="translate(815.82,871.23) rotate(30.00)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="start">
|
||||
<text><tspan font-family="sans" >v0.5.0</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M666.48,510.00 L666.48,860.44 L968.73,860.44 L968.73,510.00 L666.48,510.00 Z '/></g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g id="gnuplot_plot_1d" ><title>parrhesia-pg</title>
|
||||
<g fill="none" color="white" stroke="black" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<g transform="translate(1137.91,521.83)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" >parrhesia-pg</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='rgb( 37, 99, 235)' d='M1145.60,518.25 L1184.61,518.25 M817.61,533.36 '/> <use xlink:href='#gpPt6' transform='translate(817.61,533.36) scale(4.12)' color='rgb( 37, 99, 235)'/>
|
||||
<use xlink:href='#gpPt6' transform='translate(1165.10,518.25) scale(4.12)' color='rgb( 37, 99, 235)'/>
|
||||
</g>
|
||||
</g>
|
||||
<g id="gnuplot_plot_2d" ><title>parrhesia-memory</title>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<g transform="translate(1137.91,538.33)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" >parrhesia-memory</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='rgb( 22, 163, 74)' d='M1145.60,534.75 L1184.61,534.75 M817.61,766.99 '/> <use xlink:href='#gpPt8' transform='translate(817.61,766.99) scale(4.12)' color='rgb( 22, 163, 74)'/>
|
||||
<use xlink:href='#gpPt8' transform='translate(1165.10,534.75) scale(4.12)' color='rgb( 22, 163, 74)'/>
|
||||
</g>
|
||||
</g>
|
||||
<g id="gnuplot_plot_3d" ><title>nostr-rs-relay (avg)</title>
|
||||
<g fill="none" color="white" stroke="rgb( 22, 163, 74)" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<g transform="translate(1137.91,554.83)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||
<text><tspan font-family="sans" >nostr-rs-relay (avg)</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='rgb(234, 88, 12)' stroke-dasharray='3.8,6.0' d='M1145.60,551.25 L1184.61,551.25 M817.61,860.44 '/> <use xlink:href='#gpPt4' transform='translate(817.61,860.44) scale(3.30)' color='rgb(234, 88, 12)'/>
|
||||
<use xlink:href='#gpPt4' transform='translate(1165.10,551.25) scale(3.30)' color='rgb(234, 88, 12)'/>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="white" stroke="rgb(234, 88, 12)" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="black" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<path stroke='black' d='M666.48,510.00 L666.48,860.44 L968.73,860.44 L968.73,510.00 L666.48,510.00 Z '/> <g transform="translate(617.58,685.22) rotate(270.00)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="middle">
|
||||
<text><tspan font-family="sans" >ms</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
<g transform="translate(817.60,488.83)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="middle">
|
||||
<text><tspan font-family="sans" >Connect Avg Latency (ms) — lower is better</tspan></text>
|
||||
</g>
|
||||
</g>
|
||||
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||
</g>
|
||||
</g>
|
||||
</svg>
|
||||
|
||||
|
After Width: | Height: | Size: 53 KiB |
1
bench/history.jsonl
Normal file
@@ -0,0 +1 @@
{"timestamp":"2026-03-18T20:13:21Z","machine_id":"squirrel","git_tag":"v0.5.0","git_commit":"970cee2","runs":3,"servers":{"parrhesia-pg":{"connect_avg_ms":9.333333333333334,"connect_max_ms":12.333333333333334,"echo_tps":64030.333333333336,"echo_mibs":35.06666666666666,"event_tps":5015.333333333333,"event_mibs":3.4,"req_tps":6416.333333333333,"req_mibs":42.43333333333334},"parrhesia-memory":{"connect_avg_ms":7.666666666666667,"connect_max_ms":9.666666666666666,"echo_tps":93656.33333333333,"echo_mibs":51.26666666666667,"event_tps":1505.3333333333333,"event_mibs":1,"req_tps":14566.666666666666,"req_mibs":94.23333333333335},"nostr-rs-relay":{"connect_avg_ms":7,"connect_max_ms":10.333333333333334,"echo_tps":140767,"echo_mibs":77.06666666666666,"event_tps":2293.6666666666665,"event_mibs":1.5,"req_tps":3035.6666666666665,"req_mibs":19.23333333333333}}}
92
compose.node-sync-e2e.yaml
Normal file
@@ -0,0 +1,92 @@
services:
  db-a:
    image: postgres:17
    restart: unless-stopped
    environment:
      POSTGRES_DB: parrhesia_a
      POSTGRES_USER: parrhesia
      POSTGRES_PASSWORD: parrhesia
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
      interval: 5s
      timeout: 5s
      retries: 12
    volumes:
      - postgres-a-data:/var/lib/postgresql/data

  db-b:
    image: postgres:17
    restart: unless-stopped
    environment:
      POSTGRES_DB: parrhesia_b
      POSTGRES_USER: parrhesia
      POSTGRES_PASSWORD: parrhesia
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
      interval: 5s
      timeout: 5s
      retries: 12
    volumes:
      - postgres-b-data:/var/lib/postgresql/data

  migrate-a:
    image: ${PARRHESIA_IMAGE:-parrhesia:latest}
    restart: "no"
    depends_on:
      db-a:
        condition: service_healthy
    environment:
      DATABASE_URL: ecto://parrhesia:parrhesia@db-a:5432/parrhesia_a
      POOL_SIZE: ${POOL_SIZE:-20}
      PARRHESIA_ACL_PROTECTED_FILTERS: ${PARRHESIA_ACL_PROTECTED_FILTERS}
    command: ["eval", "Parrhesia.Release.migrate()"]

  migrate-b:
    image: ${PARRHESIA_IMAGE:-parrhesia:latest}
    restart: "no"
    depends_on:
      db-b:
        condition: service_healthy
    environment:
      DATABASE_URL: ecto://parrhesia:parrhesia@db-b:5432/parrhesia_b
      POOL_SIZE: ${POOL_SIZE:-20}
      PARRHESIA_ACL_PROTECTED_FILTERS: ${PARRHESIA_ACL_PROTECTED_FILTERS}
    command: ["eval", "Parrhesia.Release.migrate()"]

  parrhesia-a:
    image: ${PARRHESIA_IMAGE:-parrhesia:latest}
    restart: unless-stopped
    depends_on:
      db-a:
        condition: service_healthy
    environment:
      DATABASE_URL: ecto://parrhesia:parrhesia@db-a:5432/parrhesia_a
      POOL_SIZE: ${POOL_SIZE:-20}
      PORT: 4413
      PARRHESIA_RELAY_URL: ${PARRHESIA_NODE_A_RELAY_URL:-ws://parrhesia-a:4413/relay}
      PARRHESIA_ACL_PROTECTED_FILTERS: ${PARRHESIA_ACL_PROTECTED_FILTERS}
      PARRHESIA_IDENTITY_PATH: /tmp/parrhesia-a/server_identity.json
      PARRHESIA_SYNC_PATH: /tmp/parrhesia-a/sync_servers.json
    ports:
      - "${PARRHESIA_NODE_A_HOST_PORT:-45131}:4413"

  parrhesia-b:
    image: ${PARRHESIA_IMAGE:-parrhesia:latest}
    restart: unless-stopped
    depends_on:
      db-b:
        condition: service_healthy
    environment:
      DATABASE_URL: ecto://parrhesia:parrhesia@db-b:5432/parrhesia_b
      POOL_SIZE: ${POOL_SIZE:-20}
      PORT: 4413
      PARRHESIA_RELAY_URL: ${PARRHESIA_NODE_B_RELAY_URL:-ws://parrhesia-b:4413/relay}
      PARRHESIA_ACL_PROTECTED_FILTERS: ${PARRHESIA_ACL_PROTECTED_FILTERS}
      PARRHESIA_IDENTITY_PATH: /tmp/parrhesia-b/server_identity.json
      PARRHESIA_SYNC_PATH: /tmp/parrhesia-b/sync_servers.json
    ports:
      - "${PARRHESIA_NODE_B_HOST_PORT:-45132}:4413"

volumes:
  postgres-a-data:
  postgres-b-data:
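
A hypothetical bring-up sequence for this two-node stack (service names taken from the file above; running the one-shot migrate-* services explicitly before the relays is an assumption, not documented by the compose file itself):

  # run each node's migrations once (compose starts the healthy db dependency first)
  docker compose -f compose.node-sync-e2e.yaml run --rm migrate-a
  docker compose -f compose.node-sync-e2e.yaml run --rm migrate-b
  # then start both relays
  docker compose -f compose.node-sync-e2e.yaml up -d parrhesia-a parrhesia-b
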
42
compose.yaml
Normal file
@@ -0,0 +1,42 @@
services:
  db:
    image: postgres:17
    restart: unless-stopped
    environment:
      POSTGRES_DB: ${POSTGRES_DB:-parrhesia}
      POSTGRES_USER: ${POSTGRES_USER:-parrhesia}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-parrhesia}
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
      interval: 5s
      timeout: 5s
      retries: 12
    volumes:
      - postgres-data:/var/lib/postgresql/data

  migrate:
    image: ${PARRHESIA_IMAGE:-parrhesia:latest}
    profiles: ["tools"]
    restart: "no"
    depends_on:
      db:
        condition: service_healthy
    environment:
      DATABASE_URL: ${DATABASE_URL:-ecto://parrhesia:parrhesia@db:5432/parrhesia}
      POOL_SIZE: ${POOL_SIZE:-20}
    command: ["eval", "Parrhesia.Release.migrate()"]

  parrhesia:
    image: ${PARRHESIA_IMAGE:-parrhesia:latest}
    restart: unless-stopped
    depends_on:
      db:
        condition: service_healthy
    environment:
      DATABASE_URL: ${DATABASE_URL:-ecto://parrhesia:parrhesia@db:5432/parrhesia}
      POOL_SIZE: ${POOL_SIZE:-20}
    ports:
      - "${PARRHESIA_HOST_PORT:-4413}:4413"

volumes:
  postgres-data:
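# A typical bring-up given the defaults above (assumed workflow, not part of
# this file; the "migrate" service sits behind the "tools" profile):
#   docker compose up -d
#   docker compose run --rm migrate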
@@ -1,15 +1,57 @@
import Config

project_version =
  case Mix.Project.config()[:version] do
    version when is_binary(version) -> version
    version -> to_string(version)
  end

config :postgrex, :json_library, JSON

config :parrhesia,
  metadata: [
    name: "Parrhesia",
    version: project_version,
    hide_version?: true
  ],
  database: [
    separate_read_pool?: config_env() != :test
  ],
  moderation_cache_enabled: true,
  relay_url: "ws://localhost:4000/relay",
  enable_partition_retention_worker: true,
  relay_url: "ws://localhost:4413/relay",
  nip43: [
    enabled: true,
    invite_ttl_seconds: 900,
    request_max_age_seconds: 300
  ],
  nip66: [
    enabled: true,
    publish_interval_seconds: 900,
    publish_monitor_announcement?: true,
    timeout_ms: 5_000,
    checks: [:open, :read, :nip11],
    targets: []
  ],
  identity: [
    path: nil,
    private_key: nil
  ],
  sync: [
    path: nil,
    start_workers?: true
  ],
  limits: [
    max_frame_bytes: 1_048_576,
    max_event_bytes: 262_144,
    max_filters_per_req: 16,
    max_filter_limit: 500,
    max_tags_per_event: 256,
    max_tag_values_per_filter: 128,
    ip_max_event_ingest_per_window: 1_000,
    ip_event_ingest_window_seconds: 1,
    relay_max_event_ingest_per_window: 10_000,
    relay_event_ingest_window_seconds: 1,
    max_subscriptions_per_connection: 32,
    max_event_future_skew_seconds: 900,
    max_event_ingest_per_window: 120,
@@ -21,6 +63,8 @@ config :parrhesia,
    max_negentropy_payload_bytes: 4096,
    max_negentropy_sessions_per_connection: 8,
    max_negentropy_total_sessions: 10_000,
    max_negentropy_items_per_session: 50_000,
    negentropy_id_list_threshold: 32,
    negentropy_session_idle_timeout_seconds: 60,
    negentropy_session_sweep_interval_seconds: 10
  ],
@@ -47,14 +91,36 @@ config :parrhesia,
    marmot_push_max_server_recipients: 1,
    management_auth_required: true
  ],
  metrics: [
    enabled_on_main_endpoint: true,
    public: false,
    private_networks_only: true,
    allowed_cidrs: [],
    auth_token: nil
  listeners: %{
    public: %{
      enabled: true,
      bind: %{ip: {0, 0, 0, 0}, port: 4413},
      max_connections: 20_000,
      transport: %{scheme: :http, tls: %{mode: :disabled}},
      proxy: %{trusted_cidrs: [], honor_x_forwarded_for: true},
      network: %{allow_all: true},
      features: %{
        nostr: %{enabled: true},
        admin: %{enabled: true},
        metrics: %{
          enabled: true,
          access: %{private_networks_only: true},
          auth_token: nil
        }
      },
      auth: %{nip42_required: false, nip98_required_for_admin: true},
      baseline_acl: %{read: [], write: []}
    }
  },
  retention: [
    check_interval_hours: 24,
    months_ahead: 2,
    max_db_bytes: :infinity,
    max_months_to_keep: :infinity,
    max_partitions_to_drop_per_run: 1
  ],
  features: [
    verify_event_signatures_locked?: config_env() == :prod,
    verify_event_signatures: true,
    nip_45_count: true,
    nip_50_search: true,
@@ -62,20 +128,16 @@ config :parrhesia,
    marmot_push_notifications: false
  ],
  storage: [
    backend: :postgres,
    events: Parrhesia.Storage.Adapters.Postgres.Events,
    acl: Parrhesia.Storage.Adapters.Postgres.ACL,
    moderation: Parrhesia.Storage.Adapters.Postgres.Moderation,
    groups: Parrhesia.Storage.Adapters.Postgres.Groups,
    admin: Parrhesia.Storage.Adapters.Postgres.Admin
  ]

config :parrhesia, Parrhesia.Web.Endpoint, port: 4000

config :parrhesia, Parrhesia.Web.MetricsEndpoint,
  enabled: false,
  ip: {127, 0, 0, 1},
  port: 9568

config :parrhesia, Parrhesia.Repo, types: Parrhesia.PostgresTypes
config :parrhesia, Parrhesia.ReadRepo, types: Parrhesia.PostgresTypes

config :parrhesia, ecto_repos: [Parrhesia.Repo]
@@ -23,3 +23,13 @@ config :parrhesia,
    show_sensitive_data_on_connection_error: true,
    pool_size: 10
  ] ++ repo_host_opts

config :parrhesia,
  Parrhesia.ReadRepo,
  [
    username: System.get_env("PGUSER") || System.get_env("USER") || "agent",
    password: System.get_env("PGPASSWORD"),
    database: System.get_env("PGDATABASE") || "parrhesia_dev",
    show_sensitive_data_on_connection_error: true,
    pool_size: 10
  ] ++ repo_host_opts
@@ -5,4 +5,9 @@ config :parrhesia, Parrhesia.Repo,
  queue_target: 1_000,
  queue_interval: 5_000

config :parrhesia, Parrhesia.ReadRepo,
  pool_size: 32,
  queue_target: 1_000,
  queue_interval: 5_000

# Production runtime configuration lives in config/runtime.exs.
@@ -1,40 +1,765 @@
import Config

if config_env() == :prod do
  database_url =
    System.get_env("DATABASE_URL") ||
      raise "environment variable DATABASE_URL is missing. Example: ecto://USER:PASS@HOST/DATABASE"
string_env = fn name, default ->
  case System.get_env(name) do
    nil -> default
    "" -> default
    value -> value
  end
end

int_env = fn name, default ->
  case System.get_env(name) do
    nil -> default
    value -> String.to_integer(value)
  end
end

bool_env = fn name, default ->
  case System.get_env(name) do
    nil ->
      default

    value ->
      case String.downcase(value) do
        "1" -> true
        "true" -> true
        "yes" -> true
        "on" -> true
        "0" -> false
        "false" -> false
        "no" -> false
        "off" -> false
        _other -> raise "environment variable #{name} must be a boolean value"
      end
  end
end

storage_backend_env = fn name, default ->
  case System.get_env(name) do
    nil ->
      default

    value ->
      case String.downcase(String.trim(value)) do
        "postgres" -> :postgres
        "memory" -> :memory
        _other -> raise "environment variable #{name} must be one of: postgres, memory"
      end
  end
end

csv_env = fn name, default ->
  case System.get_env(name) do
    nil ->
      default

    value ->
      value
      |> String.split(",", trim: true)
      |> Enum.map(&String.trim/1)
      |> Enum.reject(&(&1 == ""))
  end
end

json_env = fn name, default ->
  case System.get_env(name) do
    nil ->
      default

    "" ->
      default

    value ->
      case JSON.decode(value) do
        {:ok, decoded} ->
          decoded

        {:error, reason} ->
          raise "environment variable #{name} must contain valid JSON: #{inspect(reason)}"
      end
  end
end

infinity_or_int_env = fn name, default ->
  case System.get_env(name) do
    nil ->
      default

    value ->
      normalized = value |> String.trim() |> String.downcase()

      if normalized == "infinity" do
        :infinity
      else
        String.to_integer(value)
      end
  end
end

outbound_overflow_strategy_env = fn name, default ->
  case System.get_env(name) do
    nil ->
      default

    "close" ->
      :close

    "drop_oldest" ->
      :drop_oldest

    "drop_newest" ->
      :drop_newest

    _other ->
      raise "environment variable #{name} must be one of: close, drop_oldest, drop_newest"
  end
end

ipv4_env = fn name, default ->
  case System.get_env(name) do
    nil ->
      default

    value ->
      case String.split(value, ".", parts: 4) do
        [a, b, c, d] ->
          octets = Enum.map([a, b, c, d], &String.to_integer/1)

          if Enum.all?(octets, &(&1 >= 0 and &1 <= 255)) do
            List.to_tuple(octets)
          else
            raise "environment variable #{name} must be a valid IPv4 address"
          end

        _other ->
          raise "environment variable #{name} must be a valid IPv4 address"
      end
  end
end

if config_env() == :prod do
  repo_defaults = Application.get_env(:parrhesia, Parrhesia.Repo, [])
  read_repo_defaults = Application.get_env(:parrhesia, Parrhesia.ReadRepo, [])
  relay_url_default = Application.get_env(:parrhesia, :relay_url)
  metadata_defaults = Application.get_env(:parrhesia, :metadata, [])
  database_defaults = Application.get_env(:parrhesia, :database, [])
  storage_defaults = Application.get_env(:parrhesia, :storage, [])

  moderation_cache_enabled_default =
    Application.get_env(:parrhesia, :moderation_cache_enabled, true)

  enable_expiration_worker_default =
    Application.get_env(:parrhesia, :enable_expiration_worker, true)

  enable_partition_retention_worker_default =
    Application.get_env(:parrhesia, :enable_partition_retention_worker, true)

  limits_defaults = Application.get_env(:parrhesia, :limits, [])
  policies_defaults = Application.get_env(:parrhesia, :policies, [])
  listeners_defaults = Application.get_env(:parrhesia, :listeners, %{})
  retention_defaults = Application.get_env(:parrhesia, :retention, [])
  features_defaults = Application.get_env(:parrhesia, :features, [])
  acl_defaults = Application.get_env(:parrhesia, :acl, [])

  default_pool_size = Keyword.get(repo_defaults, :pool_size, 32)
  default_queue_target = Keyword.get(repo_defaults, :queue_target, 1_000)
  default_queue_interval = Keyword.get(repo_defaults, :queue_interval, 5_000)
  default_read_pool_size = Keyword.get(read_repo_defaults, :pool_size, default_pool_size)
  default_read_queue_target = Keyword.get(read_repo_defaults, :queue_target, default_queue_target)

  pool_size =
    case System.get_env("POOL_SIZE") do
      nil -> default_pool_size
      value -> String.to_integer(value)
  default_read_queue_interval =
    Keyword.get(read_repo_defaults, :queue_interval, default_queue_interval)

  default_storage_backend =
    storage_defaults
    |> Keyword.get(:backend, :postgres)
    |> case do
      :postgres -> :postgres
      :memory -> :memory
      other -> raise "unsupported storage backend default: #{inspect(other)}"
    end

  queue_target =
    case System.get_env("DB_QUEUE_TARGET_MS") do
      nil -> default_queue_target
      value -> String.to_integer(value)
  storage_backend = storage_backend_env.("PARRHESIA_STORAGE_BACKEND", default_storage_backend)
  postgres_backend? = storage_backend == :postgres

  separate_read_pool? =
    postgres_backend? and Keyword.get(database_defaults, :separate_read_pool?, true)

  database_url =
    if postgres_backend? do
      System.get_env("DATABASE_URL") ||
        raise "environment variable DATABASE_URL is missing. Example: ecto://USER:PASS@HOST/DATABASE"
    else
      nil
    end

  queue_interval =
    case System.get_env("DB_QUEUE_INTERVAL_MS") do
      nil -> default_queue_interval
      value -> String.to_integer(value)
  pool_size = int_env.("POOL_SIZE", default_pool_size)
  queue_target = int_env.("DB_QUEUE_TARGET_MS", default_queue_target)
  queue_interval = int_env.("DB_QUEUE_INTERVAL_MS", default_queue_interval)
  read_pool_size = int_env.("DB_READ_POOL_SIZE", default_read_pool_size)
  read_queue_target = int_env.("DB_READ_QUEUE_TARGET_MS", default_read_queue_target)
  read_queue_interval = int_env.("DB_READ_QUEUE_INTERVAL_MS", default_read_queue_interval)

  limits = [
    max_frame_bytes:
      int_env.(
        "PARRHESIA_LIMITS_MAX_FRAME_BYTES",
        Keyword.get(limits_defaults, :max_frame_bytes, 1_048_576)
      ),
    max_event_bytes:
      int_env.(
        "PARRHESIA_LIMITS_MAX_EVENT_BYTES",
        Keyword.get(limits_defaults, :max_event_bytes, 262_144)
      ),
    max_filters_per_req:
      int_env.(
        "PARRHESIA_LIMITS_MAX_FILTERS_PER_REQ",
        Keyword.get(limits_defaults, :max_filters_per_req, 16)
      ),
    max_filter_limit:
      int_env.(
        "PARRHESIA_LIMITS_MAX_FILTER_LIMIT",
        Keyword.get(limits_defaults, :max_filter_limit, 500)
      ),
    max_tags_per_event:
      int_env.(
        "PARRHESIA_LIMITS_MAX_TAGS_PER_EVENT",
        Keyword.get(limits_defaults, :max_tags_per_event, 256)
      ),
    max_tag_values_per_filter:
      int_env.(
        "PARRHESIA_LIMITS_MAX_TAG_VALUES_PER_FILTER",
        Keyword.get(limits_defaults, :max_tag_values_per_filter, 128)
      ),
    ip_max_event_ingest_per_window:
      int_env.(
        "PARRHESIA_LIMITS_IP_MAX_EVENT_INGEST_PER_WINDOW",
        Keyword.get(limits_defaults, :ip_max_event_ingest_per_window, 1_000)
      ),
    ip_event_ingest_window_seconds:
      int_env.(
        "PARRHESIA_LIMITS_IP_EVENT_INGEST_WINDOW_SECONDS",
        Keyword.get(limits_defaults, :ip_event_ingest_window_seconds, 1)
      ),
    relay_max_event_ingest_per_window:
      int_env.(
        "PARRHESIA_LIMITS_RELAY_MAX_EVENT_INGEST_PER_WINDOW",
        Keyword.get(limits_defaults, :relay_max_event_ingest_per_window, 10_000)
      ),
    relay_event_ingest_window_seconds:
      int_env.(
        "PARRHESIA_LIMITS_RELAY_EVENT_INGEST_WINDOW_SECONDS",
        Keyword.get(limits_defaults, :relay_event_ingest_window_seconds, 1)
      ),
    max_subscriptions_per_connection:
      int_env.(
        "PARRHESIA_LIMITS_MAX_SUBSCRIPTIONS_PER_CONNECTION",
        Keyword.get(limits_defaults, :max_subscriptions_per_connection, 32)
      ),
    max_event_future_skew_seconds:
      int_env.(
        "PARRHESIA_LIMITS_MAX_EVENT_FUTURE_SKEW_SECONDS",
        Keyword.get(limits_defaults, :max_event_future_skew_seconds, 900)
      ),
    max_event_ingest_per_window:
      int_env.(
        "PARRHESIA_LIMITS_MAX_EVENT_INGEST_PER_WINDOW",
        Keyword.get(limits_defaults, :max_event_ingest_per_window, 120)
      ),
    event_ingest_window_seconds:
      int_env.(
        "PARRHESIA_LIMITS_EVENT_INGEST_WINDOW_SECONDS",
        Keyword.get(limits_defaults, :event_ingest_window_seconds, 1)
      ),
    auth_max_age_seconds:
      int_env.(
        "PARRHESIA_LIMITS_AUTH_MAX_AGE_SECONDS",
        Keyword.get(limits_defaults, :auth_max_age_seconds, 600)
      ),
    max_outbound_queue:
      int_env.(
        "PARRHESIA_LIMITS_MAX_OUTBOUND_QUEUE",
        Keyword.get(limits_defaults, :max_outbound_queue, 256)
      ),
    outbound_drain_batch_size:
      int_env.(
        "PARRHESIA_LIMITS_OUTBOUND_DRAIN_BATCH_SIZE",
        Keyword.get(limits_defaults, :outbound_drain_batch_size, 64)
      ),
    outbound_overflow_strategy:
      outbound_overflow_strategy_env.(
        "PARRHESIA_LIMITS_OUTBOUND_OVERFLOW_STRATEGY",
        Keyword.get(limits_defaults, :outbound_overflow_strategy, :close)
      ),
    max_negentropy_payload_bytes:
      int_env.(
        "PARRHESIA_LIMITS_MAX_NEGENTROPY_PAYLOAD_BYTES",
        Keyword.get(limits_defaults, :max_negentropy_payload_bytes, 4096)
      ),
    max_negentropy_sessions_per_connection:
      int_env.(
        "PARRHESIA_LIMITS_MAX_NEGENTROPY_SESSIONS_PER_CONNECTION",
        Keyword.get(limits_defaults, :max_negentropy_sessions_per_connection, 8)
      ),
    max_negentropy_total_sessions:
      int_env.(
        "PARRHESIA_LIMITS_MAX_NEGENTROPY_TOTAL_SESSIONS",
        Keyword.get(limits_defaults, :max_negentropy_total_sessions, 10_000)
      ),
    max_negentropy_items_per_session:
      int_env.(
        "PARRHESIA_LIMITS_MAX_NEGENTROPY_ITEMS_PER_SESSION",
        Keyword.get(limits_defaults, :max_negentropy_items_per_session, 50_000)
      ),
    negentropy_id_list_threshold:
      int_env.(
        "PARRHESIA_LIMITS_NEGENTROPY_ID_LIST_THRESHOLD",
        Keyword.get(limits_defaults, :negentropy_id_list_threshold, 32)
      ),
    negentropy_session_idle_timeout_seconds:
      int_env.(
        "PARRHESIA_LIMITS_NEGENTROPY_SESSION_IDLE_TIMEOUT_SECONDS",
        Keyword.get(limits_defaults, :negentropy_session_idle_timeout_seconds, 60)
      ),
    negentropy_session_sweep_interval_seconds:
      int_env.(
        "PARRHESIA_LIMITS_NEGENTROPY_SESSION_SWEEP_INTERVAL_SECONDS",
        Keyword.get(limits_defaults, :negentropy_session_sweep_interval_seconds, 10)
      )
  ]

  policies = [
    auth_required_for_writes:
      bool_env.(
        "PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES",
        Keyword.get(policies_defaults, :auth_required_for_writes, false)
      ),
    auth_required_for_reads:
      bool_env.(
        "PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_READS",
        Keyword.get(policies_defaults, :auth_required_for_reads, false)
      ),
    min_pow_difficulty:
      int_env.(
        "PARRHESIA_POLICIES_MIN_POW_DIFFICULTY",
        Keyword.get(policies_defaults, :min_pow_difficulty, 0)
      ),
    accept_ephemeral_events:
      bool_env.(
        "PARRHESIA_POLICIES_ACCEPT_EPHEMERAL_EVENTS",
        Keyword.get(policies_defaults, :accept_ephemeral_events, true)
      ),
    mls_group_event_ttl_seconds:
      int_env.(
        "PARRHESIA_POLICIES_MLS_GROUP_EVENT_TTL_SECONDS",
        Keyword.get(policies_defaults, :mls_group_event_ttl_seconds, 300)
      ),
    marmot_require_h_for_group_queries:
      bool_env.(
        "PARRHESIA_POLICIES_MARMOT_REQUIRE_H_FOR_GROUP_QUERIES",
        Keyword.get(policies_defaults, :marmot_require_h_for_group_queries, true)
      ),
    marmot_group_max_h_values_per_filter:
      int_env.(
        "PARRHESIA_POLICIES_MARMOT_GROUP_MAX_H_VALUES_PER_FILTER",
        Keyword.get(policies_defaults, :marmot_group_max_h_values_per_filter, 32)
      ),
    marmot_group_max_query_window_seconds:
      int_env.(
        "PARRHESIA_POLICIES_MARMOT_GROUP_MAX_QUERY_WINDOW_SECONDS",
        Keyword.get(policies_defaults, :marmot_group_max_query_window_seconds, 2_592_000)
      ),
    marmot_media_max_imeta_tags_per_event:
      int_env.(
        "PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_IMETA_TAGS_PER_EVENT",
        Keyword.get(policies_defaults, :marmot_media_max_imeta_tags_per_event, 8)
      ),
    marmot_media_max_field_value_bytes:
      int_env.(
        "PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_FIELD_VALUE_BYTES",
        Keyword.get(policies_defaults, :marmot_media_max_field_value_bytes, 1024)
      ),
    marmot_media_max_url_bytes:
      int_env.(
        "PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_URL_BYTES",
        Keyword.get(policies_defaults, :marmot_media_max_url_bytes, 2048)
      ),
    marmot_media_allowed_mime_prefixes:
      csv_env.(
        "PARRHESIA_POLICIES_MARMOT_MEDIA_ALLOWED_MIME_PREFIXES",
        Keyword.get(policies_defaults, :marmot_media_allowed_mime_prefixes, [])
      ),
    marmot_media_reject_mip04_v1:
      bool_env.(
        "PARRHESIA_POLICIES_MARMOT_MEDIA_REJECT_MIP04_V1",
        Keyword.get(policies_defaults, :marmot_media_reject_mip04_v1, true)
      ),
    marmot_push_server_pubkeys:
      csv_env.(
        "PARRHESIA_POLICIES_MARMOT_PUSH_SERVER_PUBKEYS",
        Keyword.get(policies_defaults, :marmot_push_server_pubkeys, [])
      ),
    marmot_push_max_relay_tags:
      int_env.(
        "PARRHESIA_POLICIES_MARMOT_PUSH_MAX_RELAY_TAGS",
        Keyword.get(policies_defaults, :marmot_push_max_relay_tags, 16)
      ),
    marmot_push_max_payload_bytes:
      int_env.(
        "PARRHESIA_POLICIES_MARMOT_PUSH_MAX_PAYLOAD_BYTES",
        Keyword.get(policies_defaults, :marmot_push_max_payload_bytes, 65_536)
      ),
    marmot_push_max_trigger_age_seconds:
      int_env.(
        "PARRHESIA_POLICIES_MARMOT_PUSH_MAX_TRIGGER_AGE_SECONDS",
        Keyword.get(policies_defaults, :marmot_push_max_trigger_age_seconds, 120)
      ),
    marmot_push_require_expiration:
      bool_env.(
        "PARRHESIA_POLICIES_MARMOT_PUSH_REQUIRE_EXPIRATION",
        Keyword.get(policies_defaults, :marmot_push_require_expiration, true)
      ),
    marmot_push_max_expiration_window_seconds:
      int_env.(
        "PARRHESIA_POLICIES_MARMOT_PUSH_MAX_EXPIRATION_WINDOW_SECONDS",
        Keyword.get(policies_defaults, :marmot_push_max_expiration_window_seconds, 120)
      ),
    marmot_push_max_server_recipients:
      int_env.(
        "PARRHESIA_POLICIES_MARMOT_PUSH_MAX_SERVER_RECIPIENTS",
        Keyword.get(policies_defaults, :marmot_push_max_server_recipients, 1)
      ),
    management_auth_required:
      bool_env.(
        "PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED",
        Keyword.get(policies_defaults, :management_auth_required, true)
      )
  ]

  public_listener_defaults = Map.get(listeners_defaults, :public, %{})
  public_bind_defaults = Map.get(public_listener_defaults, :bind, %{})
  public_transport_defaults = Map.get(public_listener_defaults, :transport, %{})
  public_proxy_defaults = Map.get(public_listener_defaults, :proxy, %{})
  public_network_defaults = Map.get(public_listener_defaults, :network, %{})
  public_features_defaults = Map.get(public_listener_defaults, :features, %{})
  public_auth_defaults = Map.get(public_listener_defaults, :auth, %{})
  public_metrics_defaults = Map.get(public_features_defaults, :metrics, %{})
  public_metrics_access_defaults = Map.get(public_metrics_defaults, :access, %{})

  metrics_listener_defaults = Map.get(listeners_defaults, :metrics, %{})
  metrics_listener_bind_defaults = Map.get(metrics_listener_defaults, :bind, %{})
  metrics_listener_transport_defaults = Map.get(metrics_listener_defaults, :transport, %{})
  metrics_listener_network_defaults = Map.get(metrics_listener_defaults, :network, %{})

  metrics_listener_metrics_defaults =
    metrics_listener_defaults
    |> Map.get(:features, %{})
    |> Map.get(:metrics, %{})

  metrics_listener_metrics_access_defaults =
    Map.get(metrics_listener_metrics_defaults, :access, %{})

  public_listener = %{
    enabled: Map.get(public_listener_defaults, :enabled, true),
    bind: %{
      ip: Map.get(public_bind_defaults, :ip, {0, 0, 0, 0}),
      port: int_env.("PORT", Map.get(public_bind_defaults, :port, 4413))
    },
    max_connections:
      infinity_or_int_env.(
        "PARRHESIA_PUBLIC_MAX_CONNECTIONS",
        Map.get(public_listener_defaults, :max_connections, 20_000)
      ),
    transport: %{
      scheme: Map.get(public_transport_defaults, :scheme, :http),
      tls: Map.get(public_transport_defaults, :tls, %{mode: :disabled})
    },
    proxy: %{
      trusted_cidrs:
        csv_env.(
          "PARRHESIA_TRUSTED_PROXIES",
          Map.get(public_proxy_defaults, :trusted_cidrs, [])
        ),
      honor_x_forwarded_for: Map.get(public_proxy_defaults, :honor_x_forwarded_for, true)
    },
    network: %{
      allow_cidrs: Map.get(public_network_defaults, :allow_cidrs, []),
      private_networks_only: Map.get(public_network_defaults, :private_networks_only, false),
      public: Map.get(public_network_defaults, :public, false),
      allow_all: Map.get(public_network_defaults, :allow_all, true)
    },
    features: %{
      nostr: %{
        enabled: public_features_defaults |> Map.get(:nostr, %{}) |> Map.get(:enabled, true)
      },
      admin: %{
        enabled: public_features_defaults |> Map.get(:admin, %{}) |> Map.get(:enabled, true)
      },
      metrics: %{
        enabled:
          bool_env.(
            "PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT",
            Map.get(public_metrics_defaults, :enabled, true)
          ),
        auth_token:
          string_env.(
            "PARRHESIA_METRICS_AUTH_TOKEN",
            Map.get(public_metrics_defaults, :auth_token)
          ),
        access: %{
          public:
            bool_env.(
              "PARRHESIA_METRICS_PUBLIC",
              Map.get(public_metrics_access_defaults, :public, false)
            ),
          private_networks_only:
            bool_env.(
              "PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY",
              Map.get(public_metrics_access_defaults, :private_networks_only, true)
            ),
          allow_cidrs:
            csv_env.(
              "PARRHESIA_METRICS_ALLOWED_CIDRS",
              Map.get(public_metrics_access_defaults, :allow_cidrs, [])
            ),
          allow_all: Map.get(public_metrics_access_defaults, :allow_all, true)
        }
      }
    },
    auth: %{
      nip42_required: Map.get(public_auth_defaults, :nip42_required, false),
      nip98_required_for_admin:
        bool_env.(
          "PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED",
          Map.get(public_auth_defaults, :nip98_required_for_admin, true)
        )
    },
    baseline_acl: Map.get(public_listener_defaults, :baseline_acl, %{read: [], write: []})
  }

  listeners =
    if Map.get(metrics_listener_defaults, :enabled, false) or
         bool_env.("PARRHESIA_METRICS_ENDPOINT_ENABLED", false) do
      Map.put(
        %{public: public_listener},
        :metrics,
        %{
          enabled: true,
          bind: %{
            ip: Map.get(metrics_listener_bind_defaults, :ip, {127, 0, 0, 1}),
            port:
              int_env.(
                "PARRHESIA_METRICS_ENDPOINT_PORT",
                Map.get(metrics_listener_bind_defaults, :port, 9568)
              )
          },
          max_connections:
            infinity_or_int_env.(
              "PARRHESIA_METRICS_ENDPOINT_MAX_CONNECTIONS",
              Map.get(metrics_listener_defaults, :max_connections, 1_024)
            ),
          transport: %{
            scheme: Map.get(metrics_listener_transport_defaults, :scheme, :http),
            tls: Map.get(metrics_listener_transport_defaults, :tls, %{mode: :disabled})
          },
          network: %{
            allow_cidrs: Map.get(metrics_listener_network_defaults, :allow_cidrs, []),
            private_networks_only:
              Map.get(metrics_listener_network_defaults, :private_networks_only, false),
            public: Map.get(metrics_listener_network_defaults, :public, false),
            allow_all: Map.get(metrics_listener_network_defaults, :allow_all, true)
          },
          features: %{
            nostr: %{enabled: false},
            admin: %{enabled: false},
            metrics: %{
              enabled: true,
              auth_token:
                string_env.(
                  "PARRHESIA_METRICS_AUTH_TOKEN",
                  Map.get(metrics_listener_metrics_defaults, :auth_token)
                ),
              access: %{
                public:
                  bool_env.(
                    "PARRHESIA_METRICS_PUBLIC",
                    Map.get(metrics_listener_metrics_access_defaults, :public, false)
                  ),
                private_networks_only:
                  bool_env.(
                    "PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY",
                    Map.get(
                      metrics_listener_metrics_access_defaults,
                      :private_networks_only,
                      true
                    )
                  ),
                allow_cidrs:
                  csv_env.(
                    "PARRHESIA_METRICS_ALLOWED_CIDRS",
                    Map.get(metrics_listener_metrics_access_defaults, :allow_cidrs, [])
                  ),
                allow_all: Map.get(metrics_listener_metrics_access_defaults, :allow_all, true)
              }
            }
          },
          auth: %{nip42_required: false, nip98_required_for_admin: true},
          baseline_acl: %{read: [], write: []}
        }
      )
    else
      %{public: public_listener}
    end

  config :parrhesia, Parrhesia.Repo,
    url: database_url,
    pool_size: pool_size,
    queue_target: queue_target,
    queue_interval: queue_interval
  retention = [
    check_interval_hours:
      int_env.(
        "PARRHESIA_RETENTION_CHECK_INTERVAL_HOURS",
        Keyword.get(retention_defaults, :check_interval_hours, 24)
      ),
    months_ahead:
      int_env.(
        "PARRHESIA_RETENTION_MONTHS_AHEAD",
        Keyword.get(retention_defaults, :months_ahead, 2)
      ),
    max_db_bytes:
      infinity_or_int_env.(
        "PARRHESIA_RETENTION_MAX_DB_BYTES",
        Keyword.get(retention_defaults, :max_db_bytes, :infinity)
      ),
    max_months_to_keep:
      infinity_or_int_env.(
        "PARRHESIA_RETENTION_MAX_MONTHS_TO_KEEP",
        Keyword.get(retention_defaults, :max_months_to_keep, :infinity)
      ),
    max_partitions_to_drop_per_run:
      int_env.(
        "PARRHESIA_RETENTION_MAX_PARTITIONS_TO_DROP_PER_RUN",
        Keyword.get(retention_defaults, :max_partitions_to_drop_per_run, 1)
      )
  ]

  config :parrhesia, Parrhesia.Web.Endpoint,
    port: String.to_integer(System.get_env("PORT") || "4000")
  features = [
    verify_event_signatures_locked?:
      Keyword.get(features_defaults, :verify_event_signatures_locked?, false),
    verify_event_signatures:
      if Keyword.get(features_defaults, :verify_event_signatures_locked?, false) do
        true
      else
        Keyword.get(features_defaults, :verify_event_signatures, true)
      end,
    nip_45_count:
      bool_env.(
        "PARRHESIA_FEATURES_NIP_45_COUNT",
        Keyword.get(features_defaults, :nip_45_count, true)
      ),
    nip_50_search:
      bool_env.(
        "PARRHESIA_FEATURES_NIP_50_SEARCH",
        Keyword.get(features_defaults, :nip_50_search, true)
      ),
    nip_77_negentropy:
      bool_env.(
        "PARRHESIA_FEATURES_NIP_77_NEGENTROPY",
        Keyword.get(features_defaults, :nip_77_negentropy, true)
      ),
    marmot_push_notifications:
      bool_env.(
        "PARRHESIA_FEATURES_MARMOT_PUSH_NOTIFICATIONS",
        Keyword.get(features_defaults, :marmot_push_notifications, false)
      )
  ]

  storage =
    case storage_backend do
      :postgres ->
        [
          backend: :postgres,
          events: Parrhesia.Storage.Adapters.Postgres.Events,
          acl: Parrhesia.Storage.Adapters.Postgres.ACL,
          moderation: Parrhesia.Storage.Adapters.Postgres.Moderation,
          groups: Parrhesia.Storage.Adapters.Postgres.Groups,
          admin: Parrhesia.Storage.Adapters.Postgres.Admin
        ]

      :memory ->
        [
          backend: :memory,
          events: Parrhesia.Storage.Adapters.Memory.Events,
          acl: Parrhesia.Storage.Adapters.Memory.ACL,
          moderation: Parrhesia.Storage.Adapters.Memory.Moderation,
          groups: Parrhesia.Storage.Adapters.Memory.Groups,
          admin: Parrhesia.Storage.Adapters.Memory.Admin
        ]
    end

  if postgres_backend? do
    config :parrhesia, Parrhesia.Repo,
      url: database_url,
      pool_size: pool_size,
      queue_target: queue_target,
      queue_interval: queue_interval

    config :parrhesia, Parrhesia.ReadRepo,
      url: database_url,
      pool_size: read_pool_size,
      queue_target: read_queue_target,
      queue_interval: read_queue_interval
  end

  config :parrhesia,
    database: [
      separate_read_pool?: separate_read_pool?
    ],
    relay_url: string_env.("PARRHESIA_RELAY_URL", relay_url_default),
    metadata: [
      name: Keyword.get(metadata_defaults, :name, "Parrhesia"),
      version: Keyword.get(metadata_defaults, :version, "0.0.0"),
      hide_version?:
        bool_env.(
          "PARRHESIA_METADATA_HIDE_VERSION",
          Keyword.get(metadata_defaults, :hide_version?, true)
        )
    ],
    acl: [
      protected_filters:
        json_env.(
          "PARRHESIA_ACL_PROTECTED_FILTERS",
          Keyword.get(acl_defaults, :protected_filters, [])
        )
    ],
    identity: [
      path: string_env.("PARRHESIA_IDENTITY_PATH", nil),
      private_key: string_env.("PARRHESIA_IDENTITY_PRIVATE_KEY", nil)
    ],
    sync: [
      path: string_env.("PARRHESIA_SYNC_PATH", nil),
      start_workers?:
        bool_env.(
          "PARRHESIA_SYNC_START_WORKERS",
          Keyword.get(Application.get_env(:parrhesia, :sync, []), :start_workers?, true)
        )
    ],
    moderation_cache_enabled:
      bool_env.("PARRHESIA_MODERATION_CACHE_ENABLED", moderation_cache_enabled_default),
    enable_expiration_worker:
      bool_env.("PARRHESIA_ENABLE_EXPIRATION_WORKER", enable_expiration_worker_default),
    enable_partition_retention_worker:
      bool_env.(
        "PARRHESIA_ENABLE_PARTITION_RETENTION_WORKER",
        enable_partition_retention_worker_default
      ),
    listeners: listeners,
    limits: limits,
    policies: policies,
    retention: retention,
    features: features,
    storage: storage

  case System.get_env("PARRHESIA_EXTRA_CONFIG") do
    nil -> :ok
    "" -> :ok
    path -> import_config path
  end
end
@@ -8,13 +8,34 @@ test_endpoint_port =
    value -> String.to_integer(value)
  end

config :parrhesia, Parrhesia.Web.Endpoint,
  port: test_endpoint_port,
  ip: {127, 0, 0, 1}
config :parrhesia, :listeners,
  public: %{
    enabled: true,
    bind: %{ip: {127, 0, 0, 1}, port: test_endpoint_port},
    transport: %{scheme: :http, tls: %{mode: :disabled}},
    proxy: %{trusted_cidrs: [], honor_x_forwarded_for: true},
    network: %{allow_all: true},
    features: %{
      nostr: %{enabled: true},
      admin: %{enabled: true},
      metrics: %{enabled: true, access: %{private_networks_only: true}, auth_token: nil}
    },
    auth: %{nip42_required: false, nip98_required_for_admin: true},
    baseline_acl: %{read: [], write: []}
  }

config :parrhesia,
  enable_expiration_worker: false,
  moderation_cache_enabled: false,
  nip66: [enabled: false],
  identity: [
    path: Path.join(System.tmp_dir!(), "parrhesia_test_identity.json"),
    private_key: nil
  ],
  sync: [
    path: Path.join(System.tmp_dir!(), "parrhesia_test_sync.json"),
    start_workers?: false
  ],
  features: [verify_event_signatures: false]

pg_host = System.get_env("PGHOST")
@@ -10,7 +10,7 @@
  vips,
}: let
  pname = "parrhesia";
  version = "0.3.0";
  version = "0.6.0";

  beamPackages = beam.packages.erlang_28.extend (
    final: _prev: {
@@ -48,7 +48,7 @@
      beamPackages.fetchMixDeps {
        pname = "${pname}-mix-deps";
        inherit version src;
        hash = "sha256-0KOyYRbYM0jjmp3tPn64qkp0YkmZKlqkGrlu/wCr4m8=";
        hash = "sha256-D69wuFnIChQzm1PmpIW+X/1sPpsIcDHe4V5fKmFeJ3k=";
      }
    else null;
15
devenv.nix
@@ -101,17 +101,13 @@ in {
      nostr-bench
      # Nostr reference servers
      nostr-rs-relay
      # Benchmark graph
      gnuplot
    ]
    ++ lib.optionals pkgs.stdenv.hostPlatform.isx86_64 [
      strfry
    ];

  # https://devenv.sh/tests/
  # enterTest = ''
  #   echo "Running tests"
  #   git --version | grep "2.42.0"
  # '';

  # https://devenv.sh/languages/
  languages = {
    elixir = {
@@ -131,7 +127,7 @@ in {
      enable = true;
      package = pkgs.postgresql_18;

      # Some tuning for the benchmark
      # Some tuning for the benchmark - doesn't seem to do much
      settings = {
        max_connections = 300;
        shared_buffers = "1GB";
@@ -151,6 +147,7 @@ in {
      initialScript = ''
        CREATE ROLE dev WITH LOGIN PASSWORD 'dev' SUPERUSER;

        -- Make sure we get the right collation
        ALTER database template1 is_template=false;

        DROP database template1;
@@ -167,12 +164,10 @@ in {
      '';
    };

  # https://devenv.sh/pre-commit-hooks/
  # pre-commit.hooks.shellcheck.enable = true;

  dotenv.enable = true;
  devenv.warnOnNewVersion = false;

  # https://devenv.sh/pre-commit-hooks/
  git-hooks.hooks = {
    alejandra.enable = true;
    check-added-large-files = {
33
docs/ARCH.md
@@ -68,10 +68,10 @@ Notes:
## 3) System architecture (high level)

```text
WS/HTTP Edge (Bandit/Plug)
Configured WS/HTTP Listeners (Bandit/Plug)
  -> Protocol Decoder/Encoder
  -> Command Router (EVENT/REQ/CLOSE/AUTH/COUNT/NEG-*)
  -> Policy Pipeline (validation, auth, ACL, PoW, NIP-70)
  -> Policy Pipeline (listener baseline, validation, auth, ACL, PoW, NIP-70)
  -> Event Service / Query Service
  -> Storage Port (behavior)
  -> Postgres Adapter (Ecto)
@@ -82,23 +82,36 @@ WS/HTTP Edge (Bandit/Plug)

## 4) OTP supervision design

`Parrhesia.Application` children (top-level):
`Parrhesia.Runtime` children (top-level):

1. `Parrhesia.Telemetry` – metric definitions/reporters
2. `Parrhesia.Config` – runtime config cache (ETS-backed)
3. `Parrhesia.Storage.Supervisor` – adapter processes (`Repo`, pools)
4. `Parrhesia.Subscriptions.Supervisor` – subscription index + fanout workers
5. `Parrhesia.Auth.Supervisor` – AUTH challenge/session tracking
6. `Parrhesia.Policy.Supervisor` – rate limiters / ACL caches
7. `Parrhesia.Web.Endpoint` – WS + HTTP ingress
8. `Parrhesia.Tasks.Supervisor` – background jobs (expiry purge, maintenance)
2. `Parrhesia.ConnectionStats` – per-listener connection/subscription counters
3. `Parrhesia.Config` – runtime config cache (ETS-backed)
4. `Parrhesia.Web.EventIngestLimiter` – relay-wide event ingest rate limiter
5. `Parrhesia.Web.IPEventIngestLimiter` – per-IP event ingest rate limiter
6. `Parrhesia.Storage.Supervisor` – adapter processes (`Repo`, pools)
7. `Parrhesia.Subscriptions.Supervisor` – subscription index + fanout workers
8. `Parrhesia.Auth.Supervisor` – AUTH challenge/session tracking
9. `Parrhesia.Sync.Supervisor` – outbound relay sync workers
10. `Parrhesia.Policy.Supervisor` – rate limiters / ACL caches
11. `Parrhesia.Web.Endpoint` – supervises configured WS + HTTP listeners
12. `Parrhesia.Tasks.Supervisor` – background jobs (expiry purge, maintenance)

Failure model:

- Connection failures are isolated per socket process.
- Listener failures are isolated per Bandit child and restarted independently.
- Storage outages degrade with explicit `OK/CLOSED` error prefixes (`error:`) per NIP-01.
- Non-critical workers are `:transient`; core infra is `:permanent`.

Ingress model:

- Ingress is defined through `config :parrhesia, :listeners, ...`.
- Each listener has its own bind/transport settings, TLS mode, proxy trust, network allowlist, enabled features (`nostr`, `admin`, `metrics`), auth requirements, and baseline read/write ACL.
- Listeners can therefore expose different security postures, for example a public relay listener and a VPN-only sync-capable listener (see the sketch after this list).
- TLS-capable listeners support direct server TLS, mutual TLS with optional client pin checks, and proxy-terminated TLS identity on explicitly trusted proxy hops.
- Certificate reload is currently implemented as admin-triggered listener restart from disk rather than background file watching.
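A sketch of that two-posture shape, assuming listener names beyond `public` are accepted (the `internal` name, CIDR, and ports here are illustrative, not a recommended production config):

```elixir
config :parrhesia, :listeners, %{
  # Public relay listener: open network, no admin surface.
  public: %{
    enabled: true,
    bind: %{ip: {0, 0, 0, 0}, port: 4413},
    transport: %{scheme: :http, tls: %{mode: :disabled}},
    proxy: %{trusted_cidrs: [], honor_x_forwarded_for: true},
    network: %{allow_all: true},
    features: %{nostr: %{enabled: true}, admin: %{enabled: false}, metrics: %{enabled: false}},
    auth: %{nip42_required: false, nip98_required_for_admin: true},
    baseline_acl: %{read: [], write: []}
  },
  # Hypothetical VPN-only listener: network allowlist, admin + metrics enabled.
  internal: %{
    enabled: true,
    bind: %{ip: {0, 0, 0, 0}, port: 4414},
    transport: %{scheme: :http, tls: %{mode: :disabled}},
    proxy: %{trusted_cidrs: [], honor_x_forwarded_for: false},
    network: %{allow_cidrs: ["10.8.0.0/24"], allow_all: false},
    features: %{
      nostr: %{enabled: true},
      admin: %{enabled: true},
      metrics: %{enabled: true, access: %{private_networks_only: true}, auth_token: nil}
    },
    auth: %{nip42_required: true, nip98_required_for_admin: true},
    baseline_acl: %{read: [], write: []}
  }
}
```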
## 5) Core runtime components

### 5.1 Connection process
234
docs/CLUSTER.md
Normal file
@@ -0,0 +1,234 @@
# Parrhesia clustering and distributed fanout

This document describes:

1. the **current** distributed fanout behavior implemented today, and
2. a practical evolution path to a more production-grade clustered relay.

---

## 1) Current state (implemented today)

### 1.1 What exists right now

Parrhesia currently includes a lightweight multi-node live fanout path (untested!):

- `Parrhesia.Fanout.MultiNode` (`lib/parrhesia/fanout/multi_node.ex`)
  - GenServer that joins a `:pg` process group.
  - Receives locally-published events and forwards them to other group members.
  - Receives remote events and performs local fanout lookup.
- `Parrhesia.Web.Connection` (`lib/parrhesia/web/connection.ex`)
  - On successful ingest, after ACK scheduling, it does:
    1. local fanout (`fanout_event/1`), then
    2. cross-node publish (`maybe_publish_multi_node/1`).
- `Parrhesia.Subscriptions.Supervisor` (`lib/parrhesia/subscriptions/supervisor.ex`)
  - Starts `Parrhesia.Fanout.MultiNode` unconditionally.

In other words: **if BEAM nodes are connected, live events are fanned out cross-node**.

### 1.2 What is not included yet

- No automatic cluster formation/discovery (no `libcluster`, DNS polling, gossip, etc.).
- No durable inter-node event transport.
- No replay/recovery of missed cross-node live events.
- No explicit per-node delivery ACK between relay nodes.

---

## 2) Current runtime behavior in detail

### 2.1 Local ingest flow and publish ordering

For an accepted event in `Parrhesia.Web.Connection`:

1. The validate/policy/persist path runs.
2. The client receives an `OK` reply.
3. A post-ACK message triggers:
   - local fanout (`Index.candidate_subscription_keys/1` + send `{:fanout_event, ...}`),
   - multi-node publish (`MultiNode.publish/1`).

Important semantics:

- Regular persisted events: ACK implies DB persistence succeeded.
- Ephemeral events: ACK implies accepted by policy, but no DB durability.
- Cross-node fanout happens **after** the ACK path is scheduled.

### 2.2 Multi-node transport mechanics

`Parrhesia.Fanout.MultiNode` uses `:pg` membership:

- On init:
  - ensures `:pg` is started,
  - joins group `Parrhesia.Fanout.MultiNode`.
- On publish:
  - gets all group members,
  - excludes itself,
  - sends `{:remote_fanout_event, event}` to each member pid.
- On remote receive:
  - runs local subscription candidate narrowing via `Parrhesia.Subscriptions.Index`,
  - forwards matching candidates to local connection owners as `{:fanout_event, sub_id, event}`.

No republish happens on remote receive, so this path does not create fanout loops.
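The `:pg` pattern above is small enough to sketch in full. The following is an illustrative reimplementation, not the actual `Parrhesia.Fanout.MultiNode` source; `local_fanout/1` stands in for the subscription-index lookup:

```elixir
defmodule MyRelay.Fanout do
  use GenServer

  @group __MODULE__

  def start_link(opts), do: GenServer.start_link(__MODULE__, opts, name: __MODULE__)

  # Called after local ingest: forward the event to every other group member.
  def publish(event), do: GenServer.cast(__MODULE__, {:publish, event})

  @impl true
  def init(_opts) do
    # :pg ships with OTP; start the default scope if nothing else has.
    case :pg.start_link() do
      {:ok, _pid} -> :ok
      {:error, {:already_started, _pid}} -> :ok
    end

    :ok = :pg.join(@group, self())
    {:ok, %{}}
  end

  @impl true
  def handle_cast({:publish, event}, state) do
    for pid <- :pg.get_members(@group), pid != self() do
      send(pid, {:remote_fanout_event, event})
    end

    {:noreply, state}
  end

  @impl true
  def handle_info({:remote_fanout_event, event}, state) do
    # Local-only fanout on remote receive; no republish, hence no loops.
    local_fanout(event)
    {:noreply, state}
  end

  # Placeholder: match the event against the local subscription index and
  # send {:fanout_event, sub_id, event} to matching connection owners.
  defp local_fanout(_event), do: :ok
end
```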
### 2.3 Subscription index locality

The subscription index is local ETS state per node (`Parrhesia.Subscriptions.Index`).

- Each node only tracks subscriptions of its local websocket processes.
- Each node independently decides which local subscribers match a remote event.
- There is no global cross-node subscription registry.

### 2.4 Delivery model and guarantees (current)

The current model is **best-effort live propagation** among connected nodes.

- If nodes are connected and healthy, remote live subscribers should receive events quickly.
- If there is a netsplit or temporary disconnection:
  - remote live subscribers may miss events,
  - persisted events can still be recovered by a normal `REQ`/history query,
  - ephemeral events are not recoverable.

### 2.5 Cluster preconditions

For cross-node fanout to work, operators must provide distributed BEAM connectivity:

- a consistent Erlang cookie,
- named nodes (`--name`/`--sname`),
- network reachability for Erlang distribution ports,
- explicit node connections (or external discovery tooling).

Parrhesia currently does not automate these steps.
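For example, with two release nodes named `parrhesia@host-a` and `parrhesia@host-b` (node names and cookie handling are illustrative assumptions), the manual version of the last step looks like:

```elixir
# In a remote shell on host-a; the cookie is normally shared via RELEASE_COOKIE.
Node.self()                        #=> :"parrhesia@host-a"
Node.connect(:"parrhesia@host-b")  #=> true on success, false otherwise
Node.list()                        #=> [:"parrhesia@host-b"]
```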
---

## 3) Operational characteristics of current design

### 3.1 Performance shape

For each accepted event on one node:

- one local fanout lookup + local sends,
- one cluster publish that sends to `N - 1` remote bus members,
- on each remote node: one local fanout lookup + local sends.

So inter-node traffic scales roughly linearly with node count per event (full-cluster broadcast).

This is simple and low-latency for small-to-medium clusters, but can become expensive as node count grows.

### 3.2 Failure behavior

- Remote node down: send attempts to that member stop once membership updates; no replay.
- Netsplit: live propagation gap during the split.
- Recovery: local clients can catch up via DB-backed queries (except ephemeral kinds).

### 3.3 Consistency expectations

- No global total-ordering guarantee for live delivery across nodes.
- Per-connection ordering is preserved by each connection process's queue/drain behavior.
- Duplicate suppression for ingestion uses storage semantics (`duplicate_event`), but the transport itself is not exactly-once.

### 3.4 Observability today

Relevant metrics exist for fanout/queue pressure (see `Parrhesia.Telemetry`), e.g.:

- `parrhesia.fanout.duration.ms`
- `parrhesia.connection.outbound_queue.depth`
- `parrhesia.connection.outbound_queue.pressure`
- `parrhesia.connection.outbound_queue.overflow.count`

These are useful but do not yet fully separate local-vs-remote fanout pipeline stages.

---

## 4) Practical extension path to a fully-fledged clustered system

A realistic path is incremental. Suggested phases:

### Phase A — hardened BEAM cluster control plane

1. Add cluster discovery/formation (e.g. `libcluster`) with environment-specific topology:
   - Kubernetes DNS,
   - static nodes,
   - cloud VM discovery.
2. Add clear node liveness/partition telemetry and alerts.
3. Provide operator docs for cookie, node naming, and network requirements.

Outcome: simpler and safer cluster operations, same data plane semantics.

### Phase B — resilient distributed fanout data plane

Introduce a durable fanout stream for persisted events.

Recommended pattern (a consumer-side sketch follows the notes below):

1. On successful DB commit of an event, append it to a monotonic fanout log (or use a DB sequence-based stream view).
2. Each relay node runs a consumer with a stored cursor.
3. On restart/partition recovery, a node resumes from its cursor and replays missed events.
4. Local fanout remains the same (subscription index + per-connection queues).

Semantics target:

- **at-least-once** node-to-node propagation,
- replay after downtime,
- idempotent handling keyed by event id.

Notes:

- Ephemeral events can remain best-effort (or have a separate short-lived transport), since no storage source exists for replay.
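A consumer-side sketch of that pattern, assuming a hypothetical `FanoutLog` store that exposes `load_cursor/1`, `save_cursor/2`, and `events_after/2` over the monotonic log:

```elixir
defmodule MyRelay.FanoutConsumer do
  use GenServer

  @poll_ms 250
  @batch 500

  def start_link(opts), do: GenServer.start_link(__MODULE__, opts, name: __MODULE__)

  @impl true
  def init(_opts) do
    # Resume from the last acknowledged position; 0 means "from the beginning".
    cursor = FanoutLog.load_cursor(node()) || 0
    schedule_poll()
    {:ok, %{cursor: cursor}}
  end

  @impl true
  def handle_info(:poll, %{cursor: cursor} = state) do
    events = FanoutLog.events_after(cursor, @batch)

    # Handling is idempotent keyed by event id, so re-delivery after a crash is safe.
    Enum.each(events, &local_fanout/1)

    cursor =
      case List.last(events) do
        nil ->
          cursor

        last ->
          :ok = FanoutLog.save_cursor(node(), last.seq)
          last.seq
      end

    schedule_poll()
    {:noreply, %{state | cursor: cursor}}
  end

  defp schedule_poll, do: Process.send_after(self(), :poll, @poll_ms)

  # Placeholder for the existing local path: subscription index lookup plus
  # per-connection queue sends.
  defp local_fanout(_event), do: :ok
end
```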
### Phase C — scale and efficiency improvements

As cluster size grows, avoid naive full broadcast where possible:

1. Optional node-level subscription summaries (coarse bloom/bitset or keyed summaries) to reduce unnecessary remote sends.
2. Shard fanout workers for CPU locality and mailbox control.
3. Batch remote delivery payloads.
4. Separate traffic classes (e.g. Marmot-heavy streams vs generic) with independent queues.

Outcome: higher throughput per node and lower inter-node amplification.

### Phase D — stronger observability and SLOs

Add explicit distributed pipeline metrics:

- publish enqueue/dequeue latency,
- cross-node delivery lag (commit -> remote fanout enqueue),
- replay backlog depth,
- per-node dropped/expired transport messages,
- partition detection counters.

Define cluster SLO examples:

- p95 commit->remote-live enqueue under nominal load,
- max replay catch-up time after node restart,
- bounded message loss for best-effort channels.

---

## 5) How a fully-fledged system would behave in practice

With Phases A-D implemented, the expected behavior is:

- **Normal operation:**
  - low-latency local fanout,
  - remote nodes receive events via stream consumers quickly,
  - consistent operational visibility of end-to-end lag.
- **Node restart:**
  - the node reconnects and replays from its stored cursor,
  - local subscribers begin receiving new + missed persisted events.
- **Transient partition:**
  - the live best-effort path may degrade,
  - persisted events converge after the partition heals via replay.
- **High fanout bursts:**
  - batching + sharding keeps queue pressure bounded,
  - overflow policies remain connection-local and measurable.

This approach gives a good trade-off between Nostr relay latency and distributed robustness without requiring strict exactly-once semantics.

---

## 6) Current status summary

Today, Parrhesia already supports **lightweight distributed live fanout** when BEAM nodes are connected.

It is intentionally simple and fast for smaller clusters, and it provides a solid base for a more durable, observable cluster architecture as relay scale and availability requirements grow.
147
docs/LOCAL_API.md
Normal file
@@ -0,0 +1,147 @@
# Parrhesia Local API

Parrhesia can run as a normal standalone relay application, but it also exposes a stable
in-process API for Elixir callers that want to embed the relay inside a larger OTP system.

This document describes that embedding surface. The runtime is still alpha, so treat the API
as usable but not yet frozen.

## What embedding means today

Embedding currently means:

- the host app adds `:parrhesia` as a dependency and OTP application
- the host app provides `config :parrhesia, ...` explicitly
- the host app migrates the Parrhesia database schema
- callers interact with the relay through `Parrhesia.API.*`

Current operational assumptions:

- Parrhesia runs one runtime per BEAM node
- core processes use global module names such as `Parrhesia.Config` and `Parrhesia.Web.Endpoint`
- the config defaults in this repo's `config/*.exs` are not imported automatically by a host app

If you want multiple isolated relay instances inside one VM, Parrhesia does not support that
cleanly yet.

## Minimal host setup

Add the dependency in your host app:

```elixir
defp deps do
  [
    {:parrhesia, path: "../parrhesia"}
  ]
end
```

Configure the runtime in your host app. At minimum you should carry over:

```elixir
import Config

config :postgrex, :json_library, JSON

config :parrhesia,
  relay_url: "wss://relay.example.com/relay",
  listeners: %{},
  storage: [backend: :postgres]

config :parrhesia, Parrhesia.Repo,
  url: System.fetch_env!("DATABASE_URL"),
  pool_size: 10,
  types: Parrhesia.PostgresTypes

config :parrhesia, Parrhesia.ReadRepo,
  url: System.fetch_env!("DATABASE_URL"),
  pool_size: 10,
  types: Parrhesia.PostgresTypes

config :parrhesia, ecto_repos: [Parrhesia.Repo]
```

Notes:

- Set `listeners: %{}` if you only want the in-process API and no HTTP/WebSocket ingress.
- If you do want ingress, copy the listener shape from the config reference in
  [README.md](../README.md).
- Production runtime overrides still use the `PARRHESIA_*` environment variables described in
  [README.md](../README.md).

Migrate before serving traffic:

```elixir
Parrhesia.Release.migrate()
```

In development, `mix ecto.migrate -r Parrhesia.Repo` works too.

## Starting the runtime

In the common case, letting OTP start the `:parrhesia` application is enough.

If you need to start the runtime explicitly under your own supervision tree, use
`Parrhesia.Runtime`:

```elixir
children = [
  {Parrhesia.Runtime, name: Parrhesia.Supervisor}
]
```

## Primary modules

The in-process surface is centered on these modules:

- `Parrhesia.API.Events` for publish, query, and count
- `Parrhesia.API.Stream` for REQ-like local subscriptions
- `Parrhesia.API.Auth` for event validation and NIP-98 auth parsing
- `Parrhesia.API.Admin` for management operations
- `Parrhesia.API.Identity` for relay-owned key management
- `Parrhesia.API.ACL` for protected sync ACLs
- `Parrhesia.API.Sync` for outbound relay sync management

Generated ExDoc groups these modules under `Embedded API`.

## Request context

Most calls take a `Parrhesia.API.RequestContext`. This carries authenticated pubkeys and
caller metadata through policy checks.

```elixir
%Parrhesia.API.RequestContext{
  caller: :local,
  authenticated_pubkeys: MapSet.new()
}
```

If your host app has already authenticated a user or peer, put that pubkey into
`authenticated_pubkeys` before calling the API.
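For example (the pubkey value is a placeholder, not a real key):

```elixir
# Placeholder 64-char hex pubkey; use the key your host app actually verified.
verified_pubkey = String.duplicate("ab", 32)

context = %Parrhesia.API.RequestContext{
  caller: :local,
  authenticated_pubkeys: MapSet.new([verified_pubkey])
}
```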
## Example

```elixir
alias Parrhesia.API.Events
alias Parrhesia.API.RequestContext
alias Parrhesia.API.Stream

context = %RequestContext{caller: :local}

{:ok, publish_result} = Events.publish(event, context: context)
{:ok, events} = Events.query([%{"kinds" => [1]}], context: context)
{:ok, ref} = Stream.subscribe(self(), "local-sub", [%{"kinds" => [1]}], context: context)

receive do
  {:parrhesia, :event, ^ref, "local-sub", event} -> event
  {:parrhesia, :eose, ^ref, "local-sub"} -> :ok
end

:ok = Stream.unsubscribe(ref)
```

## Where to look next

- [README.md](../README.md) for setup and the full config reference
- [docs/SYNC.md](./SYNC.md) for relay-to-relay sync semantics
- module docs under `Parrhesia.API.*` for per-function behavior
@@ -1,69 +0,0 @@
# Marmot operations guide (relay operator tuning)

This document captures practical limits and operational defaults for Marmot-heavy traffic (`443`, `445`, `10051`, wrapped `1059`, optional media/push flows).

## 1) Recommended baseline limits

Use these as a starting point and tune from production telemetry.

```elixir
config :parrhesia,
  limits: [
    max_filter_limit: 500,
    max_filters_per_req: 16,
    max_outbound_queue: 256,
    outbound_drain_batch_size: 64
  ],
  policies: [
    # Marmot group routing/query guards
    marmot_require_h_for_group_queries: true,
    marmot_group_max_h_values_per_filter: 32,
    marmot_group_max_query_window_seconds: 2_592_000,

    # Kind 445 retention
    mls_group_event_ttl_seconds: 300,

    # MIP-04 metadata controls
    marmot_media_max_imeta_tags_per_event: 8,
    marmot_media_max_field_value_bytes: 1024,
    marmot_media_max_url_bytes: 2048,
    marmot_media_allowed_mime_prefixes: [],
    marmot_media_reject_mip04_v1: true,

    # MIP-05 push controls (optional)
    marmot_push_server_pubkeys: [],
    marmot_push_max_relay_tags: 16,
    marmot_push_max_payload_bytes: 65_536,
    marmot_push_max_trigger_age_seconds: 120,
    marmot_push_require_expiration: true,
    marmot_push_max_expiration_window_seconds: 120,
    marmot_push_max_server_recipients: 1
  ]
```

## 2) Index expectations for Marmot workloads

The Postgres adapter relies on dedicated partial tag indexes for hot Marmot selectors:

- `event_tags_h_value_created_at_idx` for `#h` group routing
- `event_tags_i_value_created_at_idx` for `#i` keypackage reference lookups

Query-plan regression tests assert these paths remain usable for heavy workloads.

## 3) Telemetry to watch

Key metrics for Marmot traffic and pressure:

- `parrhesia.ingest.duration.ms{traffic_class="marmot|generic"}`
- `parrhesia.query.duration.ms{traffic_class="marmot|generic"}`
- `parrhesia.fanout.duration.ms{traffic_class="marmot|generic"}`
- `parrhesia.connection.outbound_queue.depth{traffic_class=...}`
- `parrhesia.connection.outbound_queue.pressure{traffic_class=...}`
- `parrhesia.connection.outbound_queue.pressure_events.count{traffic_class=...}`
- `parrhesia.connection.outbound_queue.overflow.count{traffic_class=...}`

Operational target: keep queue pressure below a sustained 0.75 and avoid overflow spikes during `445` bursts.

## 4) Fault and recovery expectations

During storage outages, Marmot group-flow writes must fail with explicit `OK false` errors. After recovery, reordered group events should still query deterministically by `created_at DESC, id ASC`.
354
docs/NIP-DBSYNC.md
Normal file
354
docs/NIP-DBSYNC.md
Normal file
@@ -0,0 +1,354 @@

# NIP-DBSYNC — Minimal Mutation Events over Nostr

`draft` `optional`

Defines a minimal event format for publishing immutable application mutation events over Nostr.

This draft intentionally standardizes only the wire format for mutation transport. It does **not** standardize database replication strategy, conflict resolution, relay retention, or key derivation.

---

## Abstract

This NIP defines one regular event kind, **5000**, for signed mutation events.

A mutation event identifies:

- the object namespace being mutated,
- the object identifier within that namespace,
- the mutation operation,
- an optional parent mutation event,
- an application-defined payload.

The purpose of this NIP is to make signed mutation logs portable across Nostr clients and relays without requiring relays to implement database-specific behavior.

---

## Motivation

Many applications need a way to distribute signed state changes across multiple publishers, consumers, or services.

Today this can be done with private event kinds, but private schemas make cross-implementation interoperability harder than necessary. This NIP defines a small shared envelope for mutation events while leaving application-specific state semantics in the payload.

This NIP is intended for use cases such as:

- synchronizing object changes between cooperating services,
- publishing auditable mutation logs,
- replaying application events from ordinary Nostr relays,
- bridging non-Nostr systems into a Nostr-based event stream.

This NIP is **not** a consensus protocol. It does not provide:

- total ordering,
- transactional guarantees,
- global conflict resolution,
- authorization rules,
- guaranteed relay retention.

Applications that require those properties MUST define them separately.

---

## Specification

### Event Kind

| Kind | Category | Name |
|------|----------|------|
| 5000 | Regular | Mutation |

Kind `5000` is a regular event. Relays that support this NIP MAY store it like any other regular event.

This NIP does **not** require relays to:

- retain all historical events,
- index any specific tag beyond normal NIP-01 behavior,
- deliver events in causal or chronological order,
- detect or resolve conflicts.

Applications that depend on durable replay or custom indexing MUST choose relays whose policies satisfy those needs.

### Event Structure

```json
{
  "id": "<32-byte lowercase hex>",
  "pubkey": "<32-byte lowercase hex>",
  "created_at": "<unix timestamp, seconds>",
  "kind": 5000,
  "tags": [
    ["r", "<resource namespace>"],
    ["i", "<object identifier>"],
    ["op", "<mutation operation>"],
    ["e", "<parent mutation event id>"]
  ],
  "content": "<JSON-encoded application payload>",
  "sig": "<64-byte lowercase hex>"
}
```

The `content` field is a JSON-encoded string. Its structure is defined below.

---

## Tags

| Tag | Required | Description |
|-----|----------|-------------|
| `r` | Yes | Stable resource namespace for the mutated object type. Reverse-DNS style names are RECOMMENDED, for example `com.example.accounts.user`. |
| `i` | Yes | Opaque object identifier, unique within the `r` namespace. Consumers MUST treat this as a string. |
| `op` | Yes | Mutation operation. This NIP defines only `upsert` and `delete`. |
| `e` | No | Parent mutation event id, if the publisher wants to express ancestry. At most one `e` tag SHOULD be included in this version of the protocol. |
| `v` | No | Application payload schema version as a string. RECOMMENDED when the payload format may evolve over time. |

### Tag Rules

Publishers:

- MUST include exactly one `r` tag.
- MUST include exactly one `i` tag.
- MUST include exactly one `op` tag.
- MUST set `op` to either `upsert` or `delete`.
- SHOULD include at most one `e` tag.
- MAY include one `v` tag.

Consumers:

- MUST ignore unknown tags.
- MUST NOT assume tag ordering.
- MUST treat the `e` tag as an ancestry hint, not as proof of global ordering.
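
As a non-normative illustration of the publisher rules above, a minimal envelope check in Elixir might look like this (module and error atoms are hypothetical):

```elixir
defmodule MutationEnvelope do
  # Non-normative sketch: enforce the kind 5000 envelope rules above.
  def validate(%{"kind" => 5000, "tags" => tags}) do
    count = fn name -> Enum.count(tags, &match?([^name | _], &1)) end

    op =
      Enum.find_value(tags, fn
        ["op", value | _] -> value
        _ -> nil
      end)

    cond do
      count.("r") != 1 -> {:error, :bad_r_tag}
      count.("i") != 1 -> {:error, :bad_i_tag}
      count.("op") != 1 -> {:error, :bad_op_tag}
      op not in ["upsert", "delete"] -> {:error, :bad_op_value}
      true -> :ok
    end
  end

  def validate(_event), do: {:error, :not_a_mutation_event}
end
```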

### Resource Namespaces

The `r` tag identifies an application-level object type.

This NIP does not define a global registry of resource namespaces. To reduce collisions, publishers SHOULD use a stable namespace they control, such as reverse-DNS notation.

Examples:

- `com.example.accounts.user`
- `org.example.inventory.item`
- `net.example.billing.invoice`

Publishers MUST document the payload schema associated with each resource namespace they use.

---

## Content Payload

The `content` field MUST be a JSON-encoded object.

```json
{
  "value": {},
  "patch": "merge"
}
```

| Field | Required | Description |
|-------|----------|-------------|
| `value` | Yes | Application-defined mutation payload. For `upsert`, this is the state fragment or full post-mutation state being published. For `delete`, this MAY be an empty object or a small reason object. |
| `patch` | No | How `value` should be interpreted. This NIP defines `merge` and `replace`. If omitted, consumers MUST treat it as application-defined. |

### Payload Rules

For `op = upsert`:

- `value` MUST be a JSON object.
- Publishers SHOULD publish either:
  - a partial object intended to be merged, or
  - a full post-mutation object intended to replace prior state.
- If the interpretation is important for interoperability, publishers SHOULD set `patch` to `merge` or `replace`.

For `op = delete`:

- `value` MAY be `{}`.
- Consumers MUST treat `delete` as an application-level tombstone signal.
- This NIP does not define whether deletion means hard delete, soft delete, archival, or hiding. Applications MUST define that separately.
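
A non-normative publisher-side sketch of assembling an unsigned upsert event, using the standard-library `JSON` module referenced elsewhere in this repo (signing and id computation follow normal NIP-01 rules and are omitted):

```elixir
# Sketch: build the envelope and payload; id/sig are added by NIP-01 signing.
value = %{"email" => "jane.doe@newdomain.com"}

unsigned_event = %{
  "kind" => 5000,
  "created_at" => System.os_time(:second),
  "tags" => [
    ["r", "com.example.accounts.user"],
    ["i", "550e8400-e29b-41d4-a716-446655440000"],
    ["op", "upsert"],
    ["v", "1"]
  ],
  "content" => JSON.encode!(%{"value" => value, "patch" => "merge"})
}
```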

### Serialization

All payload values MUST be JSON-serializable.

The following representations are RECOMMENDED:

| Type | Representation |
|------|----------------|
| Timestamp / datetime | ISO 8601 string |
| Decimal | String |
| Binary | Base64 string |
| Null | JSON `null` |

Publishers MAY define additional type mappings, but those mappings are application-specific and MUST be documented outside this NIP.

---

## Ancestry and Replay

The optional `e` tag allows a publisher to indicate which prior mutation event it considered the parent when creating a new mutation.

This supports applications that want ancestry hints for:

- local conflict detection,
- replay ordering,
- branch inspection,
- audit tooling.

However:

- the `e` tag does **not** create a global ordering guarantee,
- relays are not required to deliver parents before children,
- consumers MUST be prepared to receive out-of-order events,
- consumers MAY buffer, defer, ignore, or immediately apply parent-missing events according to local policy.

This NIP does not define a merge event format.

This NIP does not define conflict resolution. If two valid mutation events for the same `(r, i)` object are concurrent or incompatible, consumers MUST resolve them using application-specific rules.

---

## Authorization

This NIP does not define who is authorized to publish mutation events for a given resource or object.

Authorization is application-specific.

Consumers MUST NOT assume that a valid Nostr signature alone authorizes a mutation. Consumers MUST apply their own trust policy, which MAY include:

- explicit pubkey allowlists,
- per-resource ACLs,
- external capability documents,
- relay-level write restrictions,
- application-specific verification.

This NIP does not define custodial keys, deterministic key derivation, shared cluster secrets, or delegation schemes.

---

## Relay Behavior

A relay implementing only NIP-01 remains compatible with this NIP.

No new relay messages are required beyond `REQ`, `EVENT`, and `CLOSE`.

Relays:

- MAY index the `r` and `i` tags using existing single-letter tag indexing conventions.
- MAY apply normal retention, rate-limit, and access-control policies.
- MAY reject events that are too large or otherwise violate local policy.
- MUST NOT be expected to validate application payload semantics.

Applications that require stronger guarantees, such as durable retention or strict admission control, MUST obtain those guarantees from relay policy or from a separate protocol profile.

---

## Subscription Filters

This NIP works with ordinary NIP-01 filters.

### All mutations for one resource

```json
{
  "kinds": [5000],
  "#r": ["com.example.accounts.user"]
}
```

### Mutation history for one object

```json
{
  "kinds": [5000],
  "#r": ["com.example.accounts.user"],
  "#i": ["550e8400-e29b-41d4-a716-446655440000"]
}
```

### Mutations from trusted authors

```json
{
  "kinds": [5000],
  "authors": [
    "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
    "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
  ]
}
```

Applications SHOULD prefer narrow subscriptions over broad network-wide firehoses.

---

## Examples

### Upsert with parent

```json
{
  "id": "1111111111111111111111111111111111111111111111111111111111111111",
  "pubkey": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
  "created_at": 1710500300,
  "kind": 5000,
  "tags": [
    ["r", "com.example.accounts.user"],
    ["i", "550e8400-e29b-41d4-a716-446655440000"],
    ["op", "upsert"],
    ["e", "0000000000000000000000000000000000000000000000000000000000000000"],
    ["v", "1"]
  ],
  "content": "{\"value\":{\"email\":\"jane.doe@newdomain.com\",\"updated_at\":\"2025-03-15T14:35:00Z\"},\"patch\":\"merge\"}",
  "sig": "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"
}
```

### Delete tombstone

```json
{
  "id": "2222222222222222222222222222222222222222222222222222222222222222",
  "pubkey": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
  "created_at": 1710500600,
  "kind": 5000,
  "tags": [
    ["r", "com.example.accounts.user"],
    ["i", "550e8400-e29b-41d4-a716-446655440000"],
    ["op", "delete"],
    ["e", "1111111111111111111111111111111111111111111111111111111111111111"],
    ["v", "1"]
  ],
  "content": "{\"value\":{\"reason\":\"user_requested\"}}",
  "sig": "dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"
}
```

---

## Security Considerations

- **Unauthorized writes:** A valid signature proves authorship, not authorization. Consumers MUST enforce their own trust policy.
- **Replay:** Old valid events may be redelivered by relays or attackers. Consumers SHOULD deduplicate by event id and apply local replay policy.
- **Reordering:** Events may arrive out of order. Consumers MUST NOT treat `created_at` or `e` as a guaranteed total order.
- **Conflict flooding:** Multiple valid mutations may target the same object. Consumers SHOULD rate-limit, bound buffering, and define local conflict policy.
- **Sensitive data exposure:** Nostr events are typically widely replicable. Publishers SHOULD NOT put secrets or regulated data in mutation payloads unless they provide application-layer encryption.
- **Relay retention variance:** Some relays will prune history. Applications that depend on full replay MUST choose relays accordingly or maintain an external archive.
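
For the deduplication point above, a minimal consumer-side sketch (the `MapSet`-of-ids state shape is an assumption):

```elixir
defmodule DedupSketch do
  # Apply a mutation only if its event id has not been seen before.
  def apply_if_new(event, seen_ids) do
    id = event["id"]

    if MapSet.member?(seen_ids, id) do
      {:duplicate, seen_ids}
    else
      {:applied, MapSet.put(seen_ids, id)}
    end
  end
end
```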

---

## Extension Points

Future drafts or companion NIPs may define:

- snapshot events for faster bootstrap,
- object-head or checkpoint events,
- capability or delegation profiles for authorized writers,
- standardized conflict-resolution profiles for specific application classes.

Such extensions SHOULD remain optional and MUST NOT change the meaning of kind `5000` mutation events defined here.

---

## References

- [NIP-01](https://github.com/nostr-protocol/nips/blob/master/01.md) — Basic protocol flow description
417
docs/SYNC.md
Normal file
417
docs/SYNC.md
Normal file
@@ -0,0 +1,417 @@

# Parrhesia Relay Sync

## 1. Purpose

This document defines the Parrhesia proposal for **relay-to-relay event synchronization**.

It is intentionally transport-focused:

- manage remote relay peers,
- catch up on matching events,
- keep a live stream open,
- expose health and basic stats.

It does **not** define application data semantics.

Parrhesia syncs Nostr events. Callers decide which events matter and how to apply them.

---

## 2. Boundary

### Parrhesia is responsible for

- storing and validating events,
- querying and streaming events,
- running outbound sync workers against remote relays,
- tracking peer configuration, worker health, and sync counters,
- exposing peer management through `Parrhesia.API.Sync`.

### Parrhesia is not responsible for

- resource mapping,
- trusted node allowlists for an app profile,
- mutation payload validation beyond normal event validation,
- conflict resolution,
- replay winner selection,
- database upsert/delete semantics.

For Tribes, those remain in `TRIBES-NOSTRSYNC` and `AshNostrSync`.

---

## 3. Security Foundation

### Default posture

The baseline posture for sync traffic is:

- no access to sync events by default,
- no implicit trust from ordinary relay usage,
- no reliance on plaintext confidentiality from public relays.

For the first implementation, Parrhesia should protect sync data primarily with:

- authenticated server identities,
- ACL-gated read and write access,
- TLS with certificate pinning for outbound peers.

### Server identity

Parrhesia owns a low-level server identity used for relay-to-relay authentication.

This identity is separate from:

- TLS endpoint identity,
- application event author pubkeys.

Recommended model:

- Parrhesia has one local server-auth pubkey,
- sync peers authenticate as server-auth pubkeys,
- ACL grants are bound to those authenticated server-auth pubkeys,
- application-level writer trust remains outside Parrhesia.

Identity lifecycle:

1. use the configured/imported key if provided,
2. otherwise use the persisted local identity,
3. otherwise generate once during initial startup and persist it.

Private key export should not be supported.
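
A boot-time sketch of that lifecycle; the module name and helper functions here are hypothetical, and only the resolution order is the point:

```elixir
defmodule IdentityBootSketch do
  # Sketch only: resolve the server identity in the order listed above.
  def resolve do
    with :error <- configured_key(),
         :error <- persisted_identity() do
      generate_and_persist()
    else
      {:ok, identity} -> identity
    end
  end

  # Hypothetical sources; real implementations would read config / storage.
  defp configured_key, do: :error
  defp persisted_identity, do: :error
  defp generate_and_persist, do: %{pubkey: "<generated-server-auth-pubkey>"}
end
```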

### ACLs

Sync traffic should use a real ACL layer, not moderation allowlists.

Current implementation note:

- Parrhesia already has storage-backed moderation state such as `allowed_pubkeys` and `blocked_ips`,
- that is not the sync ACL model,
- sync protection must be enforced in the active websocket/query/count/negentropy/write path, not inferred from management tables alone.

Initial ACL model:

- principal: authenticated pubkey,
- capabilities: `sync_read`, `sync_write`,
- match: event/filter shape such as `kinds: [5000]` and namespace tags.

This is enough for now. We do **not** need separate user and server ACL models yet.

A sync peer is simply an authenticated principal with sync capabilities.
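
As an illustration, a grant for one trusted peer could look like this through `Parrhesia.API.ACL.grant/2` (the rule shape follows the suggested shape in the shared API doc; values are placeholders, and passing `:context` is assumed to match the other `Parrhesia.API.*` calls):

```elixir
# Sketch: allow one authenticated peer to read kind 5000 sync events.
:ok =
  Parrhesia.API.ACL.grant(
    %{
      principal_type: :pubkey,
      principal: "<remote-server-auth-pubkey>",
      capability: :sync_read,
      match: %{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}
    },
    context: %Parrhesia.API.RequestContext{caller: :local}
  )
```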

### TLS pinning

Each outbound sync peer must include pinned TLS material.

Recommended pin type:

- SPKI SHA-256 pins

Multiple pins should be allowed to support certificate rotation.

---

## 4. Sync Model

Each configured sync server represents one outbound worker managed by Parrhesia.

Implementation note:

- Khatru-style relay designs benefit from explicit runtime stages,
- Parrhesia sync should therefore plug into clear internal phases for connection admission, auth, query/count, subscription, negentropy, publish, and fanout,
- this should stay a runtime refactor, not become extra sync semantics.

Minimum behavior:

1. connect to the remote relay,
2. run an initial catch-up query for the configured filters,
3. ingest received events into the local relay through the normal API path,
4. switch to a live subscription for the same filters,
5. reconnect with backoff when disconnected.

The worker treats filters as opaque Nostr filters. It does not interpret app payloads.
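
A shape sketch of that loop; the connection helpers (`connect/1`, `catch_up/2`, `stream_live/2`, `backoff/1`) are hypothetical stubs, and only the ordering mirrors the steps above:

```elixir
defmodule SyncWorkerSketch do
  # Non-normative: steps 1-5 above. Ingest (step 3) happens inside
  # catch_up/2 and stream_live/2 via Parrhesia.API.Events.publish/2.
  def run(server) do
    with {:ok, conn} <- connect(server),             # 1. connect
         :ok <- catch_up(conn, server.filters),      # 2 + 3. catch-up and ingest
         :ok <- stream_live(conn, server.filters) do # 4. live subscription
      :ok
    else
      {:error, _reason} ->
        backoff(server)                              # 5. reconnect with backoff
        run(server)
    end
  end

  defp connect(_server), do: {:ok, :conn}
  defp catch_up(_conn, _filters), do: :ok
  defp stream_live(_conn, _filters), do: :ok
  defp backoff(_server), do: Process.sleep(1_000)
end
```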

### Initial implementation mode

The initial implementation should use ordinary NIP-01 behavior:

- catch-up via a `REQ`-style query,
- live updates via a `REQ` subscription.

This is enough for Tribes and keeps the first version simple.

### NIP-77

Parrhesia now has a real, reusable relay-side NIP-77 engine:

- proper `NEG-OPEN` / `NEG-MSG` / `NEG-CLOSE` / `NEG-ERR` framing,
- a reusable negentropy codec and reconciliation engine,
- bounded local `(created_at, id)` snapshot enumeration for matching filters,
- connection/session integration with policy checks and resource limits.

That means NIP-77 can be used for bandwidth-efficient catch-up between trusted nodes.

The first sync worker implementation may still default to ordinary NIP-01 catch-up plus live replay, because that path is operationally simpler and already matches the current Tribes sync profile. `:negentropy` can now be introduced as an optimization mode rather than a future prerequisite.

---

## 5. API Surface

Primary control plane:

- `Parrhesia.API.Identity.get/1`
- `Parrhesia.API.Identity.ensure/1`
- `Parrhesia.API.Identity.import/2`
- `Parrhesia.API.Identity.rotate/1`
- `Parrhesia.API.ACL.grant/2`
- `Parrhesia.API.ACL.revoke/2`
- `Parrhesia.API.ACL.list/1`
- `Parrhesia.API.Sync.put_server/2`
- `Parrhesia.API.Sync.remove_server/2`
- `Parrhesia.API.Sync.get_server/2`
- `Parrhesia.API.Sync.list_servers/1`
- `Parrhesia.API.Sync.start_server/2`
- `Parrhesia.API.Sync.stop_server/2`
- `Parrhesia.API.Sync.sync_now/2`
- `Parrhesia.API.Sync.server_stats/2`
- `Parrhesia.API.Sync.sync_stats/1`
- `Parrhesia.API.Sync.sync_health/1`

These APIs are in-process. HTTP management may expose them through `Parrhesia.API.Admin` or direct routing to `Parrhesia.API.Sync`.

---

## 6. Server Specification

`put_server/2` is an upsert.

Suggested server shape:

```elixir
%{
  id: "tribes-primary",
  url: "wss://relay-a.example/relay",
  enabled?: true,
  auth_pubkey: "<remote-server-auth-pubkey>",
  mode: :req_stream,
  filters: [
    %{
      "kinds" => [5000],
      "authors" => ["<trusted-node-pubkey-a>", "<trusted-node-pubkey-b>"],
      "#r" => ["tribes.accounts.user", "tribes.chat.tribe"]
    }
  ],
  overlap_window_seconds: 300,
  auth: %{
    type: :nip42
  },
  tls: %{
    mode: :required,
    hostname: "relay-a.example",
    pins: [
      %{type: :spki_sha256, value: "<pin-a>"},
      %{type: :spki_sha256, value: "<pin-b>"}
    ]
  },
  metadata: %{}
}
```

Required fields:

- `id`
- `url`
- `auth_pubkey`
- `filters`
- `tls`

Recommended fields:

- `enabled?`
- `mode`
- `overlap_window_seconds`
- `auth`
- `metadata`

Rules:

- `id` must be stable and unique locally.
- `url` is the remote relay websocket URL.
- `auth_pubkey` is the expected remote server-auth pubkey.
- `filters` must be valid NIP-01 filters.
- Filters are owned by the caller; Parrhesia only validates filter shape.
- `mode` defaults to `:req_stream`.
- `tls.mode` defaults to `:required`.
- `tls.pins` must be non-empty for synced peers.

---

## 7. Runtime State

Each server should have both configuration and runtime status.

Suggested runtime fields:

```elixir
%{
  server_id: "tribes-primary",
  state: :running,
  connected?: true,
  last_connected_at: ~U[2026-03-16 10:00:00Z],
  last_disconnected_at: nil,
  last_sync_started_at: ~U[2026-03-16 10:00:00Z],
  last_sync_completed_at: ~U[2026-03-16 10:00:02Z],
  last_event_received_at: ~U[2026-03-16 10:12:45Z],
  last_eose_at: ~U[2026-03-16 10:00:02Z],
  reconnect_attempts: 0,
  last_error: nil
}
```

Parrhesia should keep this state generic. It is about relay sync health, not app state convergence.

---

## 8. Stats and Health

### Per-server stats

`server_stats/2` should return basic counters such as:

- `events_received`
- `events_accepted`
- `events_duplicate`
- `events_rejected`
- `query_runs`
- `subscription_restarts`
- `reconnects`
- `last_remote_eose_at`
- `last_error`

### Aggregate sync stats

`sync_stats/1` should summarize:

- total configured servers,
- enabled servers,
- running servers,
- connected servers,
- aggregate event counters,
- aggregate reconnect count.

### Health

`sync_health/1` should be operator-oriented, for example:

```elixir
%{
  "status" => "degraded",
  "servers_total" => 3,
  "servers_connected" => 2,
  "servers_failing" => [
    %{"id" => "tribes-secondary", "reason" => "connection_refused"}
  ]
}
```

This is intentionally simple. It should answer “is sync working?” without pretending to prove application convergence.

---

## 9. Event Ingest Path

Events received from a remote sync worker should enter Parrhesia through the same ingest path as any other accepted event.

That means:

1. validate the event,
2. run normal write policy,
3. persist or reject,
4. fan out locally,
5. rely on duplicate-event behavior for idempotency.

This avoids a second ingest path with divergent behavior.

Before normal event acceptance, the sync worker should enforce:

1. pinned TLS validation for the remote endpoint,
2. remote server-auth identity match,
3. a local ACL grant permitting the peer to perform sync reads and/or writes.

The sync worker may attach request-context metadata such as:

```elixir
%Parrhesia.API.RequestContext{
  caller: :sync,
  peer_id: "tribes-primary",
  metadata: %{sync_server_id: "tribes-primary"}
}
```

Recommended additional context when available:

- `remote_ip`
- `subscription_id`

This context is for telemetry, policy, and audit only. It must not turn into application sync semantics.

---

## 10. Persistence

Parrhesia should persist enough sync control-plane state to survive restart:

- local server identity reference,
- configured ACL rules for sync principals,
- configured servers,
- whether a server is enabled,
- an optional catch-up cursor or watermark per server,
- basic last-error and last-success markers.

Parrhesia does not need to persist application replay heads or winner state. That remains in the embedding application.

---

## 11. Relationship to Current Features

### BEAM cluster fanout

`Parrhesia.Fanout.MultiNode` is a separate feature.

It provides best-effort live fanout between connected BEAM nodes. It is not remote relay sync and is not a substitute for `Parrhesia.API.Sync`.

### Management stats

The current admin `stats` is relay-global and minimal.

Sync adds a new dimension:

- peer config,
- worker state,
- per-peer counters,
- sync health summary.

That should be exposed without coupling it to app-specific sync semantics.

---

## 12. Tribes Usage

For Tribes, `AshNostrSync` should be able to (a combined sketch follows this list):

1. rely on Parrhesia’s local server identity,
2. register one or more remote relays with `Parrhesia.API.Sync.put_server/2`,
3. grant sync ACLs for trusted server-auth pubkeys,
4. provide narrow Nostr filters for `kind: 5000`,
5. observe sync health and counters,
6. consume events via the normal local Parrhesia ingest/query/stream surface.
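
A combined sketch of steps 1-3 and 5, assuming `:context` is a required option as in the other `Parrhesia.API.*` calls and using placeholder values throughout:

```elixir
context = %Parrhesia.API.RequestContext{caller: :local}

# 1. ensure the local server identity exists
{:ok, _identity} = Parrhesia.API.Identity.ensure(context: context)

# 3. grant a sync capability to the trusted remote peer
:ok =
  Parrhesia.API.ACL.grant(
    %{
      principal_type: :pubkey,
      principal: "<remote-server-auth-pubkey>",
      capability: :sync_write,
      match: %{"kinds" => [5000]}
    },
    context: context
  )

# 2. register the remote relay as a sync server
{:ok, _server} =
  Parrhesia.API.Sync.put_server(
    %{
      id: "tribes-primary",
      url: "wss://relay-a.example/relay",
      auth_pubkey: "<remote-server-auth-pubkey>",
      filters: [%{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}],
      tls: %{
        mode: :required,
        hostname: "relay-a.example",
        pins: [%{type: :spki_sha256, value: "<pin-a>"}]
      }
    },
    context: context
  )

# 5. observe sync health
{:ok, _health} = Parrhesia.API.Sync.sync_health(context: context)
```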

Tribes should not need Parrhesia to know:

- what a resource namespace means,
- which node pubkeys are trusted for Tribes,
- how to resolve conflicts,
- how to apply an upsert or delete.

That is the key boundary.
BIN
docs/logo.afdesign
Normal file
BIN
docs/logo.afdesign
Normal file
Binary file not shown.
1
docs/logo.svg
Normal file
1
docs/logo.svg
Normal file
File diff suppressed because one or more lines are too long
After Width: | Height: | Size: 37 KiB |
279
docs/slop/HARDEN.md
Normal file
279
docs/slop/HARDEN.md
Normal file
@@ -0,0 +1,279 @@

# Hardening Review: Parrhesia Nostr Relay

You are a security engineer specialising in real-time WebSocket servers, Erlang/OTP systems, and protocol-level abuse. You are reviewing **Parrhesia**, a Nostr relay (NIP-01 compliant) written in Elixir, for hardening opportunities — with a primary focus on **denial-of-service resilience** and a secondary focus on the full attack surface.

Produce a prioritised list of **specific, actionable recommendations** with rationale. For each recommendation, state:

1. The attack or failure mode it mitigates
2. Suggested implementation (config change, code change, or architectural change)
3. Severity estimate (critical / high / medium / low)

---

## 1. Architecture Overview

| Component | Technology | Notes |
|---|---|---|
| Runtime | Elixir/OTP 27, BEAM VM | Each WS connection is a separate process |
| HTTP server | Bandit (pure Elixir) | HTTP/1.1 only, no HTTP/2 |
| WebSocket | `websock_adapter` | Text frames only; binary rejected |
| Database | PostgreSQL via Ecto | Range-partitioned `events` table by `created_at` |
| Caching | ETS | Config snapshot + moderation ban/allow lists |
| Multi-node | Erlang `:pg` groups | Fanout across BEAM cluster nodes |
| Metrics | Prometheus (Telemetry) | `/metrics` endpoint |
| TLS termination | **Out of scope** | Handled by reverse proxy (nginx/Caddy) |

### Supervision Tree

```
Parrhesia.Supervisor
├─ Telemetry (Prometheus exporter)
├─ Config (ETS snapshot of runtime config)
├─ Storage.Supervisor (Ecto repo + moderation cache)
├─ Subscriptions.Supervisor (ETS subscription index for fanout)
├─ Auth.Supervisor (NIP-42 challenge GenServer)
├─ Policy.Supervisor (policy enforcement)
├─ Web.Endpoint (Bandit listener)
└─ Tasks.Supervisor (ExpirationWorker, 30s GC loop)
```

### Data Flow

1. Client connects via WebSocket at `/relay`
2. NIP-42 AUTH challenge issued immediately (16-byte random, base64url)
3. Inbound text frames are: size-checked → JSON-decoded → rate-limited → protocol-dispatched
4. EVENT messages: validated → policy-checked → stored in Postgres → ACK → async fanout to matching subscriptions
5. REQ messages: filters validated → Postgres query → results streamed → EOSE → live subscription registered
6. Fanout: post-ingest, the subscription index (ETS) is traversed; matching connection processes receive events via `send/2`

---

## 2. Current Defences Inventory

### Connection Layer

| Defence | Value | Enforcement Point |
|---|---|---|
| Max WebSocket frame size | **1,048,576 bytes (1 MiB)** | Checked in `handle_in` *before* JSON decode, and at Bandit upgrade (`max_frame_size`) |
| WebSocket upgrade timeout | **60,000 ms** | Passed to `WebSockAdapter.upgrade` |
| Binary frame rejection | Returns NOTICE, connection stays open | `handle_in` opcode check |
| Outbound queue limit | **256 events** per connection | Overflow strategy: **`:close`** (WS 1008) |
| Outbound drain batch | **64 events** | Async drain via `send(self(), :drain_outbound_queue)` |
| Outbound pressure telemetry | Threshold at **75%** of queue | Emits telemetry event only, no enforcement |
| IP blocking | Via moderation cache (ETS) | Management API can add blocked IPs |

### Protocol Layer

| Defence | Value | Notes |
|---|---|---|
| Max event JSON size | **262,144 bytes (256 KiB)** | Re-serialises decoded event and checks byte size |
| Max filters per REQ | **16** | Rejected at filter validation |
| Max filter `limit` | **500** | `min(client_limit, 500)` applied at query time |
| Max subscriptions per connection | **32** | Existing sub IDs updated without counting toward limit |
| Subscription ID max length | **64 characters** | Must be non-empty |
| Event kind range | **0–65,535** | Integer range check |
| Max future event skew | **900 seconds (15 min)** | Events with `created_at > now + 900` rejected |
| Unknown filter keys | **Rejected** | Allowed: `ids`, `authors`, `kinds`, `since`, `until`, `limit`, `search`, `#<letter>` |

### Event Validation Pipeline

Strict order:

1. Required fields present (`id`, `pubkey`, `created_at`, `kind`, `tags`, `content`, `sig`)
2. `id` — 64-char lowercase hex
3. `pubkey` — 64-char lowercase hex
4. `created_at` — non-negative integer, max 900s future skew
5. `kind` — integer in [0, 65535]
6. `tags` — list of non-empty string arrays (**no length limit on the tags array or individual tag values**)
7. `content` — any binary string
8. `sig` — 128-char lowercase hex
9. ID hash recomputation and comparison
10. Schnorr signature verification via `lib_secp256k1` (gated by the `verify_event_signatures` flag, default `true`)

### Rate Limiting

| Defence | Value | Notes |
|---|---|---|
| Event ingest rate | **120 events per window** | Per-connection sliding window |
| Ingest window | **1 second** | Resets on first event after expiry |
| No per-IP connection rate limiting | — | Must be handled at reverse proxy |
| No global connection count ceiling | — | BEAM handles thousands but no configured limit |
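
A sketch of the windowed limiter as the table describes it (the connection-state field names are assumptions):

```elixir
defmodule RateLimitSketch do
  @window_ms 1_000
  @max_events 120

  # Mirrors the table above: the window resets on the first event after expiry.
  def allow_event?(state, now_ms) do
    cond do
      now_ms - state.window_started_at >= @window_ms ->
        {:allow, %{state | window_started_at: now_ms, window_count: 1}}

      state.window_count < @max_events ->
        {:allow, %{state | window_count: state.window_count + 1}}

      true ->
        {:rate_limited, state}
    end
  end
end
```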

### Authentication (NIP-42)

- Challenge issued to **all** connections on connect (optional escalation model)
- AUTH event must: pass full NIP-01 validation, be kind `22242`, contain a matching `challenge` tag, contain a matching `relay` tag
- `created_at` freshness: must be `>= now - 600s` (10 min)
- On success: pubkey added to the `authenticated_pubkeys` MapSet; challenge rotated
- Supports multiple authenticated pubkeys per connection

### Authentication (NIP-98 HTTP)

- Management endpoint (`POST /management`) requires a NIP-98 header
- Auth event must be kind `27235`, `created_at` within **60 seconds** of now
- Must include `method` and `u` tags matching the request exactly

### Access Control

- `auth_required_for_writes`: default **false** (configurable)
- `auth_required_for_reads`: default **false** (configurable)
- Protected events (NIP-70, tagged `["-"]`): require auth + pubkey match
- Giftwrap (kind 1059): unauthenticated REQ → CLOSED; authenticated REQ must include `#p` containing own pubkey

### Database

- All queries use Ecto parameterised bindings — no raw string interpolation
- LIKE search patterns escaped (`%`, `_`, `\` characters)
- Deletion enforces `pubkey == deleter_pubkey` in the WHERE clause
- Soft-delete via `deleted_at`; hard-delete only via vanish (NIP-62) or expiration purge
- DB pool: **32 connections** (prod), queue target 1s, interval 5s

### Moderation

- Banned pubkeys, allowed pubkeys, banned events, and blocked IPs stored in the ETS cache
- Management API (NIP-98 authed) for CRUD on moderation lists
- Cache invalidated atomically on writes

---

## 3. Known Gaps and Areas of Concern

The following are areas where the current implementation may be vulnerable or where defences could be strengthened. **Please evaluate each and provide recommendations.**

### 3.1 Connection Exhaustion

- There is **no global limit on concurrent WebSocket connections**. Each connection is an Elixir process (~2–3 KiB base), but subscriptions, auth state, and outbound queues add per-connection memory.
- There is **no per-IP connection rate limiting at the application layer**. IP blocking exists but is reactive (management API), not automatic.
- There is **no idle timeout** after the WebSocket upgrade completes. A connection can remain open indefinitely without sending or receiving messages.

**Questions:**

- What connection limits should be configured at the Bandit/BEAM level?
- Should an idle timeout be implemented? If so, what value balances real-time subscription use against resource waste?
- Should per-IP connection counting be implemented at the application layer, or is this strictly a reverse proxy concern?

### 3.2 Subscription Abuse

- A single connection can hold **32 subscriptions**, each with up to **16 filters**. That's 512 filter predicates per connection being evaluated on every fanout.
- Filter arrays (`ids`, `authors`, `kinds`, tag values) have **no element count limits**. A filter could contain thousands of author pubkeys.
- There is no cost accounting for "expensive" subscriptions (e.g., wide-open filters matching all events).

**Questions:**

- Should filter array element counts be bounded? If so, what limits per field?
- Should there be a per-connection "filter complexity" budget?
- How expensive is the current ETS subscription index traversal at scale (e.g., 10K concurrent connections × 32 subs each)?

### 3.3 Tag Array Size

- Event validation does **not limit the number of tags** or the length of individual tag values beyond the 256 KiB total event size cap.
- A maximally-tagged event could contain thousands of short tags, causing amplification in `event_tags` table inserts (one row per tag).

**Questions:**

- Should a max tag count be enforced? What is a reasonable limit?
- What is the insert cost of storing e.g. 1,000 tags per event? Could this be used for write amplification?
- Should individual tag value lengths be bounded?

### 3.4 AUTH Timing

- AUTH event `created_at` freshness only checks the **lower bound** (`>= now - 600`). An AUTH event with `created_at` far in the future passes validation.
- Regular events have a future skew cap of 900s, but AUTH events do not.

**Questions:**

- Should AUTH events also enforce a future `created_at` bound?
- Is a 600-second AUTH window too wide? Could it be reduced?

### 3.5 Outbound Amplification

- A single inbound EVENT can fan out to an unbounded number of matching subscriptions across all connections.
- The outbound queue (256 events, `:close` strategy) protects individual connections but does not limit total fanout work per event.
- The fanout traverses the ETS subscription index synchronously in the ingesting connection's process.

**Questions:**

- Should fanout be bounded per event (e.g., max N recipients before yielding)?
- Should fanout happen in a separate process pool rather than inline?
- Is the `:close` overflow strategy optimal, or would `:drop_oldest` be better for well-behaved clients with temporary backpressure?

### 3.6 Query Amplification

- A single REQ with 16 filters, each with `limit: 500`, could trigger 16 separate Postgres queries returning up to 8,000 events total.
- COUNT requests also execute per-filter queries (now deduplicated via UNION ALL).
- `search` filters use `ILIKE %pattern%`, which cannot use B-tree indexes.

**Questions:**

- Should there be a per-REQ total result cap (across all filters)?
- Should `search` queries be rate-limited or require a minimum pattern length?
- Should COUNT be disabled or rate-limited separately?
- Are there missing indexes that would help common query patterns?

### 3.7 Multi-Node Trust

- Events received via `:remote_fanout_event` from peer BEAM nodes **skip all validation and policy checks** and go directly to the subscription index.
- This assumes all cluster peers are trusted.

**Questions:**

- If cluster membership is dynamic or spans trust boundaries, should remote events be re-validated?
- Should there be a shared secret or HMAC on inter-node messages?

### 3.8 Metrics Endpoint

- `/metrics` (Prometheus) is **unauthenticated**.
- Exposes internal telemetry: connection counts, event throughput, queue depths, database timing.

**Questions:**

- Should `/metrics` require authentication or be restricted to internal networks?
- Could metrics data be used to profile the relay's capacity and craft targeted attacks?

### 3.9 Negentropy Stub

- NEG-OPEN, NEG-MSG, and NEG-CLOSE messages are accepted and acknowledged, but the reconciliation logic is a stub (cursor counter only).
- Are there resource implications of accepting negentropy sessions without a real implementation?

### 3.10 Event Re-Serialisation Cost

- To enforce the 256 KiB event size limit, the relay calls `JSON.encode!(event)` on the already-decoded event map. This re-serialisation happens on every inbound EVENT.
- Could this be replaced with a byte-length check on the raw frame payload (already available)?
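
A sketch of that suggested alternative; the limit constant mirrors the 256 KiB cap above, and since the raw frame also carries the `["EVENT", ...]` envelope, the check is slightly conservative:

```elixir
defmodule FrameSizeSketch do
  @max_event_bytes 262_144

  # Reject oversized frames before JSON decoding instead of re-encoding later.
  def check(raw_frame) when byte_size(raw_frame) > @max_event_bytes,
    do: {:error, :too_large}

  def check(raw_frame), do: {:ok, raw_frame}
end
```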

---

## 4. Specific Review Requests

Beyond the gaps above, please also evaluate:

1. **Bandit configuration**: Are there Bandit-level options (max connections, header limits, request timeouts, keepalive settings) that should be tuned for a public-facing relay?

2. **BEAM VM flags**: Are there any Erlang VM flags (`+P`, `+Q`, `+S`, memory limits) that should be set for production hardening?

3. **Ecto pool exhaustion**: With 32 DB connections and potentially thousands of concurrent REQ queries, what happens under pool exhaustion? Is the 1s queue target + 5s interval appropriate?

4. **ETS table sizing**: The subscription index and moderation cache use ETS. Are there memory limits or table options (`read_concurrency`, `write_concurrency`, `compressed`) that should be tuned?

5. **Process mailbox overflow**: Connection processes receive events via `send/2` during fanout. If a process is slow to consume, its mailbox grows. The outbound queue mechanism is application-level — but is the BEAM-level mailbox also protected?

6. **Reverse proxy recommendations**: What nginx/Caddy configuration should complement the relay's defences? (Rate limiting, connection limits, WebSocket-specific settings, request body size.)

7. **Monitoring and alerting**: What telemetry signals should trigger alerts? (Connection count spikes, queue overflow rates, DB pool saturation, error rates.)

---

## 5. Out of Scope

The following are **not** in scope for this review:

- TLS configuration (handled by reverse proxy)
- DNS and network-level DDoS mitigation
- Operating system hardening
- Key management for the relay identity
- Client-side security
- Nostr protocol design flaws (we implement the spec as-is)

---

## 6. Response Format

For each recommendation, use this format:

### [Severity] Title

**Attack/failure mode:** What goes wrong without this mitigation.

**Current state:** What exists today (or doesn't).

**Recommendation:** Specific change — config value, code change, or architectural decision.

**Trade-offs:** Any impact on legitimate users or operational complexity.
424
docs/slop/LOCAL_API.md
Normal file
424
docs/slop/LOCAL_API.md
Normal file
@@ -0,0 +1,424 @@

# Parrhesia Shared API

## 1. Goal

Expose a stable in-process API that:

- is used by WebSocket, HTTP management, local callers, and sync workers,
- keeps protocol and storage behavior in one place,
- stays neutral about application-level replication semantics.

This document defines the Parrhesia contract. It does **not** define Tribes or Ash sync behavior.

---

## 2. Scope

### In scope

- event ingest/query/count parity with WebSocket behavior,
- local subscription APIs,
- NIP-98 validation helpers,
- management/admin helpers,
- remote relay sync worker control and health reporting.

### Out of scope

- resource registration,
- trusted app writers,
- mutation payload semantics,
- conflict resolution,
- replay winner selection,
- Ash action mapping.

Those belong in app profiles such as `TRIBES-NOSTRSYNC`, not in Parrhesia.

---

## 3. Layering

```text
Transport / embedding / background workers
  - Parrhesia.Web.Connection
  - Parrhesia.Web.Management
  - Parrhesia.Local.*
  - Parrhesia.Sync.*

Shared API
  - Parrhesia.API.Auth
  - Parrhesia.API.Events
  - Parrhesia.API.Stream
  - Parrhesia.API.Admin
  - Parrhesia.API.Identity
  - Parrhesia.API.ACL
  - Parrhesia.API.Sync

Runtime internals
  - Parrhesia.Policy.EventPolicy
  - Parrhesia.Storage.*
  - Parrhesia.Groups.Flow
  - Parrhesia.Subscriptions.Index
  - Parrhesia.Fanout.MultiNode
  - Parrhesia.Telemetry
```

Rule: transport framing stays at the edge. Business decisions happen in `Parrhesia.API.*`.

Implementation note:

- the runtime beneath `Parrhesia.API.*` should expose clearer internal policy stages than it does today,
- at minimum: connection/auth, publish, query/count, stream subscription, negentropy, response shaping, and broadcast/fanout,
- these are internal runtime seams, not additional public APIs.

---

## 4. Core Context

```elixir
defmodule Parrhesia.API.RequestContext do
  defstruct authenticated_pubkeys: MapSet.new(),
            actor: nil,
            caller: :local,
            remote_ip: nil,
            subscription_id: nil,
            peer_id: nil,
            metadata: %{}
end
```

`caller` is for telemetry and policy parity, for example `:websocket`, `:http`, `:local`, or `:sync`.

Recommended usage:

- `remote_ip` for connection-level policy and audit,
- `subscription_id` for query/stream/negentropy context,
- `peer_id` for trusted sync peer identity when applicable,
- `metadata` for transport-specific details that should not become API fields.

---

## 5. Public Modules

### 5.1 `Parrhesia.API.Auth`

Purpose:

- event validation helpers,
- NIP-98 verification,
- optional embedding account resolution.

```elixir
@spec validate_event(map()) :: :ok | {:error, term()}
@spec compute_event_id(map()) :: String.t()

@spec validate_nip98(String.t() | nil, String.t(), String.t()) ::
        {:ok, Parrhesia.API.Auth.Context.t()} | {:error, term()}

@spec validate_nip98(String.t() | nil, String.t(), String.t(), keyword()) ::
        {:ok, Parrhesia.API.Auth.Context.t()} | {:error, term()}
```

### 5.2 `Parrhesia.API.Events`

Purpose:

- canonical ingest/query/count path used by WS, HTTP, local callers, and sync workers.

```elixir
@spec publish(map(), keyword()) ::
        {:ok, Parrhesia.API.Events.PublishResult.t()} | {:error, term()}

@spec query([map()], keyword()) ::
        {:ok, [map()]} | {:error, term()}

@spec count([map()], keyword()) ::
        {:ok, non_neg_integer() | map()} | {:error, term()}
```

Required options:

- `:context` - `%Parrhesia.API.RequestContext{}`

`publish/2` must preserve current `EVENT` semantics:

1. size checks,
2. `Protocol.validate_event/1`,
3. `EventPolicy.authorize_write/2`,
4. group handling,
5. persistence or control-event path,
6. local plus multi-node fanout,
7. telemetry.

Return shape mirrors `OK`:

```elixir
{:ok, %PublishResult{event_id: id, accepted: true, message: "ok: event stored"}}
{:ok, %PublishResult{event_id: id, accepted: false, message: "blocked: ..."}}
```

`query/2` and `count/2` must preserve current `REQ` and `COUNT` behavior, including giftwrap restrictions and server-side filter validation.

### 5.3 `Parrhesia.API.Stream`

Purpose:

- in-process subscription surface with the same semantics as a WebSocket `REQ`.

This is **required** for embedding and sync consumers.

```elixir
@spec subscribe(pid(), String.t(), [map()], keyword()) ::
        {:ok, reference()} | {:error, term()}

@spec unsubscribe(reference()) :: :ok
```

Required options:

- `:context` - `%Parrhesia.API.RequestContext{}`

Subscriber contract:

```elixir
{:parrhesia, :event, ref, subscription_id, event}
{:parrhesia, :eose, ref, subscription_id}
{:parrhesia, :closed, ref, subscription_id, reason}
```

`subscribe/4` must:

1. validate filters,
2. apply read policy,
3. emit initial catch-up events in the same order as `REQ`,
4. emit exactly one `:eose`,
5. register for live fanout until `unsubscribe/1`.

This module does **not** know why a caller wants the stream.
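
A sketch of a GenServer consuming the subscriber contract above (the module name, filter bookkeeping, and `handle_event/1` are assumptions):

```elixir
defmodule StreamConsumerSketch do
  use GenServer

  def init(opts) do
    context = %Parrhesia.API.RequestContext{caller: :local}

    {:ok, ref} =
      Parrhesia.API.Stream.subscribe(self(), "consumer", opts[:filters], context: context)

    {:ok, %{ref: ref}}
  end

  # Catch-up and live events arrive as plain messages per the contract above.
  def handle_info({:parrhesia, :event, ref, _sub_id, event}, %{ref: ref} = state) do
    handle_event(event)
    {:noreply, state}
  end

  def handle_info({:parrhesia, :eose, ref, _sub_id}, %{ref: ref} = state),
    do: {:noreply, state}

  def handle_info({:parrhesia, :closed, ref, _sub_id, _reason}, %{ref: ref} = state),
    do: {:stop, :normal, state}

  defp handle_event(_event), do: :ok
end
```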

### 5.4 `Parrhesia.API.Admin`

Purpose:

- stable in-process facade for management operations already exposed over HTTP.

```elixir
@spec execute(String.t() | atom(), map(), keyword()) :: {:ok, map()} | {:error, term()}
@spec stats(keyword()) :: {:ok, map()} | {:error, term()}
@spec health(keyword()) :: {:ok, map()} | {:error, term()}
@spec list_audit_logs(keyword()) :: {:ok, [map()]} | {:error, term()}
```

Baseline methods:

- `ping`
- `stats`
- `health`
- moderation methods already supported by the storage admin adapter

`stats/1` is relay-level and cheap. `health/1` is liveness/readiness-oriented and may include worker state.

`API.Admin` is the operator-facing umbrella for management. It may delegate domain-specific work to `API.Identity`, `API.ACL`, and `API.Sync`.

### 5.5 `Parrhesia.API.Identity`

Purpose:

- manage the Parrhesia-owned server identity,
- expose public identity metadata,
- support explicit import and rotation,
- keep private key material internal.

Parrhesia owns a low-level server identity used for relay-to-relay auth and other transport-local security features.

```elixir
@spec get(keyword()) :: {:ok, map()} | {:error, term()}
@spec ensure(keyword()) :: {:ok, map()} | {:error, term()}
@spec import(map(), keyword()) :: {:ok, map()} | {:error, term()}
@spec rotate(keyword()) :: {:ok, map()} | {:error, term()}
@spec sign_event(map(), keyword()) :: {:ok, map()} | {:error, term()}
```

Rules:

- private key material must never be returned by the API,
- production deployments should be able to import a configured key,
- local/dev deployments may generate on first init if none exists,
- identity creation should be eager and deterministic, not lazy on first sync use.

Recommended boot order:

1. configured/imported key,
2. persisted local identity,
3. generate once and persist.

### 5.6 `Parrhesia.API.ACL`

Purpose:

- enforce event/filter ACLs for authenticated principals,
- support default-deny sync visibility,
- allow dynamic grants for trusted sync peers.

This is a real authorization layer, not a reuse of moderation allowlists.

Current implementation note:

- Parrhesia already has storage-backed moderation presence tables such as `allowed_pubkeys` and `blocked_ips`,
- those are not sufficient for sync ACLs,
- the new ACL layer must be enforced directly in the active read/write/query/negentropy path, not only through management tables.

```elixir
@spec grant(map(), keyword()) :: :ok | {:error, term()}
@spec revoke(map(), keyword()) :: :ok | {:error, term()}
@spec list(keyword()) :: {:ok, [map()]} | {:error, term()}
@spec check(atom(), map(), keyword()) :: :ok | {:error, term()}
```

Suggested rule shape:

```elixir
%{
  principal_type: :pubkey,
  principal: "<server-auth-pubkey>",
  capability: :sync_read,
  match: %{
    "kinds" => [5000],
    "#r" => ["tribes.accounts.user", "tribes.chat.tribe"]
  }
}
```

For the first implementation, principals should be authenticated pubkeys only.

We do **not** need a separate user-vs-server ACL model yet. A sync peer is simply a principal with sync capabilities.

Initial required capabilities:

- `:sync_read`
- `:sync_write`

Recommended baseline:

- ordinary events follow existing relay behavior,
- sync traffic is default-deny,
- access is lifted only by explicit ACL grants for authenticated server pubkeys.
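
An enforcement-path sketch using `check/3`; the argument order follows the spec above, while the exact match-payload map and error atoms are assumptions:

```elixir
# Before serving a sync read, verify the authenticated principal's grant.
context = %Parrhesia.API.RequestContext{caller: :sync, peer_id: "tribes-primary"}
filters = [%{"kinds" => [5000]}]

case Parrhesia.API.ACL.check(:sync_read, %{"kinds" => [5000]}, context: context) do
  :ok -> Parrhesia.API.Events.query(filters, context: context)
  {:error, reason} -> {:error, reason}
end
```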
|
||||
|
||||
### 5.7 `Parrhesia.API.Sync`

Purpose:

- manage remote relay sync workers without embedding app-specific replication semantics.

Parrhesia syncs **events**, not records.

```elixir
@spec put_server(map(), keyword()) ::
        {:ok, Parrhesia.API.Sync.Server.t()} | {:error, term()}

@spec remove_server(String.t(), keyword()) :: :ok | {:error, term()}
@spec get_server(String.t(), keyword()) ::
        {:ok, Parrhesia.API.Sync.Server.t()} | :error | {:error, term()}

@spec list_servers(keyword()) ::
        {:ok, [Parrhesia.API.Sync.Server.t()]} | {:error, term()}

@spec start_server(String.t(), keyword()) :: :ok | {:error, term()}
@spec stop_server(String.t(), keyword()) :: :ok | {:error, term()}
@spec sync_now(String.t(), keyword()) :: :ok | {:error, term()}

@spec server_stats(String.t(), keyword()) ::
        {:ok, map()} | :error | {:error, term()}

@spec sync_stats(keyword()) :: {:ok, map()} | {:error, term()}
@spec sync_health(keyword()) :: {:ok, map()} | {:error, term()}
```

`put_server/2` is upsert-style. It covers both add and update.

Minimum server shape:

```elixir
%{
  id: "tribes-a",
  url: "wss://relay-a.example/relay",
  enabled?: true,
  auth_pubkey: "<remote-server-auth-pubkey>",
  filters: [%{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}],
  mode: :req_stream,
  auth: %{type: :nip42},
  tls: %{
    mode: :required,
    hostname: "relay-a.example",
    pins: [
      %{type: :spki_sha256, value: "<base64-sha256-spki-pin>"}
    ]
  }
}
```
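A hedged registration sketch (`server_config` stands for the minimum server shape above; only the `Parrhesia.API.Sync` calls are from the specs):

```elixir
alias Parrhesia.API.Sync

# Upsert the peer, start its worker, trigger a first pass, then inspect it.
with {:ok, _server} <- Sync.put_server(server_config),
     :ok <- Sync.start_server("tribes-a"),
     :ok <- Sync.sync_now("tribes-a") do
  Sync.server_stats("tribes-a")
end
```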
Important constraints:

- filters are caller-provided and opaque to Parrhesia,
- Parrhesia does not inspect `kind: 5000` payload semantics,
- Parrhesia may persist peer config and runtime counters,
- Parrhesia may reconnect and resume catch-up using generic event cursors,
- Parrhesia must expose worker health and basic counters,
- remote relay TLS pinning is required,
- sync peer auth is bound to a server-auth pubkey, not inferred from event author pubkeys,
- sync enforcement should reuse the same runtime policy stages as ordinary websocket traffic rather than inventing a parallel trust path.

Server identity model:

- Parrhesia owns its local server-auth identity via `API.Identity`,
- peer config declares the expected remote server-auth pubkey,
- ACL grants are bound to authenticated server-auth pubkeys,
- event author pubkeys remain a separate application concern.

Initial mode should be `:req_stream` (a worker sketch follows the list):

1. run catch-up with `API.Events.query/2`-equivalent client behavior against the remote relay,
2. switch to a live subscription,
3. ingest received events through local `API.Events.publish/2`.
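A minimal worker sketch under stated assumptions: `RemoteRelay` is a hypothetical Nostr websocket client module, and `context` carries the peer's authenticated server pubkey; only the `Parrhesia.API.Events` calls are actual surface from this document:

```elixir
def run(server, context) do
  # 1. catch-up: replay the remote relay's history for the configured filters
  {:ok, events} = RemoteRelay.query(server.url, server.filters)
  Enum.each(events, &Parrhesia.API.Events.publish(&1, context: context))

  # 2./3. switch to a live subscription and ingest each received event
  # through the local publish path
  RemoteRelay.subscribe(server.url, server.filters, fn event ->
    Parrhesia.API.Events.publish(event, context: context)
  end)
end
```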
Future optimization:

- `:negentropy` may be added as an optimization mode on top of the simpler `:req_stream` baseline.
- Parrhesia now has a reusable NIP-77 engine, but a sync worker does not need to depend on it for the first implementation.

---

## 6. Server Integration

### WebSocket

Transport frames map onto the API surface (a dispatch sketch follows the list):

- `EVENT` -> `Parrhesia.API.Events.publish/2`
- `REQ` -> `Parrhesia.API.Stream.subscribe/4`
- `COUNT` -> `Parrhesia.API.Events.count/2`
- `AUTH` stays connection-specific, but validation helpers may move to `API.Auth`
- `NEG-*` maps to the reusable NIP-77 engine and remains exposed through the websocket transport boundary
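A hedged dispatch sketch (frame decoding and socket state are elided, and the argument order of `Stream.subscribe/4` is assumed, not confirmed by this document):

```elixir
# Illustrative routing only: map decoded Nostr frames onto the API surface.
def handle_frame(["EVENT", event], state),
  do: Parrhesia.API.Events.publish(event, context: state.context)

def handle_frame(["REQ", sub_id | filters], state),
  do: Parrhesia.API.Stream.subscribe(sub_id, filters, self(), context: state.context)

def handle_frame(["COUNT", _sub_id | filters], state),
  do: Parrhesia.API.Events.count(filters, context: state.context)
```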
### HTTP management

A NIP-98 plug-style sketch follows the list:

- NIP-98 validation via `Parrhesia.API.Auth.validate_nip98/3`
- management methods via `Parrhesia.API.Admin`
- sync peer CRUD and health endpoints may delegate to `Parrhesia.API.Sync`
- identity and ACL management may delegate to `API.Identity` and `API.ACL`
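Only `validate_nip98/3` and the returned `Parrhesia.API.Auth.Context` come from this document; the Plug wiring is illustrative:

```elixir
def call(conn, _opts) do
  authorization = List.first(Plug.Conn.get_req_header(conn, "authorization"))

  case Parrhesia.API.Auth.validate_nip98(
         authorization,
         conn.method,
         Plug.Conn.request_url(conn)
       ) do
    {:ok, %Parrhesia.API.Auth.Context{request_context: context}} ->
      # Hand the shared request context to downstream management handlers.
      Plug.Conn.assign(conn, :request_context, context)

    {:error, _reason} ->
      conn
      |> Plug.Conn.send_resp(401, "auth required")
      |> Plug.Conn.halt()
  end
end
```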
### Local wrappers

`Parrhesia.Local.*` remain thin delegates over `Parrhesia.API.*`.

---

## 7. Relationship to Sync Profiles

This document is intentionally lower-level than `TRIBES-NOSTRSYNC` and `SYNC_DB.md`.

Those documents may require:

- `Parrhesia.API.Events.publish/2`
- `Parrhesia.API.Events.query/2`
- `Parrhesia.API.Stream.subscribe/4`
- `Parrhesia.API.Sync.*`

But they must not move application conflict rules or payload semantics into Parrhesia.
File diff suppressed because it is too large
27
flake.lock
generated
Normal file
@@ -0,0 +1,27 @@
{
  "nodes": {
    "nixpkgs": {
      "locked": {
        "lastModified": 1773389992,
        "narHash": "sha256-wvfdLLWJ2I9oEpDd9PfMA8osfIZicoQ5MT1jIwNs9Tk=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "c06b4ae3d6599a672a6210b7021d699c351eebda",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-unstable",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "nixpkgs": "nixpkgs"
      }
    }
  },
  "root": "root",
  "version": 7
}
68
flake.nix
Normal file
@@ -0,0 +1,68 @@
{
  description = "Parrhesia Nostr relay";

  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";

  outputs = {nixpkgs, ...}: let
    systems = [
      "x86_64-linux"
      "aarch64-linux"
      "x86_64-darwin"
      "aarch64-darwin"
    ];

    forAllSystems = nixpkgs.lib.genAttrs systems;
  in {
    formatter = forAllSystems (system: (import nixpkgs {inherit system;}).alejandra);

    packages = forAllSystems (
      system: let
        pkgs = import nixpkgs {inherit system;};
        lib = pkgs.lib;
        parrhesia = pkgs.callPackage ./default.nix {};
      in
        {
          default = parrhesia;
          inherit parrhesia;
        }
        // lib.optionalAttrs pkgs.stdenv.hostPlatform.isLinux {
          dockerImage = pkgs.dockerTools.buildLayeredImage {
            name = "parrhesia";
            tag = "latest";

            contents = [
              parrhesia
              pkgs.bash
              pkgs.cacert
              pkgs.coreutils
              pkgs.fakeNss
            ];

            extraCommands = ''
              mkdir -p tmp
              chmod 1777 tmp
            '';

            config = {
              Entrypoint = ["${parrhesia}/bin/parrhesia"];
              Cmd = ["start"];
              ExposedPorts = {
                "4413/tcp" = {};
              };
              WorkingDir = "/";
              User = "65534:65534";
              Env = [
                "HOME=/tmp"
                "LANG=C.UTF-8"
                "LC_ALL=C.UTF-8"
                "MIX_ENV=prod"
                "PORT=4413"
                "RELEASE_DISTRIBUTION=none"
                "SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt"
              ];
            };
          };
        }
    );
  };
}
@@ -1,17 +1,27 @@
defmodule Parrhesia do
  @moduledoc """
  Documentation for `Parrhesia`.
  Parrhesia is a Nostr relay runtime that can run standalone or as an embedded OTP service.

  For embedded use, the main developer-facing surface is `Parrhesia.API.*`.
  Start with:

  - `Parrhesia.API.Events`
  - `Parrhesia.API.Stream`
  - `Parrhesia.API.Admin`
  - `Parrhesia.API.Identity`
  - `Parrhesia.API.ACL`
  - `Parrhesia.API.Sync`

  The host application is responsible for:

  - setting `config :parrhesia, ...`
  - migrating the configured Parrhesia repos
  - deciding whether to expose listeners or use only the in-process API

  See `README.md` and `docs/LOCAL_API.md` for the embedding model and configuration guide.
  """

  @doc """
  Hello world.

  ## Examples

      iex> Parrhesia.hello()
      :world

  """
  @doc false
  def hello do
    :world
  end
304
lib/parrhesia/api/acl.ex
Normal file
@@ -0,0 +1,304 @@
defmodule Parrhesia.API.ACL do
  @moduledoc """
  Public ACL API and rule matching for protected sync traffic.

  ACL checks are only applied when the requested subject overlaps with
  `config :parrhesia, :acl, protected_filters: [...]`.

  The intended flow is:

  1. mark a subset of sync traffic as protected with `protected_filters`
  2. persist pubkey-based grants with `grant/2`
  3. call `check/3` during sync reads and writes

  Unprotected subjects always return `:ok`.
  """

  alias Parrhesia.API.RequestContext
  alias Parrhesia.Protocol.Filter
  alias Parrhesia.Storage

  @doc """
  Persists an ACL rule.

  A typical rule looks like:

  ```elixir
  %{
    principal_type: :pubkey,
    principal: "...64 hex chars...",
    capability: :sync_read,
    match: %{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}
  }
  ```
  """
  @spec grant(map(), keyword()) :: :ok | {:error, term()}
  def grant(rule, _opts \\ []) do
    with {:ok, _stored_rule} <- Storage.acl().put_rule(%{}, normalize_rule(rule)) do
      :ok
    end
  end

  @doc """
  Deletes ACL rules matching the given selector.

  The selector is passed through to the configured storage adapter, which typically accepts an
  id-based selector such as `%{id: rule_id}`.
  """
  @spec revoke(map(), keyword()) :: :ok | {:error, term()}
  def revoke(rule, _opts \\ []) do
    Storage.acl().delete_rule(%{}, normalize_delete_selector(rule))
  end

  @doc """
  Lists persisted ACL rules.

  Supported filters are:

  - `:principal_type`
  - `:principal`
  - `:capability`
  """
  @spec list(keyword()) :: {:ok, [map()]} | {:error, term()}
  def list(opts \\ []) do
    Storage.acl().list_rules(%{}, normalize_list_opts(opts))
  end

  @doc """
  Authorizes a protected sync read or write subject for the given request context.

  Supported capabilities are `:sync_read` and `:sync_write`.

  `opts[:context]` defaults to an empty `Parrhesia.API.RequestContext`, which means protected
  subjects will fail with `{:error, :auth_required}` until authenticated pubkeys are present.
  """
  @spec check(atom(), map(), keyword()) :: :ok | {:error, term()}
  def check(capability, subject, opts \\ [])

  def check(capability, subject, opts)
      when capability in [:sync_read, :sync_write] and is_map(subject) do
    context = Keyword.get(opts, :context, %RequestContext{})

    with {:ok, normalized_capability} <- normalize_capability(capability),
         {:ok, normalized_context} <- normalize_context(context),
         {:ok, protected_filters} <- protected_filters() do
      if protected_subject?(normalized_capability, subject, protected_filters) do
        authorize_subject(normalized_capability, subject, normalized_context)
      else
        :ok
      end
    end
  end

  def check(_capability, _subject, _opts), do: {:error, :invalid_acl_capability}

  @doc """
  Returns `true` when a filter overlaps the configured protected read surface.
  """
  @spec protected_read?(map()) :: boolean()
  def protected_read?(filter) when is_map(filter) do
    case protected_filters() do
      {:ok, protected_filters} ->
        protected_subject?(:sync_read, filter, protected_filters)

      {:error, _reason} ->
        false
    end
  end

  def protected_read?(_filter), do: false

  @doc """
  Returns `true` when an event matches the configured protected write surface.
  """
  @spec protected_write?(map()) :: boolean()
  def protected_write?(event) when is_map(event) do
    case protected_filters() do
      {:ok, protected_filters} ->
        protected_subject?(:sync_write, event, protected_filters)

      {:error, _reason} ->
        false
    end
  end

  def protected_write?(_event), do: false

  defp authorize_subject(capability, subject, %RequestContext{} = context) do
    if MapSet.size(context.authenticated_pubkeys) == 0 do
      {:error, :auth_required}
    else
      capability
      |> list_rules_for_capability()
      |> authorize_against_rules(capability, context.authenticated_pubkeys, subject)
    end
  end

  defp list_rules_for_capability(capability) do
    Storage.acl().list_rules(%{}, principal_type: :pubkey, capability: capability)
  end

  defp authorize_against_rules({:ok, rules}, capability, authenticated_pubkeys, subject) do
    if Enum.any?(authenticated_pubkeys, &principal_authorized?(&1, subject, rules)) do
      :ok
    else
      {:error, denial_reason(capability)}
    end
  end

  defp authorize_against_rules({:error, reason}, _capability, _authenticated_pubkeys, _subject),
    do: {:error, reason}

  defp principal_authorized?(authenticated_pubkey, subject, rules) do
    Enum.any?(rules, fn rule ->
      rule.principal == authenticated_pubkey and
        rule_covers_subject?(rule.capability, rule.match, subject)
    end)
  end

  # Reads must request a subset of the rule's match; writes must be events
  # that match the rule's filter.
  defp rule_covers_subject?(:sync_read, rule_match, filter),
    do: filter_within_rule?(filter, rule_match)

  defp rule_covers_subject?(:sync_write, rule_match, event),
    do: Filter.matches_filter?(event, rule_match)

  defp protected_subject?(:sync_read, filter, protected_filters) do
    Enum.any?(protected_filters, &filters_overlap?(filter, &1))
  end

  defp protected_subject?(:sync_write, event, protected_filters) do
    Enum.any?(protected_filters, &Filter.matches_filter?(event, &1))
  end

  defp filters_overlap?(left, right) when is_map(left) and is_map(right) do
    comparable_keys =
      left
      |> comparable_filter_keys(right)
      |> Enum.reject(&(&1 in ["limit", "search", "since", "until"]))

    Enum.all?(
      comparable_keys,
      &filter_constraint_compatible?(Map.get(left, &1), Map.get(right, &1), &1)
    ) and
      filter_ranges_overlap?(left, right)
  end

  defp filter_constraint_compatible?(nil, _right, _key), do: true
  defp filter_constraint_compatible?(_left, nil, _key), do: true

  defp filter_constraint_compatible?(left, right, _key) when is_list(left) and is_list(right) do
    MapSet.disjoint?(MapSet.new(left), MapSet.new(right)) == false
  end

  defp filter_constraint_compatible?(left, right, _key), do: left == right

  defp filter_within_rule?(filter, rule_match) when is_map(filter) and is_map(rule_match) do
    rule_match
    |> Enum.reject(fn {key, _value} -> key in ["since", "until", "limit", "search"] end)
    |> Enum.all?(fn {key, rule_value} ->
      requested_value = Map.get(filter, key)
      requested_constraint_within_rule?(requested_value, rule_value, key)
    end) and filter_range_within_rule?(filter, rule_match)
  end

  defp requested_constraint_within_rule?(nil, _rule_value, _key), do: false

  defp requested_constraint_within_rule?(requested_values, rule_values, _key)
       when is_list(requested_values) and is_list(rule_values) do
    requested_values
    |> MapSet.new()
    |> MapSet.subset?(MapSet.new(rule_values))
  end

  defp requested_constraint_within_rule?(requested_value, rule_value, _key),
    do: requested_value == rule_value

  defp denial_reason(:sync_read), do: :sync_read_not_allowed
  defp denial_reason(:sync_write), do: :sync_write_not_allowed

  defp normalize_context(%RequestContext{} = context), do: {:ok, normalize_pubkeys(context)}
  defp normalize_context(_context), do: {:error, :invalid_context}

  defp normalize_pubkeys(%RequestContext{} = context) do
    normalized_pubkeys =
      context.authenticated_pubkeys
      |> Enum.map(&String.downcase/1)
      |> MapSet.new()

    %RequestContext{context | authenticated_pubkeys: normalized_pubkeys}
  end

  defp normalize_rule(rule) when is_map(rule), do: rule
  defp normalize_rule(_rule), do: %{}

  defp normalize_delete_selector(selector) when is_map(selector), do: selector
  defp normalize_delete_selector(_selector), do: %{}

  defp normalize_list_opts(opts) do
    []
    |> maybe_put_opt(:principal_type, Keyword.get(opts, :principal_type))
    |> maybe_put_opt(:principal, normalize_list_principal(Keyword.get(opts, :principal)))
    |> maybe_put_opt(:capability, Keyword.get(opts, :capability))
  end

  defp normalize_list_principal(nil), do: nil

  defp normalize_list_principal(principal) when is_binary(principal),
    do: String.downcase(principal)

  defp normalize_list_principal(principal), do: principal

  defp maybe_put_opt(opts, _key, nil), do: opts
  defp maybe_put_opt(opts, key, value), do: Keyword.put(opts, key, value)

  defp normalize_capability(capability) do
    case capability do
      :sync_read -> {:ok, :sync_read}
      :sync_write -> {:ok, :sync_write}
      _other -> {:error, :invalid_acl_capability}
    end
  end

  defp protected_filters do
    filters =
      :parrhesia
      |> Application.get_env(:acl, [])
      |> Keyword.get(:protected_filters, [])

    if is_list(filters) and
         Enum.all?(filters, &(match?(%{}, &1) and Filter.validate_filter(&1) == :ok)) do
      {:ok, filters}
    else
      {:error, :invalid_protected_filters}
    end
  end

  defp comparable_filter_keys(left, right) do
    left
    |> Map.keys()
    |> Kernel.++(Map.keys(right))
    |> Enum.uniq()
  end

  defp filter_ranges_overlap?(left, right) do
    since = max(boundary_value(left, "since", :lower), boundary_value(right, "since", :lower))
    until = min(boundary_value(left, "until", :upper), boundary_value(right, "until", :upper))
    since <= until
  end

  defp filter_range_within_rule?(filter, rule_match) do
    requested_since = Map.get(filter, "since")
    requested_until = Map.get(filter, "until")
    rule_since = Map.get(rule_match, "since")
    rule_until = Map.get(rule_match, "until")

    lower_ok? =
      is_nil(rule_since) or (is_integer(requested_since) and requested_since >= rule_since)

    upper_ok? =
      is_nil(rule_until) or (is_integer(requested_until) and requested_until <= rule_until)

    lower_ok? and upper_ok?
  end

  defp boundary_value(filter, key, :lower), do: Map.get(filter, key, 0)
  defp boundary_value(filter, key, :upper), do: Map.get(filter, key, 9_223_372_036_854_775_807)
end
325
lib/parrhesia/api/admin.ex
Normal file
@@ -0,0 +1,325 @@
defmodule Parrhesia.API.Admin do
  @moduledoc """
  Public management API facade.

  This module exposes the DX-friendly control plane for administrative tasks. It wraps
  storage-backed management methods and a set of built-in helpers for ACL, identity, sync,
  and listener management.

  `execute/3` accepts the same method names used by NIP-86 style management endpoints, while
  the dedicated functions (`stats/1`, `health/1`, `list_audit_logs/1`) are easier to call
  from Elixir code.
  """

  alias Parrhesia.API.ACL
  alias Parrhesia.API.Identity
  alias Parrhesia.API.Sync
  alias Parrhesia.Storage
  alias Parrhesia.Web.Endpoint

  @supported_admin_methods ~w(health list_audit_logs stats)
  @supported_acl_methods ~w(acl_grant acl_revoke acl_list)
  @supported_identity_methods ~w(identity_ensure identity_get identity_import identity_rotate)
  @supported_listener_methods ~w(listener_reload)
  @supported_sync_methods ~w(
    sync_get_server
    sync_health
    sync_list_servers
    sync_put_server
    sync_remove_server
    sync_server_stats
    sync_start_server
    sync_stats
    sync_stop_server
    sync_sync_now
  )

  @doc """
  Executes a management method by name.

  Built-in methods include:

  - `supportedmethods`
  - `stats`
  - `health`
  - `list_audit_logs`
  - `acl_grant`, `acl_revoke`, `acl_list`
  - `identity_get`, `identity_ensure`, `identity_import`, `identity_rotate`
  - `listener_reload`
  - `sync_*`

  Unknown methods are delegated to the configured `Parrhesia.Storage.Admin` implementation.
  """
  @spec execute(String.t() | atom(), map(), keyword()) :: {:ok, map()} | {:error, term()}
  def execute(method, params, opts \\ [])

  def execute(method, params, opts) when is_map(params) do
    method_name = normalize_method_name(method)

    case execute_builtin(method_name, params, opts) do
      {:continue, other_method} -> Storage.admin().execute(%{}, other_method, params)
      result -> result
    end
  end

  def execute(method, _params, _opts),
    do: {:error, {:unsupported_method, normalize_method_name(method)}}

  @doc """
  Returns aggregate relay stats plus nested sync stats.
  """
  @spec stats(keyword()) :: {:ok, map()} | {:error, term()}
  def stats(opts \\ []) do
    with {:ok, relay_stats} <- relay_stats(),
         {:ok, sync_stats} <- Sync.sync_stats(opts) do
      {:ok, Map.put(relay_stats, "sync", sync_stats)}
    end
  end

  @doc """
  Returns the overall management health payload.

  The top-level `"status"` is currently derived from sync health, while relay-specific health
  details remain delegated to storage-backed management methods.
  """
  @spec health(keyword()) :: {:ok, map()} | {:error, term()}
  def health(opts \\ []) do
    with {:ok, sync_health} <- Sync.sync_health(opts) do
      {:ok,
       %{
         "status" => overall_health_status(sync_health),
         "sync" => sync_health
       }}
    end
  end

  @doc """
  Lists persisted audit log entries from the configured admin storage backend.

  Supported options are storage-adapter specific. The built-in admin execution path forwards
  `:limit`, `:method`, and `:actor_pubkey`.
  """
  @spec list_audit_logs(keyword()) :: {:ok, [map()]} | {:error, term()}
  def list_audit_logs(opts \\ []) do
    Storage.admin().list_audit_logs(%{}, opts)
  end

  defp acl_grant(params) do
    with :ok <- ACL.grant(params) do
      {:ok, %{"ok" => true}}
    end
  end

  defp acl_revoke(params) do
    with :ok <- ACL.revoke(params) do
      {:ok, %{"ok" => true}}
    end
  end

  defp acl_list(params) do
    with {:ok, rules} <- ACL.list(acl_list_opts(params)) do
      {:ok, %{"rules" => rules}}
    end
  end

  defp acl_list_opts(params) do
    []
    |> maybe_put_opt(:principal_type, fetch_value(params, :principal_type))
    |> maybe_put_opt(:principal, fetch_value(params, :principal))
    |> maybe_put_opt(:capability, fetch_value(params, :capability))
  end

  defp supported_methods do
    storage_supported =
      case Storage.admin().execute(%{}, :supportedmethods, %{}) do
        {:ok, methods} when is_list(methods) -> methods
        {:ok, %{"methods" => methods}} when is_list(methods) -> methods
        _other -> []
      end

    (storage_supported ++
       @supported_admin_methods ++
       @supported_acl_methods ++
       @supported_identity_methods ++ @supported_listener_methods ++ @supported_sync_methods)
    |> Enum.uniq()
    |> Enum.sort()
  end

  defp identity_get(_params), do: Identity.get()

  defp identity_ensure(_params), do: Identity.ensure()

  defp identity_rotate(_params), do: Identity.rotate()

  defp identity_import(params) do
    Identity.import(params)
  end

  defp admin_stats(_params, opts), do: stats(opts)
  defp admin_health(_params, opts), do: health(opts)

  defp admin_list_audit_logs(params, _opts) do
    list_audit_logs(audit_log_opts(params))
  end

  defp listener_reload(params) do
    case normalize_listener_id(fetch_value(params, :id)) do
      :all ->
        Endpoint.reload_all()
        |> ok_result()

      {:ok, listener_id} ->
        listener_id
        |> Endpoint.reload_listener()
        |> ok_result()

      :error ->
        {:error, :not_found}
    end
  end

  defp sync_put_server(params, opts), do: Sync.put_server(params, opts)

  defp sync_remove_server(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         :ok <- Sync.remove_server(server_id, opts) do
      {:ok, %{"ok" => true}}
    end
  end

  defp sync_get_server(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         {:ok, server} <- Sync.get_server(server_id, opts) do
      {:ok, server}
    else
      :error -> {:error, :not_found}
      other -> other
    end
  end

  defp sync_list_servers(_params, opts), do: Sync.list_servers(opts)

  defp sync_start_server(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         :ok <- Sync.start_server(server_id, opts) do
      {:ok, %{"ok" => true}}
    end
  end

  defp sync_stop_server(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         :ok <- Sync.stop_server(server_id, opts) do
      {:ok, %{"ok" => true}}
    end
  end

  defp sync_sync_now(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         :ok <- Sync.sync_now(server_id, opts) do
      {:ok, %{"ok" => true}}
    end
  end

  defp sync_server_stats(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         {:ok, stats} <- Sync.server_stats(server_id, opts) do
      {:ok, stats}
    else
      :error -> {:error, :not_found}
      other -> other
    end
  end

  defp sync_stats(_params, opts), do: Sync.sync_stats(opts)
  defp sync_health(_params, opts), do: Sync.sync_health(opts)

  defp execute_builtin("stats", params, opts), do: admin_stats(params, opts)
  defp execute_builtin("health", params, opts), do: admin_health(params, opts)
  defp execute_builtin("list_audit_logs", params, opts), do: admin_list_audit_logs(params, opts)
  defp execute_builtin("acl_grant", params, _opts), do: acl_grant(params)
  defp execute_builtin("acl_revoke", params, _opts), do: acl_revoke(params)
  defp execute_builtin("acl_list", params, _opts), do: acl_list(params)
  defp execute_builtin("identity_get", params, _opts), do: identity_get(params)
  defp execute_builtin("identity_ensure", params, _opts), do: identity_ensure(params)
  defp execute_builtin("identity_import", params, _opts), do: identity_import(params)
  defp execute_builtin("identity_rotate", params, _opts), do: identity_rotate(params)
  defp execute_builtin("listener_reload", params, _opts), do: listener_reload(params)
  defp execute_builtin("sync_put_server", params, opts), do: sync_put_server(params, opts)
  defp execute_builtin("sync_remove_server", params, opts), do: sync_remove_server(params, opts)
  defp execute_builtin("sync_get_server", params, opts), do: sync_get_server(params, opts)
  defp execute_builtin("sync_list_servers", params, opts), do: sync_list_servers(params, opts)
  defp execute_builtin("sync_start_server", params, opts), do: sync_start_server(params, opts)
  defp execute_builtin("sync_stop_server", params, opts), do: sync_stop_server(params, opts)
  defp execute_builtin("sync_sync_now", params, opts), do: sync_sync_now(params, opts)
  defp execute_builtin("sync_server_stats", params, opts), do: sync_server_stats(params, opts)
  defp execute_builtin("sync_stats", params, opts), do: sync_stats(params, opts)
  defp execute_builtin("sync_health", params, opts), do: sync_health(params, opts)

  defp execute_builtin("supportedmethods", _params, _opts),
    do: {:ok, %{"methods" => supported_methods()}}

  defp execute_builtin(other_method, _params, _opts), do: {:continue, other_method}

  defp relay_stats do
    case Storage.admin().execute(%{}, :stats, %{}) do
      {:ok, stats} when is_map(stats) -> {:ok, stats}
      {:error, {:unsupported_method, _method}} -> {:ok, %{}}
      other -> other
    end
  end

  defp overall_health_status(%{"status" => "degraded"}), do: "degraded"
  defp overall_health_status(_sync_health), do: "ok"

  defp audit_log_opts(params) do
    []
    |> maybe_put_opt(:limit, fetch_value(params, :limit))
    |> maybe_put_opt(:method, fetch_value(params, :method))
    |> maybe_put_opt(:actor_pubkey, fetch_value(params, :actor_pubkey))
  end

  defp maybe_put_opt(opts, _key, nil), do: opts
  defp maybe_put_opt(opts, key, value), do: Keyword.put(opts, key, value)

  defp ok_result(:ok), do: {:ok, %{"ok" => true}}
  defp ok_result({:error, _reason} = error), do: error
  defp ok_result(other), do: other

  defp normalize_listener_id(nil), do: :all

  defp normalize_listener_id(listener_id) when is_atom(listener_id) do
    {:ok, listener_id}
  end

  defp normalize_listener_id(listener_id) when is_binary(listener_id) do
    case Supervisor.which_children(Endpoint) do
      children when is_list(children) ->
        Enum.find_value(children, :error, &match_listener_child(&1, listener_id))

      _other ->
        :error
    end
  end

  defp normalize_listener_id(_listener_id), do: :error

  defp match_listener_child({{:listener, id}, _pid, _type, _modules}, listener_id) do
    normalized_id = Atom.to_string(id)
    if normalized_id == listener_id, do: {:ok, id}, else: false
  end

  defp match_listener_child(_child, _listener_id), do: false

  defp fetch_required_string(map, key) do
    case fetch_value(map, key) do
      value when is_binary(value) and value != "" -> {:ok, value}
      _other -> {:error, {:missing_param, Atom.to_string(key)}}
    end
  end

  defp fetch_value(map, key), do: Map.get(map, key) || Map.get(map, Atom.to_string(key))

  defp normalize_method_name(method) when is_atom(method), do: Atom.to_string(method)
  defp normalize_method_name(method) when is_binary(method), do: method
  defp normalize_method_name(method), do: inspect(method)
end
85
lib/parrhesia/api/auth.ex
Normal file
@@ -0,0 +1,85 @@
defmodule Parrhesia.API.Auth do
  @moduledoc """
  Public helpers for event validation and NIP-98 HTTP authentication.

  This module is intended for callers that need a programmatic API surface:

  - `validate_event/1` returns validator reason atoms.
  - `compute_event_id/1` computes the canonical Nostr event id.
  - `validate_nip98/3` and `validate_nip98/4` turn an `Authorization` header into a
    shared auth context that can be reused by the rest of the API surface.

  For transport-facing validation messages, see `Parrhesia.Protocol.validate_event/1`.
  """

  alias Parrhesia.API.Auth.Context
  alias Parrhesia.API.RequestContext
  alias Parrhesia.Auth.Nip98
  alias Parrhesia.Protocol.EventValidator

  @doc """
  Validates a Nostr event and returns validator-friendly error atoms.

  This is the low-level validation entrypoint used by the API surface. Unlike
  `Parrhesia.Protocol.validate_event/1`, it preserves the raw validator reason so callers
  can branch on it directly.
  """
  @spec validate_event(map()) :: :ok | {:error, term()}
  def validate_event(event), do: EventValidator.validate(event)

  @doc """
  Computes the canonical Nostr event id for an event payload.

  The event does not need to be persisted first. This is useful when building or signing
  events locally.
  """
  @spec compute_event_id(map()) :: String.t()
  def compute_event_id(event), do: EventValidator.compute_id(event)

  @doc """
  Validates a NIP-98 `Authorization` header using default options.
  """
  @spec validate_nip98(String.t() | nil, String.t(), String.t()) ::
          {:ok, Context.t()} | {:error, term()}
  def validate_nip98(authorization, method, url) do
    validate_nip98(authorization, method, url, [])
  end

  @doc """
  Validates a NIP-98 `Authorization` header and returns a shared auth context.

  The returned `Parrhesia.API.Auth.Context` includes:

  - the decoded auth event
  - the authenticated pubkey
  - a `Parrhesia.API.RequestContext` with `caller: :http`

  Supported options are forwarded to `Parrhesia.Auth.Nip98.validate_authorization_header/4`,
  including `:max_age_seconds` and `:replay_cache`.
  """
  @spec validate_nip98(String.t() | nil, String.t(), String.t(), keyword()) ::
          {:ok, Context.t()} | {:error, term()}
  def validate_nip98(authorization, method, url, opts)
      when is_binary(method) and is_binary(url) and is_list(opts) do
    with {:ok, auth_event} <-
           Nip98.validate_authorization_header(authorization, method, url, opts),
         pubkey when is_binary(pubkey) <- Map.get(auth_event, "pubkey") do
      {:ok,
       %Context{
         auth_event: auth_event,
         pubkey: pubkey,
         request_context: %RequestContext{
           authenticated_pubkeys: MapSet.new([pubkey]),
           caller: :http
         },
         metadata: %{
           method: method,
           url: url
         }
       }}
    else
      nil -> {:error, :invalid_event}
      {:error, reason} -> {:error, reason}
    end
  end
end
23
lib/parrhesia/api/auth/context.ex
Normal file
@@ -0,0 +1,23 @@
defmodule Parrhesia.API.Auth.Context do
  @moduledoc """
  Authenticated request details returned by shared auth helpers.

  This is the higher-level result returned by `Parrhesia.API.Auth.validate_nip98/3` and
  `validate_nip98/4`. The nested `request_context` is ready to be passed into the rest of the
  public API surface.
  """

  alias Parrhesia.API.RequestContext

  defstruct auth_event: nil,
            pubkey: nil,
            request_context: %RequestContext{},
            metadata: %{}

  @type t :: %__MODULE__{
          auth_event: map() | nil,
          pubkey: String.t() | nil,
          request_context: RequestContext.t(),
          metadata: map()
        }
end
470
lib/parrhesia/api/events.ex
Normal file
@@ -0,0 +1,470 @@
defmodule Parrhesia.API.Events do
  @moduledoc """
  Canonical event publish, query, and count API.

  This is the main in-process API for working with Nostr events. It applies the same core
  validation and policy checks used by the relay edge, but without going through a socket or
  HTTP transport.

  All public functions expect `opts[:context]` to contain a `Parrhesia.API.RequestContext`.
  That context drives authorization, caller attribution, and downstream policy behavior.

  `publish/2` intentionally returns `{:ok, %PublishResult{accepted: false}}` for policy and
  storage rejections so callers can mirror relay `OK` semantics without treating a rejected
  event as a process error.
  """
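  # Usage sketch (illustrative addition, not part of the original file): publish
  # an already-signed event on behalf of its authenticated author.
  #
  #     context = %Parrhesia.API.RequestContext{
  #       authenticated_pubkeys: MapSet.new([event["pubkey"]])
  #     }
  #
  #     {:ok, %Parrhesia.API.Events.PublishResult{accepted: accepted?, message: message}} =
  #       Parrhesia.API.Events.publish(event, context: context)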
  alias Parrhesia.API.Events.PublishResult
  alias Parrhesia.API.RequestContext
  alias Parrhesia.Fanout.Dispatcher
  alias Parrhesia.Fanout.MultiNode
  alias Parrhesia.NIP43
  alias Parrhesia.Policy.EventPolicy
  alias Parrhesia.Protocol
  alias Parrhesia.Protocol.Filter
  alias Parrhesia.Storage
  alias Parrhesia.Telemetry

  @default_max_event_bytes 262_144

  @marmot_kinds MapSet.new([443, 444, 445, 1059, 10_050, 10_051, 446, 447, 448, 449])

  @doc """
  Validates, authorizes, persists, and fans out an event.

  Required options:

  - `:context` - a `Parrhesia.API.RequestContext`

  Supported options:

  - `:max_event_bytes` - overrides the configured max encoded event size
  - `:path`, `:private_key`, `:configured_private_key` - forwarded to the NIP-43 helper flow

  Return semantics:

  - `{:ok, %PublishResult{accepted: true}}` for accepted events
  - `{:ok, %PublishResult{accepted: false}}` for rejected or duplicate events
  - `{:error, :invalid_context}` only when the call itself is malformed
  """
  @spec publish(map(), keyword()) :: {:ok, PublishResult.t()} | {:error, term()}
  def publish(event, opts \\ [])

  def publish(event, opts) when is_map(event) and is_list(opts) do
    started_at = System.monotonic_time()
    event_id = Map.get(event, "id", "")
    telemetry_metadata = telemetry_metadata_for_event(event)

    with {:ok, context} <- fetch_context(opts),
         :ok <- validate_event_payload_size(event, max_event_bytes(opts)),
         :ok <- Protocol.validate_event(event),
         :ok <- EventPolicy.authorize_write(event, context.authenticated_pubkeys, context),
         {:ok, publish_state} <- NIP43.prepare_publish(event, nip43_opts(opts, context)),
         {:ok, _stored, message} <- persist_event(event) do
      Telemetry.emit(
        [:parrhesia, :ingest, :stop],
        %{duration: System.monotonic_time() - started_at},
        telemetry_metadata
      )

      emit_ingest_result(telemetry_metadata, :accepted, :accepted)

      message =
        case NIP43.finalize_publish(event, publish_state, nip43_opts(opts, context)) do
          {:ok, override} when is_binary(override) -> override
          :ok -> message
        end

      Dispatcher.dispatch(event)
      maybe_publish_multi_node(event)

      {:ok,
       %PublishResult{
         event_id: event_id,
         accepted: true,
         message: message,
         reason: nil
       }}
    else
      {:error, :invalid_context} = error ->
        emit_ingest_result(telemetry_metadata, :rejected, :invalid_context)
        error

      {:error, reason} ->
        emit_ingest_result(telemetry_metadata, :rejected, reason)

        {:ok,
         %PublishResult{
           event_id: event_id,
           accepted: false,
           message: error_message_for_publish_failure(reason),
           reason: reason
         }}
    end
  end

  def publish(_event, _opts), do: {:error, :invalid_event}

  @doc """
  Queries stored events plus any dynamic NIP-43 events visible to the caller.

  Required options:

  - `:context` - a `Parrhesia.API.RequestContext`

  Supported options:

  - `:max_filter_limit` - overrides the configured per-filter limit
  - `:validate_filters?` - skips filter validation when `false`
  - `:authorize_read?` - skips read policy checks when `false`

  The skip flags are primarily for internal composition, such as `Parrhesia.API.Stream`.
  External callers should normally leave them enabled.
  """
  @spec query([map()], keyword()) :: {:ok, [map()]} | {:error, term()}
  def query(filters, opts \\ [])

  def query(filters, opts) when is_list(filters) and is_list(opts) do
    started_at = System.monotonic_time()
    telemetry_metadata = telemetry_metadata_for_filters(filters, :query)

    with {:ok, context} <- fetch_context(opts),
         :ok <- maybe_validate_filters(filters, opts),
         :ok <- maybe_authorize_read(filters, context, opts),
         {:ok, events} <- Storage.events().query(%{}, filters, storage_query_opts(context, opts)) do
      events = NIP43.dynamic_events(filters, nip43_opts(opts, context)) ++ events

      Telemetry.emit(
        [:parrhesia, :query, :stop],
        %{duration: System.monotonic_time() - started_at, result_count: length(events)},
        telemetry_metadata
      )

      emit_query_result(telemetry_metadata, :ok)

      {:ok, events}
    else
      {:error, reason} = error ->
        emit_query_result(telemetry_metadata, :error, reason)
        error
    end
  end

  def query(_filters, _opts), do: {:error, :invalid_filters}

  @doc """
  Counts events matching the given filters.

  Required options:

  - `:context` - a `Parrhesia.API.RequestContext`

  Supported options:

  - `:validate_filters?` - skips filter validation when `false`
  - `:authorize_read?` - skips read policy checks when `false`
  - `:options` - when set to a map, returns a NIP-45-style payload instead of a bare integer

  When `opts[:options]` is a map, the result shape is `%{"count" => count, "approximate" => false}`.
  If `opts[:options]["hll"]` is `true` and the feature is enabled, an `"hll"` field is included.
  """
  @spec count([map()], keyword()) :: {:ok, non_neg_integer() | map()} | {:error, term()}
  def count(filters, opts \\ [])

  def count(filters, opts) when is_list(filters) and is_list(opts) do
    started_at = System.monotonic_time()
    telemetry_metadata = telemetry_metadata_for_filters(filters, :count)

    with {:ok, context} <- fetch_context(opts),
         :ok <- maybe_validate_filters(filters, opts),
         :ok <- maybe_authorize_read(filters, context, opts),
         {:ok, count} <-
           Storage.events().count(%{}, filters, requester_pubkeys: requester_pubkeys(context)),
         count <- count + NIP43.dynamic_count(filters, nip43_opts(opts, context)),
         {:ok, result} <- maybe_build_count_result(filters, count, Keyword.get(opts, :options)) do
      Telemetry.emit(
        [:parrhesia, :query, :stop],
        %{duration: System.monotonic_time() - started_at, result_count: count},
        telemetry_metadata
      )

      emit_query_result(telemetry_metadata, :ok)

      {:ok, result}
    else
      {:error, reason} = error ->
        emit_query_result(telemetry_metadata, :error, reason)
        error
    end
  end

  def count(_filters, _opts), do: {:error, :invalid_filters}

  defp maybe_validate_filters(filters, opts) do
    if Keyword.get(opts, :validate_filters?, true) do
      Filter.validate_filters(filters)
    else
      :ok
    end
  end

  defp maybe_authorize_read(filters, context, opts) do
    if Keyword.get(opts, :authorize_read?, true) do
      EventPolicy.authorize_read(filters, context.authenticated_pubkeys, context)
    else
      :ok
    end
  end

  defp storage_query_opts(context, opts) do
    [
      max_filter_limit:
        Keyword.get(opts, :max_filter_limit, Parrhesia.Config.get([:limits, :max_filter_limit])),
      requester_pubkeys: requester_pubkeys(context)
    ]
  end

  defp requester_pubkeys(%RequestContext{} = context),
    do: MapSet.to_list(context.authenticated_pubkeys)

  defp maybe_build_count_result(_filters, count, nil) when is_integer(count), do: {:ok, count}

  defp maybe_build_count_result(filters, count, options)
       when is_integer(count) and is_map(options) do
    build_count_payload(filters, count, options)
  end

  defp maybe_build_count_result(_filters, count, _options) when is_integer(count),
    do: {:ok, count}

  defp maybe_build_count_result(_filters, count, _options), do: {:ok, count}

  defp build_count_payload(filters, count, options) do
    include_hll? =
      Map.get(options, "hll", false) and Parrhesia.Config.get([:features, :nip_45_count], true)

    payload = %{"count" => count, "approximate" => false}

    payload =
      if include_hll? do
        Map.put(payload, "hll", generate_hll_payload(filters, count))
      else
        payload
      end

    {:ok, payload}
  end

  defp generate_hll_payload(filters, count) do
    filters
    |> JSON.encode!()
    |> then(&"#{&1}:#{count}")
    |> then(&:crypto.hash(:sha256, &1))
    |> Base.encode64()
  end

  defp persist_event(event) do
    kind = Map.get(event, "kind")

    cond do
      kind in [5, 62] -> persist_control_event(kind, event)
      ephemeral_kind?(kind) -> persist_ephemeral_event()
      true -> persist_regular_event(event)
    end
  end

  defp persist_control_event(5, event) do
    with {:ok, deleted_count} <- Storage.events().delete_by_request(%{}, event) do
      {:ok, deleted_count, "ok: deletion request processed"}
    end
  end

  defp persist_control_event(62, event) do
    with {:ok, deleted_count} <- Storage.events().vanish(%{}, event) do
      {:ok, deleted_count, "ok: vanish request processed"}
    end
  end

  defp persist_ephemeral_event do
    if accept_ephemeral_events?() do
      {:ok, :ephemeral, "ok: ephemeral event accepted"}
    else
      {:error, :ephemeral_events_disabled}
    end
  end

  defp persist_regular_event(event) do
    case Storage.events().put_event(%{}, event) do
      {:ok, persisted_event} -> {:ok, persisted_event, "ok: event stored"}
      {:error, :duplicate_event} -> {:error, :duplicate_event}
      {:error, reason} -> {:error, reason}
    end
  end

  defp maybe_publish_multi_node(event) do
    MultiNode.publish(event)
    :ok
  catch
    :exit, _reason -> :ok
  end

  defp telemetry_metadata_for_event(event) do
    %{traffic_class: traffic_class_for_event(event)}
  end

  defp telemetry_metadata_for_filters(filters, operation) do
    %{traffic_class: traffic_class_for_filters(filters), operation: operation}
  end

  defp traffic_class_for_filters(filters) do
    if Enum.any?(filters, &marmot_filter?/1) do
      :marmot
    else
      :generic
    end
  end

  defp marmot_filter?(filter) when is_map(filter) do
    has_marmot_kind? =
      case Map.get(filter, "kinds") do
        kinds when is_list(kinds) -> Enum.any?(kinds, &MapSet.member?(@marmot_kinds, &1))
        _other -> false
      end

    has_marmot_kind? or Map.has_key?(filter, "#h") or Map.has_key?(filter, "#i")
  end

  defp marmot_filter?(_filter), do: false

  defp traffic_class_for_event(event) when is_map(event) do
    if MapSet.member?(@marmot_kinds, Map.get(event, "kind")) do
      :marmot
    else
      :generic
    end
  end

  defp traffic_class_for_event(_event), do: :generic

  defp emit_ingest_result(metadata, outcome, reason) do
    Telemetry.emit(
      [:parrhesia, :ingest, :result],
      %{count: 1},
      Map.merge(metadata, %{outcome: outcome, reason: normalize_reason(reason)})
    )
  end

  defp emit_query_result(metadata, outcome, reason \\ nil) do
    Telemetry.emit(
      [:parrhesia, :query, :result],
      %{count: 1},
      Map.merge(
        metadata,
        %{outcome: outcome, reason: normalize_reason(reason || outcome)}
      )
    )
  end

  defp normalize_reason(reason) when is_atom(reason), do: reason
  defp normalize_reason(reason) when is_binary(reason), do: reason
  defp normalize_reason(nil), do: :none
  defp normalize_reason(_reason), do: :unknown

  defp fetch_context(opts) do
    case Keyword.get(opts, :context) do
      %RequestContext{} = context -> {:ok, context}
      _other -> {:error, :invalid_context}
    end
  end

  defp nip43_opts(opts, %RequestContext{} = context) do
    [context: context, relay_url: Application.get_env(:parrhesia, :relay_url)]
    |> Kernel.++(Keyword.take(opts, [:path, :private_key, :configured_private_key]))
  end

  defp error_message_for_publish_failure(:duplicate_event),
    do: "duplicate: event already stored"

  defp error_message_for_publish_failure(:event_too_large),
    do: "invalid: event exceeds max event size"

  defp error_message_for_publish_failure(:ephemeral_events_disabled),
    do: "blocked: ephemeral events are disabled"

  defp error_message_for_publish_failure(reason)
       when reason in [
              :auth_required,
              :pubkey_not_allowed,
              :restricted_giftwrap,
              :sync_write_not_allowed,
              :protected_event_requires_auth,
              :protected_event_pubkey_mismatch,
              :pow_below_minimum,
              :pubkey_banned,
              :event_banned,
              :media_metadata_tags_exceeded,
              :media_metadata_tag_value_too_large,
              :media_metadata_url_too_long,
              :media_metadata_invalid_url,
              :media_metadata_invalid_hash,
              :media_metadata_invalid_mime,
              :media_metadata_mime_not_allowed,
              :media_metadata_unsupported_version,
              :push_notification_relay_tags_exceeded,
              :push_notification_payload_too_large,
              :push_notification_replay_window_exceeded,
              :push_notification_missing_expiration,
              :push_notification_expiration_too_far,
              :push_notification_server_recipients_exceeded
            ],
       do: EventPolicy.error_message(reason)

  defp error_message_for_publish_failure(reason) when is_binary(reason), do: reason
  defp error_message_for_publish_failure(reason), do: "error: #{inspect(reason)}"

  defp validate_event_payload_size(event, max_event_bytes)
       when is_map(event) and is_integer(max_event_bytes) and max_event_bytes > 0 do
    if byte_size(JSON.encode!(event)) <= max_event_bytes do
      :ok
    else
      {:error, :event_too_large}
    end
  end

  defp validate_event_payload_size(_event, _max_event_bytes), do: :ok

  defp max_event_bytes(opts) do
    opts
    |> Keyword.get(:max_event_bytes, configured_max_event_bytes())
    |> normalize_max_event_bytes()
  end

  defp normalize_max_event_bytes(value) when is_integer(value) and value > 0, do: value
  defp normalize_max_event_bytes(_value), do: configured_max_event_bytes()

  defp configured_max_event_bytes do
    :parrhesia
    |> Application.get_env(:limits, [])
    |> Keyword.get(:max_event_bytes, @default_max_event_bytes)
  end

  defp ephemeral_kind?(kind) when is_integer(kind), do: kind >= 20_000 and kind < 30_000
  defp ephemeral_kind?(_kind), do: false

  defp accept_ephemeral_events? do
    :parrhesia
    |> Application.get_env(:policies, [])
    |> Keyword.get(:accept_ephemeral_events, true)
  end
end
22
lib/parrhesia/api/events/publish_result.ex
Normal file
@@ -0,0 +1,22 @@
defmodule Parrhesia.API.Events.PublishResult do
  @moduledoc """
  Result shape for event publish attempts.

  This mirrors relay `OK` semantics:

  - `accepted: true` means the event was accepted
  - `accepted: false` means the event was rejected or identified as a duplicate

  The surrounding call still returns `{:ok, result}` in both cases so callers can surface the
  rejection message without treating it as a transport or process failure.
  """

  defstruct [:event_id, :accepted, :message, :reason]

  @type t :: %__MODULE__{
          event_id: String.t(),
          accepted: boolean(),
          message: String.t(),
          reason: term()
        }
end
298
lib/parrhesia/api/identity.ex
Normal file
@@ -0,0 +1,298 @@
defmodule Parrhesia.API.Identity do
  @moduledoc """
  Server-auth identity management.

  Parrhesia uses a single server identity for flows that need the relay to sign events or
  prove control of a pubkey.

  Identity resolution follows this order:

  1. `opts[:private_key]` or `opts[:configured_private_key]`
  2. `Application.get_env(:parrhesia, :identity)`
  3. the persisted file on disk

  Supported options across this module:

  - `:path` - overrides the identity file path
  - `:private_key` / `:configured_private_key` - uses an explicit hex secret key

  A configured private key is treated as read-only input and therefore cannot be rotated.
  """
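  # Usage sketch (illustrative addition, not part of the original file): sign a
  # relay-authored event, e.g. a NIP-42 style auth event, with the server identity.
  #
  #     {:ok, signed} =
  #       Parrhesia.API.Identity.sign_event(%{
  #         "created_at" => System.system_time(:second),
  #         "kind" => 22242,
  #         "tags" => [],
  #         "content" => ""
  #       })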
  alias Parrhesia.API.Auth

  @typedoc """
  Public identity metadata returned to callers.
  """
  @type identity_metadata :: %{
          pubkey: String.t(),
          source: :configured | :persisted | :generated | :imported
        }

  @doc """
  Returns the current server identity metadata.

  This does not generate a new identity. If no configured or persisted identity exists, it
  returns `{:error, :identity_not_found}`.
  """
  @spec get(keyword()) :: {:ok, identity_metadata()} | {:error, term()}
  def get(opts \\ []) do
    with {:ok, identity} <- fetch_existing_identity(opts) do
      {:ok, public_identity(identity)}
    end
  end

  @doc """
  Returns the current identity, generating and persisting one when necessary.
  """
  @spec ensure(keyword()) :: {:ok, identity_metadata()} | {:error, term()}
  def ensure(opts \\ []) do
    with {:ok, identity} <- ensure_identity(opts) do
      {:ok, public_identity(identity)}
    end
  end

  @doc """
  Imports an explicit secret key and persists it as the server identity.

  The input map must contain `:secret_key` or `"secret_key"` as a 64-character lowercase or
  uppercase hex string.
  """
  @spec import(map(), keyword()) :: {:ok, identity_metadata()} | {:error, term()}
  def import(identity, opts \\ [])

  def import(identity, opts) when is_map(identity) do
    with {:ok, secret_key} <- fetch_secret_key(identity),
         {:ok, normalized_identity} <- build_identity(secret_key, :imported),
         :ok <- persist_identity(normalized_identity, opts) do
      {:ok, public_identity(normalized_identity)}
    end
  end

  def import(_identity, _opts), do: {:error, :invalid_identity}

  @doc """
  Generates and persists a fresh server identity.

  Rotation is rejected with `{:error, :configured_identity_cannot_rotate}` when the active
  identity comes from configuration rather than the persisted file.
  """
  @spec rotate(keyword()) :: {:ok, identity_metadata()} | {:error, term()}
  def rotate(opts \\ []) do
    with :ok <- ensure_rotation_allowed(opts),
         {:ok, identity} <- generate_identity(:generated),
         :ok <- persist_identity(identity, opts) do
      {:ok, public_identity(identity)}
    end
  end

  @doc """
  Signs an event with the current server identity.

  The incoming event must already include the fields required to compute a Nostr id:

  - `"created_at"`
  - `"kind"`
  - `"tags"`
  - `"content"`

  On success the returned event includes `"pubkey"`, `"id"`, and `"sig"`.
  """
  @spec sign_event(map(), keyword()) :: {:ok, map()} | {:error, term()}
  def sign_event(event, opts \\ [])

  def sign_event(event, opts) when is_map(event) and is_list(opts) do
    with :ok <- validate_signable_event(event),
         {:ok, identity} <- ensure_identity(opts),
         signed_event <- attach_signature(event, identity) do
      {:ok, signed_event}
    end
  end

  def sign_event(_event, _opts), do: {:error, :invalid_event}

  @doc """
  Returns the default filesystem path for the persisted server identity.
  """
  def default_path do
    Path.join([default_data_dir(), "server_identity.json"])
  end

  defp ensure_identity(opts) do
    case fetch_existing_identity(opts) do
      {:ok, identity} ->
        {:ok, identity}

      {:error, :identity_not_found} ->
        with {:ok, identity} <- generate_identity(:generated),
             :ok <- persist_identity(identity, opts) do
          {:ok, identity}
        end

      {:error, reason} ->
        {:error, reason}
    end
  end

  defp fetch_existing_identity(opts) do
    if configured_private_key = configured_private_key(opts) do
      build_identity(configured_private_key, :configured)
    else
      read_persisted_identity(opts)
    end
  end

  defp ensure_rotation_allowed(opts) do
    if configured_private_key(opts) do
      {:error, :configured_identity_cannot_rotate}
    else
      :ok
    end
  end

  defp validate_signable_event(event) do
    signable =
      is_integer(Map.get(event, "created_at")) and
        is_integer(Map.get(event, "kind")) and
        is_list(Map.get(event, "tags")) and
        is_binary(Map.get(event, "content", ""))

    if signable, do: :ok, else: {:error, :invalid_event}
  end

  defp attach_signature(event, identity) do
    unsigned_event =
      event
      |> Map.put("pubkey", identity.pubkey)
      |> Map.put("sig", String.duplicate("0", 128))

    event_id = Auth.compute_event_id(unsigned_event)

    signature =
      event_id
      |> Base.decode16!(case: :lower)
      |> Secp256k1.schnorr_sign(identity.secret_key)
      |> Base.encode16(case: :lower)

    unsigned_event
    |> Map.put("id", event_id)
    |> Map.put("sig", signature)
  end

  defp read_persisted_identity(opts) do
    path = identity_path(opts)

    case File.read(path) do
      {:ok, payload} ->
        with {:ok, decoded} <- JSON.decode(payload),
             {:ok, secret_key} <- fetch_secret_key(decoded),
             {:ok, identity} <- build_identity(secret_key, :persisted) do
          {:ok, identity}
        else
          {:error, reason} -> {:error, reason}
        end

      {:error, :enoent} ->
        {:error, :identity_not_found}

      {:error, reason} ->
        {:error, reason}
    end
  end

  defp persist_identity(identity, opts) do
    path = identity_path(opts)
    temp_path = path <> ".tmp"

    with :ok <- File.mkdir_p(Path.dirname(path)),
         :ok <- File.write(temp_path, JSON.encode!(persisted_identity(identity))),
         :ok <- File.rename(temp_path, path) do
      :ok
    else
      {:error, reason} ->
        _ = File.rm(temp_path)
        {:error, reason}
    end
  end

  defp persisted_identity(identity) do
    %{
      "secret_key" => Base.encode16(identity.secret_key, case: :lower),
      "pubkey" => identity.pubkey
    }
  end

  defp generate_identity(source) do
    {secret_key, pubkey} = Secp256k1.keypair(:xonly)

    {:ok,
     %{
       secret_key: secret_key,
       pubkey: Base.encode16(pubkey, case: :lower),
       source: source
     }}
  rescue
    _error -> {:error, :identity_generation_failed}
  end

  defp build_identity(secret_key_hex, source) when is_binary(secret_key_hex) do
    with {:ok, secret_key} <- decode_secret_key(secret_key_hex),
         pubkey <- Secp256k1.pubkey(secret_key, :xonly) do
      {:ok,
       %{
         secret_key: secret_key,
         pubkey: Base.encode16(pubkey, case: :lower),
         source: source
|
||||
}}
|
||||
end
|
||||
rescue
|
||||
_error -> {:error, :invalid_secret_key}
|
||||
end
|
||||
|
||||
defp decode_secret_key(secret_key_hex) when is_binary(secret_key_hex) do
|
||||
normalized = String.downcase(secret_key_hex)
|
||||
|
||||
case Base.decode16(normalized, case: :lower) do
|
||||
{:ok, <<_::256>> = secret_key} -> {:ok, secret_key}
|
||||
_other -> {:error, :invalid_secret_key}
|
||||
end
|
||||
end
|
||||
|
||||
defp fetch_secret_key(identity) when is_map(identity) do
|
||||
case Map.get(identity, :secret_key) || Map.get(identity, "secret_key") do
|
||||
secret_key when is_binary(secret_key) -> {:ok, secret_key}
|
||||
_other -> {:error, :invalid_identity}
|
||||
end
|
||||
end
|
||||
|
||||
defp configured_private_key(opts) do
|
||||
opts[:private_key] || opts[:configured_private_key] || config_value(:private_key)
|
||||
end
|
||||
|
||||
defp identity_path(opts) do
|
||||
opts[:path] || config_value(:path) || default_path()
|
||||
end
|
||||
|
||||
defp public_identity(identity) do
|
||||
%{
|
||||
pubkey: identity.pubkey,
|
||||
source: identity.source
|
||||
}
|
||||
end
|
||||
|
||||
defp config_value(key) do
|
||||
:parrhesia
|
||||
|> Application.get_env(:identity, [])
|
||||
|> Keyword.get(key)
|
||||
end
|
||||
|
||||
defp default_data_dir do
|
||||
base_dir =
|
||||
System.get_env("XDG_DATA_HOME") ||
|
||||
Path.join(System.user_home!(), ".local/share")
|
||||
|
||||
Path.join(base_dir, "parrhesia")
|
||||
end
|
||||
end
|
||||
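The signing flow above composes directly: `ensure/1` lazily materializes a keypair, and `sign_event/2` stamps `"pubkey"`, `"id"`, and `"sig"` onto a caller-supplied event skeleton. A minimal usage sketch, assuming the application is started so identity config is loaded (the kind and content values are illustrative, not from the diff):

```elixir
event = %{
  "created_at" => System.os_time(:second),
  "kind" => 1,
  "tags" => [],
  "content" => "hello from the server identity"
}

# Generates and persists an identity on first use, then signs.
{:ok, signed} = Parrhesia.API.Identity.sign_event(event)

# A 64-byte Schnorr signature hex-encodes to 128 characters.
true = byte_size(signed["sig"]) == 128
```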
25
lib/parrhesia/api/identity/manager.ex
Normal file
25
lib/parrhesia/api/identity/manager.ex
Normal file
@@ -0,0 +1,25 @@
defmodule Parrhesia.API.Identity.Manager do
  @moduledoc false

  use GenServer

  alias Parrhesia.API.Identity

  require Logger

  def start_link(opts \\ []) do
    GenServer.start_link(__MODULE__, opts, name: __MODULE__)
  end

  @impl true
  def init(_opts) do
    case Identity.ensure() do
      {:ok, _identity} ->
        {:ok, %{}}

      {:error, reason} ->
        Logger.error("failed to ensure server identity: #{inspect(reason)}")
        {:ok, %{}}
    end
  end
end
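`Identity.Manager` is a thin boot-time hook: its only job is to force `Identity.ensure/0` once during `init/1`, logging instead of crashing when that fails. A plausible wiring into a supervision tree; the diff does not show where it is actually started, so this placement is an assumption:

```elixir
# Hypothetical placement; the real child list lives elsewhere in the tree.
children = [
  Parrhesia.API.Identity.Manager
]

{:ok, _sup} = Supervisor.start_link(children, strategy: :one_for_one)
```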
44
lib/parrhesia/api/request_context.ex
Normal file
44
lib/parrhesia/api/request_context.ex
Normal file
@@ -0,0 +1,44 @@
defmodule Parrhesia.API.RequestContext do
  @moduledoc """
  Shared request context used across API and policy surfaces.

  This struct carries caller identity and transport metadata through authorization and storage
  boundaries.

  The most important field for external callers is `authenticated_pubkeys`. For example:

  - `Parrhesia.API.Events` uses it for read and write policy checks
  - `Parrhesia.API.Stream` uses it for subscription authorization
  - `Parrhesia.API.ACL` uses it when evaluating protected sync traffic
  """

  defstruct authenticated_pubkeys: MapSet.new(),
            actor: nil,
            caller: :local,
            remote_ip: nil,
            subscription_id: nil,
            peer_id: nil,
            transport_identity: nil,
            metadata: %{}

  @type t :: %__MODULE__{
          authenticated_pubkeys: MapSet.t(String.t()),
          actor: term(),
          caller: atom(),
          remote_ip: String.t() | nil,
          subscription_id: String.t() | nil,
          peer_id: String.t() | nil,
          transport_identity: map() | nil,
          metadata: map()
        }

  @doc """
  Merges arbitrary metadata into the context.

  Existing keys are overwritten by the incoming map.
  """
  @spec put_metadata(t(), map()) :: t()
  def put_metadata(%__MODULE__{} = context, metadata) when is_map(metadata) do
    %__MODULE__{context | metadata: Map.merge(context.metadata, metadata)}
  end
end
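Because `RequestContext` is a plain struct with sensible defaults, callers can build one literally and layer metadata on top with `put_metadata/2`. A minimal sketch; the pubkey is a placeholder:

```elixir
alias Parrhesia.API.RequestContext

context =
  %RequestContext{
    authenticated_pubkeys: MapSet.new([String.duplicate("ab", 32)]),
    caller: :http,
    remote_ip: "203.0.113.7"
  }
  |> RequestContext.put_metadata(%{"user_agent" => "example-client/1.0"})
```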
121
lib/parrhesia/api/stream.ex
Normal file
121
lib/parrhesia/api/stream.ex
Normal file
@@ -0,0 +1,121 @@
defmodule Parrhesia.API.Stream do
  @moduledoc """
  In-process subscription API with relay-equivalent catch-up and live fanout semantics.

  Subscriptions are process-local bridges. After subscribing, the caller receives messages in
  the same order a relay client would expect:

  - `{:parrhesia, :event, ref, subscription_id, event}` for catch-up and live events
  - `{:parrhesia, :eose, ref, subscription_id}` after the initial replay finishes

  This API requires a `Parrhesia.API.RequestContext` so read policies are applied exactly as
  they would be for a transport-backed subscriber.
  """

  alias Parrhesia.API.Events
  alias Parrhesia.API.RequestContext
  alias Parrhesia.API.Stream.Subscription
  alias Parrhesia.Policy.EventPolicy
  alias Parrhesia.Protocol.Filter

  @doc """
  Starts an in-process subscription for a subscriber pid.

  `opts[:context]` must be a `Parrhesia.API.RequestContext`.

  On success the returned reference is both:

  - the subscription handle used by `unsubscribe/1`
  - the value embedded in emitted subscriber messages
  """
  @spec subscribe(pid(), String.t(), [map()], keyword()) :: {:ok, reference()} | {:error, term()}
  def subscribe(subscriber, subscription_id, filters, opts \\ [])

  def subscribe(subscriber, subscription_id, filters, opts)
      when is_pid(subscriber) and is_binary(subscription_id) and is_list(filters) and
             is_list(opts) do
    with {:ok, context} <- fetch_context(opts),
         :ok <- Filter.validate_filters(filters),
         :ok <-
           EventPolicy.authorize_read(
             filters,
             context.authenticated_pubkeys,
             stream_context(context, subscription_id)
           ) do
      ref = make_ref()

      case DynamicSupervisor.start_child(
             Parrhesia.API.Stream.Supervisor,
             {Subscription,
              ref: ref, subscriber: subscriber, subscription_id: subscription_id, filters: filters}
           ) do
        {:ok, pid} ->
          finalize_subscription(pid, ref, filters, stream_context(context, subscription_id))

        {:error, reason} ->
          {:error, reason}
      end
    end
  end

  def subscribe(_subscriber, _subscription_id, _filters, _opts),
    do: {:error, :invalid_subscription}

  @doc """
  Stops a subscription previously created with `subscribe/4`.

  This function is idempotent. Unknown or already-stopped references return `:ok`.
  """
  @spec unsubscribe(reference()) :: :ok
  def unsubscribe(ref) when is_reference(ref) do
    case Registry.lookup(Parrhesia.API.Stream.Registry, ref) do
      [{pid, _value}] ->
        try do
          :ok = GenServer.stop(pid, :normal)
        catch
          :exit, _reason -> :ok
        end

        :ok

      [] ->
        :ok
    end
  end

  def unsubscribe(_ref), do: :ok

  defp fetch_context(opts) do
    case Keyword.get(opts, :context) do
      %RequestContext{} = context -> {:ok, context}
      _other -> {:error, :invalid_context}
    end
  end

  defp finalize_subscription(pid, ref, filters, context) do
    with {:ok, initial_events} <-
           Events.query(filters,
             context: context,
             validate_filters?: false,
             authorize_read?: false
           ),
         :ok <- Subscription.deliver_initial(pid, initial_events) do
      {:ok, ref}
    else
      {:error, reason} ->
        _ = safe_stop_subscription(pid)
        {:error, reason}
    end
  end

  defp safe_stop_subscription(pid) do
    GenServer.stop(pid, :shutdown)
    :ok
  catch
    :exit, _reason -> :ok
  end

  defp stream_context(%RequestContext{} = context, subscription_id) do
    %RequestContext{context | subscription_id: subscription_id}
  end
end
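Because the module guarantees catch-up events, then `:eose`, then live fanout, a plain `receive` loop is enough on the consumer side. A hedged usage sketch, assuming a permissive read policy and an already-running application; the filter and subscription id are illustrative:

```elixir
alias Parrhesia.API.RequestContext

context = %RequestContext{caller: :local}

{:ok, ref} =
  Parrhesia.API.Stream.subscribe(self(), "sub-1", [%{"kinds" => [1]}], context: context)

# Drain the catch-up replay until EOSE; live events arrive the same way afterwards.
drain = fn drain ->
  receive do
    {:parrhesia, :event, ^ref, "sub-1", event} ->
      IO.inspect(event["id"], label: "event")
      drain.(drain)

    {:parrhesia, :eose, ^ref, "sub-1"} ->
      :caught_up
  end
end

:caught_up = drain.(drain)
:ok = Parrhesia.API.Stream.unsubscribe(ref)
```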
192
lib/parrhesia/api/stream/subscription.ex
Normal file
192
lib/parrhesia/api/stream/subscription.ex
Normal file
@@ -0,0 +1,192 @@
defmodule Parrhesia.API.Stream.Subscription do
  @moduledoc false

  use GenServer, restart: :temporary

  alias Parrhesia.Protocol.Filter
  alias Parrhesia.Subscriptions.Index
  alias Parrhesia.Telemetry

  defstruct [
    :ref,
    :subscriber,
    :subscriber_monitor_ref,
    :subscription_id,
    :filters,
    ready?: false,
    buffered_events: []
  ]

  @type t :: %__MODULE__{
          ref: reference(),
          subscriber: pid(),
          subscriber_monitor_ref: reference(),
          subscription_id: String.t(),
          filters: [map()],
          ready?: boolean(),
          buffered_events: [map()]
        }

  @spec start_link(keyword()) :: GenServer.on_start()
  def start_link(opts) when is_list(opts) do
    ref = Keyword.fetch!(opts, :ref)

    GenServer.start_link(__MODULE__, opts, name: via_tuple(ref))
  end

  @spec deliver_initial(GenServer.server(), [map()]) :: :ok | {:error, term()}
  def deliver_initial(server, initial_events) when is_list(initial_events) do
    GenServer.call(server, {:deliver_initial, initial_events})
  end

  @impl true
  def init(opts) do
    with {:ok, subscriber} <- fetch_subscriber(opts),
         {:ok, subscription_id} <- fetch_subscription_id(opts),
         {:ok, filters} <- fetch_filters(opts),
         :ok <-
           maybe_upsert_index_subscription(subscription_index(opts), subscription_id, filters) do
      monitor_ref = Process.monitor(subscriber)

      state = %__MODULE__{
        ref: Keyword.fetch!(opts, :ref),
        subscriber: subscriber,
        subscriber_monitor_ref: monitor_ref,
        subscription_id: subscription_id,
        filters: filters,
        ready?: false,
        buffered_events: []
      }

      Telemetry.emit_process_mailbox_depth(:subscription)
      {:ok, state}
    else
      {:error, reason} -> {:stop, reason}
    end
  end

  @impl true
  def handle_call({:deliver_initial, initial_events}, _from, %__MODULE__{} = state) do
    send_initial_events(state, initial_events)

    Enum.each(Enum.reverse(state.buffered_events), fn event ->
      send(state.subscriber, {:parrhesia, :event, state.ref, state.subscription_id, event})
    end)

    {:reply, :ok, %__MODULE__{state | ready?: true, buffered_events: []}}
    |> emit_mailbox_depth()
  end

  @impl true
  def handle_info({:fanout_event, subscription_id, event}, %__MODULE__{} = state)
      when is_binary(subscription_id) and is_map(event) do
    state
    |> handle_fanout_event(subscription_id, event)
    |> emit_mailbox_depth()
  end

  def handle_info({:DOWN, monitor_ref, :process, subscriber, _reason}, %__MODULE__{} = state)
      when monitor_ref == state.subscriber_monitor_ref and subscriber == state.subscriber do
    {:stop, :normal, state}
    |> emit_mailbox_depth()
  end

  def handle_info(_message, %__MODULE__{} = state) do
    {:noreply, state}
    |> emit_mailbox_depth()
  end

  @impl true
  def terminate(reason, %__MODULE__{} = state) do
    :ok = maybe_remove_index_subscription(state.subscription_id)

    if reason not in [:normal, :shutdown] do
      send(state.subscriber, {:parrhesia, :closed, state.ref, state.subscription_id, reason})
    end

    :ok
  end

  defp send_initial_events(state, events) do
    Enum.each(events, fn event ->
      send(state.subscriber, {:parrhesia, :event, state.ref, state.subscription_id, event})
    end)

    send(state.subscriber, {:parrhesia, :eose, state.ref, state.subscription_id})
  end

  defp via_tuple(ref), do: {:via, Registry, {Parrhesia.API.Stream.Registry, ref}}

  defp fetch_subscriber(opts) do
    case Keyword.get(opts, :subscriber) do
      subscriber when is_pid(subscriber) -> {:ok, subscriber}
      _other -> {:error, :invalid_subscriber}
    end
  end

  defp fetch_subscription_id(opts) do
    case Keyword.get(opts, :subscription_id) do
      subscription_id when is_binary(subscription_id) -> {:ok, subscription_id}
      _other -> {:error, :invalid_subscription_id}
    end
  end

  defp fetch_filters(opts) do
    case Keyword.get(opts, :filters) do
      filters when is_list(filters) -> {:ok, filters}
      _other -> {:error, :invalid_filters}
    end
  end

  defp subscription_index(opts) do
    case Keyword.get(opts, :subscription_index, Index) do
      subscription_index when is_pid(subscription_index) or is_atom(subscription_index) ->
        subscription_index

      _other ->
        nil
    end
  end

  defp maybe_upsert_index_subscription(nil, _subscription_id, _filters),
    do: {:error, :subscription_index_unavailable}

  defp maybe_upsert_index_subscription(subscription_index, subscription_id, filters) do
    case Index.upsert(subscription_index, self(), subscription_id, filters) do
      :ok -> :ok
      {:error, reason} -> {:error, reason}
    end
  catch
    :exit, _reason -> {:error, :subscription_index_unavailable}
  end

  defp maybe_remove_index_subscription(subscription_id) do
    :ok = Index.remove(Index, self(), subscription_id)
    :ok
  catch
    :exit, _reason -> :ok
  end

  defp handle_fanout_event(%__MODULE__{} = state, subscription_id, event) do
    cond do
      subscription_id != state.subscription_id ->
        {:noreply, state}

      not Filter.matches_any?(event, state.filters) ->
        {:noreply, state}

      state.ready? ->
        send(state.subscriber, {:parrhesia, :event, state.ref, state.subscription_id, event})
        {:noreply, state}

      true ->
        buffered_events = [event | state.buffered_events]
        {:noreply, %__MODULE__{state | buffered_events: buffered_events}}
    end
  end

  defp emit_mailbox_depth(result) do
    Telemetry.emit_process_mailbox_depth(:subscription)
    result
  end
end
170
lib/parrhesia/api/sync.ex
Normal file
170
lib/parrhesia/api/sync.ex
Normal file
@@ -0,0 +1,170 @@
defmodule Parrhesia.API.Sync do
  @moduledoc """
  Sync server control-plane API.

  This module manages outbound relay sync definitions and exposes runtime status for each
  configured sync worker.

  The main entrypoint is `put_server/2`. Accepted server maps are normalized into a stable
  internal shape and persisted by the sync manager. The expected input shape is:

  ```elixir
  %{
    "id" => "tribes-primary",
    "url" => "wss://relay-a.example/relay",
    "enabled?" => true,
    "auth_pubkey" => "...64 hex chars...",
    "filters" => [%{"kinds" => [5000]}],
    "mode" => "req_stream",
    "overlap_window_seconds" => 300,
    "auth" => %{"type" => "nip42"},
    "tls" => %{
      "mode" => "required",
      "hostname" => "relay-a.example",
      "pins" => [%{"type" => "spki_sha256", "value" => "..."}]
    },
    "metadata" => %{}
  }
  ```

  Most functions accept `:manager` or `:name` in `opts` to target a non-default manager.
  """

  alias Parrhesia.API.Sync.Manager

  @typedoc """
  Normalized sync server configuration returned by the sync manager.
  """
  @type server :: map()

  @doc """
  Creates or replaces a sync server definition.
  """
  @spec put_server(map(), keyword()) :: {:ok, server()} | {:error, term()}
  def put_server(server, opts \\ [])

  def put_server(server, opts) when is_map(server) and is_list(opts) do
    Manager.put_server(manager_name(opts), server)
  end

  def put_server(_server, _opts), do: {:error, :invalid_server}

  @doc """
  Removes a stored sync server definition and stops its worker if it is running.
  """
  @spec remove_server(String.t(), keyword()) :: :ok | {:error, term()}
  def remove_server(server_id, opts \\ [])

  def remove_server(server_id, opts) when is_binary(server_id) and is_list(opts) do
    Manager.remove_server(manager_name(opts), server_id)
  end

  def remove_server(_server_id, _opts), do: {:error, :invalid_server_id}

  @doc """
  Fetches a single normalized sync server definition.

  Returns `:error` when the server id is unknown.
  """
  @spec get_server(String.t(), keyword()) :: {:ok, server()} | :error | {:error, term()}
  def get_server(server_id, opts \\ [])

  def get_server(server_id, opts) when is_binary(server_id) and is_list(opts) do
    Manager.get_server(manager_name(opts), server_id)
  end

  def get_server(_server_id, _opts), do: {:error, :invalid_server_id}

  @doc """
  Lists all configured sync servers, including their runtime state.
  """
  @spec list_servers(keyword()) :: {:ok, [server()]} | {:error, term()}
  def list_servers(opts \\ []) when is_list(opts) do
    Manager.list_servers(manager_name(opts))
  end

  @doc """
  Marks a sync server as running and reconciles its worker state.
  """
  @spec start_server(String.t(), keyword()) :: :ok | {:error, term()}
  def start_server(server_id, opts \\ [])

  def start_server(server_id, opts) when is_binary(server_id) and is_list(opts) do
    Manager.start_server(manager_name(opts), server_id)
  end

  def start_server(_server_id, _opts), do: {:error, :invalid_server_id}

  @doc """
  Stops a sync server and records a disconnect timestamp in runtime state.
  """
  @spec stop_server(String.t(), keyword()) :: :ok | {:error, term()}
  def stop_server(server_id, opts \\ [])

  def stop_server(server_id, opts) when is_binary(server_id) and is_list(opts) do
    Manager.stop_server(manager_name(opts), server_id)
  end

  def stop_server(_server_id, _opts), do: {:error, :invalid_server_id}

  @doc """
  Triggers an immediate sync run for a server.
  """
  @spec sync_now(String.t(), keyword()) :: :ok | {:error, term()}
  def sync_now(server_id, opts \\ [])

  def sync_now(server_id, opts) when is_binary(server_id) and is_list(opts) do
    Manager.sync_now(manager_name(opts), server_id)
  end

  def sync_now(_server_id, _opts), do: {:error, :invalid_server_id}

  @doc """
  Returns runtime counters and timestamps for a single sync server.

  Returns `:error` when the server id is unknown.
  """
  @spec server_stats(String.t(), keyword()) :: {:ok, map()} | :error | {:error, term()}
  def server_stats(server_id, opts \\ [])

  def server_stats(server_id, opts) when is_binary(server_id) and is_list(opts) do
    Manager.server_stats(manager_name(opts), server_id)
  end

  def server_stats(_server_id, _opts), do: {:error, :invalid_server_id}

  @doc """
  Returns aggregate counters across all configured sync servers.
  """
  @spec sync_stats(keyword()) :: {:ok, map()} | {:error, term()}
  def sync_stats(opts \\ []) when is_list(opts) do
    Manager.sync_stats(manager_name(opts))
  end

  @doc """
  Returns a health summary for the sync subsystem.
  """
  @spec sync_health(keyword()) :: {:ok, map()} | {:error, term()}
  def sync_health(opts \\ []) when is_list(opts) do
    Manager.sync_health(manager_name(opts))
  end

  @doc """
  Returns the default filesystem path for persisted sync server state.
  """
  def default_path do
    Path.join([default_data_dir(), "sync_servers.json"])
  end

  defp manager_name(opts) do
    opts[:manager] || opts[:name] || Manager
  end

  defp default_data_dir do
    base_dir =
      System.get_env("XDG_DATA_HOME") ||
        Path.join(System.user_home!(), ".local/share")

    Path.join(base_dir, "parrhesia")
  end
end
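Putting the moduledoc's input shape to work takes a handful of calls. A sketch, assuming the sync supervision tree is running; the id, URL, pubkey, and pin value are placeholders:

```elixir
alias Parrhesia.API.Sync

server = %{
  "id" => "tribes-primary",
  "url" => "wss://relay-a.example/relay",
  "auth_pubkey" => String.duplicate("ab", 32),
  "filters" => [%{"kinds" => [5000]}],
  "tls" => %{
    "mode" => "required",
    "pins" => [%{"type" => "spki_sha256", "value" => "base64-spki-digest"}]
  }
}

# enabled?, mode, overlap window, and auth all fall back to their defaults.
{:ok, _normalized} = Sync.put_server(server)
:ok = Sync.sync_now("tribes-primary")
{:ok, stats} = Sync.server_stats("tribes-primary")
IO.inspect(stats["events_accepted"], label: "accepted")
```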
938
lib/parrhesia/api/sync/manager.ex
Normal file
938
lib/parrhesia/api/sync/manager.ex
Normal file
@@ -0,0 +1,938 @@
defmodule Parrhesia.API.Sync.Manager do
  @moduledoc false

  use GenServer

  alias Parrhesia.API.Sync
  alias Parrhesia.Protocol.Filter
  alias Parrhesia.Sync.Transport.WebSockexClient
  alias Parrhesia.Sync.Worker

  require Logger

  @default_overlap_window_seconds 300
  @default_mode :req_stream
  @default_auth_type :nip42
  @default_tls_mode :required
  @hex64 ~r/\A[0-9a-f]{64}\z/

  def start_link(opts \\ []) do
    name = Keyword.get(opts, :name, __MODULE__)
    GenServer.start_link(__MODULE__, opts, name: name)
  end

  def put_server(name, server), do: GenServer.call(name, {:put_server, server})
  def remove_server(name, server_id), do: GenServer.call(name, {:remove_server, server_id})
  def get_server(name, server_id), do: GenServer.call(name, {:get_server, server_id})
  def list_servers(name), do: GenServer.call(name, :list_servers)
  def start_server(name, server_id), do: GenServer.call(name, {:start_server, server_id})
  def stop_server(name, server_id), do: GenServer.call(name, {:stop_server, server_id})
  def sync_now(name, server_id), do: GenServer.call(name, {:sync_now, server_id})
  def server_stats(name, server_id), do: GenServer.call(name, {:server_stats, server_id})
  def sync_stats(name), do: GenServer.call(name, :sync_stats)
  def sync_health(name), do: GenServer.call(name, :sync_health)

  def runtime_event(name, server_id, kind, attrs \\ %{}) do
    GenServer.cast(name, {:runtime_event, server_id, kind, attrs})
  end

  @impl true
  def init(opts) do
    path = Keyword.get(opts, :path, config_path() || Sync.default_path())

    state =
      load_state(path)
      |> Map.merge(%{
        start_workers?: Keyword.get(opts, :start_workers?, config_value(:start_workers?, true)),
        worker_supervisor: Keyword.get(opts, :worker_supervisor, Parrhesia.Sync.WorkerSupervisor),
        worker_registry: Keyword.get(opts, :worker_registry, Parrhesia.Sync.WorkerRegistry),
        transport_module: Keyword.get(opts, :transport_module, WebSockexClient),
        relay_info_opts: Keyword.get(opts, :relay_info_opts, []),
        transport_opts: Keyword.get(opts, :transport_opts, [])
      })

    {:ok, state, {:continue, :bootstrap}}
  end

  @impl true
  def handle_continue(:bootstrap, state) do
    next_state =
      if state.start_workers? do
        state.servers
        |> Map.keys()
        |> Enum.reduce(state, fn server_id, acc -> maybe_start_worker(acc, server_id) end)
      else
        state
      end

    {:noreply, next_state}
  end

  @impl true
  def handle_call({:put_server, server}, _from, state) do
    case normalize_server(server) do
      {:ok, normalized_server} ->
        updated_state =
          state
          |> stop_worker_if_running(normalized_server.id)
          |> put_server_state(normalized_server)
          |> persist_and_reconcile!(normalized_server.id)

        {:reply, {:ok, merged_server(updated_state, normalized_server.id)}, updated_state}

      {:error, reason} ->
        {:reply, {:error, reason}, state}
    end
  end

  def handle_call({:remove_server, server_id}, _from, state) do
    if Map.has_key?(state.servers, server_id) do
      next_state =
        state
        |> stop_worker_if_running(server_id)
        |> Map.update!(:servers, &Map.delete(&1, server_id))
        |> Map.update!(:runtime, &Map.delete(&1, server_id))

      with :ok <- persist_state(next_state) do
        {:reply, :ok, next_state}
      end
    else
      {:reply, {:error, :not_found}, state}
    end
  end

  def handle_call({:get_server, server_id}, _from, state) do
    case Map.fetch(state.servers, server_id) do
      {:ok, _server} -> {:reply, {:ok, merged_server(state, server_id)}, state}
      :error -> {:reply, :error, state}
    end
  end

  def handle_call(:list_servers, _from, state) do
    servers =
      state.servers
      |> Map.keys()
      |> Enum.sort()
      |> Enum.map(&merged_server(state, &1))

    {:reply, {:ok, servers}, state}
  end

  def handle_call({:start_server, server_id}, _from, state) do
    case Map.fetch(state.runtime, server_id) do
      {:ok, runtime} ->
        next_state =
          state
          |> put_runtime(server_id, %{runtime | state: :running, last_error: nil})
          |> persist_and_reconcile!(server_id)

        {:reply, :ok, next_state}

      :error ->
        {:reply, {:error, :not_found}, state}
    end
  end

  def handle_call({:stop_server, server_id}, _from, state) do
    case Map.fetch(state.runtime, server_id) do
      {:ok, runtime} ->
        next_runtime =
          runtime
          |> Map.put(:state, :stopped)
          |> Map.put(:connected?, false)
          |> Map.put(:last_disconnected_at, now())

        next_state =
          state
          |> stop_worker_if_running(server_id)
          |> put_runtime(server_id, next_runtime)

        with :ok <- persist_state(next_state) do
          {:reply, :ok, next_state}
        end

      :error ->
        {:reply, {:error, :not_found}, state}
    end
  end

  def handle_call({:sync_now, server_id}, _from, state) do
    case {Map.has_key?(state.runtime, server_id), state.start_workers?,
          lookup_worker(state, server_id)} do
      {false, _start_workers?, _worker_pid} ->
        {:reply, {:error, :not_found}, state}

      {true, true, worker_pid} when is_pid(worker_pid) ->
        Worker.sync_now(worker_pid)
        {:reply, :ok, state}

      {true, true, nil} ->
        next_state =
          state
          |> put_in([:runtime, server_id, :state], :running)
          |> persist_and_reconcile!(server_id)

        {:reply, :ok, next_state}

      {true, false, _worker_pid} ->
        next_state =
          apply_runtime_event(state, server_id, :sync_started, %{})
          |> apply_runtime_event(server_id, :sync_completed, %{})

        with :ok <- persist_state(next_state) do
          {:reply, :ok, next_state}
        end
    end
  end

  def handle_call({:server_stats, server_id}, _from, state) do
    case Map.fetch(state.runtime, server_id) do
      {:ok, runtime} -> {:reply, {:ok, runtime_stats(runtime)}, state}
      :error -> {:reply, :error, state}
    end
  end

  def handle_call(:sync_stats, _from, state), do: {:reply, {:ok, aggregate_stats(state)}, state}
  def handle_call(:sync_health, _from, state), do: {:reply, {:ok, health_summary(state)}, state}

  @impl true
  def handle_cast({:runtime_event, server_id, kind, attrs}, state) do
    next_state =
      state
      |> apply_runtime_event(server_id, kind, attrs)
      |> persist_state_if_known_server(server_id)

    {:noreply, next_state}
  end

  defp persist_state_if_known_server(state, server_id) do
    if Map.has_key?(state.runtime, server_id) do
      case persist_state(state) do
        :ok ->
          state

        {:error, reason} ->
          Logger.warning("failed to persist sync runtime for #{server_id}: #{inspect(reason)}")
          state
      end
    else
      state
    end
  end

  defp put_server_state(state, server) do
    runtime =
      case Map.get(state.runtime, server.id) do
        nil -> default_runtime(server)
        existing_runtime -> existing_runtime
      end

    %{
      state
      | servers: Map.put(state.servers, server.id, server),
        runtime: Map.put(state.runtime, server.id, runtime)
    }
  end

  defp put_runtime(state, server_id, runtime) do
    %{state | runtime: Map.put(state.runtime, server_id, runtime)}
  end

  defp persist_and_reconcile!(state, server_id) do
    :ok = persist_state(state)
    reconcile_worker(state, server_id)
  end

  defp reconcile_worker(state, server_id) do
    cond do
      not state.start_workers? ->
        state

      desired_running?(state, server_id) ->
        maybe_start_worker(state, server_id)

      true ->
        stop_worker_if_running(state, server_id)
    end
  end

  defp maybe_start_worker(state, server_id) do
    cond do
      not state.start_workers? ->
        state

      not desired_running?(state, server_id) ->
        state

      lookup_worker(state, server_id) != nil ->
        state

      true ->
        server = Map.fetch!(state.servers, server_id)
        runtime = Map.fetch!(state.runtime, server_id)

        child_spec = %{
          id: {:sync_worker, server_id},
          start:
            {Worker, :start_link,
             [
               [
                 name: via_tuple(server_id, state.worker_registry),
                 server: server,
                 runtime: runtime,
                 manager: self(),
                 transport_module: state.transport_module,
                 relay_info_opts: state.relay_info_opts,
                 transport_opts: state.transport_opts
               ]
             ]},
          restart: :transient
        }

        case DynamicSupervisor.start_child(state.worker_supervisor, child_spec) do
          {:ok, _pid} ->
            state

          {:error, {:already_started, _pid}} ->
            state

          {:error, reason} ->
            Logger.warning("failed to start sync worker #{server_id}: #{inspect(reason)}")
            state
        end
    end
  end

  defp stop_worker_if_running(state, server_id) do
    if worker_pid = lookup_worker(state, server_id) do
      _ = Worker.stop(worker_pid)
    end

    state
  end

  defp desired_running?(state, server_id) do
    case Map.fetch(state.runtime, server_id) do
      {:ok, runtime} -> runtime.state == :running
      :error -> false
    end
  end

  defp lookup_worker(state, server_id) do
    case Registry.lookup(state.worker_registry, server_id) do
      [{pid, _value}] -> pid
      [] -> nil
    end
  catch
    :exit, _reason -> nil
  end

  defp via_tuple(server_id, registry) do
    {:via, Registry, {registry, server_id}}
  end

  defp merged_server(state, server_id) do
    state.servers
    |> Map.fetch!(server_id)
    |> Map.put(:runtime, Map.fetch!(state.runtime, server_id))
  end

  defp runtime_stats(runtime) do
    %{
      "server_id" => runtime.server_id,
      "state" => Atom.to_string(runtime.state),
      "connected" => runtime.connected?,
      "events_received" => runtime.events_received,
      "events_accepted" => runtime.events_accepted,
      "events_duplicate" => runtime.events_duplicate,
      "events_rejected" => runtime.events_rejected,
      "query_runs" => runtime.query_runs,
      "subscription_restarts" => runtime.subscription_restarts,
      "reconnects" => runtime.reconnects,
      "last_sync_started_at" => runtime.last_sync_started_at,
      "last_sync_completed_at" => runtime.last_sync_completed_at,
      "last_remote_eose_at" => runtime.last_remote_eose_at,
      "last_error" => runtime.last_error,
      "cursor_created_at" => runtime.cursor_created_at,
      "cursor_event_id" => runtime.cursor_event_id
    }
  end

  defp aggregate_stats(state) do
    runtimes = Map.values(state.runtime)

    %{
      "servers_total" => map_size(state.servers),
      "servers_enabled" => Enum.count(state.servers, fn {_id, server} -> server.enabled? end),
      "servers_running" => Enum.count(runtimes, &(&1.state == :running)),
      "servers_connected" => Enum.count(runtimes, & &1.connected?),
      "events_received" => Enum.reduce(runtimes, 0, &(&1.events_received + &2)),
      "events_accepted" => Enum.reduce(runtimes, 0, &(&1.events_accepted + &2)),
      "events_duplicate" => Enum.reduce(runtimes, 0, &(&1.events_duplicate + &2)),
      "events_rejected" => Enum.reduce(runtimes, 0, &(&1.events_rejected + &2)),
      "query_runs" => Enum.reduce(runtimes, 0, &(&1.query_runs + &2)),
      "subscription_restarts" => Enum.reduce(runtimes, 0, &(&1.subscription_restarts + &2)),
      "reconnects" => Enum.reduce(runtimes, 0, &(&1.reconnects + &2))
    }
  end

  defp health_summary(state) do
    failing_servers =
      state.runtime
      |> Enum.flat_map(fn {server_id, runtime} ->
        if is_binary(runtime.last_error) and runtime.last_error != "" do
          [%{"id" => server_id, "reason" => runtime.last_error}]
        else
          []
        end
      end)

    %{
      "status" => if(failing_servers == [], do: "ok", else: "degraded"),
      "servers_total" => map_size(state.servers),
      "servers_connected" =>
        Enum.count(state.runtime, fn {_id, runtime} -> runtime.connected? end),
      "servers_failing" => failing_servers
    }
  end

  defp apply_runtime_event(state, server_id, kind, attrs) do
    case Map.fetch(state.runtime, server_id) do
      {:ok, runtime} ->
        updated_runtime = update_runtime_for_event(runtime, kind, attrs)
        put_runtime(state, server_id, updated_runtime)

      :error ->
        state
    end
  end

  defp update_runtime_for_event(runtime, :connected, _attrs) do
    runtime
    |> Map.put(:state, :running)
    |> Map.put(:connected?, true)
    |> Map.put(:last_connected_at, now())
    |> Map.put(:last_error, nil)
  end

  defp update_runtime_for_event(runtime, :disconnected, attrs) do
    reason = format_reason(Map.get(attrs, :reason))

    runtime
    |> Map.put(:connected?, false)
    |> Map.put(:last_disconnected_at, now())
    |> Map.update!(:reconnects, &(&1 + 1))
    |> Map.put(:last_error, reason)
  end

  defp update_runtime_for_event(runtime, :error, attrs) do
    Map.put(runtime, :last_error, format_reason(Map.get(attrs, :reason)))
  end

  defp update_runtime_for_event(runtime, :sync_started, _attrs) do
    runtime
    |> Map.put(:last_sync_started_at, now())
    |> Map.update!(:query_runs, &(&1 + 1))
  end

  defp update_runtime_for_event(runtime, :sync_completed, _attrs) do
    timestamp = now()

    runtime
    |> Map.put(:last_sync_completed_at, timestamp)
    |> Map.put(:last_eose_at, timestamp)
    |> Map.put(:last_remote_eose_at, timestamp)
  end

  defp update_runtime_for_event(runtime, :subscription_restart, _attrs) do
    Map.update!(runtime, :subscription_restarts, &(&1 + 1))
  end

  defp update_runtime_for_event(runtime, :cursor_advanced, attrs) do
    runtime
    |> Map.put(:cursor_created_at, Map.get(attrs, :created_at))
    |> Map.put(:cursor_event_id, Map.get(attrs, :event_id))
  end

  defp update_runtime_for_event(runtime, :event_result, attrs) do
    event = Map.get(attrs, :event, %{})
    result = Map.get(attrs, :result)

    runtime
    |> Map.update!(:events_received, &(&1 + 1))
    |> Map.put(:last_event_received_at, now())
    |> increment_result_counter(result)
    |> maybe_put_last_error(attrs)
    |> maybe_advance_runtime_cursor(event, result)
  end

  defp update_runtime_for_event(runtime, _kind, _attrs), do: runtime

  defp increment_result_counter(runtime, :accepted),
    do: Map.update!(runtime, :events_accepted, &(&1 + 1))

  defp increment_result_counter(runtime, :duplicate),
    do: Map.update!(runtime, :events_duplicate, &(&1 + 1))

  defp increment_result_counter(runtime, :rejected),
    do: Map.update!(runtime, :events_rejected, &(&1 + 1))

  defp increment_result_counter(runtime, _result), do: runtime

  defp maybe_put_last_error(runtime, %{reason: nil}), do: runtime

  defp maybe_put_last_error(runtime, attrs),
    do: Map.put(runtime, :last_error, format_reason(attrs[:reason]))

  defp maybe_advance_runtime_cursor(runtime, event, result)
       when result in [:accepted, :duplicate] do
    created_at = Map.get(event, "created_at")
    event_id = Map.get(event, "id")

    cond do
      not is_integer(created_at) or not is_binary(event_id) ->
        runtime

      is_nil(runtime.cursor_created_at) ->
        runtime
        |> Map.put(:cursor_created_at, created_at)
        |> Map.put(:cursor_event_id, event_id)

      created_at > runtime.cursor_created_at ->
        runtime
        |> Map.put(:cursor_created_at, created_at)
        |> Map.put(:cursor_event_id, event_id)

      created_at == runtime.cursor_created_at and event_id > runtime.cursor_event_id ->
        runtime
        |> Map.put(:cursor_created_at, created_at)
        |> Map.put(:cursor_event_id, event_id)

      true ->
        runtime
    end
  end

  defp maybe_advance_runtime_cursor(runtime, _event, _result), do: runtime

  defp format_reason(nil), do: nil
  defp format_reason(reason) when is_binary(reason), do: reason
  defp format_reason(reason), do: inspect(reason)

  defp load_state(path) do
    case File.read(path) do
      {:ok, payload} ->
        case decode_persisted_state(payload, path) do
          {:ok, state} ->
            state

          {:error, reason} ->
            Logger.warning("failed to load sync state from #{path}: #{inspect(reason)}")
            empty_state(path)
        end

      {:error, :enoent} ->
        empty_state(path)

      {:error, reason} ->
        Logger.warning("failed to read sync state from #{path}: #{inspect(reason)}")
        empty_state(path)
    end
  end

  defp decode_persisted_state(payload, path) do
    with {:ok, decoded} <- JSON.decode(payload),
         {:ok, servers} <- decode_servers(Map.get(decoded, "servers", %{})),
         {:ok, runtime} <- decode_runtime(Map.get(decoded, "runtime", %{}), servers) do
      {:ok, %{path: path, servers: servers, runtime: runtime}}
    end
  end

  defp decode_servers(servers) when is_map(servers) do
    Enum.reduce_while(servers, {:ok, %{}}, fn {_id, server_payload}, {:ok, acc} ->
      case normalize_server(server_payload) do
        {:ok, server} -> {:cont, {:ok, Map.put(acc, server.id, server)}}
        {:error, reason} -> {:halt, {:error, reason}}
      end
    end)
  end

  defp decode_servers(_servers), do: {:error, :invalid_servers_state}

  defp decode_runtime(runtime_payload, servers)
       when is_map(runtime_payload) and is_map(servers) do
    runtime =
      Enum.reduce(servers, %{}, fn {server_id, server}, acc ->
        decoded_runtime =
          runtime_payload
          |> Map.get(server_id)
          |> normalize_runtime(server)

        Map.put(acc, server_id, decoded_runtime)
      end)

    {:ok, runtime}
  end

  defp decode_runtime(_runtime_payload, _servers), do: {:error, :invalid_runtime_state}

  defp normalize_runtime(nil, server), do: default_runtime(server)

  defp normalize_runtime(runtime, server) when is_map(runtime) do
    %{
      server_id: server.id,
      state: normalize_runtime_state(fetch_value(runtime, :state)),
      connected?: fetch_boolean(runtime, :connected?) || false,
      last_connected_at: fetch_string_or_nil(runtime, :last_connected_at),
      last_disconnected_at: fetch_string_or_nil(runtime, :last_disconnected_at),
      last_sync_started_at: fetch_string_or_nil(runtime, :last_sync_started_at),
      last_sync_completed_at: fetch_string_or_nil(runtime, :last_sync_completed_at),
      last_event_received_at: fetch_string_or_nil(runtime, :last_event_received_at),
      last_eose_at: fetch_string_or_nil(runtime, :last_eose_at),
      reconnect_attempts: fetch_non_neg_integer(runtime, :reconnect_attempts),
      last_error: fetch_string_or_nil(runtime, :last_error),
      events_received: fetch_non_neg_integer(runtime, :events_received),
      events_accepted: fetch_non_neg_integer(runtime, :events_accepted),
      events_duplicate: fetch_non_neg_integer(runtime, :events_duplicate),
      events_rejected: fetch_non_neg_integer(runtime, :events_rejected),
      query_runs: fetch_non_neg_integer(runtime, :query_runs),
      subscription_restarts: fetch_non_neg_integer(runtime, :subscription_restarts),
      reconnects: fetch_non_neg_integer(runtime, :reconnects),
      last_remote_eose_at: fetch_string_or_nil(runtime, :last_remote_eose_at),
      cursor_created_at: fetch_optional_integer(runtime, :cursor_created_at),
      cursor_event_id: fetch_string_or_nil(runtime, :cursor_event_id)
    }
  end

  defp normalize_runtime(_runtime, server), do: default_runtime(server)

  defp persist_state(%{path: path} = state) do
    temp_path = path <> ".tmp"

    with :ok <- File.mkdir_p(Path.dirname(path)),
         :ok <- File.write(temp_path, JSON.encode!(encode_state(state))),
         :ok <- File.rename(temp_path, path) do
      :ok
    else
      {:error, reason} ->
        _ = File.rm(temp_path)
        {:error, reason}
    end
  end

  defp encode_state(state) do
    %{
      "version" => 2,
      "servers" =>
        Map.new(state.servers, fn {server_id, server} -> {server_id, encode_server(server)} end),
      "runtime" =>
        Map.new(state.runtime, fn {server_id, runtime} -> {server_id, encode_runtime(runtime)} end)
    }
  end

  defp encode_server(server) do
    %{
      "id" => server.id,
      "url" => server.url,
      "enabled?" => server.enabled?,
      "auth_pubkey" => server.auth_pubkey,
      "filters" => server.filters,
      "mode" => Atom.to_string(server.mode),
      "overlap_window_seconds" => server.overlap_window_seconds,
      "auth" => %{"type" => Atom.to_string(server.auth.type)},
      "tls" => %{
        "mode" => Atom.to_string(server.tls.mode),
        "hostname" => server.tls.hostname,
        "pins" =>
          Enum.map(server.tls.pins, fn pin ->
            %{
              "type" => Atom.to_string(pin.type),
              "value" => pin.value
            }
          end)
      },
      "metadata" => server.metadata
    }
  end

  defp encode_runtime(runtime) do
    %{
      "server_id" => runtime.server_id,
      "state" => Atom.to_string(runtime.state),
      "connected?" => runtime.connected?,
      "last_connected_at" => runtime.last_connected_at,
      "last_disconnected_at" => runtime.last_disconnected_at,
      "last_sync_started_at" => runtime.last_sync_started_at,
      "last_sync_completed_at" => runtime.last_sync_completed_at,
      "last_event_received_at" => runtime.last_event_received_at,
      "last_eose_at" => runtime.last_eose_at,
      "reconnect_attempts" => runtime.reconnect_attempts,
      "last_error" => runtime.last_error,
      "events_received" => runtime.events_received,
      "events_accepted" => runtime.events_accepted,
      "events_duplicate" => runtime.events_duplicate,
      "events_rejected" => runtime.events_rejected,
      "query_runs" => runtime.query_runs,
      "subscription_restarts" => runtime.subscription_restarts,
      "reconnects" => runtime.reconnects,
      "last_remote_eose_at" => runtime.last_remote_eose_at,
      "cursor_created_at" => runtime.cursor_created_at,
      "cursor_event_id" => runtime.cursor_event_id
    }
  end

  defp empty_state(path) do
    %{path: path, servers: %{}, runtime: %{}}
  end

  defp default_runtime(server) do
    %{
      server_id: server.id,
      state: if(server.enabled?, do: :running, else: :stopped),
      connected?: false,
      last_connected_at: nil,
      last_disconnected_at: nil,
      last_sync_started_at: nil,
      last_sync_completed_at: nil,
      last_event_received_at: nil,
      last_eose_at: nil,
      reconnect_attempts: 0,
      last_error: nil,
      events_received: 0,
      events_accepted: 0,
      events_duplicate: 0,
      events_rejected: 0,
      query_runs: 0,
      subscription_restarts: 0,
      reconnects: 0,
      last_remote_eose_at: nil,
      cursor_created_at: nil,
      cursor_event_id: nil
    }
  end

  defp normalize_server(server) when is_map(server) do
    with {:ok, id} <- normalize_non_empty_string(fetch_value(server, :id), :invalid_server_id),
         {:ok, {url, host, scheme}} <- normalize_url(fetch_value(server, :url)),
         {:ok, enabled?} <- normalize_boolean(fetch_value(server, :enabled?), true),
         {:ok, auth_pubkey} <- normalize_pubkey(fetch_value(server, :auth_pubkey)),
         {:ok, filters} <- normalize_filters(fetch_value(server, :filters)),
         {:ok, mode} <- normalize_mode(fetch_value(server, :mode)),
         {:ok, overlap_window_seconds} <-
           normalize_overlap_window(fetch_value(server, :overlap_window_seconds)),
         {:ok, auth} <- normalize_auth(fetch_value(server, :auth)),
         {:ok, tls} <- normalize_tls(fetch_value(server, :tls), host, scheme),
         {:ok, metadata} <- normalize_metadata(fetch_value(server, :metadata)) do
      {:ok,
       %{
         id: id,
         url: url,
         enabled?: enabled?,
         auth_pubkey: auth_pubkey,
         filters: filters,
         mode: mode,
         overlap_window_seconds: overlap_window_seconds,
         auth: auth,
         tls: tls,
         metadata: metadata
       }}
    end
  end

  defp normalize_server(_server), do: {:error, :invalid_server}

  defp normalize_url(url) when is_binary(url) and url != "" do
    uri = URI.parse(url)

    if uri.scheme in ["ws", "wss"] and is_binary(uri.host) and uri.host != "" do
      {:ok, {URI.to_string(uri), uri.host, uri.scheme}}
    else
      {:error, :invalid_url}
    end
  end

  defp normalize_url(_url), do: {:error, :invalid_url}

  defp normalize_pubkey(pubkey) when is_binary(pubkey) do
    normalized = String.downcase(pubkey)

    if String.match?(normalized, @hex64) do
      {:ok, normalized}
    else
      {:error, :invalid_auth_pubkey}
    end
  end

  defp normalize_pubkey(_pubkey), do: {:error, :invalid_auth_pubkey}

  defp normalize_filters(filters) when is_list(filters) do
    normalized_filters = Enum.map(filters, &normalize_filter_map/1)

    with :ok <- Filter.validate_filters(normalized_filters) do
      {:ok, normalized_filters}
    end
  end

  defp normalize_filters(_filters), do: {:error, :invalid_filters}

  defp normalize_mode(nil), do: {:ok, @default_mode}
  defp normalize_mode(:req_stream), do: {:ok, :req_stream}
  defp normalize_mode("req_stream"), do: {:ok, :req_stream}
  defp normalize_mode(_mode), do: {:error, :invalid_mode}

  defp normalize_overlap_window(nil), do: {:ok, @default_overlap_window_seconds}

  defp normalize_overlap_window(seconds) when is_integer(seconds) and seconds >= 0,
    do: {:ok, seconds}

  defp normalize_overlap_window(_seconds), do: {:error, :invalid_overlap_window_seconds}

  defp normalize_auth(nil), do: {:ok, %{type: @default_auth_type}}

  defp normalize_auth(auth) when is_map(auth) do
    with {:ok, type} <- normalize_auth_type(fetch_value(auth, :type)) do
      {:ok, %{type: type}}
    end
  end

  defp normalize_auth(_auth), do: {:error, :invalid_auth}

  defp normalize_auth_type(nil), do: {:ok, @default_auth_type}
  defp normalize_auth_type(:nip42), do: {:ok, :nip42}
  defp normalize_auth_type("nip42"), do: {:ok, :nip42}
  defp normalize_auth_type(_type), do: {:error, :invalid_auth_type}

  defp normalize_tls(tls, host, scheme) when is_map(tls) do
    with {:ok, mode} <- normalize_tls_mode(fetch_value(tls, :mode)),
         :ok <- validate_tls_mode_against_scheme(mode, scheme),
         {:ok, hostname} <- normalize_hostname(fetch_value(tls, :hostname) || host),
         {:ok, pins} <- normalize_tls_pins(mode, fetch_value(tls, :pins)) do
      {:ok, %{mode: mode, hostname: hostname, pins: pins}}
    end
  end

  defp normalize_tls(_tls, _host, _scheme), do: {:error, :invalid_tls}

  defp normalize_tls_mode(nil), do: {:ok, @default_tls_mode}
  defp normalize_tls_mode(:required), do: {:ok, :required}
  defp normalize_tls_mode("required"), do: {:ok, :required}
  defp normalize_tls_mode(:disabled), do: {:ok, :disabled}
  defp normalize_tls_mode("disabled"), do: {:ok, :disabled}
  defp normalize_tls_mode(_mode), do: {:error, :invalid_tls_mode}

  defp validate_tls_mode_against_scheme(:required, "wss"), do: :ok
  defp validate_tls_mode_against_scheme(:required, _scheme), do: {:error, :invalid_url}
  defp validate_tls_mode_against_scheme(:disabled, _scheme), do: :ok

  defp normalize_hostname(hostname) when is_binary(hostname) and hostname != "",
    do: {:ok, hostname}

  defp normalize_hostname(_hostname), do: {:error, :invalid_tls_hostname}

  defp normalize_tls_pins(:disabled, nil), do: {:ok, []}
  defp normalize_tls_pins(:disabled, pins) when is_list(pins), do: {:ok, []}

  defp normalize_tls_pins(:required, pins) when is_list(pins) and pins != [] do
    Enum.reduce_while(pins, {:ok, []}, fn pin, {:ok, acc} ->
      case normalize_tls_pin(pin) do
        {:ok, normalized_pin} -> {:cont, {:ok, [normalized_pin | acc]}}
        {:error, reason} -> {:halt, {:error, reason}}
      end
    end)
    |> case do
      {:ok, normalized_pins} -> {:ok, Enum.reverse(normalized_pins)}
      error -> error
    end
  end

  defp normalize_tls_pins(:required, _pins), do: {:error, :invalid_tls_pins}

  defp normalize_tls_pin(pin) when is_map(pin) do
    with {:ok, type} <- normalize_tls_pin_type(fetch_value(pin, :type)),
         {:ok, value} <- normalize_non_empty_string(fetch_value(pin, :value), :invalid_tls_pin) do
      {:ok, %{type: type, value: value}}
    end
  end

  defp normalize_tls_pin(_pin), do: {:error, :invalid_tls_pin}

  defp normalize_tls_pin_type(:spki_sha256), do: {:ok, :spki_sha256}
  defp normalize_tls_pin_type("spki_sha256"), do: {:ok, :spki_sha256}
  defp normalize_tls_pin_type(_type), do: {:error, :invalid_tls_pin}

  defp normalize_metadata(nil), do: {:ok, %{}}
  defp normalize_metadata(metadata) when is_map(metadata), do: {:ok, metadata}
  defp normalize_metadata(_metadata), do: {:error, :invalid_metadata}

  defp normalize_boolean(nil, default), do: {:ok, default}
  defp normalize_boolean(value, _default) when is_boolean(value), do: {:ok, value}
  defp normalize_boolean(_value, _default), do: {:error, :invalid_enabled_flag}

  defp normalize_non_empty_string(value, _reason) when is_binary(value) and value != "",
    do: {:ok, value}

  defp normalize_non_empty_string(_value, reason), do: {:error, reason}

  defp normalize_filter_map(filter) when is_map(filter) do
    Map.new(filter, fn
      {key, value} when is_atom(key) -> {Atom.to_string(key), value}
      {key, value} -> {key, value}
    end)
  end

  defp normalize_filter_map(filter), do: filter

  defp normalize_runtime_state("running"), do: :running
  defp normalize_runtime_state(:running), do: :running
  defp normalize_runtime_state("stopped"), do: :stopped
  defp normalize_runtime_state(:stopped), do: :stopped
  defp normalize_runtime_state(_state), do: :stopped

  defp fetch_non_neg_integer(map, key) do
    case fetch_value(map, key) do
      value when is_integer(value) and value >= 0 -> value
      _other -> 0
    end
  end

  defp fetch_optional_integer(map, key) do
    case fetch_value(map, key) do
      value when is_integer(value) and value >= 0 -> value
      _other -> nil
    end
  end

  defp fetch_boolean(map, key) do
    case fetch_value(map, key) do
      value when is_boolean(value) -> value
      _other -> nil
    end
  end

  defp fetch_string_or_nil(map, key) do
    case fetch_value(map, key) do
      value when is_binary(value) and value != "" -> value
      _other -> nil
    end
  end

  defp fetch_value(map, key) when is_map(map) do
    Map.get(map, key) || Map.get(map, Atom.to_string(key))
  end

  defp config_path do
    config_value(:path)
  end

  defp config_value(key, default \\ nil) do
    :parrhesia
    |> Application.get_env(:sync, [])
    |> Keyword.get(key, default)
  end

  defp now do
    DateTime.utc_now()
    |> DateTime.truncate(:second)
    |> DateTime.to_iso8601()
  end
end
@@ -5,19 +5,6 @@ defmodule Parrhesia.Application do
 
   @impl true
   def start(_type, _args) do
-    children = [
-      Parrhesia.Telemetry,
-      Parrhesia.Config,
-      Parrhesia.Storage.Supervisor,
-      Parrhesia.Subscriptions.Supervisor,
-      Parrhesia.Auth.Supervisor,
-      Parrhesia.Policy.Supervisor,
-      Parrhesia.Web.Endpoint,
-      Parrhesia.Web.MetricsEndpoint,
-      Parrhesia.Tasks.Supervisor
-    ]
-
-    opts = [strategy: :one_for_one, name: Parrhesia.Supervisor]
-    Supervisor.start_link(children, opts)
+    Parrhesia.Runtime.start_link(name: Parrhesia.Supervisor)
   end
 end
@@ -3,20 +3,29 @@ defmodule Parrhesia.Auth.Nip98 do
   Minimal NIP-98 HTTP auth validation.
   """
 
+  alias Parrhesia.Auth.Nip98ReplayCache
   alias Parrhesia.Protocol.EventValidator
 
   @max_age_seconds 60
 
-  @spec validate_authorization_header(String.t() | nil, String.t(), String.t()) ::
-          {:ok, map()} | {:error, atom()}
-  def validate_authorization_header(nil, _method, _url), do: {:error, :missing_authorization}
+  def validate_authorization_header(authorization, method, url) do
+    validate_authorization_header(authorization, method, url, [])
+  end
 
-  def validate_authorization_header("Nostr " <> encoded_event, method, url)
-      when is_binary(method) and is_binary(url) do
+  @spec validate_authorization_header(String.t() | nil, String.t(), String.t(), keyword()) ::
+          {:ok, map()} | {:error, atom()}
+  def validate_authorization_header(nil, _method, _url, _opts),
+    do: {:error, :missing_authorization}
+
+  def validate_authorization_header("Nostr " <> encoded_event, method, url, opts)
+      when is_binary(method) and is_binary(url) and is_list(opts) do
     with {:ok, event_json} <- decode_base64(encoded_event),
          {:ok, event} <- JSON.decode(event_json),
-         :ok <- validate_event_shape(event),
-         :ok <- validate_http_binding(event, method, url) do
+         :ok <- validate_event_shape(event, opts),
+         :ok <- validate_http_binding(event, method, url),
+         :ok <- consume_replay_token(event, opts) do
       {:ok, event}
     else
       {:error, reason} -> {:error, reason}
@@ -24,7 +33,8 @@ defmodule Parrhesia.Auth.Nip98 do
     end
   end
 
-  def validate_authorization_header(_header, _method, _url), do: {:error, :invalid_authorization}
+  def validate_authorization_header(_header, _method, _url, _opts),
+    do: {:error, :invalid_authorization}
 
   defp decode_base64(encoded_event) do
     case Base.decode64(encoded_event) do
@@ -33,33 +43,35 @@ defmodule Parrhesia.Auth.Nip98 do
     end
   end
 
-  defp validate_event_shape(event) when is_map(event) do
+  defp validate_event_shape(event, opts) when is_map(event) do
     with :ok <- EventValidator.validate(event),
         :ok <- validate_kind(event),
-         :ok <- validate_fresh_created_at(event) do
+         :ok <- validate_fresh_created_at(event, opts) do
       :ok
     else
       :ok -> :ok
       {:error, :stale_event} -> {:error, :stale_event}
       {:error, _reason} -> {:error, :invalid_event}
     end
   end
 
-  defp validate_event_shape(_event), do: {:error, :invalid_event}
+  defp validate_event_shape(_event, _opts), do: {:error, :invalid_event}
 
   defp validate_kind(%{"kind" => 27_235}), do: :ok
   defp validate_kind(_event), do: {:error, :invalid_event}
 
-  defp validate_fresh_created_at(%{"created_at" => created_at}) when is_integer(created_at) do
+  defp validate_fresh_created_at(%{"created_at" => created_at}, opts)
+       when is_integer(created_at) do
     now = System.system_time(:second)
+    max_age_seconds = Keyword.get(opts, :max_age_seconds, @max_age_seconds)
 
-    if abs(now - created_at) <= @max_age_seconds do
+    if abs(now - created_at) <= max_age_seconds do
       :ok
     else
       {:error, :stale_event}
     end
   end
 
-  defp validate_fresh_created_at(_event), do: {:error, :invalid_event}
+  defp validate_fresh_created_at(_event, _opts), do: {:error, :invalid_event}
 
   defp validate_http_binding(event, method, url) do
     tags = Map.get(event, "tags", [])
@@ -85,4 +97,14 @@ defmodule Parrhesia.Auth.Nip98 do
       true -> :ok
     end
   end
+
+  defp consume_replay_token(%{"id" => event_id, "created_at" => created_at}, opts)
+       when is_binary(event_id) and is_integer(created_at) do
+    case Keyword.get(opts, :replay_cache, Nip98ReplayCache) do
+      nil -> :ok
+      replay_cache -> Nip98ReplayCache.consume(replay_cache, event_id, created_at, opts)
+    end
+  end
+
+  defp consume_replay_token(_event, _opts), do: {:error, :invalid_event}
 end
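For reference, a minimal sketch of calling the new four-argument entry point; the header, method, URL, and option values here are illustrative, and passing replay_cache: nil opts out of replay tracking as the clause above shows:

    # `signed_event_json` stands in for a caller-provided, signed kind-27235 event.
    header = "Nostr " <> Base.encode64(signed_event_json)

    case Parrhesia.Auth.Nip98.validate_authorization_header(
           header,
           "POST",
           "https://relay.example/upload",
           max_age_seconds: 120
         ) do
      {:ok, event} -> {:authorized, event["pubkey"]}
      {:error, reason} -> {:unauthorized, reason}
    end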
56
lib/parrhesia/auth/nip98_replay_cache.ex
Normal file
@@ -0,0 +1,56 @@
defmodule Parrhesia.Auth.Nip98ReplayCache do
  @moduledoc """
  Tracks recently accepted NIP-98 auth event ids to prevent replay.
  """

  use GenServer

  @default_max_age_seconds 60

  @spec start_link(keyword()) :: GenServer.on_start()
  def start_link(opts \\ []) do
    case Keyword.get(opts, :name, __MODULE__) do
      nil -> GenServer.start_link(__MODULE__, opts)
      name -> GenServer.start_link(__MODULE__, opts, name: name)
    end
  end

  @spec consume(GenServer.server(), String.t(), integer(), keyword()) ::
          :ok | {:error, :replayed_auth_event}
  def consume(server \\ __MODULE__, event_id, created_at, opts \\ [])
      when is_binary(event_id) and is_integer(created_at) and is_list(opts) do
    GenServer.call(server, {:consume, event_id, created_at, opts})
  end

  @impl true
  def init(_opts) do
    {:ok, %{entries: %{}}}
  end

  @impl true
  def handle_call({:consume, event_id, created_at, opts}, _from, state) do
    now_ms = System.monotonic_time(:millisecond)
    entries = prune_expired(state.entries, now_ms)

    case Map.has_key?(entries, event_id) do
      true ->
        {:reply, {:error, :replayed_auth_event}, %{state | entries: entries}}

      false ->
        expires_at_ms = replay_expiration_ms(now_ms, created_at, opts)
        next_entries = Map.put(entries, event_id, expires_at_ms)
        {:reply, :ok, %{state | entries: next_entries}}
    end
  end

  defp prune_expired(entries, now_ms) do
    Map.reject(entries, fn {_event_id, expires_at_ms} -> expires_at_ms <= now_ms end)
  end

  defp replay_expiration_ms(now_ms, created_at, opts) do
    max_age_seconds = Keyword.get(opts, :max_age_seconds, max_age_seconds())
    max(now_ms, created_at * 1000) + max_age_seconds * 1000
  end

  defp max_age_seconds, do: @default_max_age_seconds
end
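A quick sketch of the replay semantics: the first consume/4 for an event id succeeds, and any repeat within the expiration window is rejected (the event id value is illustrative):

    {:ok, cache} = Parrhesia.Auth.Nip98ReplayCache.start_link(name: nil)
    created_at = System.system_time(:second)

    # First use of the auth event is accepted and recorded.
    :ok = Parrhesia.Auth.Nip98ReplayCache.consume(cache, "event-id-1", created_at)

    # Replaying the same event id within the max-age window is refused.
    {:error, :replayed_auth_event} =
      Parrhesia.Auth.Nip98ReplayCache.consume(cache, "event-id-1", created_at)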
@@ -12,7 +12,9 @@ defmodule Parrhesia.Auth.Supervisor do
   @impl true
   def init(_init_arg) do
     children = [
-      {Parrhesia.Auth.Challenges, name: Parrhesia.Auth.Challenges}
+      {Parrhesia.Auth.Challenges, name: Parrhesia.Auth.Challenges},
+      {Parrhesia.Auth.Nip98ReplayCache, name: Parrhesia.Auth.Nip98ReplayCache},
+      {Parrhesia.API.Identity.Manager, []}
     ]
 
     Supervisor.init(children, strategy: :one_for_one)
@@ -1,6 +1,9 @@
 defmodule Parrhesia.Config do
   @moduledoc """
   Runtime configuration cache backed by ETS.
+
+  The application environment is copied into ETS at startup so hot-path reads do not need to
+  traverse the application environment repeatedly.
   """
 
   use GenServer
@@ -8,6 +11,9 @@ defmodule Parrhesia.Config do
   @table __MODULE__
   @root_key :config
 
+  @doc """
+  Starts the config cache server.
+  """
   def start_link(init_arg \\ []) do
     GenServer.start_link(__MODULE__, init_arg, name: __MODULE__)
   end
@@ -26,6 +32,9 @@ defmodule Parrhesia.Config do
     {:ok, %{}}
   end
 
+  @doc """
+  Returns the cached top-level Parrhesia application config.
+  """
   @spec all() :: map() | keyword()
   def all do
     case :ets.lookup(@table, @root_key) do
@@ -34,6 +43,11 @@ defmodule Parrhesia.Config do
     end
   end
 
+  @doc """
+  Reads a nested config value by path.
+
+  The path may traverse maps or keyword lists. Missing paths return `default`.
+  """
   @spec get([atom()], term()) :: term()
   def get(path, default \\ nil) when is_list(path) do
     case fetch(path) do
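A usage sketch of the documented read path; the [:policies, :min_pow_difficulty] path is only an example of a nested key and is not asserted by this diff:

    # Falls back to the default when the path is missing.
    min_pow = Parrhesia.Config.get([:policies, :min_pow_difficulty], 0)
    config = Parrhesia.Config.all()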
89
lib/parrhesia/connection_stats.ex
Normal file
@@ -0,0 +1,89 @@
defmodule Parrhesia.ConnectionStats do
  @moduledoc """
  Per-listener connection and subscription counters.

  Tracks active connection and subscription counts per listener and emits
  `[:parrhesia, :listener, :population]` telemetry events on each change.
  """

  use GenServer

  alias Parrhesia.Telemetry

  defstruct connections: %{}, subscriptions: %{}

  @type state :: %__MODULE__{
          connections: %{(atom() | String.t()) => non_neg_integer()},
          subscriptions: %{(atom() | String.t()) => non_neg_integer()}
        }

  @spec start_link(keyword()) :: GenServer.on_start()
  def start_link(opts \\ []) do
    name = Keyword.get(opts, :name, __MODULE__)
    GenServer.start_link(__MODULE__, %__MODULE__{}, name: name)
  end

  @spec connection_open(atom() | String.t()) :: :ok
  def connection_open(listener_id), do: cast({:connection_open, listener_id})

  @spec connection_close(atom() | String.t()) :: :ok
  def connection_close(listener_id), do: cast({:connection_close, listener_id})

  @spec subscriptions_change(atom() | String.t(), integer()) :: :ok
  def subscriptions_change(listener_id, delta) when is_integer(delta) do
    cast({:subscriptions_change, listener_id, delta})
  end

  @impl true
  def init(%__MODULE__{} = state), do: {:ok, state}

  @impl true
  def handle_cast({:connection_open, listener_id}, %__MODULE__{} = state) do
    listener_id = normalize_listener_id(listener_id)
    next_state = %{state | connections: increment(state.connections, listener_id, 1)}
    emit_population(listener_id, next_state)
    {:noreply, next_state}
  end

  def handle_cast({:connection_close, listener_id}, %__MODULE__{} = state) do
    listener_id = normalize_listener_id(listener_id)
    next_state = %{state | connections: increment(state.connections, listener_id, -1)}
    emit_population(listener_id, next_state)
    {:noreply, next_state}
  end

  def handle_cast({:subscriptions_change, listener_id, delta}, %__MODULE__{} = state) do
    listener_id = normalize_listener_id(listener_id)
    next_state = %{state | subscriptions: increment(state.subscriptions, listener_id, delta)}
    emit_population(listener_id, next_state)
    {:noreply, next_state}
  end

  defp cast(message) do
    GenServer.cast(__MODULE__, message)
    :ok
  catch
    :exit, {:noproc, _details} -> :ok
    :exit, {:normal, _details} -> :ok
  end

  defp increment(counts, key, delta) do
    current = Map.get(counts, key, 0)
    Map.put(counts, key, max(current + delta, 0))
  end

  defp emit_population(listener_id, %__MODULE__{} = state) do
    Telemetry.emit(
      [:parrhesia, :listener, :population],
      %{
        connections: Map.get(state.connections, listener_id, 0),
        subscriptions: Map.get(state.subscriptions, listener_id, 0)
      },
      %{listener_id: listener_id}
    )
  end

  defp normalize_listener_id(listener_id) when is_atom(listener_id), do: listener_id
  defp normalize_listener_id(listener_id) when is_binary(listener_id), do: listener_id
  defp normalize_listener_id(_listener_id), do: :unknown
end
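A sketch of observing the emitted events, assuming Parrhesia.Telemetry.emit/3 forwards to :telemetry.execute/3 (that module is not shown in this diff); the handler id and listener id are illustrative:

    :telemetry.attach(
      "log-listener-population",
      [:parrhesia, :listener, :population],
      fn _event, measurements, metadata, _config ->
        IO.inspect({metadata.listener_id, measurements}, label: "population")
      end,
      nil
    )

    Parrhesia.ConnectionStats.connection_open(:ws_default)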
46
lib/parrhesia/fanout/dispatcher.ex
Normal file
@@ -0,0 +1,46 @@
defmodule Parrhesia.Fanout.Dispatcher do
  @moduledoc """
  Asynchronous local fanout dispatcher.
  """

  use GenServer

  alias Parrhesia.Subscriptions.Index

  @spec start_link(keyword()) :: GenServer.on_start()
  def start_link(opts \\ []) do
    name = Keyword.get(opts, :name, __MODULE__)
    GenServer.start_link(__MODULE__, :ok, name: name)
  end

  @spec dispatch(map()) :: :ok
  def dispatch(event), do: dispatch(__MODULE__, event)

  @spec dispatch(GenServer.server(), map()) :: :ok
  def dispatch(server, event) when is_map(event) do
    GenServer.cast(server, {:dispatch, event})
  end

  @impl true
  def init(:ok), do: {:ok, %{}}

  @impl true
  def handle_cast({:dispatch, event}, state) do
    dispatch_to_candidates(event)
    {:noreply, state}
  end

  defp dispatch_to_candidates(event) do
    case Index.candidate_subscription_keys(event) do
      candidates when is_list(candidates) ->
        Enum.each(candidates, fn {owner_pid, subscription_id} ->
          send(owner_pid, {:fanout_event, subscription_id, event})
        end)

      _other ->
        :ok
    end
  catch
    :exit, _reason -> :ok
  end
end
@@ -5,7 +5,7 @@ defmodule Parrhesia.Fanout.MultiNode do
 
   use GenServer
 
-  alias Parrhesia.Subscriptions.Index
+  alias Parrhesia.Fanout.Dispatcher
 
   @group __MODULE__
 
@@ -44,11 +44,7 @@ defmodule Parrhesia.Fanout.MultiNode do
 
   @impl true
   def handle_info({:remote_fanout_event, event}, state) do
-    Index.candidate_subscription_keys(event)
-    |> Enum.each(fn {owner_pid, subscription_id} ->
-      send(owner_pid, {:fanout_event, subscription_id, event})
-    end)
-
+    Dispatcher.dispatch(event)
     {:noreply, state}
   end
 
@@ -1,52 +1,62 @@
 defmodule Parrhesia.Groups.Flow do
   @moduledoc """
-  Minimal group and membership flow handling for NIP-29/NIP-43 related kinds.
+  Relay access membership projection backed by the shared group storage adapter.
   """
 
   alias Parrhesia.Storage
 
-  @membership_request_kind 8_000
-  @membership_approval_kind 8_001
-  @relay_metadata_kind 28_934
-  @relay_admins_kind 28_935
-  @relay_rules_kind 28_936
-  @membership_event_kind 13_534
+  @relay_access_group_id "__relay_access__"
+  @add_user_kind 8_000
+  @remove_user_kind 8_001
+  @join_request_kind 28_934
+  @invite_request_kind 28_935
+  @leave_request_kind 28_936
+  @membership_list_kind 13_534
 
   @spec handle_event(map()) :: :ok | {:error, term()}
   def handle_event(event) when is_map(event) do
     case Map.get(event, "kind") do
-      @membership_request_kind -> upsert_membership(event, "requested")
-      @membership_approval_kind -> upsert_membership(event, "member")
-      @membership_event_kind -> upsert_membership(event, "member")
-      @relay_metadata_kind -> :ok
-      @relay_admins_kind -> :ok
-      @relay_rules_kind -> :ok
+      @join_request_kind -> put_member(event, membership_pubkey_from_event(event))
+      @leave_request_kind -> delete_member(event, membership_pubkey_from_event(event))
+      @add_user_kind -> put_member(event, tagged_pubkey(event, "p"))
+      @remove_user_kind -> delete_member(event, tagged_pubkey(event, "p"))
+      @membership_list_kind -> replace_membership_snapshot(event)
+      @invite_request_kind -> :ok
       _other -> :ok
     end
   end
 
-  @spec group_related_kind?(non_neg_integer()) :: boolean()
-  def group_related_kind?(kind)
+  @spec relay_access_kind?(non_neg_integer()) :: boolean()
+  def relay_access_kind?(kind)
       when kind in [
-             @membership_request_kind,
-             @membership_approval_kind,
-             @relay_metadata_kind,
-             @relay_admins_kind,
-             @relay_rules_kind,
-             @membership_event_kind
+             @add_user_kind,
+             @remove_user_kind,
+             @join_request_kind,
+             @invite_request_kind,
+             @leave_request_kind,
+             @membership_list_kind
           ],
      do: true
 
-  def group_related_kind?(_kind), do: false
+  def relay_access_kind?(_kind), do: false
 
-  defp upsert_membership(event, role) do
-    with {:ok, group_id} <- group_id_from_event(event),
-         {:ok, pubkey} <- pubkey_from_event(event) do
+  @spec get_membership(binary()) :: {:ok, map() | nil} | {:error, term()}
+  def get_membership(pubkey) when is_binary(pubkey) do
+    Storage.groups().get_membership(%{}, @relay_access_group_id, pubkey)
+  end
+
+  @spec list_memberships() :: {:ok, [map()]} | {:error, term()}
+  def list_memberships do
+    Storage.groups().list_memberships(%{}, @relay_access_group_id)
+  end
+
+  defp put_member(event, {:ok, pubkey}) do
+    with {:ok, metadata} <- membership_metadata(event) do
       Storage.groups().put_membership(%{}, %{
-        group_id: group_id,
+        group_id: @relay_access_group_id,
         pubkey: pubkey,
-        role: role,
-        metadata: %{"source_kind" => Map.get(event, "kind")}
+        role: "member",
+        metadata: metadata
       })
      |> case do
        {:ok, _membership} -> :ok
@@ -55,21 +65,85 @@ defmodule Parrhesia.Groups.Flow do
     end
   end
 
-  defp group_id_from_event(event) do
-    group_id =
-      event
-      |> Map.get("tags", [])
-      |> Enum.find_value(fn
-        ["h", value | _rest] when is_binary(value) and value != "" -> value
-        _tag -> nil
-      end)
+  defp put_member(_event, {:error, reason}), do: {:error, reason}
 
-    case group_id do
-      nil -> {:error, :missing_group_id}
-      value -> {:ok, value}
+  defp delete_member(_event, {:ok, pubkey}) do
+    Storage.groups().delete_membership(%{}, @relay_access_group_id, pubkey)
   end
+
+  defp delete_member(_event, {:error, reason}), do: {:error, reason}
+
+  defp replace_membership_snapshot(event) do
+    with {:ok, tagged_members} <- tagged_pubkeys(event, "member"),
+         {:ok, existing_memberships} <- list_memberships() do
+      incoming_pubkeys = MapSet.new(tagged_members)
+      existing_pubkeys = MapSet.new(Enum.map(existing_memberships, & &1.pubkey))
+
+      remove_members =
+        existing_pubkeys
+        |> MapSet.difference(incoming_pubkeys)
+        |> MapSet.to_list()
+
+      add_members =
+        incoming_pubkeys
+        |> MapSet.to_list()
+
+      :ok = remove_memberships(remove_members)
+      add_memberships(event, add_members)
+    else
+      {:error, reason} -> {:error, reason}
+    end
+  end
 
-  defp pubkey_from_event(%{"pubkey" => pubkey}) when is_binary(pubkey), do: {:ok, pubkey}
-  defp pubkey_from_event(_event), do: {:error, :missing_pubkey}
+  defp membership_pubkey_from_event(%{"pubkey" => pubkey}) when is_binary(pubkey),
+    do: {:ok, pubkey}
+
+  defp membership_pubkey_from_event(_event), do: {:error, :missing_pubkey}
+
+  defp tagged_pubkey(event, tag_name) do
+    event
+    |> tagged_pubkeys(tag_name)
+    |> case do
+      {:ok, [pubkey]} -> {:ok, pubkey}
+      {:ok, []} -> {:error, :missing_pubkey}
+      {:ok, _pubkeys} -> {:error, :invalid_pubkey}
+    end
+  end
+
+  defp tagged_pubkeys(event, tag_name) do
+    pubkeys =
+      event
+      |> Map.get("tags", [])
+      |> Enum.flat_map(fn
+        [^tag_name, pubkey | _rest] when is_binary(pubkey) and pubkey != "" -> [pubkey]
+        _tag -> []
+      end)
+
+    {:ok, Enum.uniq(pubkeys)}
+  end
+
+  defp membership_metadata(event) do
+    {:ok,
+     %{
+       "source_kind" => Map.get(event, "kind"),
+       "source_event_id" => Map.get(event, "id")
+     }}
+  end
+
+  defp remove_memberships(pubkeys) when is_list(pubkeys) do
+    Enum.each(pubkeys, fn pubkey ->
+      :ok = Storage.groups().delete_membership(%{}, @relay_access_group_id, pubkey)
+    end)
+
+    :ok
+  end
+
+  defp add_memberships(event, pubkeys) when is_list(pubkeys) do
+    Enum.reduce_while(pubkeys, :ok, fn pubkey, :ok ->
+      case put_member(event, {:ok, pubkey}) do
+        :ok -> {:cont, :ok}
+        {:error, _reason} = error -> {:halt, error}
+      end
+    end)
+  end
 end
48
lib/parrhesia/http.ex
Normal file
@@ -0,0 +1,48 @@
defmodule Parrhesia.HTTP do
  @moduledoc false

  alias Parrhesia.Metadata

  @default_headers [{"user-agent", Metadata.user_agent()}]

  @spec default_headers() :: [{String.t(), String.t()}]
  def default_headers, do: @default_headers

  @spec get(Keyword.t()) :: {:ok, Req.Response.t()} | {:error, Exception.t()}
  def get(options) when is_list(options) do
    Req.get(put_default_headers(options))
  end

  @spec post(Keyword.t()) :: {:ok, Req.Response.t()} | {:error, Exception.t()}
  def post(options) when is_list(options) do
    Req.post(put_default_headers(options))
  end

  @spec put_default_headers(Keyword.t()) :: Keyword.t()
  def put_default_headers(options) when is_list(options) do
    Keyword.update(options, :headers, @default_headers, &merge_headers(&1, @default_headers))
  end

  defp merge_headers(headers, defaults) do
    existing_names =
      headers
      |> List.wrap()
      |> Enum.reduce(MapSet.new(), fn
        {name, _value}, acc -> MapSet.put(acc, normalize_header_name(name))
        _other, acc -> acc
      end)

    headers ++
      Enum.reject(defaults, fn {name, _value} ->
        MapSet.member?(existing_names, normalize_header_name(name))
      end)
  end

  defp normalize_header_name(name) when is_atom(name) do
    name
    |> Atom.to_string()
    |> String.downcase()
  end

  defp normalize_header_name(name) when is_binary(name), do: String.downcase(name)
end
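The merge keeps caller-supplied headers and appends only the missing defaults, so a caller can always override the user-agent; a small sketch (the URL is illustrative):

    opts =
      Parrhesia.HTTP.put_default_headers(
        url: "https://example.com",
        headers: [{"accept", "application/json"}]
      )

    Keyword.get(opts, :headers)
    #=> [{"accept", "application/json"}, {"user-agent", Parrhesia.Metadata.user_agent()}]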
29
lib/parrhesia/metadata.ex
Normal file
@@ -0,0 +1,29 @@
defmodule Parrhesia.Metadata do
  @moduledoc false

  @metadata Application.compile_env(:parrhesia, :metadata, [])
  @name Keyword.get(@metadata, :name, "Parrhesia")
  @version Keyword.get(@metadata, :version, "0.0.0")
  @hide_version? Keyword.get(@metadata, :hide_version?, true)

  @spec name() :: String.t()
  def name, do: @name

  @spec version() :: String.t()
  def version, do: @version

  @spec hide_version?() :: boolean()
  def hide_version?, do: @hide_version?

  @spec name_and_version() :: String.t()
  def name_and_version, do: "#{@name}/#{@version}"

  @spec user_agent() :: String.t()
  def user_agent do
    if hide_version?() do
      name()
    else
      name_and_version()
    end
  end
end
136
lib/parrhesia/negentropy/engine.ex
Normal file
@@ -0,0 +1,136 @@
defmodule Parrhesia.Negentropy.Engine do
  @moduledoc """
  Relay/client-agnostic negentropy reconciliation engine.
  """

  alias Parrhesia.Negentropy.Message

  @default_id_list_threshold 32

  @type item :: Message.item()

  @spec initial_message([item()], keyword()) :: binary()
  def initial_message(items, opts \\ []) when is_list(opts) do
    normalized_items = normalize_items(items)

    Message.encode([
      describe_range(normalized_items, :infinity, id_list_threshold(opts))
    ])
  end

  @spec answer([item()], binary(), keyword()) :: {:ok, binary()} | {:error, term()}
  def answer(items, incoming_message, opts \\ [])
      when is_binary(incoming_message) and is_list(opts) do
    normalized_items = normalize_items(items)
    threshold = id_list_threshold(opts)

    case Message.decode(incoming_message) do
      {:ok, ranges} ->
        response_ranges =
          respond_to_ranges(normalized_items, ranges, Message.initial_lower_bound(), threshold)

        {:ok, Message.encode(response_ranges)}

      {:unsupported_version, _supported_version} ->
        {:ok, Message.supported_version_message()}

      {:error, reason} ->
        {:error, reason}
    end
  end

  defp respond_to_ranges(_items, [], _lower_bound, _threshold), do: []

  defp respond_to_ranges(items, [range | rest], lower_bound, threshold) do
    upper_bound = Map.fetch!(range, :upper_bound)

    items_in_range =
      Enum.filter(items, fn item ->
        Message.item_in_range?(item, lower_bound, upper_bound)
      end)

    response =
      case range.mode do
        :skip ->
          [%{upper_bound: upper_bound, mode: :skip, payload: nil}]

        :fingerprint ->
          respond_to_fingerprint_range(items_in_range, upper_bound, range.payload, threshold)

        :id_list ->
          respond_to_id_list_range(items_in_range, upper_bound, range.payload, threshold)
      end

    response ++ respond_to_ranges(items, rest, upper_bound, threshold)
  end

  defp respond_to_fingerprint_range(items, upper_bound, remote_fingerprint, threshold) do
    if Message.fingerprint(items) == remote_fingerprint do
      [%{upper_bound: upper_bound, mode: :skip, payload: nil}]
    else
      mismatch_response(items, upper_bound, threshold)
    end
  end

  defp respond_to_id_list_range(items, upper_bound, remote_ids, threshold) do
    if Enum.map(items, & &1.id) == remote_ids do
      [%{upper_bound: upper_bound, mode: :skip, payload: nil}]
    else
      mismatch_response(items, upper_bound, threshold)
    end
  end

  defp mismatch_response(items, upper_bound, threshold) do
    if length(items) <= threshold do
      [%{upper_bound: upper_bound, mode: :id_list, payload: Enum.map(items, & &1.id)}]
    else
      split_response(items, upper_bound, threshold)
    end
  end

  defp split_response(items, upper_bound, threshold) do
    midpoint = div(length(items), 2)
    left_items = Enum.take(items, midpoint)
    right_items = Enum.drop(items, midpoint)

    boundary =
      left_items
      |> List.last()
      |> then(&Message.split_bound(&1, hd(right_items)))

    [
      describe_range(left_items, boundary, threshold),
      describe_range(right_items, upper_bound, threshold)
    ]
  end

  defp describe_range(items, upper_bound, threshold) do
    if length(items) <= threshold do
      %{upper_bound: upper_bound, mode: :id_list, payload: Enum.map(items, & &1.id)}
    else
      %{upper_bound: upper_bound, mode: :fingerprint, payload: Message.fingerprint(items)}
    end
  end

  defp normalize_items(items) do
    items
    |> Enum.map(&normalize_item/1)
    |> Enum.sort(&(Message.compare_items(&1, &2) != :gt))
  end

  defp normalize_item(%{created_at: created_at, id: id})
       when is_integer(created_at) and created_at >= 0 and is_binary(id) and byte_size(id) == 32 do
    %{created_at: created_at, id: id}
  end

  defp normalize_item(item) do
    raise ArgumentError, "invalid negentropy item: #{inspect(item)}"
  end

  defp id_list_threshold(opts) do
    case Keyword.get(opts, :id_list_threshold, @default_id_list_threshold) do
      threshold when is_integer(threshold) and threshold > 0 -> threshold
      _other -> @default_id_list_threshold
    end
  end
end
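A minimal two-sided reconciliation sketch using only the functions above; the items are synthetic, with ids derived via SHA-256 to satisfy the 32-byte requirement:

    make_item = fn ts, seed -> %{created_at: ts, id: :crypto.hash(:sha256, seed)} end

    client_items = [make_item.(1, "a"), make_item.(2, "b")]
    relay_items = [make_item.(1, "a"), make_item.(2, "b"), make_item.(3, "c")]

    # Client opens with its snapshot; the relay answers with the ranges that differ.
    initial = Parrhesia.Negentropy.Engine.initial_message(client_items)
    {:ok, _reply} = Parrhesia.Negentropy.Engine.answer(relay_items, initial)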
349
lib/parrhesia/negentropy/message.ex
Normal file
@@ -0,0 +1,349 @@
defmodule Parrhesia.Negentropy.Message do
  @moduledoc """
  NIP-77 negentropy message codec and helpers.
  """

  import Bitwise

  @protocol_version 0x61
  @id_size 32
  @fingerprint_size 16
  @u256_mod 1 <<< 256
  @zero_id <<0::size(256)>>

  @type item :: %{created_at: non_neg_integer(), id: binary()}
  @type bound :: :infinity | {non_neg_integer(), binary()}
  @type range ::
          %{
            upper_bound: bound(),
            mode: :skip | :fingerprint | :id_list,
            payload: nil | binary() | [binary()]
          }

  @spec protocol_version() :: byte()
  def protocol_version, do: @protocol_version

  @spec supported_version_message() :: binary()
  def supported_version_message, do: <<@protocol_version>>

  @spec decode(binary()) :: {:ok, [range()]} | {:unsupported_version, byte()} | {:error, term()}
  def decode(<<version, _rest::binary>>) when version != @protocol_version,
    do: {:unsupported_version, @protocol_version}

  def decode(<<@protocol_version, rest::binary>>) do
    decode_ranges(rest, 0, initial_lower_bound(), [])
  end

  def decode(_message), do: {:error, :invalid_message}

  @spec encode([range()]) :: binary()
  def encode(ranges) when is_list(ranges) do
    ranges
    |> drop_trailing_skip_ranges()
    |> Enum.reduce({[@protocol_version], 0}, fn range, {acc, previous_timestamp} ->
      {encoded_range, next_timestamp} = encode_range(range, previous_timestamp)
      {[acc, encoded_range], next_timestamp}
    end)
    |> elem(0)
    |> IO.iodata_to_binary()
  end

  @spec fingerprint([item()]) :: binary()
  def fingerprint(items) when is_list(items) do
    sum =
      Enum.reduce(items, 0, fn %{id: id}, acc ->
        <<id_integer::unsigned-little-size(256)>> = id
        rem(acc + id_integer, @u256_mod)
      end)

    payload = [<<sum::unsigned-little-size(256)>>, encode_varint(length(items))]

    payload
    |> IO.iodata_to_binary()
    |> then(&:crypto.hash(:sha256, &1))
    |> binary_part(0, @fingerprint_size)
  end

  @spec compare_items(item(), item()) :: :lt | :eq | :gt
  def compare_items(left, right) do
    cond do
      left.created_at < right.created_at -> :lt
      left.created_at > right.created_at -> :gt
      left.id < right.id -> :lt
      left.id > right.id -> :gt
      true -> :eq
    end
  end

  @spec compare_bound(bound(), bound()) :: :lt | :eq | :gt
  def compare_bound(:infinity, :infinity), do: :eq
  def compare_bound(:infinity, _other), do: :gt
  def compare_bound(_other, :infinity), do: :lt

  def compare_bound({left_timestamp, left_id}, {right_timestamp, right_id}) do
    cond do
      left_timestamp < right_timestamp -> :lt
      left_timestamp > right_timestamp -> :gt
      left_id < right_id -> :lt
      left_id > right_id -> :gt
      true -> :eq
    end
  end

  @spec item_in_range?(item(), bound(), bound()) :: boolean()
  def item_in_range?(item, lower_bound, upper_bound) do
    compare_item_to_bound(item, lower_bound) != :lt and
      compare_item_to_bound(item, upper_bound) == :lt
  end

  @spec initial_lower_bound() :: bound()
  def initial_lower_bound, do: {0, @zero_id}

  @spec zero_id() :: binary()
  def zero_id, do: @zero_id

  @spec split_bound(item(), item()) :: bound()
  def split_bound(previous_item, next_item)
      when is_map(previous_item) and is_map(next_item) do
    cond do
      previous_item.created_at < next_item.created_at ->
        {next_item.created_at, @zero_id}

      previous_item.created_at == next_item.created_at ->
        prefix_length = shared_prefix_length(previous_item.id, next_item.id) + 1
        <<prefix::binary-size(prefix_length), _rest::binary>> = next_item.id
        {next_item.created_at, prefix <> :binary.copy(<<0>>, @id_size - prefix_length)}

      true ->
        raise ArgumentError, "split_bound/2 requires previous_item <= next_item"
    end
  end

  defp decode_ranges(<<>>, _previous_timestamp, _lower_bound, ranges),
    do: {:ok, Enum.reverse(ranges)}

  defp decode_ranges(binary, previous_timestamp, lower_bound, ranges) do
    with {:ok, upper_bound, rest, next_timestamp} <- decode_bound(binary, previous_timestamp),
         :ok <- validate_upper_bound(lower_bound, upper_bound),
         {:ok, mode, payload, tail} <- decode_payload(rest) do
      next_ranges = [%{upper_bound: upper_bound, mode: mode, payload: payload} | ranges]

      if upper_bound == :infinity and tail != <<>> do
        {:error, :invalid_message}
      else
        decode_ranges(tail, next_timestamp, upper_bound, next_ranges)
      end
    end
  end

  defp validate_upper_bound(lower_bound, upper_bound) do
    if compare_bound(lower_bound, upper_bound) == :lt do
      :ok
    else
      {:error, :invalid_message}
    end
  end

  defp decode_bound(binary, previous_timestamp) do
    with {:ok, encoded_timestamp, rest} <- decode_varint(binary),
         {:ok, length, tail} <- decode_varint(rest),
         :ok <- validate_bound_prefix_length(length),
         {:ok, prefix, remainder} <- decode_prefix(tail, length) do
      decode_bound_value(encoded_timestamp, length, prefix, remainder, previous_timestamp)
    end
  end

  defp decode_payload(binary) do
    with {:ok, mode_value, rest} <- decode_varint(binary) do
      case mode_value do
        0 ->
          {:ok, :skip, nil, rest}

        1 ->
          decode_fingerprint_payload(rest)

        2 ->
          decode_id_list_payload(rest)

        _other ->
          {:error, :invalid_message}
      end
    end
  end

  defp decode_varint(binary), do: decode_varint(binary, 0)

  defp decode_varint(<<>>, _acc), do: {:error, :invalid_message}

  defp decode_varint(<<byte, rest::binary>>, acc) do
    value = acc * 128 + band(byte, 0x7F)

    if band(byte, 0x80) == 0 do
      {:ok, value, rest}
    else
      decode_varint(rest, value)
    end
  end

  defp encode_range(range, previous_timestamp) do
    {encoded_bound, next_timestamp} = encode_bound(range.upper_bound, previous_timestamp)
    {mode, payload} = encode_payload(range)
    {[encoded_bound, mode, payload], next_timestamp}
  end

  defp encode_bound(:infinity, previous_timestamp),
    do: {[encode_varint(0), encode_varint(0)], previous_timestamp}

  defp encode_bound({timestamp, id}, previous_timestamp) do
    prefix_length = id_prefix_length(id)
    <<prefix::binary-size(prefix_length), _rest::binary>> = id

    {
      [encode_varint(timestamp - previous_timestamp + 1), encode_varint(prefix_length), prefix],
      timestamp
    }
  end

  defp encode_payload(%{mode: :skip}) do
    {encode_varint(0), <<>>}
  end

  defp encode_payload(%{mode: :fingerprint, payload: fingerprint})
       when is_binary(fingerprint) and byte_size(fingerprint) == @fingerprint_size do
    {encode_varint(1), fingerprint}
  end

  defp encode_payload(%{mode: :id_list, payload: ids}) when is_list(ids) do
    encoded_ids = Enum.map(ids, fn id -> validate_id!(id) end)
    {encode_varint(2), [encode_varint(length(encoded_ids)), encoded_ids]}
  end

  defp encode_varint(value) when is_integer(value) and value >= 0 do
    digits = collect_base128_digits(value, [])
    last_index = length(digits) - 1

    digits
    |> Enum.with_index()
    |> Enum.map(fn {digit, index} ->
      if index == last_index do
        digit
      else
        digit + 128
      end
    end)
    |> :erlang.list_to_binary()
  end

  defp collect_base128_digits(value, acc) do
    quotient = div(value, 128)
    remainder = rem(value, 128)

    if quotient == 0 do
      [remainder | acc]
    else
      collect_base128_digits(quotient, [remainder | acc])
    end
  end

  defp unpack_ids(binary), do: unpack_ids(binary, [])

  defp unpack_ids(<<>>, acc), do: Enum.reverse(acc)

  defp unpack_ids(<<id::binary-size(@id_size), rest::binary>>, acc),
    do: unpack_ids(rest, [id | acc])

  defp decode_prefix(binary, length) when byte_size(binary) >= length do
    <<prefix::binary-size(length), rest::binary>> = binary
    {:ok, prefix, rest}
  end

  defp decode_prefix(_binary, _length), do: {:error, :invalid_message}

  defp decode_bound_value(0, 0, _prefix, remainder, previous_timestamp),
    do: {:ok, :infinity, remainder, previous_timestamp}

  defp decode_bound_value(0, _length, _prefix, _remainder, _previous_timestamp),
    do: {:error, :invalid_message}

  defp decode_bound_value(encoded_timestamp, length, prefix, remainder, previous_timestamp) do
    timestamp = previous_timestamp + encoded_timestamp - 1
    id = prefix <> :binary.copy(<<0>>, @id_size - length)
    {:ok, {timestamp, id}, remainder, timestamp}
  end

  defp decode_fingerprint_payload(<<fingerprint::binary-size(@fingerprint_size), tail::binary>>),
    do: {:ok, :fingerprint, fingerprint, tail}

  defp decode_fingerprint_payload(_payload), do: {:error, :invalid_message}

  defp decode_id_list_payload(rest) do
    with {:ok, count, tail} <- decode_varint(rest),
         {:ok, ids, remainder} <- decode_id_list_bytes(tail, count) do
      {:ok, :id_list, ids, remainder}
    end
  end

  defp decode_id_list_bytes(tail, count) do
    expected_bytes = count * @id_size

    if byte_size(tail) >= expected_bytes do
      <<ids::binary-size(expected_bytes), remainder::binary>> = tail
      {:ok, unpack_ids(ids), remainder}
    else
      {:error, :invalid_message}
    end
  end

  defp validate_bound_prefix_length(length)
       when is_integer(length) and length >= 0 and length <= @id_size,
       do: :ok

  defp validate_bound_prefix_length(_length), do: {:error, :invalid_message}

  defp id_prefix_length(id) do
    id
    |> validate_id!()
    |> :binary.bin_to_list()
    |> Enum.reverse()
    |> Enum.drop_while(&(&1 == 0))
    |> length()
  end

  defp shared_prefix_length(left_id, right_id) do
    left_id = validate_id!(left_id)
    right_id = validate_id!(right_id)

    left_id
    |> :binary.bin_to_list()
    |> Enum.zip(:binary.bin_to_list(right_id))
    |> Enum.reduce_while(0, fn
      {left_byte, right_byte}, acc when left_byte == right_byte -> {:cont, acc + 1}
      _pair, acc -> {:halt, acc}
    end)
  end

  defp drop_trailing_skip_ranges(ranges) do
    ranges
    |> Enum.reverse()
    |> Enum.drop_while(fn range -> range.mode == :skip end)
    |> Enum.reverse()
  end

  defp compare_item_to_bound(_item, :infinity), do: :lt

  defp compare_item_to_bound(item, {timestamp, id}) do
    cond do
      item.created_at < timestamp -> :lt
      item.created_at > timestamp -> :gt
      item.id < id -> :lt
      item.id > id -> :gt
      true -> :eq
    end
  end

  defp validate_id!(id) when is_binary(id) and byte_size(id) == @id_size, do: id

  defp validate_id!(_id) do
    raise ArgumentError, "negentropy ids must be 32-byte binaries"
  end
end
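An encode/decode round trip of a single id-list range, which also exercises the varint and bound codecs above; the id is synthetic:

    alias Parrhesia.Negentropy.Message

    id = :crypto.hash(:sha256, "example")
    ranges = [%{upper_bound: :infinity, mode: :id_list, payload: [id]}]

    encoded = Message.encode(ranges)
    {:ok, [%{mode: :id_list, payload: [^id]}]} = Message.decode(encoded)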
@@ -1,10 +1,13 @@
 defmodule Parrhesia.Negentropy.Sessions do
   @moduledoc """
-  In-memory NEG-* session tracking.
+  In-memory NIP-77 session tracking over bounded local event snapshots.
   """
 
   use GenServer
 
+  alias Parrhesia.Negentropy.Engine
+  alias Parrhesia.Storage
+
   @type session_key :: {pid(), String.t()}
 
   @default_max_payload_bytes 4096
@@ -12,6 +15,8 @@ defmodule Parrhesia.Negentropy.Sessions do
   @default_max_total_sessions 10_000
   @default_max_idle_seconds 60
   @default_sweep_interval_seconds 10
+  @default_max_items_per_session 50_000
+  @default_id_list_threshold 32
   @sweep_idle_sessions :sweep_idle_sessions
 
   @spec start_link(keyword()) :: GenServer.on_start()
@@ -20,16 +25,19 @@ defmodule Parrhesia.Negentropy.Sessions do
     GenServer.start_link(__MODULE__, opts, name: name)
   end
 
-  @spec open(GenServer.server(), pid(), String.t(), map()) :: {:ok, map()} | {:error, term()}
-  def open(server \\ __MODULE__, owner_pid, subscription_id, params)
-      when is_pid(owner_pid) and is_binary(subscription_id) and is_map(params) do
-    GenServer.call(server, {:open, owner_pid, subscription_id, params})
+  @spec open(GenServer.server(), pid(), String.t(), map(), binary(), keyword()) ::
+          {:ok, binary()} | {:error, term()}
+  def open(server \\ __MODULE__, owner_pid, subscription_id, filter, message, opts \\ [])
+      when is_pid(owner_pid) and is_binary(subscription_id) and is_map(filter) and
+             is_binary(message) and is_list(opts) do
+    GenServer.call(server, {:open, owner_pid, subscription_id, filter, message, opts})
   end
 
-  @spec message(GenServer.server(), pid(), String.t(), map()) :: {:ok, map()} | {:error, term()}
-  def message(server \\ __MODULE__, owner_pid, subscription_id, payload)
-      when is_pid(owner_pid) and is_binary(subscription_id) and is_map(payload) do
-    GenServer.call(server, {:message, owner_pid, subscription_id, payload})
+  @spec message(GenServer.server(), pid(), String.t(), binary()) ::
+          {:ok, binary()} | {:error, term()}
+  def message(server \\ __MODULE__, owner_pid, subscription_id, message)
+      when is_pid(owner_pid) and is_binary(subscription_id) and is_binary(message) do
+    GenServer.call(server, {:message, owner_pid, subscription_id, message})
   end
 
   @spec close(GenServer.server(), pid(), String.t()) :: :ok
@@ -63,7 +71,17 @@ defmodule Parrhesia.Negentropy.Sessions do
       max_total_sessions:
         normalize_positive_integer(Keyword.get(opts, :max_total_sessions), max_total_sessions()),
       max_idle_ms: max_idle_ms,
-      sweep_interval_ms: sweep_interval_ms
+      sweep_interval_ms: sweep_interval_ms,
+      max_items_per_session:
+        normalize_positive_integer(
+          Keyword.get(opts, :max_items_per_session),
+          max_items_per_session()
+        ),
+      id_list_threshold:
+        normalize_positive_integer(
+          Keyword.get(opts, :id_list_threshold),
+          id_list_threshold()
+        )
     }
 
     :ok = schedule_idle_sweep(sweep_interval_ms)
@@ -72,16 +90,19 @@ defmodule Parrhesia.Negentropy.Sessions do
   end
 
   @impl true
-  def handle_call({:open, owner_pid, subscription_id, params}, _from, state) do
+  def handle_call({:open, owner_pid, subscription_id, filter, message, opts}, _from, state) do
     key = {owner_pid, subscription_id}
 
-    with :ok <- validate_payload_size(params, state.max_payload_bytes),
-         :ok <- enforce_session_limits(state, owner_pid, key) do
+    with :ok <- validate_payload_size(filter, message, state.max_payload_bytes),
+         :ok <- enforce_session_limits(state, owner_pid, key),
+         {:ok, refs} <- fetch_event_refs(filter, opts, state.max_items_per_session),
+         {:ok, response} <-
+           Engine.answer(refs, message, id_list_threshold: state.id_list_threshold) do
       now_ms = System.monotonic_time(:millisecond)
 
       session = %{
-        cursor: 0,
-        params: params,
+        filter: filter,
+        refs: refs,
         opened_at: System.system_time(:second),
         last_active_at_ms: now_ms
       }
@@ -91,14 +112,14 @@ defmodule Parrhesia.Negentropy.Sessions do
        |> ensure_monitor(owner_pid)
        |> put_in([:sessions, key], session)
 
-      {:reply, {:ok, %{"status" => "open", "cursor" => 0}}, state}
+      {:reply, {:ok, response}, state}
     else
       {:error, reason} ->
        {:reply, {:error, reason}, state}
     end
   end
 
-  def handle_call({:message, owner_pid, subscription_id, payload}, _from, state) do
+  def handle_call({:message, owner_pid, subscription_id, message}, _from, state) do
     key = {owner_pid, subscription_id}
 
     case Map.get(state.sessions, key) do
@@ -106,20 +127,18 @@ defmodule Parrhesia.Negentropy.Sessions do
         {:reply, {:error, :unknown_session}, state}
 
       session ->
-        case validate_payload_size(payload, state.max_payload_bytes) do
-          :ok ->
-            cursor = session.cursor + 1
+        with :ok <- validate_payload_size(session.filter, message, state.max_payload_bytes),
+             {:ok, response} <-
+               Engine.answer(session.refs, message, id_list_threshold: state.id_list_threshold) do
+          next_session = %{
+            session
+            | last_active_at_ms: System.monotonic_time(:millisecond)
+          }
 
-            next_session = %{
-              session
-              | cursor: cursor,
-                last_active_at_ms: System.monotonic_time(:millisecond)
-            }
-
-            state = put_in(state, [:sessions, key], next_session)
-
-            {:reply, {:ok, %{"status" => "ack", "cursor" => cursor}}, state}
+          state = put_in(state, [:sessions, key], next_session)
+
+          {:reply, {:ok, response}, state}
+        else
          {:error, reason} ->
            {:reply, {:error, reason}, state}
        end
@@ -185,6 +204,21 @@ defmodule Parrhesia.Negentropy.Sessions do
 
   def handle_info(_message, state), do: {:noreply, state}
 
+  defp fetch_event_refs(filter, opts, max_items_per_session) do
+    query_opts =
+      opts
+      |> Keyword.take([:now, :requester_pubkeys])
+      |> Keyword.put(:limit, max_items_per_session + 1)
+
+    with {:ok, refs} <- Storage.events().query_event_refs(%{}, [filter], query_opts) do
+      if length(refs) > max_items_per_session do
+        {:error, :query_too_big}
+      else
+        {:ok, refs}
+      end
+    end
+  end
+
   defp clear_monitors_without_sessions(state, owner_pids) do
     Enum.reduce(Map.keys(state.monitors), state, fn owner_pid, acc ->
       if MapSet.member?(owner_pids, owner_pid) do
@@ -203,8 +237,8 @@ defmodule Parrhesia.Negentropy.Sessions do
     end)
   end
 
-  defp validate_payload_size(payload, max_payload_bytes) do
-    if :erlang.external_size(payload) <= max_payload_bytes do
+  defp validate_payload_size(filter, message, max_payload_bytes) do
+    if :erlang.external_size({filter, message}) <= max_payload_bytes do
       :ok
     else
       {:error, :payload_too_large}
@@ -296,6 +330,18 @@ defmodule Parrhesia.Negentropy.Sessions do
     |> Keyword.get(:negentropy_session_sweep_interval_seconds, @default_sweep_interval_seconds)
   end
 
+  defp max_items_per_session do
+    :parrhesia
+    |> Application.get_env(:limits, [])
+    |> Keyword.get(:max_negentropy_items_per_session, @default_max_items_per_session)
+  end
+
+  defp id_list_threshold do
+    :parrhesia
+    |> Application.get_env(:limits, [])
+    |> Keyword.get(:negentropy_id_list_threshold, @default_id_list_threshold)
+  end
+
   defp normalize_positive_integer(value, _default) when is_integer(value) and value > 0,
     do: value
 
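A sketch of the reshaped session flow, assuming the Sessions server is running under the app's supervision tree and a storage adapter is configured (neither is shown here); the subscription id and filter are illustrative:

    alias Parrhesia.Negentropy.{Engine, Sessions}

    filter = %{"kinds" => [1]}
    # `client_items` stands in for the client's local snapshot.
    initial = Engine.initial_message(client_items)

    {:ok, _reply} = Sessions.open(self(), "neg-1", filter, initial)
    :ok = Sessions.close(self(), "neg-1")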
389
lib/parrhesia/nip43.ex
Normal file
@@ -0,0 +1,389 @@
defmodule Parrhesia.NIP43 do
  @moduledoc false

  alias Parrhesia.API.Events
  alias Parrhesia.API.Identity
  alias Parrhesia.API.RequestContext
  alias Parrhesia.Groups.Flow
  alias Parrhesia.Protocol
  alias Parrhesia.Protocol.Filter

  @join_request_kind 28_934
  @invite_request_kind 28_935
  @leave_request_kind 28_936
  @add_user_kind 8_000
  @remove_user_kind 8_001
  @membership_list_kind 13_534
  @claim_token_kind 31_943
  @default_invite_ttl_seconds 900

  @type publish_state ::
          :ok
          | %{action: :join, duplicate?: boolean(), message: String.t()}
          | %{action: :leave, duplicate?: boolean(), message: String.t()}

  @spec enabled?(keyword()) :: boolean()
  def enabled?(opts \\ []) do
    config(opts)
    |> Keyword.get(:enabled, true)
    |> Kernel.==(true)
  end

  @spec prepare_publish(map(), keyword()) :: {:ok, publish_state()} | {:error, term()}
  def prepare_publish(event, opts \\ []) when is_map(event) and is_list(opts) do
    if enabled?(opts) do
      prepare_enabled_publish(event, opts)
    else
      prepare_disabled_publish(event)
    end
  end

  @spec finalize_publish(map(), publish_state(), keyword()) :: :ok | {:ok, String.t()}
  def finalize_publish(event, publish_state, opts \\ [])

  def finalize_publish(event, :ok, _opts) when is_map(event) do
    case Map.get(event, "kind") do
      kind when kind in [@add_user_kind, @remove_user_kind, @membership_list_kind] ->
        Flow.handle_event(event)

      _other ->
        :ok
    end
  end

  def finalize_publish(event, %{action: :join, duplicate?: true, message: message}, _opts)
      when is_map(event) do
    {:ok, message}
  end

  def finalize_publish(event, %{action: :join, duplicate?: false, message: message}, opts)
      when is_map(event) do
    opts = Keyword.put_new(opts, :now, Map.get(event, "created_at"))
    :ok = Flow.handle_event(event)
    publish_membership_events(Map.get(event, "pubkey"), :add, opts)
    {:ok, message}
  end

  def finalize_publish(event, %{action: :leave, duplicate?: true, message: message}, _opts)
      when is_map(event) do
    {:ok, message}
  end

  def finalize_publish(event, %{action: :leave, duplicate?: false, message: message}, opts)
      when is_map(event) do
    opts = Keyword.put_new(opts, :now, Map.get(event, "created_at"))
    :ok = Flow.handle_event(event)
    publish_membership_events(Map.get(event, "pubkey"), :remove, opts)
    {:ok, message}
  end

  @spec dynamic_events([map()], keyword()) :: [map()]
  def dynamic_events(filters, opts \\ []) when is_list(filters) and is_list(opts) do
    if enabled?(opts) and requests_invite?(filters) do
      filters
      |> build_invite_event(opts)
      |> maybe_wrap_event()
    else
      []
    end
  end

  @spec dynamic_count([map()], keyword()) :: non_neg_integer()
  def dynamic_count(filters, opts \\ []) do
    filters
    |> dynamic_events(opts)
    |> length()
  end

  defp prepare_enabled_publish(%{"kind" => @join_request_kind, "pubkey" => pubkey} = event, opts)
       when is_binary(pubkey) do
    with {:ok, _claim} <- validate_claim_from_event(event),
         {:ok, membership} <- Flow.get_membership(pubkey) do
      if membership_active?(membership) do
        {:ok,
         %{
           action: :join,
           duplicate?: true,
           message: "duplicate: you are already a member of this relay."
         }}
      else
        {:ok,
         %{
           action: :join,
           duplicate?: false,
           message: "info: welcome to #{relay_url(opts)}!"
         }}
      end
    end
  end

  defp prepare_enabled_publish(%{"kind" => @leave_request_kind, "pubkey" => pubkey}, _opts)
       when is_binary(pubkey) do
    with {:ok, membership} <- Flow.get_membership(pubkey) do
      if membership_active?(membership) do
        {:ok, %{action: :leave, duplicate?: false, message: "info: membership revoked."}}
      else
        {:ok,
         %{
           action: :leave,
           duplicate?: true,
           message: "duplicate: you are not a member of this relay."
         }}
      end
    end
  end

  defp prepare_enabled_publish(%{"kind" => @invite_request_kind}, _opts) do
    {:error, "restricted: kind 28935 invite claims are generated via REQ"}
  end

  defp prepare_enabled_publish(%{"kind" => kind, "pubkey" => pubkey}, _opts)
       when kind in [@add_user_kind, @remove_user_kind, @membership_list_kind] and
              is_binary(pubkey) do
    case relay_pubkey() do
      {:ok, ^pubkey} -> {:ok, :ok}
      {:ok, _other} -> {:error, "restricted: relay access metadata must be relay-signed"}
      {:error, _reason} -> {:error, "error: relay identity unavailable"}
    end
  end

  defp prepare_enabled_publish(_event, _opts), do: {:ok, :ok}

  defp prepare_disabled_publish(%{"kind" => kind})
       when kind in [
              @join_request_kind,
              @invite_request_kind,
              @leave_request_kind,
              @add_user_kind,
              @remove_user_kind,
              @membership_list_kind
            ] do
    {:error, "blocked: NIP-43 relay access requests are disabled"}
  end

  defp prepare_disabled_publish(_event), do: {:ok, :ok}

  defp build_invite_event(filters, opts) do
    now = Keyword.get(opts, :now, System.system_time(:second))
    identity_opts = identity_opts(opts)

    with {:ok, claim} <- issue_claim(now, opts),
         {:ok, signed_event} <-
           %{
             "created_at" => now,
             "kind" => @invite_request_kind,
             "tags" => [["-"], ["claim", claim]],
             "content" => ""
           }
           |> Identity.sign_event(identity_opts),
         true <- Filter.matches_any?(signed_event, filters) do
      {:ok, signed_event}
    else
      _other -> :error
    end
  end

  defp maybe_wrap_event({:ok, event}), do: [event]
  defp maybe_wrap_event(_other), do: []

  defp requests_invite?(filters) do
    Enum.any?(filters, fn filter ->
      case Map.get(filter, "kinds") do
        kinds when is_list(kinds) -> @invite_request_kind in kinds
        _other -> false
      end
    end)
  end

  defp issue_claim(now, opts) do
    ttl_seconds =
      config(opts)
      |> Keyword.get(:invite_ttl_seconds, @default_invite_ttl_seconds)
      |> normalize_positive_integer(@default_invite_ttl_seconds)

    identity_opts = identity_opts(opts)

    token_event = %{
      "created_at" => now,
      "kind" => @claim_token_kind,
      "tags" => [["exp", Integer.to_string(now + ttl_seconds)]],
      "content" => Base.encode16(:crypto.strong_rand_bytes(16), case: :lower)
    }

    with {:ok, signed_token} <- Identity.sign_event(token_event, identity_opts) do
      signed_token
      |> JSON.encode!()
      |> Base.url_encode64(padding: false)
      |> then(&{:ok, &1})
    end
  end

  defp validate_claim_from_event(event) do
    claim =
      event
      |> Map.get("tags", [])
      |> Enum.find_value(fn
        ["claim", value | _rest] when is_binary(value) and value != "" -> value
        _tag -> nil
      end)

    case claim do
      nil -> {:error, "restricted: that is an invalid invite code."}
      value -> validate_claim(value)
    end
  end

  defp validate_claim(claim) when is_binary(claim) do
    with {:ok, payload} <- Base.url_decode64(claim, padding: false),
         {:ok, decoded} <- JSON.decode(payload),
         :ok <- Protocol.validate_event(decoded),
         :ok <- validate_claim_token(decoded) do
      {:ok, decoded}
    else
      {:error, :expired_claim} ->
        {:error, "restricted: that invite code is expired."}

      _other ->
        {:error, "restricted: that is an invalid invite code."}
    end
  end

  defp validate_claim(_claim), do: {:error, "restricted: that is an invalid invite code."}

  defp validate_claim_token(%{
         "kind" => @claim_token_kind,
         "pubkey" => pubkey,
         "tags" => tags
       }) do
    with {:ok, relay_pubkey} <- relay_pubkey(),
         true <- pubkey == relay_pubkey,
         {:ok, expires_at} <- fetch_expiration(tags),
         true <- expires_at >= System.system_time(:second) do
      :ok
    else
      false -> {:error, :invalid_claim}
      {:error, _reason} -> {:error, :invalid_claim}
    end
  end

  defp validate_claim_token(_event), do: {:error, :invalid_claim}

  defp fetch_expiration(tags) when is_list(tags) do
    case Enum.find(tags, &match?(["exp", _value | _rest], &1)) do
      ["exp", value | _rest] ->
        parse_expiration(value)

      _other ->
        {:error, :invalid_claim}
    end
  end

  defp parse_expiration(value) when is_binary(value) do
    case Integer.parse(value) do
      {expires_at, ""} when expires_at > 0 -> validate_expiration(expires_at)
      _other -> {:error, :invalid_claim}
    end
  end

  defp parse_expiration(_value), do: {:error, :invalid_claim}

  defp validate_expiration(expires_at) when is_integer(expires_at) do
    if expires_at >= System.system_time(:second) do
      {:ok, expires_at}
    else
      {:error, :expired_claim}
    end
  end

  defp validate_expiration(_expires_at), do: {:error, :expired_claim}

  defp publish_membership_events(member_pubkey, action, opts) when is_binary(member_pubkey) do
    now = Keyword.get(opts, :now, System.system_time(:second))
    identity_opts = identity_opts(opts)
    context = Keyword.get(opts, :context, %RequestContext{})

    action
    |> build_membership_delta_event(member_pubkey, now)
    |> sign_and_publish(context, identity_opts)

    current_membership_snapshot(now)
    |> sign_and_publish(context, identity_opts)

    :ok
  end

  defp build_membership_delta_event(:add, member_pubkey, now) do
    %{
      "created_at" => now,
      "kind" => @add_user_kind,
      "tags" => [["-"], ["p", member_pubkey]],
      "content" => ""
    }
  end

  defp build_membership_delta_event(:remove, member_pubkey, now) do
    %{
      "created_at" => now,
      "kind" => @remove_user_kind,
      "tags" => [["-"], ["p", member_pubkey]],
      "content" => ""
    }
  end

  defp current_membership_snapshot(now) do
    tags =
      case Flow.list_memberships() do
        {:ok, memberships} ->
          [["-"] | Enum.map(memberships, &["member", &1.pubkey])]

        {:error, _reason} ->
          [["-"]]
      end

    %{
      "created_at" => now,
      "kind" => @membership_list_kind,
      "tags" => tags,
      "content" => ""
|
||||
}
|
||||
end
|
||||
|
||||
defp sign_and_publish(unsigned_event, context, identity_opts) do
|
||||
with {:ok, signed_event} <- Identity.sign_event(unsigned_event, identity_opts),
|
||||
{:ok, %{accepted: true}} <- Events.publish(signed_event, context: context) do
|
||||
:ok
|
||||
else
|
||||
_other -> :ok
|
||||
end
|
||||
end
|
||||
|
||||
defp membership_active?(nil), do: false
|
||||
defp membership_active?(%{role: "member"}), do: true
|
||||
defp membership_active?(_membership), do: false
|
||||
|
||||
defp relay_pubkey do
|
||||
case Identity.get() do
|
||||
{:ok, %{pubkey: pubkey}} when is_binary(pubkey) -> {:ok, pubkey}
|
||||
{:error, reason} -> {:error, reason}
|
||||
end
|
||||
end
|
||||
|
||||
defp relay_url(opts) do
|
||||
Keyword.get(opts, :relay_url, Application.get_env(:parrhesia, :relay_url))
|
||||
end
|
||||
|
||||
defp identity_opts(opts) do
|
||||
opts
|
||||
|> Keyword.take([:path, :private_key, :configured_private_key])
|
||||
end
|
||||
|
||||
defp config(opts) do
|
||||
case Keyword.get(opts, :config) do
|
||||
config when is_list(config) -> config
|
||||
_other -> Application.get_env(:parrhesia, :nip43, [])
|
||||
end
|
||||
end
|
||||
|
||||
defp normalize_positive_integer(value, _default) when is_integer(value) and value > 0, do: value
|
||||
defp normalize_positive_integer(_value, default), do: default
|
||||
end
|
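For context on the claim format above: `issue_claim/2` signs a short-lived token event, JSON-encodes it, and URL-safe base64-encodes it without padding; `validate_claim/1` reverses those steps before checking the signature and expiry. A minimal sketch of that round trip — the kind value and field contents are stand-ins, not the module's actual attributes:

```elixir
# Stand-in for the relay-signed claim-token event; real tokens also carry
# "id"/"pubkey"/"sig" fields produced by Identity.sign_event/2.
signed_token = %{
  "kind" => 12_345,
  "created_at" => 1_750_000_000,
  "tags" => [["exp", "1750000300"]],
  "content" => "f3a1..."
}

# Encoding, as in issue_claim/2:
claim =
  signed_token
  |> JSON.encode!()
  |> Base.url_encode64(padding: false)

# Decoding, as in validate_claim/1:
{:ok, payload} = Base.url_decode64(claim, padding: false)
{:ok, decoded} = JSON.decode(payload)
decoded == signed_token
#=> true
```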
400
lib/parrhesia/nip66.ex
Normal file
@@ -0,0 +1,400 @@
defmodule Parrhesia.NIP66 do
  @moduledoc false

  alias Parrhesia.API.Events
  alias Parrhesia.API.Identity
  alias Parrhesia.API.RequestContext
  alias Parrhesia.NIP66.Probe
  alias Parrhesia.Web.Listener
  alias Parrhesia.Web.RelayInfo

  @default_publish_interval_seconds 900
  @default_timeout_ms 5_000
  @default_checks [:open, :read, :nip11]
  @allowed_requirement_keys MapSet.new(~w[auth writes pow payment])

  @spec enabled?(keyword()) :: boolean()
  def enabled?(opts \\ []) do
    config = config(opts)
    config_enabled?(config) and active_targets(config, listeners(opts)) != []
  end

  @spec publish_snapshot(keyword()) :: {:ok, [map()]}
  def publish_snapshot(opts \\ []) when is_list(opts) do
    config = config(opts)
    targets = active_targets(config, listeners(opts))

    if config_enabled?(config) and targets != [] do
      probe_fun = Keyword.get(opts, :probe_fun, &Probe.probe/3)
      context = Keyword.get(opts, :context, %RequestContext{})
      now = Keyword.get(opts, :now, System.system_time(:second))
      identity_opts = identity_opts(opts)

      events =
        maybe_publish_monitor_announcement(config, now, context, identity_opts)
        |> Kernel.++(
          publish_discovery_events(targets, config, probe_fun, now, context, identity_opts)
        )

      {:ok, events}
    else
      {:ok, []}
    end
  end

  @spec publish_interval_ms(keyword()) :: pos_integer()
  def publish_interval_ms(opts \\ []) when is_list(opts) do
    config = config(opts)

    config
    |> Keyword.get(:publish_interval_seconds, @default_publish_interval_seconds)
    |> normalize_positive_integer(@default_publish_interval_seconds)
    |> Kernel.*(1_000)
  end

  defp maybe_publish_monitor_announcement(config, now, context, identity_opts) do
    if Keyword.get(config, :publish_monitor_announcement?, true) do
      config
      |> build_monitor_announcement(now)
      |> sign_and_publish(context, identity_opts)
      |> maybe_wrap_event()
    else
      []
    end
  end

  defp publish_discovery_events(targets, config, probe_fun, now, context, identity_opts) do
    probe_opts = [
      timeout_ms:
        config
        |> Keyword.get(:timeout_ms, @default_timeout_ms)
        |> normalize_positive_integer(@default_timeout_ms),
      checks: normalize_checks(Keyword.get(config, :checks, @default_checks))
    ]

    Enum.flat_map(targets, fn target ->
      probe_result =
        case probe_fun.(target, probe_opts, identity_opts) do
          {:ok, result} when is_map(result) -> result
          _other -> %{checks: [], metrics: %{}, relay_info: nil, relay_info_body: nil}
        end

      target
      |> build_discovery_event(now, probe_result, identity_opts)
      |> sign_and_publish(context, identity_opts)
      |> maybe_wrap_event()
    end)
  end

  defp sign_and_publish(event, context, identity_opts) do
    with {:ok, signed_event} <- Identity.sign_event(event, identity_opts),
         {:ok, %{accepted: true}} <- Events.publish(signed_event, context: context) do
      {:ok, signed_event}
    else
      _other -> :error
    end
  end

  defp maybe_wrap_event({:ok, event}), do: [event]
  defp maybe_wrap_event(_other), do: []

  defp build_monitor_announcement(config, now) do
    checks = normalize_checks(Keyword.get(config, :checks, @default_checks))
    timeout_ms = Keyword.get(config, :timeout_ms, @default_timeout_ms)
    frequency = Keyword.get(config, :publish_interval_seconds, @default_publish_interval_seconds)

    tags =
      [
        [
          "frequency",
          Integer.to_string(
            normalize_positive_integer(frequency, @default_publish_interval_seconds)
          )
        ]
      ] ++
        Enum.map(checks, fn check ->
          ["timeout", Atom.to_string(check), Integer.to_string(timeout_ms)]
        end) ++
        Enum.map(checks, fn check -> ["c", Atom.to_string(check)] end) ++
        maybe_geohash_tag(config)

    %{
      "created_at" => now,
      "kind" => 10_166,
      "tags" => tags,
      "content" => ""
    }
  end

  defp build_discovery_event(target, now, probe_result, identity_opts) do
    relay_info = probe_result[:relay_info] || local_relay_info(target.listener, identity_opts)
    content = probe_result[:relay_info_body] || JSON.encode!(relay_info)

    tags =
      [["d", target.relay_url]]
      |> append_network_tag(target)
      |> append_relay_type_tag(target)
      |> append_geohash_tag(target)
      |> append_topic_tags(target)
      |> Kernel.++(nip_tags(relay_info))
      |> Kernel.++(requirement_tags(relay_info))
      |> Kernel.++(rtt_tags(probe_result[:metrics] || %{}))

    %{
      "created_at" => now,
      "kind" => 30_166,
      "tags" => tags,
      "content" => content
    }
  end

  defp nip_tags(relay_info) do
    relay_info
    |> Map.get("supported_nips", [])
    |> Enum.map(&["N", Integer.to_string(&1)])
  end

  defp requirement_tags(relay_info) do
    limitation = Map.get(relay_info, "limitation", %{})

    [
      requirement_value("auth", Map.get(limitation, "auth_required", false)),
      requirement_value("writes", Map.get(limitation, "restricted_writes", false)),
      requirement_value("pow", Map.get(limitation, "min_pow_difficulty", 0) > 0),
      requirement_value("payment", Map.get(limitation, "payment_required", false))
    ]
    |> Enum.filter(&MapSet.member?(@allowed_requirement_keys, String.trim_leading(&1, "!")))
    |> Enum.map(&["R", &1])
  end

  defp requirement_value(name, true), do: name
  defp requirement_value(name, false), do: "!" <> name

  defp rtt_tags(metrics) when is_map(metrics) do
    []
    |> maybe_put_metric_tag("rtt-open", Map.get(metrics, :rtt_open_ms))
    |> maybe_put_metric_tag("rtt-read", Map.get(metrics, :rtt_read_ms))
    |> maybe_put_metric_tag("rtt-write", Map.get(metrics, :rtt_write_ms))
  end

  defp append_network_tag(tags, target) do
    case target.network do
      nil -> tags
      value -> tags ++ [["n", value]]
    end
  end

  defp append_relay_type_tag(tags, target) do
    case target.relay_type do
      nil -> tags
      value -> tags ++ [["T", value]]
    end
  end

  defp append_geohash_tag(tags, target) do
    case target.geohash do
      nil -> tags
      value -> tags ++ [["g", value]]
    end
  end

  defp append_topic_tags(tags, target) do
    tags ++ Enum.map(target.topics, &["t", &1])
  end

  defp maybe_put_metric_tag(tags, _name, nil), do: tags

  defp maybe_put_metric_tag(tags, name, value) when is_integer(value) and value >= 0 do
    tags ++ [[name, Integer.to_string(value)]]
  end

  defp maybe_put_metric_tag(tags, _name, _value), do: tags

  defp local_relay_info(listener, identity_opts) do
    relay_info = RelayInfo.document(listener)

    case Identity.get(identity_opts) do
      {:ok, %{pubkey: pubkey}} ->
        relay_info
        |> Map.put("pubkey", pubkey)
        |> Map.put("self", pubkey)

      {:error, _reason} ->
        relay_info
    end
  end

  defp maybe_geohash_tag(config) do
    case fetch_value(config, :geohash) do
      value when is_binary(value) and value != "" -> [["g", value]]
      _other -> []
    end
  end

  defp active_targets(config, listeners) do
    listeners_by_id = Map.new(listeners, &{&1.id, &1})

    raw_targets =
      case Keyword.get(config, :targets, []) do
        [] -> [default_target()]
        targets when is_list(targets) -> targets
        _other -> []
      end

    Enum.flat_map(raw_targets, fn raw_target ->
      case normalize_target(raw_target, listeners_by_id) do
        {:ok, target} -> [target]
        :error -> []
      end
    end)
  end

  defp normalize_target(target, listeners_by_id) when is_map(target) or is_list(target) do
    listener_id = fetch_value(target, :listener) || :public
    relay_url = fetch_value(target, :relay_url) || Application.get_env(:parrhesia, :relay_url)

    with %{} = listener <- Map.get(listeners_by_id, normalize_listener_id(listener_id)),
         true <- listener.enabled and Listener.feature_enabled?(listener, :nostr),
         {:ok, normalized_relay_url} <- normalize_relay_url(relay_url) do
      {:ok,
       %{
         listener: listener,
         relay_url: normalized_relay_url,
         network: normalize_network(fetch_value(target, :network), normalized_relay_url),
         relay_type: normalize_optional_string(fetch_value(target, :relay_type)),
         geohash: normalize_optional_string(fetch_value(target, :geohash)),
         topics: normalize_string_list(fetch_value(target, :topics))
       }}
    else
      _other -> :error
    end
  end

  defp normalize_target(_target, _listeners_by_id), do: :error

  defp normalize_relay_url(relay_url) when is_binary(relay_url) and relay_url != "" do
    case URI.parse(relay_url) do
      %URI{scheme: scheme, host: host} = uri
      when scheme in ["ws", "wss"] and is_binary(host) and host != "" ->
        normalized_uri = %URI{
          uri
          | scheme: String.downcase(scheme),
            host: String.downcase(host),
            path: normalize_path(uri.path),
            query: nil,
            fragment: nil,
            port: normalize_port(uri.port, scheme)
        }

        {:ok, URI.to_string(normalized_uri)}

      _other ->
        :error
    end
  end

  defp normalize_relay_url(_relay_url), do: :error
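The normalization above is easiest to see on concrete inputs; a few illustrative results (hostnames are examples):

```elixir
# normalize_relay_url/1 on example inputs:
# "wss://Relay.Example.COM:443/sub?q=1#top" -> {:ok, "wss://relay.example.com/sub"}
#   (host downcased, default port dropped, query/fragment stripped)
# "ws://relay.example.com:80"               -> {:ok, "ws://relay.example.com/"}
#   (empty path normalized to "/", port 80 dropped for ws)
# "https://relay.example.com"               -> :error (only ws/wss pass the guard)
```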
  defp normalize_path(nil), do: "/"
  defp normalize_path(""), do: "/"
  defp normalize_path(path), do: path

  defp normalize_port(80, "ws"), do: nil
  defp normalize_port(443, "wss"), do: nil
  defp normalize_port(port, _scheme), do: port

  defp normalize_network(value, _relay_url)
       when is_binary(value) and value in ["clearnet", "tor", "i2p", "loki"],
       do: value

  defp normalize_network(_value, relay_url) do
    relay_url
    |> URI.parse()
    |> Map.get(:host)
    |> infer_network()
  end

  defp infer_network(host) when is_binary(host) do
    cond do
      String.ends_with?(host, ".onion") -> "tor"
      String.ends_with?(host, ".i2p") -> "i2p"
      true -> "clearnet"
    end
  end

  defp infer_network(_host), do: "clearnet"

  defp normalize_checks(checks) when is_list(checks) do
    checks
    |> Enum.map(&normalize_check/1)
    |> Enum.reject(&is_nil/1)
    |> Enum.uniq()
  end

  defp normalize_checks(_checks), do: @default_checks

  defp normalize_check(:open), do: :open
  defp normalize_check("open"), do: :open
  defp normalize_check(:read), do: :read
  defp normalize_check("read"), do: :read
  defp normalize_check(:nip11), do: :nip11
  defp normalize_check("nip11"), do: :nip11
  defp normalize_check(_check), do: nil

  defp listeners(opts) do
    case Keyword.get(opts, :listeners) do
      listeners when is_list(listeners) -> listeners
      _other -> Listener.all()
    end
  end

  defp identity_opts(opts) do
    opts
    |> Keyword.take([:path, :private_key, :configured_private_key])
  end

  defp config(opts) do
    case Keyword.get(opts, :config) do
      config when is_list(config) -> config
      _other -> Application.get_env(:parrhesia, :nip66, [])
    end
  end

  defp config_enabled?(config), do: Keyword.get(config, :enabled, true)

  defp default_target do
    %{listener: :public, relay_url: Application.get_env(:parrhesia, :relay_url)}
  end

  defp normalize_listener_id(value) when is_atom(value), do: value

  defp normalize_listener_id(value) when is_binary(value) do
    String.to_existing_atom(value)
  rescue
    ArgumentError -> :public
  end

  defp normalize_listener_id(_value), do: :public

  defp normalize_positive_integer(value, _default) when is_integer(value) and value > 0, do: value
  defp normalize_positive_integer(_value, default), do: default

  defp normalize_optional_string(value) when is_binary(value) and value != "", do: value
  defp normalize_optional_string(_value), do: nil

  defp normalize_string_list(values) when is_list(values) do
    Enum.filter(values, &(is_binary(&1) and &1 != ""))
  end

  defp normalize_string_list(_values), do: []

  defp fetch_value(map, key) when is_map(map) do
    Map.get(map, key) || Map.get(map, Atom.to_string(key))
  end

  defp fetch_value(list, key) when is_list(list) do
    if Keyword.keyword?(list), do: Keyword.get(list, key), else: nil
  end

  defp fetch_value(_container, _key), do: nil
end
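Since `config/1` falls back to the `:nip66` application environment, a deployment would configure the module roughly as below; the key names mirror the `Keyword.get`/`fetch_value` reads above, while every value is illustrative:

```elixir
# Illustrative only; values are examples, not defaults beyond those in the module.
config :parrhesia, :nip66,
  enabled: true,
  publish_interval_seconds: 900,
  timeout_ms: 5_000,
  checks: [:open, :read, :nip11],
  geohash: "u4pruyd",
  targets: [
    %{listener: :public, relay_url: "wss://relay.example.com/", topics: ["nostr"]}
  ]
```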
218
lib/parrhesia/nip66/probe.ex
Normal file
@@ -0,0 +1,218 @@
defmodule Parrhesia.NIP66.Probe do
  @moduledoc false

  alias Parrhesia.HTTP
  alias Parrhesia.Sync.Transport.WebSockexClient

  @type result :: %{
          checks: [atom()],
          metrics: map(),
          relay_info: map() | nil,
          relay_info_body: String.t() | nil
        }

  @spec probe(map(), keyword(), keyword()) :: {:ok, result()}
  def probe(target, opts \\ [], publish_opts \\ [])

  def probe(target, opts, _publish_opts) when is_map(target) and is_list(opts) do
    timeout_ms = Keyword.get(opts, :timeout_ms, 5_000)
    checks = normalize_checks(Keyword.get(opts, :checks, [:open, :read, :nip11]))

    initial = %{checks: [], metrics: %{}, relay_info: nil, relay_info_body: nil}

    result =
      Enum.reduce(checks, initial, fn check, acc ->
        merge_probe_result(acc, check_result(check, target, timeout_ms))
      end)

    {:ok, result}
  end

  def probe(_target, _opts, _publish_opts),
    do: {:ok, %{checks: [], metrics: %{}, relay_info: nil, relay_info_body: nil}}

  defp merge_probe_result(acc, %{check: check, metric_key: metric_key, metric_value: metric_value}) do
    acc
    |> Map.update!(:checks, &[check | &1])
    |> Map.update!(:metrics, &Map.put(&1, metric_key, metric_value))
  end

  defp merge_probe_result(acc, %{
         check: check,
         relay_info: relay_info,
         relay_info_body: relay_info_body
       }) do
    acc
    |> Map.update!(:checks, &[check | &1])
    |> Map.put(:relay_info, relay_info)
    |> Map.put(:relay_info_body, relay_info_body)
  end

  defp merge_probe_result(acc, :skip), do: acc
  defp merge_probe_result(acc, {:error, _reason}), do: acc

  defp check_result(:open, target, timeout_ms) do
    case measure_websocket_connect(Map.fetch!(target, :relay_url), timeout_ms) do
      {:ok, metric_value} ->
        %{check: :open, metric_key: :rtt_open_ms, metric_value: metric_value}

      {:error, reason} ->
        {:error, reason}
    end
  end

  defp check_result(:read, %{listener: listener} = target, timeout_ms) do
    if listener.auth.nip42_required do
      :skip
    else
      case measure_websocket_read(Map.fetch!(target, :relay_url), timeout_ms) do
        {:ok, metric_value} ->
          %{check: :read, metric_key: :rtt_read_ms, metric_value: metric_value}

        {:error, reason} ->
          {:error, reason}
      end
    end
  end

  defp check_result(:nip11, target, timeout_ms) do
    case fetch_nip11(Map.fetch!(target, :relay_url), timeout_ms) do
      {:ok, relay_info, relay_info_body, _metric_value} ->
        %{check: :nip11, relay_info: relay_info, relay_info_body: relay_info_body}

      {:error, reason} ->
        {:error, reason}
    end
  end

  defp check_result(_check, _target, _timeout_ms), do: :skip

  defp measure_websocket_connect(relay_url, timeout_ms) do
    with {:ok, websocket} <- connect(relay_url, timeout_ms),
         {:ok, metric_value} <- await_connected(websocket, timeout_ms) do
      :ok = WebSockexClient.close(websocket)
      {:ok, metric_value}
    end
  end

  defp measure_websocket_read(relay_url, timeout_ms) do
    with {:ok, websocket} <- connect(relay_url, timeout_ms),
         {:ok, started_at} <- await_connected_started_at(websocket, timeout_ms),
         :ok <- WebSockexClient.send_json(websocket, ["COUNT", "nip66-probe", %{"kinds" => [1]}]),
         {:ok, metric_value} <- await_count_response(websocket, timeout_ms, started_at) do
      :ok = WebSockexClient.close(websocket)
      {:ok, metric_value}
    end
  end

  defp connect(relay_url, timeout_ms) do
    server = %{url: relay_url, tls: tls_config(relay_url)}

    WebSockexClient.connect(self(), server, websocket_opts: [timeout: timeout_ms, protocols: nil])
  end

  defp await_connected(websocket, timeout_ms) do
    with {:ok, started_at} <- await_connected_started_at(websocket, timeout_ms) do
      {:ok, monotonic_duration_ms(started_at)}
    end
  end

  defp await_connected_started_at(websocket, timeout_ms) do
    started_at = System.monotonic_time()

    receive do
      {:sync_transport, ^websocket, :connected, _metadata} -> {:ok, started_at}
      {:sync_transport, ^websocket, :disconnected, reason} -> {:error, reason}
    after
      timeout_ms -> {:error, :timeout}
    end
  end

  defp await_count_response(websocket, timeout_ms, started_at) do
    receive do
      {:sync_transport, ^websocket, :frame, ["COUNT", "nip66-probe", _payload]} ->
        {:ok, monotonic_duration_ms(started_at)}

      {:sync_transport, ^websocket, :frame, ["CLOSED", "nip66-probe", _message]} ->
        {:error, :closed}

      {:sync_transport, ^websocket, :disconnected, reason} ->
        {:error, reason}
    after
      timeout_ms -> {:error, :timeout}
    end
  end

  defp fetch_nip11(relay_url, timeout_ms) do
    started_at = System.monotonic_time()

    case HTTP.get(
           url: relay_info_url(relay_url),
           headers: [{"accept", "application/nostr+json"}],
           decode_body: false,
           connect_options: [timeout: timeout_ms],
           receive_timeout: timeout_ms
         ) do
      {:ok, %Req.Response{status: 200, body: body}} when is_binary(body) ->
        case JSON.decode(body) do
          {:ok, relay_info} when is_map(relay_info) ->
            {:ok, relay_info, body, monotonic_duration_ms(started_at)}

          {:error, reason} ->
            {:error, reason}

          _other ->
            {:error, :invalid_relay_info}
        end

      {:ok, %Req.Response{status: status}} ->
        {:error, {:relay_info_request_failed, status}}

      {:error, reason} ->
        {:error, reason}
    end
  end

  defp relay_info_url(relay_url) do
    relay_url
    |> URI.parse()
    |> Map.update!(:scheme, fn
      "wss" -> "https"
      "ws" -> "http"
    end)
    |> URI.to_string()
  end

  defp tls_config(relay_url) do
    case URI.parse(relay_url) do
      %URI{scheme: "wss", host: host} when is_binary(host) and host != "" ->
        %{mode: :required, hostname: host, pins: []}

      _other ->
        %{mode: :disabled}
    end
  end

  defp normalize_checks(checks) when is_list(checks) do
    checks
    |> Enum.map(&normalize_check/1)
    |> Enum.reject(&is_nil/1)
    |> Enum.uniq()
  end

  defp normalize_checks(_checks), do: []

  defp normalize_check(:open), do: :open
  defp normalize_check("open"), do: :open
  defp normalize_check(:read), do: :read
  defp normalize_check("read"), do: :read
  defp normalize_check(:nip11), do: :nip11
  defp normalize_check("nip11"), do: :nip11
  defp normalize_check(_check), do: nil

  defp monotonic_duration_ms(started_at) do
    System.monotonic_time()
    |> Kernel.-(started_at)
    |> System.convert_time_unit(:native, :millisecond)
  end
end
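`monotonic_duration_ms/1` measures elapsed time in native units and converts once at the end, which keeps the RTT metrics immune to wall-clock adjustments. The same pattern in isolation:

```elixir
started_at = System.monotonic_time()
Process.sleep(25)

elapsed_ms =
  System.monotonic_time()
  |> Kernel.-(started_at)
  |> System.convert_time_unit(:native, :millisecond)

# elapsed_ms is roughly 25; monotonic time never jumps backwards,
# unlike System.system_time/0.
```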
68
lib/parrhesia/policy/connection_policy.ex
Normal file
@@ -0,0 +1,68 @@
defmodule Parrhesia.Policy.ConnectionPolicy do
  @moduledoc """
  Connection/session-level policy checks shared by websocket and management entrypoints.
  """

  alias Parrhesia.Storage

  @spec authorize_remote_ip(tuple() | String.t() | nil) :: :ok | {:error, :ip_blocked}
  def authorize_remote_ip(remote_ip) do
    case normalize_ip(remote_ip) do
      nil ->
        :ok

      normalized_ip ->
        case Storage.moderation().ip_blocked?(%{}, normalized_ip) do
          {:ok, true} -> {:error, :ip_blocked}
          _other -> :ok
        end
    end
  end

  @spec authorize_authenticated_pubkey(String.t()) :: :ok | {:error, :pubkey_not_allowed}
  def authorize_authenticated_pubkey(pubkey) when is_binary(pubkey) do
    if allowlist_active?() do
      case Storage.moderation().pubkey_allowed?(%{}, pubkey) do
        {:ok, true} -> :ok
        _other -> {:error, :pubkey_not_allowed}
      end
    else
      :ok
    end
  end

  @spec authorize_authenticated_pubkeys(MapSet.t(String.t())) ::
          :ok | {:error, :auth_required | :pubkey_not_allowed}
  def authorize_authenticated_pubkeys(authenticated_pubkeys) do
    if allowlist_active?() do
      cond do
        MapSet.size(authenticated_pubkeys) == 0 ->
          {:error, :auth_required}

        Enum.any?(authenticated_pubkeys, &(authorize_authenticated_pubkey(&1) == :ok)) ->
          :ok

        true ->
          {:error, :pubkey_not_allowed}
      end
    else
      :ok
    end
  end

  defp allowlist_active? do
    case Storage.moderation().has_allowed_pubkeys?(%{}) do
      {:ok, true} -> true
      _other -> false
    end
  end

  defp normalize_ip(nil), do: nil
  defp normalize_ip({_, _, _, _} = remote_ip), do: :inet.ntoa(remote_ip) |> to_string()

  defp normalize_ip({_, _, _, _, _, _, _, _} = remote_ip),
    do: :inet.ntoa(remote_ip) |> to_string()

  defp normalize_ip(remote_ip) when is_binary(remote_ip), do: remote_ip
  defp normalize_ip(_remote_ip), do: nil
end
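A sketch of how an entrypoint might consume these checks — the call sites are hypothetical, but the argument and return shapes match the specs above (note `normalize_ip/1` accepts `:inet` tuples as well as strings):

```elixir
alias Parrhesia.Policy.ConnectionPolicy

# IP gate at connection time.
case ConnectionPolicy.authorize_remote_ip({127, 0, 0, 1}) do
  :ok -> :continue
  {:error, :ip_blocked} -> :close_connection
end

# Allowlist gate once NIP-42 auth has been processed ("abc..." is a stand-in pubkey).
case ConnectionPolicy.authorize_authenticated_pubkeys(MapSet.new(["abc..."])) do
  :ok -> :continue
  {:error, :auth_required} -> :send_auth_challenge
  {:error, :pubkey_not_allowed} -> :close_connection
end
```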
@@ -3,11 +3,17 @@ defmodule Parrhesia.Policy.EventPolicy do
  Write/read policy checks for relay operations.
  """

  alias Parrhesia.API.ACL
  alias Parrhesia.API.RequestContext
  alias Parrhesia.Policy.ConnectionPolicy
  alias Parrhesia.Storage

  @type policy_error ::
          :auth_required
          | :pubkey_not_allowed
          | :restricted_giftwrap
          | :sync_read_not_allowed
          | :sync_write_not_allowed
          | :marmot_group_h_tag_required
          | :marmot_group_h_values_exceeded
          | :marmot_group_filter_window_too_wide
@@ -33,15 +39,31 @@ defmodule Parrhesia.Policy.EventPolicy do

  @spec authorize_read([map()], MapSet.t(String.t())) :: :ok | {:error, policy_error()}
  def authorize_read(filters, authenticated_pubkeys) when is_list(filters) do
    authorize_read(filters, authenticated_pubkeys, request_context(authenticated_pubkeys))
  end

  @spec authorize_read([map()], MapSet.t(String.t()), RequestContext.t()) ::
          :ok | {:error, policy_error()}
  def authorize_read(filters, authenticated_pubkeys, %RequestContext{} = context)
      when is_list(filters) do
    auth_required? = config_bool([:policies, :auth_required_for_reads], false)

    cond do
      match?(
        {:error, _reason},
        ConnectionPolicy.authorize_authenticated_pubkeys(authenticated_pubkeys)
      ) ->
        ConnectionPolicy.authorize_authenticated_pubkeys(authenticated_pubkeys)

      auth_required? and MapSet.size(authenticated_pubkeys) == 0 ->
        {:error, :auth_required}

      giftwrap_restricted?(filters, authenticated_pubkeys) ->
        {:error, :restricted_giftwrap}

      match?({:error, _reason}, authorize_sync_reads(filters, context)) ->
        authorize_sync_reads(filters, context)

      true ->
        enforce_marmot_group_read_guardrails(filters)
    end
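A REQ handler built on this would typically run the check and, on failure, turn the error atom into the user-facing string via `error_message/1`. A hedged sketch (the surrounding handler and the `filters`/`authenticated_pubkeys` bindings are stand-ins):

```elixir
case Parrhesia.Policy.EventPolicy.authorize_read(filters, authenticated_pubkeys) do
  :ok -> :run_query
  {:error, reason} -> {:closed, Parrhesia.Policy.EventPolicy.error_message(reason)}
end
```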
@@ -49,8 +71,17 @@ defmodule Parrhesia.Policy.EventPolicy do

  @spec authorize_write(map(), MapSet.t(String.t())) :: :ok | {:error, policy_error()}
  def authorize_write(event, authenticated_pubkeys) when is_map(event) do
    authorize_write(event, authenticated_pubkeys, request_context(authenticated_pubkeys))
  end

  @spec authorize_write(map(), MapSet.t(String.t()), RequestContext.t()) ::
          :ok | {:error, policy_error()}
  def authorize_write(event, authenticated_pubkeys, %RequestContext{} = context)
      when is_map(event) do
    checks = [
      fn -> ConnectionPolicy.authorize_authenticated_pubkeys(authenticated_pubkeys) end,
      fn -> maybe_require_auth_for_write(authenticated_pubkeys) end,
      fn -> authorize_sync_write(event, context) end,
      fn -> reject_if_pubkey_banned(event) end,
      fn -> reject_if_event_banned(event) end,
      fn -> enforce_pow(event) end,
@@ -69,10 +100,17 @@ defmodule Parrhesia.Policy.EventPolicy do

  @spec error_message(policy_error()) :: String.t()
  def error_message(:auth_required), do: "auth-required: authentication required"
  def error_message(:pubkey_not_allowed), do: "restricted: authenticated pubkey is not allowed"

  def error_message(:restricted_giftwrap),
    do: "restricted: giftwrap access requires recipient authentication"

  def error_message(:sync_read_not_allowed),
    do: "restricted: sync read not allowed for authenticated pubkey"

  def error_message(:sync_write_not_allowed),
    do: "restricted: sync write not allowed for authenticated pubkey"

  def error_message(:marmot_group_h_tag_required),
    do: "restricted: kind 445 queries must include a #h tag"
@@ -143,6 +181,19 @@ defmodule Parrhesia.Policy.EventPolicy do
    end
  end

  defp authorize_sync_reads(filters, %RequestContext{} = context) do
    Enum.reduce_while(filters, :ok, fn filter, :ok ->
      case ACL.check(:sync_read, filter, context: context) do
        :ok -> {:cont, :ok}
        {:error, reason} -> {:halt, {:error, reason}}
      end
    end)
  end

  defp authorize_sync_write(event, %RequestContext{} = context) do
    ACL.check(:sync_write, event, context: context)
  end

  defp giftwrap_restricted?(filters, authenticated_pubkeys) do
    if MapSet.size(authenticated_pubkeys) == 0 do
      any_filter_targets_giftwrap?(filters)
@@ -635,19 +686,29 @@ defmodule Parrhesia.Policy.EventPolicy do
        _tag -> false
      end)

    cond do
      not protected? ->
        :ok

      nip43_relay_access_kind?(Map.get(event, "kind")) ->
        :ok

      true ->
        pubkey = Map.get(event, "pubkey")

        cond do
          MapSet.size(authenticated_pubkeys) == 0 -> {:error, :protected_event_requires_auth}
          MapSet.member?(authenticated_pubkeys, pubkey) -> :ok
          true -> {:error, :protected_event_pubkey_mismatch}
        end
    end
  end

  defp nip43_relay_access_kind?(kind) when kind in [8_000, 8_001, 13_534, 28_934, 28_935, 28_936],
    do: true

  defp nip43_relay_access_kind?(_kind), do: false

  defp config_bool([scope, key], default) do
    case Application.get_env(:parrhesia, scope, []) |> Keyword.get(key, default) do
      true -> true
@@ -672,4 +733,8 @@ defmodule Parrhesia.Policy.EventPolicy do
      default
    end
  end

  defp request_context(authenticated_pubkeys) do
    %RequestContext{authenticated_pubkeys: authenticated_pubkeys}
  end
end
73
lib/parrhesia/postgres_repos.ex
Normal file
@@ -0,0 +1,73 @@
defmodule Parrhesia.PostgresRepos do
  @moduledoc false

  alias Parrhesia.Config
  alias Parrhesia.ReadRepo
  alias Parrhesia.Repo

  @spec write() :: module()
  def write, do: Repo

  @spec read() :: module()
  def read do
    if separate_read_pool_enabled?() and is_pid(Process.whereis(ReadRepo)) do
      ReadRepo
    else
      Repo
    end
  end

  @spec started_repos() :: [module()]
  def started_repos do
    cond do
      not postgres_enabled?() ->
        []

      separate_read_pool_enabled?() ->
        [Repo, ReadRepo]

      true ->
        [Repo]
    end
  end

  @spec postgres_enabled?() :: boolean()
  def postgres_enabled? do
    case Process.whereis(Config) do
      pid when is_pid(pid) ->
        Config.get([:storage, :backend], storage_backend_default()) == :postgres

      nil ->
        storage_backend_default() == :postgres
    end
  end

  @spec separate_read_pool_enabled?() :: boolean()
  def separate_read_pool_enabled? do
    case {postgres_enabled?(), Process.whereis(Config)} do
      {false, _pid} ->
        false

      {true, pid} when is_pid(pid) ->
        Config.get(
          [:database, :separate_read_pool?],
          application_default(:separate_read_pool?, false)
        )

      {true, nil} ->
        application_default(:separate_read_pool?, false)
    end
  end

  defp application_default(key, default) do
    :parrhesia
    |> Application.get_env(:database, [])
    |> Keyword.get(key, default)
  end

  defp storage_backend_default do
    :parrhesia
    |> Application.get_env(:storage, [])
    |> Keyword.get(:backend, :postgres)
  end
end
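Callers pick a repo through these functions rather than referencing `Repo`/`ReadRepo` directly, so read traffic silently falls back to the primary when no replica pool is running. A hedged usage sketch (`query` and `changeset` are stand-ins for ordinary Ecto values):

```elixir
# Reads: replica pool if enabled and started, otherwise the primary.
events = Parrhesia.PostgresRepos.read().all(query)

# Writes: always the primary.
{:ok, _event} = Parrhesia.PostgresRepos.write().insert(changeset)
```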
@@ -1 +1,4 @@
Postgrex.Types.define(Parrhesia.PostgresTypes, [],
  json: JSON,
  moduledoc: "Custom Postgrex type definitions used by `Parrhesia.Repo` and `Parrhesia.ReadRepo`."
)
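A types module defined this way is normally wired into the repo configuration so both pools use it; a typical, purely illustrative snippet:

```elixir
# Illustrative wiring for a custom Postgrex types module.
config :parrhesia, Parrhesia.Repo,
  types: Parrhesia.PostgresTypes
```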
@@ -1,6 +1,15 @@
defmodule Parrhesia.Protocol do
  @moduledoc """
  Nostr protocol message decode/encode helpers.

  This module is transport-oriented: it turns websocket payloads into structured tuples and
  back again.

  For programmatic API calls inside the application, prefer the `Parrhesia.API.*` modules.
  In particular:

  - `validate_event/1` returns user-facing error strings
  - `Parrhesia.API.Auth.validate_event/1` returns machine-friendly validator atoms
  """

  alias Parrhesia.Protocol.EventValidator
@@ -14,8 +23,8 @@ defmodule Parrhesia.Protocol do
          | {:close, String.t()}
          | {:auth, event()}
          | {:count, String.t(), [filter()], map()}
          | {:neg_open, String.t(), filter(), binary()}
          | {:neg_msg, String.t(), binary()}
          | {:neg_close, String.t()}

  @type relay_message ::
@@ -26,7 +35,8 @@ defmodule Parrhesia.Protocol do
          | {:event, String.t(), event()}
          | {:auth, String.t()}
          | {:count, String.t(), map()}
          | {:neg_msg, String.t(), String.t()}
          | {:neg_err, String.t(), String.t()}

  @type decode_error ::
          :invalid_json
@@ -40,6 +50,9 @@ defmodule Parrhesia.Protocol do

  @count_options_keys MapSet.new(["hll", "approximate"])

  @doc """
  Decodes a client websocket payload into a structured protocol tuple.
  """
  @spec decode_client(binary()) :: {:ok, client_message()} | {:error, decode_error()}
  def decode_client(payload) when is_binary(payload) do
    with {:ok, decoded} <- decode_json(payload) do
@@ -47,6 +60,9 @@ defmodule Parrhesia.Protocol do
    end
  end

  @doc """
  Validates an event and returns relay-facing error strings.
  """
  @spec validate_event(event()) :: :ok | {:error, String.t()}
  def validate_event(event) do
    case EventValidator.validate(event) do
@@ -55,6 +71,9 @@ defmodule Parrhesia.Protocol do
    end
  end

  @doc """
  Encodes a relay message tuple into the JSON frame sent to clients.
  """
  @spec encode_relay(relay_message()) :: binary()
  def encode_relay(message) do
    message
@@ -62,6 +81,9 @@ defmodule Parrhesia.Protocol do
    |> JSON.encode!()
  end

  @doc """
  Converts a decode error into the relay notice string that should be sent to a client.
  """
  @spec decode_error_notice(decode_error()) :: String.t()
  def decode_error_notice(reason) do
    case reason do
@@ -122,21 +144,25 @@ defmodule Parrhesia.Protocol do

  defp decode_message(["AUTH", _invalid]), do: {:error, :invalid_auth}

  defp decode_message(["NEG-OPEN", subscription_id, filter, initial_message])
       when is_binary(subscription_id) and is_map(filter) and is_binary(initial_message) do
    with true <- valid_subscription_id?(subscription_id),
         {:ok, decoded_message} <- decode_negentropy_hex(initial_message) do
      {:ok, {:neg_open, subscription_id, filter, decoded_message}}
    else
      false -> {:error, :invalid_subscription_id}
      {:error, _reason} -> {:error, :invalid_negentropy}
    end
  end

  defp decode_message(["NEG-MSG", subscription_id, payload])
       when is_binary(subscription_id) and is_binary(payload) do
    with true <- valid_subscription_id?(subscription_id),
         {:ok, decoded_payload} <- decode_negentropy_hex(payload) do
      {:ok, {:neg_msg, subscription_id, decoded_payload}}
    else
      false -> {:error, :invalid_subscription_id}
      {:error, _reason} -> {:error, :invalid_negentropy}
    end
  end
@@ -215,7 +241,19 @@ defmodule Parrhesia.Protocol do
  defp relay_frame({:neg_msg, subscription_id, payload}),
    do: ["NEG-MSG", subscription_id, payload]

  defp relay_frame({:neg_err, subscription_id, reason}),
    do: ["NEG-ERR", subscription_id, reason]

  defp valid_subscription_id?(subscription_id) do
    subscription_id != "" and String.length(subscription_id) <= 64
  end

  defp decode_negentropy_hex(payload) when is_binary(payload) and payload != "" do
    case Base.decode16(payload, case: :mixed) do
      {:ok, decoded} when decoded != <<>> -> {:ok, decoded}
      _other -> {:error, :invalid_negentropy}
    end
  end

  defp decode_negentropy_hex(_payload), do: {:error, :invalid_negentropy}
end
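Negentropy frames travel as hex strings on the wire; `decode_negentropy_hex/1` accepts mixed-case hex and rejects empty frames. The underlying stdlib behavior it leans on:

```elixir
Base.decode16("61B1", case: :mixed)
#=> {:ok, <<97, 177>>}

Base.decode16("xyz", case: :mixed)
#=> :error

# "" decodes to {:ok, ""}, which is why the function also guards on
# payload != "" and on decoded != <<>>.
```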
@@ -6,6 +6,14 @@ defmodule Parrhesia.Protocol.EventValidator do
  @required_fields ~w[id pubkey created_at kind tags content sig]
  @max_kind 65_535
  @default_max_event_future_skew_seconds 900
  @default_max_tags_per_event 256
  @default_nip43_request_max_age_seconds 300
  @verify_event_signatures_locked Application.compile_env(
                                    :parrhesia,
                                    [:features, :verify_event_signatures_locked?],
                                    false
                                  )

  @supported_mls_ciphersuites MapSet.new(~w[0x0001 0x0002 0x0003 0x0004 0x0005 0x0006 0x0007])
  @required_mls_extensions MapSet.new(["0xf2ee", "0x000a"])
  @supported_keypackage_ref_sizes [32, 48, 64]
@@ -17,6 +25,7 @@ defmodule Parrhesia.Protocol.EventValidator do
          | :invalid_created_at
          | :created_at_too_far_in_future
          | :invalid_kind
          | :too_many_tags
          | :invalid_tags
          | :invalid_content
          | :invalid_sig
@@ -44,6 +53,22 @@ defmodule Parrhesia.Protocol.EventValidator do
          | :missing_marmot_group_tag
          | :invalid_marmot_group_tag
          | :invalid_marmot_group_content
          | :missing_nip66_d_tag
          | :invalid_nip66_d_tag
          | :invalid_nip66_discovery_tag
          | :missing_nip66_frequency_tag
          | :invalid_nip66_frequency_tag
          | :invalid_nip66_timeout_tag
          | :invalid_nip66_check_tag
          | :missing_nip43_protected_tag
          | :missing_nip43_claim_tag
          | :invalid_nip43_claim_tag
          | :missing_nip43_member_tag
          | :invalid_nip43_member_tag
          | :missing_nip43_pubkey_tag
          | :invalid_nip43_pubkey_tag
          | :stale_nip43_join_request
          | :stale_nip43_leave_request

  @spec validate(map()) :: :ok | {:error, error_reason()}
  def validate(event) when is_map(event) do
@@ -87,6 +112,7 @@ defmodule Parrhesia.Protocol.EventValidator do
    created_at_too_far_in_future:
      "invalid: event creation date is too far off from the current time",
    invalid_kind: "invalid: kind must be an integer between 0 and 65535",
    too_many_tags: "invalid: event tags exceed configured limit",
    invalid_tags: "invalid: tags must be an array of non-empty string arrays",
    invalid_content: "invalid: content must be a string",
    invalid_sig: "invalid: sig must be 64-byte lowercase hex",
@@ -127,7 +153,35 @@ defmodule Parrhesia.Protocol.EventValidator do
    missing_marmot_group_tag: "invalid: kind 445 must include at least one h tag with a group id",
    invalid_marmot_group_tag:
      "invalid: kind 445 h tags must contain 32-byte lowercase hex group ids",
    invalid_marmot_group_content: "invalid: kind 445 content must be non-empty base64",
    missing_nip66_d_tag:
      "invalid: kind 30166 must include a single [\"d\", <normalized ws/wss url or relay pubkey>] tag",
    invalid_nip66_d_tag:
      "invalid: kind 30166 must include a single [\"d\", <normalized ws/wss url or relay pubkey>] tag",
    invalid_nip66_discovery_tag: "invalid: kind 30166 includes malformed NIP-66 discovery tags",
    missing_nip66_frequency_tag:
      "invalid: kind 10166 must include a single [\"frequency\", <seconds>] tag",
    invalid_nip66_frequency_tag:
      "invalid: kind 10166 must include a single [\"frequency\", <seconds>] tag",
    invalid_nip66_timeout_tag:
      "invalid: kind 10166 timeout tags must be [\"timeout\", <check>, <ms>]",
    invalid_nip66_check_tag: "invalid: kind 10166 c tags must contain lowercase check names",
    missing_nip43_protected_tag:
      "invalid: NIP-43 events must include a NIP-70 protected [\"-\"] tag",
    missing_nip43_claim_tag:
      "invalid: kinds 28934 and 28935 must include a single [\"claim\", <invite code>] tag",
    invalid_nip43_claim_tag:
      "invalid: kinds 28934 and 28935 must include a single [\"claim\", <invite code>] tag",
    missing_nip43_member_tag:
      "invalid: kind 13534 must include at least one [\"member\", <hex pubkey>] tag",
    invalid_nip43_member_tag:
      "invalid: kind 13534 member tags must contain lowercase hex pubkeys",
    missing_nip43_pubkey_tag:
      "invalid: kinds 8000 and 8001 must include a single [\"p\", <hex pubkey>] tag",
    invalid_nip43_pubkey_tag:
      "invalid: kinds 8000 and 8001 must include a single [\"p\", <hex pubkey>] tag",
    stale_nip43_join_request: "invalid: kind 28934 created_at must be recent",
    stale_nip43_leave_request: "invalid: kind 28936 created_at must be recent"
  }

  @spec error_message(error_reason()) :: String.t()
@@ -169,16 +223,25 @@ defmodule Parrhesia.Protocol.EventValidator do
  defp validate_kind(kind) when is_integer(kind) and kind >= 0 and kind <= @max_kind, do: :ok
  defp validate_kind(_kind), do: {:error, :invalid_kind}

  defp validate_tags(tags) when is_list(tags), do: validate_tags(tags, max_tags_per_event(), 0)

  defp validate_tags(_tags), do: {:error, :invalid_tags}

  defp validate_tags([], _max_tags, _count), do: :ok

  defp validate_tags([tag | rest], max_tags, count) do
    cond do
      count + 1 > max_tags ->
        {:error, :too_many_tags}

      valid_tag?(tag) ->
        validate_tags(rest, max_tags, count + 1)

      true ->
        {:error, :invalid_tags}
    end
  end

  defp validate_content(content) when is_binary(content), do: :ok
  defp validate_content(_content), do: {:error, :invalid_content}
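The rewritten `validate_tags` walks the list once, counting as it goes, so an oversized tag list fails fast instead of being shape-checked in full. A trace under an assumed limit of 2:

```elixir
# validate_tags([["p", "ab"], ["e", "cd"], ["t", "nostr"]], 2, 0) proceeds:
#   count 0 -> ["p", "ab"] is a valid tag, recurse with count 1
#   count 1 -> ["e", "cd"] is a valid tag, recurse with count 2
#   count 2 -> 2 + 1 > 2, halt with {:error, :too_many_tags}
#     (the third tag is never shape-checked)
```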
@@ -197,7 +260,7 @@ defmodule Parrhesia.Protocol.EventValidator do
  end

  defp validate_signature(event) do
    if @verify_event_signatures_locked or verify_event_signatures?() do
      verify_signature(event)
    else
      :ok
@@ -240,6 +303,27 @@ defmodule Parrhesia.Protocol.EventValidator do
  defp validate_kind_specific(%{"kind" => 1059} = event),
    do: validate_giftwrap_event(event)

  defp validate_kind_specific(%{"kind" => 30_166} = event),
    do: validate_nip66_discovery_event(event)

  defp validate_kind_specific(%{"kind" => 10_166} = event),
    do: validate_nip66_monitor_announcement(event)

  defp validate_kind_specific(%{"kind" => 13_534} = event),
    do: validate_nip43_membership_list(event)

  defp validate_kind_specific(%{"kind" => kind} = event) when kind in [8_000, 8_001],
    do: validate_nip43_membership_delta(event)

  defp validate_kind_specific(%{"kind" => 28_934} = event),
    do: validate_nip43_join_request(event)

  defp validate_kind_specific(%{"kind" => 28_935} = event),
    do: validate_nip43_invite_response(event)

  defp validate_kind_specific(%{"kind" => 28_936} = event),
    do: validate_nip43_leave_request(event)

  defp validate_kind_specific(_event), do: :ok

  defp validate_marmot_keypackage_event(event) do
@@ -313,6 +397,184 @@ defmodule Parrhesia.Protocol.EventValidator do
    end
  end

  defp validate_nip66_discovery_event(event) do
    tags = Map.get(event, "tags", [])

    with :ok <- validate_nip66_d_tag(tags),
         :ok <-
           validate_optional_single_string_tag_with_predicate(
             tags,
             "n",
             :invalid_nip66_discovery_tag,
             &(&1 in ["clearnet", "tor", "i2p", "loki"])
           ),
         :ok <-
           validate_optional_single_string_tag_with_predicate(
             tags,
             "T",
             :invalid_nip66_discovery_tag,
             &valid_pascal_case?/1
           ),
         :ok <-
           validate_optional_single_string_tag_with_predicate(
             tags,
             "g",
             :invalid_nip66_discovery_tag,
             &non_empty_string?/1
           ),
         :ok <-
           validate_optional_repeated_tag(
             tags,
             "N",
             &positive_integer_string?/1,
             :invalid_nip66_discovery_tag
           ),
         :ok <-
           validate_optional_repeated_tag(
             tags,
             "R",
             &valid_nip66_requirement_value?/1,
             :invalid_nip66_discovery_tag
           ),
         :ok <-
           validate_optional_repeated_tag(
             tags,
             "k",
             &valid_nip66_kind_value?/1,
             :invalid_nip66_discovery_tag
           ),
         :ok <-
           validate_optional_repeated_tag(
             tags,
             "t",
             &non_empty_string?/1,
             :invalid_nip66_discovery_tag
           ),
         :ok <-
           validate_optional_single_string_tag_with_predicate(
             tags,
             "rtt-open",
             :invalid_nip66_discovery_tag,
             &positive_integer_string?/1
           ),
         :ok <-
           validate_optional_single_string_tag_with_predicate(
             tags,
             "rtt-read",
             :invalid_nip66_discovery_tag,
             &positive_integer_string?/1
           ) do
      validate_optional_single_string_tag_with_predicate(
        tags,
        "rtt-write",
        :invalid_nip66_discovery_tag,
        &positive_integer_string?/1
      )
    end
  end

  defp validate_nip66_monitor_announcement(event) do
    tags = Map.get(event, "tags", [])

    with :ok <-
           validate_single_string_tag_with_predicate(
             tags,
             "frequency",
             :missing_nip66_frequency_tag,
             :invalid_nip66_frequency_tag,
             &positive_integer_string?/1
           ),
         :ok <- validate_optional_repeated_timeout_tags(tags),
         :ok <-
           validate_optional_repeated_tag(
             tags,
             "c",
             &valid_nip66_check_name?/1,
             :invalid_nip66_check_tag
           ) do
      validate_optional_single_string_tag_with_predicate(
        tags,
        "g",
        :invalid_nip66_discovery_tag,
        &non_empty_string?/1
      )
    end
  end

  defp validate_nip43_membership_list(event) do
    tags = Map.get(event, "tags", [])

    case validate_protected_tag(tags) do
      :ok -> validate_optional_repeated_pubkey_tag(tags, "member", :invalid_nip43_member_tag)
      {:error, _reason} = error -> error
    end
  end

  defp validate_nip43_membership_delta(event) do
    tags = Map.get(event, "tags", [])

    case validate_protected_tag(tags) do
      :ok ->
        validate_single_pubkey_tag(
          tags,
          "p",
          :missing_nip43_pubkey_tag,
          :invalid_nip43_pubkey_tag
        )

      {:error, _reason} = error ->
        error
    end
  end

  defp validate_nip43_join_request(event) do
    tags = Map.get(event, "tags", [])

    case validate_protected_tag(tags) do
      :ok ->
        with :ok <-
               validate_single_string_tag_with_predicate(
                 tags,
                 "claim",
                 :missing_nip43_claim_tag,
                 :invalid_nip43_claim_tag,
                 &non_empty_string?/1
               ) do
          validate_recent_created_at(event, :stale_nip43_join_request)
        end

      {:error, _reason} = error ->
        error
    end
  end

  defp validate_nip43_invite_response(event) do
    tags = Map.get(event, "tags", [])

    case validate_protected_tag(tags) do
      :ok ->
        validate_single_string_tag_with_predicate(
          tags,
          "claim",
          :missing_nip43_claim_tag,
          :invalid_nip43_claim_tag,
          &non_empty_string?/1
        )

      {:error, _reason} = error ->
        error
    end
  end

  defp validate_nip43_leave_request(event) do
    tags = Map.get(event, "tags", [])

    case validate_protected_tag(tags) do
      :ok -> validate_recent_created_at(event, :stale_nip43_leave_request)
      {:error, _reason} = error -> error
    end
  end

  defp validate_non_empty_base64_content(event),
    do: validate_non_empty_base64_content(event, :invalid_marmot_keypackage_content)
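Putting the discovery-event rules together, a tag set that would pass `validate_nip66_discovery_event/1` looks roughly like this (all values are illustrative):

```elixir
tags = [
  ["d", "wss://relay.example.com/"],  # required, exactly one: ws/wss URL or 32-byte hex
  ["n", "clearnet"],                  # optional: clearnet | tor | i2p | loki
  ["T", "PublicInbox"],               # optional: PascalCase relay type
  ["N", "11"],                        # repeated: supported NIPs as integer strings
  ["N", "66"],
  ["R", "auth"],                      # repeated: requirement flags, negatable with "!"
  ["R", "!payment"],
  ["rtt-open", "42"]                  # optional: non-negative integer milliseconds
]
```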
@@ -394,6 +656,25 @@ defmodule Parrhesia.Protocol.EventValidator do
    end
  end

  defp validate_optional_single_string_tag_with_predicate(
         tags,
         tag_name,
         invalid_error,
         predicate
       )
       when is_function(predicate, 1) do
    case Enum.filter(tags, &match_tag_name?(&1, tag_name)) do
      [] ->
        :ok

      [[^tag_name, value]] ->
        if predicate.(value), do: :ok, else: {:error, invalid_error}

      _other ->
        {:error, invalid_error}
    end
  end

  defp validate_mls_extensions_tag(tags) do
    with {:ok, ["mls_extensions" | extensions]} <-
           fetch_single_tag(tags, "mls_extensions", :missing_marmot_extensions_tag),
@@ -432,6 +713,89 @@ defmodule Parrhesia.Protocol.EventValidator do
    end
  end

  defp validate_nip66_d_tag(tags) do
    with {:ok, ["d", value]} <- fetch_single_tag(tags, "d", :missing_nip66_d_tag),
         true <- valid_websocket_url?(value) or lowercase_hex?(value, 32) do
      :ok
    else
      {:ok, _invalid_tag_shape} -> {:error, :invalid_nip66_d_tag}
      false -> {:error, :invalid_nip66_d_tag}
      {:error, _reason} = error -> error
    end
  end

  defp validate_optional_repeated_timeout_tags(tags) do
    timeout_tags = Enum.filter(tags, &match_tag_name?(&1, "timeout"))

    if Enum.all?(timeout_tags, &valid_nip66_timeout_tag?/1) do
      :ok
    else
      {:error, :invalid_nip66_timeout_tag}
    end
  end

  defp validate_optional_repeated_tag(tags, tag_name, predicate, invalid_error)
       when is_function(predicate, 1) do
    tags
    |> Enum.filter(&match_tag_name?(&1, tag_name))
    |> Enum.reduce_while(:ok, fn
      [^tag_name, value], :ok ->
        if predicate.(value), do: {:cont, :ok}, else: {:halt, {:error, invalid_error}}

      _other, :ok ->
        {:halt, {:error, invalid_error}}
    end)
  end

  defp validate_protected_tag(tags) do
    if Enum.any?(tags, &match?(["-"], &1)) do
      :ok
    else
      {:error, :missing_nip43_protected_tag}
    end
  end

  defp validate_single_pubkey_tag(tags, tag_name, missing_error, invalid_error) do
    case fetch_single_tag(tags, tag_name, missing_error) do
      {:ok, [^tag_name, value]} ->
        if lowercase_hex?(value, 32) do
          :ok
        else
          {:error, invalid_error}
        end

      {:ok, _invalid_tag_shape} ->
        {:error, invalid_error}

      {:error, _reason} = error ->
        error
    end
  end

  defp validate_optional_repeated_pubkey_tag(tags, tag_name, invalid_error) do
    matching_tags = Enum.filter(tags, &match_tag_name?(&1, tag_name))

    if Enum.all?(matching_tags, fn
         [^tag_name, pubkey | _rest] -> lowercase_hex?(pubkey, 32)
         _other -> false
       end) do
      :ok
    else
      {:error, invalid_error}
    end
  end

  defp validate_recent_created_at(%{"created_at" => created_at}, error_reason)
       when is_integer(created_at) do
    if created_at >= System.system_time(:second) - nip43_request_max_age_seconds() do
      :ok
    else
      {:error, error_reason}
    end
  end

  defp validate_recent_created_at(_event, error_reason), do: {:error, error_reason}

  defp fetch_single_tag(tags, tag_name, missing_error) do
    case Enum.filter(tags, &match_tag_name?(&1, tag_name)) do
      [tag] -> {:ok, tag}
@@ -488,6 +852,49 @@ defmodule Parrhesia.Protocol.EventValidator do
|
||||
|
||||
defp valid_websocket_url?(_url), do: false
|
||||
|
||||
defp valid_nip66_timeout_tag?(["timeout", milliseconds]),
|
||||
do: positive_integer_string?(milliseconds)
|
||||
|
||||
defp valid_nip66_timeout_tag?(["timeout", check, milliseconds]) do
|
||||
valid_nip66_check_name?(check) and positive_integer_string?(milliseconds)
|
||||
end
|
||||
|
||||
defp valid_nip66_timeout_tag?(_tag), do: false
|
||||
|
||||
defp valid_nip66_requirement_value?(value) when is_binary(value) do
|
||||
normalized = String.trim_leading(value, "!")
|
||||
normalized in ["auth", "writes", "pow", "payment"]
|
||||
end
|
||||
|
||||
defp valid_nip66_requirement_value?(_value), do: false
|
||||
|
||||
defp valid_nip66_kind_value?(<<"!", rest::binary>>), do: positive_integer_string?(rest)
|
||||
defp valid_nip66_kind_value?(value), do: positive_integer_string?(value)
|
||||
|
||||
defp valid_nip66_check_name?(value) when is_binary(value) do
|
||||
String.match?(value, ~r/^[a-z0-9-]+$/)
|
||||
end
|
||||
|
||||
defp valid_nip66_check_name?(_value), do: false
|
||||
|
||||
defp valid_pascal_case?(value) when is_binary(value) do
|
||||
String.match?(value, ~r/^[A-Z][A-Za-z0-9]*$/)
|
||||
end
|
||||
|
||||
defp valid_pascal_case?(_value), do: false
|
||||
|
||||
defp positive_integer_string?(value) when is_binary(value) do
|
||||
case Integer.parse(value) do
|
||||
{integer, ""} when integer >= 0 -> true
|
||||
_other -> false
|
||||
end
|
||||
end
|
||||
|
||||
defp positive_integer_string?(_value), do: false
|
||||
|
||||
defp non_empty_string?(value) when is_binary(value), do: value != ""
|
||||
defp non_empty_string?(_value), do: false
|
||||
|
||||
defp valid_keypackage_ref?(value) when is_binary(value) do
|
||||
Enum.any?(@supported_keypackage_ref_sizes, &lowercase_hex?(value, &1))
|
||||
end
|
||||
@@ -510,4 +917,17 @@ defmodule Parrhesia.Protocol.EventValidator do
|
||||
|> Application.get_env(:limits, [])
|
||||
|> Keyword.get(:max_event_future_skew_seconds, @default_max_event_future_skew_seconds)
|
||||
end
|
||||
|
||||
defp max_tags_per_event do
|
||||
case Application.get_env(:parrhesia, :limits, []) |> Keyword.get(:max_tags_per_event) do
|
||||
value when is_integer(value) and value > 0 -> value
|
||||
_other -> @default_max_tags_per_event
|
||||
end
|
||||
end
|
||||
|
||||
defp nip43_request_max_age_seconds do
|
||||
:parrhesia
|
||||
|> Application.get_env(:nip43, [])
|
||||
|> Keyword.get(:request_max_age_seconds, @default_nip43_request_max_age_seconds)
|
||||
end
|
||||
end
|
||||
|
||||
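The repeated-tag helpers above all share the same reduce_while shape: walk only the tags with the right name and stop at the first one that fails its predicate. A minimal standalone sketch of that pattern (the module, tag name, and error atom here are illustrative, not part of Parrhesia):

defmodule RepeatedTagSketch do
  # Returns :ok when every ["t", value] tag satisfies the predicate,
  # halting with the given error on the first failure or malformed tag.
  def validate(tags, tag_name, predicate, invalid_error) do
    tags
    |> Enum.filter(&match?([^tag_name | _rest], &1))
    |> Enum.reduce_while(:ok, fn
      [^tag_name, value], :ok ->
        if predicate.(value), do: {:cont, :ok}, else: {:halt, {:error, invalid_error}}

      _other, :ok ->
        {:halt, {:error, invalid_error}}
    end)
  end
end

# RepeatedTagSketch.validate([["t", "dev"], ["t", ""]], "t", &(&1 != ""), :empty_t_tag)
# #=> {:error, :empty_t_tag}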
@@ -5,6 +5,7 @@ defmodule Parrhesia.Protocol.Filter do

  @max_kind 65_535
  @default_max_filters_per_req 16
  @default_max_tag_values_per_filter 128

  @type validation_error ::
          :invalid_filters
@@ -19,6 +20,7 @@ defmodule Parrhesia.Protocol.Filter do
          | :invalid_until
          | :invalid_limit
          | :invalid_search
          | :too_many_tag_values
          | :invalid_tag_filter

  @allowed_keys MapSet.new(["ids", "authors", "kinds", "since", "until", "limit", "search"])
@@ -36,6 +38,7 @@ defmodule Parrhesia.Protocol.Filter do
    invalid_until: "invalid: until must be a non-negative integer",
    invalid_limit: "invalid: limit must be a positive integer",
    invalid_search: "invalid: search must be a non-empty string",
    too_many_tag_values: "invalid: tag filters exceed configured value limit",
    invalid_tag_filter:
      "invalid: tag filters must use #<single-letter> with non-empty string arrays"
  }
@@ -178,19 +181,33 @@ defmodule Parrhesia.Protocol.Filter do
    filter
    |> Enum.filter(fn {key, _value} -> valid_tag_filter_key?(key) end)
    |> Enum.reduce_while(:ok, fn {_key, values}, :ok ->
-      if valid_tag_filter_values?(values) do
-        {:cont, :ok}
-      else
-        {:halt, {:error, :invalid_tag_filter}}
      case validate_tag_filter_values(values) do
        :ok -> {:cont, :ok}
        {:error, reason} -> {:halt, {:error, reason}}
      end
    end)
  end

-  defp valid_tag_filter_values?(values) when is_list(values) do
-    values != [] and Enum.all?(values, &is_binary/1)
-  end
  defp validate_tag_filter_values(values) when is_list(values),
    do: validate_tag_filter_values(values, max_tag_values_per_filter(), 0)

-  defp valid_tag_filter_values?(_values), do: false
  defp validate_tag_filter_values(_values), do: {:error, :invalid_tag_filter}

  defp validate_tag_filter_values([], _max_values, 0), do: {:error, :invalid_tag_filter}
  defp validate_tag_filter_values([], _max_values, _count), do: :ok

  defp validate_tag_filter_values([value | rest], max_values, count) do
    cond do
      count + 1 > max_values ->
        {:error, :too_many_tag_values}

      is_binary(value) ->
        validate_tag_filter_values(rest, max_values, count + 1)

      true ->
        {:error, :invalid_tag_filter}
    end
  end

  defp filter_predicates(event, filter) do
    [
@@ -278,4 +295,12 @@ defmodule Parrhesia.Protocol.Filter do
    |> Application.get_env(:limits, [])
    |> Keyword.get(:max_filters_per_req, @default_max_filters_per_req)
  end

  defp max_tag_values_per_filter do
    case Application.get_env(:parrhesia, :limits, [])
         |> Keyword.get(:max_tag_values_per_filter) do
      value when is_integer(value) and value > 0 -> value
      _other -> @default_max_tag_values_per_filter
    end
  end
end
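Both caps are read from the same :limits keyword via Application.get_env(:parrhesia, :limits, []), so a deployment can tune them in one place. A hypothetical runtime config entry (the file location and the 256 value are assumptions; only the key names come from the code above):

# config/runtime.exs (assumed location)
config :parrhesia, :limits,
  max_filters_per_req: 16,
  max_tag_values_per_filter: 256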
9
lib/parrhesia/read_repo.ex
Normal file
@@ -0,0 +1,9 @@
defmodule Parrhesia.ReadRepo do
  @moduledoc """
  PostgreSQL repository dedicated to read-heavy workloads when a separate read pool is enabled.
  """

  use Ecto.Repo,
    otp_app: :parrhesia,
    adapter: Ecto.Adapters.Postgres
end
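The diff does not show how Parrhesia.ReadRepo is configured; under standard Ecto conventions it would take its own repo config, typically pointing at a replica or a second pool on the same database. A sketch, with the env var names assumed:

# config/runtime.exs, hypothetical read-pool wiring
config :parrhesia, Parrhesia.ReadRepo,
  url: System.get_env("READ_DATABASE_URL") || System.get_env("DATABASE_URL"),
  pool_size: String.to_integer(System.get_env("READ_POOL_SIZE") || "10")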
46
lib/parrhesia/release.ex
Normal file
@@ -0,0 +1,46 @@
defmodule Parrhesia.Release do
  @moduledoc """
  Helpers for running Ecto tasks from a production release.

  Intended for use from a release `eval` command where Mix is not available:

      bin/parrhesia eval "Parrhesia.Release.migrate()"
      bin/parrhesia eval "Parrhesia.Release.rollback(Parrhesia.Repo, 20260101000000)"
  """

  @app :parrhesia

  @doc """
  Runs all pending Ecto migrations for every configured repo.
  """
  def migrate do
    load_app()

    for repo <- repos() do
      {:ok, _, _} =
        Ecto.Migrator.with_repo(repo, fn repo ->
          Ecto.Migrator.run(repo, :up, all: true)
        end)
    end
  end

  @doc """
  Rolls back the given `repo` to the specified migration `version`.
  """
  def rollback(repo, version) when is_atom(repo) and is_integer(version) do
    load_app()

    {:ok, _, _} =
      Ecto.Migrator.with_repo(repo, fn repo ->
        Ecto.Migrator.run(repo, :down, to: version)
      end)
  end

  defp load_app do
    Application.load(@app)
  end

  defp repos do
    Application.fetch_env!(@app, :ecto_repos)
  end
end
@@ -1,6 +1,9 @@
defmodule Parrhesia.Repo do
  @moduledoc """
-  PostgreSQL repository for storage adapter persistence.
  PostgreSQL repository for write traffic and storage adapter persistence.

  Separated from `Parrhesia.ReadRepo` so that ingest writes and read-heavy
  queries use independent connection pools.
  """

  use Ecto.Repo,
52
lib/parrhesia/runtime.ex
Normal file
@@ -0,0 +1,52 @@
defmodule Parrhesia.Runtime do
  @moduledoc """
  Top-level Parrhesia supervisor.

  In normal standalone use, the `:parrhesia` application starts this supervisor automatically.
  Host applications can also embed it directly under their own supervision tree:

      children = [
        {Parrhesia.Runtime, name: Parrhesia.Supervisor}
      ]

  Parrhesia currently assumes a single runtime per BEAM node and uses globally registered
  process names for core services.
  """

  use Supervisor

  @doc """
  Starts the Parrhesia runtime supervisor.

  Accepts a `:name` option (defaults to `Parrhesia.Supervisor`).
  """
  def start_link(opts \\ []) do
    name = Keyword.get(opts, :name, Parrhesia.Supervisor)
    Supervisor.start_link(__MODULE__, opts, name: name)
  end

  @impl true
  def init(_opts) do
    Supervisor.init(children(), strategy: :one_for_one)
  end

  @doc """
  Returns the list of child specifications started by the runtime supervisor.
  """
  def children do
    [
      Parrhesia.Telemetry,
      Parrhesia.ConnectionStats,
      Parrhesia.Config,
      Parrhesia.Web.EventIngestLimiter,
      Parrhesia.Web.IPEventIngestLimiter,
      Parrhesia.Storage.Supervisor,
      Parrhesia.Subscriptions.Supervisor,
      Parrhesia.Auth.Supervisor,
      Parrhesia.Sync.Supervisor,
      Parrhesia.Policy.Supervisor,
      Parrhesia.Web.Endpoint,
      Parrhesia.Tasks.Supervisor
    ]
  end
end
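Building on the moduledoc example, a hedged sketch of a host application embedding the runtime (MyApp and MyApp.Repo are hypothetical names, not part of Parrhesia):

defmodule MyApp.Application do
  use Application

  @impl true
  def start(_type, _args) do
    children = [
      MyApp.Repo,
      # Parrhesia's services register global names, so start at most one per node.
      {Parrhesia.Runtime, name: Parrhesia.Supervisor}
    ]

    Supervisor.start_link(children, strategy: :one_for_one, name: MyApp.Supervisor)
  end
end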
@@ -4,24 +4,46 @@ defmodule Parrhesia.Storage do

  Domain/runtime code should resolve behavior modules through this module instead of
  depending on concrete adapter implementations directly.

  Each accessor validates that the configured module is loaded and declares the expected
  behaviour before returning it.
  """

  @default_modules [
    events: Parrhesia.Storage.Adapters.Postgres.Events,
    acl: Parrhesia.Storage.Adapters.Postgres.ACL,
    moderation: Parrhesia.Storage.Adapters.Postgres.Moderation,
    groups: Parrhesia.Storage.Adapters.Postgres.Groups,
    admin: Parrhesia.Storage.Adapters.Postgres.Admin
  ]

  @doc """
  Returns the configured events storage module.
  """
  @spec events() :: module()
  def events, do: fetch_module!(:events, Parrhesia.Storage.Events)

  @doc """
  Returns the configured moderation storage module.
  """
  @spec moderation() :: module()
  def moderation, do: fetch_module!(:moderation, Parrhesia.Storage.Moderation)

  @doc """
  Returns the configured ACL storage module.
  """
  @spec acl() :: module()
  def acl, do: fetch_module!(:acl, Parrhesia.Storage.ACL)

  @doc """
  Returns the configured groups storage module.
  """
  @spec groups() :: module()
  def groups, do: fetch_module!(:groups, Parrhesia.Storage.Groups)

  @doc """
  Returns the configured admin storage module.
  """
  @spec admin() :: module()
  def admin, do: fetch_module!(:admin, Parrhesia.Storage.Admin)
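The accessor/default pairing above implies each concern can be swapped out in configuration. Assuming the config key mirrors the @default_modules keyword (this shape is inferred, since fetch_module!/2 is not shown in the diff), an override might look like:

# Hypothetical: point the events concern at a custom adapter that
# implements the Parrhesia.Storage.Events behaviour.
config :parrhesia, :storage,
  events: MyApp.Storage.CustomEvents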
14
lib/parrhesia/storage/acl.ex
Normal file
@@ -0,0 +1,14 @@
defmodule Parrhesia.Storage.ACL do
  @moduledoc """
  Storage callbacks for persisted ACL rules.
  """

  @type context :: map()
  @type rule :: map()
  @type opts :: keyword()
  @type reason :: term()

  @callback put_rule(context(), rule()) :: {:ok, rule()} | {:error, reason()}
  @callback delete_rule(context(), map()) :: :ok | {:error, reason()}
  @callback list_rules(context(), opts()) :: {:ok, [rule()]} | {:error, reason()}
end
157
lib/parrhesia/storage/adapters/memory/acl.ex
Normal file
@@ -0,0 +1,157 @@
defmodule Parrhesia.Storage.Adapters.Memory.ACL do
  @moduledoc """
  In-memory prototype adapter for `Parrhesia.Storage.ACL`.
  """

  alias Parrhesia.Storage.Adapters.Memory.Store

  @behaviour Parrhesia.Storage.ACL

  @impl true
  def put_rule(_context, rule) when is_map(rule) do
    with {:ok, normalized_rule} <- normalize_rule(rule) do
      Store.get_and_update(fn state -> put_rule_in_state(state, normalized_rule) end)
    end
  end

  def put_rule(_context, _rule), do: {:error, :invalid_acl_rule}

  @impl true
  def delete_rule(_context, selector) when is_map(selector) do
    case normalize_delete_selector(selector) do
      {:ok, {:id, id}} ->
        Store.update(fn state ->
          %{state | acl_rules: Enum.reject(state.acl_rules, &(&1.id == id))}
        end)

        :ok

      {:ok, {:exact, rule}} ->
        Store.update(fn state ->
          %{state | acl_rules: Enum.reject(state.acl_rules, &same_rule?(&1, rule))}
        end)

        :ok

      {:error, reason} ->
        {:error, reason}
    end
  end

  def delete_rule(_context, _selector), do: {:error, :invalid_acl_rule}

  @impl true
  def list_rules(_context, opts) when is_list(opts) do
    rules =
      Store.get(fn state -> Enum.reverse(state.acl_rules) end)
      |> Enum.filter(fn rule ->
        matches_principal_type?(rule, Keyword.get(opts, :principal_type)) and
          matches_principal?(rule, Keyword.get(opts, :principal)) and
          matches_capability?(rule, Keyword.get(opts, :capability))
      end)

    {:ok, rules}
  end

  def list_rules(_context, _opts), do: {:error, :invalid_opts}

  defp put_rule_in_state(state, normalized_rule) do
    case Enum.find(state.acl_rules, &same_rule?(&1, normalized_rule)) do
      nil ->
        next_id = state.next_acl_rule_id
        persisted_rule = Map.put(normalized_rule, :id, next_id)

        {{:ok, persisted_rule},
         %{
           state
           | acl_rules: [persisted_rule | state.acl_rules],
             next_acl_rule_id: next_id + 1
         }}

      existing_rule ->
        {{:ok, existing_rule}, state}
    end
  end

  defp matches_principal_type?(_rule, nil), do: true
  defp matches_principal_type?(rule, principal_type), do: rule.principal_type == principal_type

  defp matches_principal?(_rule, nil), do: true
  defp matches_principal?(rule, principal), do: rule.principal == principal

  defp matches_capability?(_rule, nil), do: true
  defp matches_capability?(rule, capability), do: rule.capability == capability

  defp same_rule?(left, right) do
    left.principal_type == right.principal_type and
      left.principal == right.principal and
      left.capability == right.capability and
      left.match == right.match
  end

  defp normalize_delete_selector(%{"id" => id}), do: normalize_delete_selector(%{id: id})

  defp normalize_delete_selector(%{id: id}) when is_integer(id) and id > 0,
    do: {:ok, {:id, id}}

  defp normalize_delete_selector(selector) do
    case normalize_rule(selector) do
      {:ok, rule} -> {:ok, {:exact, rule}}
      {:error, reason} -> {:error, reason}
    end
  end

  defp normalize_rule(rule) when is_map(rule) do
    with {:ok, principal_type} <- normalize_principal_type(fetch(rule, :principal_type)),
         {:ok, principal} <- normalize_principal(fetch(rule, :principal)),
         {:ok, capability} <- normalize_capability(fetch(rule, :capability)),
         {:ok, match} <- normalize_match(fetch(rule, :match)) do
      {:ok,
       %{
         principal_type: principal_type,
         principal: principal,
         capability: capability,
         match: match
       }}
    end
  end

  defp normalize_rule(_rule), do: {:error, :invalid_acl_rule}

  defp normalize_principal_type(:pubkey), do: {:ok, :pubkey}
  defp normalize_principal_type("pubkey"), do: {:ok, :pubkey}
  defp normalize_principal_type(_value), do: {:error, :invalid_acl_principal_type}

  defp normalize_principal(value) when is_binary(value) and byte_size(value) == 64,
    do: {:ok, String.downcase(value)}

  defp normalize_principal(_value), do: {:error, :invalid_acl_principal}

  defp normalize_capability(:sync_read), do: {:ok, :sync_read}
  defp normalize_capability(:sync_write), do: {:ok, :sync_write}
  defp normalize_capability("sync_read"), do: {:ok, :sync_read}
  defp normalize_capability("sync_write"), do: {:ok, :sync_write}
  defp normalize_capability(_value), do: {:error, :invalid_acl_capability}

  defp normalize_match(match) when is_map(match) do
    normalized_match =
      Enum.reduce(match, %{}, fn
        {key, values}, acc when is_binary(key) ->
          Map.put(acc, key, values)

        {key, values}, acc when is_atom(key) ->
          Map.put(acc, Atom.to_string(key), values)

        _entry, acc ->
          acc
      end)

    {:ok, normalized_match}
  end

  defp normalize_match(_match), do: {:error, :invalid_acl_match}

  defp fetch(map, key) do
    Map.get(map, key) || Map.get(map, Atom.to_string(key))
  end
end
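A quick round trip against the in-memory adapter, assuming the memory Store agent can start in the current process tree (the pubkey is a dummy 64-char lowercase hex string, as normalize_principal/1 requires):

alias Parrhesia.Storage.Adapters.Memory.ACL

pubkey = String.duplicate("ab", 32)

{:ok, rule} =
  ACL.put_rule(%{}, %{
    principal_type: :pubkey,
    principal: pubkey,
    capability: :sync_read,
    match: %{"kinds" => [1]}
  })

# put_rule is idempotent: the same rule comes back instead of a duplicate.
{:ok, ^rule} = ACL.put_rule(%{}, Map.delete(rule, :id))

{:ok, [^rule]} = ACL.list_rules(%{}, principal: pubkey)
:ok = ACL.delete_rule(%{}, %{id: rule.id})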
@@ -6,6 +6,9 @@ defmodule Parrhesia.Storage.Adapters.Memory.Admin do
  alias Parrhesia.Storage.Adapters.Memory.Store

  @behaviour Parrhesia.Storage.Admin
  @default_limit 100
  @max_limit 1_000
  @max_audit_logs 1_000

  @impl true
  def execute(_context, method, _params) do
@@ -17,18 +20,59 @@ defmodule Parrhesia.Storage.Adapters.Memory.Admin do

  @impl true
  def append_audit_log(_context, audit_entry) when is_map(audit_entry) do
-    Store.update(fn state -> update_in(state.audit_logs, &[audit_entry | &1]) end)
    Store.update(fn state ->
      update_in(state.audit_logs, fn logs ->
        [audit_entry | logs] |> Enum.take(@max_audit_logs)
      end)
    end)

    :ok
  end

  def append_audit_log(_context, _audit_entry), do: {:error, :invalid_audit_entry}

  @impl true
-  def list_audit_logs(_context, _opts) do
-    {:ok, Store.get(fn state -> Enum.reverse(state.audit_logs) end)}
  def list_audit_logs(_context, opts) when is_list(opts) do
    limit = normalize_limit(Keyword.get(opts, :limit, @default_limit))
    method = normalize_method_filter(Keyword.get(opts, :method))
    actor_pubkey = Keyword.get(opts, :actor_pubkey)

    logs =
      Store.get(fn state ->
        state.audit_logs
        |> Enum.filter(&matches_filters?(&1, method, actor_pubkey))
        |> Enum.take(limit)
      end)

    {:ok, logs}
  end

  def list_audit_logs(_context, _opts), do: {:error, :invalid_opts}

  defp normalize_method(method) when is_binary(method), do: method
  defp normalize_method(method) when is_atom(method), do: Atom.to_string(method)
  defp normalize_method(method), do: inspect(method)

  defp normalize_limit(limit) when is_integer(limit) and limit > 0, do: min(limit, @max_limit)
  defp normalize_limit(_limit), do: @default_limit

  defp normalize_method_filter(nil), do: nil
  defp normalize_method_filter(method), do: normalize_method(method)

  defp matches_method?(_entry, nil), do: true

  defp matches_method?(entry, method) do
    normalize_method(Map.get(entry, :method) || Map.get(entry, "method")) == method
  end

  defp matches_actor_pubkey?(_entry, nil), do: true

  defp matches_actor_pubkey?(entry, actor_pubkey) do
    Map.get(entry, :actor_pubkey) == actor_pubkey or
      Map.get(entry, "actor_pubkey") == actor_pubkey
  end

  defp matches_filters?(entry, method, actor_pubkey) do
    matches_method?(entry, method) and matches_actor_pubkey?(entry, actor_pubkey)
  end
end
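Usage sketch for the new filtered listing (the entry fields mirror what append_audit_log/2 stored; :method may be an atom or a string, since normalize_method_filter/1 coerces both to strings):

alias Parrhesia.Storage.Adapters.Memory.Admin

actor = String.duplicate("cd", 32)
:ok = Admin.append_audit_log(%{}, %{method: "ban_pubkey", actor_pubkey: actor})

{:ok, [entry]} =
  Admin.list_audit_logs(%{}, method: :ban_pubkey, actor_pubkey: actor, limit: 50)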
@@ -12,53 +12,75 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
  def put_event(_context, event) do
    event_id = Map.fetch!(event, "id")

-    result =
-      Store.get_and_update(fn state ->
-        if Map.has_key?(state.events, event_id) do
-          {{:error, :duplicate_event}, state}
-        else
-          next_state = put_in(state.events[event_id], event)
-          {{:ok, event}, next_state}
-        end
-      end)
-
-    result
    case Store.put_event(event_id, event) do
      :ok -> {:ok, event}
      {:error, :duplicate_event} -> {:error, :duplicate_event}
    end
  end

  @impl true
  def get_event(_context, event_id) do
-    deleted? = Store.get(fn state -> MapSet.member?(state.deleted, event_id) end)
-
-    if deleted? do
-      {:ok, nil}
-    else
-      {:ok, Store.get(fn state -> Map.get(state.events, event_id) end)}
    case Store.get_event(event_id) do
      {:ok, _event, true} -> {:ok, nil}
      {:ok, event, false} -> {:ok, event}
      :error -> {:ok, nil}
    end
  end

  @impl true
  def query(_context, filters, opts) do
    with :ok <- Filter.validate_filters(filters) do
-      state = Store.get(& &1)
      requester_pubkeys = Keyword.get(opts, :requester_pubkeys, [])

      events =
-        state.events
-        |> Map.values()
-        |> Enum.filter(fn event ->
-          not MapSet.member?(state.deleted, event["id"]) and
-            Filter.matches_any?(event, filters) and
-            giftwrap_visible_to_requester?(event, requester_pubkeys)
-        end)
        filters
        |> Enum.flat_map(&matching_events_for_filter(&1, requester_pubkeys, opts))
        |> deduplicate_events()
        |> sort_events()
        |> maybe_apply_query_limit(opts)

      {:ok, events}
    end
  end

  @impl true
-  def count(context, filters, opts) do
-    with {:ok, events} <- query(context, filters, opts) do
-      {:ok, length(events)}
  def query_event_refs(_context, filters, opts) do
    with :ok <- Filter.validate_filters(filters) do
      requester_pubkeys = Keyword.get(opts, :requester_pubkeys, [])
      query_opts = Keyword.put(opts, :apply_filter_limits?, false)

      {_, refs} =
        reduce_unique_matching_events(
          filters,
          requester_pubkeys,
          query_opts,
          {MapSet.new(), []},
          &append_unique_event_ref/2
        )

      refs =
        refs |> Enum.sort(&(compare_event_refs(&1, &2) != :gt)) |> maybe_limit_event_refs(opts)

      {:ok, refs}
    end
  end

  @impl true
  def count(_context, filters, opts) do
    with :ok <- Filter.validate_filters(filters) do
      requester_pubkeys = Keyword.get(opts, :requester_pubkeys, [])
      query_opts = Keyword.put(opts, :apply_filter_limits?, false)

      {_seen_ids, count} =
        reduce_unique_matching_events(
          filters,
          requester_pubkeys,
          query_opts,
          {MapSet.new(), 0},
          &count_unique_event/2
        )

      {:ok, count}
    end
  end

@@ -89,22 +111,14 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
      end)

    coordinate_delete_ids =
-      Store.get(fn state ->
-        state.events
-        |> Map.values()
-        |> Enum.filter(fn candidate ->
-          matches_delete_coordinate?(candidate, delete_coordinates, deleter_pubkey)
-        end)
-        |> Enum.map(& &1["id"])
-      end)
      delete_coordinates
      |> coordinate_delete_candidates(deleter_pubkey)
      |> Enum.filter(&matches_delete_coordinate?(&1, delete_coordinates, deleter_pubkey))
      |> Enum.map(& &1["id"])

    all_delete_ids = Enum.uniq(delete_event_ids ++ coordinate_delete_ids)

-    Store.update(fn state ->
-      Enum.reduce(all_delete_ids, state, fn event_id, acc ->
-        update_in(acc.deleted, &MapSet.put(&1, event_id))
-      end)
-    end)
    Enum.each(all_delete_ids, &Store.mark_deleted/1)

    {:ok, length(all_delete_ids)}
  end
@@ -114,18 +128,11 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
    pubkey = Map.get(event, "pubkey")

    deleted_ids =
-      Store.get(fn state ->
-        state.events
-        |> Map.values()
-        |> Enum.filter(fn candidate -> candidate["pubkey"] == pubkey end)
-        |> Enum.map(& &1["id"])
-      end)
      pubkey
      |> vanish_candidates(Map.get(event, "created_at"))
      |> Enum.map(& &1["id"])

-    Store.update(fn state ->
-      Enum.reduce(deleted_ids, state, fn event_id, acc ->
-        update_in(acc.deleted, &MapSet.put(&1, event_id))
-      end)
-    end)
    Enum.each(deleted_ids, &Store.mark_deleted/1)

    {:ok, length(deleted_ids)}
  end
@@ -189,4 +196,328 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
      _tag -> false
    end)
  end

  defp compare_event_refs(left, right) do
    cond do
      left.created_at < right.created_at -> :lt
      left.created_at > right.created_at -> :gt
      left.id < right.id -> :lt
      left.id > right.id -> :gt
      true -> :eq
    end
  end

  defp maybe_limit_event_refs(refs, opts) do
    case Keyword.get(opts, :limit) do
      limit when is_integer(limit) and limit > 0 -> Enum.take(refs, limit)
      _other -> refs
    end
  end

  defp matching_events_for_filter(filter, requester_pubkeys, opts) do
    cond do
      Map.has_key?(filter, "ids") ->
        direct_id_lookup_events(filter, requester_pubkeys, opts)

      indexed_candidate_spec(filter) != nil ->
        indexed_tag_lookup_events(filter, requester_pubkeys, opts)

      true ->
        scan_filter_matches(filter, requester_pubkeys, opts)
    end
  end

  defp direct_id_lookup_events(filter, requester_pubkeys, opts) do
    filter
    |> Map.get("ids", [])
    |> Enum.reduce([], fn event_id, acc ->
      maybe_prepend_direct_lookup_match(acc, event_id, filter, requester_pubkeys)
    end)
    |> deduplicate_events()
    |> sort_events()
    |> maybe_take_filter_limit(filter, opts)
  end

  defp scan_filter_matches(filter, requester_pubkeys, opts) do
    limit =
      if Keyword.get(opts, :apply_filter_limits?, true) do
        effective_filter_limit(filter, opts)
      else
        nil
      end

    {matches, _count} =
      Store.reduce_events_newest(
        {[], 0},
        &reduce_scan_match(&1, &2, filter, requester_pubkeys, limit)
      )

    matches
    |> Enum.reverse()
    |> sort_events()
  end

  defp indexed_tag_lookup_events(filter, requester_pubkeys, opts) do
    filter
    |> indexed_candidate_events()
    |> Enum.filter(&filter_match_visible?(&1, filter, requester_pubkeys))
    |> maybe_take_filter_limit(filter, opts)
  end

  defp indexed_tag_filter(filter) do
    filter
    |> Enum.filter(fn
      {"#" <> _tag_name, values} when is_list(values) -> values != []
      _entry -> false
    end)
    |> Enum.sort_by(fn {key, _values} -> key end)
    |> List.first()
    |> case do
      {"#" <> tag_name, values} -> {tag_name, values}
      nil -> nil
    end
  end

  defp indexed_candidate_spec(filter) do
    authors = Map.get(filter, "authors")
    kinds = Map.get(filter, "kinds")
    tag_filter = indexed_tag_filter(filter)

    cond do
      is_tuple(tag_filter) ->
        {tag_name, tag_values} = tag_filter
        {:tag, tag_name, effective_indexed_tag_values(filter, tag_values)}

      is_list(authors) and is_list(kinds) ->
        {:pubkey_kind, authors, kinds}

      is_list(authors) ->
        {:pubkey, authors}

      is_list(kinds) ->
        {:kind, kinds}

      true ->
        nil
    end
  end

  defp indexed_candidate_events(filter) do
    case indexed_candidate_spec(filter) do
      {:tag, tag_name, tag_values} ->
        Store.tagged_events(tag_name, tag_values)

      {:pubkey_kind, authors, kinds} ->
        Store.events_by_pubkeys_and_kinds(authors, kinds)

      {:pubkey, authors} ->
        Store.events_by_pubkeys(authors)

      {:kind, kinds} ->
        Store.events_by_kinds(kinds)

      nil ->
        []
    end
  end

  defp effective_indexed_tag_values(filter, tag_values) do
    case Map.get(filter, "limit") do
      limit when is_integer(limit) and limit == 1 ->
        Enum.take(tag_values, 1)

      _other ->
        tag_values
    end
  end

  defp filter_match_visible?(event, filter, requester_pubkeys) do
    Filter.matches_filter?(event, filter) and
      giftwrap_visible_to_requester?(event, requester_pubkeys)
  end

  defp maybe_prepend_direct_lookup_match(acc, event_id, filter, requester_pubkeys) do
    case Store.get_event(event_id) do
      {:ok, event, false} ->
        if filter_match_visible?(event, filter, requester_pubkeys) do
          [event | acc]
        else
          acc
        end

      _other ->
        acc
    end
  end

  defp reduce_scan_match(event, {acc, count}, filter, requester_pubkeys, limit) do
    if filter_match_visible?(event, filter, requester_pubkeys) do
      maybe_halt_scan([event | acc], count + 1, limit)
    else
      {acc, count}
    end
  end

  defp maybe_halt_scan(acc, count, limit) when is_integer(limit) and count >= limit do
    {:halt, {acc, count}}
  end

  defp maybe_halt_scan(acc, count, _limit), do: {acc, count}

  defp reduce_unique_matching_events(filters, requester_pubkeys, opts, acc, reducer) do
    Enum.reduce(filters, acc, fn filter, current_acc ->
      reduce_matching_events_for_filter(filter, requester_pubkeys, opts, current_acc, reducer)
    end)
  end

  defp reduce_matching_events_for_filter(filter, requester_pubkeys, _opts, acc, reducer) do
    cond do
      Map.has_key?(filter, "ids") ->
        filter
        |> Map.get("ids", [])
        |> Enum.reduce(acc, &reduce_event_id_match(&1, filter, requester_pubkeys, &2, reducer))

      indexed_candidate_spec(filter) != nil ->
        filter
        |> indexed_candidate_events()
        |> Enum.reduce(
          acc,
          &maybe_reduce_visible_event(&1, filter, requester_pubkeys, &2, reducer)
        )

      true ->
        Store.reduce_events_newest(
          acc,
          &maybe_reduce_visible_event(&1, filter, requester_pubkeys, &2, reducer)
        )
    end
  end

  defp coordinate_delete_candidates(delete_coordinates, deleter_pubkey) do
    delete_coordinates
    |> Enum.flat_map(fn coordinate ->
      cond do
        coordinate.pubkey != deleter_pubkey ->
          []

        addressable_kind?(coordinate.kind) ->
          Store.events_by_addresses([{coordinate.kind, deleter_pubkey, coordinate.d_tag}])

        replaceable_kind?(coordinate.kind) ->
          Store.events_by_pubkeys_and_kinds([deleter_pubkey], [coordinate.kind])

        true ->
          []
      end
    end)
    |> deduplicate_events()
  end

  defp vanish_candidates(pubkey, created_at) do
    own_events =
      Store.events_by_pubkeys([pubkey])
      |> Enum.filter(&(&1["created_at"] <= created_at))

    giftwrap_events =
      Store.tagged_events("p", [pubkey])
      |> Enum.filter(&(&1["kind"] == 1059 and &1["created_at"] <= created_at))

    deduplicate_events(own_events ++ giftwrap_events)
  end

  defp event_ref(event) do
    %{
      created_at: Map.fetch!(event, "created_at"),
      id: Base.decode16!(Map.fetch!(event, "id"), case: :mixed)
    }
  end

  defp append_unique_event_ref(event, {seen_ids, acc}) do
    reduce_unique_event(event, {seen_ids, acc}, fn _event_id, next_seen_ids ->
      {next_seen_ids, [event_ref(event) | acc]}
    end)
  end

  defp count_unique_event(event, {seen_ids, acc}) do
    reduce_unique_event(event, {seen_ids, acc}, fn _event_id, next_seen_ids ->
      {next_seen_ids, acc + 1}
    end)
  end

  defp reduce_unique_event(event, {seen_ids, acc}, fun) do
    event_id = Map.fetch!(event, "id")

    if MapSet.member?(seen_ids, event_id) do
      {seen_ids, acc}
    else
      fun.(event_id, MapSet.put(seen_ids, event_id))
    end
  end

  defp maybe_reduce_visible_event(event, filter, requester_pubkeys, acc, reducer) do
    if filter_match_visible?(event, filter, requester_pubkeys) do
      reducer.(event, acc)
    else
      acc
    end
  end

  defp reduce_event_id_match(event_id, filter, requester_pubkeys, acc, reducer) do
    case Store.get_event(event_id) do
      {:ok, event, false} ->
        maybe_reduce_visible_event(event, filter, requester_pubkeys, acc, reducer)

      _other ->
        acc
    end
  end

  defp deduplicate_events(events) do
    events
    |> Enum.reduce(%{}, fn event, acc -> Map.put(acc, event["id"], event) end)
    |> Map.values()
  end

  defp sort_events(events) do
    Enum.sort(events, &chronological_sorter/2)
  end

  defp chronological_sorter(left, right) do
    cond do
      left["created_at"] > right["created_at"] -> true
      left["created_at"] < right["created_at"] -> false
      true -> left["id"] < right["id"]
    end
  end

  defp maybe_apply_query_limit(events, opts) do
    case Keyword.get(opts, :limit) do
      limit when is_integer(limit) and limit > 0 -> Enum.take(events, limit)
      _other -> events
    end
  end

  defp maybe_take_filter_limit(events, filter, opts) do
    case effective_filter_limit(filter, opts) do
      limit when is_integer(limit) and limit > 0 -> Enum.take(events, limit)
      _other -> events
    end
  end

  defp effective_filter_limit(filter, opts) do
    max_filter_limit = Keyword.get(opts, :max_filter_limit)

    case Map.get(filter, "limit") do
      limit
      when is_integer(limit) and limit > 0 and is_integer(max_filter_limit) and
             max_filter_limit > 0 ->
        min(limit, max_filter_limit)

      limit when is_integer(limit) and limit > 0 ->
        limit

      _other ->
        nil
    end
  end
end
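How a query is routed, per matching_events_for_filter/3 above: "ids" filters resolve by direct lookup, filters carrying authors/kinds/tag values hit the ETS indexes, and anything else falls back to the newest-first scan. A sketch with dummy hex values (assuming these filters pass Filter.validate_filters/1, whose full rules are not shown here):

alias Parrhesia.Storage.Adapters.Memory.Events

filters = [
  # direct id lookup
  %{"ids" => [String.duplicate("00", 32)]},
  # pubkey+kind index
  %{"authors" => [String.duplicate("ab", 32)], "kinds" => [1]},
  # newest-first scan fallback
  %{"since" => 1_700_000_000}
]

{:ok, events} = Events.query(%{}, filters, limit: 100)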
@@ -33,6 +33,11 @@ defmodule Parrhesia.Storage.Adapters.Memory.Moderation do
    {:ok, Store.get(fn state -> MapSet.member?(state.allowed_pubkeys, pubkey) end)}
  end

  @impl true
  def has_allowed_pubkeys?(_context) do
    {:ok, Store.get(fn state -> MapSet.size(state.allowed_pubkeys) > 0 end)}
  end

  @impl true
  def ban_event(_context, event_id), do: update_ban_set(:events, event_id, :add)
@@ -4,33 +4,160 @@ defmodule Parrhesia.Storage.Adapters.Memory.Store do
  use Agent

  @name __MODULE__
  @events_table :parrhesia_memory_events
  @events_by_time_table :parrhesia_memory_events_by_time
  @events_by_tag_table :parrhesia_memory_events_by_tag
  @events_by_pubkey_table :parrhesia_memory_events_by_pubkey
  @events_by_kind_table :parrhesia_memory_events_by_kind
  @events_by_pubkey_kind_table :parrhesia_memory_events_by_pubkey_kind
  @events_by_address_table :parrhesia_memory_events_by_address

  @initial_state %{
    events: %{},
    deleted: MapSet.new(),
    bans: %{pubkeys: MapSet.new(), events: MapSet.new(), ips: MapSet.new()},
    allowed_pubkeys: MapSet.new(),
    acl_rules: [],
    next_acl_rule_id: 1,
    groups: %{},
    roles: %{},
    audit_logs: []
  }

-  def ensure_started do
-    if Process.whereis(@name) do
  def ensure_started, do: start_store()

  def put_event(event_id, event) when is_binary(event_id) and is_map(event) do
    :ok = ensure_started()

    if :ets.insert_new(@events_table, {event_id, event, false}) do
      true = :ets.insert(@events_by_time_table, {{sort_key(event), event_id}, event_id})
      index_event_tags(event_id, event)
      index_event_secondary_keys(event_id, event)
      :ok
    else
-      start_store()
      {:error, :duplicate_event}
    end
  end

-  defp start_store do
-    case Agent.start_link(fn -> @initial_state end, name: @name) do
-      {:ok, _pid} -> :ok
-      {:error, {:already_started, _pid}} -> :ok
-      {:error, reason} -> {:error, reason}
  def get_event(event_id) when is_binary(event_id) do
    :ok = ensure_started()

    case :ets.lookup(@events_table, event_id) do
      [{^event_id, event, deleted?}] -> {:ok, event, deleted?}
      [] -> :error
    end
  end

  def mark_deleted(event_id) when is_binary(event_id) do
    :ok = ensure_started()

    case lookup_event(event_id) do
      {:ok, event, false} ->
        true = :ets.insert(@events_table, {event_id, event, true})
        true = :ets.delete(@events_by_time_table, {sort_key(event), event_id})
        unindex_event_tags(event_id, event)
        unindex_event_secondary_keys(event_id, event)
        :ok

      {:ok, _event, true} ->
        :ok

      :error ->
        :ok
    end
  end

  def reduce_events(acc, fun) when is_function(fun, 2) do
    :ok = ensure_started()

    :ets.foldl(
      fn {_event_id, event, deleted?}, current_acc ->
        if deleted? do
          current_acc
        else
          fun.(event, current_acc)
        end
      end,
      acc,
      @events_table
    )
  end

  def reduce_events_newest(acc, fun) when is_function(fun, 2) do
    :ok = ensure_started()
    reduce_events_newest_from(:ets.first(@events_by_time_table), acc, fun)
  end

  def tagged_events(tag_name, tag_values) when is_binary(tag_name) and is_list(tag_values) do
    :ok = ensure_started()

    tag_values
    |> Enum.flat_map(&indexed_events_for_value(@events_by_tag_table, {tag_name, &1}))
    |> sort_and_deduplicate_events()
  end

  def events_by_pubkeys(pubkeys) when is_list(pubkeys) do
    :ok = ensure_started()

    pubkeys
    |> Enum.flat_map(&indexed_events_for_value(@events_by_pubkey_table, &1))
    |> sort_and_deduplicate_events()
  end

  def events_by_kinds(kinds) when is_list(kinds) do
    :ok = ensure_started()

    kinds
    |> Enum.flat_map(&indexed_events_for_value(@events_by_kind_table, &1))
    |> sort_and_deduplicate_events()
  end

  def events_by_pubkeys_and_kinds(pubkeys, kinds) when is_list(pubkeys) and is_list(kinds) do
    :ok = ensure_started()

    pubkeys
    |> Enum.flat_map(fn pubkey ->
      kinds
      |> Enum.flat_map(&indexed_events_for_value(@events_by_pubkey_kind_table, {pubkey, &1}))
    end)
    |> sort_and_deduplicate_events()
  end

  def events_by_addresses(addresses) when is_list(addresses) do
    :ok = ensure_started()

    addresses
    |> Enum.flat_map(&indexed_events_for_value(@events_by_address_table, &1))
    |> sort_and_deduplicate_events()
  end

  defp reduce_events_newest_from(:"$end_of_table", acc, _fun), do: acc

  defp reduce_events_newest_from(key, acc, fun) do
    next_key = :ets.next(@events_by_time_table, key)
    acc = reduce_indexed_event(key, acc, fun)

    case acc do
      {:halt, final_acc} -> final_acc
      next_acc -> reduce_events_newest_from(next_key, next_acc, fun)
    end
  end

  defp reduce_indexed_event(key, acc, fun) do
    case :ets.lookup(@events_by_time_table, key) do
      [{^key, event_id}] -> apply_reduce_fun(event_id, acc, fun)
      [] -> acc
    end
  end

  defp apply_reduce_fun(event_id, acc, fun) do
    case lookup_event(event_id) do
      {:ok, event, false} -> normalize_reduce_result(fun.(event, acc))
      _other -> acc
    end
  end

  defp normalize_reduce_result({:halt, next_acc}), do: {:halt, next_acc}
  defp normalize_reduce_result(next_acc), do: next_acc

  def get(fun) do
    :ok = ensure_started()
    Agent.get(@name, fun)
@@ -45,4 +172,208 @@ defmodule Parrhesia.Storage.Adapters.Memory.Store do
    :ok = ensure_started()
    Agent.get_and_update(@name, fun)
  end

  defp start_store do
    case Agent.start_link(&init_state/0, name: @name) do
      {:ok, _pid} -> :ok
      {:error, {:already_started, _pid}} -> :ok
      {:error, reason} -> {:error, reason}
    end
  end

  defp init_state do
    ensure_tables_started()

    @initial_state
  end

  defp ensure_tables_started do
    ensure_table(@events_table, [
      :named_table,
      :public,
      :set,
      read_concurrency: true,
      write_concurrency: true
    ])

    ensure_table(@events_by_time_table, [
      :named_table,
      :public,
      :ordered_set,
      read_concurrency: true,
      write_concurrency: true
    ])

    ensure_table(@events_by_tag_table, [
      :named_table,
      :public,
      :bag,
      read_concurrency: true,
      write_concurrency: true
    ])

    ensure_table(@events_by_pubkey_table, [
      :named_table,
      :public,
      :bag,
      read_concurrency: true,
      write_concurrency: true
    ])

    ensure_table(@events_by_kind_table, [
      :named_table,
      :public,
      :bag,
      read_concurrency: true,
      write_concurrency: true
    ])

    ensure_table(@events_by_pubkey_kind_table, [
      :named_table,
      :public,
      :bag,
      read_concurrency: true,
      write_concurrency: true
    ])

    ensure_table(@events_by_address_table, [
      :named_table,
      :public,
      :bag,
      read_concurrency: true,
      write_concurrency: true
    ])
  end

  defp ensure_table(name, options) do
    case :ets.whereis(name) do
      :undefined -> :ets.new(name, options)
      _table -> :ok
    end
  end

  defp lookup_event(event_id) do
    case :ets.lookup(@events_table, event_id) do
      [{^event_id, event, deleted?}] -> {:ok, event, deleted?}
      [] -> :error
    end
  end

  defp index_event_tags(event_id, event) do
    event
    |> event_tag_index_entries(event_id)
    |> Enum.each(fn entry ->
      true = :ets.insert(@events_by_tag_table, entry)
    end)
  end

  defp index_event_secondary_keys(event_id, event) do
    event
    |> secondary_index_entries(event_id)
    |> Enum.each(fn {table, entry} ->
      true = :ets.insert(table, entry)
    end)
  end

  defp unindex_event_tags(event_id, event) do
    event
    |> event_tag_index_entries(event_id)
    |> Enum.each(&:ets.delete_object(@events_by_tag_table, &1))
  end

  defp unindex_event_secondary_keys(event_id, event) do
    event
    |> secondary_index_entries(event_id)
    |> Enum.each(fn {table, entry} ->
      :ets.delete_object(table, entry)
    end)
  end

  defp event_tag_index_entries(event, event_id) do
    created_sort_key = sort_key(event)

    event
    |> Map.get("tags", [])
    |> Enum.flat_map(fn
      [tag_name, tag_value | _rest] when is_binary(tag_name) and is_binary(tag_value) ->
        [{{tag_name, tag_value}, created_sort_key, event_id}]

      _tag ->
        []
    end)
  end

  defp secondary_index_entries(event, event_id) do
    created_sort_key = sort_key(event)
    pubkey = Map.get(event, "pubkey")
    kind = Map.get(event, "kind")

    []
    |> maybe_put_secondary_entry(@events_by_pubkey_table, pubkey, created_sort_key, event_id)
    |> maybe_put_secondary_entry(@events_by_kind_table, kind, created_sort_key, event_id)
    |> maybe_put_pubkey_kind_entry(pubkey, kind, created_sort_key, event_id)
    |> maybe_put_address_entry(event, pubkey, kind, event_id)
  end

  defp maybe_put_secondary_entry(entries, _table, key, _created_sort_key, _event_id)
       when is_nil(key),
       do: entries

  defp maybe_put_secondary_entry(entries, table, key, created_sort_key, event_id) do
    [{table, {key, created_sort_key, event_id}} | entries]
  end

  defp maybe_put_pubkey_kind_entry(entries, pubkey, kind, created_sort_key, event_id)
       when is_binary(pubkey) and is_integer(kind) do
    [{@events_by_pubkey_kind_table, {{pubkey, kind}, created_sort_key, event_id}} | entries]
  end

  defp maybe_put_pubkey_kind_entry(entries, _pubkey, _kind, _created_sort_key, _event_id),
    do: entries

  defp maybe_put_address_entry(entries, event, pubkey, kind, event_id)
       when is_binary(pubkey) and is_integer(kind) and kind >= 30_000 and kind < 40_000 do
    d_tag =
      event
      |> Map.get("tags", [])
      |> Enum.find_value("", fn
        ["d", value | _rest] -> value
        _tag -> nil
      end)

    [{@events_by_address_table, {{kind, pubkey, d_tag}, sort_key(event), event_id}} | entries]
  end

  defp maybe_put_address_entry(entries, _event, _pubkey, _kind, _event_id), do: entries

  defp indexed_events_for_value(_table, value)
       when not is_binary(value) and not is_integer(value) and not is_tuple(value),
       do: []

  defp indexed_events_for_value(table, value) do
    table
    |> :ets.lookup(value)
    |> Enum.reduce([], fn {^value, _created_sort_key, event_id}, acc ->
      case lookup_event(event_id) do
        {:ok, event, false} -> [event | acc]
        _other -> acc
      end
    end)
  end

  defp sort_and_deduplicate_events(events) do
    events
    |> Enum.uniq_by(& &1["id"])
    |> Enum.sort(&chronological_sorter/2)
  end

  defp chronological_sorter(left, right) do
    cond do
      left["created_at"] > right["created_at"] -> true
      left["created_at"] < right["created_at"] -> false
      true -> left["id"] < right["id"]
    end
  end

  defp sort_key(event), do: -Map.get(event, "created_at", 0)
end
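The reducer passed to reduce_events_newest/2 may return {:halt, acc} to stop the newest-first walk early; normalize_reduce_result/1 above threads that through the recursion. A sketch that collects the ten newest live events:

alias Parrhesia.Storage.Adapters.Memory.Store

newest_ten =
  []
  |> Store.reduce_events_newest(fn event, acc ->
    acc = [event | acc]
    # Halt as soon as ten events have been visited.
    if length(acc) >= 10, do: {:halt, acc}, else: acc
  end)
  |> Enum.reverse()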
279
lib/parrhesia/storage/adapters/postgres/acl.ex
Normal file
279
lib/parrhesia/storage/adapters/postgres/acl.ex
Normal file
@@ -0,0 +1,279 @@
|
||||
defmodule Parrhesia.Storage.Adapters.Postgres.ACL do
|
||||
@moduledoc """
|
||||
PostgreSQL-backed implementation for `Parrhesia.Storage.ACL`.
|
||||
"""
|
||||
|
||||
import Ecto.Query
|
||||
|
||||
alias Parrhesia.PostgresRepos
|
||||
alias Parrhesia.Repo
|
||||
|
||||
@behaviour Parrhesia.Storage.ACL
|
||||
|
||||
@impl true
|
||||
def put_rule(_context, rule) when is_map(rule) do
|
||||
with {:ok, normalized_rule} <- normalize_rule(rule) do
|
||||
normalized_rule
|
||||
|> find_matching_rule()
|
||||
|> maybe_insert_rule(normalized_rule)
|
||||
end
|
||||
end
|
||||
|
||||
def put_rule(_context, _rule), do: {:error, :invalid_acl_rule}
|
||||
|
||||
defp maybe_insert_rule(nil, normalized_rule), do: insert_rule(normalized_rule)
|
||||
defp maybe_insert_rule(existing_rule, _normalized_rule), do: {:ok, existing_rule}
|
||||
|
||||
@impl true
|
||||
def delete_rule(_context, selector) when is_map(selector) do
|
||||
case normalize_delete_selector(selector) do
|
||||
{:ok, {:id, id}} ->
|
||||
query = from(rule in "acl_rules", where: rule.id == ^id)
|
||||
{_deleted, _result} = Repo.delete_all(query)
|
||||
:ok
|
||||
|
||||
{:ok, {:exact, rule}} ->
|
||||
query =
|
||||
from(stored_rule in "acl_rules",
|
||||
where:
|
||||
stored_rule.principal_type == ^rule.principal_type and
|
||||
stored_rule.principal == ^rule.principal and
|
||||
stored_rule.capability == ^rule.capability and
|
||||
stored_rule.match == ^rule.match
|
||||
)
|
||||
|
||||
{_deleted, _result} = Repo.delete_all(query)
|
||||
:ok
|
||||
|
||||
{:error, reason} ->
|
||||
{:error, reason}
|
||||
end
|
||||
end
|
||||
|
||||
def delete_rule(_context, _selector), do: {:error, :invalid_acl_rule}
|
||||
|
||||
@impl true
|
||||
def list_rules(_context, opts) when is_list(opts) do
|
||||
query =
|
||||
from(rule in "acl_rules",
|
||||
order_by: [
|
||||
asc: rule.principal_type,
|
||||
asc: rule.principal,
|
||||
asc: rule.capability,
|
||||
asc: rule.id
|
||||
],
|
||||
select: %{
|
||||
id: rule.id,
|
||||
principal_type: rule.principal_type,
|
||||
principal: rule.principal,
|
||||
capability: rule.capability,
|
||||
match: rule.match,
|
||||
inserted_at: rule.inserted_at
|
||||
}
|
||||
)
|
||||
|> maybe_filter_principal_type(Keyword.get(opts, :principal_type))
|
||||
|> maybe_filter_principal(Keyword.get(opts, :principal))
|
||||
|> maybe_filter_capability(Keyword.get(opts, :capability))
|
||||
|
||||
repo = read_repo()
|
||||
{:ok, Enum.map(repo.all(query), &normalize_persisted_rule/1)}
|
||||
end
|
||||
|
||||
def list_rules(_context, _opts), do: {:error, :invalid_opts}
|
||||
|
||||
defp maybe_filter_principal_type(query, nil), do: query
|
||||
|
||||
defp maybe_filter_principal_type(query, principal_type) when is_atom(principal_type) do
|
||||
maybe_filter_principal_type(query, Atom.to_string(principal_type))
|
||||
end
|
||||
|
||||
defp maybe_filter_principal_type(query, principal_type) when is_binary(principal_type) do
|
||||
where(query, [rule], rule.principal_type == ^principal_type)
|
||||
end
|
||||
|
||||
defp maybe_filter_principal_type(query, _principal_type), do: query
|
||||
|
||||
defp maybe_filter_principal(query, nil), do: query
|
||||
|
||||
defp maybe_filter_principal(query, principal) when is_binary(principal) do
|
||||
case decode_hex_or_binary(principal, 32, :invalid_acl_principal) do
|
||||
{:ok, decoded_principal} -> where(query, [rule], rule.principal == ^decoded_principal)
|
||||
{:error, _reason} -> where(query, [rule], false)
|
||||
end
|
||||
end
|
||||
|
||||
defp maybe_filter_principal(query, _principal), do: query
|
||||
|
||||
defp maybe_filter_capability(query, nil), do: query
|
||||
|
||||
defp maybe_filter_capability(query, capability) when is_atom(capability) do
|
||||
maybe_filter_capability(query, Atom.to_string(capability))
|
||||
end
|
||||
|
||||
defp maybe_filter_capability(query, capability) when is_binary(capability) do
|
||||
where(query, [rule], rule.capability == ^capability)
|
||||
end
|
||||
|
||||
defp maybe_filter_capability(query, _capability), do: query
|
||||
|
||||
defp find_matching_rule(normalized_rule) do
|
||||
query =
|
||||
from(stored_rule in "acl_rules",
|
||||
where:
|
||||
stored_rule.principal_type == ^normalized_rule.principal_type and
|
||||
stored_rule.principal == ^normalized_rule.principal and
|
||||
stored_rule.capability == ^normalized_rule.capability and
|
||||
stored_rule.match == ^normalized_rule.match,
|
||||
limit: 1,
|
||||
select: %{
|
||||
id: stored_rule.id,
|
||||
principal_type: stored_rule.principal_type,
|
||||
principal: stored_rule.principal,
|
||||
capability: stored_rule.capability,
|
||||
match: stored_rule.match,
|
||||
inserted_at: stored_rule.inserted_at
|
||||
}
|
||||
)
|
||||
|
||||
repo = read_repo()
|
||||
|
||||
case repo.one(query) do
|
||||
nil -> nil
|
||||
stored_rule -> normalize_persisted_rule(stored_rule)
|
||||
end
|
||||
end
|
||||
|
||||
defp read_repo, do: PostgresRepos.read()
|
||||
|
||||
defp insert_rule(normalized_rule) do
|
||||
now = DateTime.utc_now() |> DateTime.truncate(:microsecond)
|
||||
|
||||
row = %{
|
||||
principal_type: normalized_rule.principal_type,
|
||||
principal: normalized_rule.principal,
|
||||
capability: normalized_rule.capability,
|
||||
match: normalized_rule.match,
|
||||
inserted_at: now
|
||||
}
|
||||
|
||||
case Repo.insert_all("acl_rules", [row], returning: [:id, :inserted_at]) do
|
||||
{1, [inserted_row]} ->
|
||||
{:ok, normalize_persisted_rule(Map.merge(row, Map.new(inserted_row)))}
|
||||
|
||||
_other ->
|
||||
{:error, :acl_rule_insert_failed}
|
||||
end
|
||||
end
|
||||
|
||||
defp normalize_persisted_rule(rule) do
|
||||
%{
|
||||
id: rule.id,
|
||||
principal_type: normalize_principal_type(rule.principal_type),
|
||||
principal: Base.encode16(rule.principal, case: :lower),
|
||||
capability: normalize_capability(rule.capability),
match: normalize_match(rule.match),
inserted_at: rule.inserted_at
}
end

defp normalize_delete_selector(%{"id" => id}), do: normalize_delete_selector(%{id: id})

defp normalize_delete_selector(%{id: id}) when is_integer(id) and id > 0,
do: {:ok, {:id, id}}

defp normalize_delete_selector(selector) do
case normalize_rule(selector) do
{:ok, normalized_rule} -> {:ok, {:exact, normalized_rule}}
{:error, reason} -> {:error, reason}
end
end

defp normalize_rule(rule) when is_map(rule) do
with {:ok, principal_type} <- normalize_principal_type_value(fetch(rule, :principal_type)),
{:ok, principal} <-
decode_hex_or_binary(fetch(rule, :principal), 32, :invalid_acl_principal),
{:ok, capability} <- normalize_capability_value(fetch(rule, :capability)),
{:ok, match} <- normalize_match_value(fetch(rule, :match)) do
{:ok,
%{
principal_type: principal_type,
principal: principal,
capability: capability,
match: match
}}
end
end

defp normalize_rule(_rule), do: {:error, :invalid_acl_rule}

defp normalize_principal_type("pubkey"), do: :pubkey
defp normalize_principal_type(principal_type), do: principal_type

defp normalize_capability("sync_read"), do: :sync_read
defp normalize_capability("sync_write"), do: :sync_write
defp normalize_capability(capability), do: capability

defp normalize_principal_type_value(:pubkey), do: {:ok, "pubkey"}
defp normalize_principal_type_value("pubkey"), do: {:ok, "pubkey"}
defp normalize_principal_type_value(_principal_type), do: {:error, :invalid_acl_principal_type}

defp normalize_capability_value(:sync_read), do: {:ok, "sync_read"}
defp normalize_capability_value(:sync_write), do: {:ok, "sync_write"}
defp normalize_capability_value("sync_read"), do: {:ok, "sync_read"}
defp normalize_capability_value("sync_write"), do: {:ok, "sync_write"}
defp normalize_capability_value(_capability), do: {:error, :invalid_acl_capability}

defp normalize_match_value(match) when is_map(match) do
normalized_match =
Enum.reduce(match, %{}, fn
{key, values}, acc when is_binary(key) ->
Map.put(acc, key, values)

{key, values}, acc when is_atom(key) ->
Map.put(acc, Atom.to_string(key), values)

_entry, acc ->
acc
end)

{:ok, normalize_match(normalized_match)}
end

defp normalize_match_value(_match), do: {:error, :invalid_acl_match}

defp normalize_match(match) when is_map(match) do
Enum.reduce(match, %{}, fn
{key, values}, acc when is_binary(key) and is_list(values) ->
Map.put(acc, key, Enum.uniq(values))

{key, value}, acc when is_binary(key) ->
Map.put(acc, key, value)

_entry, acc ->
acc
end)
end

defp normalize_match(_match), do: %{}

defp fetch(map, key) do
Map.get(map, key) || Map.get(map, Atom.to_string(key))
end

defp decode_hex_or_binary(value, expected_bytes, _reason)
when is_binary(value) and byte_size(value) == expected_bytes,
do: {:ok, value}

defp decode_hex_or_binary(value, expected_bytes, reason) when is_binary(value) do
if byte_size(value) == expected_bytes * 2 do
case Base.decode16(value, case: :mixed) do
{:ok, decoded} -> {:ok, decoded}
:error -> {:error, reason}
end
else
{:error, reason}
end
end

defp decode_hex_or_binary(_value, _expected_bytes, reason), do: {:error, reason}
end
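
For reference, a minimal sketch of the hex path through decode_hex_or_binary/3 (illustrative values, not part of the diff): a 64-character hex principal decodes to the expected 32 raw bytes, and Base.decode16 with case: :mixed accepts both upper- and lower-case digits.

hex = String.duplicate("ab", 32)              # 64 hex characters
{:ok, raw} = Base.decode16(hex, case: :mixed)
byte_size(raw)                                #=> 32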

@@ -5,6 +5,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Admin do

import Ecto.Query

alias Parrhesia.PostgresRepos
alias Parrhesia.Repo

@behaviour Parrhesia.Storage.Admin
@@ -20,6 +21,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Admin do
case method_name do
"ping" -> {:ok, %{"status" => "ok"}}
"stats" -> {:ok, relay_stats()}
"supportedmethods" -> {:ok, %{"methods" => supported_methods()}}
"list_audit_logs" -> list_audit_logs(%{}, audit_list_opts(params))
_other -> execute_moderation_method(moderation, method_name, params)
end
@@ -72,8 +74,8 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Admin do
|> maybe_filter_actor_pubkey(Keyword.get(opts, :actor_pubkey))

logs =
query
|> Repo.all()
read_repo()
|> then(fn repo -> repo.all(query) end)
|> Enum.map(&to_audit_log_map/1)

{:ok, logs}
@@ -82,17 +84,39 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Admin do
def list_audit_logs(_context, _opts), do: {:error, :invalid_opts}

defp relay_stats do
events_count = Repo.aggregate("events", :count, :id)
banned_pubkeys = Repo.aggregate("banned_pubkeys", :count, :pubkey)
blocked_ips = Repo.aggregate("blocked_ips", :count, :ip)
repo = read_repo()
events_count = repo.aggregate("events", :count, :id)
banned_pubkeys = repo.aggregate("banned_pubkeys", :count, :pubkey)
allowed_pubkeys = repo.aggregate("allowed_pubkeys", :count, :pubkey)
blocked_ips = repo.aggregate("blocked_ips", :count, :ip)
acl_rules = repo.aggregate("acl_rules", :count, :id)

%{
"events" => events_count,
"banned_pubkeys" => banned_pubkeys,
"allowed_pubkeys" => allowed_pubkeys,
"acl_rules" => acl_rules,
"blocked_ips" => blocked_ips
}
end

defp supported_methods do
[
"allow_pubkey",
"ban_event",
"ban_pubkey",
"block_ip",
"disallow_pubkey",
"list_audit_logs",
"ping",
"stats",
"supportedmethods",
"unban_event",
"unban_pubkey",
"unblock_ip"
]
end

defp execute_moderation_method(moderation, "ban_pubkey", params),
do: execute_pubkey_method(fn ctx, value -> moderation.ban_pubkey(ctx, value) end, params)

@@ -212,6 +236,8 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Admin do

defp normalize_pubkey(_value), do: {:error, :invalid_actor_pubkey}

defp read_repo, do: PostgresRepos.read()

defp invalid_key_reason(:params), do: :invalid_params
defp invalid_key_reason(:result), do: :invalid_result

@@ -5,10 +5,16 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do

import Ecto.Query

alias Parrhesia.PostgresRepos
alias Parrhesia.Protocol.Filter
alias Parrhesia.Repo

@behaviour Parrhesia.Storage.Events
@trigram_fallback_max_single_term_length 4
@trigram_fallback_pattern ~r/[^\p{L}\p{N}\s"]/u
@fts_match_fragment "to_tsvector('simple', ?) @@ websearch_to_tsquery('simple', ?)"
@fts_rank_fragment "ts_rank_cd(to_tsvector('simple', ?), websearch_to_tsquery('simple', ?))"
@trigram_rank_fragment "word_similarity(lower(?), lower(?))"

@type normalized_event :: %{
id: binary(),
@@ -62,7 +68,9 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
}
)

case Repo.one(event_query) do
repo = read_repo()

case repo.one(event_query) do
nil ->
{:ok, nil}

@@ -76,16 +84,17 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
def query(_context, filters, opts) when is_list(opts) do
with :ok <- Filter.validate_filters(filters) do
now = Keyword.get(opts, :now, System.system_time(:second))
repo = read_repo()

persisted_events =
filters
|> Enum.flat_map(fn filter ->
filter
|> event_query_for_filter(now, opts)
|> Repo.all()
|> repo.all()
end)
|> deduplicate_events()
|> sort_persisted_events()
|> sort_persisted_events(filters)
|> maybe_apply_query_limit(opts)

{:ok, Enum.map(persisted_events, &to_nostr_event/1)}
@@ -94,21 +103,21 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do

def query(_context, _filters, _opts), do: {:error, :invalid_opts}

@impl true
def query_event_refs(_context, filters, opts) when is_list(opts) do
with :ok <- Filter.validate_filters(filters) do
now = Keyword.get(opts, :now, System.system_time(:second))
{:ok, fetch_event_refs(filters, now, opts)}
end
end

def query_event_refs(_context, _filters, _opts), do: {:error, :invalid_opts}

@impl true
def count(_context, filters, opts) when is_list(opts) do
with :ok <- Filter.validate_filters(filters) do
now = Keyword.get(opts, :now, System.system_time(:second))

total_count =
filters
|> event_id_union_query_for_filters(now, opts)
|> subquery()
|> then(fn union_query ->
from(event in union_query, select: count(event.id, :distinct))
end)
|> Repo.one()

{:ok, total_count}
{:ok, count_events(filters, now, opts)}
end
end

@@ -360,30 +369,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do

defp maybe_upsert_replaceable_state(normalized_event, now, deleted_at) do
if replaceable_kind?(normalized_event.kind) do
lookup_query =
from(state in "replaceable_event_state",
where:
state.pubkey == ^normalized_event.pubkey and state.kind == ^normalized_event.kind,
select: %{event_created_at: state.event_created_at, event_id: state.event_id}
)

update_query =
from(state in "replaceable_event_state",
where:
state.pubkey == ^normalized_event.pubkey and
state.kind == ^normalized_event.kind
)

upsert_state_table(
"replaceable_event_state",
lookup_query,
update_query,
replaceable_state_row(normalized_event, now),
normalized_event,
now,
deleted_at,
:replaceable_state_update_failed
)
upsert_replaceable_state_table(normalized_event, now, deleted_at)
else
:ok
end
@@ -391,159 +377,94 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do

defp maybe_upsert_addressable_state(normalized_event, now, deleted_at) do
if addressable_kind?(normalized_event.kind) do
lookup_query =
from(state in "addressable_event_state",
where:
state.pubkey == ^normalized_event.pubkey and
state.kind == ^normalized_event.kind and
state.d_tag == ^normalized_event.d_tag,
select: %{event_created_at: state.event_created_at, event_id: state.event_id}
)

update_query =
from(state in "addressable_event_state",
where:
state.pubkey == ^normalized_event.pubkey and
state.kind == ^normalized_event.kind and
state.d_tag == ^normalized_event.d_tag
)

upsert_state_table(
"addressable_event_state",
lookup_query,
update_query,
addressable_state_row(normalized_event, now),
normalized_event,
now,
deleted_at,
:addressable_state_update_failed
)
upsert_addressable_state_table(normalized_event, now, deleted_at)
else
:ok
end
end

defp upsert_state_table(
table_name,
lookup_query,
update_query,
insert_row,
normalized_event,
now,
deleted_at,
failure_reason
) do
case Repo.one(lookup_query) do
nil ->
insert_state_or_resolve_race(
table_name,
lookup_query,
update_query,
insert_row,
normalized_event,
now,
deleted_at,
failure_reason
)
defp upsert_replaceable_state_table(normalized_event, now, deleted_at) do
params = [
normalized_event.pubkey,
normalized_event.kind,
normalized_event.created_at,
normalized_event.id,
now,
now
]

current_state ->
maybe_update_state(
update_query,
normalized_event,
current_state,
now,
deleted_at,
failure_reason
)
case Repo.query(replaceable_state_upsert_sql(), params) do
{:ok, %{rows: [row]}} ->
finalize_state_upsert(row, normalized_event, deleted_at, :replaceable_state_update_failed)

{:ok, _result} ->
Repo.rollback(:replaceable_state_update_failed)

{:error, _reason} ->
Repo.rollback(:replaceable_state_update_failed)
end
end

defp insert_state_or_resolve_race(
table_name,
lookup_query,
update_query,
insert_row,
defp upsert_addressable_state_table(normalized_event, now, deleted_at) do
params = [
normalized_event.pubkey,
normalized_event.kind,
normalized_event.d_tag,
normalized_event.created_at,
normalized_event.id,
now,
now
]

case Repo.query(addressable_state_upsert_sql(), params) do
{:ok, %{rows: [row]}} ->
finalize_state_upsert(row, normalized_event, deleted_at, :addressable_state_update_failed)

{:ok, _result} ->
Repo.rollback(:addressable_state_update_failed)

{:error, _reason} ->
Repo.rollback(:addressable_state_update_failed)
end
end

defp finalize_state_upsert(
[retired_event_created_at, retired_event_id, winner_event_created_at, winner_event_id],
normalized_event,
now,
deleted_at,
failure_reason
) do
case Repo.insert_all(table_name, [insert_row], on_conflict: :nothing) do
{1, _result} ->
:ok

{0, _result} ->
resolve_state_race(
lookup_query,
update_query,
normalized_event,
now,
case {winner_event_created_at, winner_event_id} do
{created_at, event_id}
when created_at == normalized_event.created_at and event_id == normalized_event.id ->
maybe_retire_previous_state_event(
retired_event_created_at,
retired_event_id,
deleted_at,
failure_reason
)

{_inserted, _result} ->
Repo.rollback(failure_reason)
end
end

defp resolve_state_race(
lookup_query,
update_query,
normalized_event,
now,
deleted_at,
failure_reason
) do
case Repo.one(lookup_query) do
nil ->
Repo.rollback(failure_reason)

current_state ->
maybe_update_state(
update_query,
normalized_event,
current_state,
now,
deleted_at,
failure_reason
)
end
end

defp maybe_update_state(
update_query,
normalized_event,
current_state,
now,
deleted_at,
failure_reason
) do
if candidate_wins_state?(normalized_event, current_state) do
{updated, _result} =
Repo.update_all(update_query,
set: [
event_created_at: normalized_event.created_at,
event_id: normalized_event.id,
updated_at: now
]
)

if updated == 1 do
{_created_at, _event_id} ->
retire_event!(
current_state.event_created_at,
current_state.event_id,
normalized_event.created_at,
normalized_event.id,
deleted_at,
failure_reason
)
else
Repo.rollback(failure_reason)
end
else
retire_event!(normalized_event.created_at, normalized_event.id, deleted_at, failure_reason)
end
end

defp maybe_retire_previous_state_event(nil, nil, _deleted_at, _failure_reason), do: :ok

defp maybe_retire_previous_state_event(
retired_event_created_at,
retired_event_id,
deleted_at,
failure_reason
) do
retire_event!(retired_event_created_at, retired_event_id, deleted_at, failure_reason)
end

defp retire_event!(event_created_at, event_id, deleted_at, failure_reason) do
{updated, _result} =
Repo.update_all(
@@ -567,27 +488,147 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do

defp addressable_kind?(kind), do: kind >= 30_000 and kind < 40_000

defp replaceable_state_row(normalized_event, now) do
%{
pubkey: normalized_event.pubkey,
kind: normalized_event.kind,
event_created_at: normalized_event.created_at,
event_id: normalized_event.id,
inserted_at: now,
updated_at: now
}
defp replaceable_state_upsert_sql do
"""
WITH inserted AS (
INSERT INTO replaceable_event_state (
pubkey,
kind,
event_created_at,
event_id,
inserted_at,
updated_at
)
VALUES ($1, $2, $3, $4, $5, $6)
ON CONFLICT (pubkey, kind) DO NOTHING
RETURNING
NULL::bigint AS retired_event_created_at,
NULL::bytea AS retired_event_id,
event_created_at AS winner_event_created_at,
event_id AS winner_event_id
),
updated AS (
UPDATE replaceable_event_state AS state
SET
event_created_at = $3,
event_id = $4,
updated_at = $6
FROM (
SELECT current.event_created_at, current.event_id
FROM replaceable_event_state AS current
WHERE current.pubkey = $1 AND current.kind = $2
FOR UPDATE
) AS previous
WHERE
NOT EXISTS (SELECT 1 FROM inserted)
AND state.pubkey = $1
AND state.kind = $2
AND (
state.event_created_at < $3
OR (state.event_created_at = $3 AND state.event_id > $4)
)
RETURNING
previous.event_created_at AS retired_event_created_at,
previous.event_id AS retired_event_id,
state.event_created_at AS winner_event_created_at,
state.event_id AS winner_event_id
),
current AS (
SELECT
NULL::bigint AS retired_event_created_at,
NULL::bytea AS retired_event_id,
state.event_created_at AS winner_event_created_at,
state.event_id AS winner_event_id
FROM replaceable_event_state AS state
WHERE
NOT EXISTS (SELECT 1 FROM inserted)
AND NOT EXISTS (SELECT 1 FROM updated)
AND state.pubkey = $1
AND state.kind = $2
)
SELECT *
FROM inserted
UNION ALL
SELECT *
FROM updated
UNION ALL
SELECT *
FROM current
LIMIT 1
"""
end

defp addressable_state_row(normalized_event, now) do
%{
pubkey: normalized_event.pubkey,
kind: normalized_event.kind,
d_tag: normalized_event.d_tag,
event_created_at: normalized_event.created_at,
event_id: normalized_event.id,
inserted_at: now,
updated_at: now
}
defp addressable_state_upsert_sql do
"""
WITH inserted AS (
INSERT INTO addressable_event_state (
pubkey,
kind,
d_tag,
event_created_at,
event_id,
inserted_at,
updated_at
)
VALUES ($1, $2, $3, $4, $5, $6, $7)
ON CONFLICT (pubkey, kind, d_tag) DO NOTHING
RETURNING
NULL::bigint AS retired_event_created_at,
NULL::bytea AS retired_event_id,
event_created_at AS winner_event_created_at,
event_id AS winner_event_id
),
updated AS (
UPDATE addressable_event_state AS state
SET
event_created_at = $4,
event_id = $5,
updated_at = $7
FROM (
SELECT current.event_created_at, current.event_id
FROM addressable_event_state AS current
WHERE current.pubkey = $1 AND current.kind = $2 AND current.d_tag = $3
FOR UPDATE
) AS previous
WHERE
NOT EXISTS (SELECT 1 FROM inserted)
AND state.pubkey = $1
AND state.kind = $2
AND state.d_tag = $3
AND (
state.event_created_at < $4
OR (state.event_created_at = $4 AND state.event_id > $5)
)
RETURNING
previous.event_created_at AS retired_event_created_at,
previous.event_id AS retired_event_id,
state.event_created_at AS winner_event_created_at,
state.event_id AS winner_event_id
),
current AS (
SELECT
NULL::bigint AS retired_event_created_at,
NULL::bytea AS retired_event_id,
state.event_created_at AS winner_event_created_at,
state.event_id AS winner_event_id
FROM addressable_event_state AS state
WHERE
NOT EXISTS (SELECT 1 FROM inserted)
AND NOT EXISTS (SELECT 1 FROM updated)
AND state.pubkey = $1
AND state.kind = $2
AND state.d_tag = $3
)
SELECT *
FROM inserted
UNION ALL
SELECT *
FROM updated
UNION ALL
SELECT *
FROM current
LIMIT 1
"""
end
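
The two upsert statements above resolve insert, supersede, and no-op in a single round trip: the inserted branch wins when no state row exists, the updated branch wins when the candidate is newer (or ties on created_at with a lower event id, per the event_id > $4 comparison), and the current branch reports the standing winner otherwise. A sketch of how the single returned row reads at the call sites (interpretation only; column order matches the RETURNING clauses):

# [retired_event_created_at, retired_event_id, winner_event_created_at, winner_event_id]
# - fresh insert:    [nil, nil, candidate_created_at, candidate_id]
# - supersede:       [old_created_at, old_id, candidate_created_at, candidate_id]
# - candidate loses: [nil, nil, winning_created_at, winning_id]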

defp event_row(normalized_event, now) do
@@ -607,95 +648,219 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
end

defp event_query_for_filter(filter, now, opts) do
base_query =
from(event in "events",
where: is_nil(event.deleted_at) and (is_nil(event.expires_at) or event.expires_at > ^now),
order_by: [desc: event.created_at, asc: event.id],
select: %{
id: event.id,
pubkey: event.pubkey,
created_at: event.created_at,
kind: event.kind,
tags: event.tags,
content: event.content,
sig: event.sig
}
)
search_plan = search_plan(Map.get(filter, "search"))
{base_query, remaining_tag_filters} = event_source_query(filter, now)

query =
base_query
|> maybe_filter_ids(Map.get(filter, "ids"))
|> maybe_filter_authors(Map.get(filter, "authors"))
|> maybe_filter_kinds(Map.get(filter, "kinds"))
|> maybe_filter_since(Map.get(filter, "since"))
|> maybe_filter_until(Map.get(filter, "until"))
|> maybe_filter_search(Map.get(filter, "search"))
|> filter_by_tags(filter)
|> maybe_restrict_giftwrap_access(filter, opts)

maybe_limit_query(query, effective_filter_limit(filter, opts))
base_query
|> apply_common_event_filters(filter, remaining_tag_filters, opts, search_plan)
|> maybe_order_by_search_rank(search_plan)
|> select([event: event], %{
id: event.id,
pubkey: event.pubkey,
created_at: event.created_at,
kind: event.kind,
tags: event.tags,
content: event.content,
sig: event.sig
})
|> maybe_select_search_score(search_plan)
|> maybe_limit_query(effective_filter_limit(filter, opts))
end

defp event_id_query_for_filter(filter, now, opts) do
from(event in "events",
where: is_nil(event.deleted_at) and (is_nil(event.expires_at) or event.expires_at > ^now),
select: event.id
)
|> maybe_filter_ids(Map.get(filter, "ids"))
|> maybe_filter_authors(Map.get(filter, "authors"))
|> maybe_filter_kinds(Map.get(filter, "kinds"))
|> maybe_filter_since(Map.get(filter, "since"))
|> maybe_filter_until(Map.get(filter, "until"))
|> maybe_filter_search(Map.get(filter, "search"))
|> filter_by_tags(filter)
|> maybe_restrict_giftwrap_access(filter, opts)
search_plan = search_plan(Map.get(filter, "search"))
{base_query, remaining_tag_filters} = event_source_query(filter, now)

base_query
|> apply_common_event_filters(filter, remaining_tag_filters, opts, search_plan)
|> select([event: event], event.id)
end

defp event_id_union_query_for_filters([], now, _opts) do
defp event_id_distinct_union_query_for_filters([], now, _opts) do
from(event in "events",
where: event.created_at > ^now and event.created_at < ^now,
select: event.id
)
end

defp event_id_union_query_for_filters([first_filter | rest_filters], now, opts) do
defp event_id_distinct_union_query_for_filters([first_filter | rest_filters], now, opts) do
Enum.reduce(rest_filters, event_id_query_for_filter(first_filter, now, opts), fn filter,
acc ->
union_all(acc, ^event_id_query_for_filter(filter, now, opts))
union(acc, ^event_id_query_for_filter(filter, now, opts))
end)
end

defp event_ref_query_for_filter(filter, now, opts) do
search_plan = search_plan(Map.get(filter, "search"))
{base_query, remaining_tag_filters} = event_source_query(filter, now)

base_query
|> apply_common_event_filters(filter, remaining_tag_filters, opts, search_plan)
|> order_by([event: event], asc: event.created_at, asc: event.id)
|> select([event: event], %{
created_at: event.created_at,
id: event.id
})
|> maybe_limit_query(effective_filter_limit(filter, opts))
end

defp event_ref_union_query_for_filters([], now, _opts) do
from(event in "events",
where: event.created_at > ^now and event.created_at < ^now,
select: %{created_at: event.created_at, id: event.id}
)
end

defp event_ref_union_query_for_filters([first_filter | rest_filters], now, opts) do
Enum.reduce(rest_filters, event_ref_query_for_filter(first_filter, now, opts), fn filter,
acc ->
union_all(acc, ^event_ref_query_for_filter(filter, now, opts))
end)
end

defp fetch_event_refs([filter], now, opts) do
query =
filter
|> event_ref_query_for_filter(now, opts)
|> maybe_limit_query(Keyword.get(opts, :limit))

read_repo()
|> then(fn repo -> repo.all(query) end)
end

defp fetch_event_refs(filters, now, opts) do
query =
filters
|> event_ref_union_query_for_filters(now, opts)
|> subquery()
|> then(fn union_query ->
from(ref in union_query,
group_by: [ref.created_at, ref.id],
order_by: [asc: ref.created_at, asc: ref.id],
select: %{created_at: ref.created_at, id: ref.id}
)
end)
|> maybe_limit_query(Keyword.get(opts, :limit))

read_repo()
|> then(fn repo -> repo.all(query) end)
end

defp count_events([filter], now, opts) do
query =
filter
|> event_id_query_for_filter(now, opts)
|> subquery()
|> then(fn query ->
from(event in query, select: count())
end)

read_repo()
|> then(fn repo -> repo.one(query) end)
end

defp count_events(filters, now, opts) do
query =
filters
|> event_id_distinct_union_query_for_filters(now, opts)
|> subquery()
|> then(fn union_query ->
from(event in union_query, select: count())
end)

read_repo()
|> then(fn repo -> repo.one(query) end)
end

defp event_source_query(filter, now) do
tag_filters = tag_filters(filter)

case primary_tag_filter(tag_filters) do
nil ->
{from(event in "events",
as: :event,
where:
is_nil(event.deleted_at) and
(is_nil(event.expires_at) or event.expires_at > ^now)
), []}

{tag_name, values} = primary_tag_filter ->
remaining_tag_filters = List.delete(tag_filters, primary_tag_filter)

{from(tag in "event_tags",
as: :primary_tag,
where: tag.name == ^tag_name and tag.value in ^values,
join: event in "events",
as: :event,
on: event.created_at == tag.event_created_at and event.id == tag.event_id,
where:
is_nil(event.deleted_at) and
(is_nil(event.expires_at) or event.expires_at > ^now),
distinct: [event.created_at, event.id]
), remaining_tag_filters}
end
end

defp apply_common_event_filters(query, filter, remaining_tag_filters, opts, search_plan) do
query
|> maybe_filter_ids(Map.get(filter, "ids"))
|> maybe_filter_authors(Map.get(filter, "authors"))
|> maybe_filter_kinds(Map.get(filter, "kinds"))
|> maybe_filter_since(Map.get(filter, "since"))
|> maybe_filter_until(Map.get(filter, "until"))
|> maybe_filter_search(search_plan)
|> filter_by_tag_filters(remaining_tag_filters)
|> maybe_restrict_giftwrap_access(filter, opts)
end

defp primary_tag_filter([]), do: nil

defp primary_tag_filter(tag_filters) do
Enum.find(tag_filters, fn {tag_name, _values} -> tag_name in ["h", "i"] end) ||
List.first(tag_filters)
end
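
Illustrative picks for the primary (indexed) tag filter above; "h" and "i" tags win, otherwise the first tag filter is used (example tag lists are made up):

primary_tag_filter([{"e", ["abc"]}, {"h", ["group1"]}])  #=> {"h", ["group1"]}
primary_tag_filter([{"p", ["def"]}, {"t", ["nostr"]}])   #=> {"p", ["def"]}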

defp maybe_filter_ids(query, nil), do: query

defp maybe_filter_ids(query, ids) do
decoded_ids = decode_hex_list(ids, :lower)
where(query, [event], event.id in ^decoded_ids)
where(query, [event: event], event.id in ^decoded_ids)
end

defp maybe_filter_authors(query, nil), do: query

defp maybe_filter_authors(query, authors) do
decoded_authors = decode_hex_list(authors, :lower)
where(query, [event], event.pubkey in ^decoded_authors)
where(query, [event: event], event.pubkey in ^decoded_authors)
end

defp maybe_filter_kinds(query, nil), do: query
defp maybe_filter_kinds(query, kinds), do: where(query, [event], event.kind in ^kinds)
defp maybe_filter_kinds(query, kinds), do: where(query, [event: event], event.kind in ^kinds)

defp maybe_filter_since(query, nil), do: query
defp maybe_filter_since(query, since), do: where(query, [event], event.created_at >= ^since)

defp maybe_filter_since(query, since),
do: where(query, [event: event], event.created_at >= ^since)

defp maybe_filter_until(query, nil), do: query
defp maybe_filter_until(query, until), do: where(query, [event], event.created_at <= ^until)

defp maybe_filter_until(query, until),
do: where(query, [event: event], event.created_at <= ^until)

defp maybe_filter_search(query, nil), do: query

defp maybe_filter_search(query, search) when is_binary(search) and search != "" do
escaped_search = escape_like_pattern(search)
where(query, [event], ilike(event.content, ^"%#{escaped_search}%"))
defp maybe_filter_search(query, %{mode: :fts, query: search}) do
where(
query,
[event: event],
fragment(@fts_match_fragment, event.content, ^search)
)
end

defp maybe_filter_search(query, _search), do: query
defp maybe_filter_search(query, %{mode: :trigram, query: search}) do
escaped_search = escape_like_pattern(search)
where(query, [event: event], ilike(event.content, ^"%#{escaped_search}%"))
end

defp escape_like_pattern(search) do
search
@@ -704,13 +869,11 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|> String.replace("_", "\\_")
end

defp filter_by_tags(query, filter) do
filter
|> tag_filters()
|> Enum.reduce(query, fn {tag_name, values}, acc ->
defp filter_by_tag_filters(query, tag_filters) do
Enum.reduce(tag_filters, query, fn {tag_name, values}, acc ->
where(
acc,
[event],
[event: event],
fragment(
"EXISTS (SELECT 1 FROM event_tags AS tag WHERE tag.event_created_at = ? AND tag.event_id = ? AND tag.name = ? AND tag.value = ANY(?))",
event.created_at,
@@ -740,7 +903,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
targets_giftwrap?(filter) and requester_pubkeys != [] ->
where(
query,
[event],
[event: event],
fragment(
"EXISTS (SELECT 1 FROM event_tags AS tag WHERE tag.event_created_at = ? AND tag.event_id = ? AND tag.name = 'p' AND tag.value = ANY(?))",
event.created_at,
@@ -750,7 +913,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
)

targets_giftwrap?(filter) ->
where(query, [_event], false)
where(query, [event: _event], false)

true ->
query
@@ -786,20 +949,90 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
defp maybe_limit_query(query, nil), do: query
defp maybe_limit_query(query, limit), do: limit(query, ^limit)

defp maybe_order_by_search_rank(query, nil) do
order_by(query, [event: event], desc: event.created_at, asc: event.id)
end

defp maybe_order_by_search_rank(query, %{mode: :fts, query: search}) do
order_by(
query,
[event: event],
desc: fragment(@fts_rank_fragment, event.content, ^search),
desc: event.created_at,
asc: event.id
)
end

defp maybe_order_by_search_rank(query, %{mode: :trigram, query: search}) do
order_by(
query,
[event: event],
desc: fragment(@trigram_rank_fragment, ^search, event.content),
desc: event.created_at,
asc: event.id
)
end

defp maybe_select_search_score(query, nil), do: query

defp maybe_select_search_score(query, %{mode: :fts, query: search}) do
select_merge(
query,
[event: event],
%{search_score: fragment(@fts_rank_fragment, event.content, ^search)}
)
end

defp maybe_select_search_score(query, %{mode: :trigram, query: search}) do
select_merge(
query,
[event: event],
%{search_score: fragment(@trigram_rank_fragment, ^search, event.content)}
)
end

defp search_plan(nil), do: nil

defp search_plan(search) when is_binary(search) do
normalized_search = String.trim(search)

cond do
normalized_search == "" ->
nil

trigram_fallback_search?(normalized_search) ->
%{mode: :trigram, query: normalized_search}

true ->
%{mode: :fts, query: normalized_search}
end
end

defp trigram_fallback_search?(search) do
String.match?(search, @trigram_fallback_pattern) or short_single_term_search?(search)
end

defp short_single_term_search?(search) do
case String.split(search, ~r/\s+/, trim: true) do
[term] -> String.length(term) <= @trigram_fallback_max_single_term_length
_other -> false
end
end
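
Illustrative routing for search_plan/1 given the module attributes defined earlier (a single term of at most four characters, or any non-word character, falls back to trigram matching):

search_plan("nostr relay")  #=> %{mode: :fts, query: "nostr relay"}
search_plan("cat")          #=> %{mode: :trigram, query: "cat"}
search_plan("foo@bar")      #=> %{mode: :trigram, query: "foo@bar"}
search_plan("   ")          #=> nil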

defp deduplicate_events(events) do
events
|> Enum.reduce(%{}, fn event, acc -> Map.put_new(acc, event.id, event) end)
|> Enum.reduce(%{}, fn event, acc ->
Map.update(acc, event.id, event, fn existing -> preferred_event(existing, event) end)
end)
|> Map.values()
end

defp sort_persisted_events(events) do
Enum.sort(events, fn left, right ->
cond do
left.created_at > right.created_at -> true
left.created_at < right.created_at -> false
true -> left.id < right.id
end
end)
defp sort_persisted_events(events, filters) do
if Enum.any?(filters, &search_filter?/1) do
Enum.sort(events, &search_result_sorter/2)
else
Enum.sort(events, &chronological_sorter/2)
end
end

defp maybe_apply_query_limit(events, opts) do
@@ -821,6 +1054,50 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
}
end

defp preferred_event(existing, candidate) do
if search_result_sorter(candidate, existing) do
candidate
else
existing
end
end

defp search_filter?(filter) do
filter
|> Map.get("search")
|> search_plan()
|> Kernel.!=(nil)
end

defp search_result_sorter(left, right) do
left_score = search_score(left)
right_score = search_score(right)

cond do
left_score > right_score -> true
left_score < right_score -> false
true -> chronological_sorter(left, right)
end
end

defp chronological_sorter(left, right) do
cond do
left.created_at > right.created_at -> true
left.created_at < right.created_at -> false
true -> left.id < right.id
end
end

defp search_score(event) do
event
|> Map.get(:search_score, 0.0)
|> case do
score when is_float(score) -> score
score when is_integer(score) -> score / 1
_other -> 0.0
end
end
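
A quick sketch of the score coercion above (illustrative maps):

search_score(%{search_score: 3})    #=> 3.0  (integers coerced to floats via / 1)
search_score(%{search_score: 0.5})  #=> 0.5
search_score(%{})                   #=> 0.0  (missing or non-numeric scores fall back)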

defp normalize_persisted_tags(tags) when is_list(tags), do: tags
defp normalize_persisted_tags(_tags), do: []

@@ -966,4 +1243,6 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
end

defp maybe_apply_mls_group_retention(expires_at, _kind, _created_at), do: expires_at

defp read_repo, do: PostgresRepos.read()
end

@@ -5,6 +5,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Groups do

import Ecto.Query

alias Parrhesia.PostgresRepos
alias Parrhesia.Repo

@behaviour Parrhesia.Storage.Groups
@@ -46,7 +47,9 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Groups do
limit: 1
)

case Repo.one(query) do
repo = read_repo()

case repo.one(query) do
nil ->
{:ok, nil}

@@ -94,8 +97,8 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Groups do
)

memberships =
query
|> Repo.all()
read_repo()
|> then(fn repo -> repo.all(query) end)
|> Enum.map(fn membership ->
to_membership_map(
membership.group_id,
@@ -163,8 +166,8 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Groups do
)

roles =
query
|> Repo.all()
read_repo()
|> then(fn repo -> repo.all(query) end)
|> Enum.map(fn role ->
to_role_map(role.group_id, role.pubkey, role.role, role.metadata)
end)
@@ -242,6 +245,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Groups do

defp unwrap_transaction_result({:ok, result}), do: {:ok, result}
defp unwrap_transaction_result({:error, reason}), do: {:error, reason}
defp read_repo, do: PostgresRepos.read()

defp fetch_required_string(map, key) do
map

@@ -5,6 +5,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do

import Ecto.Query

alias Parrhesia.PostgresRepos
alias Parrhesia.Repo

@behaviour Parrhesia.Storage.Moderation
@@ -67,6 +68,11 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
end
end

@impl true
def has_allowed_pubkeys?(_context) do
{:ok, scope_populated?(:allowed_pubkeys)}
end

@impl true
def ban_event(_context, event_id) do
with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id),
@@ -163,6 +169,24 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
end
end

defp scope_populated?(scope) do
{table, field} = cache_scope_source!(scope)

if moderation_cache_enabled?() do
case cache_table_ref() do
:undefined ->
scope_populated_db?(table, field)

cache_table ->
ensure_cache_scope_loaded(scope, cache_table)

:ets.select_count(cache_table, [{{{:member, scope, :_}, true}, [], [true]}]) > 0
end
else
scope_populated_db?(table, field)
end
end

defp ensure_cache_scope_loaded(scope, table) do
loaded_key = cache_loaded_key(scope)

@@ -189,7 +213,8 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
select: field(record, ^field)
)

Repo.all(query)
read_repo()
|> then(fn repo -> repo.all(query) end)
end

defp cache_put(scope, value) do
@@ -243,7 +268,22 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
limit: 1
)

Repo.one(query) == 1
read_repo()
|> then(fn repo -> repo.one(query) end)
|> Kernel.==(1)
end

defp scope_populated_db?(table, field) do
query =
from(record in table,
select: field(record, ^field),
limit: 1
)

read_repo()
|> then(fn repo -> repo.one(query) end)
|> is_nil()
|> Kernel.not()
end

defp normalize_hex_or_binary(value, expected_bytes, _reason)
@@ -282,4 +322,6 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do

defp to_inet({_, _, _, _, _, _, _, _} = ip_tuple),
do: %Postgrex.INET{address: ip_tuple, netmask: 128}

defp read_repo, do: PostgresRepos.read()
end

@@ -1,51 +0,0 @@
defmodule Parrhesia.Storage.Archiver do
@moduledoc """
Partition-aware archival helpers for Postgres event partitions.
"""

import Ecto.Query

alias Parrhesia.Repo

@doc """
Lists all `events_*` partitions excluding the default partition.
"""
@spec list_partitions() :: [String.t()]
def list_partitions do
query =
from(table in "pg_tables",
where: table.schemaname == "public",
where: like(table.tablename, "events_%"),
where: table.tablename != "events_default",
select: table.tablename,
order_by: [asc: table.tablename]
)

Repo.all(query)
end

@identifier_pattern ~r/^[a-zA-Z_][a-zA-Z0-9_]*$/

@doc """
Generates an archive SQL statement for the given partition.
"""
@spec archive_sql(String.t(), String.t()) :: String.t()
def archive_sql(partition_name, archive_table_name) do
quoted_archive_table_name = quote_identifier!(archive_table_name)
quoted_partition_name = quote_identifier!(partition_name)

"INSERT INTO #{quoted_archive_table_name} SELECT * FROM #{quoted_partition_name};"
end

defp quote_identifier!(identifier) when is_binary(identifier) do
if Regex.match?(@identifier_pattern, identifier) do
~s("#{identifier}")
else
raise ArgumentError, "invalid SQL identifier: #{inspect(identifier)}"
end
end

defp quote_identifier!(identifier) do
raise ArgumentError, "invalid SQL identifier: #{inspect(identifier)}"
end
end

@@ -7,6 +7,7 @@ defmodule Parrhesia.Storage.Events do
@type event_id :: binary()
@type event :: map()
@type filter :: map()
@type event_ref :: %{created_at: non_neg_integer(), id: binary()}
@type query_opts :: keyword()
@type count_result :: non_neg_integer() | %{optional(atom()) => term()}
@type reason :: term()
@@ -14,6 +15,8 @@ defmodule Parrhesia.Storage.Events do
@callback put_event(context(), event()) :: {:ok, event()} | {:error, reason()}
@callback get_event(context(), event_id()) :: {:ok, event() | nil} | {:error, reason()}
@callback query(context(), [filter()], query_opts()) :: {:ok, [event()]} | {:error, reason()}
@callback query_event_refs(context(), [filter()], query_opts()) ::
{:ok, [event_ref()]} | {:error, reason()}
@callback count(context(), [filter()], query_opts()) ::
{:ok, count_result()} | {:error, reason()}
@callback delete_by_request(context(), event()) :: {:ok, non_neg_integer()} | {:error, reason()}

@@ -16,6 +16,7 @@ defmodule Parrhesia.Storage.Moderation do
@callback allow_pubkey(context(), pubkey()) :: :ok | {:error, reason()}
@callback disallow_pubkey(context(), pubkey()) :: :ok | {:error, reason()}
@callback pubkey_allowed?(context(), pubkey()) :: {:ok, boolean()} | {:error, reason()}
@callback has_allowed_pubkeys?(context()) :: {:ok, boolean()} | {:error, reason()}

@callback ban_event(context(), event_id()) :: :ok | {:error, reason()}
@callback unban_event(context(), event_id()) :: :ok | {:error, reason()}

318
lib/parrhesia/storage/partitions.ex
Normal file
@@ -0,0 +1,318 @@
defmodule Parrhesia.Storage.Partitions do
@moduledoc """
Partition lifecycle helpers for Postgres `events` and `event_tags` monthly partitions.
"""

import Ecto.Query

alias Parrhesia.PostgresRepos
alias Parrhesia.Repo

@identifier_pattern ~r/^[a-zA-Z_][a-zA-Z0-9_]*$/
@monthly_partition_pattern ~r/^events_(\d{4})_(\d{2})$/
@events_partition_prefix "events"
@event_tags_partition_prefix "event_tags"
@default_months_ahead 2

@type monthly_partition :: %{
name: String.t(),
year: pos_integer(),
month: pos_integer(),
month_start_unix: non_neg_integer(),
month_end_unix: non_neg_integer()
}

@doc """
Lists all `events_*` partitions excluding the default partition.
"""
@spec list_partitions() :: [String.t()]
def list_partitions do
query =
from(table in "pg_tables",
where: table.schemaname == "public",
where: like(table.tablename, "events_%"),
where: table.tablename != "events_default",
select: table.tablename,
order_by: [asc: table.tablename]
)

read_repo()
|> then(fn repo -> repo.all(query) end)
end

@doc """
Lists monthly event partitions that match `events_YYYY_MM` naming.
"""
@spec list_monthly_partitions() :: [monthly_partition()]
def list_monthly_partitions do
list_partitions()
|> Enum.map(&parse_monthly_partition/1)
|> Enum.reject(&is_nil/1)
|> Enum.sort_by(&{&1.year, &1.month})
end

@doc """
Ensures monthly partitions exist for the current month and `months_ahead` future months.
"""
@spec ensure_monthly_partitions(keyword()) :: :ok | {:error, term()}
def ensure_monthly_partitions(opts \\ []) when is_list(opts) do
months_ahead =
opts
|> Keyword.get(:months_ahead, @default_months_ahead)
|> normalize_non_negative_integer(@default_months_ahead)

reference_date =
opts
|> Keyword.get(:reference_date, Date.utc_today())
|> normalize_reference_date()

reference_month = month_start(reference_date)

offsets =
if months_ahead == 0 do
[0]
else
Enum.to_list(0..months_ahead)
end

Enum.reduce_while(offsets, :ok, fn offset, :ok ->
target_month = shift_month(reference_month, offset)

case create_monthly_partitions(target_month) do
:ok -> {:cont, :ok}
{:error, reason} -> {:halt, {:error, reason}}
end
end)
end

@doc """
Returns the current database size in bytes.
"""
@spec database_size_bytes() :: {:ok, non_neg_integer()} | {:error, term()}
def database_size_bytes do
repo = read_repo()

case repo.query("SELECT pg_database_size(current_database())") do
{:ok, %{rows: [[size]]}} when is_integer(size) and size >= 0 -> {:ok, size}
{:ok, _result} -> {:error, :unexpected_result}
{:error, reason} -> {:error, reason}
end
end

@doc """
Drops an event partition table by name.

For monthly `events_YYYY_MM` partitions, the matching `event_tags_YYYY_MM`
partition is dropped first to keep partition lifecycle aligned.
"""
@spec drop_partition(String.t()) :: :ok | {:error, term()}
def drop_partition(partition_name) when is_binary(partition_name) do
if protected_partition?(partition_name) do
{:error, :protected_partition}
else
drop_partition_tables(partition_name)
end
end

@doc """
Returns the monthly `events` partition name for a date.
"""
@spec month_partition_name(Date.t()) :: String.t()
def month_partition_name(%Date{} = date) do
monthly_partition_name(@events_partition_prefix, date)
end

@doc """
Returns the monthly `event_tags` partition name for a date.
"""
@spec event_tags_month_partition_name(Date.t()) :: String.t()
def event_tags_month_partition_name(%Date{} = date) do
monthly_partition_name(@event_tags_partition_prefix, date)
end

defp monthly_partition_name(prefix, %Date{} = date) do
month_suffix = date.month |> Integer.to_string() |> String.pad_leading(2, "0")
"#{prefix}_#{date.year}_#{month_suffix}"
end

defp create_monthly_partitions(%Date{} = month_date) do
{start_unix, end_unix} = month_bounds_unix(month_date.year, month_date.month)

case create_monthly_partition(
month_partition_name(month_date),
@events_partition_prefix,
start_unix,
end_unix
) do
:ok ->
create_monthly_partition(
event_tags_month_partition_name(month_date),
@event_tags_partition_prefix,
start_unix,
end_unix
)

{:error, reason} ->
{:error, reason}
end
end

defp create_monthly_partition(partition_name, parent_table_name, start_unix, end_unix) do
quoted_partition_name = quote_identifier!(partition_name)
quoted_parent_table_name = quote_identifier!(parent_table_name)

sql =
"""
CREATE TABLE IF NOT EXISTS #{quoted_partition_name}
PARTITION OF #{quoted_parent_table_name}
FOR VALUES FROM (#{start_unix}) TO (#{end_unix})
"""

case Repo.query(sql) do
{:ok, _result} -> :ok
{:error, reason} -> {:error, reason}
end
end

defp drop_partition_tables(partition_name) do
case parse_monthly_partition(partition_name) do
nil -> drop_table(partition_name)
monthly_partition -> drop_monthly_partition(partition_name, monthly_partition)
end
end

defp drop_monthly_partition(partition_name, %{year: year, month: month}) do
month_date = Date.new!(year, month, 1)
tags_partition_name = monthly_partition_name(@event_tags_partition_prefix, month_date)

with :ok <- maybe_detach_events_partition(partition_name),
:ok <- drop_table(tags_partition_name) do
drop_table(partition_name)
end
end

defp maybe_detach_events_partition(partition_name) do
if attached_partition?(partition_name, @events_partition_prefix) do
quoted_parent_table_name = quote_identifier!(@events_partition_prefix)
quoted_partition_name = quote_identifier!(partition_name)

case Repo.query(
"ALTER TABLE #{quoted_parent_table_name} DETACH PARTITION #{quoted_partition_name}"
) do
{:ok, _result} -> :ok
{:error, reason} -> {:error, reason}
end
else
:ok
end
end

defp attached_partition?(partition_name, parent_table_name) do
query =
"""
SELECT 1
FROM pg_inherits AS inheritance
JOIN pg_class AS child ON child.oid = inheritance.inhrelid
JOIN pg_namespace AS child_ns ON child_ns.oid = child.relnamespace
JOIN pg_class AS parent ON parent.oid = inheritance.inhparent
JOIN pg_namespace AS parent_ns ON parent_ns.oid = parent.relnamespace
WHERE child_ns.nspname = 'public'
AND parent_ns.nspname = 'public'
AND child.relname = $1
AND parent.relname = $2
LIMIT 1
"""

repo = read_repo()

case repo.query(query, [partition_name, parent_table_name]) do
{:ok, %{rows: [[1]]}} -> true
{:ok, %{rows: []}} -> false
{:ok, _result} -> false
{:error, _reason} -> false
end
end

defp drop_table(table_name) do
quoted_table_name = quote_identifier!(table_name)

case Repo.query("DROP TABLE IF EXISTS #{quoted_table_name}") do
{:ok, _result} -> :ok
{:error, reason} -> {:error, reason}
end
end

defp protected_partition?(partition_name) do
partition_name in ["events", "events_default", "event_tags", "event_tags_default"]
end

defp parse_monthly_partition(partition_name) do
case Regex.run(@monthly_partition_pattern, partition_name, capture: :all_but_first) do
[year_text, month_text] ->
{year, ""} = Integer.parse(year_text)
{month, ""} = Integer.parse(month_text)

if month in 1..12 do
{month_start_unix, month_end_unix} = month_bounds_unix(year, month)

%{
name: partition_name,
year: year,
month: month,
month_start_unix: month_start_unix,
month_end_unix: month_end_unix
}
else
nil
end

_other ->
nil
end
end
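
Illustrative results for parse_monthly_partition/1 (the unix values are the UTC month bounds for 2025-03):

parse_monthly_partition("events_2025_03")
#=> %{name: "events_2025_03", year: 2025, month: 3,
#     month_start_unix: 1740787200, month_end_unix: 1743465600}
parse_monthly_partition("events_default")  #=> nil
parse_monthly_partition("events_2025_13")  #=> nil  (month out of range)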

defp month_bounds_unix(year, month) do
month_date = Date.new!(year, month, 1)
next_month_date = shift_month(month_date, 1)

{date_to_unix(month_date), date_to_unix(next_month_date)}
end
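
Quick sanity checks for the month arithmetic here and in shift_month/2 further down (illustrative):

month_bounds_unix(2025, 1)       #=> {1735689600, 1738368000}  (2025-01-01 to 2025-02-01 UTC)
shift_month(~D[2024-12-01], 1)   #=> ~D[2025-01-01]
shift_month(~D[2024-01-01], -1)  #=> ~D[2023-12-01]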

defp date_to_unix(%Date{} = date) do
date
|> DateTime.new!(~T[00:00:00], "Etc/UTC")
|> DateTime.to_unix()
end

defp read_repo, do: PostgresRepos.read()

defp month_start(%Date{} = date), do: Date.new!(date.year, date.month, 1)

defp shift_month(%Date{} = date, month_delta) when is_integer(month_delta) do
month_index = date.year * 12 + date.month - 1 + month_delta
shifted_year = div(month_index, 12)
shifted_month = rem(month_index, 12) + 1

Date.new!(shifted_year, shifted_month, 1)
end

defp normalize_reference_date(%Date{} = date), do: date
defp normalize_reference_date(_other), do: Date.utc_today()

defp normalize_non_negative_integer(value, _default) when is_integer(value) and value >= 0,
do: value

defp normalize_non_negative_integer(_value, default), do: default

defp quote_identifier!(identifier) when is_binary(identifier) do
if Regex.match?(@identifier_pattern, identifier) do
~s("#{identifier}")
else
raise ArgumentError, "invalid SQL identifier: #{inspect(identifier)}"
end
end

defp quote_identifier!(identifier) do
raise ArgumentError, "invalid SQL identifier: #{inspect(identifier)}"
end
end

@@ -5,18 +5,28 @@ defmodule Parrhesia.Storage.Supervisor do

use Supervisor

alias Parrhesia.PostgresRepos

def start_link(init_arg \\ []) do
Supervisor.start_link(__MODULE__, init_arg, name: __MODULE__)
end

@impl true
def init(_init_arg) do
children = [
{Parrhesia.Storage.Adapters.Postgres.ModerationCache,
name: Parrhesia.Storage.Adapters.Postgres.ModerationCache},
Parrhesia.Repo
]
children = moderation_cache_children() ++ PostgresRepos.started_repos()

Supervisor.init(children, strategy: :one_for_one)
end

defp moderation_cache_children do
if PostgresRepos.postgres_enabled?() and
Application.get_env(:parrhesia, :moderation_cache_enabled, true) do
[
{Parrhesia.Storage.Adapters.Postgres.ModerationCache,
name: Parrhesia.Storage.Adapters.Postgres.ModerationCache}
]
else
[]
end
end
end

@@ -13,7 +13,10 @@ defmodule Parrhesia.Subscriptions.Supervisor do
def init(_init_arg) do
children =
[
{Parrhesia.Subscriptions.Index, name: Parrhesia.Subscriptions.Index}
{Parrhesia.Subscriptions.Index, name: Parrhesia.Subscriptions.Index},
{Parrhesia.Fanout.Dispatcher, name: Parrhesia.Fanout.Dispatcher},
{Registry, keys: :unique, name: Parrhesia.API.Stream.Registry},
{DynamicSupervisor, strategy: :one_for_one, name: Parrhesia.API.Stream.Supervisor}
] ++
negentropy_children() ++ [{Parrhesia.Fanout.MultiNode, name: Parrhesia.Fanout.MultiNode}]

62
lib/parrhesia/sync/relay_info_client.ex
Normal file
@@ -0,0 +1,62 @@
defmodule Parrhesia.Sync.RelayInfoClient do
@moduledoc false

alias Parrhesia.HTTP
alias Parrhesia.Sync.TLS

@spec verify_remote_identity(map(), keyword()) :: :ok | {:error, term()}
def verify_remote_identity(server, opts \\ []) do
request_fun = Keyword.get(opts, :request_fun, &default_request/2)

with {:ok, response} <- request_fun.(relay_info_url(server.url), request_opts(server)),
{:ok, pubkey} <- extract_pubkey(response) do
if pubkey == server.auth_pubkey do
:ok
else
{:error, :remote_identity_mismatch}
end
end
end

defp default_request(url, opts) do
case HTTP.get(
url: url,
headers: [{"accept", "application/nostr+json"}],
decode_body: false,
connect_options: Keyword.merge([timeout: 5_000], opts),
receive_timeout: 5_000
) do
{:ok, response} -> {:ok, response}
{:error, reason} -> {:error, reason}
end
end

defp extract_pubkey(%Req.Response{status: 200, body: body}) when is_binary(body) do
with {:ok, payload} <- JSON.decode(body),
pubkey when is_binary(pubkey) and pubkey != "" <- Map.get(payload, "pubkey") do
{:ok, String.downcase(pubkey)}
else
nil -> {:error, :missing_remote_identity}
{:error, reason} -> {:error, reason}
_other -> {:error, :missing_remote_identity}
end
end

defp extract_pubkey(%Req.Response{status: status}),
do: {:error, {:relay_info_request_failed, status}}

defp extract_pubkey(_response), do: {:error, :invalid_relay_info}

defp request_opts(%{tls: %{mode: :disabled}}), do: []
defp request_opts(%{tls: tls}), do: TLS.req_connect_options(tls)

defp relay_info_url(relay_url) do
relay_url
|> URI.parse()
|> Map.update!(:scheme, fn
"wss" -> "https"
"ws" -> "http"
end)
|> URI.to_string()
end
end
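
Illustrative scheme mapping performed by relay_info_url/1 (example URLs, not from the diff):

relay_info_url("wss://relay.example.com/relay")  #=> "https://relay.example.com/relay"
relay_info_url("ws://localhost:4000/relay")      #=> "http://localhost:4000/relay"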
|
||||
43
lib/parrhesia/sync/supervisor.ex
Normal file
43
lib/parrhesia/sync/supervisor.ex
Normal file
@@ -0,0 +1,43 @@
defmodule Parrhesia.Sync.Supervisor do
  @moduledoc """
  Supervision entrypoint for sync control-plane processes.
  """

  use Supervisor

  def start_link(init_arg \\ []) do
    name = Keyword.get(init_arg, :name, __MODULE__)
    Supervisor.start_link(__MODULE__, init_arg, name: name)
  end

  @impl true
  def init(init_arg) do
    worker_registry = Keyword.get(init_arg, :worker_registry, Parrhesia.Sync.WorkerRegistry)
    worker_supervisor = Keyword.get(init_arg, :worker_supervisor, Parrhesia.Sync.WorkerSupervisor)
    manager_name = Keyword.get(init_arg, :manager, Parrhesia.API.Sync.Manager)

    children = [
      {Registry, keys: :unique, name: worker_registry},
      {DynamicSupervisor, strategy: :one_for_one, name: worker_supervisor},
      {Parrhesia.API.Sync.Manager,
       manager_opts(init_arg, manager_name, worker_registry, worker_supervisor)}
    ]

    Supervisor.init(children, strategy: :one_for_one)
  end

  defp manager_opts(init_arg, manager_name, worker_registry, worker_supervisor) do
    [
      name: manager_name,
      worker_registry: worker_registry,
      worker_supervisor: worker_supervisor
    ] ++
      Keyword.take(init_arg, [
        :path,
        :start_workers?,
        :transport_module,
        :relay_info_opts,
        :transport_opts
      ])
  end
end
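
Because every dependency is keyword-driven, the tree can be booted in isolation. A hypothetical test sketch (all MyTest.* names are placeholders, not part of the diff):

# Hypothetical sketch: starting the sync tree with test-local names.
{:ok, _sup} =
  Parrhesia.Sync.Supervisor.start_link(
    name: MyTest.SyncSupervisor,
    worker_registry: MyTest.WorkerRegistry,
    worker_supervisor: MyTest.WorkerSupervisor,
    manager: MyTest.SyncManager,
    start_workers?: false
  )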
112
lib/parrhesia/sync/tls.ex
Normal file
@@ -0,0 +1,112 @@
defmodule Parrhesia.Sync.TLS do
  @moduledoc false

  @type tls_config :: %{
          mode: :required | :disabled,
          hostname: String.t(),
          pins: [%{type: :spki_sha256, value: String.t()}]
        }

  @spec websocket_options(tls_config()) :: keyword()
  def websocket_options(%{mode: :disabled}), do: [insecure: true]

  def websocket_options(%{mode: :required} = tls) do
    [
      ssl_options: transport_opts(tls)
    ]
  end

  @spec req_connect_options(tls_config()) :: keyword()
  def req_connect_options(%{mode: :disabled}), do: []

  def req_connect_options(%{mode: :required} = tls) do
    [
      transport_opts: transport_opts(tls)
    ]
  end

  def transport_opts(%{hostname: hostname, pins: pins}) do
    [
      verify: :verify_peer,
      cacerts: system_cacerts(),
      server_name_indication: String.to_charlist(hostname),
      customize_hostname_check: [
        match_fun: :public_key.pkix_verify_hostname_match_fun(:https)
      ]
    ]
    |> maybe_put_verify_fun(pins)
  end

  defp maybe_put_verify_fun(options, []), do: options

  defp maybe_put_verify_fun(options, pins) do
    Keyword.put(
      options,
      :verify_fun,
      {&verify_certificate/3, %{pins: MapSet.new(Enum.map(pins, & &1.value)), matched?: false}}
    )
  end

  # A pin must have matched somewhere along the chain by the time the peer
  # certificate is declared valid; otherwise the handshake fails.
  defp verify_certificate(_cert, :valid_peer, %{matched?: true} = state), do: {:valid, state}
  defp verify_certificate(_cert, :valid_peer, _state), do: {:fail, :pin_mismatch}

  defp verify_certificate(_cert, {:bad_cert, reason}, _state), do: {:fail, reason}

  defp verify_certificate(cert, _event, state) when is_binary(cert) do
    matched? = MapSet.member?(state.pins, spki_pin_from_verify(cert))
    {:valid, %{state | matched?: state.matched? or matched?}}
  rescue
    _error -> {:fail, :invalid_certificate}
  end

  defp verify_certificate({:OTPCertificate, _tbs, _sig_alg, _sig} = cert, _event, state) do
    matched? = MapSet.member?(state.pins, spki_pin_from_verify(cert))
    {:valid, %{state | matched?: state.matched? or matched?}}
  rescue
    _error -> {:fail, :invalid_certificate}
  end

  defp verify_certificate({:Certificate, _tbs, _sig_alg, _sig} = cert, _event, state) do
    matched? = MapSet.member?(state.pins, spki_pin_from_verify(cert))
    {:valid, %{state | matched?: state.matched? or matched?}}
  rescue
    _error -> {:fail, :invalid_certificate}
  end

  defp verify_certificate(_cert, _event, state), do: {:valid, state}

  defp spki_pin(cert_der) do
    cert = :public_key.pkix_decode_cert(cert_der, :plain)
    # The TBSCertificate is element 1 of the :Certificate record, and its
    # SubjectPublicKeyInfo is element 7 of the TBSCertificate record.
    spki = cert |> elem(1) |> elem(7)

    :public_key.der_encode(:SubjectPublicKeyInfo, spki)
    |> then(&:crypto.hash(:sha256, &1))
    |> Base.encode64()
  end

  defp spki_pin_from_verify(cert) when is_binary(cert), do: spki_pin(cert)

  defp spki_pin_from_verify({:OTPCertificate, _tbs, _sig_alg, _sig} = cert) do
    cert
    |> then(&:public_key.pkix_encode(:OTPCertificate, &1, :otp))
    |> spki_pin()
  end

  defp spki_pin_from_verify({:Certificate, _tbs, _sig_alg, _sig} = cert) do
    cert
    |> then(&:public_key.der_encode(:Certificate, &1))
    |> spki_pin()
  end

  defp spki_pin_from_verify(_cert) do
    raise(ArgumentError, "invalid certificate")
  end

  defp system_cacerts do
    if function_exported?(:public_key, :cacerts_get, 0) do
      :public_key.cacerts_get()
    else
      []
    end
  end
end
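
To populate the pins list, the base64 SPKI digest can be computed ahead of time from a PEM certificate. A hypothetical one-off sketch (the file path is a placeholder; the elem/2 indices follow the same record layout the module relies on):

# Hypothetical sketch: deriving a :spki_sha256 pin value from a PEM file.
pem = File.read!("relay-cert.pem")
[{:Certificate, der, _}] = :public_key.pem_decode(pem)

{:Certificate, tbs, _sig_alg, _sig} = :public_key.pkix_decode_cert(der, :plain)
# SubjectPublicKeyInfo is element 7 of the TBSCertificate record.
spki = elem(tbs, 7)

pin =
  :SubjectPublicKeyInfo
  |> :public_key.der_encode(spki)
  |> then(&:crypto.hash(:sha256, &1))
  |> Base.encode64()

# pin can now be used as %{type: :spki_sha256, value: pin}.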
7
lib/parrhesia/sync/transport.ex
Normal file
@@ -0,0 +1,7 @@
defmodule Parrhesia.Sync.Transport do
  @moduledoc false

  @callback connect(pid(), map(), keyword()) :: {:ok, pid()} | {:error, term()}
  @callback send_json(pid(), term()) :: :ok | {:error, term()}
  @callback close(pid()) :: :ok
end
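
The behaviour exists so tests can swap the real WebSocket client out. A hypothetical in-memory double that just forwards payloads to the test process (MyTest.FakeTransport is a placeholder, not part of the diff):

# Hypothetical sketch: a no-network Parrhesia.Sync.Transport double.
defmodule MyTest.FakeTransport do
  @behaviour Parrhesia.Sync.Transport

  @impl true
  def connect(owner, _server, _opts) do
    # Hand the owner pid back as the "transport" pid for simplicity.
    send(owner, {:sync_transport, owner, :connected, %{resp_headers: []}})
    {:ok, owner}
  end

  @impl true
  def send_json(pid, payload) do
    send(pid, {:fake_transport_sent, payload})
    :ok
  end

  @impl true
  def close(_pid), do: :ok
end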
93
lib/parrhesia/sync/transport/websockex_client.ex
Normal file
@@ -0,0 +1,93 @@
defmodule Parrhesia.Sync.Transport.WebSockexClient do
  @moduledoc false

  use WebSockex

  alias Parrhesia.Sync.TLS

  @behaviour Parrhesia.Sync.Transport

  @impl true
  def connect(owner, server, opts \\ []) do
    state = %{
      owner: owner,
      server: server
    }

    transport_opts =
      server.tls
      |> TLS.websocket_options()
      |> merge_websocket_opts(Keyword.get(opts, :websocket_opts, []))
      |> Keyword.put(:handle_initial_conn_failure, true)

    WebSockex.start(server.url, __MODULE__, state, transport_opts)
  end

  @impl true
  def send_json(pid, payload) do
    WebSockex.cast(pid, {:send_json, payload})
  end

  @impl true
  def close(pid) do
    WebSockex.cast(pid, :close)
    :ok
  end

  @impl true
  def handle_connect(conn, state) do
    send(state.owner, {:sync_transport, self(), :connected, %{resp_headers: conn.resp_headers}})
    {:ok, state}
  end

  @impl true
  def handle_frame({:text, payload}, state) do
    message =
      case JSON.decode(payload) do
        {:ok, frame} -> frame
        {:error, reason} -> {:decode_error, reason, payload}
      end

    send(state.owner, {:sync_transport, self(), :frame, message})
    {:ok, state}
  end

  def handle_frame(frame, state) do
    send(state.owner, {:sync_transport, self(), :frame, frame})
    {:ok, state}
  end

  @impl true
  def handle_cast({:send_json, payload}, state) do
    {:reply, {:text, JSON.encode!(payload)}, state}
  end

  def handle_cast(:close, state) do
    {:close, state}
  end

  @impl true
  def handle_disconnect(status, state) do
    send(state.owner, {:sync_transport, self(), :disconnected, status})
    {:ok, state}
  end

  defp merge_websocket_opts(base_opts, override_opts) do
    override_ssl_options = Keyword.get(override_opts, :ssl_options)

    merged_ssl_options =
      case {Keyword.get(base_opts, :ssl_options), override_ssl_options} do
        {nil, nil} -> nil
        {base_ssl, nil} -> base_ssl
        {nil, override_ssl} -> override_ssl
        {base_ssl, override_ssl} -> Keyword.merge(base_ssl, override_ssl)
      end

    base_opts
    |> Keyword.merge(Keyword.delete(override_opts, :ssl_options))
    |> maybe_put_ssl_options(merged_ssl_options)
  end

  defp maybe_put_ssl_options(opts, nil), do: opts
  defp maybe_put_ssl_options(opts, ssl_options), do: Keyword.put(opts, :ssl_options, ssl_options)
end
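
Callers can layer extra WebSockex options on top of the TLS defaults via :websocket_opts. Note the merge semantics: :ssl_options is merged key-by-key into the TLS-derived list, while every other key simply replaces the default. A hypothetical sketch reusing the server map from the RelayInfoClient sketch above (the option values are illustrative):

# Hypothetical sketch: per-call overrides merged over the TLS defaults.
opts = [
  websocket_opts: [
    extra_headers: [{"x-debug", "1"}],
    # Test-only override, deep-merged into the derived ssl_options.
    ssl_options: [verify: :verify_none]
  ]
]

{:ok, _pid} = Parrhesia.Sync.Transport.WebSockexClient.connect(self(), server, opts)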
367
lib/parrhesia/sync/worker.ex
Normal file
@@ -0,0 +1,367 @@
defmodule Parrhesia.Sync.Worker do
  @moduledoc false

  use GenServer

  alias Parrhesia.API.Events
  alias Parrhesia.API.Identity
  alias Parrhesia.API.RequestContext
  alias Parrhesia.API.Sync.Manager
  alias Parrhesia.Sync.RelayInfoClient
  alias Parrhesia.Sync.Transport.WebSockexClient

  @initial_backoff_ms 1_000
  @max_backoff_ms 30_000
  # NIP-42 client authentication event kind.
  @auth_kind 22_242

  defstruct server: nil,
            manager: nil,
            transport_module: WebSockexClient,
            transport_pid: nil,
            phase: :idle,
            current_subscription_id: nil,
            backoff_ms: @initial_backoff_ms,
            authenticated?: false,
            auth_event_id: nil,
            resubscribe_after_auth?: false,
            cursor_created_at: nil,
            cursor_event_id: nil,
            relay_info_opts: [],
            transport_opts: []

  @type t :: %__MODULE__{}

  def child_spec(opts) do
    server = Keyword.fetch!(opts, :server)

    %{
      id: {:sync_worker, server.id},
      start: {__MODULE__, :start_link, [opts]},
      restart: :transient
    }
  end

  def start_link(opts) do
    name = Keyword.get(opts, :name)
    GenServer.start_link(__MODULE__, opts, name: name)
  end

  def sync_now(worker), do: GenServer.cast(worker, :sync_now)
  def stop(worker), do: GenServer.stop(worker, :normal)

  @impl true
  def init(opts) do
    server = Keyword.fetch!(opts, :server)
    runtime = Keyword.get(opts, :runtime, %{})

    state = %__MODULE__{
      server: server,
      manager: Keyword.fetch!(opts, :manager),
      transport_module: Keyword.get(opts, :transport_module, WebSockexClient),
      cursor_created_at: Map.get(runtime, :cursor_created_at),
      cursor_event_id: Map.get(runtime, :cursor_event_id),
      relay_info_opts: Keyword.get(opts, :relay_info_opts, []),
      transport_opts: Keyword.get(opts, :transport_opts, [])
    }

    send(self(), :connect)
    {:ok, state}
  end

  @impl true
  def handle_cast(:sync_now, state) do
    Manager.runtime_event(state.manager, state.server.id, :subscription_restart)

    next_state =
      state
      |> close_subscription()
      |> issue_subscription()

    {:noreply, next_state}
  end

  @impl true
  def handle_info(:connect, %__MODULE__{transport_pid: nil} = state) do
    case RelayInfoClient.verify_remote_identity(state.server, state.relay_info_opts) do
      :ok ->
        connect_transport(state)

      {:error, reason} ->
        Manager.runtime_event(state.manager, state.server.id, :disconnected, %{reason: reason})
        {:noreply, schedule_reconnect(state)}
    end
  end

  def handle_info(:connect, state), do: {:noreply, state}

  def handle_info({:sync_transport, transport_pid, :connected, _info}, state) do
    Manager.runtime_event(state.manager, state.server.id, :connected, %{})

    next_state =
      state
      |> Map.put(:transport_pid, transport_pid)
      |> Map.put(:backoff_ms, @initial_backoff_ms)
      |> Map.put(:authenticated?, false)
      |> Map.put(:auth_event_id, nil)
      |> Map.put(:resubscribe_after_auth?, false)
      |> issue_subscription()

    {:noreply, next_state}
  end

  def handle_info({:sync_transport, _transport_pid, :frame, frame}, state) do
    {:noreply, handle_transport_frame(state, frame)}
  end

  def handle_info({:sync_transport, _transport_pid, :disconnected, status}, state) do
    Manager.runtime_event(state.manager, state.server.id, :disconnected, %{reason: status.reason})

    next_state =
      state
      |> Map.put(:transport_pid, nil)
      |> Map.put(:phase, :idle)
      |> Map.put(:authenticated?, false)
      |> Map.put(:auth_event_id, nil)
      |> Map.put(:resubscribe_after_auth?, false)
      |> Map.put(:current_subscription_id, nil)
      |> schedule_reconnect()

    {:noreply, next_state}
  end

  def handle_info(_message, state), do: {:noreply, state}

  defp connect_transport(state) do
    case state.transport_module.connect(self(), state.server, state.transport_opts) do
      {:ok, transport_pid} ->
        {:noreply, %{state | transport_pid: transport_pid, phase: :connecting}}

      {:error, reason} ->
        Manager.runtime_event(state.manager, state.server.id, :disconnected, %{reason: reason})
        {:noreply, schedule_reconnect(state)}
    end
  end

  defp handle_transport_frame(state, ["AUTH", challenge]) when is_binary(challenge) do
    case send_auth_event(state, challenge) do
      {:ok, auth_event_id} ->
        %{state | auth_event_id: auth_event_id, phase: :authenticating}

      {:error, reason} ->
        Manager.runtime_event(state.manager, state.server.id, :error, %{reason: reason})
        state
    end
  end

  defp handle_transport_frame(state, ["OK", event_id, true, _message])
       when event_id == state.auth_event_id do
    next_state = %{state | authenticated?: true, auth_event_id: nil}

    if next_state.resubscribe_after_auth? do
      next_state
      |> Map.put(:resubscribe_after_auth?, false)
      |> issue_subscription()
    else
      next_state
    end
  end

  defp handle_transport_frame(state, ["OK", event_id, false, message])
       when event_id == state.auth_event_id do
    Manager.runtime_event(state.manager, state.server.id, :error, %{reason: message})
    schedule_reconnect(%{state | auth_event_id: nil, authenticated?: false})
  end

  defp handle_transport_frame(state, ["EVENT", subscription_id, event])
       when subscription_id == state.current_subscription_id and is_map(event) do
    handle_remote_event(state, event)
  end

  defp handle_transport_frame(state, ["EOSE", subscription_id])
       when subscription_id == state.current_subscription_id do
    Manager.runtime_event(state.manager, state.server.id, :sync_completed, %{})
    %{state | phase: :streaming}
  end

  defp handle_transport_frame(state, ["CLOSED", subscription_id, message])
       when subscription_id == state.current_subscription_id do
    auth_required? = is_binary(message) and String.contains?(String.downcase(message), "auth")

    next_state =
      state
      |> Map.put(:current_subscription_id, nil)
      |> Map.put(:phase, :idle)

    if auth_required? and not state.authenticated? do
      %{next_state | resubscribe_after_auth?: true}
    else
      Manager.runtime_event(state.manager, state.server.id, :error, %{reason: message})
      schedule_reconnect(next_state)
    end
  end

  defp handle_transport_frame(state, {:decode_error, reason, _payload}) do
    Manager.runtime_event(state.manager, state.server.id, :error, %{reason: reason})
    state
  end

  defp handle_transport_frame(state, _frame), do: state

  defp issue_subscription(%__MODULE__{transport_pid: nil} = state), do: state

  defp issue_subscription(state) do
    subscription_id = subscription_id(state.server.id)
    filters = sync_filters(state)

    :ok =
      state.transport_module.send_json(state.transport_pid, ["REQ", subscription_id | filters])

    Manager.runtime_event(state.manager, state.server.id, :sync_started, %{})

    %{
      state
      | current_subscription_id: subscription_id,
        phase: :catchup
    }
  end

  defp close_subscription(%__MODULE__{transport_pid: nil} = state), do: state
  defp close_subscription(%__MODULE__{current_subscription_id: nil} = state), do: state

  defp close_subscription(state) do
    :ok =
      state.transport_module.send_json(state.transport_pid, [
        "CLOSE",
        state.current_subscription_id
      ])

    %{state | current_subscription_id: nil}
  end

  defp send_auth_event(state, challenge) do
    event = %{
      "created_at" => System.system_time(:second),
      "kind" => @auth_kind,
      "tags" => [["challenge", challenge], ["relay", state.server.url]],
      "content" => ""
    }

    with {:ok, signed_event} <- Identity.sign_event(event) do
      :ok = state.transport_module.send_json(state.transport_pid, ["AUTH", signed_event])
      {:ok, signed_event["id"]}
    end
  end

  defp handle_remote_event(state, event) do
    context = request_context(state)

    case Events.publish(event, context: context) do
      {:ok, %{accepted: true}} ->
        Manager.runtime_event(state.manager, state.server.id, :event_result, %{
          result: :accepted,
          event: event
        })

        advance_cursor(state, event)

      {:ok, %{accepted: false, reason: :duplicate_event}} ->
        Manager.runtime_event(state.manager, state.server.id, :event_result, %{
          result: :duplicate,
          event: event
        })

        advance_cursor(state, event)

      {:ok, %{accepted: false, reason: reason}} ->
        Manager.runtime_event(state.manager, state.server.id, :event_result, %{
          result: :rejected,
          event: event,
          reason: reason
        })

        state

      {:error, reason} ->
        Manager.runtime_event(state.manager, state.server.id, :event_result, %{
          result: :rejected,
          event: event,
          reason: reason
        })

        state
    end
  end

  defp request_context(state) do
    %RequestContext{
      authenticated_pubkeys: MapSet.new([state.server.auth_pubkey]),
      caller: :sync,
      subscription_id: state.current_subscription_id,
      peer_id: state.server.id,
      metadata: %{
        sync_server_id: state.server.id,
        remote_url: state.server.url
      }
    }
  end

  defp advance_cursor(state, event) do
    created_at = Map.get(event, "created_at")
    event_id = Map.get(event, "id")

    if newer_cursor?(state.cursor_created_at, state.cursor_event_id, created_at, event_id) do
      Manager.runtime_event(state.manager, state.server.id, :cursor_advanced, %{
        created_at: created_at,
        event_id: event_id
      })

      %{state | cursor_created_at: created_at, cursor_event_id: event_id}
    else
      state
    end
  end

  defp newer_cursor?(nil, _cursor_event_id, created_at, event_id),
    do: is_integer(created_at) and is_binary(event_id)

  defp newer_cursor?(cursor_created_at, cursor_event_id, created_at, event_id) do
    cond do
      not is_integer(created_at) or not is_binary(event_id) ->
        false

      created_at > cursor_created_at ->
        true

      created_at == cursor_created_at and is_binary(cursor_event_id) and
          event_id > cursor_event_id ->
        true

      true ->
        false
    end
  end

  defp sync_filters(state) do
    Enum.map(state.server.filters, fn filter ->
      case since_value(state, filter) do
        nil -> filter
        since -> Map.put(filter, "since", since)
      end
    end)
  end

  defp since_value(%__MODULE__{cursor_created_at: nil}, _filter), do: nil

  defp since_value(state, _filter) do
    # Re-request a small overlap window behind the cursor so events that
    # arrived out of order around the cursor are not missed.
    max(state.cursor_created_at - state.server.overlap_window_seconds, 0)
  end

  defp schedule_reconnect(state) do
    Process.send_after(self(), :connect, state.backoff_ms)
    %{state | backoff_ms: min(state.backoff_ms * 2, @max_backoff_ms)}
  end

  defp subscription_id(server_id) do
    "sync-#{server_id}-#{System.unique_integer([:positive, :monotonic])}"
  end
end
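
Putting the pieces together: a hypothetical sketch that starts one worker under the sync tree, reusing the fake transport and request stub from the sketches above (all MyTest.* names and the server field values are placeholders; the field names mirror what the worker reads):

# Hypothetical sketch: one sync worker with no real network dependencies.
server = %{
  id: "relay-1",
  url: "wss://relay.example.com",
  auth_pubkey: "abc123",
  filters: [%{"kinds" => [1]}],
  overlap_window_seconds: 60,
  tls: %{mode: :disabled}
}

{:ok, _worker} =
  DynamicSupervisor.start_child(
    MyTest.WorkerSupervisor,
    {Parrhesia.Sync.Worker,
     server: server,
     manager: MyTest.SyncManager,
     transport_module: MyTest.FakeTransport,
     # Reuse the request_fun stub from the RelayInfoClient sketch.
     relay_info_opts: [request_fun: stub]}
  )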
@@ -30,10 +30,19 @@ defmodule Parrhesia.Tasks.ExpirationWorker do
   def handle_info(:tick, state) do
     started_at = System.monotonic_time()

-    _result = Storage.events().purge_expired([])
+    purged_events =
+      case Storage.events().purge_expired([]) do
+        {:ok, count} when is_integer(count) and count >= 0 -> count
+        _other -> 0
+      end

     duration = System.monotonic_time() - started_at
-    Telemetry.emit([:parrhesia, :maintenance, :purge_expired, :stop], %{duration: duration}, %{})
+
+    Telemetry.emit(
+      [:parrhesia, :maintenance, :purge_expired, :stop],
+      %{duration: duration, purged_events: purged_events},
+      %{}
+    )

     schedule_tick(state.interval_ms)
     {:noreply, state}
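
The enriched measurement can be consumed with a standard :telemetry handler, assuming Parrhesia.Telemetry.emit forwards to :telemetry.execute. A hypothetical sketch (the handler id and log line are illustrative):

# Hypothetical sketch: logging the new purged_events measurement.
:telemetry.attach(
  "log-purge-expired",
  [:parrhesia, :maintenance, :purge_expired, :stop],
  fn _event, measurements, _metadata, _config ->
    ms = System.convert_time_unit(measurements.duration, :native, :millisecond)
    IO.puts("purged #{measurements.purged_events} expired events in #{ms}ms")
  end,
  nil
)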
40
lib/parrhesia/tasks/nip66_publisher.ex
Normal file
@@ -0,0 +1,40 @@
defmodule Parrhesia.Tasks.Nip66Publisher do
  @moduledoc """
  Periodic worker that publishes NIP-66 monitor and discovery events.
  """

  use GenServer

  alias Parrhesia.NIP66

  @spec start_link(keyword()) :: GenServer.on_start()
  def start_link(opts \\ []) do
    name = Keyword.get(opts, :name, __MODULE__)
    GenServer.start_link(__MODULE__, opts, name: name)
  end

  @impl true
  def init(opts) do
    state = %{
      interval_ms: Keyword.get(opts, :interval_ms, NIP66.publish_interval_ms()),
      publish_opts: Keyword.drop(opts, [:name, :interval_ms, :nip66_module]),
      nip66_module: Keyword.get(opts, :nip66_module, NIP66)
    }

    schedule_tick(0)
    {:ok, state}
  end

  @impl true
  def handle_info(:tick, state) do
    _result = state.nip66_module.publish_snapshot(state.publish_opts)
    schedule_tick(state.interval_ms)
    {:noreply, state}
  end

  def handle_info(_message, state), do: {:noreply, state}

  defp schedule_tick(interval_ms) do
    Process.send_after(self(), :tick, interval_ms)
  end
end
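
Like the other task workers, the publisher takes everything through opts, which keeps tests deterministic. A hypothetical sketch with a stub NIP-66 module (MyTest.* names are placeholders):

# Hypothetical sketch: driving Nip66Publisher with a stub module and a fast tick.
defmodule MyTest.FakeNip66 do
  def publish_snapshot(_opts), do: {:ok, :published}
end

{:ok, _pid} =
  Parrhesia.Tasks.Nip66Publisher.start_link(
    name: MyTest.Nip66Publisher,
    interval_ms: 50,
    nip66_module: MyTest.FakeNip66
  )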
280
lib/parrhesia/tasks/partition_retention_worker.ex
Normal file
@@ -0,0 +1,280 @@
defmodule Parrhesia.Tasks.PartitionRetentionWorker do
  @moduledoc """
  Periodic worker that ensures monthly event partitions and applies retention pruning.
  """

  use GenServer

  alias Parrhesia.Storage.Partitions
  alias Parrhesia.Telemetry

  @default_check_interval_hours 24
  @default_months_ahead 2
  @default_max_partitions_to_drop_per_run 1
  @bytes_per_gib 1_073_741_824

  @type monthly_partition :: Partitions.monthly_partition()

  @spec start_link(keyword()) :: GenServer.on_start()
  def start_link(opts \\ []) do
    name = Keyword.get(opts, :name, __MODULE__)
    GenServer.start_link(__MODULE__, opts, name: name)
  end

  @impl true
  def init(opts) do
    retention_config = Application.get_env(:parrhesia, :retention, [])

    state = %{
      partition_ops: Keyword.get(opts, :partition_ops, Partitions),
      interval_ms: interval_ms(opts, retention_config),
      months_ahead: months_ahead(opts, retention_config),
      max_db_gib: max_db_gib(opts, retention_config),
      max_months_to_keep: max_months_to_keep(opts, retention_config),
      max_partitions_to_drop_per_run: max_partitions_to_drop_per_run(opts, retention_config),
      today_fun: today_fun(opts)
    }

    schedule_tick(0)
    {:ok, state}
  end

  @impl true
  def handle_info(:tick, state) do
    started_at = System.monotonic_time()

    {dropped_count, status} =
      case run_maintenance(state) do
        {:ok, count} -> {count, :ok}
        {:error, _reason} -> {0, :error}
      end

    Telemetry.emit(
      [:parrhesia, :maintenance, :partition_retention, :stop],
      %{
        duration: System.monotonic_time() - started_at,
        dropped_partitions: dropped_count
      },
      %{status: status}
    )

    schedule_tick(state.interval_ms)
    {:noreply, state}
  end

  def handle_info(_message, state), do: {:noreply, state}

  defp run_maintenance(state) do
    case state.partition_ops.ensure_monthly_partitions(months_ahead: state.months_ahead) do
      :ok -> maybe_drop_oldest_partitions(state)
      {:error, reason} -> {:error, reason}
    end
  end

  defp maybe_drop_oldest_partitions(%{max_partitions_to_drop_per_run: max_drops})
       when max_drops <= 0,
       do: {:ok, 0}

  defp maybe_drop_oldest_partitions(state) do
    1..state.max_partitions_to_drop_per_run
    |> Enum.reduce_while({:ok, 0}, fn _attempt, {:ok, dropped_count} ->
      drop_oldest_partition_once(state, dropped_count)
    end)
  end

  defp drop_oldest_partition_once(state, dropped_count) do
    case next_partition_to_drop(state) do
      {:ok, partition} -> apply_partition_drop(state, partition, dropped_count)
      {:error, reason} -> {:halt, {:error, reason}}
    end
  end

  defp apply_partition_drop(_state, nil, dropped_count), do: {:halt, {:ok, dropped_count}}

  defp apply_partition_drop(state, partition, dropped_count) do
    case state.partition_ops.drop_partition(partition.name) do
      :ok -> {:cont, {:ok, dropped_count + 1}}
      {:error, reason} -> {:halt, {:error, reason}}
    end
  end

  defp next_partition_to_drop(state) do
    partitions = state.partition_ops.list_monthly_partitions()
    current_month_index = current_month_index(state.today_fun)

    month_limit_candidate =
      oldest_partition_exceeding_month_limit(
        partitions,
        state.max_months_to_keep,
        current_month_index
      )

    with {:ok, size_limit_candidate} <-
           oldest_partition_exceeding_size_limit(
             partitions,
             state.max_db_gib,
             current_month_index,
             state.partition_ops
           ) do
      {:ok, pick_oldest_partition(month_limit_candidate, size_limit_candidate)}
    end
  end

  defp oldest_partition_exceeding_month_limit(_partitions, :infinity, _current_month_index),
    do: nil

  defp oldest_partition_exceeding_month_limit(partitions, max_months_to_keep, current_month_index)
       when is_integer(max_months_to_keep) and max_months_to_keep > 0 do
    oldest_month_to_keep_index = current_month_index - (max_months_to_keep - 1)

    partitions
    |> Enum.filter(fn partition ->
      month_index(partition) < current_month_index and
        month_index(partition) < oldest_month_to_keep_index
    end)
    |> Enum.min_by(&month_index/1, fn -> nil end)
  end

  defp oldest_partition_exceeding_month_limit(
         _partitions,
         _max_months_to_keep,
         _current_month_index
       ),
       do: nil

  defp oldest_partition_exceeding_size_limit(
         _partitions,
         :infinity,
         _current_month_index,
         _archiver
       ),
       do: {:ok, nil}

  defp oldest_partition_exceeding_size_limit(
         partitions,
         max_db_gib,
         current_month_index,
         archiver
       )
       when is_integer(max_db_gib) and max_db_gib > 0 do
    with {:ok, current_size_bytes} <- archiver.database_size_bytes() do
      max_size_bytes = max_db_gib * @bytes_per_gib

      if current_size_bytes > max_size_bytes do
        {:ok, oldest_completed_partition(partitions, current_month_index)}
      else
        {:ok, nil}
      end
    end
  end

  defp oldest_partition_exceeding_size_limit(
         _partitions,
         _max_db_gib,
         _current_month_index,
         _archiver
       ),
       do: {:ok, nil}

  defp oldest_completed_partition(partitions, current_month_index) do
    partitions
    |> Enum.filter(&(month_index(&1) < current_month_index))
    |> Enum.min_by(&month_index/1, fn -> nil end)
  end

  defp pick_oldest_partition(nil, nil), do: nil
  defp pick_oldest_partition(partition, nil), do: partition
  defp pick_oldest_partition(nil, partition), do: partition

  defp pick_oldest_partition(left, right) do
    if month_index(left) <= month_index(right) do
      left
    else
      right
    end
  end

  defp month_index(%{year: year, month: month}) when is_integer(year) and is_integer(month) do
    year * 12 + month
  end

  defp current_month_index(today_fun) do
    today = today_fun.()
    today.year * 12 + today.month
  end

  defp interval_ms(opts, retention_config) do
    case Keyword.get(opts, :interval_ms) do
      value when is_integer(value) and value > 0 ->
        value

      _other ->
        retention_config
        |> Keyword.get(:check_interval_hours, @default_check_interval_hours)
        |> normalize_positive_integer(@default_check_interval_hours)
        |> hours_to_ms()
    end
  end

  defp months_ahead(opts, retention_config) do
    opts
    |> Keyword.get(
      :months_ahead,
      Keyword.get(retention_config, :months_ahead, @default_months_ahead)
    )
    |> normalize_non_negative_integer(@default_months_ahead)
  end

  # Note: the :max_db_bytes key is interpreted as a GiB count
  # (the limit is later multiplied by @bytes_per_gib).
  defp max_db_gib(opts, retention_config) do
    opts
    |> Keyword.get(:max_db_bytes, Keyword.get(retention_config, :max_db_bytes, :infinity))
    |> normalize_limit()
  end

  defp max_months_to_keep(opts, retention_config) do
    opts
    |> Keyword.get(
      :max_months_to_keep,
      Keyword.get(retention_config, :max_months_to_keep, :infinity)
    )
    |> normalize_limit()
  end

  defp max_partitions_to_drop_per_run(opts, retention_config) do
    opts
    |> Keyword.get(
      :max_partitions_to_drop_per_run,
      Keyword.get(
        retention_config,
        :max_partitions_to_drop_per_run,
        @default_max_partitions_to_drop_per_run
      )
    )
    |> normalize_non_negative_integer(@default_max_partitions_to_drop_per_run)
  end

  defp today_fun(opts) do
    case Keyword.get(opts, :today_fun, &Date.utc_today/0) do
      function when is_function(function, 0) -> function
      _other -> &Date.utc_today/0
    end
  end

  defp normalize_limit(:infinity), do: :infinity
  defp normalize_limit(value) when is_integer(value) and value > 0, do: value
  defp normalize_limit(_value), do: :infinity

  defp normalize_positive_integer(value, _default) when is_integer(value) and value > 0, do: value
  defp normalize_positive_integer(_value, default), do: default

  defp normalize_non_negative_integer(value, _default) when is_integer(value) and value >= 0,
    do: value

  defp normalize_non_negative_integer(_value, default), do: default

  defp hours_to_ms(hours), do: hours * 60 * 60 * 1000

  defp schedule_tick(interval_ms) do
    Process.send_after(self(), :tick, interval_ms)
  end
end
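
The worker reads its limits from the :retention application env. A hypothetical config sketch (the values are illustrative; the keys match what init/1 reads):

# Hypothetical sketch: retention limits as read by PartitionRetentionWorker.
import Config

config :parrhesia, :retention,
  check_interval_hours: 24,
  months_ahead: 2,
  # Interpreted as a GiB count despite the key name; :infinity disables the size limit.
  max_db_bytes: 50,
  max_months_to_keep: 12,
  max_partitions_to_drop_per_run: 1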
@@ -11,13 +11,34 @@ defmodule Parrhesia.Tasks.Supervisor do

   @impl true
   def init(_init_arg) do
-    children =
-      if Application.get_env(:parrhesia, :enable_expiration_worker, true) do
-        [{Parrhesia.Tasks.ExpirationWorker, name: Parrhesia.Tasks.ExpirationWorker}]
-      else
-        []
-      end
+    children = expiration_children() ++ partition_retention_children() ++ nip66_children()

     Supervisor.init(children, strategy: :one_for_one)
   end
+
+  defp expiration_children do
+    if Application.get_env(:parrhesia, :enable_expiration_worker, true) do
+      [{Parrhesia.Tasks.ExpirationWorker, name: Parrhesia.Tasks.ExpirationWorker}]
+    else
+      []
+    end
+  end
+
+  defp partition_retention_children do
+    if Application.get_env(:parrhesia, :enable_partition_retention_worker, true) do
+      [
+        {Parrhesia.Tasks.PartitionRetentionWorker, name: Parrhesia.Tasks.PartitionRetentionWorker}
+      ]
+    else
+      []
+    end
+  end
+
+  defp nip66_children do
+    if Parrhesia.NIP66.enabled?() do
+      [{Parrhesia.Tasks.Nip66Publisher, name: Parrhesia.Tasks.Nip66Publisher}]
+    else
+      []
+    end
+  end
 end
Some files were not shown because too many files have changed in this diff.