Compare commits
49 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 3e5bf462e9 | |||
| fc3d121599 | |||
| 970cee2c0e | |||
| 7a43ebd395 | |||
| 4c40edfd83 | |||
| f60b8ba02a | |||
| 2225dfdc9e | |||
| 9014912e9d | |||
| c30449b318 | |||
| c377ed4b62 | |||
| dce473662f | |||
| a2bdf11139 | |||
| bc66dfcbbe | |||
| f732d9cf24 | |||
| f2856d000e | |||
| dc5f0c1e5d | |||
| b56925f413 | |||
| 05718d4b91 | |||
| 1fef184f50 | |||
| 57fdb4ed85 | |||
| 8dbf05b7fe | |||
| 7b2d92b714 | |||
| a19b7d97f0 | |||
| 65b47ec191 | |||
| e13c08fd5a | |||
| 101ded43cb | |||
| f4d94c9fcb | |||
| 35c8d50db0 | |||
| 4d169c23ae | |||
| a1a8b30d12 | |||
| e557eba437 | |||
| a652bf3448 | |||
| f518a25cab | |||
| 02f2584757 | |||
| e8fd6c7328 | |||
| 1f608ee2bd | |||
| 5f4f086d28 | |||
| 9be3b6ca52 | |||
| 769177a63e | |||
| 987415d80c | |||
| d119d21d99 | |||
| 5d4d181d00 | |||
| fd17026c32 | |||
| 14fb0f7ffb | |||
| 186d0f98ee | |||
| 39dbc069a7 | |||
| 4c2c93deb3 | |||
| b628770517 | |||
| 705971cbc4 |
6
.github/workflows/ci.yaml
vendored
6
.github/workflows/ci.yaml
vendored
@@ -25,7 +25,7 @@ jobs:
|
|||||||
otp: "27.2"
|
otp: "27.2"
|
||||||
elixir: "1.18.2"
|
elixir: "1.18.2"
|
||||||
main: false
|
main: false
|
||||||
- name: Test (OTP 28.4 / Elixir 1.19.4 + Marmot E2E)
|
- name: Test (OTP 28.4 / Elixir 1.19.4 + E2E)
|
||||||
otp: "28.4"
|
otp: "28.4"
|
||||||
elixir: "1.19.4"
|
elixir: "1.19.4"
|
||||||
main: true
|
main: true
|
||||||
@@ -113,5 +113,9 @@ jobs:
|
|||||||
- name: Run tests
|
- name: Run tests
|
||||||
run: mix test --color
|
run: mix test --color
|
||||||
|
|
||||||
|
- name: Run Node Sync E2E tests
|
||||||
|
if: ${{ matrix.main }}
|
||||||
|
run: mix test.node_sync_e2e
|
||||||
|
|
||||||
- name: Run Marmot E2E tests
|
- name: Run Marmot E2E tests
|
||||||
run: mix test.marmot_e2e
|
run: mix test.marmot_e2e
|
||||||
|
|||||||
14
.github/workflows/release.yaml
vendored
14
.github/workflows/release.yaml
vendored
@@ -89,6 +89,17 @@ jobs:
|
|||||||
if: steps.deps-cache.outputs.cache-hit != 'true'
|
if: steps.deps-cache.outputs.cache-hit != 'true'
|
||||||
run: mix deps.get
|
run: mix deps.get
|
||||||
|
|
||||||
|
- name: Check tag matches Mix version
|
||||||
|
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
|
||||||
|
run: |
|
||||||
|
TAG_VERSION="${GITHUB_REF_NAME#v}"
|
||||||
|
MIX_VERSION="$(mix run --no-start -e 'IO.puts(Mix.Project.config()[:version])' | tail -n 1)"
|
||||||
|
|
||||||
|
if [ "$TAG_VERSION" != "$MIX_VERSION" ]; then
|
||||||
|
echo "Tag version $TAG_VERSION does not match mix.exs version $MIX_VERSION"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
- name: Compile
|
- name: Compile
|
||||||
run: mix compile --warnings-as-errors
|
run: mix compile --warnings-as-errors
|
||||||
|
|
||||||
@@ -101,6 +112,9 @@ jobs:
|
|||||||
- name: Run tests
|
- name: Run tests
|
||||||
run: mix test --color
|
run: mix test --color
|
||||||
|
|
||||||
|
- name: Run Node Sync E2E
|
||||||
|
run: mix test.node_sync_e2e
|
||||||
|
|
||||||
- name: Run Marmot E2E
|
- name: Run Marmot E2E
|
||||||
run: mix test.marmot_e2e
|
run: mix test.marmot_e2e
|
||||||
|
|
||||||
|
|||||||
33
BENCHMARK.md
33
BENCHMARK.md
@@ -1,33 +0,0 @@
|
|||||||
Running 2 comparison run(s)...
|
|
||||||
Versions:
|
|
||||||
parrhesia 0.4.0
|
|
||||||
strfry 1.0.4 (nixpkgs)
|
|
||||||
nostr-rs-relay 0.9.0
|
|
||||||
nostr-bench 0.4.0
|
|
||||||
|
|
||||||
[run 1/2] Parrhesia
|
|
||||||
[run 1/2] strfry
|
|
||||||
[run 1/2] nostr-rs-relay
|
|
||||||
|
|
||||||
[run 2/2] Parrhesia
|
|
||||||
[run 2/2] strfry
|
|
||||||
[run 2/2] nostr-rs-relay
|
|
||||||
|
|
||||||
=== Bench comparison (averages) ===
|
|
||||||
metric parrhesia strfry nostr-rs-relay strfry/parrhesia nostr-rs/parrhesia
|
|
||||||
-------------------------- --------- -------- -------------- ---------------- ------------------
|
|
||||||
connect avg latency (ms) ↓ 10.50 4.00 3.00 0.38x 0.29x
|
|
||||||
connect max latency (ms) ↓ 19.50 7.50 4.00 0.38x 0.21x
|
|
||||||
echo throughput (TPS) ↑ 78520.00 60353.00 164420.50 0.77x 2.09x
|
|
||||||
echo throughput (MiB/s) ↑ 43.00 33.75 90.05 0.78x 2.09x
|
|
||||||
event throughput (TPS) ↑ 1919.50 3520.50 781.00 1.83x 0.41x
|
|
||||||
event throughput (MiB/s) ↑ 1.25 2.25 0.50 1.80x 0.40x
|
|
||||||
req throughput (TPS) ↑ 4608.50 1809.50 875.50 0.39x 0.19x
|
|
||||||
req throughput (MiB/s) ↑ 26.20 11.75 2.40 0.45x 0.09x
|
|
||||||
|
|
||||||
Legend: ↑ higher is better, ↓ lower is better.
|
|
||||||
Ratio columns are server/parrhesia (for ↓ metrics, <1.00x means that server is faster).
|
|
||||||
|
|
||||||
Run details:
|
|
||||||
run 1: parrhesia(echo_tps=78892, event_tps=1955, req_tps=4671, connect_avg_ms=10) | strfry(echo_tps=59132, event_tps=3462, req_tps=1806, connect_avg_ms=4) | nostr-rs-relay(echo_tps=159714, event_tps=785, req_tps=873, connect_avg_ms=3)
|
|
||||||
run 2: parrhesia(echo_tps=78148, event_tps=1884, req_tps=4546, connect_avg_ms=11) | strfry(echo_tps=61574, event_tps=3579, req_tps=1813, connect_avg_ms=4) | nostr-rs-relay(echo_tps=169127, event_tps=777, req_tps=878, connect_avg_ms=3)
|
|
||||||
247
README.md
247
README.md
@@ -2,14 +2,34 @@
|
|||||||
|
|
||||||
<img alt="Parrhesia Logo" src="./docs/logo.svg" width="150" align="right">
|
<img alt="Parrhesia Logo" src="./docs/logo.svg" width="150" align="right">
|
||||||
|
|
||||||
Parrhesia is a Nostr relay server written in Elixir/OTP with PostgreSQL storage.
|
Parrhesia is a Nostr relay server written in Elixir/OTP.
|
||||||
|
|
||||||
|
Supported storage backends:
|
||||||
|
|
||||||
|
- PostgreSQL, which is the primary and production-oriented backend
|
||||||
|
- in-memory storage, which is useful for tests, local experiments, and benchmarks
|
||||||
|
|
||||||
|
**ALPHA CONDITION – BREAKING CHANGES MIGHT HAPPEN!**
|
||||||
|
|
||||||
|
- Advanced Querying: Full-text search (NIP-50) and COUNT queries (NIP-45).
|
||||||
|
- Secure Messaging: First-class support for Marmot MLS-encrypted groups and NIP-17/44/59 gift-wrapped DMs.
|
||||||
|
- Identity & Auth: NIP-42 authentication flows and NIP-86 management API with NIP-98 HTTP auth.
|
||||||
|
- Data Integrity: Negentropy-based synchronization and NIP-62 vanish flows.
|
||||||
|
|
||||||
It exposes:
|
It exposes:
|
||||||
- a WebSocket relay endpoint at `/relay`
|
|
||||||
|
- listener-configurable WS/HTTP ingress, with a default `public` listener on port `4413`
|
||||||
|
- a WebSocket relay endpoint at `/relay` on listeners that enable the `nostr` feature
|
||||||
- NIP-11 relay info on `GET /relay` with `Accept: application/nostr+json`
|
- NIP-11 relay info on `GET /relay` with `Accept: application/nostr+json`
|
||||||
- operational HTTP endpoints (`/health`, `/ready`, `/metrics`)
|
- operational HTTP endpoints such as `/health`, `/ready`, and `/metrics` on listeners that enable them
|
||||||
- `/metrics` is restricted by default to private/loopback source IPs
|
- a NIP-86-style management API at `POST /management` on listeners that enable the `admin` feature
|
||||||
- a NIP-86-style management API at `POST /management` (NIP-98 auth)
|
|
||||||
|
Listeners can run in plain HTTP, HTTPS, mutual TLS, or proxy-terminated TLS modes. The current TLS implementation supports:
|
||||||
|
|
||||||
|
- server TLS on listener sockets
|
||||||
|
- optional client certificate admission with listener-side client pin checks
|
||||||
|
- proxy-asserted client TLS identity on trusted proxy hops
|
||||||
|
- admin-triggered certificate reload by restarting an individual listener from disk
|
||||||
|
|
||||||
## Supported NIPs
|
## Supported NIPs
|
||||||
|
|
||||||
@@ -17,9 +37,15 @@ Current `supported_nips` list:
|
|||||||
|
|
||||||
`1, 9, 11, 13, 17, 40, 42, 43, 44, 45, 50, 59, 62, 66, 70, 77, 86, 98`
|
`1, 9, 11, 13, 17, 40, 42, 43, 44, 45, 50, 59, 62, 66, 70, 77, 86, 98`
|
||||||
|
|
||||||
|
`43` is advertised when the built-in NIP-43 relay access flow is enabled. Parrhesia generates relay-signed `28935` invite responses on `REQ`, validates join and leave requests locally, and publishes the resulting signed `8000`, `8001`, and `13534` relay membership events into its own local event store.
|
||||||
|
|
||||||
|
`50` uses ranked PostgreSQL full-text search over event `content` by default. Parrhesia applies the filter `limit` after ordering by match quality, and falls back to trigram-backed substring matching for short or symbol-heavy queries such as search-as-you-type prefixes, domains, and punctuation-rich tokens.
|
||||||
|
|
||||||
|
`66` is advertised when the built-in NIP-66 publisher is enabled and has at least one relay target. The default config enables it for the `public` relay URL. Parrhesia probes those target relays, collects the resulting NIP-11 / websocket liveness data, and then publishes the signed `10166` and `30166` events locally on this relay.
|
||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
- Elixir `~> 1.19`
|
- Elixir `~> 1.18`
|
||||||
- Erlang/OTP 28
|
- Erlang/OTP 28
|
||||||
- PostgreSQL (18 used in the dev environment; 16+ recommended)
|
- PostgreSQL (18 used in the dev environment; 16+ recommended)
|
||||||
- Docker or Podman plus Docker Compose support if you want to run the published container image
|
- Docker or Podman plus Docker Compose support if you want to run the published container image
|
||||||
@@ -48,7 +74,7 @@ mix setup
|
|||||||
mix run --no-halt
|
mix run --no-halt
|
||||||
```
|
```
|
||||||
|
|
||||||
Server listens on `http://localhost:4413` by default.
|
The default `public` listener binds to `http://localhost:4413`.
|
||||||
|
|
||||||
WebSocket clients should connect to:
|
WebSocket clients should connect to:
|
||||||
|
|
||||||
@@ -66,6 +92,60 @@ ws://localhost:4413/relay
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## Test suites
|
||||||
|
|
||||||
|
Primary test entrypoints:
|
||||||
|
|
||||||
|
- `mix test` for the ExUnit suite
|
||||||
|
- `mix test.marmot_e2e` for the Marmot client end-to-end suite
|
||||||
|
- `mix test.node_sync_e2e` for the two-node relay sync end-to-end suite
|
||||||
|
- `mix test.node_sync_docker_e2e` for the release-image Docker two-node relay sync suite
|
||||||
|
|
||||||
|
The node-sync harnesses are driven by:
|
||||||
|
|
||||||
|
- [`scripts/run_node_sync_e2e.sh`](./scripts/run_node_sync_e2e.sh)
|
||||||
|
- [`scripts/run_node_sync_docker_e2e.sh`](./scripts/run_node_sync_docker_e2e.sh)
|
||||||
|
- [`scripts/node_sync_e2e.exs`](./scripts/node_sync_e2e.exs)
|
||||||
|
- [`compose.node-sync-e2e.yaml`](./compose.node-sync-e2e.yaml)
|
||||||
|
|
||||||
|
`mix test.node_sync_e2e` runs two real Parrhesia nodes against separate PostgreSQL databases, verifies catch-up and live sync, restarts one node, and verifies persisted resume behavior. `mix test.node_sync_docker_e2e` runs the same scenario against the release Docker image.
|
||||||
|
|
||||||
|
GitHub CI currently runs the non-Docker node-sync e2e on the main Linux matrix job. The Docker node-sync e2e remains an explicit/manual check because it depends on release-image build/runtime fidelity and a working Docker host.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Embedding in another Elixir app
|
||||||
|
|
||||||
|
Parrhesia is usable as an embedded OTP dependency, not just as a standalone relay process.
|
||||||
|
The intended in-process surface is `Parrhesia.API.*`, especially:
|
||||||
|
|
||||||
|
- `Parrhesia.API.Events` for publish, query, and count
|
||||||
|
- `Parrhesia.API.Stream` for local REQ-like subscriptions
|
||||||
|
- `Parrhesia.API.Admin` for management operations
|
||||||
|
- `Parrhesia.API.Identity`, `Parrhesia.API.ACL`, and `Parrhesia.API.Sync` for relay identity, protected sync ACLs, and outbound relay sync
|
||||||
|
|
||||||
|
Start with:
|
||||||
|
|
||||||
|
- [`docs/LOCAL_API.md`](./docs/LOCAL_API.md) for the embedding model and a minimal host setup
|
||||||
|
- generated ExDoc for the `Embedded API` module group when running `mix docs`
|
||||||
|
|
||||||
|
Important caveats for host applications:
|
||||||
|
|
||||||
|
- Parrhesia is still alpha; expect some public API and config churn.
|
||||||
|
- Parrhesia currently assumes a single runtime per BEAM node and uses globally registered process names.
|
||||||
|
- The defaults in this repo's `config/*.exs` are not imported automatically when Parrhesia is used as a dependency. A host app must set `config :parrhesia, ...` explicitly.
|
||||||
|
- The host app is responsible for migrating Parrhesia's schema, for example with `Parrhesia.Release.migrate()` or `mix ecto.migrate -r Parrhesia.Repo`.
|
||||||
|
|
||||||
|
If you only want the in-process API and not the HTTP/WebSocket edge, configure:
|
||||||
|
|
||||||
|
```elixir
|
||||||
|
config :parrhesia, :listeners, %{}
|
||||||
|
```
|
||||||
|
|
||||||
|
The config reference below still applies when embedded. That is the primary place to document basic setup and runtime configuration changes.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## Production configuration
|
## Production configuration
|
||||||
|
|
||||||
### Minimal setup
|
### Minimal setup
|
||||||
@@ -75,8 +155,11 @@ Before a Nostr client can publish its first event successfully, make sure these
|
|||||||
1. PostgreSQL is reachable from Parrhesia.
|
1. PostgreSQL is reachable from Parrhesia.
|
||||||
Set `DATABASE_URL` and create/migrate the database with `Parrhesia.Release.migrate()` or `mix ecto.migrate`.
|
Set `DATABASE_URL` and create/migrate the database with `Parrhesia.Release.migrate()` or `mix ecto.migrate`.
|
||||||
|
|
||||||
2. Parrhesia is reachable behind your reverse proxy.
|
PostgreSQL is the supported production datastore. The in-memory backend is intended for
|
||||||
Parrhesia itself listens on plain HTTP on port `4413`, and the reverse proxy is expected to terminate TLS and forward WebSocket traffic to `/relay`.
|
non-persistent runs such as tests and benchmarks.
|
||||||
|
|
||||||
|
2. Parrhesia listeners are configured for your deployment.
|
||||||
|
The default config exposes a `public` listener on plain HTTP port `4413`, and a reverse proxy can terminate TLS and forward WebSocket traffic to `/relay`. Additional listeners can be defined in `config/*.exs`.
|
||||||
|
|
||||||
3. `:relay_url` matches the public relay URL clients should use.
|
3. `:relay_url` matches the public relay URL clients should use.
|
||||||
Set `PARRHESIA_RELAY_URL` to the public relay URL exposed by the reverse proxy.
|
Set `PARRHESIA_RELAY_URL` to the public relay URL exposed by the reverse proxy.
|
||||||
@@ -92,7 +175,7 @@ In `prod`, these environment variables are used:
|
|||||||
- `DATABASE_URL` (**required**), e.g. `ecto://USER:PASS@HOST/parrhesia_prod`
|
- `DATABASE_URL` (**required**), e.g. `ecto://USER:PASS@HOST/parrhesia_prod`
|
||||||
- `POOL_SIZE` (optional, default `32`)
|
- `POOL_SIZE` (optional, default `32`)
|
||||||
- `PORT` (optional, default `4413`)
|
- `PORT` (optional, default `4413`)
|
||||||
- `PARRHESIA_*` runtime overrides for relay config, limits, policies, metrics, and features
|
- `PARRHESIA_*` runtime overrides for relay config, metadata, identity, sync, ACL, limits, policies, listeners, retention, and features
|
||||||
- `PARRHESIA_EXTRA_CONFIG` (optional path to an extra runtime config file)
|
- `PARRHESIA_EXTRA_CONFIG` (optional path to an extra runtime config file)
|
||||||
|
|
||||||
`config/runtime.exs` reads these values at runtime in production releases.
|
`config/runtime.exs` reads these values at runtime in production releases.
|
||||||
@@ -102,11 +185,20 @@ In `prod`, these environment variables are used:
|
|||||||
For runtime overrides, use the `PARRHESIA_...` prefix:
|
For runtime overrides, use the `PARRHESIA_...` prefix:
|
||||||
|
|
||||||
- `PARRHESIA_RELAY_URL`
|
- `PARRHESIA_RELAY_URL`
|
||||||
|
- `PARRHESIA_METADATA_HIDE_VERSION`
|
||||||
|
- `PARRHESIA_IDENTITY_*`
|
||||||
|
- `PARRHESIA_SYNC_*`
|
||||||
|
- `PARRHESIA_ACL_*`
|
||||||
|
- `PARRHESIA_TRUSTED_PROXIES`
|
||||||
|
- `PARRHESIA_PUBLIC_MAX_CONNECTIONS`
|
||||||
- `PARRHESIA_MODERATION_CACHE_ENABLED`
|
- `PARRHESIA_MODERATION_CACHE_ENABLED`
|
||||||
- `PARRHESIA_ENABLE_EXPIRATION_WORKER`
|
- `PARRHESIA_ENABLE_EXPIRATION_WORKER`
|
||||||
|
- `PARRHESIA_ENABLE_PARTITION_RETENTION_WORKER`
|
||||||
|
- `PARRHESIA_STORAGE_BACKEND`
|
||||||
- `PARRHESIA_LIMITS_*`
|
- `PARRHESIA_LIMITS_*`
|
||||||
- `PARRHESIA_POLICIES_*`
|
- `PARRHESIA_POLICIES_*`
|
||||||
- `PARRHESIA_METRICS_*`
|
- `PARRHESIA_METRICS_*`
|
||||||
|
- `PARRHESIA_METRICS_ENDPOINT_MAX_CONNECTIONS`
|
||||||
- `PARRHESIA_RETENTION_*`
|
- `PARRHESIA_RETENTION_*`
|
||||||
- `PARRHESIA_FEATURES_*`
|
- `PARRHESIA_FEATURES_*`
|
||||||
- `PARRHESIA_METRICS_ENDPOINT_*`
|
- `PARRHESIA_METRICS_ENDPOINT_*`
|
||||||
@@ -115,11 +207,12 @@ Examples:
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
export PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES=true
|
export PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES=true
|
||||||
export PARRHESIA_FEATURES_VERIFY_EVENT_SIGNATURES=true
|
|
||||||
export PARRHESIA_METRICS_ALLOWED_CIDRS="10.0.0.0/8,192.168.0.0/16"
|
export PARRHESIA_METRICS_ALLOWED_CIDRS="10.0.0.0/8,192.168.0.0/16"
|
||||||
export PARRHESIA_LIMITS_OUTBOUND_OVERFLOW_STRATEGY=drop_oldest
|
export PARRHESIA_LIMITS_OUTBOUND_OVERFLOW_STRATEGY=drop_oldest
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Listeners themselves are primarily configured under `config :parrhesia, :listeners, ...`. The current runtime env helpers tune the default public listener and the optional dedicated metrics listener, including their connection ceilings.
|
||||||
|
|
||||||
For settings that are awkward to express as env vars, mount an extra config file and set `PARRHESIA_EXTRA_CONFIG` to its path inside the container.
|
For settings that are awkward to express as env vars, mount an extra config file and set `PARRHESIA_EXTRA_CONFIG` to its path inside the container.
|
||||||
|
|
||||||
### Config reference
|
### Config reference
|
||||||
@@ -131,11 +224,19 @@ CSV env vars use comma-separated values. Boolean env vars accept `1/0`, `true/fa
|
|||||||
| Atom key | ENV | Default | Notes |
|
| Atom key | ENV | Default | Notes |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- | --- | --- |
|
||||||
| `:relay_url` | `PARRHESIA_RELAY_URL` | `ws://localhost:4413/relay` | Advertised relay URL and auth relay tag target |
|
| `:relay_url` | `PARRHESIA_RELAY_URL` | `ws://localhost:4413/relay` | Advertised relay URL and auth relay tag target |
|
||||||
|
| `:metadata.hide_version?` | `PARRHESIA_METADATA_HIDE_VERSION` | `true` | Hides the relay version from outbound `User-Agent` and NIP-11 when enabled |
|
||||||
|
| `:acl.protected_filters` | `PARRHESIA_ACL_PROTECTED_FILTERS` | `[]` | JSON-encoded protected filter list for sync ACL checks |
|
||||||
|
| `:identity.path` | `PARRHESIA_IDENTITY_PATH` | `nil` | Optional path for persisted relay identity material |
|
||||||
|
| `:identity.private_key` | `PARRHESIA_IDENTITY_PRIVATE_KEY` | `nil` | Optional inline relay private key |
|
||||||
| `:moderation_cache_enabled` | `PARRHESIA_MODERATION_CACHE_ENABLED` | `true` | Toggle moderation cache |
|
| `:moderation_cache_enabled` | `PARRHESIA_MODERATION_CACHE_ENABLED` | `true` | Toggle moderation cache |
|
||||||
| `:enable_expiration_worker` | `PARRHESIA_ENABLE_EXPIRATION_WORKER` | `true` | Toggle background expiration worker |
|
| `:enable_expiration_worker` | `PARRHESIA_ENABLE_EXPIRATION_WORKER` | `true` | Toggle background expiration worker |
|
||||||
|
| `:nip43` | config-file driven | see table below | Built-in NIP-43 relay access invite / membership flow |
|
||||||
|
| `:nip66` | config-file driven | see table below | Built-in NIP-66 discovery / monitor publisher |
|
||||||
|
| `:sync.path` | `PARRHESIA_SYNC_PATH` | `nil` | Optional path to sync peer config |
|
||||||
|
| `:sync.start_workers?` | `PARRHESIA_SYNC_START_WORKERS` | `true` | Start outbound sync workers on boot |
|
||||||
| `:limits` | `PARRHESIA_LIMITS_*` | see table below | Runtime override group |
|
| `:limits` | `PARRHESIA_LIMITS_*` | see table below | Runtime override group |
|
||||||
| `:policies` | `PARRHESIA_POLICIES_*` | see table below | Runtime override group |
|
| `:policies` | `PARRHESIA_POLICIES_*` | see table below | Runtime override group |
|
||||||
| `:metrics` | `PARRHESIA_METRICS_*` | see table below | Runtime override group |
|
| `:listeners` | config-file driven | see notes below | Ingress listeners with bind, transport, feature, auth, network, and baseline ACL settings |
|
||||||
| `:retention` | `PARRHESIA_RETENTION_*` | see table below | Partition lifecycle and pruning policy |
|
| `:retention` | `PARRHESIA_RETENTION_*` | see table below | Partition lifecycle and pruning policy |
|
||||||
| `:features` | `PARRHESIA_FEATURES_*` | see table below | Runtime override group |
|
| `:features` | `PARRHESIA_FEATURES_*` | see table below | Runtime override group |
|
||||||
| `:storage.events` | `-` | `Parrhesia.Storage.Adapters.Postgres.Events` | Config-file override only |
|
| `:storage.events` | `-` | `Parrhesia.Storage.Adapters.Postgres.Events` | Config-file override only |
|
||||||
@@ -153,19 +254,85 @@ CSV env vars use comma-separated values. Boolean env vars accept `1/0`, `true/fa
|
|||||||
| `:queue_interval` | `DB_QUEUE_INTERVAL_MS` | `5000` | Ecto queue interval in ms |
|
| `:queue_interval` | `DB_QUEUE_INTERVAL_MS` | `5000` | Ecto queue interval in ms |
|
||||||
| `:types` | `-` | `Parrhesia.PostgresTypes` | Internal config-file setting |
|
| `:types` | `-` | `Parrhesia.PostgresTypes` | Internal config-file setting |
|
||||||
|
|
||||||
#### `Parrhesia.Web.Endpoint`
|
#### `Parrhesia.ReadRepo`
|
||||||
|
|
||||||
| Atom key | ENV | Default | Notes |
|
| Atom key | ENV | Default | Notes |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- | --- | --- |
|
||||||
| `:port` | `PORT` | `4413` | Main HTTP/WebSocket listener |
|
| `:url` | `DATABASE_URL` | required | Shares the primary DB URL with the write repo |
|
||||||
|
| `:pool_size` | `DB_READ_POOL_SIZE` | `32` | Read-only query pool size |
|
||||||
|
| `:queue_target` | `DB_READ_QUEUE_TARGET_MS` | `1000` | Read pool Ecto queue target in ms |
|
||||||
|
| `:queue_interval` | `DB_READ_QUEUE_INTERVAL_MS` | `5000` | Read pool Ecto queue interval in ms |
|
||||||
|
| `:types` | `-` | `Parrhesia.PostgresTypes` | Internal config-file setting |
|
||||||
|
|
||||||
#### `Parrhesia.Web.MetricsEndpoint`
|
#### `:listeners`
|
||||||
|
|
||||||
| Atom key | ENV | Default | Notes |
|
| Atom key | ENV | Default | Notes |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- | --- | --- |
|
||||||
| `:enabled` | `PARRHESIA_METRICS_ENDPOINT_ENABLED` | `false` | Enables dedicated metrics listener |
|
| `:public.bind.port` | `PORT` | `4413` | Default public listener port |
|
||||||
| `:ip` | `PARRHESIA_METRICS_ENDPOINT_IP` | `127.0.0.1` | IPv4 only |
|
| `:public.max_connections` | `PARRHESIA_PUBLIC_MAX_CONNECTIONS` | `20000` | Target total connection ceiling for the public listener |
|
||||||
| `:port` | `PARRHESIA_METRICS_ENDPOINT_PORT` | `9568` | Dedicated metrics port |
|
| `:public.proxy.trusted_cidrs` | `PARRHESIA_TRUSTED_PROXIES` | `[]` | Trusted reverse proxies for forwarded IP handling |
|
||||||
|
| `:public.features.metrics.*` | `PARRHESIA_METRICS_*` | see below | Convenience runtime overrides for metrics on the public listener |
|
||||||
|
| `:metrics.bind.port` | `PARRHESIA_METRICS_ENDPOINT_PORT` | `9568` | Optional dedicated metrics listener port |
|
||||||
|
| `:metrics.max_connections` | `PARRHESIA_METRICS_ENDPOINT_MAX_CONNECTIONS` | `1024` | Target total connection ceiling for the dedicated metrics listener |
|
||||||
|
| `:metrics.enabled` | `PARRHESIA_METRICS_ENDPOINT_ENABLED` | `false` | Enables the optional dedicated metrics listener |
|
||||||
|
|
||||||
|
Listener `max_connections` is a first-class config field. Parrhesia translates it to ThousandIsland's per-acceptor `num_connections` limit based on the active acceptor count. Raw `bandit_options[:thousand_island_options]` can still override that for advanced tuning.
|
||||||
|
|
||||||
|
Listener `transport.tls` supports `:disabled`, `:server`, `:mutual`, and `:proxy_terminated`. For TLS-enabled listeners, the main config-file fields are `certfile`, `keyfile`, optional `cacertfile`, optional `cipher_suite`, optional `client_pins`, and `proxy_headers` for proxy-terminated identity.
|
||||||
|
|
||||||
|
Every listener supports this config-file schema:
|
||||||
|
|
||||||
|
| Atom key | ENV | Default | Notes |
|
||||||
|
| --- | --- | --- | --- |
|
||||||
|
| `:id` | `-` | listener key or `:listener` | Listener identifier |
|
||||||
|
| `:enabled` | public/metrics helpers only | `true` | Whether the listener is started |
|
||||||
|
| `:bind.ip` | `-` | `0.0.0.0` (`public`) / `127.0.0.1` (`metrics`) | Bind address |
|
||||||
|
| `:bind.port` | `PORT` / `PARRHESIA_METRICS_ENDPOINT_PORT` | `4413` / `9568` | Bind port |
|
||||||
|
| `:max_connections` | `PARRHESIA_PUBLIC_MAX_CONNECTIONS` / `PARRHESIA_METRICS_ENDPOINT_MAX_CONNECTIONS` | `20000` / `1024` | Target total listener connection ceiling; accepts integer or `:infinity` in config files |
|
||||||
|
| `:transport.scheme` | `-` | `:http` | Listener scheme |
|
||||||
|
| `:transport.tls` | `-` | `%{mode: :disabled}` | TLS mode and TLS-specific options |
|
||||||
|
| `:proxy.trusted_cidrs` | `PARRHESIA_TRUSTED_PROXIES` on `public` | `[]` | Trusted proxy CIDRs for forwarded identity / IP handling |
|
||||||
|
| `:proxy.honor_x_forwarded_for` | `-` | `true` | Respect `X-Forwarded-For` from trusted proxies |
|
||||||
|
| `:network.public` | `-` | `false` | Allow only public networks |
|
||||||
|
| `:network.private_networks_only` | `-` | `false` | Allow only RFC1918 / local networks |
|
||||||
|
| `:network.allow_cidrs` | `-` | `[]` | Explicit CIDR allowlist |
|
||||||
|
| `:network.allow_all` | `-` | `true` | Allow all source IPs |
|
||||||
|
| `:features.nostr.enabled` | `-` | `true` on `public`, `false` on metrics listener | Enables `/relay` |
|
||||||
|
| `:features.admin.enabled` | `-` | `true` on `public`, `false` on metrics listener | Enables `/management` |
|
||||||
|
| `:features.metrics.enabled` | `PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT` on `public` | `true` on `public`, `true` on metrics listener | Enables `/metrics` |
|
||||||
|
| `:features.metrics.auth_token` | `PARRHESIA_METRICS_AUTH_TOKEN` | `nil` | Optional bearer token for `/metrics` |
|
||||||
|
| `:features.metrics.access.public` | `PARRHESIA_METRICS_PUBLIC` | `false` | Allow public-network access to `/metrics` |
|
||||||
|
| `:features.metrics.access.private_networks_only` | `PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY` | `true` | Restrict `/metrics` to private networks |
|
||||||
|
| `:features.metrics.access.allow_cidrs` | `PARRHESIA_METRICS_ALLOWED_CIDRS` | `[]` | Additional CIDR allowlist for `/metrics` |
|
||||||
|
| `:features.metrics.access.allow_all` | `-` | `true` | Unconditional metrics access in config files |
|
||||||
|
| `:auth.nip42_required` | `-` | `false` | Require NIP-42 for relay reads / writes |
|
||||||
|
| `:auth.nip98_required_for_admin` | `PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED` on `public` | `true` | Require NIP-98 for management API calls |
|
||||||
|
| `:baseline_acl.read` | `-` | `[]` | Static read deny/allow rules |
|
||||||
|
| `:baseline_acl.write` | `-` | `[]` | Static write deny/allow rules |
|
||||||
|
| `:bandit_options` | `-` | `[]` | Advanced Bandit / ThousandIsland passthrough |
|
||||||
|
|
||||||
|
#### `:nip66`
|
||||||
|
|
||||||
|
| Atom key | ENV | Default | Notes |
|
||||||
|
| --- | --- | --- | --- |
|
||||||
|
| `:enabled` | `-` | `true` | Enables the built-in NIP-66 publisher worker |
|
||||||
|
| `:publish_interval_seconds` | `-` | `900` | Republish cadence for `10166` and `30166` events |
|
||||||
|
| `:publish_monitor_announcement?` | `-` | `true` | Publish a `10166` monitor announcement alongside discovery events |
|
||||||
|
| `:timeout_ms` | `-` | `5000` | Probe timeout for websocket and NIP-11 checks |
|
||||||
|
| `:checks` | `-` | `[:open, :read, :nip11]` | Checks advertised in `10166` and run against each target relay during probing |
|
||||||
|
| `:targets` | `-` | `[]` | Optional explicit relay targets to probe; when empty, Parrhesia uses `:relay_url` for the `public` listener |
|
||||||
|
|
||||||
|
NIP-66 targets are probe sources, not publish destinations. Parrhesia connects to each target relay, collects the configured liveness / discovery data, and stores the resulting signed `10166` / `30166` events in its own local event store so clients can query them here.
|
||||||
|
|
||||||
|
#### `:nip43`
|
||||||
|
|
||||||
|
| Atom key | ENV | Default | Notes |
|
||||||
|
| --- | --- | --- | --- |
|
||||||
|
| `:enabled` | `-` | `true` | Enables the built-in NIP-43 relay access flow and advertises `43` in NIP-11 |
|
||||||
|
| `:invite_ttl_seconds` | `-` | `900` | Expiration window for generated invite claim strings returned by `REQ` filters targeting kind `28935` |
|
||||||
|
| `:request_max_age_seconds` | `-` | `300` | Maximum allowed age for inbound join (`28934`) and leave (`28936`) requests |
|
||||||
|
|
||||||
|
Parrhesia treats NIP-43 invite requests as synthetic relay output, not stored client input. A `REQ` for kind `28935` causes the relay to generate a fresh relay-signed invite event on the fly. Clients then submit that claim back in a protected kind `28934` join request. When a join or leave request is accepted, Parrhesia updates its local relay membership state and publishes the corresponding relay-signed `8000` / `8001` delta plus the latest `13534` membership snapshot locally.
|
||||||
|
|
||||||
#### `:limits`
|
#### `:limits`
|
||||||
|
|
||||||
@@ -175,6 +342,12 @@ CSV env vars use comma-separated values. Boolean env vars accept `1/0`, `true/fa
|
|||||||
| `:max_event_bytes` | `PARRHESIA_LIMITS_MAX_EVENT_BYTES` | `262144` |
|
| `:max_event_bytes` | `PARRHESIA_LIMITS_MAX_EVENT_BYTES` | `262144` |
|
||||||
| `:max_filters_per_req` | `PARRHESIA_LIMITS_MAX_FILTERS_PER_REQ` | `16` |
|
| `:max_filters_per_req` | `PARRHESIA_LIMITS_MAX_FILTERS_PER_REQ` | `16` |
|
||||||
| `:max_filter_limit` | `PARRHESIA_LIMITS_MAX_FILTER_LIMIT` | `500` |
|
| `:max_filter_limit` | `PARRHESIA_LIMITS_MAX_FILTER_LIMIT` | `500` |
|
||||||
|
| `:max_tags_per_event` | `PARRHESIA_LIMITS_MAX_TAGS_PER_EVENT` | `256` |
|
||||||
|
| `:max_tag_values_per_filter` | `PARRHESIA_LIMITS_MAX_TAG_VALUES_PER_FILTER` | `128` |
|
||||||
|
| `:ip_max_event_ingest_per_window` | `PARRHESIA_LIMITS_IP_MAX_EVENT_INGEST_PER_WINDOW` | `1000` |
|
||||||
|
| `:ip_event_ingest_window_seconds` | `PARRHESIA_LIMITS_IP_EVENT_INGEST_WINDOW_SECONDS` | `1` |
|
||||||
|
| `:relay_max_event_ingest_per_window` | `PARRHESIA_LIMITS_RELAY_MAX_EVENT_INGEST_PER_WINDOW` | `10000` |
|
||||||
|
| `:relay_event_ingest_window_seconds` | `PARRHESIA_LIMITS_RELAY_EVENT_INGEST_WINDOW_SECONDS` | `1` |
|
||||||
| `:max_subscriptions_per_connection` | `PARRHESIA_LIMITS_MAX_SUBSCRIPTIONS_PER_CONNECTION` | `32` |
|
| `:max_subscriptions_per_connection` | `PARRHESIA_LIMITS_MAX_SUBSCRIPTIONS_PER_CONNECTION` | `32` |
|
||||||
| `:max_event_future_skew_seconds` | `PARRHESIA_LIMITS_MAX_EVENT_FUTURE_SKEW_SECONDS` | `900` |
|
| `:max_event_future_skew_seconds` | `PARRHESIA_LIMITS_MAX_EVENT_FUTURE_SKEW_SECONDS` | `900` |
|
||||||
| `:max_event_ingest_per_window` | `PARRHESIA_LIMITS_MAX_EVENT_INGEST_PER_WINDOW` | `120` |
|
| `:max_event_ingest_per_window` | `PARRHESIA_LIMITS_MAX_EVENT_INGEST_PER_WINDOW` | `120` |
|
||||||
@@ -186,6 +359,8 @@ CSV env vars use comma-separated values. Boolean env vars accept `1/0`, `true/fa
|
|||||||
| `:max_negentropy_payload_bytes` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_PAYLOAD_BYTES` | `4096` |
|
| `:max_negentropy_payload_bytes` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_PAYLOAD_BYTES` | `4096` |
|
||||||
| `:max_negentropy_sessions_per_connection` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_SESSIONS_PER_CONNECTION` | `8` |
|
| `:max_negentropy_sessions_per_connection` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_SESSIONS_PER_CONNECTION` | `8` |
|
||||||
| `:max_negentropy_total_sessions` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_TOTAL_SESSIONS` | `10000` |
|
| `:max_negentropy_total_sessions` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_TOTAL_SESSIONS` | `10000` |
|
||||||
|
| `:max_negentropy_items_per_session` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_ITEMS_PER_SESSION` | `50000` |
|
||||||
|
| `:negentropy_id_list_threshold` | `PARRHESIA_LIMITS_NEGENTROPY_ID_LIST_THRESHOLD` | `32` |
|
||||||
| `:negentropy_session_idle_timeout_seconds` | `PARRHESIA_LIMITS_NEGENTROPY_SESSION_IDLE_TIMEOUT_SECONDS` | `60` |
|
| `:negentropy_session_idle_timeout_seconds` | `PARRHESIA_LIMITS_NEGENTROPY_SESSION_IDLE_TIMEOUT_SECONDS` | `60` |
|
||||||
| `:negentropy_session_sweep_interval_seconds` | `PARRHESIA_LIMITS_NEGENTROPY_SESSION_SWEEP_INTERVAL_SECONDS` | `10` |
|
| `:negentropy_session_sweep_interval_seconds` | `PARRHESIA_LIMITS_NEGENTROPY_SESSION_SWEEP_INTERVAL_SECONDS` | `10` |
|
||||||
|
|
||||||
@@ -215,11 +390,11 @@ CSV env vars use comma-separated values. Boolean env vars accept `1/0`, `true/fa
|
|||||||
| `:marmot_push_max_server_recipients` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_SERVER_RECIPIENTS` | `1` |
|
| `:marmot_push_max_server_recipients` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_SERVER_RECIPIENTS` | `1` |
|
||||||
| `:management_auth_required` | `PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED` | `true` |
|
| `:management_auth_required` | `PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED` | `true` |
|
||||||
|
|
||||||
#### `:metrics`
|
#### Listener-related Metrics Helpers
|
||||||
|
|
||||||
| Atom key | ENV | Default |
|
| Atom key | ENV | Default |
|
||||||
| --- | --- | --- |
|
| --- | --- | --- |
|
||||||
| `:enabled_on_main_endpoint` | `PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT` | `true` |
|
| `:public.features.metrics.enabled` | `PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT` | `true` |
|
||||||
| `:public` | `PARRHESIA_METRICS_PUBLIC` | `false` |
|
| `:public` | `PARRHESIA_METRICS_PUBLIC` | `false` |
|
||||||
| `:private_networks_only` | `PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY` | `true` |
|
| `:private_networks_only` | `PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY` | `true` |
|
||||||
| `:allowed_cidrs` | `PARRHESIA_METRICS_ALLOWED_CIDRS` | `[]` |
|
| `:allowed_cidrs` | `PARRHESIA_METRICS_ALLOWED_CIDRS` | `[]` |
|
||||||
@@ -239,12 +414,14 @@ CSV env vars use comma-separated values. Boolean env vars accept `1/0`, `true/fa
|
|||||||
|
|
||||||
| Atom key | ENV | Default |
|
| Atom key | ENV | Default |
|
||||||
| --- | --- | --- |
|
| --- | --- | --- |
|
||||||
| `:verify_event_signatures` | `PARRHESIA_FEATURES_VERIFY_EVENT_SIGNATURES` | `true` |
|
| `:verify_event_signatures` | `-` | `true` |
|
||||||
| `:nip_45_count` | `PARRHESIA_FEATURES_NIP_45_COUNT` | `true` |
|
| `:nip_45_count` | `PARRHESIA_FEATURES_NIP_45_COUNT` | `true` |
|
||||||
| `:nip_50_search` | `PARRHESIA_FEATURES_NIP_50_SEARCH` | `true` |
|
| `:nip_50_search` | `PARRHESIA_FEATURES_NIP_50_SEARCH` | `true` |
|
||||||
| `:nip_77_negentropy` | `PARRHESIA_FEATURES_NIP_77_NEGENTROPY` | `true` |
|
| `:nip_77_negentropy` | `PARRHESIA_FEATURES_NIP_77_NEGENTROPY` | `true` |
|
||||||
| `:marmot_push_notifications` | `PARRHESIA_FEATURES_MARMOT_PUSH_NOTIFICATIONS` | `false` |
|
| `:marmot_push_notifications` | `PARRHESIA_FEATURES_MARMOT_PUSH_NOTIFICATIONS` | `false` |
|
||||||
|
|
||||||
|
`:verify_event_signatures` is config-file only. Production releases always verify event signatures.
|
||||||
|
|
||||||
#### Extra runtime config
|
#### Extra runtime config
|
||||||
|
|
||||||
| Atom key | ENV | Default | Notes |
|
| Atom key | ENV | Default | Notes |
|
||||||
@@ -267,17 +444,17 @@ mix compile
|
|||||||
mix release
|
mix release
|
||||||
|
|
||||||
_build/prod/rel/parrhesia/bin/parrhesia eval "Parrhesia.Release.migrate()"
|
_build/prod/rel/parrhesia/bin/parrhesia eval "Parrhesia.Release.migrate()"
|
||||||
_build/prod/rel/parrhesia/bin/parrhesia foreground
|
_build/prod/rel/parrhesia/bin/parrhesia start
|
||||||
```
|
```
|
||||||
|
|
||||||
For systemd/process managers, run the release command in foreground mode.
|
For systemd/process managers, run the release command with `start`.
|
||||||
|
|
||||||
### Option B: Nix release package (`default.nix`)
|
### Option B: Nix release package (`default.nix`)
|
||||||
|
|
||||||
Build:
|
Build:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
nix-build
|
nix build
|
||||||
```
|
```
|
||||||
|
|
||||||
Run the built release from `./result/bin/parrhesia` (release command interface).
|
Run the built release from `./result/bin/parrhesia` (release command interface).
|
||||||
@@ -361,7 +538,9 @@ Notes:
|
|||||||
|
|
||||||
## Benchmark
|
## Benchmark
|
||||||
|
|
||||||
The benchmark compares Parrhesia against [`strfry`](https://github.com/hoytech/strfry) and [`nostr-rs-relay`](https://sr.ht/~gheartsfield/nostr-rs-relay/) using [`nostr-bench`](https://github.com/rnostr/nostr-bench).
|
The benchmark compares two Parrhesia profiles, one backed by PostgreSQL and one backed by the in-memory adapter, against [`strfry`](https://github.com/hoytech/strfry) and [`nostr-rs-relay`](https://sr.ht/~gheartsfield/nostr-rs-relay/) using [`nostr-bench`](https://github.com/rnostr/nostr-bench). Benchmark runs also lift Parrhesia's relay-side limits by default so the benchmark client, not server guardrails, is the main bottleneck.
|
||||||
|
|
||||||
|
`mix bench` is a sequential mixed-workload benchmark, not an isolated per-endpoint microbenchmark. Each relay instance runs `connect`, then `echo`, then `event`, then `req` against the same live process, so later phases measure against state and load created by earlier phases.
|
||||||
|
|
||||||
Run it with:
|
Run it with:
|
||||||
|
|
||||||
@@ -371,16 +550,16 @@ mix bench
|
|||||||
|
|
||||||
Current comparison results from [BENCHMARK.md](./BENCHMARK.md):
|
Current comparison results from [BENCHMARK.md](./BENCHMARK.md):
|
||||||
|
|
||||||
| metric | parrhesia | strfry | nostr-rs-relay | strfry/parrhesia | nostr-rs/parrhesia |
|
| metric | parrhesia-pg | parrhesia-mem | nostr-rs-relay | mem/pg | nostr-rs/pg |
|
||||||
| --- | ---: | ---: | ---: | ---: | ---: |
|
| --- | ---: | ---: | ---: | ---: | ---: |
|
||||||
| connect avg latency (ms) ↓ | 13.50 | 3.00 | 2.00 | **0.22x** | **0.15x** |
|
| connect avg latency (ms) ↓ | 9.33 | 7.67 | 7.00 | **0.82x** | **0.75x** |
|
||||||
| connect max latency (ms) ↓ | 22.50 | 5.50 | 3.00 | **0.24x** | **0.13x** |
|
| connect max latency (ms) ↓ | 12.33 | 9.67 | 10.33 | **0.78x** | **0.84x** |
|
||||||
| echo throughput (TPS) ↑ | 80385.00 | 61673.00 | 164516.00 | 0.77x | **2.05x** |
|
| echo throughput (TPS) ↑ | 64030.33 | 93656.33 | 140767.00 | **1.46x** | **2.20x** |
|
||||||
| echo throughput (MiB/s) ↑ | 44.00 | 34.45 | 90.10 | 0.78x | **2.05x** |
|
| echo throughput (MiB/s) ↑ | 35.07 | 51.27 | 77.07 | **1.46x** | **2.20x** |
|
||||||
| event throughput (TPS) ↑ | 2000.00 | 3404.50 | 788.00 | **1.70x** | 0.39x |
|
| event throughput (TPS) ↑ | 5015.33 | 1505.33 | 2293.67 | 0.30x | 0.46x |
|
||||||
| event throughput (MiB/s) ↑ | 1.30 | 2.20 | 0.50 | **1.69x** | 0.38x |
|
| event throughput (MiB/s) ↑ | 3.40 | 1.00 | 1.50 | 0.29x | 0.44x |
|
||||||
| req throughput (TPS) ↑ | 3664.00 | 1808.50 | 877.50 | 0.49x | 0.24x |
|
| req throughput (TPS) ↑ | 6416.33 | 14566.67 | 3035.67 | **2.27x** | 0.47x |
|
||||||
| req throughput (MiB/s) ↑ | 20.75 | 11.75 | 2.45 | 0.57x | 0.12x |
|
| req throughput (MiB/s) ↑ | 42.43 | 94.23 | 19.23 | **2.22x** | 0.45x |
|
||||||
|
|
||||||
Higher is better for `↑` metrics. Lower is better for `↓` metrics.
|
Higher is better for `↑` metrics. Lower is better for `↓` metrics.
|
||||||
|
|
||||||
|
|||||||
31
bench/chart.gnuplot
Normal file
31
bench/chart.gnuplot
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
# bench/chart.gnuplot — multi-panel SVG showing relay performance over git tags.
|
||||||
|
#
|
||||||
|
# Invoked by scripts/run_bench_update.sh with:
|
||||||
|
# gnuplot -e "data_dir='...'" -e "output_file='...'" bench/chart.gnuplot
|
||||||
|
#
|
||||||
|
# The data_dir contains per-metric TSV files and a plot_commands.gnuplot
|
||||||
|
# fragment generated by the data-prep step that defines the actual plot
|
||||||
|
# directives (handling variable server columns).
|
||||||
|
|
||||||
|
set terminal svg enhanced size 1200,900 font "sans,11"
|
||||||
|
set output output_file
|
||||||
|
|
||||||
|
set style data linespoints
|
||||||
|
set key outside right top
|
||||||
|
set grid ytics
|
||||||
|
set xtics rotate by -30
|
||||||
|
set datafile separator "\t"
|
||||||
|
|
||||||
|
# parrhesia-pg: blue solid, parrhesia-memory: green solid
|
||||||
|
# strfry: orange dashed, nostr-rs-relay: red dashed
|
||||||
|
set linetype 1 lc rgb "#2563eb" lw 2 pt 7 ps 1.0
|
||||||
|
set linetype 2 lc rgb "#16a34a" lw 2 pt 9 ps 1.0
|
||||||
|
set linetype 3 lc rgb "#ea580c" lw 1.5 pt 5 ps 0.8 dt 2
|
||||||
|
set linetype 4 lc rgb "#dc2626" lw 1.5 pt 4 ps 0.8 dt 2
|
||||||
|
|
||||||
|
set multiplot layout 2,2 title "Parrhesia Relay Benchmark History" font ",14"
|
||||||
|
|
||||||
|
# Load dynamically generated plot commands (handles variable column counts)
|
||||||
|
load data_dir."/plot_commands.gnuplot"
|
||||||
|
|
||||||
|
unset multiplot
|
||||||
752
bench/chart.svg
Normal file
752
bench/chart.svg
Normal file
@@ -0,0 +1,752 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8" standalone="no"?>
|
||||||
|
<svg
|
||||||
|
width="1200" height="900"
|
||||||
|
viewBox="0 0 1200 900"
|
||||||
|
xmlns="http://www.w3.org/2000/svg"
|
||||||
|
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||||
|
>
|
||||||
|
|
||||||
|
<title>Gnuplot</title>
|
||||||
|
<desc>Produced by GNUPLOT 6.0 patchlevel 4 </desc>
|
||||||
|
|
||||||
|
<g id="gnuplot_canvas">
|
||||||
|
|
||||||
|
<rect x="0" y="0" width="1200" height="900" fill="none"/>
|
||||||
|
<defs>
|
||||||
|
|
||||||
|
<circle id='gpDot' r='0.5' stroke-width='0.5' stroke='currentColor'/>
|
||||||
|
<path id='gpPt0' stroke-width='0.242' stroke='currentColor' d='M-1,0 h2 M0,-1 v2'/>
|
||||||
|
<path id='gpPt1' stroke-width='0.242' stroke='currentColor' d='M-1,-1 L1,1 M1,-1 L-1,1'/>
|
||||||
|
<path id='gpPt2' stroke-width='0.242' stroke='currentColor' d='M-1,0 L1,0 M0,-1 L0,1 M-1,-1 L1,1 M-1,1 L1,-1'/>
|
||||||
|
<rect id='gpPt3' stroke-width='0.242' stroke='currentColor' x='-1' y='-1' width='2' height='2'/>
|
||||||
|
<rect id='gpPt4' stroke-width='0.242' stroke='currentColor' fill='currentColor' x='-1' y='-1' width='2' height='2'/>
|
||||||
|
<circle id='gpPt5' stroke-width='0.242' stroke='currentColor' cx='0' cy='0' r='1'/>
|
||||||
|
<use xlink:href='#gpPt5' id='gpPt6' fill='currentColor' stroke='none'/>
|
||||||
|
<path id='gpPt7' stroke-width='0.242' stroke='currentColor' d='M0,-1.33 L-1.33,0.67 L1.33,0.67 z'/>
|
||||||
|
<use xlink:href='#gpPt7' id='gpPt8' fill='currentColor' stroke='none'/>
|
||||||
|
<use xlink:href='#gpPt7' id='gpPt9' stroke='currentColor' transform='rotate(180)'/>
|
||||||
|
<use xlink:href='#gpPt9' id='gpPt10' fill='currentColor' stroke='none'/>
|
||||||
|
<use xlink:href='#gpPt3' id='gpPt11' stroke='currentColor' transform='rotate(45)'/>
|
||||||
|
<use xlink:href='#gpPt11' id='gpPt12' fill='currentColor' stroke='none'/>
|
||||||
|
<path id='gpPt13' stroke-width='0.242' stroke='currentColor' d='M0,1.330 L1.265,0.411 L0.782,-1.067 L-0.782,-1.076 L-1.265,0.411 z'/>
|
||||||
|
<use xlink:href='#gpPt13' id='gpPt14' fill='currentColor' stroke='none'/>
|
||||||
|
<filter id='textbox' filterUnits='objectBoundingBox' x='0' y='0' height='1' width='1'>
|
||||||
|
<feFlood flood-color='white' flood-opacity='1' result='bgnd'/>
|
||||||
|
<feComposite in='SourceGraphic' in2='bgnd' operator='atop'/>
|
||||||
|
</filter>
|
||||||
|
<filter id='greybox' filterUnits='objectBoundingBox' x='0' y='0' height='1' width='1'>
|
||||||
|
<feFlood flood-color='lightgrey' flood-opacity='1' result='grey'/>
|
||||||
|
<feComposite in='SourceGraphic' in2='grey' operator='atop'/>
|
||||||
|
</filter>
|
||||||
|
</defs>
|
||||||
|
<g fill="none" color="white" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<g transform="translate(600.00,21.05)" stroke="none" fill="black" font-family="sans" font-size="14.00" text-anchor="middle">
|
||||||
|
<text><tspan font-family="sans" >Parrhesia Relay Benchmark History</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M74.17,420.94 L368.73,420.94 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M74.17,420.94 L82.42,420.94 M368.73,420.94 L360.48,420.94 '/> <g transform="translate(66.48,424.52)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 1500</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M74.17,377.14 L368.73,377.14 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M74.17,377.14 L82.42,377.14 M368.73,377.14 L360.48,377.14 '/> <g transform="translate(66.48,380.72)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 2000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M74.17,333.33 L368.73,333.33 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M74.17,333.33 L82.42,333.33 M368.73,333.33 L360.48,333.33 '/> <g transform="translate(66.48,336.91)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 2500</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M74.17,289.53 L368.73,289.53 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M74.17,289.53 L82.42,289.53 M368.73,289.53 L360.48,289.53 '/> <g transform="translate(66.48,293.11)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 3000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M74.17,245.72 L368.73,245.72 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M74.17,245.72 L82.42,245.72 M368.73,245.72 L360.48,245.72 '/> <g transform="translate(66.48,249.30)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 3500</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M74.17,201.92 L368.73,201.92 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M74.17,201.92 L82.42,201.92 M368.73,201.92 L360.48,201.92 '/> <g transform="translate(66.48,205.50)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 4000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M74.17,158.12 L368.73,158.12 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M74.17,158.12 L82.42,158.12 M368.73,158.12 L360.48,158.12 '/> <g transform="translate(66.48,161.70)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 4500</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M74.17,114.31 L368.73,114.31 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M74.17,114.31 L82.42,114.31 M368.73,114.31 L360.48,114.31 '/> <g transform="translate(66.48,117.89)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 5000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M74.17,70.51 L368.73,70.51 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M74.17,70.51 L82.42,70.51 M368.73,70.51 L360.48,70.51 '/> <g transform="translate(66.48,74.09)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 5500</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M221.45,420.94 L221.45,412.69 M221.45,70.51 L221.45,78.76 '/> <g transform="translate(219.66,431.73) rotate(30.00)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="start">
|
||||||
|
<text><tspan font-family="sans" >v0.5.0</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M74.17,70.51 L74.17,420.94 L368.73,420.94 L368.73,70.51 L74.17,70.51 Z '/></g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g id="gnuplot_plot_1a" ><title>parrhesia-pg</title>
|
||||||
|
<g fill="none" color="white" stroke="black" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<g transform="translate(537.91,82.34)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" >parrhesia-pg</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='rgb( 37, 99, 235)' d='M545.60,78.76 L584.61,78.76 M221.45,112.97 '/> <use xlink:href='#gpPt6' transform='translate(221.45,112.97) scale(4.12)' color='rgb( 37, 99, 235)'/>
|
||||||
|
<use xlink:href='#gpPt6' transform='translate(565.10,78.76) scale(4.12)' color='rgb( 37, 99, 235)'/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g id="gnuplot_plot_2a" ><title>parrhesia-memory</title>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<g transform="translate(537.91,98.84)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" >parrhesia-memory</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='rgb( 22, 163, 74)' d='M545.60,95.26 L584.61,95.26 M221.45,420.47 '/> <use xlink:href='#gpPt8' transform='translate(221.45,420.47) scale(4.12)' color='rgb( 22, 163, 74)'/>
|
||||||
|
<use xlink:href='#gpPt8' transform='translate(565.10,95.26) scale(4.12)' color='rgb( 22, 163, 74)'/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g id="gnuplot_plot_3a" ><title>nostr-rs-relay (avg)</title>
|
||||||
|
<g fill="none" color="white" stroke="rgb( 22, 163, 74)" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<g transform="translate(537.91,115.34)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" >nostr-rs-relay (avg)</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='rgb(234, 88, 12)' stroke-dasharray='3.8,6.0' d='M545.60,111.76 L584.61,111.76 M221.45,351.41 '/> <use xlink:href='#gpPt4' transform='translate(221.45,351.41) scale(3.30)' color='rgb(234, 88, 12)'/>
|
||||||
|
<use xlink:href='#gpPt4' transform='translate(565.10,111.76) scale(3.30)' color='rgb(234, 88, 12)'/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="white" stroke="rgb(234, 88, 12)" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M74.17,70.51 L74.17,420.94 L368.73,420.94 L368.73,70.51 L74.17,70.51 Z '/> <g transform="translate(17.58,245.73) rotate(270.00)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="middle">
|
||||||
|
<text><tspan font-family="sans" >TPS</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<g transform="translate(221.45,49.34)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="middle">
|
||||||
|
<text><tspan font-family="sans" >Event Throughput (TPS) — higher is better</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M681.86,420.94 L968.73,420.94 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M681.86,420.94 L690.11,420.94 M968.73,420.94 L960.48,420.94 '/> <g transform="translate(674.17,424.52)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 2000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M681.86,370.88 L968.73,370.88 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M681.86,370.88 L690.11,370.88 M968.73,370.88 L960.48,370.88 '/> <g transform="translate(674.17,374.46)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 4000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M681.86,320.82 L968.73,320.82 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M681.86,320.82 L690.11,320.82 M968.73,320.82 L960.48,320.82 '/> <g transform="translate(674.17,324.40)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 6000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M681.86,270.76 L968.73,270.76 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M681.86,270.76 L690.11,270.76 M968.73,270.76 L960.48,270.76 '/> <g transform="translate(674.17,274.34)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 8000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M681.86,220.69 L968.73,220.69 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M681.86,220.69 L690.11,220.69 M968.73,220.69 L960.48,220.69 '/> <g transform="translate(674.17,224.27)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 10000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M681.86,170.63 L968.73,170.63 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M681.86,170.63 L690.11,170.63 M968.73,170.63 L960.48,170.63 '/> <g transform="translate(674.17,174.21)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 12000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M681.86,120.57 L968.73,120.57 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M681.86,120.57 L690.11,120.57 M968.73,120.57 L960.48,120.57 '/> <g transform="translate(674.17,124.15)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 14000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M681.86,70.51 L968.73,70.51 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M681.86,70.51 L690.11,70.51 M968.73,70.51 L960.48,70.51 '/> <g transform="translate(674.17,74.09)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 16000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M825.30,420.94 L825.30,412.69 M825.30,70.51 L825.30,78.76 '/> <g transform="translate(823.51,431.73) rotate(30.00)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="start">
|
||||||
|
<text><tspan font-family="sans" >v0.5.0</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M681.86,70.51 L681.86,420.94 L968.73,420.94 L968.73,70.51 L681.86,70.51 Z '/></g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g id="gnuplot_plot_1b" ><title>parrhesia-pg</title>
|
||||||
|
<g fill="none" color="white" stroke="black" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<g transform="translate(1137.91,82.34)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" >parrhesia-pg</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='rgb( 37, 99, 235)' d='M1145.60,78.76 L1184.61,78.76 M825.30,310.40 '/> <use xlink:href='#gpPt6' transform='translate(825.30,310.40) scale(4.12)' color='rgb( 37, 99, 235)'/>
|
||||||
|
<use xlink:href='#gpPt6' transform='translate(1165.10,78.76) scale(4.12)' color='rgb( 37, 99, 235)'/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g id="gnuplot_plot_2b" ><title>parrhesia-memory</title>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<g transform="translate(1137.91,98.84)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" >parrhesia-memory</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='rgb( 22, 163, 74)' d='M1145.60,95.26 L1184.61,95.26 M825.30,106.39 '/> <use xlink:href='#gpPt8' transform='translate(825.30,106.39) scale(4.12)' color='rgb( 22, 163, 74)'/>
|
||||||
|
<use xlink:href='#gpPt8' transform='translate(1165.10,95.26) scale(4.12)' color='rgb( 22, 163, 74)'/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g id="gnuplot_plot_3b" ><title>nostr-rs-relay (avg)</title>
|
||||||
|
<g fill="none" color="white" stroke="rgb( 22, 163, 74)" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<g transform="translate(1137.91,115.34)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" >nostr-rs-relay (avg)</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='rgb(234, 88, 12)' stroke-dasharray='3.8,6.0' d='M1145.60,111.76 L1184.61,111.76 M825.30,395.02 '/> <use xlink:href='#gpPt4' transform='translate(825.30,395.02) scale(3.30)' color='rgb(234, 88, 12)'/>
|
||||||
|
<use xlink:href='#gpPt4' transform='translate(1165.10,111.76) scale(3.30)' color='rgb(234, 88, 12)'/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="white" stroke="rgb(234, 88, 12)" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M681.86,70.51 L681.86,420.94 L968.73,420.94 L968.73,70.51 L681.86,70.51 Z '/> <g transform="translate(617.58,245.73) rotate(270.00)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="middle">
|
||||||
|
<text><tspan font-family="sans" >TPS</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<g transform="translate(825.29,49.34)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="middle">
|
||||||
|
<text><tspan font-family="sans" >Req Throughput (TPS) — higher is better</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M89.55,860.44 L368.73,860.44 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M89.55,860.44 L97.80,860.44 M368.73,860.44 L360.48,860.44 '/> <g transform="translate(81.86,864.02)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 60000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M89.55,821.50 L368.73,821.50 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M89.55,821.50 L97.80,821.50 M368.73,821.50 L360.48,821.50 '/> <g transform="translate(81.86,825.08)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 70000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M89.55,782.56 L368.73,782.56 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M89.55,782.56 L97.80,782.56 M368.73,782.56 L360.48,782.56 '/> <g transform="translate(81.86,786.14)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 80000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M89.55,743.63 L368.73,743.63 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M89.55,743.63 L97.80,743.63 M368.73,743.63 L360.48,743.63 '/> <g transform="translate(81.86,747.21)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 90000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M89.55,704.69 L368.73,704.69 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M89.55,704.69 L97.80,704.69 M368.73,704.69 L360.48,704.69 '/> <g transform="translate(81.86,708.27)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 100000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M89.55,665.75 L368.73,665.75 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M89.55,665.75 L97.80,665.75 M368.73,665.75 L360.48,665.75 '/> <g transform="translate(81.86,669.33)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 110000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M89.55,626.81 L368.73,626.81 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M89.55,626.81 L97.80,626.81 M368.73,626.81 L360.48,626.81 '/> <g transform="translate(81.86,630.39)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 120000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M89.55,587.88 L368.73,587.88 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M89.55,587.88 L97.80,587.88 M368.73,587.88 L360.48,587.88 '/> <g transform="translate(81.86,591.46)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 130000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M89.55,548.94 L368.73,548.94 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M89.55,548.94 L97.80,548.94 M368.73,548.94 L360.48,548.94 '/> <g transform="translate(81.86,552.52)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 140000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M89.55,510.00 L368.73,510.00 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M89.55,510.00 L97.80,510.00 M368.73,510.00 L360.48,510.00 '/> <g transform="translate(81.86,513.58)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 150000</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M229.14,860.44 L229.14,852.19 M229.14,510.00 L229.14,518.25 '/> <g transform="translate(227.35,871.23) rotate(30.00)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="start">
|
||||||
|
<text><tspan font-family="sans" >v0.5.0</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M89.55,510.00 L89.55,860.44 L368.73,860.44 L368.73,510.00 L89.55,510.00 Z '/></g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g id="gnuplot_plot_1c" ><title>parrhesia-pg</title>
|
||||||
|
<g fill="none" color="white" stroke="black" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<g transform="translate(537.91,521.83)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" >parrhesia-pg</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='rgb( 37, 99, 235)' d='M545.60,518.25 L584.61,518.25 M229.14,844.75 '/> <use xlink:href='#gpPt6' transform='translate(229.14,844.75) scale(4.12)' color='rgb( 37, 99, 235)'/>
|
||||||
|
<use xlink:href='#gpPt6' transform='translate(565.10,518.25) scale(4.12)' color='rgb( 37, 99, 235)'/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g id="gnuplot_plot_2c" ><title>parrhesia-memory</title>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<g transform="translate(537.91,538.33)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" >parrhesia-memory</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='rgb( 22, 163, 74)' d='M545.60,534.75 L584.61,534.75 M229.14,729.39 '/> <use xlink:href='#gpPt8' transform='translate(229.14,729.39) scale(4.12)' color='rgb( 22, 163, 74)'/>
|
||||||
|
<use xlink:href='#gpPt8' transform='translate(565.10,534.75) scale(4.12)' color='rgb( 22, 163, 74)'/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g id="gnuplot_plot_3c" ><title>nostr-rs-relay (avg)</title>
|
||||||
|
<g fill="none" color="white" stroke="rgb( 22, 163, 74)" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<g transform="translate(537.91,554.83)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" >nostr-rs-relay (avg)</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='rgb(234, 88, 12)' stroke-dasharray='3.8,6.0' d='M545.60,551.25 L584.61,551.25 M229.14,545.95 '/> <use xlink:href='#gpPt4' transform='translate(229.14,545.95) scale(3.30)' color='rgb(234, 88, 12)'/>
|
||||||
|
<use xlink:href='#gpPt4' transform='translate(565.10,551.25) scale(3.30)' color='rgb(234, 88, 12)'/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="white" stroke="rgb(234, 88, 12)" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M89.55,510.00 L89.55,860.44 L368.73,860.44 L368.73,510.00 L89.55,510.00 Z '/> <g transform="translate(17.58,685.22) rotate(270.00)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="middle">
|
||||||
|
<text><tspan font-family="sans" >TPS</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<g transform="translate(229.14,488.83)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="middle">
|
||||||
|
<text><tspan font-family="sans" >Echo Throughput (TPS) — higher is better</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M666.48,860.44 L968.73,860.44 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M666.48,860.44 L674.73,860.44 M968.73,860.44 L960.48,860.44 '/> <g transform="translate(658.79,864.02)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 7</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M666.48,790.35 L968.73,790.35 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M666.48,790.35 L674.73,790.35 M968.73,790.35 L960.48,790.35 '/> <g transform="translate(658.79,793.93)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 7.5</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M666.48,720.26 L968.73,720.26 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M666.48,720.26 L674.73,720.26 M968.73,720.26 L960.48,720.26 '/> <g transform="translate(658.79,723.84)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 8</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M666.48,650.18 L968.73,650.18 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M666.48,650.18 L674.73,650.18 M968.73,650.18 L960.48,650.18 '/> <g transform="translate(658.79,653.76)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 8.5</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M666.48,580.09 L968.73,580.09 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M666.48,580.09 L674.73,580.09 M968.73,580.09 L960.48,580.09 '/> <g transform="translate(658.79,583.67)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 9</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="gray" stroke="currentColor" stroke-width="0.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='gray' stroke-dasharray='2,4' class="gridline" d='M666.48,510.00 L968.73,510.00 '/></g>
|
||||||
|
<g fill="none" color="gray" stroke="gray" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M666.48,510.00 L674.73,510.00 M968.73,510.00 L960.48,510.00 '/> <g transform="translate(658.79,513.58)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" > 9.5</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M817.61,860.44 L817.61,852.19 M817.61,510.00 L817.61,518.25 '/> <g transform="translate(815.82,871.23) rotate(30.00)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="start">
|
||||||
|
<text><tspan font-family="sans" >v0.5.0</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M666.48,510.00 L666.48,860.44 L968.73,860.44 L968.73,510.00 L666.48,510.00 Z '/></g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g id="gnuplot_plot_1d" ><title>parrhesia-pg</title>
|
||||||
|
<g fill="none" color="white" stroke="black" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<g transform="translate(1137.91,521.83)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" >parrhesia-pg</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='rgb( 37, 99, 235)' d='M1145.60,518.25 L1184.61,518.25 M817.61,533.36 '/> <use xlink:href='#gpPt6' transform='translate(817.61,533.36) scale(4.12)' color='rgb( 37, 99, 235)'/>
|
||||||
|
<use xlink:href='#gpPt6' transform='translate(1165.10,518.25) scale(4.12)' color='rgb( 37, 99, 235)'/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g id="gnuplot_plot_2d" ><title>parrhesia-memory</title>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<g transform="translate(1137.91,538.33)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" >parrhesia-memory</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='rgb( 22, 163, 74)' d='M1145.60,534.75 L1184.61,534.75 M817.61,766.99 '/> <use xlink:href='#gpPt8' transform='translate(817.61,766.99) scale(4.12)' color='rgb( 22, 163, 74)'/>
|
||||||
|
<use xlink:href='#gpPt8' transform='translate(1165.10,534.75) scale(4.12)' color='rgb( 22, 163, 74)'/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g id="gnuplot_plot_3d" ><title>nostr-rs-relay (avg)</title>
|
||||||
|
<g fill="none" color="white" stroke="rgb( 22, 163, 74)" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<g transform="translate(1137.91,554.83)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="end">
|
||||||
|
<text><tspan font-family="sans" >nostr-rs-relay (avg)</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.50" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='rgb(234, 88, 12)' stroke-dasharray='3.8,6.0' d='M1145.60,551.25 L1184.61,551.25 M817.61,860.44 '/> <use xlink:href='#gpPt4' transform='translate(817.61,860.44) scale(3.30)' color='rgb(234, 88, 12)'/>
|
||||||
|
<use xlink:href='#gpPt4' transform='translate(1165.10,551.25) scale(3.30)' color='rgb(234, 88, 12)'/>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="white" stroke="rgb(234, 88, 12)" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="2.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="black" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<path stroke='black' d='M666.48,510.00 L666.48,860.44 L968.73,860.44 L968.73,510.00 L666.48,510.00 Z '/> <g transform="translate(617.58,685.22) rotate(270.00)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="middle">
|
||||||
|
<text><tspan font-family="sans" >ms</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
<g transform="translate(817.60,488.83)" stroke="none" fill="black" font-family="sans" font-size="11.00" text-anchor="middle">
|
||||||
|
<text><tspan font-family="sans" >Connect Avg Latency (ms) — lower is better</tspan></text>
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
<g fill="none" color="black" stroke="currentColor" stroke-width="1.00" stroke-linecap="butt" stroke-linejoin="miter">
|
||||||
|
</g>
|
||||||
|
</g>
|
||||||
|
</svg>
|
||||||
|
|
||||||
|
After Width: | Height: | Size: 53 KiB |
1
bench/history.jsonl
Normal file
1
bench/history.jsonl
Normal file
@@ -0,0 +1 @@
|
|||||||
|
{"timestamp":"2026-03-18T20:13:21Z","machine_id":"squirrel","git_tag":"v0.5.0","git_commit":"970cee2","runs":3,"servers":{"parrhesia-pg":{"connect_avg_ms":9.333333333333334,"connect_max_ms":12.333333333333334,"echo_tps":64030.333333333336,"echo_mibs":35.06666666666666,"event_tps":5015.333333333333,"event_mibs":3.4,"req_tps":6416.333333333333,"req_mibs":42.43333333333334},"parrhesia-memory":{"connect_avg_ms":7.666666666666667,"connect_max_ms":9.666666666666666,"echo_tps":93656.33333333333,"echo_mibs":51.26666666666667,"event_tps":1505.3333333333333,"event_mibs":1,"req_tps":14566.666666666666,"req_mibs":94.23333333333335},"nostr-rs-relay":{"connect_avg_ms":7,"connect_max_ms":10.333333333333334,"echo_tps":140767,"echo_mibs":77.06666666666666,"event_tps":2293.6666666666665,"event_mibs":1.5,"req_tps":3035.6666666666665,"req_mibs":19.23333333333333}}}
|
||||||
92
compose.node-sync-e2e.yaml
Normal file
92
compose.node-sync-e2e.yaml
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
services:
|
||||||
|
db-a:
|
||||||
|
image: postgres:17
|
||||||
|
restart: unless-stopped
|
||||||
|
environment:
|
||||||
|
POSTGRES_DB: parrhesia_a
|
||||||
|
POSTGRES_USER: parrhesia
|
||||||
|
POSTGRES_PASSWORD: parrhesia
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
|
||||||
|
interval: 5s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 12
|
||||||
|
volumes:
|
||||||
|
- postgres-a-data:/var/lib/postgresql/data
|
||||||
|
|
||||||
|
db-b:
|
||||||
|
image: postgres:17
|
||||||
|
restart: unless-stopped
|
||||||
|
environment:
|
||||||
|
POSTGRES_DB: parrhesia_b
|
||||||
|
POSTGRES_USER: parrhesia
|
||||||
|
POSTGRES_PASSWORD: parrhesia
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
|
||||||
|
interval: 5s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 12
|
||||||
|
volumes:
|
||||||
|
- postgres-b-data:/var/lib/postgresql/data
|
||||||
|
|
||||||
|
migrate-a:
|
||||||
|
image: ${PARRHESIA_IMAGE:-parrhesia:latest}
|
||||||
|
restart: "no"
|
||||||
|
depends_on:
|
||||||
|
db-a:
|
||||||
|
condition: service_healthy
|
||||||
|
environment:
|
||||||
|
DATABASE_URL: ecto://parrhesia:parrhesia@db-a:5432/parrhesia_a
|
||||||
|
POOL_SIZE: ${POOL_SIZE:-20}
|
||||||
|
PARRHESIA_ACL_PROTECTED_FILTERS: ${PARRHESIA_ACL_PROTECTED_FILTERS}
|
||||||
|
command: ["eval", "Parrhesia.Release.migrate()"]
|
||||||
|
|
||||||
|
migrate-b:
|
||||||
|
image: ${PARRHESIA_IMAGE:-parrhesia:latest}
|
||||||
|
restart: "no"
|
||||||
|
depends_on:
|
||||||
|
db-b:
|
||||||
|
condition: service_healthy
|
||||||
|
environment:
|
||||||
|
DATABASE_URL: ecto://parrhesia:parrhesia@db-b:5432/parrhesia_b
|
||||||
|
POOL_SIZE: ${POOL_SIZE:-20}
|
||||||
|
PARRHESIA_ACL_PROTECTED_FILTERS: ${PARRHESIA_ACL_PROTECTED_FILTERS}
|
||||||
|
command: ["eval", "Parrhesia.Release.migrate()"]
|
||||||
|
|
||||||
|
parrhesia-a:
|
||||||
|
image: ${PARRHESIA_IMAGE:-parrhesia:latest}
|
||||||
|
restart: unless-stopped
|
||||||
|
depends_on:
|
||||||
|
db-a:
|
||||||
|
condition: service_healthy
|
||||||
|
environment:
|
||||||
|
DATABASE_URL: ecto://parrhesia:parrhesia@db-a:5432/parrhesia_a
|
||||||
|
POOL_SIZE: ${POOL_SIZE:-20}
|
||||||
|
PORT: 4413
|
||||||
|
PARRHESIA_RELAY_URL: ${PARRHESIA_NODE_A_RELAY_URL:-ws://parrhesia-a:4413/relay}
|
||||||
|
PARRHESIA_ACL_PROTECTED_FILTERS: ${PARRHESIA_ACL_PROTECTED_FILTERS}
|
||||||
|
PARRHESIA_IDENTITY_PATH: /tmp/parrhesia-a/server_identity.json
|
||||||
|
PARRHESIA_SYNC_PATH: /tmp/parrhesia-a/sync_servers.json
|
||||||
|
ports:
|
||||||
|
- "${PARRHESIA_NODE_A_HOST_PORT:-45131}:4413"
|
||||||
|
|
||||||
|
parrhesia-b:
|
||||||
|
image: ${PARRHESIA_IMAGE:-parrhesia:latest}
|
||||||
|
restart: unless-stopped
|
||||||
|
depends_on:
|
||||||
|
db-b:
|
||||||
|
condition: service_healthy
|
||||||
|
environment:
|
||||||
|
DATABASE_URL: ecto://parrhesia:parrhesia@db-b:5432/parrhesia_b
|
||||||
|
POOL_SIZE: ${POOL_SIZE:-20}
|
||||||
|
PORT: 4413
|
||||||
|
PARRHESIA_RELAY_URL: ${PARRHESIA_NODE_B_RELAY_URL:-ws://parrhesia-b:4413/relay}
|
||||||
|
PARRHESIA_ACL_PROTECTED_FILTERS: ${PARRHESIA_ACL_PROTECTED_FILTERS}
|
||||||
|
PARRHESIA_IDENTITY_PATH: /tmp/parrhesia-b/server_identity.json
|
||||||
|
PARRHESIA_SYNC_PATH: /tmp/parrhesia-b/sync_servers.json
|
||||||
|
ports:
|
||||||
|
- "${PARRHESIA_NODE_B_HOST_PORT:-45132}:4413"
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
postgres-a-data:
|
||||||
|
postgres-b-data:
|
||||||
@@ -1,15 +1,57 @@
|
|||||||
import Config
|
import Config
|
||||||
|
|
||||||
|
project_version =
|
||||||
|
case Mix.Project.config()[:version] do
|
||||||
|
version when is_binary(version) -> version
|
||||||
|
version -> to_string(version)
|
||||||
|
end
|
||||||
|
|
||||||
config :postgrex, :json_library, JSON
|
config :postgrex, :json_library, JSON
|
||||||
|
|
||||||
config :parrhesia,
|
config :parrhesia,
|
||||||
|
metadata: [
|
||||||
|
name: "Parrhesia",
|
||||||
|
version: project_version,
|
||||||
|
hide_version?: true
|
||||||
|
],
|
||||||
|
database: [
|
||||||
|
separate_read_pool?: config_env() != :test
|
||||||
|
],
|
||||||
moderation_cache_enabled: true,
|
moderation_cache_enabled: true,
|
||||||
|
enable_partition_retention_worker: true,
|
||||||
relay_url: "ws://localhost:4413/relay",
|
relay_url: "ws://localhost:4413/relay",
|
||||||
|
nip43: [
|
||||||
|
enabled: true,
|
||||||
|
invite_ttl_seconds: 900,
|
||||||
|
request_max_age_seconds: 300
|
||||||
|
],
|
||||||
|
nip66: [
|
||||||
|
enabled: true,
|
||||||
|
publish_interval_seconds: 900,
|
||||||
|
publish_monitor_announcement?: true,
|
||||||
|
timeout_ms: 5_000,
|
||||||
|
checks: [:open, :read, :nip11],
|
||||||
|
targets: []
|
||||||
|
],
|
||||||
|
identity: [
|
||||||
|
path: nil,
|
||||||
|
private_key: nil
|
||||||
|
],
|
||||||
|
sync: [
|
||||||
|
path: nil,
|
||||||
|
start_workers?: true
|
||||||
|
],
|
||||||
limits: [
|
limits: [
|
||||||
max_frame_bytes: 1_048_576,
|
max_frame_bytes: 1_048_576,
|
||||||
max_event_bytes: 262_144,
|
max_event_bytes: 262_144,
|
||||||
max_filters_per_req: 16,
|
max_filters_per_req: 16,
|
||||||
max_filter_limit: 500,
|
max_filter_limit: 500,
|
||||||
|
max_tags_per_event: 256,
|
||||||
|
max_tag_values_per_filter: 128,
|
||||||
|
ip_max_event_ingest_per_window: 1_000,
|
||||||
|
ip_event_ingest_window_seconds: 1,
|
||||||
|
relay_max_event_ingest_per_window: 10_000,
|
||||||
|
relay_event_ingest_window_seconds: 1,
|
||||||
max_subscriptions_per_connection: 32,
|
max_subscriptions_per_connection: 32,
|
||||||
max_event_future_skew_seconds: 900,
|
max_event_future_skew_seconds: 900,
|
||||||
max_event_ingest_per_window: 120,
|
max_event_ingest_per_window: 120,
|
||||||
@@ -21,6 +63,8 @@ config :parrhesia,
|
|||||||
max_negentropy_payload_bytes: 4096,
|
max_negentropy_payload_bytes: 4096,
|
||||||
max_negentropy_sessions_per_connection: 8,
|
max_negentropy_sessions_per_connection: 8,
|
||||||
max_negentropy_total_sessions: 10_000,
|
max_negentropy_total_sessions: 10_000,
|
||||||
|
max_negentropy_items_per_session: 50_000,
|
||||||
|
negentropy_id_list_threshold: 32,
|
||||||
negentropy_session_idle_timeout_seconds: 60,
|
negentropy_session_idle_timeout_seconds: 60,
|
||||||
negentropy_session_sweep_interval_seconds: 10
|
negentropy_session_sweep_interval_seconds: 10
|
||||||
],
|
],
|
||||||
@@ -47,13 +91,27 @@ config :parrhesia,
|
|||||||
marmot_push_max_server_recipients: 1,
|
marmot_push_max_server_recipients: 1,
|
||||||
management_auth_required: true
|
management_auth_required: true
|
||||||
],
|
],
|
||||||
metrics: [
|
listeners: %{
|
||||||
enabled_on_main_endpoint: true,
|
public: %{
|
||||||
public: false,
|
enabled: true,
|
||||||
private_networks_only: true,
|
bind: %{ip: {0, 0, 0, 0}, port: 4413},
|
||||||
allowed_cidrs: [],
|
max_connections: 20_000,
|
||||||
auth_token: nil
|
transport: %{scheme: :http, tls: %{mode: :disabled}},
|
||||||
],
|
proxy: %{trusted_cidrs: [], honor_x_forwarded_for: true},
|
||||||
|
network: %{allow_all: true},
|
||||||
|
features: %{
|
||||||
|
nostr: %{enabled: true},
|
||||||
|
admin: %{enabled: true},
|
||||||
|
metrics: %{
|
||||||
|
enabled: true,
|
||||||
|
access: %{private_networks_only: true},
|
||||||
|
auth_token: nil
|
||||||
|
}
|
||||||
|
},
|
||||||
|
auth: %{nip42_required: false, nip98_required_for_admin: true},
|
||||||
|
baseline_acl: %{read: [], write: []}
|
||||||
|
}
|
||||||
|
},
|
||||||
retention: [
|
retention: [
|
||||||
check_interval_hours: 24,
|
check_interval_hours: 24,
|
||||||
months_ahead: 2,
|
months_ahead: 2,
|
||||||
@@ -62,6 +120,7 @@ config :parrhesia,
|
|||||||
max_partitions_to_drop_per_run: 1
|
max_partitions_to_drop_per_run: 1
|
||||||
],
|
],
|
||||||
features: [
|
features: [
|
||||||
|
verify_event_signatures_locked?: config_env() == :prod,
|
||||||
verify_event_signatures: true,
|
verify_event_signatures: true,
|
||||||
nip_45_count: true,
|
nip_45_count: true,
|
||||||
nip_50_search: true,
|
nip_50_search: true,
|
||||||
@@ -69,20 +128,16 @@ config :parrhesia,
|
|||||||
marmot_push_notifications: false
|
marmot_push_notifications: false
|
||||||
],
|
],
|
||||||
storage: [
|
storage: [
|
||||||
|
backend: :postgres,
|
||||||
events: Parrhesia.Storage.Adapters.Postgres.Events,
|
events: Parrhesia.Storage.Adapters.Postgres.Events,
|
||||||
|
acl: Parrhesia.Storage.Adapters.Postgres.ACL,
|
||||||
moderation: Parrhesia.Storage.Adapters.Postgres.Moderation,
|
moderation: Parrhesia.Storage.Adapters.Postgres.Moderation,
|
||||||
groups: Parrhesia.Storage.Adapters.Postgres.Groups,
|
groups: Parrhesia.Storage.Adapters.Postgres.Groups,
|
||||||
admin: Parrhesia.Storage.Adapters.Postgres.Admin
|
admin: Parrhesia.Storage.Adapters.Postgres.Admin
|
||||||
]
|
]
|
||||||
|
|
||||||
config :parrhesia, Parrhesia.Web.Endpoint, port: 4413
|
|
||||||
|
|
||||||
config :parrhesia, Parrhesia.Web.MetricsEndpoint,
|
|
||||||
enabled: false,
|
|
||||||
ip: {127, 0, 0, 1},
|
|
||||||
port: 9568
|
|
||||||
|
|
||||||
config :parrhesia, Parrhesia.Repo, types: Parrhesia.PostgresTypes
|
config :parrhesia, Parrhesia.Repo, types: Parrhesia.PostgresTypes
|
||||||
|
config :parrhesia, Parrhesia.ReadRepo, types: Parrhesia.PostgresTypes
|
||||||
|
|
||||||
config :parrhesia, ecto_repos: [Parrhesia.Repo]
|
config :parrhesia, ecto_repos: [Parrhesia.Repo]
|
||||||
|
|
||||||
|
|||||||
@@ -23,3 +23,13 @@ config :parrhesia,
|
|||||||
show_sensitive_data_on_connection_error: true,
|
show_sensitive_data_on_connection_error: true,
|
||||||
pool_size: 10
|
pool_size: 10
|
||||||
] ++ repo_host_opts
|
] ++ repo_host_opts
|
||||||
|
|
||||||
|
config :parrhesia,
|
||||||
|
Parrhesia.ReadRepo,
|
||||||
|
[
|
||||||
|
username: System.get_env("PGUSER") || System.get_env("USER") || "agent",
|
||||||
|
password: System.get_env("PGPASSWORD"),
|
||||||
|
database: System.get_env("PGDATABASE") || "parrhesia_dev",
|
||||||
|
show_sensitive_data_on_connection_error: true,
|
||||||
|
pool_size: 10
|
||||||
|
] ++ repo_host_opts
|
||||||
|
|||||||
@@ -5,4 +5,9 @@ config :parrhesia, Parrhesia.Repo,
|
|||||||
queue_target: 1_000,
|
queue_target: 1_000,
|
||||||
queue_interval: 5_000
|
queue_interval: 5_000
|
||||||
|
|
||||||
|
config :parrhesia, Parrhesia.ReadRepo,
|
||||||
|
pool_size: 32,
|
||||||
|
queue_target: 1_000,
|
||||||
|
queue_interval: 5_000
|
||||||
|
|
||||||
# Production runtime configuration lives in config/runtime.exs.
|
# Production runtime configuration lives in config/runtime.exs.
|
||||||
|
|||||||
@@ -35,6 +35,20 @@ bool_env = fn name, default ->
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
storage_backend_env = fn name, default ->
|
||||||
|
case System.get_env(name) do
|
||||||
|
nil ->
|
||||||
|
default
|
||||||
|
|
||||||
|
value ->
|
||||||
|
case String.downcase(String.trim(value)) do
|
||||||
|
"postgres" -> :postgres
|
||||||
|
"memory" -> :memory
|
||||||
|
_other -> raise "environment variable #{name} must be one of: postgres, memory"
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
csv_env = fn name, default ->
|
csv_env = fn name, default ->
|
||||||
case System.get_env(name) do
|
case System.get_env(name) do
|
||||||
nil ->
|
nil ->
|
||||||
@@ -48,6 +62,25 @@ csv_env = fn name, default ->
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
json_env = fn name, default ->
|
||||||
|
case System.get_env(name) do
|
||||||
|
nil ->
|
||||||
|
default
|
||||||
|
|
||||||
|
"" ->
|
||||||
|
default
|
||||||
|
|
||||||
|
value ->
|
||||||
|
case JSON.decode(value) do
|
||||||
|
{:ok, decoded} ->
|
||||||
|
decoded
|
||||||
|
|
||||||
|
{:error, reason} ->
|
||||||
|
raise "environment variable #{name} must contain valid JSON: #{inspect(reason)}"
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
infinity_or_int_env = fn name, default ->
|
infinity_or_int_env = fn name, default ->
|
||||||
case System.get_env(name) do
|
case System.get_env(name) do
|
||||||
nil ->
|
nil ->
|
||||||
@@ -106,12 +139,12 @@ ipv4_env = fn name, default ->
|
|||||||
end
|
end
|
||||||
|
|
||||||
if config_env() == :prod do
|
if config_env() == :prod do
|
||||||
database_url =
|
|
||||||
System.get_env("DATABASE_URL") ||
|
|
||||||
raise "environment variable DATABASE_URL is missing. Example: ecto://USER:PASS@HOST/DATABASE"
|
|
||||||
|
|
||||||
repo_defaults = Application.get_env(:parrhesia, Parrhesia.Repo, [])
|
repo_defaults = Application.get_env(:parrhesia, Parrhesia.Repo, [])
|
||||||
|
read_repo_defaults = Application.get_env(:parrhesia, Parrhesia.ReadRepo, [])
|
||||||
relay_url_default = Application.get_env(:parrhesia, :relay_url)
|
relay_url_default = Application.get_env(:parrhesia, :relay_url)
|
||||||
|
metadata_defaults = Application.get_env(:parrhesia, :metadata, [])
|
||||||
|
database_defaults = Application.get_env(:parrhesia, :database, [])
|
||||||
|
storage_defaults = Application.get_env(:parrhesia, :storage, [])
|
||||||
|
|
||||||
moderation_cache_enabled_default =
|
moderation_cache_enabled_default =
|
||||||
Application.get_env(:parrhesia, :moderation_cache_enabled, true)
|
Application.get_env(:parrhesia, :moderation_cache_enabled, true)
|
||||||
@@ -119,20 +152,54 @@ if config_env() == :prod do
|
|||||||
enable_expiration_worker_default =
|
enable_expiration_worker_default =
|
||||||
Application.get_env(:parrhesia, :enable_expiration_worker, true)
|
Application.get_env(:parrhesia, :enable_expiration_worker, true)
|
||||||
|
|
||||||
|
enable_partition_retention_worker_default =
|
||||||
|
Application.get_env(:parrhesia, :enable_partition_retention_worker, true)
|
||||||
|
|
||||||
limits_defaults = Application.get_env(:parrhesia, :limits, [])
|
limits_defaults = Application.get_env(:parrhesia, :limits, [])
|
||||||
policies_defaults = Application.get_env(:parrhesia, :policies, [])
|
policies_defaults = Application.get_env(:parrhesia, :policies, [])
|
||||||
metrics_defaults = Application.get_env(:parrhesia, :metrics, [])
|
listeners_defaults = Application.get_env(:parrhesia, :listeners, %{})
|
||||||
retention_defaults = Application.get_env(:parrhesia, :retention, [])
|
retention_defaults = Application.get_env(:parrhesia, :retention, [])
|
||||||
features_defaults = Application.get_env(:parrhesia, :features, [])
|
features_defaults = Application.get_env(:parrhesia, :features, [])
|
||||||
metrics_endpoint_defaults = Application.get_env(:parrhesia, Parrhesia.Web.MetricsEndpoint, [])
|
acl_defaults = Application.get_env(:parrhesia, :acl, [])
|
||||||
|
|
||||||
default_pool_size = Keyword.get(repo_defaults, :pool_size, 32)
|
default_pool_size = Keyword.get(repo_defaults, :pool_size, 32)
|
||||||
default_queue_target = Keyword.get(repo_defaults, :queue_target, 1_000)
|
default_queue_target = Keyword.get(repo_defaults, :queue_target, 1_000)
|
||||||
default_queue_interval = Keyword.get(repo_defaults, :queue_interval, 5_000)
|
default_queue_interval = Keyword.get(repo_defaults, :queue_interval, 5_000)
|
||||||
|
default_read_pool_size = Keyword.get(read_repo_defaults, :pool_size, default_pool_size)
|
||||||
|
default_read_queue_target = Keyword.get(read_repo_defaults, :queue_target, default_queue_target)
|
||||||
|
|
||||||
|
default_read_queue_interval =
|
||||||
|
Keyword.get(read_repo_defaults, :queue_interval, default_queue_interval)
|
||||||
|
|
||||||
|
default_storage_backend =
|
||||||
|
storage_defaults
|
||||||
|
|> Keyword.get(:backend, :postgres)
|
||||||
|
|> case do
|
||||||
|
:postgres -> :postgres
|
||||||
|
:memory -> :memory
|
||||||
|
other -> raise "unsupported storage backend default: #{inspect(other)}"
|
||||||
|
end
|
||||||
|
|
||||||
|
storage_backend = storage_backend_env.("PARRHESIA_STORAGE_BACKEND", default_storage_backend)
|
||||||
|
postgres_backend? = storage_backend == :postgres
|
||||||
|
|
||||||
|
separate_read_pool? =
|
||||||
|
postgres_backend? and Keyword.get(database_defaults, :separate_read_pool?, true)
|
||||||
|
|
||||||
|
database_url =
|
||||||
|
if postgres_backend? do
|
||||||
|
System.get_env("DATABASE_URL") ||
|
||||||
|
raise "environment variable DATABASE_URL is missing. Example: ecto://USER:PASS@HOST/DATABASE"
|
||||||
|
else
|
||||||
|
nil
|
||||||
|
end
|
||||||
|
|
||||||
pool_size = int_env.("POOL_SIZE", default_pool_size)
|
pool_size = int_env.("POOL_SIZE", default_pool_size)
|
||||||
queue_target = int_env.("DB_QUEUE_TARGET_MS", default_queue_target)
|
queue_target = int_env.("DB_QUEUE_TARGET_MS", default_queue_target)
|
||||||
queue_interval = int_env.("DB_QUEUE_INTERVAL_MS", default_queue_interval)
|
queue_interval = int_env.("DB_QUEUE_INTERVAL_MS", default_queue_interval)
|
||||||
|
read_pool_size = int_env.("DB_READ_POOL_SIZE", default_read_pool_size)
|
||||||
|
read_queue_target = int_env.("DB_READ_QUEUE_TARGET_MS", default_read_queue_target)
|
||||||
|
read_queue_interval = int_env.("DB_READ_QUEUE_INTERVAL_MS", default_read_queue_interval)
|
||||||
|
|
||||||
limits = [
|
limits = [
|
||||||
max_frame_bytes:
|
max_frame_bytes:
|
||||||
@@ -155,6 +222,36 @@ if config_env() == :prod do
|
|||||||
"PARRHESIA_LIMITS_MAX_FILTER_LIMIT",
|
"PARRHESIA_LIMITS_MAX_FILTER_LIMIT",
|
||||||
Keyword.get(limits_defaults, :max_filter_limit, 500)
|
Keyword.get(limits_defaults, :max_filter_limit, 500)
|
||||||
),
|
),
|
||||||
|
max_tags_per_event:
|
||||||
|
int_env.(
|
||||||
|
"PARRHESIA_LIMITS_MAX_TAGS_PER_EVENT",
|
||||||
|
Keyword.get(limits_defaults, :max_tags_per_event, 256)
|
||||||
|
),
|
||||||
|
max_tag_values_per_filter:
|
||||||
|
int_env.(
|
||||||
|
"PARRHESIA_LIMITS_MAX_TAG_VALUES_PER_FILTER",
|
||||||
|
Keyword.get(limits_defaults, :max_tag_values_per_filter, 128)
|
||||||
|
),
|
||||||
|
ip_max_event_ingest_per_window:
|
||||||
|
int_env.(
|
||||||
|
"PARRHESIA_LIMITS_IP_MAX_EVENT_INGEST_PER_WINDOW",
|
||||||
|
Keyword.get(limits_defaults, :ip_max_event_ingest_per_window, 1_000)
|
||||||
|
),
|
||||||
|
ip_event_ingest_window_seconds:
|
||||||
|
int_env.(
|
||||||
|
"PARRHESIA_LIMITS_IP_EVENT_INGEST_WINDOW_SECONDS",
|
||||||
|
Keyword.get(limits_defaults, :ip_event_ingest_window_seconds, 1)
|
||||||
|
),
|
||||||
|
relay_max_event_ingest_per_window:
|
||||||
|
int_env.(
|
||||||
|
"PARRHESIA_LIMITS_RELAY_MAX_EVENT_INGEST_PER_WINDOW",
|
||||||
|
Keyword.get(limits_defaults, :relay_max_event_ingest_per_window, 10_000)
|
||||||
|
),
|
||||||
|
relay_event_ingest_window_seconds:
|
||||||
|
int_env.(
|
||||||
|
"PARRHESIA_LIMITS_RELAY_EVENT_INGEST_WINDOW_SECONDS",
|
||||||
|
Keyword.get(limits_defaults, :relay_event_ingest_window_seconds, 1)
|
||||||
|
),
|
||||||
max_subscriptions_per_connection:
|
max_subscriptions_per_connection:
|
||||||
int_env.(
|
int_env.(
|
||||||
"PARRHESIA_LIMITS_MAX_SUBSCRIPTIONS_PER_CONNECTION",
|
"PARRHESIA_LIMITS_MAX_SUBSCRIPTIONS_PER_CONNECTION",
|
||||||
@@ -210,6 +307,16 @@ if config_env() == :prod do
|
|||||||
"PARRHESIA_LIMITS_MAX_NEGENTROPY_TOTAL_SESSIONS",
|
"PARRHESIA_LIMITS_MAX_NEGENTROPY_TOTAL_SESSIONS",
|
||||||
Keyword.get(limits_defaults, :max_negentropy_total_sessions, 10_000)
|
Keyword.get(limits_defaults, :max_negentropy_total_sessions, 10_000)
|
||||||
),
|
),
|
||||||
|
max_negentropy_items_per_session:
|
||||||
|
int_env.(
|
||||||
|
"PARRHESIA_LIMITS_MAX_NEGENTROPY_ITEMS_PER_SESSION",
|
||||||
|
Keyword.get(limits_defaults, :max_negentropy_items_per_session, 50_000)
|
||||||
|
),
|
||||||
|
negentropy_id_list_threshold:
|
||||||
|
int_env.(
|
||||||
|
"PARRHESIA_LIMITS_NEGENTROPY_ID_LIST_THRESHOLD",
|
||||||
|
Keyword.get(limits_defaults, :negentropy_id_list_threshold, 32)
|
||||||
|
),
|
||||||
negentropy_session_idle_timeout_seconds:
|
negentropy_session_idle_timeout_seconds:
|
||||||
int_env.(
|
int_env.(
|
||||||
"PARRHESIA_LIMITS_NEGENTROPY_SESSION_IDLE_TIMEOUT_SECONDS",
|
"PARRHESIA_LIMITS_NEGENTROPY_SESSION_IDLE_TIMEOUT_SECONDS",
|
||||||
@@ -330,33 +437,180 @@ if config_env() == :prod do
|
|||||||
)
|
)
|
||||||
]
|
]
|
||||||
|
|
||||||
metrics = [
|
public_listener_defaults = Map.get(listeners_defaults, :public, %{})
|
||||||
enabled_on_main_endpoint:
|
public_bind_defaults = Map.get(public_listener_defaults, :bind, %{})
|
||||||
bool_env.(
|
public_transport_defaults = Map.get(public_listener_defaults, :transport, %{})
|
||||||
"PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT",
|
public_proxy_defaults = Map.get(public_listener_defaults, :proxy, %{})
|
||||||
Keyword.get(metrics_defaults, :enabled_on_main_endpoint, true)
|
public_network_defaults = Map.get(public_listener_defaults, :network, %{})
|
||||||
|
public_features_defaults = Map.get(public_listener_defaults, :features, %{})
|
||||||
|
public_auth_defaults = Map.get(public_listener_defaults, :auth, %{})
|
||||||
|
public_metrics_defaults = Map.get(public_features_defaults, :metrics, %{})
|
||||||
|
public_metrics_access_defaults = Map.get(public_metrics_defaults, :access, %{})
|
||||||
|
|
||||||
|
metrics_listener_defaults = Map.get(listeners_defaults, :metrics, %{})
|
||||||
|
metrics_listener_bind_defaults = Map.get(metrics_listener_defaults, :bind, %{})
|
||||||
|
metrics_listener_transport_defaults = Map.get(metrics_listener_defaults, :transport, %{})
|
||||||
|
metrics_listener_network_defaults = Map.get(metrics_listener_defaults, :network, %{})
|
||||||
|
|
||||||
|
metrics_listener_metrics_defaults =
|
||||||
|
metrics_listener_defaults
|
||||||
|
|> Map.get(:features, %{})
|
||||||
|
|> Map.get(:metrics, %{})
|
||||||
|
|
||||||
|
metrics_listener_metrics_access_defaults =
|
||||||
|
Map.get(metrics_listener_metrics_defaults, :access, %{})
|
||||||
|
|
||||||
|
public_listener = %{
|
||||||
|
enabled: Map.get(public_listener_defaults, :enabled, true),
|
||||||
|
bind: %{
|
||||||
|
ip: Map.get(public_bind_defaults, :ip, {0, 0, 0, 0}),
|
||||||
|
port: int_env.("PORT", Map.get(public_bind_defaults, :port, 4413))
|
||||||
|
},
|
||||||
|
max_connections:
|
||||||
|
infinity_or_int_env.(
|
||||||
|
"PARRHESIA_PUBLIC_MAX_CONNECTIONS",
|
||||||
|
Map.get(public_listener_defaults, :max_connections, 20_000)
|
||||||
),
|
),
|
||||||
public:
|
transport: %{
|
||||||
bool_env.(
|
scheme: Map.get(public_transport_defaults, :scheme, :http),
|
||||||
"PARRHESIA_METRICS_PUBLIC",
|
tls: Map.get(public_transport_defaults, :tls, %{mode: :disabled})
|
||||||
Keyword.get(metrics_defaults, :public, false)
|
},
|
||||||
),
|
proxy: %{
|
||||||
private_networks_only:
|
trusted_cidrs:
|
||||||
bool_env.(
|
csv_env.(
|
||||||
"PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY",
|
"PARRHESIA_TRUSTED_PROXIES",
|
||||||
Keyword.get(metrics_defaults, :private_networks_only, true)
|
Map.get(public_proxy_defaults, :trusted_cidrs, [])
|
||||||
),
|
),
|
||||||
allowed_cidrs:
|
honor_x_forwarded_for: Map.get(public_proxy_defaults, :honor_x_forwarded_for, true)
|
||||||
csv_env.(
|
},
|
||||||
"PARRHESIA_METRICS_ALLOWED_CIDRS",
|
network: %{
|
||||||
Keyword.get(metrics_defaults, :allowed_cidrs, [])
|
allow_cidrs: Map.get(public_network_defaults, :allow_cidrs, []),
|
||||||
),
|
private_networks_only: Map.get(public_network_defaults, :private_networks_only, false),
|
||||||
auth_token:
|
public: Map.get(public_network_defaults, :public, false),
|
||||||
string_env.(
|
allow_all: Map.get(public_network_defaults, :allow_all, true)
|
||||||
"PARRHESIA_METRICS_AUTH_TOKEN",
|
},
|
||||||
Keyword.get(metrics_defaults, :auth_token)
|
features: %{
|
||||||
|
nostr: %{
|
||||||
|
enabled: public_features_defaults |> Map.get(:nostr, %{}) |> Map.get(:enabled, true)
|
||||||
|
},
|
||||||
|
admin: %{
|
||||||
|
enabled: public_features_defaults |> Map.get(:admin, %{}) |> Map.get(:enabled, true)
|
||||||
|
},
|
||||||
|
metrics: %{
|
||||||
|
enabled:
|
||||||
|
bool_env.(
|
||||||
|
"PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT",
|
||||||
|
Map.get(public_metrics_defaults, :enabled, true)
|
||||||
|
),
|
||||||
|
auth_token:
|
||||||
|
string_env.(
|
||||||
|
"PARRHESIA_METRICS_AUTH_TOKEN",
|
||||||
|
Map.get(public_metrics_defaults, :auth_token)
|
||||||
|
),
|
||||||
|
access: %{
|
||||||
|
public:
|
||||||
|
bool_env.(
|
||||||
|
"PARRHESIA_METRICS_PUBLIC",
|
||||||
|
Map.get(public_metrics_access_defaults, :public, false)
|
||||||
|
),
|
||||||
|
private_networks_only:
|
||||||
|
bool_env.(
|
||||||
|
"PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY",
|
||||||
|
Map.get(public_metrics_access_defaults, :private_networks_only, true)
|
||||||
|
),
|
||||||
|
allow_cidrs:
|
||||||
|
csv_env.(
|
||||||
|
"PARRHESIA_METRICS_ALLOWED_CIDRS",
|
||||||
|
Map.get(public_metrics_access_defaults, :allow_cidrs, [])
|
||||||
|
),
|
||||||
|
allow_all: Map.get(public_metrics_access_defaults, :allow_all, true)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
auth: %{
|
||||||
|
nip42_required: Map.get(public_auth_defaults, :nip42_required, false),
|
||||||
|
nip98_required_for_admin:
|
||||||
|
bool_env.(
|
||||||
|
"PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED",
|
||||||
|
Map.get(public_auth_defaults, :nip98_required_for_admin, true)
|
||||||
|
)
|
||||||
|
},
|
||||||
|
baseline_acl: Map.get(public_listener_defaults, :baseline_acl, %{read: [], write: []})
|
||||||
|
}
|
||||||
|
|
||||||
|
listeners =
|
||||||
|
if Map.get(metrics_listener_defaults, :enabled, false) or
|
||||||
|
bool_env.("PARRHESIA_METRICS_ENDPOINT_ENABLED", false) do
|
||||||
|
Map.put(
|
||||||
|
%{public: public_listener},
|
||||||
|
:metrics,
|
||||||
|
%{
|
||||||
|
enabled: true,
|
||||||
|
bind: %{
|
||||||
|
ip: Map.get(metrics_listener_bind_defaults, :ip, {127, 0, 0, 1}),
|
||||||
|
port:
|
||||||
|
int_env.(
|
||||||
|
"PARRHESIA_METRICS_ENDPOINT_PORT",
|
||||||
|
Map.get(metrics_listener_bind_defaults, :port, 9568)
|
||||||
|
)
|
||||||
|
},
|
||||||
|
max_connections:
|
||||||
|
infinity_or_int_env.(
|
||||||
|
"PARRHESIA_METRICS_ENDPOINT_MAX_CONNECTIONS",
|
||||||
|
Map.get(metrics_listener_defaults, :max_connections, 1_024)
|
||||||
|
),
|
||||||
|
transport: %{
|
||||||
|
scheme: Map.get(metrics_listener_transport_defaults, :scheme, :http),
|
||||||
|
tls: Map.get(metrics_listener_transport_defaults, :tls, %{mode: :disabled})
|
||||||
|
},
|
||||||
|
network: %{
|
||||||
|
allow_cidrs: Map.get(metrics_listener_network_defaults, :allow_cidrs, []),
|
||||||
|
private_networks_only:
|
||||||
|
Map.get(metrics_listener_network_defaults, :private_networks_only, false),
|
||||||
|
public: Map.get(metrics_listener_network_defaults, :public, false),
|
||||||
|
allow_all: Map.get(metrics_listener_network_defaults, :allow_all, true)
|
||||||
|
},
|
||||||
|
features: %{
|
||||||
|
nostr: %{enabled: false},
|
||||||
|
admin: %{enabled: false},
|
||||||
|
metrics: %{
|
||||||
|
enabled: true,
|
||||||
|
auth_token:
|
||||||
|
string_env.(
|
||||||
|
"PARRHESIA_METRICS_AUTH_TOKEN",
|
||||||
|
Map.get(metrics_listener_metrics_defaults, :auth_token)
|
||||||
|
),
|
||||||
|
access: %{
|
||||||
|
public:
|
||||||
|
bool_env.(
|
||||||
|
"PARRHESIA_METRICS_PUBLIC",
|
||||||
|
Map.get(metrics_listener_metrics_access_defaults, :public, false)
|
||||||
|
),
|
||||||
|
private_networks_only:
|
||||||
|
bool_env.(
|
||||||
|
"PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY",
|
||||||
|
Map.get(
|
||||||
|
metrics_listener_metrics_access_defaults,
|
||||||
|
:private_networks_only,
|
||||||
|
true
|
||||||
|
)
|
||||||
|
),
|
||||||
|
allow_cidrs:
|
||||||
|
csv_env.(
|
||||||
|
"PARRHESIA_METRICS_ALLOWED_CIDRS",
|
||||||
|
Map.get(metrics_listener_metrics_access_defaults, :allow_cidrs, [])
|
||||||
|
),
|
||||||
|
allow_all: Map.get(metrics_listener_metrics_access_defaults, :allow_all, true)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
auth: %{nip42_required: false, nip98_required_for_admin: true},
|
||||||
|
baseline_acl: %{read: [], write: []}
|
||||||
|
}
|
||||||
)
|
)
|
||||||
]
|
else
|
||||||
|
%{public: public_listener}
|
||||||
|
end
|
||||||
|
|
||||||
retention = [
|
retention = [
|
||||||
check_interval_hours:
|
check_interval_hours:
|
||||||
@@ -387,11 +641,14 @@ if config_env() == :prod do
|
|||||||
]
|
]
|
||||||
|
|
||||||
features = [
|
features = [
|
||||||
|
verify_event_signatures_locked?:
|
||||||
|
Keyword.get(features_defaults, :verify_event_signatures_locked?, false),
|
||||||
verify_event_signatures:
|
verify_event_signatures:
|
||||||
bool_env.(
|
if Keyword.get(features_defaults, :verify_event_signatures_locked?, false) do
|
||||||
"PARRHESIA_FEATURES_VERIFY_EVENT_SIGNATURES",
|
true
|
||||||
|
else
|
||||||
Keyword.get(features_defaults, :verify_event_signatures, true)
|
Keyword.get(features_defaults, :verify_event_signatures, true)
|
||||||
),
|
end,
|
||||||
nip_45_count:
|
nip_45_count:
|
||||||
bool_env.(
|
bool_env.(
|
||||||
"PARRHESIA_FEATURES_NIP_45_COUNT",
|
"PARRHESIA_FEATURES_NIP_45_COUNT",
|
||||||
@@ -414,42 +671,91 @@ if config_env() == :prod do
|
|||||||
)
|
)
|
||||||
]
|
]
|
||||||
|
|
||||||
config :parrhesia, Parrhesia.Repo,
|
storage =
|
||||||
url: database_url,
|
case storage_backend do
|
||||||
pool_size: pool_size,
|
:postgres ->
|
||||||
queue_target: queue_target,
|
[
|
||||||
queue_interval: queue_interval
|
backend: :postgres,
|
||||||
|
events: Parrhesia.Storage.Adapters.Postgres.Events,
|
||||||
|
acl: Parrhesia.Storage.Adapters.Postgres.ACL,
|
||||||
|
moderation: Parrhesia.Storage.Adapters.Postgres.Moderation,
|
||||||
|
groups: Parrhesia.Storage.Adapters.Postgres.Groups,
|
||||||
|
admin: Parrhesia.Storage.Adapters.Postgres.Admin
|
||||||
|
]
|
||||||
|
|
||||||
config :parrhesia, Parrhesia.Web.Endpoint, port: int_env.("PORT", 4413)
|
:memory ->
|
||||||
|
[
|
||||||
|
backend: :memory,
|
||||||
|
events: Parrhesia.Storage.Adapters.Memory.Events,
|
||||||
|
acl: Parrhesia.Storage.Adapters.Memory.ACL,
|
||||||
|
moderation: Parrhesia.Storage.Adapters.Memory.Moderation,
|
||||||
|
groups: Parrhesia.Storage.Adapters.Memory.Groups,
|
||||||
|
admin: Parrhesia.Storage.Adapters.Memory.Admin
|
||||||
|
]
|
||||||
|
end
|
||||||
|
|
||||||
config :parrhesia, Parrhesia.Web.MetricsEndpoint,
|
if postgres_backend? do
|
||||||
enabled:
|
config :parrhesia, Parrhesia.Repo,
|
||||||
bool_env.(
|
url: database_url,
|
||||||
"PARRHESIA_METRICS_ENDPOINT_ENABLED",
|
pool_size: pool_size,
|
||||||
Keyword.get(metrics_endpoint_defaults, :enabled, false)
|
queue_target: queue_target,
|
||||||
),
|
queue_interval: queue_interval
|
||||||
ip:
|
|
||||||
ipv4_env.(
|
config :parrhesia, Parrhesia.ReadRepo,
|
||||||
"PARRHESIA_METRICS_ENDPOINT_IP",
|
url: database_url,
|
||||||
Keyword.get(metrics_endpoint_defaults, :ip, {127, 0, 0, 1})
|
pool_size: read_pool_size,
|
||||||
),
|
queue_target: read_queue_target,
|
||||||
port:
|
queue_interval: read_queue_interval
|
||||||
int_env.(
|
end
|
||||||
"PARRHESIA_METRICS_ENDPOINT_PORT",
|
|
||||||
Keyword.get(metrics_endpoint_defaults, :port, 9568)
|
|
||||||
)
|
|
||||||
|
|
||||||
config :parrhesia,
|
config :parrhesia,
|
||||||
|
database: [
|
||||||
|
separate_read_pool?: separate_read_pool?
|
||||||
|
],
|
||||||
relay_url: string_env.("PARRHESIA_RELAY_URL", relay_url_default),
|
relay_url: string_env.("PARRHESIA_RELAY_URL", relay_url_default),
|
||||||
|
metadata: [
|
||||||
|
name: Keyword.get(metadata_defaults, :name, "Parrhesia"),
|
||||||
|
version: Keyword.get(metadata_defaults, :version, "0.0.0"),
|
||||||
|
hide_version?:
|
||||||
|
bool_env.(
|
||||||
|
"PARRHESIA_METADATA_HIDE_VERSION",
|
||||||
|
Keyword.get(metadata_defaults, :hide_version?, true)
|
||||||
|
)
|
||||||
|
],
|
||||||
|
acl: [
|
||||||
|
protected_filters:
|
||||||
|
json_env.(
|
||||||
|
"PARRHESIA_ACL_PROTECTED_FILTERS",
|
||||||
|
Keyword.get(acl_defaults, :protected_filters, [])
|
||||||
|
)
|
||||||
|
],
|
||||||
|
identity: [
|
||||||
|
path: string_env.("PARRHESIA_IDENTITY_PATH", nil),
|
||||||
|
private_key: string_env.("PARRHESIA_IDENTITY_PRIVATE_KEY", nil)
|
||||||
|
],
|
||||||
|
sync: [
|
||||||
|
path: string_env.("PARRHESIA_SYNC_PATH", nil),
|
||||||
|
start_workers?:
|
||||||
|
bool_env.(
|
||||||
|
"PARRHESIA_SYNC_START_WORKERS",
|
||||||
|
Keyword.get(Application.get_env(:parrhesia, :sync, []), :start_workers?, true)
|
||||||
|
)
|
||||||
|
],
|
||||||
moderation_cache_enabled:
|
moderation_cache_enabled:
|
||||||
bool_env.("PARRHESIA_MODERATION_CACHE_ENABLED", moderation_cache_enabled_default),
|
bool_env.("PARRHESIA_MODERATION_CACHE_ENABLED", moderation_cache_enabled_default),
|
||||||
enable_expiration_worker:
|
enable_expiration_worker:
|
||||||
bool_env.("PARRHESIA_ENABLE_EXPIRATION_WORKER", enable_expiration_worker_default),
|
bool_env.("PARRHESIA_ENABLE_EXPIRATION_WORKER", enable_expiration_worker_default),
|
||||||
|
enable_partition_retention_worker:
|
||||||
|
bool_env.(
|
||||||
|
"PARRHESIA_ENABLE_PARTITION_RETENTION_WORKER",
|
||||||
|
enable_partition_retention_worker_default
|
||||||
|
),
|
||||||
|
listeners: listeners,
|
||||||
limits: limits,
|
limits: limits,
|
||||||
policies: policies,
|
policies: policies,
|
||||||
metrics: metrics,
|
|
||||||
retention: retention,
|
retention: retention,
|
||||||
features: features
|
features: features,
|
||||||
|
storage: storage
|
||||||
|
|
||||||
case System.get_env("PARRHESIA_EXTRA_CONFIG") do
|
case System.get_env("PARRHESIA_EXTRA_CONFIG") do
|
||||||
nil -> :ok
|
nil -> :ok
|
||||||
|
|||||||
@@ -8,13 +8,34 @@ test_endpoint_port =
|
|||||||
value -> String.to_integer(value)
|
value -> String.to_integer(value)
|
||||||
end
|
end
|
||||||
|
|
||||||
config :parrhesia, Parrhesia.Web.Endpoint,
|
config :parrhesia, :listeners,
|
||||||
port: test_endpoint_port,
|
public: %{
|
||||||
ip: {127, 0, 0, 1}
|
enabled: true,
|
||||||
|
bind: %{ip: {127, 0, 0, 1}, port: test_endpoint_port},
|
||||||
|
transport: %{scheme: :http, tls: %{mode: :disabled}},
|
||||||
|
proxy: %{trusted_cidrs: [], honor_x_forwarded_for: true},
|
||||||
|
network: %{allow_all: true},
|
||||||
|
features: %{
|
||||||
|
nostr: %{enabled: true},
|
||||||
|
admin: %{enabled: true},
|
||||||
|
metrics: %{enabled: true, access: %{private_networks_only: true}, auth_token: nil}
|
||||||
|
},
|
||||||
|
auth: %{nip42_required: false, nip98_required_for_admin: true},
|
||||||
|
baseline_acl: %{read: [], write: []}
|
||||||
|
}
|
||||||
|
|
||||||
config :parrhesia,
|
config :parrhesia,
|
||||||
enable_expiration_worker: false,
|
enable_expiration_worker: false,
|
||||||
moderation_cache_enabled: false,
|
moderation_cache_enabled: false,
|
||||||
|
nip66: [enabled: false],
|
||||||
|
identity: [
|
||||||
|
path: Path.join(System.tmp_dir!(), "parrhesia_test_identity.json"),
|
||||||
|
private_key: nil
|
||||||
|
],
|
||||||
|
sync: [
|
||||||
|
path: Path.join(System.tmp_dir!(), "parrhesia_test_sync.json"),
|
||||||
|
start_workers?: false
|
||||||
|
],
|
||||||
features: [verify_event_signatures: false]
|
features: [verify_event_signatures: false]
|
||||||
|
|
||||||
pg_host = System.get_env("PGHOST")
|
pg_host = System.get_env("PGHOST")
|
||||||
|
|||||||
@@ -10,7 +10,7 @@
|
|||||||
vips,
|
vips,
|
||||||
}: let
|
}: let
|
||||||
pname = "parrhesia";
|
pname = "parrhesia";
|
||||||
version = "0.4.0";
|
version = "0.6.0";
|
||||||
|
|
||||||
beamPackages = beam.packages.erlang_28.extend (
|
beamPackages = beam.packages.erlang_28.extend (
|
||||||
final: _prev: {
|
final: _prev: {
|
||||||
@@ -48,7 +48,7 @@
|
|||||||
beamPackages.fetchMixDeps {
|
beamPackages.fetchMixDeps {
|
||||||
pname = "${pname}-mix-deps";
|
pname = "${pname}-mix-deps";
|
||||||
inherit version src;
|
inherit version src;
|
||||||
hash = "sha256-I09Q2PG22lOrZjjXoq8Py3P3o5dgaz9LhKJSmP+/r6k=";
|
hash = "sha256-D69wuFnIChQzm1PmpIW+X/1sPpsIcDHe4V5fKmFeJ3k=";
|
||||||
}
|
}
|
||||||
else null;
|
else null;
|
||||||
|
|
||||||
|
|||||||
@@ -101,6 +101,8 @@ in {
|
|||||||
nostr-bench
|
nostr-bench
|
||||||
# Nostr reference servers
|
# Nostr reference servers
|
||||||
nostr-rs-relay
|
nostr-rs-relay
|
||||||
|
# Benchmark graph
|
||||||
|
gnuplot
|
||||||
]
|
]
|
||||||
++ lib.optionals pkgs.stdenv.hostPlatform.isx86_64 [
|
++ lib.optionals pkgs.stdenv.hostPlatform.isx86_64 [
|
||||||
strfry
|
strfry
|
||||||
|
|||||||
33
docs/ARCH.md
33
docs/ARCH.md
@@ -68,10 +68,10 @@ Notes:
|
|||||||
## 3) System architecture (high level)
|
## 3) System architecture (high level)
|
||||||
|
|
||||||
```text
|
```text
|
||||||
WS/HTTP Edge (Bandit/Plug)
|
Configured WS/HTTP Listeners (Bandit/Plug)
|
||||||
-> Protocol Decoder/Encoder
|
-> Protocol Decoder/Encoder
|
||||||
-> Command Router (EVENT/REQ/CLOSE/AUTH/COUNT/NEG-*)
|
-> Command Router (EVENT/REQ/CLOSE/AUTH/COUNT/NEG-*)
|
||||||
-> Policy Pipeline (validation, auth, ACL, PoW, NIP-70)
|
-> Policy Pipeline (listener baseline, validation, auth, ACL, PoW, NIP-70)
|
||||||
-> Event Service / Query Service
|
-> Event Service / Query Service
|
||||||
-> Storage Port (behavior)
|
-> Storage Port (behavior)
|
||||||
-> Postgres Adapter (Ecto)
|
-> Postgres Adapter (Ecto)
|
||||||
@@ -82,23 +82,36 @@ WS/HTTP Edge (Bandit/Plug)
|
|||||||
|
|
||||||
## 4) OTP supervision design
|
## 4) OTP supervision design
|
||||||
|
|
||||||
`Parrhesia.Application` children (top-level):
|
`Parrhesia.Runtime` children (top-level):
|
||||||
|
|
||||||
1. `Parrhesia.Telemetry` – metric definitions/reporters
|
1. `Parrhesia.Telemetry` – metric definitions/reporters
|
||||||
2. `Parrhesia.Config` – runtime config cache (ETS-backed)
|
2. `Parrhesia.ConnectionStats` – per-listener connection/subscription counters
|
||||||
3. `Parrhesia.Storage.Supervisor` – adapter processes (`Repo`, pools)
|
3. `Parrhesia.Config` – runtime config cache (ETS-backed)
|
||||||
4. `Parrhesia.Subscriptions.Supervisor` – subscription index + fanout workers
|
4. `Parrhesia.Web.EventIngestLimiter` – relay-wide event ingest rate limiter
|
||||||
5. `Parrhesia.Auth.Supervisor` – AUTH challenge/session tracking
|
5. `Parrhesia.Web.IPEventIngestLimiter` – per-IP event ingest rate limiter
|
||||||
6. `Parrhesia.Policy.Supervisor` – rate limiters / ACL caches
|
6. `Parrhesia.Storage.Supervisor` – adapter processes (`Repo`, pools)
|
||||||
7. `Parrhesia.Web.Endpoint` – WS + HTTP ingress
|
7. `Parrhesia.Subscriptions.Supervisor` – subscription index + fanout workers
|
||||||
8. `Parrhesia.Tasks.Supervisor` – background jobs (expiry purge, maintenance)
|
8. `Parrhesia.Auth.Supervisor` – AUTH challenge/session tracking
|
||||||
|
9. `Parrhesia.Sync.Supervisor` – outbound relay sync workers
|
||||||
|
10. `Parrhesia.Policy.Supervisor` – rate limiters / ACL caches
|
||||||
|
11. `Parrhesia.Web.Endpoint` – supervises configured WS + HTTP listeners
|
||||||
|
12. `Parrhesia.Tasks.Supervisor` – background jobs (expiry purge, maintenance)
|
||||||
|
|
||||||
Failure model:
|
Failure model:
|
||||||
|
|
||||||
- Connection failures are isolated per socket process.
|
- Connection failures are isolated per socket process.
|
||||||
|
- Listener failures are isolated per Bandit child and restarted independently.
|
||||||
- Storage outages degrade with explicit `OK/CLOSED` error prefixes (`error:`) per NIP-01.
|
- Storage outages degrade with explicit `OK/CLOSED` error prefixes (`error:`) per NIP-01.
|
||||||
- Non-critical workers are `:transient`; core infra is `:permanent`.
|
- Non-critical workers are `:transient`; core infra is `:permanent`.
|
||||||
|
|
||||||
|
Ingress model:
|
||||||
|
|
||||||
|
- Ingress is defined through `config :parrhesia, :listeners, ...`.
|
||||||
|
- Each listener has its own bind/transport settings, TLS mode, proxy trust, network allowlist, enabled features (`nostr`, `admin`, `metrics`), auth requirements, and baseline read/write ACL.
|
||||||
|
- Listeners can therefore expose different security postures, for example a public relay listener and a VPN-only sync-capable listener.
|
||||||
|
- TLS-capable listeners support direct server TLS, mutual TLS with optional client pin checks, and proxy-terminated TLS identity on explicitly trusted proxy hops.
|
||||||
|
- Certificate reload is currently implemented as admin-triggered listener restart from disk rather than background file watching.
|
||||||
|
|
||||||
## 5) Core runtime components
|
## 5) Core runtime components
|
||||||
|
|
||||||
### 5.1 Connection process
|
### 5.1 Connection process
|
||||||
|
|||||||
147
docs/LOCAL_API.md
Normal file
147
docs/LOCAL_API.md
Normal file
@@ -0,0 +1,147 @@
|
|||||||
|
# Parrhesia Local API
|
||||||
|
|
||||||
|
Parrhesia can run as a normal standalone relay application, but it also exposes a stable
|
||||||
|
in-process API for Elixir callers that want to embed the relay inside a larger OTP system.
|
||||||
|
|
||||||
|
This document describes that embedding surface. The runtime is still alpha, so treat the API
|
||||||
|
as usable but not yet frozen.
|
||||||
|
|
||||||
|
## What embedding means today
|
||||||
|
|
||||||
|
Embedding currently means:
|
||||||
|
|
||||||
|
- the host app adds `:parrhesia` as a dependency and OTP application
|
||||||
|
- the host app provides `config :parrhesia, ...` explicitly
|
||||||
|
- the host app migrates the Parrhesia database schema
|
||||||
|
- callers interact with the relay through `Parrhesia.API.*`
|
||||||
|
|
||||||
|
Current operational assumptions:
|
||||||
|
|
||||||
|
- Parrhesia runs one runtime per BEAM node
|
||||||
|
- core processes use global module names such as `Parrhesia.Config` and `Parrhesia.Web.Endpoint`
|
||||||
|
- the config defaults in this repo's `config/*.exs` are not imported automatically by a host app
|
||||||
|
|
||||||
|
If you want multiple isolated relay instances inside one VM, Parrhesia does not support that
|
||||||
|
cleanly yet.
|
||||||
|
|
||||||
|
## Minimal host setup
|
||||||
|
|
||||||
|
Add the dependency in your host app:
|
||||||
|
|
||||||
|
```elixir
|
||||||
|
defp deps do
|
||||||
|
[
|
||||||
|
{:parrhesia, path: "../parrhesia"}
|
||||||
|
]
|
||||||
|
end
|
||||||
|
```
|
||||||
|
|
||||||
|
Configure the runtime in your host app. At minimum you should carry over:
|
||||||
|
|
||||||
|
```elixir
|
||||||
|
import Config
|
||||||
|
|
||||||
|
config :postgrex, :json_library, JSON
|
||||||
|
|
||||||
|
config :parrhesia,
|
||||||
|
relay_url: "wss://relay.example.com/relay",
|
||||||
|
listeners: %{},
|
||||||
|
storage: [backend: :postgres]
|
||||||
|
|
||||||
|
config :parrhesia, Parrhesia.Repo,
|
||||||
|
url: System.fetch_env!("DATABASE_URL"),
|
||||||
|
pool_size: 10,
|
||||||
|
types: Parrhesia.PostgresTypes
|
||||||
|
|
||||||
|
config :parrhesia, Parrhesia.ReadRepo,
|
||||||
|
url: System.fetch_env!("DATABASE_URL"),
|
||||||
|
pool_size: 10,
|
||||||
|
types: Parrhesia.PostgresTypes
|
||||||
|
|
||||||
|
config :parrhesia, ecto_repos: [Parrhesia.Repo]
|
||||||
|
```
|
||||||
|
|
||||||
|
Notes:
|
||||||
|
|
||||||
|
- Set `listeners: %{}` if you only want the in-process API and no HTTP/WebSocket ingress.
|
||||||
|
- If you do want ingress, copy the listener shape from the config reference in
|
||||||
|
[README.md](../README.md).
|
||||||
|
- Production runtime overrides still use the `PARRHESIA_*` environment variables described in
|
||||||
|
[README.md](../README.md).
|
||||||
|
|
||||||
|
Migrate before serving traffic:
|
||||||
|
|
||||||
|
```elixir
|
||||||
|
Parrhesia.Release.migrate()
|
||||||
|
```
|
||||||
|
|
||||||
|
In development, `mix ecto.migrate -r Parrhesia.Repo` works too.
|
||||||
|
|
||||||
|
## Starting the runtime
|
||||||
|
|
||||||
|
In the common case, letting OTP start the `:parrhesia` application is enough.
|
||||||
|
|
||||||
|
If you need to start the runtime explicitly under your own supervision tree, use
|
||||||
|
`Parrhesia.Runtime`:
|
||||||
|
|
||||||
|
```elixir
|
||||||
|
children = [
|
||||||
|
{Parrhesia.Runtime, name: Parrhesia.Supervisor}
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
## Primary modules
|
||||||
|
|
||||||
|
The in-process surface is centered on these modules:
|
||||||
|
|
||||||
|
- `Parrhesia.API.Events` for publish, query, and count
|
||||||
|
- `Parrhesia.API.Stream` for REQ-like local subscriptions
|
||||||
|
- `Parrhesia.API.Auth` for event validation and NIP-98 auth parsing
|
||||||
|
- `Parrhesia.API.Admin` for management operations
|
||||||
|
- `Parrhesia.API.Identity` for relay-owned key management
|
||||||
|
- `Parrhesia.API.ACL` for protected sync ACLs
|
||||||
|
- `Parrhesia.API.Sync` for outbound relay sync management
|
||||||
|
|
||||||
|
Generated ExDoc groups these modules under `Embedded API`.
|
||||||
|
|
||||||
|
## Request context
|
||||||
|
|
||||||
|
Most calls take a `Parrhesia.API.RequestContext`. This carries authenticated pubkeys and
|
||||||
|
caller metadata through policy checks.
|
||||||
|
|
||||||
|
```elixir
|
||||||
|
%Parrhesia.API.RequestContext{
|
||||||
|
caller: :local,
|
||||||
|
authenticated_pubkeys: MapSet.new()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
If your host app has already authenticated a user or peer, put that pubkey into
|
||||||
|
`authenticated_pubkeys` before calling the API.
|
||||||
|
|
||||||
|
## Example
|
||||||
|
|
||||||
|
```elixir
|
||||||
|
alias Parrhesia.API.Events
|
||||||
|
alias Parrhesia.API.RequestContext
|
||||||
|
alias Parrhesia.API.Stream
|
||||||
|
|
||||||
|
context = %RequestContext{caller: :local}
|
||||||
|
|
||||||
|
{:ok, publish_result} = Events.publish(event, context: context)
|
||||||
|
{:ok, events} = Events.query([%{"kinds" => [1]}], context: context)
|
||||||
|
{:ok, ref} = Stream.subscribe(self(), "local-sub", [%{"kinds" => [1]}], context: context)
|
||||||
|
|
||||||
|
receive do
|
||||||
|
{:parrhesia, :event, ^ref, "local-sub", event} -> event
|
||||||
|
{:parrhesia, :eose, ^ref, "local-sub"} -> :ok
|
||||||
|
end
|
||||||
|
|
||||||
|
:ok = Stream.unsubscribe(ref)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Where to look next
|
||||||
|
|
||||||
|
- [README.md](../README.md) for setup and the full config reference
|
||||||
|
- [docs/SYNC.md](./SYNC.md) for relay-to-relay sync semantics
|
||||||
|
- module docs under `Parrhesia.API.*` for per-function behavior
|
||||||
354
docs/NIP-DBSYNC.md
Normal file
354
docs/NIP-DBSYNC.md
Normal file
@@ -0,0 +1,354 @@
|
|||||||
|
# NIP-DBSYNC — Minimal Mutation Events over Nostr
|
||||||
|
|
||||||
|
`draft` `optional`
|
||||||
|
|
||||||
|
Defines a minimal event format for publishing immutable application mutation events over Nostr.
|
||||||
|
|
||||||
|
This draft intentionally standardizes only the wire format for mutation transport. It does **not** standardize database replication strategy, conflict resolution, relay retention, or key derivation.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Abstract
|
||||||
|
|
||||||
|
This NIP defines one regular event kind, **5000**, for signed mutation events.
|
||||||
|
|
||||||
|
A mutation event identifies:
|
||||||
|
|
||||||
|
- the object namespace being mutated,
|
||||||
|
- the object identifier within that namespace,
|
||||||
|
- the mutation operation,
|
||||||
|
- an optional parent mutation event,
|
||||||
|
- an application-defined payload.
|
||||||
|
|
||||||
|
The purpose of this NIP is to make signed mutation logs portable across Nostr clients and relays without requiring relays to implement database-specific behavior.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Motivation
|
||||||
|
|
||||||
|
Many applications need a way to distribute signed state changes across multiple publishers, consumers, or services.
|
||||||
|
|
||||||
|
Today this can be done with private event kinds, but private schemas make cross-implementation interoperability harder than necessary. This NIP defines a small shared envelope for mutation events while leaving application-specific state semantics in the payload.
|
||||||
|
|
||||||
|
This NIP is intended for use cases such as:
|
||||||
|
|
||||||
|
- synchronizing object changes between cooperating services,
|
||||||
|
- publishing auditable mutation logs,
|
||||||
|
- replaying application events from ordinary Nostr relays,
|
||||||
|
- bridging non-Nostr systems into a Nostr-based event stream.
|
||||||
|
|
||||||
|
This NIP is **not** a consensus protocol. It does not provide:
|
||||||
|
|
||||||
|
- total ordering,
|
||||||
|
- transactional guarantees,
|
||||||
|
- global conflict resolution,
|
||||||
|
- authorization rules,
|
||||||
|
- guaranteed relay retention.
|
||||||
|
|
||||||
|
Applications that require those properties MUST define them separately.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Specification
|
||||||
|
|
||||||
|
### Event Kind
|
||||||
|
|
||||||
|
| Kind | Category | Name |
|
||||||
|
|------|----------|------|
|
||||||
|
| 5000 | Regular | Mutation |
|
||||||
|
|
||||||
|
Kind `5000` is a regular event. Relays that support this NIP MAY store it like any other regular event. (NOTE: kind `5000` falls inside the `5000`–`5999` range used by NIP-90 data-vending-machine job requests; implementers should confirm this collision is acceptable, or reserve a different kind, before deployment.)
|
||||||
|
|
||||||
|
This NIP does **not** require relays to:
|
||||||
|
|
||||||
|
- retain all historical events,
|
||||||
|
- index any specific tag beyond normal NIP-01 behavior,
|
||||||
|
- deliver events in causal or chronological order,
|
||||||
|
- detect or resolve conflicts.
|
||||||
|
|
||||||
|
Applications that depend on durable replay or custom indexing MUST choose relays whose policies satisfy those needs.
|
||||||
|
|
||||||
|
### Event Structure
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"id": "<32-byte lowercase hex>",
|
||||||
|
"pubkey": "<32-byte lowercase hex>",
|
||||||
|
"created_at": <unix timestamp in seconds, as a JSON number>,
|
||||||
|
"kind": 5000,
|
||||||
|
"tags": [
|
||||||
|
["r", "<resource namespace>"],
|
||||||
|
["i", "<object identifier>"],
|
||||||
|
["op", "<mutation operation>"],
|
||||||
|
["e", "<parent mutation event id>"]
|
||||||
|
],
|
||||||
|
"content": "<JSON-encoded application payload>",
|
||||||
|
"sig": "<64-byte lowercase hex>"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The `content` field is a JSON-encoded string. Its structure is defined below.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Tags
|
||||||
|
|
||||||
|
| Tag | Required | Description |
|
||||||
|
|-----|----------|-------------|
|
||||||
|
| `r` | Yes | Stable resource namespace for the mutated object type. Reverse-DNS style names are RECOMMENDED, for example `com.example.accounts.user`. |
|
||||||
|
| `i` | Yes | Opaque object identifier, unique within the `r` namespace. Consumers MUST treat this as a string. |
|
||||||
|
| `op` | Yes | Mutation operation. This NIP defines only `upsert` and `delete`. |
|
||||||
|
| `e` | No | Parent mutation event id, if the publisher wants to express ancestry. At most one `e` tag SHOULD be included in this version of the protocol. |
|
||||||
|
| `v` | No | Application payload schema version as a string. RECOMMENDED when the payload format may evolve over time. |
|
||||||
|
|
||||||
|
### Tag Rules
|
||||||
|
|
||||||
|
Publishers:
|
||||||
|
|
||||||
|
- MUST include exactly one `r` tag.
|
||||||
|
- MUST include exactly one `i` tag.
|
||||||
|
- MUST include exactly one `op` tag.
|
||||||
|
- MUST set `op` to either `upsert` or `delete`.
|
||||||
|
- SHOULD include at most one `e` tag.
|
||||||
|
- MAY include one `v` tag.
|
||||||
|
|
||||||
|
Consumers:
|
||||||
|
|
||||||
|
- MUST ignore unknown tags.
|
||||||
|
- MUST NOT assume tag ordering.
|
||||||
|
- MUST treat the `e` tag as an ancestry hint, not as proof of global ordering.
|
||||||
|
|
||||||
|
### Resource Namespaces
|
||||||
|
|
||||||
|
The `r` tag identifies an application-level object type.
|
||||||
|
|
||||||
|
This NIP does not define a global registry of resource namespaces. To reduce collisions, publishers SHOULD use a stable namespace they control, such as reverse-DNS notation.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
- `com.example.accounts.user`
|
||||||
|
- `org.example.inventory.item`
|
||||||
|
- `net.example.billing.invoice`
|
||||||
|
|
||||||
|
Publishers MUST document the payload schema associated with each resource namespace they use.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Content Payload
|
||||||
|
|
||||||
|
The `content` field MUST be a JSON-encoded object.
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"value": {},
|
||||||
|
"patch": "merge"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
| Field | Required | Description |
|
||||||
|
|-------|----------|-------------|
|
||||||
|
| `value` | Yes | Application-defined mutation payload. For `upsert`, this is the state fragment or full post-mutation state being published. For `delete`, this MAY be an empty object or a small reason object. |
|
||||||
|
| `patch` | No | How `value` should be interpreted. This NIP defines `merge` and `replace`. If omitted, consumers MUST treat the interpretation of `value` as application-defined. |
|
||||||
|
|
||||||
|
### Payload Rules
|
||||||
|
|
||||||
|
For `op = upsert`:
|
||||||
|
|
||||||
|
- `value` MUST be a JSON object.
|
||||||
|
- Publishers SHOULD publish either:
|
||||||
|
- a partial object intended to be merged, or
|
||||||
|
- a full post-mutation object intended to replace prior state.
|
||||||
|
- If the interpretation is important for interoperability, publishers SHOULD set `patch` to `merge` or `replace`.
|
||||||
|
|
||||||
|
For `op = delete`:
|
||||||
|
|
||||||
|
- `value` MAY be `{}`.
|
||||||
|
- Consumers MUST treat `delete` as an application-level tombstone signal.
|
||||||
|
- This NIP does not define whether deletion means hard delete, soft delete, archival, or hiding. Applications MUST define that separately.
|
||||||
|
|
||||||
|
### Serialization
|
||||||
|
|
||||||
|
All payload values MUST be JSON-serializable.
|
||||||
|
|
||||||
|
The following representations are RECOMMENDED:
|
||||||
|
|
||||||
|
| Type | Representation |
|
||||||
|
|------|----------------|
|
||||||
|
| Timestamp / datetime | ISO 8601 string |
|
||||||
|
| Decimal | String |
|
||||||
|
| Binary | Base64 string |
|
||||||
|
| Null | JSON `null` |
|
||||||
|
|
||||||
|
Publishers MAY define additional type mappings, but those mappings are application-specific and MUST be documented outside this NIP.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Ancestry and Replay
|
||||||
|
|
||||||
|
The optional `e` tag allows a publisher to indicate which prior mutation event it considered the parent when creating a new mutation.
|
||||||
|
|
||||||
|
This supports applications that want ancestry hints for:
|
||||||
|
|
||||||
|
- local conflict detection,
|
||||||
|
- replay ordering,
|
||||||
|
- branch inspection,
|
||||||
|
- audit tooling.
|
||||||
|
|
||||||
|
However:
|
||||||
|
|
||||||
|
- the `e` tag does **not** create a global ordering guarantee,
|
||||||
|
- relays are not required to deliver parents before children,
|
||||||
|
- consumers MUST be prepared to receive out-of-order events,
|
||||||
|
- consumers MAY buffer, defer, ignore, or immediately apply parent-missing events according to local policy.
|
||||||
|
|
||||||
|
This NIP does not define a merge event format.
|
||||||
|
|
||||||
|
This NIP does not define conflict resolution. If two valid mutation events for the same `(r, i)` object are concurrent or incompatible, consumers MUST resolve them using application-specific rules.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Authorization
|
||||||
|
|
||||||
|
This NIP does not define who is authorized to publish mutation events for a given resource or object.
|
||||||
|
|
||||||
|
Authorization is application-specific.
|
||||||
|
|
||||||
|
Consumers MUST NOT assume that a valid Nostr signature alone authorizes a mutation. Consumers MUST apply their own trust policy, which MAY include:
|
||||||
|
|
||||||
|
- explicit pubkey allowlists,
|
||||||
|
- per-resource ACLs,
|
||||||
|
- external capability documents,
|
||||||
|
- relay-level write restrictions,
|
||||||
|
- application-specific verification.
|
||||||
|
|
||||||
|
This NIP does not define custodial keys, deterministic key derivation, shared cluster secrets, or delegation schemes.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Relay Behavior
|
||||||
|
|
||||||
|
A relay implementing only NIP-01 remains compatible with this NIP.
|
||||||
|
|
||||||
|
No new message types are required beyond those already defined in NIP-01 (`EVENT`, `REQ`, and `CLOSE` from clients; `EVENT`, `EOSE`, `OK`, and `NOTICE` from relays).
|
||||||
|
|
||||||
|
Relays:
|
||||||
|
|
||||||
|
- MAY index the `r` and `i` tags using existing single-letter tag indexing conventions.
|
||||||
|
- MAY apply normal retention, rate-limit, and access-control policies.
|
||||||
|
- MAY reject events that are too large or otherwise violate local policy.
|
||||||
|
- MUST NOT be expected to validate application payload semantics.
|
||||||
|
|
||||||
|
Applications that require stronger guarantees, such as durable retention or strict admission control, MUST obtain those guarantees from relay policy or from a separate protocol profile.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Subscription Filters
|
||||||
|
|
||||||
|
This NIP works with ordinary NIP-01 filters.
|
||||||
|
|
||||||
|
### All mutations for one resource
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"kinds": [5000],
|
||||||
|
"#r": ["com.example.accounts.user"]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Mutation history for one object
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"kinds": [5000],
|
||||||
|
"#r": ["com.example.accounts.user"],
|
||||||
|
"#i": ["550e8400-e29b-41d4-a716-446655440000"]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Mutations from trusted authors
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"kinds": [5000],
|
||||||
|
"authors": [
|
||||||
|
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
|
||||||
|
"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Applications SHOULD prefer narrow subscriptions over broad network-wide firehoses.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### Upsert with parent
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"id": "1111111111111111111111111111111111111111111111111111111111111111",
|
||||||
|
"pubkey": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
|
||||||
|
"created_at": 1710500300,
|
||||||
|
"kind": 5000,
|
||||||
|
"tags": [
|
||||||
|
["r", "com.example.accounts.user"],
|
||||||
|
["i", "550e8400-e29b-41d4-a716-446655440000"],
|
||||||
|
["op", "upsert"],
|
||||||
|
["e", "0000000000000000000000000000000000000000000000000000000000000000"],
|
||||||
|
["v", "1"]
|
||||||
|
],
|
||||||
|
"content": "{\"value\":{\"email\":\"jane.doe@newdomain.com\",\"updated_at\":\"2025-03-15T14:35:00Z\"},\"patch\":\"merge\"}",
|
||||||
|
"sig": "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Delete tombstone
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"id": "2222222222222222222222222222222222222222222222222222222222222222",
|
||||||
|
"pubkey": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
|
||||||
|
"created_at": 1710500600,
|
||||||
|
"kind": 5000,
|
||||||
|
"tags": [
|
||||||
|
["r", "com.example.accounts.user"],
|
||||||
|
["i", "550e8400-e29b-41d4-a716-446655440000"],
|
||||||
|
["op", "delete"],
|
||||||
|
["e", "1111111111111111111111111111111111111111111111111111111111111111"],
|
||||||
|
["v", "1"]
|
||||||
|
],
|
||||||
|
"content": "{\"value\":{\"reason\":\"user_requested\"}}",
|
||||||
|
"sig": "dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Security Considerations
|
||||||
|
|
||||||
|
- **Unauthorized writes:** A valid signature proves authorship, not authorization. Consumers MUST enforce their own trust policy.
|
||||||
|
- **Replay:** Old valid events may be redelivered by relays or attackers. Consumers SHOULD deduplicate by event id and apply local replay policy.
|
||||||
|
- **Reordering:** Events may arrive out of order. Consumers MUST NOT treat `created_at` or `e` as a guaranteed total order.
|
||||||
|
- **Conflict flooding:** Multiple valid mutations may target the same object. Consumers SHOULD rate-limit, bound buffering, and define local conflict policy.
|
||||||
|
- **Sensitive data exposure:** Nostr events are typically widely replicable. Publishers SHOULD NOT put secrets or regulated data in mutation payloads unless they provide application-layer encryption.
|
||||||
|
- **Relay retention variance:** Some relays will prune history. Applications that depend on full replay MUST choose relays accordingly or maintain an external archive.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Extension Points
|
||||||
|
|
||||||
|
Future drafts or companion NIPs may define:
|
||||||
|
|
||||||
|
- snapshot events for faster bootstrap,
|
||||||
|
- object-head or checkpoint events,
|
||||||
|
- capability or delegation profiles for authorized writers,
|
||||||
|
- standardized conflict-resolution profiles for specific application classes.
|
||||||
|
|
||||||
|
Such extensions SHOULD remain optional and MUST NOT change the meaning of kind `5000` mutation events defined here.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## References
|
||||||
|
|
||||||
|
- [NIP-01](https://github.com/nostr-protocol/nips/blob/master/01.md) — Basic protocol flow description
|
||||||
417
docs/SYNC.md
Normal file
417
docs/SYNC.md
Normal file
@@ -0,0 +1,417 @@
|
|||||||
|
# Parrhesia Relay Sync
|
||||||
|
|
||||||
|
## 1. Purpose
|
||||||
|
|
||||||
|
This document defines the Parrhesia proposal for **relay-to-relay event synchronization**.
|
||||||
|
|
||||||
|
It is intentionally transport-focused:
|
||||||
|
|
||||||
|
- manage remote relay peers,
|
||||||
|
- catch up on matching events,
|
||||||
|
- keep a live stream open,
|
||||||
|
- expose health and basic stats.
|
||||||
|
|
||||||
|
It does **not** define application data semantics.
|
||||||
|
|
||||||
|
Parrhesia syncs Nostr events. Callers decide which events matter and how to apply them.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2. Boundary
|
||||||
|
|
||||||
|
### Parrhesia is responsible for
|
||||||
|
|
||||||
|
- storing and validating events,
|
||||||
|
- querying and streaming events,
|
||||||
|
- running outbound sync workers against remote relays,
|
||||||
|
- tracking peer configuration, worker health, and sync counters,
|
||||||
|
- exposing peer management through `Parrhesia.API.Sync`.
|
||||||
|
|
||||||
|
### Parrhesia is not responsible for
|
||||||
|
|
||||||
|
- resource mapping,
|
||||||
|
- trusted node allowlists for an app profile,
|
||||||
|
- mutation payload validation beyond normal event validation,
|
||||||
|
- conflict resolution,
|
||||||
|
- replay winner selection,
|
||||||
|
- database upsert/delete semantics.
|
||||||
|
|
||||||
|
For Tribes, those remain in `TRIBES-NOSTRSYNC` and `AshNostrSync`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3. Security Foundation
|
||||||
|
|
||||||
|
### Default posture
|
||||||
|
|
||||||
|
The baseline posture for sync traffic is:
|
||||||
|
|
||||||
|
- no access to sync events by default,
|
||||||
|
- no implicit trust from ordinary relay usage,
|
||||||
|
- no reliance on plaintext confidentiality from public relays.
|
||||||
|
|
||||||
|
For the first implementation, Parrhesia should protect sync data primarily with:
|
||||||
|
|
||||||
|
- authenticated server identities,
|
||||||
|
- ACL-gated read and write access,
|
||||||
|
- TLS with certificate pinning for outbound peers.
|
||||||
|
|
||||||
|
### Server identity
|
||||||
|
|
||||||
|
Parrhesia owns a low-level server identity used for relay-to-relay authentication.
|
||||||
|
|
||||||
|
This identity is separate from:
|
||||||
|
|
||||||
|
- TLS endpoint identity,
|
||||||
|
- application event author pubkeys.
|
||||||
|
|
||||||
|
Recommended model:
|
||||||
|
|
||||||
|
- Parrhesia has one local server-auth pubkey,
|
||||||
|
- sync peers authenticate as server-auth pubkeys,
|
||||||
|
- ACL grants are bound to those authenticated server-auth pubkeys,
|
||||||
|
- application-level writer trust remains outside Parrhesia.
|
||||||
|
|
||||||
|
Identity lifecycle:
|
||||||
|
|
||||||
|
1. use configured/imported key if provided,
|
||||||
|
2. otherwise use persisted local identity,
|
||||||
|
3. otherwise generate once during initial startup and persist it.
|
||||||
|
|
||||||
|
Private key export should not be supported.
|
||||||
|
|
||||||
|
### ACLs
|
||||||
|
|
||||||
|
Sync traffic should use a real ACL layer, not moderation allowlists.
|
||||||
|
|
||||||
|
Current implementation note:
|
||||||
|
|
||||||
|
- Parrhesia already has storage-backed moderation state such as `allowed_pubkeys` and `blocked_ips`,
|
||||||
|
- that is not the sync ACL model,
|
||||||
|
- sync protection must be enforced in the active websocket/query/count/negentropy/write path, not inferred from management tables alone.
|
||||||
|
|
||||||
|
Initial ACL model:
|
||||||
|
|
||||||
|
- principal: authenticated pubkey,
|
||||||
|
- capabilities: `sync_read`, `sync_write`,
|
||||||
|
- match: event/filter shape such as `kinds: [5000]` and namespace tags.
|
||||||
|
|
||||||
|
This is enough for now. We do **not** need a separate user ACL model and server ACL model yet.
|
||||||
|
|
||||||
|
A sync peer is simply an authenticated principal with sync capabilities.
|
||||||
|
|
||||||
|
### TLS pinning
|
||||||
|
|
||||||
|
Each outbound sync peer must include pinned TLS material.
|
||||||
|
|
||||||
|
Recommended pin type:
|
||||||
|
|
||||||
|
- SPKI SHA-256 pins
|
||||||
|
|
||||||
|
Multiple pins should be allowed to support certificate rotation.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4. Sync Model
|
||||||
|
|
||||||
|
Each configured sync server represents one outbound worker managed by Parrhesia.
|
||||||
|
|
||||||
|
Implementation note:
|
||||||
|
|
||||||
|
- Khatru-style relay designs benefit from explicit runtime stages,
|
||||||
|
- Parrhesia sync should therefore plug into clear internal phases for connection admission, auth, query/count, subscription, negentropy, publish, and fanout,
|
||||||
|
- this should stay a runtime refactor, not become extra sync semantics.
|
||||||
|
|
||||||
|
Minimum behavior:
|
||||||
|
|
||||||
|
1. connect to the remote relay,
|
||||||
|
2. run an initial catch-up query for the configured filters,
|
||||||
|
3. ingest received events into the local relay through the normal API path,
|
||||||
|
4. switch to a live subscription for the same filters,
|
||||||
|
5. reconnect with backoff when disconnected.
|
||||||
|
|
||||||
|
The worker treats filters as opaque Nostr filters. It does not interpret app payloads.
|
||||||
|
|
||||||
|
### Initial implementation mode
|
||||||
|
|
||||||
|
Initial implementation should use ordinary NIP-01 behavior:
|
||||||
|
|
||||||
|
- catch-up via `REQ`-style query,
|
||||||
|
- live updates via `REQ` subscription.
|
||||||
|
|
||||||
|
This is enough for Tribes and keeps the first version simple.
|
||||||
|
|
||||||
|
### NIP-77
|
||||||
|
|
||||||
|
Parrhesia now has a real reusable relay-side NIP-77 engine:
|
||||||
|
|
||||||
|
- proper `NEG-OPEN` / `NEG-MSG` / `NEG-CLOSE` / `NEG-ERR` framing,
|
||||||
|
- a reusable negentropy codec and reconciliation engine,
|
||||||
|
- bounded local `(created_at, id)` snapshot enumeration for matching filters,
|
||||||
|
- connection/session integration with policy checks and resource limits.
|
||||||
|
|
||||||
|
That means NIP-77 can be used for bandwidth-efficient catch-up between trusted nodes.
|
||||||
|
|
||||||
|
The first sync worker implementation may still default to ordinary NIP-01 catch-up plus live replay, because that path is operationally simpler and already matches the current Tribes sync profile. `:negentropy` can now be introduced as an optimization mode rather than a future prerequisite.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5. API Surface
|
||||||
|
|
||||||
|
Primary control plane:
|
||||||
|
|
||||||
|
- `Parrhesia.API.Identity.get/1`
|
||||||
|
- `Parrhesia.API.Identity.ensure/1`
|
||||||
|
- `Parrhesia.API.Identity.import/2`
|
||||||
|
- `Parrhesia.API.Identity.rotate/1`
|
||||||
|
- `Parrhesia.API.ACL.grant/2`
|
||||||
|
- `Parrhesia.API.ACL.revoke/2`
|
||||||
|
- `Parrhesia.API.ACL.list/1`
|
||||||
|
- `Parrhesia.API.Sync.put_server/2`
|
||||||
|
- `Parrhesia.API.Sync.remove_server/2`
|
||||||
|
- `Parrhesia.API.Sync.get_server/2`
|
||||||
|
- `Parrhesia.API.Sync.list_servers/1`
|
||||||
|
- `Parrhesia.API.Sync.start_server/2`
|
||||||
|
- `Parrhesia.API.Sync.stop_server/2`
|
||||||
|
- `Parrhesia.API.Sync.sync_now/2`
|
||||||
|
- `Parrhesia.API.Sync.server_stats/2`
|
||||||
|
- `Parrhesia.API.Sync.sync_stats/1`
|
||||||
|
- `Parrhesia.API.Sync.sync_health/1`
|
||||||
|
|
||||||
|
These APIs are in-process. HTTP management may expose them through `Parrhesia.API.Admin` or direct routing to `Parrhesia.API.Sync`.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6. Server Specification
|
||||||
|
|
||||||
|
`put_server/2` is an upsert.
|
||||||
|
|
||||||
|
Suggested server shape:
|
||||||
|
|
||||||
|
```elixir
|
||||||
|
%{
|
||||||
|
id: "tribes-primary",
|
||||||
|
url: "wss://relay-a.example/relay",
|
||||||
|
enabled?: true,
|
||||||
|
auth_pubkey: "<remote-server-auth-pubkey>",
|
||||||
|
mode: :req_stream,
|
||||||
|
filters: [
|
||||||
|
%{
|
||||||
|
"kinds" => [5000],
|
||||||
|
"authors" => ["<trusted-node-pubkey-a>", "<trusted-node-pubkey-b>"],
|
||||||
|
"#r" => ["tribes.accounts.user", "tribes.chat.tribe"]
|
||||||
|
}
|
||||||
|
],
|
||||||
|
overlap_window_seconds: 300,
|
||||||
|
auth: %{
|
||||||
|
type: :nip42
|
||||||
|
},
|
||||||
|
tls: %{
|
||||||
|
mode: :required,
|
||||||
|
hostname: "relay-a.example",
|
||||||
|
pins: [
|
||||||
|
%{type: :spki_sha256, value: "<pin-a>"},
|
||||||
|
%{type: :spki_sha256, value: "<pin-b>"}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
metadata: %{}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Required fields:
|
||||||
|
|
||||||
|
- `id`
|
||||||
|
- `url`
|
||||||
|
- `auth_pubkey`
|
||||||
|
- `filters`
|
||||||
|
- `tls`
|
||||||
|
|
||||||
|
Recommended fields:
|
||||||
|
|
||||||
|
- `enabled?`
|
||||||
|
- `mode`
|
||||||
|
- `overlap_window_seconds`
|
||||||
|
- `auth`
|
||||||
|
- `metadata`
|
||||||
|
|
||||||
|
Rules:
|
||||||
|
|
||||||
|
- `id` must be stable and unique locally.
|
||||||
|
- `url` is the remote relay websocket URL.
|
||||||
|
- `auth_pubkey` is the expected remote server-auth pubkey.
|
||||||
|
- `filters` must be valid NIP-01 filters.
|
||||||
|
- filters are owned by the caller; Parrhesia only validates filter shape.
|
||||||
|
- `mode` defaults to `:req_stream`.
|
||||||
|
- `tls.mode` defaults to `:required`.
|
||||||
|
- `tls.pins` must be non-empty for synced peers.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7. Runtime State
|
||||||
|
|
||||||
|
Each server should have both configuration and runtime status.
|
||||||
|
|
||||||
|
Suggested runtime fields:
|
||||||
|
|
||||||
|
```elixir
|
||||||
|
%{
|
||||||
|
server_id: "tribes-primary",
|
||||||
|
state: :running,
|
||||||
|
connected?: true,
|
||||||
|
last_connected_at: ~U[2026-03-16 10:00:00Z],
|
||||||
|
last_disconnected_at: nil,
|
||||||
|
last_sync_started_at: ~U[2026-03-16 10:00:00Z],
|
||||||
|
last_sync_completed_at: ~U[2026-03-16 10:00:02Z],
|
||||||
|
last_event_received_at: ~U[2026-03-16 10:12:45Z],
|
||||||
|
last_eose_at: ~U[2026-03-16 10:00:02Z],
|
||||||
|
reconnect_attempts: 0,
|
||||||
|
last_error: nil
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Parrhesia should keep this state generic. It is about relay sync health, not app state convergence.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 8. Stats and Health
|
||||||
|
|
||||||
|
### Per-server stats
|
||||||
|
|
||||||
|
`server_stats/2` should return basic counters such as:
|
||||||
|
|
||||||
|
- `events_received`
|
||||||
|
- `events_accepted`
|
||||||
|
- `events_duplicate`
|
||||||
|
- `events_rejected`
|
||||||
|
- `query_runs`
|
||||||
|
- `subscription_restarts`
|
||||||
|
- `reconnects`
|
||||||
|
- `last_remote_eose_at`
|
||||||
|
- `last_error`
|
||||||
|
|
||||||
|
### Aggregate sync stats
|
||||||
|
|
||||||
|
`sync_stats/1` should summarize:
|
||||||
|
|
||||||
|
- total configured servers,
|
||||||
|
- enabled servers,
|
||||||
|
- running servers,
|
||||||
|
- connected servers,
|
||||||
|
- aggregate event counters,
|
||||||
|
- aggregate reconnect count.
|
||||||
|
|
||||||
|
### Health
|
||||||
|
|
||||||
|
`sync_health/1` should be operator-oriented, for example:
|
||||||
|
|
||||||
|
```elixir
|
||||||
|
%{
|
||||||
|
"status" => "degraded",
|
||||||
|
"servers_total" => 3,
|
||||||
|
"servers_connected" => 2,
|
||||||
|
"servers_failing" => [
|
||||||
|
%{"id" => "tribes-secondary", "reason" => "connection_refused"}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
This is intentionally simple. It should answer “is sync working?” without pretending to prove application convergence.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 9. Event Ingest Path
|
||||||
|
|
||||||
|
Events received from a remote sync worker should enter Parrhesia through the same ingest path as any other accepted event.
|
||||||
|
|
||||||
|
That means:
|
||||||
|
|
||||||
|
1. validate the event,
|
||||||
|
2. run normal write policy,
|
||||||
|
3. persist or reject,
|
||||||
|
4. fan out locally,
|
||||||
|
5. rely on duplicate-event behavior for idempotency.
|
||||||
|
|
||||||
|
This avoids a second ingest path with divergent behavior.
|
||||||
|
|
||||||
|
Before normal event acceptance, the sync worker should enforce:
|
||||||
|
|
||||||
|
1. pinned TLS validation for the remote endpoint,
|
||||||
|
2. remote server-auth identity match,
|
||||||
|
3. local ACL grant permitting the peer to perform sync reads and/or writes.
|
||||||
|
|
||||||
|
The sync worker may attach request-context metadata such as:
|
||||||
|
|
||||||
|
```elixir
|
||||||
|
%Parrhesia.API.RequestContext{
|
||||||
|
caller: :sync,
|
||||||
|
peer_id: "tribes-primary",
|
||||||
|
metadata: %{sync_server_id: "tribes-primary"}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Recommended additional context when available:
|
||||||
|
|
||||||
|
- `remote_ip`
|
||||||
|
- `subscription_id`
|
||||||
|
|
||||||
|
This context is for telemetry, policy, and audit only. It must not become app sync semantics.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 10. Persistence
|
||||||
|
|
||||||
|
Parrhesia should persist enough sync control-plane state to survive restart:
|
||||||
|
|
||||||
|
- local server identity reference,
|
||||||
|
- configured ACL rules for sync principals,
|
||||||
|
- configured servers,
|
||||||
|
- whether a server is enabled,
|
||||||
|
- optional catch-up cursor or watermark per server,
|
||||||
|
- basic last-error and last-success markers.
|
||||||
|
|
||||||
|
Parrhesia does not need to persist application replay heads or winner state. That remains in the embedding application.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 11. Relationship to Current Features
|
||||||
|
|
||||||
|
### BEAM cluster fanout
|
||||||
|
|
||||||
|
`Parrhesia.Fanout.MultiNode` is a separate feature.
|
||||||
|
|
||||||
|
It provides best-effort live fanout between connected BEAM nodes. It is not remote relay sync and is not a substitute for `Parrhesia.API.Sync`.
|
||||||
|
|
||||||
|
### Management stats
|
||||||
|
|
||||||
|
Current admin `stats` is relay-global and minimal.
|
||||||
|
|
||||||
|
Sync adds a new dimension:
|
||||||
|
|
||||||
|
- peer config,
|
||||||
|
- worker state,
|
||||||
|
- per-peer counters,
|
||||||
|
- sync health summary.
|
||||||
|
|
||||||
|
That should be exposed without coupling it to app-specific sync semantics.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 12. Tribes Usage
|
||||||
|
|
||||||
|
For Tribes, `AshNostrSync` should be able to:
|
||||||
|
|
||||||
|
1. rely on Parrhesia’s local server identity,
|
||||||
|
2. register one or more remote relays with `Parrhesia.API.Sync.put_server/2`,
|
||||||
|
3. grant sync ACLs for trusted server-auth pubkeys,
|
||||||
|
4. provide narrow Nostr filters for `kind: 5000`,
|
||||||
|
5. observe sync health and counters,
|
||||||
|
6. consume events via the normal local Parrhesia ingest/query/stream surface.
|
||||||
|
|
||||||
|
Tribes should not need Parrhesia to know:
|
||||||
|
|
||||||
|
- what a resource namespace means,
|
||||||
|
- which node pubkeys are trusted for Tribes,
|
||||||
|
- how to resolve conflicts,
|
||||||
|
- how to apply an upsert or delete.
|
||||||
|
|
||||||
|
That is the key boundary.
|
||||||
@@ -1,70 +1,111 @@
|
|||||||
# Parrhesia Shared API + Local API Design (Option 1)
|
# Parrhesia Shared API
|
||||||
|
|
||||||
## 1) Goal
|
## 1. Goal
|
||||||
|
|
||||||
Expose a stable in-process API for embedding apps **and** refactor server transports to consume the same API.
|
Expose a stable in-process API that:
|
||||||
|
|
||||||
Desired end state:
|
- is used by WebSocket, HTTP management, local callers, and sync workers,
|
||||||
|
- keeps protocol and storage behavior in one place,
|
||||||
|
- stays neutral about application-level replication semantics.
|
||||||
|
|
||||||
- WebSocket server, HTTP management, and embedding app all call one shared core API.
|
This document defines the Parrhesia contract. It does **not** define Tribes or Ash sync behavior.
|
||||||
- Transport layers (WS/HTTP/local) only do framing, auth header extraction, and response encoding.
|
|
||||||
- Policy/storage/fanout/business semantics live in one place.
|
|
||||||
|
|
||||||
This keeps everything in the same dependency (`:parrhesia`) and avoids a second package.
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 2) Key architectural decision
|
## 2. Scope
|
||||||
|
|
||||||
Previous direction: `Parrhesia.Local.*` as primary public API.
|
### In scope
|
||||||
|
|
||||||
Updated direction (this doc):
|
- event ingest/query/count parity with WebSocket behavior,
|
||||||
|
- local subscription APIs,
|
||||||
|
- NIP-98 validation helpers,
|
||||||
|
- management/admin helpers,
|
||||||
|
- remote relay sync worker control and health reporting.
|
||||||
|
|
||||||
- Introduce **shared core API modules** under `Parrhesia.API.*`.
|
### Out of scope
|
||||||
- Make server code (`Parrhesia.Web.Connection`, management handlers) delegate to `Parrhesia.API.*`.
|
|
||||||
- Keep `Parrhesia.Local.*` as optional convenience wrappers over `Parrhesia.API.*`.
|
|
||||||
|
|
||||||
This ensures no divergence between local embedding behavior and websocket behavior.
|
- resource registration,
|
||||||
|
- trusted app writers,
|
||||||
|
- mutation payload semantics,
|
||||||
|
- conflict resolution,
|
||||||
|
- replay winner selection,
|
||||||
|
- Ash action mapping.
|
||||||
|
|
||||||
|
Those belong in app profiles such as `TRIBES-NOSTRSYNC`, not in Parrhesia.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 3) Layered design
|
## 3. Layering
|
||||||
|
|
||||||
```text
|
```text
|
||||||
Transport layer
|
Transport / embedding / background workers
|
||||||
- Parrhesia.Web.Connection (WS)
|
- Parrhesia.Web.Connection
|
||||||
- Parrhesia.Web.Management (HTTP)
|
- Parrhesia.Web.Management
|
||||||
- Parrhesia.Local.* wrappers (in-process)
|
- Parrhesia.Local.*
|
||||||
|
- Parrhesia.Sync.*
|
||||||
|
|
||||||
Shared API layer
|
Shared API
|
||||||
- Parrhesia.API.Auth
|
- Parrhesia.API.Auth
|
||||||
- Parrhesia.API.Events
|
- Parrhesia.API.Events
|
||||||
- Parrhesia.API.Stream (optional)
|
- Parrhesia.API.Stream
|
||||||
- Parrhesia.API.Admin (optional, for management methods)
|
- Parrhesia.API.Admin
|
||||||
|
- Parrhesia.API.Identity
|
||||||
|
- Parrhesia.API.ACL
|
||||||
|
- Parrhesia.API.Sync
|
||||||
|
|
||||||
Domain/runtime dependencies
|
Runtime internals
|
||||||
- Parrhesia.Policy.EventPolicy
|
- Parrhesia.Policy.EventPolicy
|
||||||
- Parrhesia.Storage.* adapters
|
- Parrhesia.Storage.*
|
||||||
- Parrhesia.Groups.Flow
|
- Parrhesia.Groups.Flow
|
||||||
- Parrhesia.Subscriptions.Index
|
- Parrhesia.Subscriptions.Index
|
||||||
- Parrhesia.Fanout.MultiNode
|
- Parrhesia.Fanout.MultiNode
|
||||||
- Parrhesia.Telemetry
|
- Parrhesia.Telemetry
|
||||||
```
|
```
|
||||||
|
|
||||||
Rule: all ingest/query/count decisions happen in `Parrhesia.API.Events`.
|
Rule: transport framing stays at the edge. Business decisions happen in `Parrhesia.API.*`.
|
||||||
|
|
||||||
|
Implementation note:
|
||||||
|
|
||||||
|
- the runtime beneath `Parrhesia.API.*` should expose clearer internal policy stages than it does today,
|
||||||
|
- at minimum: connection/auth, publish, query/count, stream subscription, negentropy, response shaping, and broadcast/fanout,
|
||||||
|
- these are internal runtime seams, not additional public APIs.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 4) Public module plan
|
## 4. Core Context
|
||||||
|
|
||||||
## 4.1 `Parrhesia.API.Auth`
|
```elixir
|
||||||
|
defmodule Parrhesia.API.RequestContext do
|
||||||
|
defstruct authenticated_pubkeys: MapSet.new(),
|
||||||
|
actor: nil,
|
||||||
|
caller: :local,
|
||||||
|
remote_ip: nil,
|
||||||
|
subscription_id: nil,
|
||||||
|
peer_id: nil,
|
||||||
|
metadata: %{}
|
||||||
|
end
|
||||||
|
```
|
||||||
|
|
||||||
|
`caller` is for telemetry and policy parity, for example `:websocket`, `:http`, `:local`, or `:sync`.
|
||||||
|
|
||||||
|
Recommended usage:
|
||||||
|
|
||||||
|
- `remote_ip` for connection-level policy and audit,
|
||||||
|
- `subscription_id` for query/stream/negentropy context,
|
||||||
|
- `peer_id` for trusted sync peer identity when applicable,
|
||||||
|
- `metadata` for transport-specific details that should not become API fields.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5. Public Modules
|
||||||
|
|
||||||
|
### 5.1 `Parrhesia.API.Auth`
|
||||||
|
|
||||||
Purpose:
|
Purpose:
|
||||||
- event validation helpers
|
|
||||||
- NIP-98 verification
|
|
||||||
- optional embedding account resolution hook
|
|
||||||
|
|
||||||
Proposed functions:
|
- event validation helpers,
|
||||||
|
- NIP-98 verification,
|
||||||
|
- optional embedding account resolution.
|
||||||
|
|
||||||
```elixir
|
```elixir
|
||||||
@spec validate_event(map()) :: :ok | {:error, term()}
|
@spec validate_event(map()) :: :ok | {:error, term()}
|
||||||
@@ -77,100 +118,65 @@ Proposed functions:
|
|||||||
{:ok, Parrhesia.API.Auth.Context.t()} | {:error, term()}
|
{:ok, Parrhesia.API.Auth.Context.t()} | {:error, term()}
|
||||||
```
|
```
|
||||||
|
|
||||||
`validate_nip98/4` options:
|
### 5.2 `Parrhesia.API.Events`
|
||||||
|
|
||||||
```elixir
|
|
||||||
account_resolver: (pubkey_hex :: String.t(), auth_event :: map() ->
|
|
||||||
{:ok, account :: term()} | {:error, term()})
|
|
||||||
```
|
|
||||||
|
|
||||||
Context struct:
|
|
||||||
|
|
||||||
```elixir
|
|
||||||
defmodule Parrhesia.API.Auth.Context do
|
|
||||||
@enforce_keys [:pubkey, :auth_event]
|
|
||||||
defstruct [:pubkey, :auth_event, :account, claims: %{}]
|
|
||||||
end
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 4.2 `Parrhesia.API.Events`
|
|
||||||
|
|
||||||
Purpose:
|
Purpose:
|
||||||
- canonical ingress/query/count API used by WS + local + HTTP integrations.
|
|
||||||
|
|
||||||
Proposed functions:
|
- canonical ingest/query/count path used by WS, HTTP, local callers, and sync workers.
|
||||||
|
|
||||||
```elixir
|
```elixir
|
||||||
@spec publish(map(), keyword()) :: {:ok, Parrhesia.API.Events.PublishResult.t()} | {:error, term()}
|
@spec publish(map(), keyword()) ::
|
||||||
@spec query([map()], keyword()) :: {:ok, [map()]} | {:error, term()}
|
{:ok, Parrhesia.API.Events.PublishResult.t()} | {:error, term()}
|
||||||
@spec count([map()], keyword()) :: {:ok, non_neg_integer() | map()} | {:error, term()}
|
|
||||||
|
@spec query([map()], keyword()) ::
|
||||||
|
{:ok, [map()]} | {:error, term()}
|
||||||
|
|
||||||
|
@spec count([map()], keyword()) ::
|
||||||
|
{:ok, non_neg_integer() | map()} | {:error, term()}
|
||||||
```
|
```
|
||||||
|
|
||||||
Request context:
|
Required options:
|
||||||
|
|
||||||
```elixir
|
- `:context` - `%Parrhesia.API.RequestContext{}`
|
||||||
defmodule Parrhesia.API.RequestContext do
|
|
||||||
defstruct authenticated_pubkeys: MapSet.new(),
|
|
||||||
actor: nil,
|
|
||||||
metadata: %{}
|
|
||||||
end
|
|
||||||
```
|
|
||||||
|
|
||||||
Publish result:
|
`publish/2` must preserve current `EVENT` semantics:
|
||||||
|
|
||||||
```elixir
|
1. size checks,
|
||||||
defmodule Parrhesia.API.Events.PublishResult do
|
2. `Protocol.validate_event/1`,
|
||||||
@enforce_keys [:event_id, :accepted, :message]
|
3. `EventPolicy.authorize_write/2`,
|
||||||
defstruct [:event_id, :accepted, :message]
|
4. group handling,
|
||||||
end
|
5. persistence or control-event path,
|
||||||
```
|
6. local plus multi-node fanout,
|
||||||
|
7. telemetry.
|
||||||
|
|
||||||
### Publish semantics (must match websocket EVENT)
|
Return shape mirrors `OK`:
|
||||||
|
|
||||||
Pipeline in `publish/2`:
|
|
||||||
|
|
||||||
1. frame/event size limits
|
|
||||||
2. `Parrhesia.Protocol.validate_event/1`
|
|
||||||
3. `Parrhesia.Policy.EventPolicy.authorize_write/2`
|
|
||||||
4. group handling (`Parrhesia.Groups.Flow.handle_event/1`)
|
|
||||||
5. persistence path (`put_event`, deletion, vanish, ephemeral rules)
|
|
||||||
6. fanout (local + multi-node)
|
|
||||||
7. telemetry emit
|
|
||||||
|
|
||||||
Return shape mirrors Nostr `OK` semantics:
|
|
||||||
|
|
||||||
```elixir
|
```elixir
|
||||||
{:ok, %PublishResult{event_id: id, accepted: true, message: "ok: event stored"}}
|
{:ok, %PublishResult{event_id: id, accepted: true, message: "ok: event stored"}}
|
||||||
{:ok, %PublishResult{event_id: id, accepted: false, message: "blocked: ..."}}
|
{:ok, %PublishResult{event_id: id, accepted: false, message: "blocked: ..."}}
|
||||||
```
|
```
|
||||||
|
|
||||||
### Query/count semantics (must match websocket REQ/COUNT)
|
`query/2` and `count/2` must preserve current `REQ` and `COUNT` behavior, including giftwrap restrictions and server-side filter validation.
|
||||||
|
|
||||||
`query/2` and `count/2`:
|
### 5.3 `Parrhesia.API.Stream`
|
||||||
|
|
||||||
1. validate filters
|
|
||||||
2. run read policy (`EventPolicy.authorize_read/2`)
|
|
||||||
3. call storage with `requester_pubkeys` from context
|
|
||||||
4. return ordered events/count payload
|
|
||||||
|
|
||||||
Giftwrap restrictions (`kind 1059`) must remain identical to websocket behavior.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 4.3 `Parrhesia.API.Stream` (optional but recommended)
|
|
||||||
|
|
||||||
Purpose:
|
Purpose:
|
||||||
- local in-process subscriptions using same subscription index/fanout model.
|
|
||||||
|
|
||||||
Proposed functions:
|
- in-process subscription surface with the same semantics as a WebSocket `REQ`.
|
||||||
|
|
||||||
|
This is **required** for embedding and sync consumers.
|
||||||
|
|
||||||
```elixir
|
```elixir
|
||||||
@spec subscribe(pid(), String.t(), [map()], keyword()) :: {:ok, reference()} | {:error, term()}
|
@spec subscribe(pid(), String.t(), [map()], keyword()) ::
|
||||||
|
{:ok, reference()} | {:error, term()}
|
||||||
|
|
||||||
@spec unsubscribe(reference()) :: :ok
|
@spec unsubscribe(reference()) :: :ok
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Required options:
|
||||||
|
|
||||||
|
- `:context` - `%Parrhesia.API.RequestContext{}`
|
||||||
|
|
||||||
Subscriber contract:
|
Subscriber contract:
|
||||||
|
|
||||||
```elixir
|
```elixir
|
||||||
@@ -179,220 +185,240 @@ Subscriber contract:
|
|||||||
{:parrhesia, :closed, ref, subscription_id, reason}
|
{:parrhesia, :closed, ref, subscription_id, reason}
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
`subscribe/4` must:
|
||||||
|
|
||||||
## 4.4 `Parrhesia.Local.*` wrappers
|
1. validate filters,
|
||||||
|
2. apply read policy,
|
||||||
|
3. emit initial catch-up events in the same order as `REQ`,
|
||||||
|
4. emit exactly one `:eose`,
|
||||||
|
5. register for live fanout until `unsubscribe/1`.
|
||||||
|
|
||||||
`Parrhesia.Local.*` remain as convenience API for embedding apps, implemented as thin wrappers:
|
This module does **not** know why a caller wants the stream.
|
||||||
|
|
||||||
- `Parrhesia.Local.Auth` -> delegates to `Parrhesia.API.Auth`
|
### 5.4 `Parrhesia.API.Admin`
|
||||||
- `Parrhesia.Local.Events` -> delegates to `Parrhesia.API.Events`
|
|
||||||
- `Parrhesia.Local.Stream` -> delegates to `Parrhesia.API.Stream`
|
|
||||||
- `Parrhesia.Local.Client` -> use-case helpers (posts + private messages)
|
|
||||||
|
|
||||||
No business logic in wrappers.
|
Purpose:
|
||||||
|
|
||||||
|
- stable in-process facade for management operations already exposed over HTTP.
|
||||||
|
|
||||||
|
```elixir
|
||||||
|
@spec execute(String.t() | atom(), map(), keyword()) :: {:ok, map()} | {:error, term()}
|
||||||
|
@spec stats(keyword()) :: {:ok, map()} | {:error, term()}
|
||||||
|
@spec health(keyword()) :: {:ok, map()} | {:error, term()}
|
||||||
|
@spec list_audit_logs(keyword()) :: {:ok, [map()]} | {:error, term()}
|
||||||
|
```
|
||||||
|
|
||||||
|
Baseline methods:
|
||||||
|
|
||||||
|
- `ping`
|
||||||
|
- `stats`
|
||||||
|
- `health`
|
||||||
|
- moderation methods already supported by the storage admin adapter
|
||||||
|
|
||||||
|
`stats/1` is relay-level and cheap. `health/1` is liveness/readiness-oriented and may include worker state.
|
||||||
|
|
||||||
|
`API.Admin` is the operator-facing umbrella for management. It may delegate domain-specific work to `API.Identity`, `API.ACL`, and `API.Sync`.
|
||||||
|
|
||||||
|
### 5.5 `Parrhesia.API.Identity`
|
||||||
|
|
||||||
|
Purpose:
|
||||||
|
|
||||||
|
- manage Parrhesia-owned server identity,
|
||||||
|
- expose public identity metadata,
|
||||||
|
- support explicit import and rotation,
|
||||||
|
- keep private key material internal.
|
||||||
|
|
||||||
|
Parrhesia owns a low-level server identity used for relay-to-relay auth and other transport-local security features.
|
||||||
|
|
||||||
|
```elixir
|
||||||
|
@spec get(keyword()) :: {:ok, map()} | {:error, term()}
|
||||||
|
@spec ensure(keyword()) :: {:ok, map()} | {:error, term()}
|
||||||
|
@spec import(map(), keyword()) :: {:ok, map()} | {:error, term()}
|
||||||
|
@spec rotate(keyword()) :: {:ok, map()} | {:error, term()}
|
||||||
|
@spec sign_event(map(), keyword()) :: {:ok, map()} | {:error, term()}
|
||||||
|
```
|
||||||
|
|
||||||
|
Rules:
|
||||||
|
|
||||||
|
- private key material must never be returned by API,
|
||||||
|
- production deployments should be able to import a configured key,
|
||||||
|
- local/dev deployments may generate on first init if none exists,
|
||||||
|
- identity creation should be eager and deterministic, not lazy on first sync use.
|
||||||
|
|
||||||
|
Recommended boot order:
|
||||||
|
|
||||||
|
1. configured/imported key,
|
||||||
|
2. persisted local identity,
|
||||||
|
3. generate once and persist.
|
||||||
|
|
||||||
|
### 5.6 `Parrhesia.API.ACL`
|
||||||
|
|
||||||
|
Purpose:
|
||||||
|
|
||||||
|
- enforce event/filter ACLs for authenticated principals,
|
||||||
|
- support default-deny sync visibility,
|
||||||
|
- allow dynamic grants for trusted sync peers.
|
||||||
|
|
||||||
|
This is a real authorization layer, not a reuse of moderation allowlists.
|
||||||
|
|
||||||
|
Current implementation note:
|
||||||
|
|
||||||
|
- Parrhesia already has storage-backed moderation presence tables such as `allowed_pubkeys` and `blocked_ips`,
|
||||||
|
- those are not sufficient for sync ACLs,
|
||||||
|
- the new ACL layer must be enforced directly in the active read/write/query/negentropy path, not only through management tables.
|
||||||
|
|
||||||
|
```elixir
|
||||||
|
@spec grant(map(), keyword()) :: :ok | {:error, term()}
|
||||||
|
@spec revoke(map(), keyword()) :: :ok | {:error, term()}
|
||||||
|
@spec list(keyword()) :: {:ok, [map()]} | {:error, term()}
|
||||||
|
@spec check(atom(), map(), keyword()) :: :ok | {:error, term()}
|
||||||
|
```
|
||||||
|
|
||||||
|
Suggested rule shape:
|
||||||
|
|
||||||
|
```elixir
|
||||||
|
%{
|
||||||
|
principal_type: :pubkey,
|
||||||
|
principal: "<server-auth-pubkey>",
|
||||||
|
capability: :sync_read,
|
||||||
|
match: %{
|
||||||
|
"kinds" => [5000],
|
||||||
|
"#r" => ["tribes.accounts.user", "tribes.chat.tribe"]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
For the first implementation, principals should be authenticated pubkeys only.
|
||||||
|
|
||||||
|
We do **not** need a separate user-vs-server ACL model yet. A sync peer is simply a principal with sync capabilities.
|
||||||
|
|
||||||
|
Initial required capabilities:
|
||||||
|
|
||||||
|
- `:sync_read`
|
||||||
|
- `:sync_write`
|
||||||
|
|
||||||
|
Recommended baseline:
|
||||||
|
|
||||||
|
- ordinary events follow existing relay behavior,
|
||||||
|
- sync traffic is default-deny,
|
||||||
|
- access is lifted only by explicit ACL grants for authenticated server pubkeys.
|
||||||
|
|
||||||
|
### 5.7 `Parrhesia.API.Sync`
|
||||||
|
|
||||||
|
Purpose:
|
||||||
|
|
||||||
|
- manage remote relay sync workers without embedding app-specific replication semantics.
|
||||||
|
|
||||||
|
Parrhesia syncs **events**, not records.
|
||||||
|
|
||||||
|
```elixir
|
||||||
|
@spec put_server(map(), keyword()) ::
|
||||||
|
{:ok, Parrhesia.API.Sync.Server.t()} | {:error, term()}
|
||||||
|
|
||||||
|
@spec remove_server(String.t(), keyword()) :: :ok | {:error, term()}
|
||||||
|
@spec get_server(String.t(), keyword()) ::
|
||||||
|
{:ok, Parrhesia.API.Sync.Server.t()} | :error | {:error, term()}
|
||||||
|
|
||||||
|
@spec list_servers(keyword()) ::
|
||||||
|
{:ok, [Parrhesia.API.Sync.Server.t()]} | {:error, term()}
|
||||||
|
|
||||||
|
@spec start_server(String.t(), keyword()) :: :ok | {:error, term()}
|
||||||
|
@spec stop_server(String.t(), keyword()) :: :ok | {:error, term()}
|
||||||
|
@spec sync_now(String.t(), keyword()) :: :ok | {:error, term()}
|
||||||
|
|
||||||
|
@spec server_stats(String.t(), keyword()) ::
|
||||||
|
{:ok, map()} | :error | {:error, term()}
|
||||||
|
|
||||||
|
@spec sync_stats(keyword()) :: {:ok, map()} | {:error, term()}
|
||||||
|
@spec sync_health(keyword()) :: {:ok, map()} | {:error, term()}
|
||||||
|
```
|
||||||
|
|
||||||
|
`put_server/2` is upsert-style. It covers both add and update.
|
||||||
|
|
||||||
|
Minimum server shape:
|
||||||
|
|
||||||
|
```elixir
|
||||||
|
%{
|
||||||
|
id: "tribes-a",
|
||||||
|
url: "wss://relay-a.example/relay",
|
||||||
|
enabled?: true,
|
||||||
|
auth_pubkey: "<remote-server-auth-pubkey>",
|
||||||
|
filters: [%{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}],
|
||||||
|
mode: :req_stream,
|
||||||
|
auth: %{type: :nip42},
|
||||||
|
tls: %{
|
||||||
|
mode: :required,
|
||||||
|
hostname: "relay-a.example",
|
||||||
|
pins: [
|
||||||
|
%{type: :spki_sha256, value: "<base64-sha256-spki-pin>"}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Important constraints:
|
||||||
|
|
||||||
|
- filters are caller-provided and opaque to Parrhesia,
|
||||||
|
- Parrhesia does not inspect `kind: 5000` payload semantics,
|
||||||
|
- Parrhesia may persist peer config and runtime counters,
|
||||||
|
- Parrhesia may reconnect and resume catch-up using generic event cursors,
|
||||||
|
- Parrhesia must expose worker health and basic counters,
|
||||||
|
- remote relay TLS pinning is required,
|
||||||
|
- sync peer auth is bound to a server-auth pubkey, not inferred from event author pubkeys.
|
||||||
|
- sync enforcement should reuse the same runtime policy stages as ordinary websocket traffic rather than inventing a parallel trust path.
|
||||||
|
|
||||||
|
Server identity model:
|
||||||
|
|
||||||
|
- Parrhesia owns its local server-auth identity via `API.Identity`,
|
||||||
|
- peer config declares the expected remote server-auth pubkey,
|
||||||
|
- ACL grants are bound to authenticated server-auth pubkeys,
|
||||||
|
- event author pubkeys remain a separate application concern.
|
||||||
|
|
||||||
|
Initial mode should be `:req_stream`:
|
||||||
|
|
||||||
|
1. run catch-up with `API.Events.query/2`-equivalent client behavior against the remote relay,
|
||||||
|
2. switch to a live subscription,
|
||||||
|
3. ingest received events through local `API.Events.publish/2`.
|
||||||
|
|
||||||
|
Future optimization:
|
||||||
|
|
||||||
|
- `:negentropy` may be added as an optimization mode on top of the simpler `:req_stream` baseline.
|
||||||
|
- Parrhesia now has a reusable NIP-77 engine, but a sync worker does not need to depend on it for the first implementation.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 5) Server integration plan (critical)
|
## 6. Server Integration
|
||||||
|
|
||||||
## 5.1 WebSocket (`Parrhesia.Web.Connection`)
|
### WebSocket
|
||||||
|
|
||||||
After decode:
|
|
||||||
- `EVENT` -> `Parrhesia.API.Events.publish/2`
|
- `EVENT` -> `Parrhesia.API.Events.publish/2`
|
||||||
- `REQ` -> `Parrhesia.API.Events.query/2`
|
- `REQ` -> `Parrhesia.API.Stream.subscribe/4`
|
||||||
- `COUNT` -> `Parrhesia.API.Events.count/2`
|
- `COUNT` -> `Parrhesia.API.Events.count/2`
|
||||||
- `AUTH` keep transport-specific challenge/session flow, but can use `API.Auth.validate_event/1` internally
|
- `AUTH` stays connection-specific, but validation helpers may move to `API.Auth`
|
||||||
|
- `NEG-*` maps to the reusable NIP-77 engine and remains exposed through the websocket transport boundary
|
||||||
|
|
||||||
WebSocket keeps responsibility for:
|
### HTTP management
|
||||||
- websocket framing
|
|
||||||
- subscription lifecycle per connection
|
|
||||||
- AUTH challenge rotation protocol frames
|
|
||||||
|
|
||||||
## 5.2 HTTP management (`Parrhesia.Web.Management`)
|
- NIP-98 validation via `Parrhesia.API.Auth.validate_nip98/3`
|
||||||
|
- management methods via `Parrhesia.API.Admin`
|
||||||
|
- sync peer CRUD and health endpoints may delegate to `Parrhesia.API.Sync`
|
||||||
|
- identity and ACL management may delegate to `API.Identity` and `API.ACL`
|
||||||
|
|
||||||
- NIP-98 header validation via `Parrhesia.API.Auth.validate_nip98/3`
|
### Local wrappers
|
||||||
- command execution via `Parrhesia.API.Admin` (or existing storage admin adapter via API facade)
|
|
||||||
|
`Parrhesia.Local.*` remain thin delegates over `Parrhesia.API.*`.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 6) High-level client helpers for embedding app use case
|
## 7. Relationship to Sync Profiles
|
||||||
|
|
||||||
These helpers are optional and live in `Parrhesia.Local.Client`.
|
This document is intentionally lower-level than `TRIBES-NOSTRSYNC` and `SYNC_DB.md`.
|
||||||
|
|
||||||
## 6.1 Public posts
|
Those documents may require:
|
||||||
|
|
||||||
```elixir
|
- `Parrhesia.API.Events.publish/2`
|
||||||
@spec publish_post(Parrhesia.API.Auth.Context.t(), String.t(), keyword()) ::
|
- `Parrhesia.API.Events.query/2`
|
||||||
{:ok, Parrhesia.API.Events.PublishResult.t()} | {:error, term()}
|
- `Parrhesia.API.Stream.subscribe/4`
|
||||||
|
- `Parrhesia.API.Sync.*`
|
||||||
|
|
||||||
@spec list_posts(keyword()) :: {:ok, [map()]} | {:error, term()}
|
But they must not move application conflict rules or payload semantics into Parrhesia.
|
||||||
@spec stream_posts(pid(), keyword()) :: {:ok, reference()} | {:error, term()}
|
|
||||||
```
|
|
||||||
|
|
||||||
`publish_post/3` options:
|
|
||||||
- `:tags`
|
|
||||||
- `:created_at`
|
|
||||||
- `:signer` callback (required unless fully signed event provided)
|
|
||||||
|
|
||||||
Signer contract:
|
|
||||||
|
|
||||||
```elixir
|
|
||||||
(unsigned_event_map -> {:ok, signed_event_map} | {:error, term()})
|
|
||||||
```
|
|
||||||
|
|
||||||
Parrhesia does not store or manage private keys.
|
|
||||||
|
|
||||||
## 6.2 Private messages (giftwrap kind 1059)
|
|
||||||
|
|
||||||
```elixir
|
|
||||||
@spec send_private_message(
|
|
||||||
Parrhesia.API.Auth.Context.t(),
|
|
||||||
recipient_pubkey :: String.t(),
|
|
||||||
encrypted_payload :: String.t(),
|
|
||||||
keyword()
|
|
||||||
) :: {:ok, Parrhesia.API.Events.PublishResult.t()} | {:error, term()}
|
|
||||||
|
|
||||||
@spec inbox(Parrhesia.API.Auth.Context.t(), keyword()) :: {:ok, [map()]} | {:error, term()}
|
|
||||||
@spec stream_inbox(pid(), Parrhesia.API.Auth.Context.t(), keyword()) :: {:ok, reference()} | {:error, term()}
|
|
||||||
```
|
|
||||||
|
|
||||||
Behavior:
|
|
||||||
- `send_private_message/4` builds event template with kind `1059` and `p` tag.
|
|
||||||
- host signer signs template.
|
|
||||||
- publish through `API.Events.publish/2`.
|
|
||||||
- `inbox/2` queries `%{"kinds" => [1059], "#p" => [auth.pubkey]}` with authenticated context.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 7) Error model
|
|
||||||
|
|
||||||
Shared API should normalize output regardless of transport.
|
|
||||||
|
|
||||||
Guideline:
|
|
||||||
- protocol/policy rejection -> `{:ok, %{accepted: false, message: "..."}}`
|
|
||||||
- runtime/system failure -> `{:error, term()}`
|
|
||||||
|
|
||||||
Common reason mapping:
|
|
||||||
|
|
||||||
| Reason | Message prefix |
|
|
||||||
|---|---|
|
|
||||||
| `:auth_required` | `auth-required:` |
|
|
||||||
| `:restricted_giftwrap` | `restricted:` |
|
|
||||||
| `:invalid_event` | `invalid:` |
|
|
||||||
| `:duplicate_event` | `duplicate:` |
|
|
||||||
| `:event_rate_limited` | `rate-limited:` |
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 8) Telemetry
|
|
||||||
|
|
||||||
Emit shared events in API layer (not transport-specific):
|
|
||||||
|
|
||||||
- `[:parrhesia, :api, :publish, :stop]`
|
|
||||||
- `[:parrhesia, :api, :query, :stop]`
|
|
||||||
- `[:parrhesia, :api, :count, :stop]`
|
|
||||||
- `[:parrhesia, :api, :auth, :stop]`
|
|
||||||
|
|
||||||
Metadata:
|
|
||||||
- `traffic_class`
|
|
||||||
- `caller` (`:websocket | :http | :local`)
|
|
||||||
- optional `account_present?`
|
|
||||||
|
|
||||||
Transport-level telemetry can remain separate where needed.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 9) Refactor sequence
|
|
||||||
|
|
||||||
### Phase 1: Extract shared API
|
|
||||||
1. Create `Parrhesia.API.Events` with publish/query/count from current `Web.Connection` paths.
|
|
||||||
2. Create `Parrhesia.API.Auth` wrappers for NIP-98/event validation.
|
|
||||||
3. Add API-level tests.
|
|
||||||
|
|
||||||
### Phase 2: Migrate transports
|
|
||||||
1. Update `Parrhesia.Web.Connection` to delegate publish/query/count to `API.Events`.
|
|
||||||
2. Update `Parrhesia.Web.Management` to use `API.Auth`.
|
|
||||||
3. Keep behavior unchanged.
|
|
||||||
|
|
||||||
### Phase 3: Add local wrappers/helpers
|
|
||||||
1. Implement `Parrhesia.Local.Auth/Events/Stream` as thin delegates.
|
|
||||||
2. Add `Parrhesia.Local.Client` post/inbox/send helpers.
|
|
||||||
3. Add embedding documentation.
|
|
||||||
|
|
||||||
### Phase 4: Lock parity
|
|
||||||
1. Add parity tests: WS vs Local API for same inputs and policy outcomes.
|
|
||||||
2. Add property tests for query/count equivalence where feasible.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 10) Testing requirements
|
|
||||||
|
|
||||||
1. **Transport parity tests**
|
|
||||||
- Same signed event via WS and API => same accepted/message semantics.
|
|
||||||
2. **Policy parity tests**
|
|
||||||
- Giftwrap visibility and auth-required behavior identical across WS/API/local.
|
|
||||||
3. **Auth tests**
|
|
||||||
- NIP-98 success/failure + account resolver success/failure.
|
|
||||||
4. **Fanout tests**
|
|
||||||
- publish via API reaches local stream subscribers and WS subscribers.
|
|
||||||
5. **Failure tests**
|
|
||||||
- storage failures surface deterministic errors in all transports.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 11) Backwards compatibility
|
|
||||||
|
|
||||||
- No breaking change to websocket protocol.
|
|
||||||
- No breaking change to management endpoint contract.
|
|
||||||
- New API modules are additive.
|
|
||||||
- Existing apps can ignore local API entirely.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 12) Embedding example flow
|
|
||||||
|
|
||||||
### 12.1 Login/auth
|
|
||||||
|
|
||||||
```elixir
|
|
||||||
with {:ok, auth} <- Parrhesia.API.Auth.validate_nip98(header, method, url,
|
|
||||||
account_resolver: &MyApp.Accounts.resolve_nostr_pubkey/2
|
|
||||||
) do
|
|
||||||
# use auth.pubkey/auth.account in host session
|
|
||||||
end
|
|
||||||
```
|
|
||||||
|
|
||||||
### 12.2 Post publish
|
|
||||||
|
|
||||||
```elixir
|
|
||||||
Parrhesia.Local.Client.publish_post(auth, "hello", signer: &MyApp.NostrSigner.sign/1)
|
|
||||||
```
|
|
||||||
|
|
||||||
### 12.3 Private message
|
|
||||||
|
|
||||||
```elixir
|
|
||||||
Parrhesia.Local.Client.send_private_message(
|
|
||||||
auth,
|
|
||||||
recipient_pubkey,
|
|
||||||
encrypted_payload,
|
|
||||||
signer: &MyApp.NostrSigner.sign/1
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
### 12.4 Inbox
|
|
||||||
|
|
||||||
```elixir
|
|
||||||
Parrhesia.Local.Client.inbox(auth, limit: 100)
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
## 13) Summary
|
|
||||||
|
|
||||||
Yes, this can and should be extracted into a shared API module. The server should consume it too.
|
|
||||||
|
|
||||||
That gives:
|
|
||||||
- one canonical behavior path,
|
|
||||||
- cleaner embedding,
|
|
||||||
- easier testing,
|
|
||||||
- lower long-term maintenance cost.
|
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
@@ -45,7 +45,7 @@
|
|||||||
|
|
||||||
config = {
|
config = {
|
||||||
Entrypoint = ["${parrhesia}/bin/parrhesia"];
|
Entrypoint = ["${parrhesia}/bin/parrhesia"];
|
||||||
Cmd = ["foreground"];
|
Cmd = ["start"];
|
||||||
ExposedPorts = {
|
ExposedPorts = {
|
||||||
"4413/tcp" = {};
|
"4413/tcp" = {};
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -1,17 +1,27 @@
|
|||||||
defmodule Parrhesia do
|
defmodule Parrhesia do
|
||||||
@moduledoc """
|
@moduledoc """
|
||||||
Documentation for `Parrhesia`.
|
Parrhesia is a Nostr relay runtime that can run standalone or as an embedded OTP service.
|
||||||
|
|
||||||
|
For embedded use, the main developer-facing surface is `Parrhesia.API.*`.
|
||||||
|
Start with:
|
||||||
|
|
||||||
|
- `Parrhesia.API.Events`
|
||||||
|
- `Parrhesia.API.Stream`
|
||||||
|
- `Parrhesia.API.Admin`
|
||||||
|
- `Parrhesia.API.Identity`
|
||||||
|
- `Parrhesia.API.ACL`
|
||||||
|
- `Parrhesia.API.Sync`
|
||||||
|
|
||||||
|
The host application is responsible for:
|
||||||
|
|
||||||
|
- setting `config :parrhesia, ...`
|
||||||
|
- migrating the configured Parrhesia repos
|
||||||
|
- deciding whether to expose listeners or use only the in-process API
|
||||||
|
|
||||||
|
See `README.md` and `docs/LOCAL_API.md` for the embedding model and configuration guide.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@doc """
|
@doc false
|
||||||
Hello world.
|
|
||||||
|
|
||||||
## Examples
|
|
||||||
|
|
||||||
iex> Parrhesia.hello()
|
|
||||||
:world
|
|
||||||
|
|
||||||
"""
|
|
||||||
def hello do
|
def hello do
|
||||||
:world
|
:world
|
||||||
end
|
end
|
||||||
|
|||||||
304
lib/parrhesia/api/acl.ex
Normal file
304
lib/parrhesia/api/acl.ex
Normal file
@@ -0,0 +1,304 @@
|
|||||||
|
defmodule Parrhesia.API.ACL do
  @moduledoc """
  Public ACL API and rule matching for protected sync traffic.

  ACL checks are only applied when the requested subject overlaps with
  `config :parrhesia, :acl, protected_filters: [...]`.

  The intended flow is:

  1. mark a subset of sync traffic as protected with `protected_filters`
  2. persist pubkey-based grants with `grant/2`
  3. call `check/3` during sync reads and writes

  Unprotected subjects always return `:ok`.
  """

  alias Parrhesia.API.RequestContext
  alias Parrhesia.Protocol.Filter
  alias Parrhesia.Storage

  @doc """
  Persists an ACL rule.

  A typical rule looks like:

  ```elixir
  %{
    principal_type: :pubkey,
    principal: "...64 hex chars...",
    capability: :sync_read,
    match: %{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}
  }
  ```
  """
  @spec grant(map(), keyword()) :: :ok | {:error, term()}
  def grant(rule, _opts \\ []) do
    # Collapse the stored rule into a bare :ok; storage errors fall through as-is.
    with {:ok, _stored_rule} <- Storage.acl().put_rule(%{}, normalize_rule(rule)) do
      :ok
    end
  end

  @doc """
  Deletes ACL rules matching the given selector.

  The selector is passed through to the configured storage adapter, which typically accepts an
  id-based selector such as `%{id: rule_id}`.
  """
  @spec revoke(map(), keyword()) :: :ok | {:error, term()}
  def revoke(rule, _opts \\ []) do
    Storage.acl().delete_rule(%{}, normalize_delete_selector(rule))
  end

  @doc """
  Lists persisted ACL rules.

  Supported filters are:

  - `:principal_type`
  - `:principal`
  - `:capability`
  """
  @spec list(keyword()) :: {:ok, [map()]} | {:error, term()}
  def list(opts \\ []) do
    Storage.acl().list_rules(%{}, normalize_list_opts(opts))
  end

  @doc """
  Authorizes a protected sync read or write subject for the given request context.

  Supported capabilities are `:sync_read` and `:sync_write`.

  `opts[:context]` defaults to an empty `Parrhesia.API.RequestContext`, which means protected
  subjects will fail with `{:error, :auth_required}` until authenticated pubkeys are present.
  """
  @spec check(atom(), map(), keyword()) :: :ok | {:error, term()}
  def check(capability, subject, opts \\ [])

  def check(capability, subject, opts)
      when capability in [:sync_read, :sync_write] and is_map(subject) do
    context = Keyword.get(opts, :context, %RequestContext{})

    with {:ok, normalized_capability} <- normalize_capability(capability),
         {:ok, normalized_context} <- normalize_context(context),
         {:ok, protected_filters} <- protected_filters() do
      # Only subjects that overlap the configured protected surface are checked;
      # everything else is allowed unconditionally.
      if protected_subject?(normalized_capability, subject, protected_filters) do
        authorize_subject(normalized_capability, subject, normalized_context)
      else
        :ok
      end
    end
  end

  def check(_capability, _subject, _opts), do: {:error, :invalid_acl_capability}

  @doc """
  Returns `true` when a filter overlaps the configured protected read surface.
  """
  @spec protected_read?(map()) :: boolean()
  def protected_read?(filter) when is_map(filter) do
    case protected_filters() do
      {:ok, protected_filters} ->
        protected_subject?(:sync_read, filter, protected_filters)

      # Misconfigured protected filters fail open here (predicate only);
      # check/3 still surfaces {:error, :invalid_protected_filters} to callers.
      {:error, _reason} ->
        false
    end
  end

  def protected_read?(_filter), do: false

  @doc """
  Returns `true` when an event matches the configured protected write surface.
  """
  @spec protected_write?(map()) :: boolean()
  def protected_write?(event) when is_map(event) do
    case protected_filters() do
      {:ok, protected_filters} ->
        protected_subject?(:sync_write, event, protected_filters)

      {:error, _reason} ->
        false
    end
  end

  def protected_write?(_event), do: false

  # Denies outright when the context carries no authenticated pubkeys;
  # otherwise checks the stored pubkey rules for the capability.
  defp authorize_subject(capability, subject, %RequestContext{} = context) do
    if MapSet.size(context.authenticated_pubkeys) == 0 do
      {:error, :auth_required}
    else
      capability
      |> list_rules_for_capability()
      |> authorize_against_rules(capability, context.authenticated_pubkeys, subject)
    end
  end

  defp list_rules_for_capability(capability) do
    Storage.acl().list_rules(%{}, principal_type: :pubkey, capability: capability)
  end

  # Grants access if ANY authenticated pubkey has ANY covering rule.
  defp authorize_against_rules({:ok, rules}, capability, authenticated_pubkeys, subject) do
    if Enum.any?(authenticated_pubkeys, &principal_authorized?(&1, subject, rules)) do
      :ok
    else
      {:error, denial_reason(capability)}
    end
  end

  # Storage listing failures propagate unchanged.
  defp authorize_against_rules({:error, reason}, _capability, _authenticated_pubkeys, _subject),
    do: {:error, reason}

  defp principal_authorized?(authenticated_pubkey, subject, rules) do
    Enum.any?(rules, fn rule ->
      rule.principal == authenticated_pubkey and
        rule_covers_subject?(rule.capability, rule.match, subject)
    end)
  end

  # Reads require the requested filter to be WITHIN the rule's match;
  # writes require the event to MATCH the rule's filter.
  defp rule_covers_subject?(:sync_read, rule_match, filter),
    do: filter_within_rule?(filter, rule_match)

  defp rule_covers_subject?(:sync_write, rule_match, event),
    do: Filter.matches_filter?(event, rule_match)

  defp protected_subject?(:sync_read, filter, protected_filters) do
    Enum.any?(protected_filters, &filters_overlap?(filter, &1))
  end

  defp protected_subject?(:sync_write, event, protected_filters) do
    Enum.any?(protected_filters, &Filter.matches_filter?(event, &1))
  end

  # Two filters overlap when every shared constraint key is compatible
  # (range keys are handled separately by filter_ranges_overlap?/2).
  defp filters_overlap?(left, right) when is_map(left) and is_map(right) do
    comparable_keys =
      left
      |> comparable_filter_keys(right)
      |> Enum.reject(&(&1 in ["limit", "search", "since", "until"]))

    Enum.all?(
      comparable_keys,
      &filter_constraint_compatible?(Map.get(left, &1), Map.get(right, &1), &1)
    ) and
      filter_ranges_overlap?(left, right)
  end

  # A missing constraint on either side cannot exclude overlap.
  defp filter_constraint_compatible?(nil, _right, _key), do: true
  defp filter_constraint_compatible?(_left, nil, _key), do: true

  # List constraints overlap when they share at least one element.
  defp filter_constraint_compatible?(left, right, _key) when is_list(left) and is_list(right) do
    MapSet.disjoint?(MapSet.new(left), MapSet.new(right)) == false
  end

  defp filter_constraint_compatible?(left, right, _key), do: left == right

  # A requested filter is within a rule when every non-range rule constraint
  # is satisfied as a subset AND the requested time range fits the rule's.
  defp filter_within_rule?(filter, rule_match) when is_map(filter) and is_map(rule_match) do
    Enum.reject(rule_match, fn {key, _value} -> key in ["since", "until", "limit", "search"] end)
    |> Enum.all?(fn {key, rule_value} ->
      requested_value = Map.get(filter, key)
      requested_constraint_within_rule?(requested_value, rule_value, key)
    end) and filter_range_within_rule?(filter, rule_match)
  end

  # A filter that omits a rule-constrained key would request MORE than the
  # rule allows, so it is not within the rule.
  defp requested_constraint_within_rule?(nil, _rule_value, _key), do: false

  defp requested_constraint_within_rule?(requested_values, rule_values, _key)
       when is_list(requested_values) and is_list(rule_values) do
    requested_values
    |> MapSet.new()
    |> MapSet.subset?(MapSet.new(rule_values))
  end

  defp requested_constraint_within_rule?(requested_value, rule_value, _key),
    do: requested_value == rule_value

  defp denial_reason(:sync_read), do: :sync_read_not_allowed
  defp denial_reason(:sync_write), do: :sync_write_not_allowed

  defp normalize_context(%RequestContext{} = context), do: {:ok, normalize_pubkeys(context)}
  defp normalize_context(_context), do: {:error, :invalid_context}

  # Lowercases authenticated pubkeys so rule comparisons are case-insensitive
  # (list/1 lowercases the :principal filter symmetrically).
  defp normalize_pubkeys(%RequestContext{} = context) do
    normalized_pubkeys =
      context.authenticated_pubkeys
      |> Enum.map(&String.downcase/1)
      |> MapSet.new()

    %RequestContext{context | authenticated_pubkeys: normalized_pubkeys}
  end

  defp normalize_rule(rule) when is_map(rule), do: rule
  defp normalize_rule(_rule), do: %{}

  defp normalize_delete_selector(selector) when is_map(selector), do: selector
  defp normalize_delete_selector(_selector), do: %{}

  defp normalize_list_opts(opts) do
    []
    |> maybe_put_opt(:principal_type, Keyword.get(opts, :principal_type))
    |> maybe_put_opt(:principal, normalize_list_principal(Keyword.get(opts, :principal)))
    |> maybe_put_opt(:capability, Keyword.get(opts, :capability))
  end

  defp normalize_list_principal(nil), do: nil

  defp normalize_list_principal(principal) when is_binary(principal),
    do: String.downcase(principal)

  defp normalize_list_principal(principal), do: principal

  defp maybe_put_opt(opts, _key, nil), do: opts
  defp maybe_put_opt(opts, key, value), do: Keyword.put(opts, key, value)

  defp normalize_capability(capability) do
    case capability do
      :sync_read -> {:ok, :sync_read}
      :sync_write -> {:ok, :sync_write}
      _other -> {:error, :invalid_acl_capability}
    end
  end

  # Reads protected filters from application config at runtime and validates
  # each entry; any invalid entry rejects the whole configuration.
  defp protected_filters do
    filters =
      :parrhesia
      |> Application.get_env(:acl, [])
      |> Keyword.get(:protected_filters, [])

    if is_list(filters) and
         Enum.all?(filters, &(match?(%{}, &1) and Filter.validate_filter(&1) == :ok)) do
      {:ok, filters}
    else
      {:error, :invalid_protected_filters}
    end
  end

  defp comparable_filter_keys(left, right) do
    Map.keys(left)
    |> Kernel.++(Map.keys(right))
    |> Enum.uniq()
  end

  # Time ranges overlap when the later "since" does not exceed the earlier "until".
  defp filter_ranges_overlap?(left, right) do
    since = max(boundary_value(left, "since", :lower), boundary_value(right, "since", :lower))
    until = min(boundary_value(left, "until", :upper), boundary_value(right, "until", :upper))
    since <= until
  end

  # The requested range fits when it does not extend below the rule's "since"
  # or above the rule's "until"; absent rule bounds are unbounded.
  defp filter_range_within_rule?(filter, rule_match) do
    requested_since = Map.get(filter, "since")
    requested_until = Map.get(filter, "until")
    rule_since = Map.get(rule_match, "since")
    rule_until = Map.get(rule_match, "until")

    lower_ok? =
      is_nil(rule_since) or (is_integer(requested_since) and requested_since >= rule_since)

    upper_ok? =
      is_nil(rule_until) or (is_integer(requested_until) and requested_until <= rule_until)

    lower_ok? and upper_ok?
  end

  # Defaults: 0 for missing lower bounds, max 64-bit signed int for missing upper bounds.
  defp boundary_value(filter, key, :lower), do: Map.get(filter, key, 0)
  defp boundary_value(filter, key, :upper), do: Map.get(filter, key, 9_223_372_036_854_775_807)
end
|
||||||
325
lib/parrhesia/api/admin.ex
Normal file
325
lib/parrhesia/api/admin.ex
Normal file
@@ -0,0 +1,325 @@
|
|||||||
|
defmodule Parrhesia.API.Admin do
  @moduledoc """
  Public management API facade.

  This module exposes the DX-friendly control plane for administrative tasks. It wraps
  storage-backed management methods and a set of built-in helpers for ACL, identity, sync,
  and listener management.

  `execute/3` accepts the same method names used by NIP-86 style management endpoints, while
  the dedicated functions (`stats/1`, `health/1`, `list_audit_logs/1`) are easier to call
  from Elixir code.
  """

  alias Parrhesia.API.ACL
  alias Parrhesia.API.Identity
  alias Parrhesia.API.Sync
  alias Parrhesia.Storage
  alias Parrhesia.Web.Endpoint

  # Built-in method names, grouped by subsystem; merged with the storage
  # adapter's own supported methods in supported_methods/0.
  @supported_admin_methods ~w(health list_audit_logs stats)
  @supported_acl_methods ~w(acl_grant acl_revoke acl_list)
  @supported_identity_methods ~w(identity_ensure identity_get identity_import identity_rotate)
  @supported_listener_methods ~w(listener_reload)
  @supported_sync_methods ~w(
    sync_get_server
    sync_health
    sync_list_servers
    sync_put_server
    sync_remove_server
    sync_server_stats
    sync_start_server
    sync_stats
    sync_stop_server
    sync_sync_now
  )

  @doc """
  Executes a management method by name.

  Built-in methods include:

  - `supportedmethods`
  - `stats`
  - `health`
  - `list_audit_logs`
  - `acl_grant`, `acl_revoke`, `acl_list`
  - `identity_get`, `identity_ensure`, `identity_import`, `identity_rotate`
  - `listener_reload`
  - `sync_*`

  Unknown methods are delegated to the configured `Parrhesia.Storage.Admin` implementation.
  """
  @spec execute(String.t() | atom(), map(), keyword()) :: {:ok, map()} | {:error, term()}
  def execute(method, params, opts \\ [])

  def execute(method, params, opts) when is_map(params) do
    method_name = normalize_method_name(method)

    # Built-ins are tried first; {:continue, method} falls through to storage.
    case execute_builtin(method_name, params, opts) do
      {:continue, other_method} -> Storage.admin().execute(%{}, other_method, params)
      result -> result
    end
  end

  # Non-map params are rejected without touching storage.
  def execute(method, _params, _opts),
    do: {:error, {:unsupported_method, normalize_method_name(method)}}

  @doc """
  Returns aggregate relay stats plus nested sync stats.
  """
  @spec stats(keyword()) :: {:ok, map()} | {:error, term()}
  def stats(opts \\ []) do
    with {:ok, relay_stats} <- relay_stats(),
         {:ok, sync_stats} <- Sync.sync_stats(opts) do
      {:ok, Map.put(relay_stats, "sync", sync_stats)}
    end
  end

  @doc """
  Returns the overall management health payload.

  The top-level `"status"` is currently derived from sync health, while relay-specific health
  details remain delegated to storage-backed management methods.
  """
  @spec health(keyword()) :: {:ok, map()} | {:error, term()}
  def health(opts \\ []) do
    with {:ok, sync_health} <- Sync.sync_health(opts) do
      {:ok,
       %{
         "status" => overall_health_status(sync_health),
         "sync" => sync_health
       }}
    end
  end

  @doc """
  Lists persisted audit log entries from the configured admin storage backend.

  Supported options are storage-adapter specific. The built-in admin execution path forwards
  `:limit`, `:method`, and `:actor_pubkey`.
  """
  @spec list_audit_logs(keyword()) :: {:ok, [map()]} | {:error, term()}
  def list_audit_logs(opts \\ []) do
    Storage.admin().list_audit_logs(%{}, opts)
  end

  # ACL built-ins: wrap Parrhesia.API.ACL and normalize :ok into {:ok, map}.

  defp acl_grant(params) do
    with :ok <- ACL.grant(params) do
      {:ok, %{"ok" => true}}
    end
  end

  defp acl_revoke(params) do
    with :ok <- ACL.revoke(params) do
      {:ok, %{"ok" => true}}
    end
  end

  defp acl_list(params) do
    with {:ok, rules} <- ACL.list(acl_list_opts(params)) do
      {:ok, %{"rules" => rules}}
    end
  end

  # Translates the params map (atom or string keys) into ACL.list/1 options.
  defp acl_list_opts(params) do
    []
    |> maybe_put_opt(:principal_type, fetch_value(params, :principal_type))
    |> maybe_put_opt(:principal, fetch_value(params, :principal))
    |> maybe_put_opt(:capability, fetch_value(params, :capability))
  end

  # Union of the storage adapter's supported methods (best-effort; failures
  # yield []) and the built-in method names, deduplicated and sorted.
  defp supported_methods do
    storage_supported =
      case Storage.admin().execute(%{}, :supportedmethods, %{}) do
        {:ok, methods} when is_list(methods) -> methods
        {:ok, %{"methods" => methods}} when is_list(methods) -> methods
        _other -> []
      end

    (storage_supported ++
       @supported_admin_methods ++
       @supported_acl_methods ++
       @supported_identity_methods ++ @supported_listener_methods ++ @supported_sync_methods)
    |> Enum.uniq()
    |> Enum.sort()
  end

  # Identity built-ins: thin delegations to Parrhesia.API.Identity.

  defp identity_get(_params), do: Identity.get()

  defp identity_ensure(_params), do: Identity.ensure()

  defp identity_rotate(_params), do: Identity.rotate()

  defp identity_import(params) do
    Identity.import(params)
  end

  defp admin_stats(_params, opts), do: stats(opts)
  defp admin_health(_params, opts), do: health(opts)

  defp admin_list_audit_logs(params, _opts) do
    list_audit_logs(audit_log_opts(params))
  end

  # Reloads a single listener by id, or all listeners when no id is given.
  defp listener_reload(params) do
    case normalize_listener_id(fetch_value(params, :id)) do
      :all ->
        Endpoint.reload_all()
        |> ok_result()

      {:ok, listener_id} ->
        listener_id
        |> Endpoint.reload_listener()
        |> ok_result()

      :error ->
        {:error, :not_found}
    end
  end

  # Sync built-ins: most require an "id" param and normalize :ok into {:ok, map}.

  defp sync_put_server(params, opts), do: Sync.put_server(params, opts)

  defp sync_remove_server(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         :ok <- Sync.remove_server(server_id, opts) do
      {:ok, %{"ok" => true}}
    end
  end

  defp sync_get_server(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         {:ok, server} <- Sync.get_server(server_id, opts) do
      {:ok, server}
    else
      # Sync.get_server may return bare :error for unknown servers.
      :error -> {:error, :not_found}
      other -> other
    end
  end

  defp sync_list_servers(_params, opts), do: Sync.list_servers(opts)

  defp sync_start_server(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         :ok <- Sync.start_server(server_id, opts) do
      {:ok, %{"ok" => true}}
    end
  end

  defp sync_stop_server(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         :ok <- Sync.stop_server(server_id, opts) do
      {:ok, %{"ok" => true}}
    end
  end

  defp sync_sync_now(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         :ok <- Sync.sync_now(server_id, opts) do
      {:ok, %{"ok" => true}}
    end
  end

  defp sync_server_stats(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         {:ok, stats} <- Sync.server_stats(server_id, opts) do
      {:ok, stats}
    else
      :error -> {:error, :not_found}
      other -> other
    end
  end

  defp sync_stats(_params, opts), do: Sync.sync_stats(opts)
  defp sync_health(_params, opts), do: Sync.sync_health(opts)

  # Built-in dispatch table: one clause per method name; the final clause
  # returns {:continue, method} so execute/3 can delegate to storage.
  defp execute_builtin("stats", params, opts), do: admin_stats(params, opts)
  defp execute_builtin("health", params, opts), do: admin_health(params, opts)
  defp execute_builtin("list_audit_logs", params, opts), do: admin_list_audit_logs(params, opts)
  defp execute_builtin("acl_grant", params, _opts), do: acl_grant(params)
  defp execute_builtin("acl_revoke", params, _opts), do: acl_revoke(params)
  defp execute_builtin("acl_list", params, _opts), do: acl_list(params)
  defp execute_builtin("identity_get", params, _opts), do: identity_get(params)
  defp execute_builtin("identity_ensure", params, _opts), do: identity_ensure(params)
  defp execute_builtin("identity_import", params, _opts), do: identity_import(params)
  defp execute_builtin("identity_rotate", params, _opts), do: identity_rotate(params)
  defp execute_builtin("listener_reload", params, _opts), do: listener_reload(params)
  defp execute_builtin("sync_put_server", params, opts), do: sync_put_server(params, opts)
  defp execute_builtin("sync_remove_server", params, opts), do: sync_remove_server(params, opts)
  defp execute_builtin("sync_get_server", params, opts), do: sync_get_server(params, opts)
  defp execute_builtin("sync_list_servers", params, opts), do: sync_list_servers(params, opts)
  defp execute_builtin("sync_start_server", params, opts), do: sync_start_server(params, opts)
  defp execute_builtin("sync_stop_server", params, opts), do: sync_stop_server(params, opts)
  defp execute_builtin("sync_sync_now", params, opts), do: sync_sync_now(params, opts)
  defp execute_builtin("sync_server_stats", params, opts), do: sync_server_stats(params, opts)
  defp execute_builtin("sync_stats", params, opts), do: sync_stats(params, opts)
  defp execute_builtin("sync_health", params, opts), do: sync_health(params, opts)

  defp execute_builtin("supportedmethods", _params, _opts),
    do: {:ok, %{"methods" => supported_methods()}}

  defp execute_builtin(other_method, _params, _opts), do: {:continue, other_method}

  # Storage-backed relay stats; an unsupported storage method degrades to %{}.
  defp relay_stats do
    case Storage.admin().execute(%{}, :stats, %{}) do
      {:ok, stats} when is_map(stats) -> {:ok, stats}
      {:error, {:unsupported_method, _method}} -> {:ok, %{}}
      other -> other
    end
  end

  defp overall_health_status(%{"status" => "degraded"}), do: "degraded"
  defp overall_health_status(_sync_health), do: "ok"

  defp audit_log_opts(params) do
    []
    |> maybe_put_opt(:limit, fetch_value(params, :limit))
    |> maybe_put_opt(:method, fetch_value(params, :method))
    |> maybe_put_opt(:actor_pubkey, fetch_value(params, :actor_pubkey))
  end

  defp maybe_put_opt(opts, _key, nil), do: opts
  defp maybe_put_opt(opts, key, value), do: Keyword.put(opts, key, value)

  # Normalizes Endpoint reload results to the management payload shape.
  defp ok_result(:ok), do: {:ok, %{"ok" => true}}
  defp ok_result({:error, _reason} = error), do: error
  defp ok_result(other), do: other

  # nil id means "reload everything".
  defp normalize_listener_id(nil), do: :all

  defp normalize_listener_id(listener_id) when is_atom(listener_id) do
    {:ok, listener_id}
  end

  # String ids are resolved against the Endpoint supervisor's live
  # {:listener, id} children; unknown ids yield :error.
  defp normalize_listener_id(listener_id) when is_binary(listener_id) do
    case Supervisor.which_children(Endpoint) do
      children when is_list(children) ->
        Enum.find_value(children, :error, &match_listener_child(&1, listener_id))

      _other ->
        :error
    end
  end

  defp normalize_listener_id(_listener_id), do: :error

  defp match_listener_child({{:listener, id}, _pid, _type, _modules}, listener_id) do
    normalized_id = Atom.to_string(id)
    if normalized_id == listener_id, do: {:ok, id}, else: false
  end

  defp match_listener_child(_child, _listener_id), do: false

  # Requires a non-empty string value under the atom OR string form of key.
  defp fetch_required_string(map, key) do
    case fetch_value(map, key) do
      value when is_binary(value) and value != "" -> {:ok, value}
      _other -> {:error, {:missing_param, Atom.to_string(key)}}
    end
  end

  # Accepts both atom-keyed and string-keyed params maps.
  defp fetch_value(map, key), do: Map.get(map, key) || Map.get(map, Atom.to_string(key))

  defp normalize_method_name(method) when is_atom(method), do: Atom.to_string(method)
  defp normalize_method_name(method) when is_binary(method), do: method
  defp normalize_method_name(method), do: inspect(method)
end
|
||||||
85
lib/parrhesia/api/auth.ex
Normal file
85
lib/parrhesia/api/auth.ex
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
defmodule Parrhesia.API.Auth do
  @moduledoc """
  Public helpers for event validation and NIP-98 HTTP authentication.

  This module is intended for callers that need a programmatic API surface:

  - `validate_event/1` returns validator reason atoms.
  - `compute_event_id/1` computes the canonical Nostr event id.
  - `validate_nip98/3` and `validate_nip98/4` turn an `Authorization` header into a
    shared auth context that can be reused by the rest of the API surface.

  For transport-facing validation messages, see `Parrhesia.Protocol.validate_event/1`.
  """

  alias Parrhesia.API.Auth.Context
  alias Parrhesia.API.RequestContext
  alias Parrhesia.Auth.Nip98
  alias Parrhesia.Protocol.EventValidator

  @doc """
  Validates a Nostr event and returns validator-friendly error atoms.

  This is the low-level validation entrypoint used by the API surface. Unlike
  `Parrhesia.Protocol.validate_event/1`, it preserves the raw validator reason so callers
  can branch on it directly.
  """
  @spec validate_event(map()) :: :ok | {:error, term()}
  def validate_event(event), do: EventValidator.validate(event)

  @doc """
  Computes the canonical Nostr event id for an event payload.

  The event does not need to be persisted first. This is useful when building or signing
  events locally.
  """
  @spec compute_event_id(map()) :: String.t()
  def compute_event_id(event), do: EventValidator.compute_id(event)

  @doc """
  Validates a NIP-98 `Authorization` header using default options.
  """
  @spec validate_nip98(String.t() | nil, String.t(), String.t()) ::
          {:ok, Context.t()} | {:error, term()}
  def validate_nip98(authorization, method, url) do
    validate_nip98(authorization, method, url, [])
  end

  @doc """
  Validates a NIP-98 `Authorization` header and returns a shared auth context.

  The returned `Parrhesia.API.Auth.Context` includes:

  - the decoded auth event
  - the authenticated pubkey
  - a `Parrhesia.API.RequestContext` with `caller: :http`

  Supported options are forwarded to `Parrhesia.Auth.Nip98.validate_authorization_header/4`,
  including `:max_age_seconds` and `:replay_cache`.
  """
  @spec validate_nip98(String.t() | nil, String.t(), String.t(), keyword()) ::
          {:ok, Context.t()} | {:error, term()}
  def validate_nip98(authorization, method, url, opts)
      when is_binary(method) and is_binary(url) and is_list(opts) do
    with {:ok, auth_event} <-
           Nip98.validate_authorization_header(authorization, method, url, opts),
         pubkey when is_binary(pubkey) <- Map.get(auth_event, "pubkey") do
      {:ok,
       %Context{
         auth_event: auth_event,
         pubkey: pubkey,
         request_context: %RequestContext{
           authenticated_pubkeys: MapSet.new([pubkey]),
           caller: :http
         },
         metadata: %{
           method: method,
           url: url
         }
       }}
    else
      # Validation failures from Nip98 are passed through unchanged.
      {:error, reason} ->
        {:error, reason}

      # Fix: the previous `nil ->` clause only covered a MISSING "pubkey".
      # A malformed auth event whose "pubkey" is present but not a binary
      # (e.g. an integer) matched no `else` clause and raised WithClauseError.
      # The ordered catch-all turns any non-binary pubkey into a tagged error.
      _other ->
        {:error, :invalid_event}
    end
  end
end
|
||||||
23
lib/parrhesia/api/auth/context.ex
Normal file
23
lib/parrhesia/api/auth/context.ex
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
defmodule Parrhesia.API.Auth.Context do
  @moduledoc """
  Authenticated request details returned by shared auth helpers.

  This is the higher-level result returned by `Parrhesia.API.Auth.validate_nip98/3` and
  `validate_nip98/4`. The nested `request_context` is ready to be passed into the rest of the
  public API surface.
  """

  alias Parrhesia.API.RequestContext

  # auth_event       - the decoded NIP-98 auth event map (nil until validated)
  # pubkey           - the authenticated pubkey extracted from the auth event
  # request_context  - RequestContext carrying the authenticated pubkey set
  #                    (built with caller: :http by validate_nip98/4)
  # metadata         - transport details; validate_nip98/4 stores :method and :url
  defstruct auth_event: nil,
            pubkey: nil,
            request_context: %RequestContext{},
            metadata: %{}

  @type t :: %__MODULE__{
          auth_event: map() | nil,
          pubkey: String.t() | nil,
          request_context: RequestContext.t(),
          metadata: map()
        }
end
|
||||||
470
lib/parrhesia/api/events.ex
Normal file
470
lib/parrhesia/api/events.ex
Normal file
@@ -0,0 +1,470 @@
|
|||||||
|
defmodule Parrhesia.API.Events do
|
||||||
|
@moduledoc """
|
||||||
|
Canonical event publish, query, and count API.
|
||||||
|
|
||||||
|
This is the main in-process API for working with Nostr events. It applies the same core
|
||||||
|
validation and policy checks used by the relay edge, but without going through a socket or
|
||||||
|
HTTP transport.
|
||||||
|
|
||||||
|
All public functions expect `opts[:context]` to contain a `Parrhesia.API.RequestContext`.
|
||||||
|
That context drives authorization, caller attribution, and downstream policy behavior.
|
||||||
|
|
||||||
|
`publish/2` intentionally returns `{:ok, %PublishResult{accepted: false}}` for policy and
|
||||||
|
storage rejections so callers can mirror relay `OK` semantics without treating a rejected
|
||||||
|
event as a process error.
|
||||||
|
"""
|
||||||
|
|
||||||
|
alias Parrhesia.API.Events.PublishResult
|
||||||
|
alias Parrhesia.API.RequestContext
|
||||||
|
alias Parrhesia.Fanout.Dispatcher
|
||||||
|
alias Parrhesia.Fanout.MultiNode
|
||||||
|
alias Parrhesia.NIP43
|
||||||
|
alias Parrhesia.Policy.EventPolicy
|
||||||
|
alias Parrhesia.Protocol
|
||||||
|
alias Parrhesia.Protocol.Filter
|
||||||
|
alias Parrhesia.Storage
|
||||||
|
alias Parrhesia.Telemetry
|
||||||
|
|
||||||
|
@default_max_event_bytes 262_144
|
||||||
|
|
||||||
|
@marmot_kinds MapSet.new([
|
||||||
|
443,
|
||||||
|
444,
|
||||||
|
445,
|
||||||
|
1059,
|
||||||
|
10_050,
|
||||||
|
10_051,
|
||||||
|
446,
|
||||||
|
447,
|
||||||
|
448,
|
||||||
|
449
|
||||||
|
])
|
||||||
|
|
||||||
|
@doc """
Validates, authorizes, persists, and fans out an event.

Required options:

- `:context` - a `Parrhesia.API.RequestContext`

Supported options:

- `:max_event_bytes` - overrides the configured max encoded event size
- `:path`, `:private_key`, `:configured_private_key` - forwarded to the NIP-43 helper flow

Return semantics:

- `{:ok, %PublishResult{accepted: true}}` for accepted events
- `{:ok, %PublishResult{accepted: false}}` for rejected or duplicate events
- `{:error, :invalid_context}` only when the call itself is malformed
"""
@spec publish(map(), keyword()) :: {:ok, PublishResult.t()} | {:error, term()}
def publish(event, opts \\ [])

def publish(event, opts) when is_map(event) and is_list(opts) do
  # Capture timing and traffic-class metadata up front so the success and
  # rejection paths emit comparable telemetry.
  started_at = System.monotonic_time()
  event_id = Map.get(event, "id", "")
  telemetry_metadata = telemetry_metadata_for_event(event)

  # Pipeline order: cheap size check first, then event validation, then write
  # policy, then the NIP-43 pre-publish hook, then storage.
  with {:ok, context} <- fetch_context(opts),
       :ok <- validate_event_payload_size(event, max_event_bytes(opts)),
       :ok <- Protocol.validate_event(event),
       :ok <- EventPolicy.authorize_write(event, context.authenticated_pubkeys, context),
       {:ok, publish_state} <- NIP43.prepare_publish(event, nip43_opts(opts, context)),
       {:ok, _stored, message} <- persist_event(event) do
    Telemetry.emit(
      [:parrhesia, :ingest, :stop],
      %{duration: System.monotonic_time() - started_at},
      telemetry_metadata
    )

    emit_ingest_result(telemetry_metadata, :accepted, :accepted)

    # The NIP-43 finalize hook may replace the OK message returned to the caller.
    # NOTE(review): only `{:ok, binary}` and `:ok` are matched - an error tuple
    # from finalize_publish/3 would raise CaseClauseError after the event was
    # already stored. Confirm finalize_publish/3 cannot return an error here.
    message =
      case NIP43.finalize_publish(event, publish_state, nip43_opts(opts, context)) do
        {:ok, override} when is_binary(override) -> override
        :ok -> message
      end

    # Local fanout first, then best-effort multi-node replication (exit-safe).
    Dispatcher.dispatch(event)
    maybe_publish_multi_node(event)

    {:ok,
     %PublishResult{
       event_id: event_id,
       accepted: true,
       message: message,
       reason: nil
     }}
  else
    # A missing/invalid context is a malformed call, not a relay-level rejection.
    {:error, :invalid_context} = error ->
      emit_ingest_result(telemetry_metadata, :rejected, :invalid_context)
      error

    # Everything else maps to relay OK=false semantics: still an {:ok, result}.
    {:error, reason} ->
      emit_ingest_result(telemetry_metadata, :rejected, reason)

      {:ok,
       %PublishResult{
         event_id: event_id,
         accepted: false,
         message: error_message_for_publish_failure(reason),
         reason: reason
       }}
  end
end

# Non-map events or non-list opts are caller bugs, not relay rejections.
def publish(_event, _opts), do: {:error, :invalid_event}
|
||||||
|
|
||||||
|
@doc """
Queries stored events plus any dynamic NIP-43 events visible to the caller.

Required options:

- `:context` - a `Parrhesia.API.RequestContext`

Supported options:

- `:max_filter_limit` - overrides the configured per-filter limit
- `:validate_filters?` - skips filter validation when `false`
- `:authorize_read?` - skips read policy checks when `false`

The skip flags are primarily for internal composition, such as `Parrhesia.API.Stream`.
External callers should normally leave them enabled.
"""
@spec query([map()], keyword()) :: {:ok, [map()]} | {:error, term()}
def query(filters, opts \\ [])

def query(filters, opts) when is_list(filters) and is_list(opts) do
  started_at = System.monotonic_time()
  telemetry_metadata = telemetry_metadata_for_filters(filters, :query)

  with {:ok, context} <- fetch_context(opts),
       :ok <- maybe_validate_filters(filters, opts),
       :ok <- maybe_authorize_read(filters, context, opts),
       {:ok, events} <- Storage.events().query(%{}, filters, storage_query_opts(context, opts)) do
    # Dynamic (non-persisted) NIP-43 events are prepended to the stored results.
    # NOTE(review): they are added after the storage-side limit was applied, so
    # a response can exceed a filter's `limit` - confirm this is intentional.
    events = NIP43.dynamic_events(filters, nip43_opts(opts, context)) ++ events

    # result_count reflects the combined (stored + dynamic) result set.
    Telemetry.emit(
      [:parrhesia, :query, :stop],
      %{duration: System.monotonic_time() - started_at, result_count: length(events)},
      telemetry_metadata
    )

    emit_query_result(telemetry_metadata, :ok)

    {:ok, events}
  else
    {:error, reason} = error ->
      emit_query_result(telemetry_metadata, :error, reason)
      error
  end
end

# Non-list filters or opts are caller bugs.
def query(_filters, _opts), do: {:error, :invalid_filters}
|
||||||
|
|
||||||
|
@doc """
Counts events matching the given filters.

Required options:

- `:context` - a `Parrhesia.API.RequestContext`

Supported options:

- `:validate_filters?` - skips filter validation when `false`
- `:authorize_read?` - skips read policy checks when `false`
- `:options` - when set to a map, returns a NIP-45-style payload instead of a bare integer

When `opts[:options]` is a map, the result shape is `%{"count" => count, "approximate" => false}`.
If `opts[:options]["hll"]` is `true` and the feature is enabled, an `"hll"` field is included.
"""
@spec count([map()], keyword()) :: {:ok, non_neg_integer() | map()} | {:error, term()}
def count(filters, opts \\ [])

def count(filters, opts) when is_list(filters) and is_list(opts) do
  started_at = System.monotonic_time()
  telemetry_metadata = telemetry_metadata_for_filters(filters, :count)

  with {:ok, context} <- fetch_context(opts),
       :ok <- maybe_validate_filters(filters, opts),
       :ok <- maybe_authorize_read(filters, context, opts),
       {:ok, count} <-
         Storage.events().count(%{}, filters, requester_pubkeys: requester_pubkeys(context)),
       # Dynamic NIP-43 events are not persisted, so their count is added on top
       # of the storage count (a bare `<-` match that cannot fail).
       count <- count + NIP43.dynamic_count(filters, nip43_opts(opts, context)),
       {:ok, result} <- maybe_build_count_result(filters, count, Keyword.get(opts, :options)) do
    # Counts reuse the generic query telemetry; result_count is the combined total.
    Telemetry.emit(
      [:parrhesia, :query, :stop],
      %{duration: System.monotonic_time() - started_at, result_count: count},
      telemetry_metadata
    )

    emit_query_result(telemetry_metadata, :ok)

    {:ok, result}
  else
    {:error, reason} = error ->
      emit_query_result(telemetry_metadata, :error, reason)
      error
  end
end

# Non-list filters or opts are caller bugs.
def count(_filters, _opts), do: {:error, :invalid_filters}
|
||||||
|
|
||||||
|
# Runs NIP-01 filter validation unless the caller opted out via
# `validate_filters?: false` (used internally, e.g. by `Parrhesia.API.Stream`).
defp maybe_validate_filters(filters, opts) do
  skip? = Keyword.get(opts, :validate_filters?, true) in [false, nil]

  if skip?, do: :ok, else: Filter.validate_filters(filters)
end

# Applies the read policy to the filters unless the caller opted out via
# `authorize_read?: false` (internal composition only).
defp maybe_authorize_read(filters, context, opts) do
  skip? = Keyword.get(opts, :authorize_read?, true) in [false, nil]

  if skip? do
    :ok
  else
    EventPolicy.authorize_read(filters, context.authenticated_pubkeys, context)
  end
end
|
||||||
|
|
||||||
|
# Options passed to the events storage adapter's query/4: the effective
# per-filter limit (call-level override, else configured) plus the caller's
# authenticated pubkeys for visibility filtering.
defp storage_query_opts(context, opts) do
  configured_limit = Parrhesia.Config.get([:limits, :max_filter_limit])

  [
    max_filter_limit: Keyword.get(opts, :max_filter_limit, configured_limit),
    requester_pubkeys: requester_pubkeys(context)
  ]
end

# Storage expects a plain list of pubkeys rather than a MapSet.
defp requester_pubkeys(%RequestContext{authenticated_pubkeys: pubkeys}),
  do: MapSet.to_list(pubkeys)
|
||||||
|
|
||||||
|
# Shapes the COUNT result.
#
# - a map `options` payload (NIP-45 style request) with an integer count builds
#   the `%{"count" => _, "approximate" => _}` payload
# - everything else returns the bare count unchanged
#
# Fix: the original carried two extra clauses (`(_f, count, nil) when
# is_integer(count)` and `(_f, count, _o) when is_integer(count)`) whose bodies
# were identical to the final catch-all; they were redundant and are removed.
# Behavior is unchanged for every input.
defp maybe_build_count_result(filters, count, options)
     when is_integer(count) and is_map(options) do
  build_count_payload(filters, count, options)
end

defp maybe_build_count_result(_filters, count, _options), do: {:ok, count}
|
||||||
|
|
||||||
|
# Builds the NIP-45-style COUNT payload, optionally attaching an "hll" field
# when the client asked for it AND the feature flag is enabled.
defp build_count_payload(filters, count, options) do
  include_hll? =
    Map.get(options, "hll", false) and Parrhesia.Config.get([:features, :nip_45_count], true)

  base = %{"count" => count, "approximate" => false}

  if include_hll? do
    {:ok, Map.put(base, "hll", generate_hll_payload(filters, count))}
  else
    {:ok, base}
  end
end

# Deterministic digest over the filters and count.
# NOTE(review): despite the name this is a base64 sha256 digest, not actual
# HyperLogLog registers as NIP-45 describes - confirm clients treat the value
# as opaque.
defp generate_hll_payload(filters, count) do
  digest_input = "#{JSON.encode!(filters)}:#{count}"

  :sha256
  |> :crypto.hash(digest_input)
  |> Base.encode64()
end
|
||||||
|
|
||||||
|
# Routes an event to the appropriate persistence path based on its kind:
#
# - kind 5 (deletion request) and kind 62 (vanish request) are control events
#   that mutate storage instead of being stored themselves
# - ephemeral kinds (20000..29999) are acknowledged but never persisted
# - everything else is written through the events storage adapter
#
# Returns `{:ok, result, ok_message}` or `{:error, reason}`.
defp persist_event(event) do
  kind = Map.get(event, "kind")

  cond do
    kind in [5, 62] -> persist_control_event(kind, event)
    ephemeral_kind?(kind) -> persist_ephemeral_event()
    true -> persist_regular_event(event)
  end
end

# Kind 5: deletion request - removes referenced events, reporting how many.
defp persist_control_event(5, event) do
  with {:ok, deleted_count} <- Storage.events().delete_by_request(%{}, event) do
    {:ok, deleted_count, "ok: deletion request processed"}
  end
end

# Kind 62: vanish request - purges the requesting author's events.
defp persist_control_event(62, event) do
  with {:ok, deleted_count} <- Storage.events().vanish(%{}, event) do
    {:ok, deleted_count, "ok: vanish request processed"}
  end
end

# Ephemeral events are acknowledged (so they still fan out) but never stored.
defp persist_ephemeral_event do
  if accept_ephemeral_events?() do
    {:ok, :ephemeral, "ok: ephemeral event accepted"}
  else
    {:error, :ephemeral_events_disabled}
  end
end

# Stores a regular event.
#
# Fix: the original had a dedicated `{:error, :duplicate_event}` clause that
# rebuilt the identical tuple already produced by the generic `{:error, reason}`
# clause; the redundant clause is removed with no behavior change.
defp persist_regular_event(event) do
  case Storage.events().put_event(%{}, event) do
    {:ok, persisted_event} -> {:ok, persisted_event, "ok: event stored"}
    {:error, reason} -> {:error, reason}
  end
end
|
||||||
|
|
||||||
|
# Best-effort multi-node fanout. A crashed or absent MultiNode process must not
# fail a publish that already succeeded locally, so exits are swallowed and the
# function always returns :ok.
defp maybe_publish_multi_node(event) do
  try do
    MultiNode.publish(event)
  catch
    :exit, _reason -> :ok
  end

  :ok
end
|
||||||
|
|
||||||
|
# Telemetry metadata for ingest: tags the event with a coarse traffic class.
defp telemetry_metadata_for_event(event),
  do: %{traffic_class: traffic_class_for_event(event)}

# Telemetry metadata for read paths; `operation` is :query or :count.
defp telemetry_metadata_for_filters(filters, operation),
  do: %{operation: operation, traffic_class: traffic_class_for_filters(filters)}

# A request is classed :marmot as soon as any single filter looks Marmot-related.
defp traffic_class_for_filters(filters) do
  case Enum.find(filters, &marmot_filter?/1) do
    nil -> :generic
    _filter -> :marmot
  end
end

# A filter is Marmot-related when it requests a Marmot kind or uses the
# "#h" / "#i" tag queries.
defp marmot_filter?(filter) when is_map(filter) do
  kinds = Map.get(filter, "kinds")

  kind_match? =
    is_list(kinds) and Enum.any?(kinds, fn kind -> MapSet.member?(@marmot_kinds, kind) end)

  kind_match? or Map.has_key?(filter, "#h") or Map.has_key?(filter, "#i")
end

defp marmot_filter?(_filter), do: false

# An event is classed :marmot when its kind belongs to @marmot_kinds.
defp traffic_class_for_event(event) when is_map(event) do
  case MapSet.member?(@marmot_kinds, Map.get(event, "kind")) do
    true -> :marmot
    false -> :generic
  end
end

defp traffic_class_for_event(_event), do: :generic
|
||||||
|
|
||||||
|
# Emits a single-increment counter describing an ingest outcome.
defp emit_ingest_result(metadata, outcome, reason) do
  tags = Map.merge(metadata, %{outcome: outcome, reason: normalize_reason(reason)})
  Telemetry.emit([:parrhesia, :ingest, :result], %{count: 1}, tags)
end

# Emits a single-increment counter for query/count outcomes. When no explicit
# reason is given, the outcome itself doubles as the reason.
defp emit_query_result(metadata, outcome, reason \\ nil) do
  tags = Map.merge(metadata, %{outcome: outcome, reason: normalize_reason(reason || outcome)})
  Telemetry.emit([:parrhesia, :query, :result], %{count: 1}, tags)
end

# Keeps telemetry reason tags low-cardinality: atoms and binaries pass through,
# anything else collapses to :unknown.
#
# Fix: the original placed the `nil -> :none` clause AFTER the `is_atom/1`
# clause. Since nil IS an atom, that clause was unreachable and nil leaked
# through as a tag value. Matching nil first restores the clearly intended
# :none mapping; all other inputs behave exactly as before.
defp normalize_reason(nil), do: :none
defp normalize_reason(reason) when is_atom(reason), do: reason
defp normalize_reason(reason) when is_binary(reason), do: reason
defp normalize_reason(_reason), do: :unknown
|
||||||
|
|
||||||
|
# Extracts the required `RequestContext` from opts. Anything else - including a
# missing :context key - is reported as a malformed call.
defp fetch_context(opts) do
  with %RequestContext{} = context <- Keyword.get(opts, :context) do
    {:ok, context}
  else
    _other -> {:error, :invalid_context}
  end
end

# Options forwarded to the NIP-43 helpers: the caller context, this relay's URL,
# and any identity/path overrides supplied by the caller.
defp nip43_opts(opts, %RequestContext{} = context) do
  identity_overrides = Keyword.take(opts, [:path, :private_key, :configured_private_key])

  [context: context, relay_url: Application.get_env(:parrhesia, :relay_url)] ++
    identity_overrides
end
|
||||||
|
|
||||||
|
# Maps a rejection reason to a relay OK-style, machine-readable message.
# Prefixes follow the NIP-01 OK conventions: "duplicate:", "invalid:",
# "blocked:", "error:".
defp error_message_for_publish_failure(:duplicate_event),
  do: "duplicate: event already stored"

defp error_message_for_publish_failure(:event_too_large),
  do: "invalid: event exceeds max event size"

defp error_message_for_publish_failure(:ephemeral_events_disabled),
  do: "blocked: ephemeral events are disabled"

# Policy-originated reasons are rendered by the policy module itself so wording
# stays consistent with EventPolicy's own checks. The guard enumerates the
# closed set of reasons EventPolicy.error_message/1 is expected to handle.
defp error_message_for_publish_failure(reason)
     when reason in [
            :auth_required,
            :pubkey_not_allowed,
            :restricted_giftwrap,
            :sync_write_not_allowed,
            :protected_event_requires_auth,
            :protected_event_pubkey_mismatch,
            :pow_below_minimum,
            :pubkey_banned,
            :event_banned,
            :media_metadata_tags_exceeded,
            :media_metadata_tag_value_too_large,
            :media_metadata_url_too_long,
            :media_metadata_invalid_url,
            :media_metadata_invalid_hash,
            :media_metadata_invalid_mime,
            :media_metadata_mime_not_allowed,
            :media_metadata_unsupported_version,
            :push_notification_relay_tags_exceeded,
            :push_notification_payload_too_large,
            :push_notification_replay_window_exceeded,
            :push_notification_missing_expiration,
            :push_notification_expiration_too_far,
            :push_notification_server_recipients_exceeded
          ],
     do: EventPolicy.error_message(reason)

# Binary reasons are assumed to already be fully formed OK messages.
defp error_message_for_publish_failure(reason) when is_binary(reason), do: reason
# Last resort: never crash while rendering a rejection.
defp error_message_for_publish_failure(reason), do: "error: #{inspect(reason)}"
|
||||||
|
|
||||||
|
# Rejects events whose JSON encoding exceeds the configured byte cap.
defp validate_event_payload_size(event, max_event_bytes)
     when is_map(event) and is_integer(max_event_bytes) and max_event_bytes > 0 do
  encoded_size = event |> JSON.encode!() |> byte_size()

  case encoded_size <= max_event_bytes do
    true -> :ok
    false -> {:error, :event_too_large}
  end
end

# A malformed limit (or non-map event) never blocks ingestion here; later
# validation stages handle malformed events.
defp validate_event_payload_size(_event, _max_event_bytes), do: :ok

# Resolves the effective byte cap: per-call override first, then app config.
defp max_event_bytes(opts) do
  opts
  |> Keyword.get(:max_event_bytes, configured_max_event_bytes())
  |> normalize_max_event_bytes()
end

# Anything other than a positive integer falls back to the configured value.
defp normalize_max_event_bytes(value) when is_integer(value) and value > 0, do: value
defp normalize_max_event_bytes(_value), do: configured_max_event_bytes()

defp configured_max_event_bytes do
  limits = Application.get_env(:parrhesia, :limits, [])
  Keyword.get(limits, :max_event_bytes, @default_max_event_bytes)
end
|
||||||
|
|
||||||
|
# NIP-01 ephemeral range: 20000 <= kind < 30000.
defp ephemeral_kind?(kind) when is_integer(kind), do: kind in 20_000..29_999
defp ephemeral_kind?(_kind), do: false

# Policy toggle; ephemeral events are accepted unless explicitly disabled.
defp accept_ephemeral_events? do
  policies = Application.get_env(:parrhesia, :policies, [])
  Keyword.get(policies, :accept_ephemeral_events, true)
end
|
||||||
|
end
|
||||||
22
lib/parrhesia/api/events/publish_result.ex
Normal file
22
lib/parrhesia/api/events/publish_result.ex
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
defmodule Parrhesia.API.Events.PublishResult do
  @moduledoc """
  Result shape for event publish attempts.

  This mirrors relay `OK` semantics:

  - `accepted: true` means the event was accepted
  - `accepted: false` means the event was rejected or identified as a duplicate

  The surrounding call still returns `{:ok, result}` in both cases so callers can surface the
  rejection message without treating it as a transport or process failure.

  Fields:

  - `event_id` - the `"id"` of the submitted event (`""` when the event had none)
  - `accepted` - the relay `OK` flag
  - `message` - OK-style message, e.g. `"ok: event stored"` or `"duplicate: ..."`
  - `reason` - the raw rejection reason term, or `nil` when accepted
  """

  defstruct [:event_id, :accepted, :message, :reason]

  @type t :: %__MODULE__{
          event_id: String.t(),
          accepted: boolean(),
          message: String.t(),
          reason: term()
        }
end
|
||||||
298
lib/parrhesia/api/identity.ex
Normal file
298
lib/parrhesia/api/identity.ex
Normal file
@@ -0,0 +1,298 @@
|
|||||||
|
defmodule Parrhesia.API.Identity do
  @moduledoc """
  Server-auth identity management.

  Parrhesia uses a single server identity for flows that need the relay to sign events or
  prove control of a pubkey.

  Identity resolution follows this order:

  1. `opts[:private_key]` or `opts[:configured_private_key]`
  2. `Application.get_env(:parrhesia, :identity)`
  3. the persisted file on disk

  Supported options across this module:

  - `:path` - overrides the identity file path
  - `:private_key` / `:configured_private_key` - uses an explicit hex secret key

  A configured private key is treated as read-only input and therefore cannot be rotated.
  """

  alias Parrhesia.API.Auth

  @typedoc """
  Public identity metadata returned to callers.
  """
  @type identity_metadata :: %{
          pubkey: String.t(),
          source: :configured | :persisted | :generated | :imported
        }

  @doc """
  Returns the current server identity metadata.

  This does not generate a new identity. If no configured or persisted identity exists, it
  returns `{:error, :identity_not_found}`.
  """
  @spec get(keyword()) :: {:ok, identity_metadata()} | {:error, term()}
  def get(opts \\ []) do
    with {:ok, identity} <- fetch_existing_identity(opts) do
      # Strip the secret key before handing the identity back to the caller.
      {:ok, public_identity(identity)}
    end
  end

  @doc """
  Returns the current identity, generating and persisting one when necessary.
  """
  @spec ensure(keyword()) :: {:ok, identity_metadata()} | {:error, term()}
  def ensure(opts \\ []) do
    with {:ok, identity} <- ensure_identity(opts) do
      {:ok, public_identity(identity)}
    end
  end

  @doc """
  Imports an explicit secret key and persists it as the server identity.

  The input map must contain `:secret_key` or `"secret_key"` as a 64-character lowercase or
  uppercase hex string.
  """
  @spec import(map(), keyword()) :: {:ok, identity_metadata()} | {:error, term()}
  def import(identity, opts \\ [])

  def import(identity, opts) when is_map(identity) do
    # Validate and normalize the key first; only a decodable key is persisted.
    with {:ok, secret_key} <- fetch_secret_key(identity),
         {:ok, normalized_identity} <- build_identity(secret_key, :imported),
         :ok <- persist_identity(normalized_identity, opts) do
      {:ok, public_identity(normalized_identity)}
    end
  end

  def import(_identity, _opts), do: {:error, :invalid_identity}

  @doc """
  Generates and persists a fresh server identity.

  Rotation is rejected with `{:error, :configured_identity_cannot_rotate}` when the active
  identity comes from configuration rather than the persisted file.
  """
  @spec rotate(keyword()) :: {:ok, identity_metadata()} | {:error, term()}
  def rotate(opts \\ []) do
    with :ok <- ensure_rotation_allowed(opts),
         {:ok, identity} <- generate_identity(:generated),
         :ok <- persist_identity(identity, opts) do
      {:ok, public_identity(identity)}
    end
  end

  @doc """
  Signs an event with the current server identity.

  The incoming event must already include the fields required to compute a Nostr id:

  - `"created_at"`
  - `"kind"`
  - `"tags"`
  - `"content"`

  On success the returned event includes `"pubkey"`, `"id"`, and `"sig"`.
  """
  @spec sign_event(map(), keyword()) :: {:ok, map()} | {:error, term()}
  def sign_event(event, opts \\ [])

  def sign_event(event, opts) when is_map(event) and is_list(opts) do
    # ensure_identity/1 may generate + persist an identity as a side effect.
    with :ok <- validate_signable_event(event),
         {:ok, identity} <- ensure_identity(opts),
         signed_event <- attach_signature(event, identity) do
      {:ok, signed_event}
    end
  end

  def sign_event(_event, _opts), do: {:error, :invalid_event}

  @doc """
  Returns the default filesystem path for the persisted server identity.
  """
  def default_path do
    Path.join([default_data_dir(), "server_identity.json"])
  end

  # Returns the existing identity, or generates + persists one when nothing is
  # configured or persisted yet. Other read errors are passed through untouched.
  defp ensure_identity(opts) do
    case fetch_existing_identity(opts) do
      {:ok, identity} ->
        {:ok, identity}

      {:error, :identity_not_found} ->
        with {:ok, identity} <- generate_identity(:generated),
             :ok <- persist_identity(identity, opts) do
          {:ok, identity}
        end

      {:error, reason} ->
        {:error, reason}
    end
  end

  # Resolution order: explicit/configured key wins over the persisted file.
  defp fetch_existing_identity(opts) do
    if configured_private_key = configured_private_key(opts) do
      build_identity(configured_private_key, :configured)
    else
      read_persisted_identity(opts)
    end
  end

  # A configured key is read-only input, so rotation is refused outright.
  defp ensure_rotation_allowed(opts) do
    if configured_private_key(opts) do
      {:error, :configured_identity_cannot_rotate}
    else
      :ok
    end
  end

  # Checks the minimal field set needed to compute a Nostr event id.
  # Note: Map.get(event, "content", "") defaults to "", so a missing content
  # key still passes this check.
  defp validate_signable_event(event) do
    signable =
      is_integer(Map.get(event, "created_at")) and
        is_integer(Map.get(event, "kind")) and
        is_list(Map.get(event, "tags")) and
        is_binary(Map.get(event, "content", ""))

    if signable, do: :ok, else: {:error, :invalid_event}
  end

  # Computes the event id and Schnorr signature and attaches both.
  #
  # A 128-char zero "sig" placeholder is set before the id is computed.
  # NOTE(review): NIP-01 event ids do not cover the signature, so the
  # placeholder is presumably ignored by Auth.compute_event_id/1 - confirm.
  defp attach_signature(event, identity) do
    unsigned_event =
      event
      |> Map.put("pubkey", identity.pubkey)
      |> Map.put("sig", String.duplicate("0", 128))

    event_id =
      unsigned_event
      |> Auth.compute_event_id()

    # The hex id is decoded to raw bytes before signing, then the signature is
    # re-encoded as lowercase hex for the wire format.
    signature =
      event_id
      |> Base.decode16!(case: :lower)
      |> Secp256k1.schnorr_sign(identity.secret_key)
      |> Base.encode16(case: :lower)

    unsigned_event
    |> Map.put("id", event_id)
    |> Map.put("sig", signature)
  end

  # Loads and decodes the persisted identity file. A missing file maps to the
  # dedicated :identity_not_found error so callers can trigger generation.
  defp read_persisted_identity(opts) do
    path = identity_path(opts)

    case File.read(path) do
      {:ok, payload} ->
        with {:ok, decoded} <- JSON.decode(payload),
             {:ok, secret_key} <- fetch_secret_key(decoded),
             {:ok, identity} <- build_identity(secret_key, :persisted) do
          {:ok, identity}
        else
          {:error, reason} -> {:error, reason}
        end

      {:error, :enoent} ->
        {:error, :identity_not_found}

      {:error, reason} ->
        {:error, reason}
    end
  end

  # Atomically persists the identity: write to a temp file, then rename over the
  # target so readers never observe a partially written file. The temp file is
  # cleaned up on failure (best effort).
  # NOTE(review): the secret key is written with default file permissions -
  # consider File.chmod(path, 0o600) after the rename. TODO confirm deployment
  # expectations.
  defp persist_identity(identity, opts) do
    path = identity_path(opts)
    temp_path = path <> ".tmp"

    with :ok <- File.mkdir_p(Path.dirname(path)),
         :ok <- File.write(temp_path, JSON.encode!(persisted_identity(identity))),
         :ok <- File.rename(temp_path, path) do
      :ok
    else
      {:error, reason} ->
        _ = File.rm(temp_path)
        {:error, reason}
    end
  end

  # On-disk JSON shape: hex-encoded secret key plus the derived pubkey.
  defp persisted_identity(identity) do
    %{
      "secret_key" => Base.encode16(identity.secret_key, case: :lower),
      "pubkey" => identity.pubkey
    }
  end

  # Generates a fresh x-only secp256k1 keypair.
  # NOTE(review): the broad rescue collapses any failure (including programming
  # bugs) into :identity_generation_failed.
  defp generate_identity(source) do
    {secret_key, pubkey} = Secp256k1.keypair(:xonly)

    {:ok,
     %{
       secret_key: secret_key,
       pubkey: Base.encode16(pubkey, case: :lower),
       source: source
     }}
  rescue
    _error -> {:error, :identity_generation_failed}
  end

  # Decodes a hex secret key and derives its x-only pubkey, producing the
  # internal identity map. Any Secp256k1 failure is reported as an invalid key.
  defp build_identity(secret_key_hex, source) when is_binary(secret_key_hex) do
    with {:ok, secret_key} <- decode_secret_key(secret_key_hex),
         pubkey <- Secp256k1.pubkey(secret_key, :xonly) do
      {:ok,
       %{
         secret_key: secret_key,
         pubkey: Base.encode16(pubkey, case: :lower),
         source: source
       }}
    end
  rescue
    _error -> {:error, :invalid_secret_key}
  end

  # Accepts 64 hex chars in either case: downcasing first lets Base.decode16/2
  # run in strict :lower mode, and the <<_::256>> match enforces exactly 32 bytes.
  defp decode_secret_key(secret_key_hex) when is_binary(secret_key_hex) do
    normalized = String.downcase(secret_key_hex)

    case Base.decode16(normalized, case: :lower) do
      {:ok, <<_::256>> = secret_key} -> {:ok, secret_key}
      _other -> {:error, :invalid_secret_key}
    end
  end

  # Accepts either the atom or string form of the secret_key field.
  defp fetch_secret_key(identity) when is_map(identity) do
    case Map.get(identity, :secret_key) || Map.get(identity, "secret_key") do
      secret_key when is_binary(secret_key) -> {:ok, secret_key}
      _other -> {:error, :invalid_identity}
    end
  end

  # Per-call override first, then the :identity app config.
  defp configured_private_key(opts) do
    opts[:private_key] || opts[:configured_private_key] || config_value(:private_key)
  end

  # Per-call path override, then configured path, then the XDG default.
  defp identity_path(opts) do
    opts[:path] || config_value(:path) || default_path()
  end

  # Public projection: never exposes the secret key.
  defp public_identity(identity) do
    %{
      pubkey: identity.pubkey,
      source: identity.source
    }
  end

  defp config_value(key) do
    :parrhesia
    |> Application.get_env(:identity, [])
    |> Keyword.get(key)
  end

  # XDG-style data directory: $XDG_DATA_HOME/parrhesia or ~/.local/share/parrhesia.
  defp default_data_dir do
    base_dir =
      System.get_env("XDG_DATA_HOME") ||
        Path.join(System.user_home!(), ".local/share")

    Path.join(base_dir, "parrhesia")
  end
end
|
||||||
25
lib/parrhesia/api/identity/manager.ex
Normal file
25
lib/parrhesia/api/identity/manager.ex
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
defmodule Parrhesia.API.Identity.Manager do
  @moduledoc false

  # Boot-time process that eagerly materializes the server identity so the first
  # request needing a signature does not pay the generation/disk cost.

  use GenServer

  alias Parrhesia.API.Identity

  require Logger

  def start_link(opts \\ []) do
    GenServer.start_link(__MODULE__, opts, name: __MODULE__)
  end

  @impl true
  def init(_opts) do
    # Identity failure is deliberately non-fatal: the process still starts so
    # the supervision tree comes up, and later calls to Identity.ensure/1 will
    # retry the generation/persistence on demand.
    case Identity.ensure() do
      {:ok, _identity} ->
        {:ok, %{}}

      {:error, reason} ->
        Logger.error("failed to ensure server identity: #{inspect(reason)}")
        {:ok, %{}}
    end
  end
end
|
||||||
44
lib/parrhesia/api/request_context.ex
Normal file
44
lib/parrhesia/api/request_context.ex
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
defmodule Parrhesia.API.RequestContext do
  @moduledoc """
  Shared request context used across API and policy surfaces.

  This struct carries caller identity and transport metadata through authorization and storage
  boundaries.

  The most important field for external callers is `authenticated_pubkeys`. For example:

  - `Parrhesia.API.Events` uses it for read and write policy checks
  - `Parrhesia.API.Stream` uses it for subscription authorization
  - `Parrhesia.API.ACL` uses it when evaluating protected sync traffic
  """

  defstruct authenticated_pubkeys: MapSet.new(),
            actor: nil,
            caller: :local,
            remote_ip: nil,
            subscription_id: nil,
            peer_id: nil,
            transport_identity: nil,
            metadata: %{}

  @type t :: %__MODULE__{
          authenticated_pubkeys: MapSet.t(String.t()),
          actor: term(),
          caller: atom(),
          remote_ip: String.t() | nil,
          subscription_id: String.t() | nil,
          peer_id: String.t() | nil,
          transport_identity: map() | nil,
          metadata: map()
        }

  @doc """
  Merges arbitrary metadata into the context.

  Existing keys are overwritten by the incoming map.
  """
  @spec put_metadata(t(), map()) :: t()
  def put_metadata(%__MODULE__{metadata: existing} = context, incoming) when is_map(incoming) do
    # Map.merge/2 favors the second argument, so incoming keys win.
    %__MODULE__{context | metadata: Map.merge(existing, incoming)}
  end
end
|
||||||
121
lib/parrhesia/api/stream.ex
Normal file
121
lib/parrhesia/api/stream.ex
Normal file
@@ -0,0 +1,121 @@
|
|||||||
|
defmodule Parrhesia.API.Stream do
|
||||||
|
@moduledoc """
|
||||||
|
In-process subscription API with relay-equivalent catch-up and live fanout semantics.
|
||||||
|
|
||||||
|
Subscriptions are process-local bridges. After subscribing, the caller receives messages in
|
||||||
|
the same order a relay client would expect:
|
||||||
|
|
||||||
|
- `{:parrhesia, :event, ref, subscription_id, event}` for catch-up and live events
|
||||||
|
- `{:parrhesia, :eose, ref, subscription_id}` after the initial replay finishes
|
||||||
|
|
||||||
|
This API requires a `Parrhesia.API.RequestContext` so read policies are applied exactly as
|
||||||
|
they would be for a transport-backed subscriber.
|
||||||
|
"""
|
||||||
|
|
||||||
|
alias Parrhesia.API.Events
|
||||||
|
alias Parrhesia.API.RequestContext
|
||||||
|
alias Parrhesia.API.Stream.Subscription
|
||||||
|
alias Parrhesia.Policy.EventPolicy
|
||||||
|
alias Parrhesia.Protocol.Filter
|
||||||
|
|
||||||
|
@doc """
Starts an in-process subscription for a subscriber pid.

`opts[:context]` must be a `Parrhesia.API.RequestContext`.

On success the returned reference is both:

- the subscription handle used by `unsubscribe/1`
- the value embedded in emitted subscriber messages
"""
@spec subscribe(pid(), String.t(), [map()], keyword()) :: {:ok, reference()} | {:error, term()}
def subscribe(subscriber, subscription_id, filters, opts \\ [])

def subscribe(subscriber, subscription_id, filters, opts)
    when is_pid(subscriber) and is_binary(subscription_id) and is_list(filters) and
           is_list(opts) do
  # Validation and read authorization run exactly as they would for a
  # transport-backed REQ, before any subscription process is started.
  with {:ok, context} <- fetch_context(opts),
       :ok <- Filter.validate_filters(filters),
       :ok <-
         EventPolicy.authorize_read(
           filters,
           context.authenticated_pubkeys,
           stream_context(context, subscription_id)
         ) do
    # The ref is the caller-facing handle and the tag in subscriber messages.
    ref = make_ref()

    case DynamicSupervisor.start_child(
           Parrhesia.API.Stream.Supervisor,
           {Subscription,
            ref: ref, subscriber: subscriber, subscription_id: subscription_id, filters: filters}
         ) do
      {:ok, pid} ->
        # Hands off to finalize_subscription/4 for catch-up replay and live
        # attach. NOTE(review): finalize_subscription/4 is defined outside this
        # view; the replay/EOSE behavior is inferred from the moduledoc contract.
        finalize_subscription(pid, ref, filters, stream_context(context, subscription_id))

      {:error, reason} ->
        {:error, reason}
    end
  end
end

# Wrong argument shapes are caller bugs, reported uniformly.
def subscribe(_subscriber, _subscription_id, _filters, _opts),
  do: {:error, :invalid_subscription}
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Stops a subscription previously created with `subscribe/4`.
|
||||||
|
|
||||||
|
This function is idempotent. Unknown or already-stopped references return `:ok`.
|
||||||
|
"""
|
||||||
|
@spec unsubscribe(reference()) :: :ok
|
||||||
|
def unsubscribe(ref) when is_reference(ref) do
|
||||||
|
case Registry.lookup(Parrhesia.API.Stream.Registry, ref) do
|
||||||
|
[{pid, _value}] ->
|
||||||
|
try do
|
||||||
|
:ok = GenServer.stop(pid, :normal)
|
||||||
|
catch
|
||||||
|
:exit, _reason -> :ok
|
||||||
|
end
|
||||||
|
|
||||||
|
:ok
|
||||||
|
|
||||||
|
[] ->
|
||||||
|
:ok
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def unsubscribe(_ref), do: :ok
|
||||||
|
|
||||||
|
defp fetch_context(opts) do
|
||||||
|
case Keyword.get(opts, :context) do
|
||||||
|
%RequestContext{} = context -> {:ok, context}
|
||||||
|
_other -> {:error, :invalid_context}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp finalize_subscription(pid, ref, filters, context) do
|
||||||
|
with {:ok, initial_events} <-
|
||||||
|
Events.query(filters,
|
||||||
|
context: context,
|
||||||
|
validate_filters?: false,
|
||||||
|
authorize_read?: false
|
||||||
|
),
|
||||||
|
:ok <- Subscription.deliver_initial(pid, initial_events) do
|
||||||
|
{:ok, ref}
|
||||||
|
else
|
||||||
|
{:error, reason} ->
|
||||||
|
_ = safe_stop_subscription(pid)
|
||||||
|
{:error, reason}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp safe_stop_subscription(pid) do
|
||||||
|
GenServer.stop(pid, :shutdown)
|
||||||
|
:ok
|
||||||
|
catch
|
||||||
|
:exit, _reason -> :ok
|
||||||
|
end
|
||||||
|
|
||||||
|
defp stream_context(%RequestContext{} = context, subscription_id) do
|
||||||
|
%RequestContext{context | subscription_id: subscription_id}
|
||||||
|
end
|
||||||
|
end
|
||||||
192
lib/parrhesia/api/stream/subscription.ex
Normal file
192
lib/parrhesia/api/stream/subscription.ex
Normal file
@@ -0,0 +1,192 @@
|
|||||||
|
defmodule Parrhesia.API.Stream.Subscription do
  # Process backing one in-process stream subscription (see Parrhesia.API.Stream).
  # Bridges relay-style catch-up + live fanout to a subscriber pid: live events that
  # arrive before the initial catch-up batch has been delivered are buffered, then
  # flushed after the EOSE marker so the subscriber observes relay-equivalent ordering.
  @moduledoc false

  # :temporary — a crashed subscription is not restarted; the subscriber is
  # notified via the {:parrhesia, :closed, ...} message sent from terminate/2.
  use GenServer, restart: :temporary

  alias Parrhesia.Protocol.Filter
  alias Parrhesia.Subscriptions.Index
  alias Parrhesia.Telemetry

  defstruct [
    # Opaque handle embedded in every message sent to the subscriber.
    :ref,
    # Pid that receives {:parrhesia, ...} messages.
    :subscriber,
    # Monitor on the subscriber; a :DOWN for this ref stops the process.
    :subscriber_monitor_ref,
    :subscription_id,
    :filters,
    # false until deliver_initial/2 has flushed catch-up events + EOSE.
    ready?: false,
    # Live events received while ready? is false, newest first (prepend order).
    buffered_events: []
  ]

  @type t :: %__MODULE__{
          ref: reference(),
          subscriber: pid(),
          subscriber_monitor_ref: reference(),
          subscription_id: String.t(),
          filters: [map()],
          ready?: boolean(),
          buffered_events: [map()]
        }

  @spec start_link(keyword()) :: GenServer.on_start()
  def start_link(opts) when is_list(opts) do
    ref = Keyword.fetch!(opts, :ref)

    # Registered by ref so Parrhesia.API.Stream.unsubscribe/1 can find the process.
    GenServer.start_link(__MODULE__, opts, name: via_tuple(ref))
  end

  # Synchronously delivers the catch-up batch; flips the process into live mode.
  @spec deliver_initial(GenServer.server(), [map()]) :: :ok | {:error, term()}
  def deliver_initial(server, initial_events) when is_list(initial_events) do
    GenServer.call(server, {:deliver_initial, initial_events})
  end

  @impl true
  def init(opts) do
    # Validates the required options and registers this process in the
    # subscription index before accepting any traffic; any failure stops init.
    with {:ok, subscriber} <- fetch_subscriber(opts),
         {:ok, subscription_id} <- fetch_subscription_id(opts),
         {:ok, filters} <- fetch_filters(opts),
         :ok <-
           maybe_upsert_index_subscription(subscription_index(opts), subscription_id, filters) do
      # Monitor the subscriber so this process stops when the subscriber dies.
      monitor_ref = Process.monitor(subscriber)

      state = %__MODULE__{
        ref: Keyword.fetch!(opts, :ref),
        subscriber: subscriber,
        subscriber_monitor_ref: monitor_ref,
        subscription_id: subscription_id,
        filters: filters,
        ready?: false,
        buffered_events: []
      }

      Telemetry.emit_process_mailbox_depth(:subscription)
      {:ok, state}
    else
      {:error, reason} -> {:stop, reason}
    end
  end

  @impl true
  def handle_call({:deliver_initial, initial_events}, _from, %__MODULE__{} = state) do
    # Catch-up events + EOSE go out first, then any live events buffered while
    # the caller was running the catch-up query (reversed back to arrival order).
    # NOTE(review): an event present in both the catch-up batch and the buffer
    # would be delivered twice — presumably acceptable under relay semantics;
    # confirm with the fanout source.
    send_initial_events(state, initial_events)

    Enum.each(Enum.reverse(state.buffered_events), fn event ->
      send(state.subscriber, {:parrhesia, :event, state.ref, state.subscription_id, event})
    end)

    {:reply, :ok, %__MODULE__{state | ready?: true, buffered_events: []}}
    |> emit_mailbox_depth()
  end

  @impl true
  def handle_info({:fanout_event, subscription_id, event}, %__MODULE__{} = state)
      when is_binary(subscription_id) and is_map(event) do
    state
    |> handle_fanout_event(subscription_id, event)
    |> emit_mailbox_depth()
  end

  # Subscriber died: stop quietly (reason :normal suppresses the :closed message).
  def handle_info({:DOWN, monitor_ref, :process, subscriber, _reason}, %__MODULE__{} = state)
      when monitor_ref == state.subscriber_monitor_ref and subscriber == state.subscriber do
    {:stop, :normal, state}
    |> emit_mailbox_depth()
  end

  # Unknown messages are dropped so the mailbox cannot accumulate strays.
  def handle_info(_message, %__MODULE__{} = state) do
    {:noreply, state}
    |> emit_mailbox_depth()
  end

  @impl true
  def terminate(reason, %__MODULE__{} = state) do
    # Always deregister from the index; only abnormal exits notify the subscriber.
    :ok = maybe_remove_index_subscription(state.subscription_id)

    if reason not in [:normal, :shutdown] do
      send(state.subscriber, {:parrhesia, :closed, state.ref, state.subscription_id, reason})
    end

    :ok
  end

  # Sends every catch-up event followed by the EOSE marker, mirroring the
  # relay protocol's end-of-stored-events signal.
  defp send_initial_events(state, events) do
    Enum.each(events, fn event ->
      send(state.subscriber, {:parrhesia, :event, state.ref, state.subscription_id, event})
    end)

    send(state.subscriber, {:parrhesia, :eose, state.ref, state.subscription_id})
  end

  defp via_tuple(ref), do: {:via, Registry, {Parrhesia.API.Stream.Registry, ref}}

  # Option extraction helpers — each returns {:ok, value} or a descriptive error
  # so init/1 can stop with a self-describing reason.
  defp fetch_subscriber(opts) do
    case Keyword.get(opts, :subscriber) do
      subscriber when is_pid(subscriber) -> {:ok, subscriber}
      _other -> {:error, :invalid_subscriber}
    end
  end

  defp fetch_subscription_id(opts) do
    case Keyword.get(opts, :subscription_id) do
      subscription_id when is_binary(subscription_id) -> {:ok, subscription_id}
      _other -> {:error, :invalid_subscription_id}
    end
  end

  defp fetch_filters(opts) do
    case Keyword.get(opts, :filters) do
      filters when is_list(filters) -> {:ok, filters}
      _other -> {:error, :invalid_filters}
    end
  end

  # Resolves the subscription index target (pid or registered name, default:
  # Parrhesia.Subscriptions.Index); nil signals an unusable override.
  defp subscription_index(opts) do
    case Keyword.get(opts, :subscription_index, Index) do
      subscription_index when is_pid(subscription_index) or is_atom(subscription_index) ->
        subscription_index

      _other ->
        nil
    end
  end

  defp maybe_upsert_index_subscription(nil, _subscription_id, _filters),
    do: {:error, :subscription_index_unavailable}

  defp maybe_upsert_index_subscription(subscription_index, subscription_id, filters) do
    case Index.upsert(subscription_index, self(), subscription_id, filters) do
      :ok -> :ok
      {:error, reason} -> {:error, reason}
    end
  catch
    # A dead/unreachable index process must not crash init; report it instead.
    :exit, _reason -> {:error, :subscription_index_unavailable}
  end

  defp maybe_remove_index_subscription(subscription_id) do
    :ok = Index.remove(Index, self(), subscription_id)
    :ok
  catch
    # Best-effort cleanup during terminate; ignore an unreachable index.
    :exit, _reason -> :ok
  end

  # Routes a fanout event: ignore other subscriptions and non-matching events;
  # deliver immediately once ready, otherwise buffer (prepend; reversed on flush).
  defp handle_fanout_event(%__MODULE__{} = state, subscription_id, event) do
    cond do
      subscription_id != state.subscription_id ->
        {:noreply, state}

      not Filter.matches_any?(event, state.filters) ->
        {:noreply, state}

      state.ready? ->
        send(state.subscriber, {:parrhesia, :event, state.ref, state.subscription_id, event})
        {:noreply, state}

      true ->
        buffered_events = [event | state.buffered_events]
        {:noreply, %__MODULE__{state | buffered_events: buffered_events}}
    end
  end

  # Pass-through telemetry tap: emits mailbox depth and returns the callback result.
  defp emit_mailbox_depth(result) do
    Telemetry.emit_process_mailbox_depth(:subscription)
    result
  end
end
|
||||||
170
lib/parrhesia/api/sync.ex
Normal file
170
lib/parrhesia/api/sync.ex
Normal file
@@ -0,0 +1,170 @@
|
|||||||
|
defmodule Parrhesia.API.Sync do
  @moduledoc """
  Sync server control-plane API.

  This module manages outbound relay sync definitions and exposes runtime status for each
  configured sync worker.

  The main entrypoint is `put_server/2`. Accepted server maps are normalized into a stable
  internal shape and persisted by the sync manager. The expected input shape is:

  ```elixir
  %{
    "id" => "tribes-primary",
    "url" => "wss://relay-a.example/relay",
    "enabled?" => true,
    "auth_pubkey" => "...64 hex chars...",
    "filters" => [%{"kinds" => [5000]}],
    "mode" => "req_stream",
    "overlap_window_seconds" => 300,
    "auth" => %{"type" => "nip42"},
    "tls" => %{
      "mode" => "required",
      "hostname" => "relay-a.example",
      "pins" => [%{"type" => "spki_sha256", "value" => "..."}]
    },
    "metadata" => %{}
  }
  ```

  Most functions accept `:manager` or `:name` in `opts` to target a non-default manager.
  """

  alias Parrhesia.API.Sync.Manager

  @typedoc """
  Normalized sync server configuration returned by the sync manager.
  """
  @type server :: map()

  @doc """
  Creates or replaces a sync server definition.
  """
  @spec put_server(map(), keyword()) :: {:ok, server()} | {:error, term()}
  def put_server(server, opts \\ [])

  def put_server(server, opts) when is_map(server) and is_list(opts) do
    Manager.put_server(manager_name(opts), server)
  end

  def put_server(_server, _opts), do: {:error, :invalid_server}

  @doc """
  Removes a stored sync server definition and stops its worker if it is running.
  """
  @spec remove_server(String.t(), keyword()) :: :ok | {:error, term()}
  def remove_server(server_id, opts \\ [])

  def remove_server(server_id, opts) when is_binary(server_id) and is_list(opts) do
    Manager.remove_server(manager_name(opts), server_id)
  end

  def remove_server(_server_id, _opts), do: {:error, :invalid_server_id}

  @doc """
  Fetches a single normalized sync server definition.

  Returns `:error` when the server id is unknown.
  """
  @spec get_server(String.t(), keyword()) :: {:ok, server()} | :error | {:error, term()}
  def get_server(server_id, opts \\ [])

  def get_server(server_id, opts) when is_binary(server_id) and is_list(opts) do
    Manager.get_server(manager_name(opts), server_id)
  end

  def get_server(_server_id, _opts), do: {:error, :invalid_server_id}

  @doc """
  Lists all configured sync servers, including their runtime state.
  """
  @spec list_servers(keyword()) :: {:ok, [server()]} | {:error, term()}
  def list_servers(opts \\ []) when is_list(opts) do
    Manager.list_servers(manager_name(opts))
  end

  @doc """
  Marks a sync server as running and reconciles its worker state.
  """
  @spec start_server(String.t(), keyword()) :: :ok | {:error, term()}
  def start_server(server_id, opts \\ [])

  def start_server(server_id, opts) when is_binary(server_id) and is_list(opts) do
    Manager.start_server(manager_name(opts), server_id)
  end

  def start_server(_server_id, _opts), do: {:error, :invalid_server_id}

  @doc """
  Stops a sync server and records a disconnect timestamp in runtime state.
  """
  @spec stop_server(String.t(), keyword()) :: :ok | {:error, term()}
  def stop_server(server_id, opts \\ [])

  def stop_server(server_id, opts) when is_binary(server_id) and is_list(opts) do
    Manager.stop_server(manager_name(opts), server_id)
  end

  def stop_server(_server_id, _opts), do: {:error, :invalid_server_id}

  @doc """
  Triggers an immediate sync run for a server.
  """
  @spec sync_now(String.t(), keyword()) :: :ok | {:error, term()}
  def sync_now(server_id, opts \\ [])

  def sync_now(server_id, opts) when is_binary(server_id) and is_list(opts) do
    Manager.sync_now(manager_name(opts), server_id)
  end

  def sync_now(_server_id, _opts), do: {:error, :invalid_server_id}

  @doc """
  Returns runtime counters and timestamps for a single sync server.

  Returns `:error` when the server id is unknown.
  """
  @spec server_stats(String.t(), keyword()) :: {:ok, map()} | :error | {:error, term()}
  def server_stats(server_id, opts \\ [])

  def server_stats(server_id, opts) when is_binary(server_id) and is_list(opts) do
    Manager.server_stats(manager_name(opts), server_id)
  end

  def server_stats(_server_id, _opts), do: {:error, :invalid_server_id}

  @doc """
  Returns aggregate counters across all configured sync servers.
  """
  @spec sync_stats(keyword()) :: {:ok, map()} | {:error, term()}
  def sync_stats(opts \\ []) when is_list(opts) do
    Manager.sync_stats(manager_name(opts))
  end

  @doc """
  Returns a health summary for the sync subsystem.
  """
  @spec sync_health(keyword()) :: {:ok, map()} | {:error, term()}
  def sync_health(opts \\ []) when is_list(opts) do
    Manager.sync_health(manager_name(opts))
  end

  @doc """
  Returns the default filesystem path for persisted sync server state.
  """
  @spec default_path() :: String.t()
  def default_path do
    # Path.join/2 is the idiomatic form for exactly two segments.
    Path.join(default_data_dir(), "sync_servers.json")
  end

  # Resolves the manager target: `:manager` wins over `:name`, falling back to
  # the default registered Manager process.
  defp manager_name(opts) do
    opts[:manager] || opts[:name] || Manager
  end

  # XDG-style data directory: $XDG_DATA_HOME/parrhesia, or ~/.local/share/parrhesia
  # when the environment variable is unset.
  defp default_data_dir do
    base_dir =
      System.get_env("XDG_DATA_HOME") ||
        Path.join(System.user_home!(), ".local/share")

    Path.join(base_dir, "parrhesia")
  end
end
|
||||||
938
lib/parrhesia/api/sync/manager.ex
Normal file
938
lib/parrhesia/api/sync/manager.ex
Normal file
@@ -0,0 +1,938 @@
|
|||||||
|
defmodule Parrhesia.API.Sync.Manager do
|
||||||
|
@moduledoc false
|
||||||
|
|
||||||
|
use GenServer
|
||||||
|
|
||||||
|
alias Parrhesia.API.Sync
|
||||||
|
alias Parrhesia.Protocol.Filter
|
||||||
|
alias Parrhesia.Sync.Transport.WebSockexClient
|
||||||
|
alias Parrhesia.Sync.Worker
|
||||||
|
|
||||||
|
require Logger
|
||||||
|
|
||||||
|
@default_overlap_window_seconds 300
|
||||||
|
@default_mode :req_stream
|
||||||
|
@default_auth_type :nip42
|
||||||
|
@default_tls_mode :required
|
||||||
|
@hex64 ~r/\A[0-9a-f]{64}\z/
|
||||||
|
|
||||||
|
def start_link(opts \\ []) do
|
||||||
|
name = Keyword.get(opts, :name, __MODULE__)
|
||||||
|
GenServer.start_link(__MODULE__, opts, name: name)
|
||||||
|
end
|
||||||
|
|
||||||
|
def put_server(name, server), do: GenServer.call(name, {:put_server, server})
|
||||||
|
def remove_server(name, server_id), do: GenServer.call(name, {:remove_server, server_id})
|
||||||
|
def get_server(name, server_id), do: GenServer.call(name, {:get_server, server_id})
|
||||||
|
def list_servers(name), do: GenServer.call(name, :list_servers)
|
||||||
|
def start_server(name, server_id), do: GenServer.call(name, {:start_server, server_id})
|
||||||
|
def stop_server(name, server_id), do: GenServer.call(name, {:stop_server, server_id})
|
||||||
|
def sync_now(name, server_id), do: GenServer.call(name, {:sync_now, server_id})
|
||||||
|
def server_stats(name, server_id), do: GenServer.call(name, {:server_stats, server_id})
|
||||||
|
def sync_stats(name), do: GenServer.call(name, :sync_stats)
|
||||||
|
def sync_health(name), do: GenServer.call(name, :sync_health)
|
||||||
|
|
||||||
|
def runtime_event(name, server_id, kind, attrs \\ %{}) do
|
||||||
|
GenServer.cast(name, {:runtime_event, server_id, kind, attrs})
|
||||||
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def init(opts) do
|
||||||
|
path = Keyword.get(opts, :path, config_path() || Sync.default_path())
|
||||||
|
|
||||||
|
state =
|
||||||
|
load_state(path)
|
||||||
|
|> Map.merge(%{
|
||||||
|
start_workers?: Keyword.get(opts, :start_workers?, config_value(:start_workers?, true)),
|
||||||
|
worker_supervisor: Keyword.get(opts, :worker_supervisor, Parrhesia.Sync.WorkerSupervisor),
|
||||||
|
worker_registry: Keyword.get(opts, :worker_registry, Parrhesia.Sync.WorkerRegistry),
|
||||||
|
transport_module: Keyword.get(opts, :transport_module, WebSockexClient),
|
||||||
|
relay_info_opts: Keyword.get(opts, :relay_info_opts, []),
|
||||||
|
transport_opts: Keyword.get(opts, :transport_opts, [])
|
||||||
|
})
|
||||||
|
|
||||||
|
{:ok, state, {:continue, :bootstrap}}
|
||||||
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def handle_continue(:bootstrap, state) do
|
||||||
|
next_state =
|
||||||
|
if state.start_workers? do
|
||||||
|
state.servers
|
||||||
|
|> Map.keys()
|
||||||
|
|> Enum.reduce(state, fn server_id, acc -> maybe_start_worker(acc, server_id) end)
|
||||||
|
else
|
||||||
|
state
|
||||||
|
end
|
||||||
|
|
||||||
|
{:noreply, next_state}
|
||||||
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def handle_call({:put_server, server}, _from, state) do
|
||||||
|
case normalize_server(server) do
|
||||||
|
{:ok, normalized_server} ->
|
||||||
|
updated_state =
|
||||||
|
state
|
||||||
|
|> stop_worker_if_running(normalized_server.id)
|
||||||
|
|> put_server_state(normalized_server)
|
||||||
|
|> persist_and_reconcile!(normalized_server.id)
|
||||||
|
|
||||||
|
{:reply, {:ok, merged_server(updated_state, normalized_server.id)}, updated_state}
|
||||||
|
|
||||||
|
{:error, reason} ->
|
||||||
|
{:reply, {:error, reason}, state}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_call({:remove_server, server_id}, _from, state) do
|
||||||
|
if Map.has_key?(state.servers, server_id) do
|
||||||
|
next_state =
|
||||||
|
state
|
||||||
|
|> stop_worker_if_running(server_id)
|
||||||
|
|> Map.update!(:servers, &Map.delete(&1, server_id))
|
||||||
|
|> Map.update!(:runtime, &Map.delete(&1, server_id))
|
||||||
|
|
||||||
|
with :ok <- persist_state(next_state) do
|
||||||
|
{:reply, :ok, next_state}
|
||||||
|
end
|
||||||
|
else
|
||||||
|
{:reply, {:error, :not_found}, state}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_call({:get_server, server_id}, _from, state) do
|
||||||
|
case Map.fetch(state.servers, server_id) do
|
||||||
|
{:ok, _server} -> {:reply, {:ok, merged_server(state, server_id)}, state}
|
||||||
|
:error -> {:reply, :error, state}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_call(:list_servers, _from, state) do
|
||||||
|
servers =
|
||||||
|
state.servers
|
||||||
|
|> Map.keys()
|
||||||
|
|> Enum.sort()
|
||||||
|
|> Enum.map(&merged_server(state, &1))
|
||||||
|
|
||||||
|
{:reply, {:ok, servers}, state}
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_call({:start_server, server_id}, _from, state) do
|
||||||
|
case Map.fetch(state.runtime, server_id) do
|
||||||
|
{:ok, runtime} ->
|
||||||
|
next_state =
|
||||||
|
state
|
||||||
|
|> put_runtime(server_id, %{runtime | state: :running, last_error: nil})
|
||||||
|
|> persist_and_reconcile!(server_id)
|
||||||
|
|
||||||
|
{:reply, :ok, next_state}
|
||||||
|
|
||||||
|
:error ->
|
||||||
|
{:reply, {:error, :not_found}, state}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_call({:stop_server, server_id}, _from, state) do
|
||||||
|
case Map.fetch(state.runtime, server_id) do
|
||||||
|
{:ok, runtime} ->
|
||||||
|
next_runtime =
|
||||||
|
runtime
|
||||||
|
|> Map.put(:state, :stopped)
|
||||||
|
|> Map.put(:connected?, false)
|
||||||
|
|> Map.put(:last_disconnected_at, now())
|
||||||
|
|
||||||
|
next_state =
|
||||||
|
state
|
||||||
|
|> stop_worker_if_running(server_id)
|
||||||
|
|> put_runtime(server_id, next_runtime)
|
||||||
|
|
||||||
|
with :ok <- persist_state(next_state) do
|
||||||
|
{:reply, :ok, next_state}
|
||||||
|
end
|
||||||
|
|
||||||
|
:error ->
|
||||||
|
{:reply, {:error, :not_found}, state}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_call({:sync_now, server_id}, _from, state) do
|
||||||
|
case {Map.has_key?(state.runtime, server_id), state.start_workers?,
|
||||||
|
lookup_worker(state, server_id)} do
|
||||||
|
{false, _start_workers?, _worker_pid} ->
|
||||||
|
{:reply, {:error, :not_found}, state}
|
||||||
|
|
||||||
|
{true, true, worker_pid} when is_pid(worker_pid) ->
|
||||||
|
Worker.sync_now(worker_pid)
|
||||||
|
{:reply, :ok, state}
|
||||||
|
|
||||||
|
{true, true, nil} ->
|
||||||
|
next_state =
|
||||||
|
state
|
||||||
|
|> put_in([:runtime, server_id, :state], :running)
|
||||||
|
|> persist_and_reconcile!(server_id)
|
||||||
|
|
||||||
|
{:reply, :ok, next_state}
|
||||||
|
|
||||||
|
{true, false, _worker_pid} ->
|
||||||
|
next_state =
|
||||||
|
apply_runtime_event(state, server_id, :sync_started, %{})
|
||||||
|
|> apply_runtime_event(server_id, :sync_completed, %{})
|
||||||
|
|
||||||
|
with :ok <- persist_state(next_state) do
|
||||||
|
{:reply, :ok, next_state}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_call({:server_stats, server_id}, _from, state) do
|
||||||
|
case Map.fetch(state.runtime, server_id) do
|
||||||
|
{:ok, runtime} -> {:reply, {:ok, runtime_stats(runtime)}, state}
|
||||||
|
:error -> {:reply, :error, state}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_call(:sync_stats, _from, state), do: {:reply, {:ok, aggregate_stats(state)}, state}
|
||||||
|
def handle_call(:sync_health, _from, state), do: {:reply, {:ok, health_summary(state)}, state}
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def handle_cast({:runtime_event, server_id, kind, attrs}, state) do
|
||||||
|
next_state =
|
||||||
|
state
|
||||||
|
|> apply_runtime_event(server_id, kind, attrs)
|
||||||
|
|> persist_state_if_known_server(server_id)
|
||||||
|
|
||||||
|
{:noreply, next_state}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp persist_state_if_known_server(state, server_id) do
|
||||||
|
if Map.has_key?(state.runtime, server_id) do
|
||||||
|
case persist_state(state) do
|
||||||
|
:ok ->
|
||||||
|
state
|
||||||
|
|
||||||
|
{:error, reason} ->
|
||||||
|
Logger.warning("failed to persist sync runtime for #{server_id}: #{inspect(reason)}")
|
||||||
|
state
|
||||||
|
end
|
||||||
|
else
|
||||||
|
state
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp put_server_state(state, server) do
|
||||||
|
runtime =
|
||||||
|
case Map.get(state.runtime, server.id) do
|
||||||
|
nil -> default_runtime(server)
|
||||||
|
existing_runtime -> existing_runtime
|
||||||
|
end
|
||||||
|
|
||||||
|
%{
|
||||||
|
state
|
||||||
|
| servers: Map.put(state.servers, server.id, server),
|
||||||
|
runtime: Map.put(state.runtime, server.id, runtime)
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp put_runtime(state, server_id, runtime) do
|
||||||
|
%{state | runtime: Map.put(state.runtime, server_id, runtime)}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp persist_and_reconcile!(state, server_id) do
|
||||||
|
:ok = persist_state(state)
|
||||||
|
reconcile_worker(state, server_id)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp reconcile_worker(state, server_id) do
|
||||||
|
cond do
|
||||||
|
not state.start_workers? ->
|
||||||
|
state
|
||||||
|
|
||||||
|
desired_running?(state, server_id) ->
|
||||||
|
maybe_start_worker(state, server_id)
|
||||||
|
|
||||||
|
true ->
|
||||||
|
stop_worker_if_running(state, server_id)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_start_worker(state, server_id) do
|
||||||
|
cond do
|
||||||
|
not state.start_workers? ->
|
||||||
|
state
|
||||||
|
|
||||||
|
not desired_running?(state, server_id) ->
|
||||||
|
state
|
||||||
|
|
||||||
|
lookup_worker(state, server_id) != nil ->
|
||||||
|
state
|
||||||
|
|
||||||
|
true ->
|
||||||
|
server = Map.fetch!(state.servers, server_id)
|
||||||
|
runtime = Map.fetch!(state.runtime, server_id)
|
||||||
|
|
||||||
|
child_spec = %{
|
||||||
|
id: {:sync_worker, server_id},
|
||||||
|
start:
|
||||||
|
{Worker, :start_link,
|
||||||
|
[
|
||||||
|
[
|
||||||
|
name: via_tuple(server_id, state.worker_registry),
|
||||||
|
server: server,
|
||||||
|
runtime: runtime,
|
||||||
|
manager: self(),
|
||||||
|
transport_module: state.transport_module,
|
||||||
|
relay_info_opts: state.relay_info_opts,
|
||||||
|
transport_opts: state.transport_opts
|
||||||
|
]
|
||||||
|
]},
|
||||||
|
restart: :transient
|
||||||
|
}
|
||||||
|
|
||||||
|
case DynamicSupervisor.start_child(state.worker_supervisor, child_spec) do
|
||||||
|
{:ok, _pid} ->
|
||||||
|
state
|
||||||
|
|
||||||
|
{:error, {:already_started, _pid}} ->
|
||||||
|
state
|
||||||
|
|
||||||
|
{:error, reason} ->
|
||||||
|
Logger.warning("failed to start sync worker #{server_id}: #{inspect(reason)}")
|
||||||
|
state
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp stop_worker_if_running(state, server_id) do
|
||||||
|
if worker_pid = lookup_worker(state, server_id) do
|
||||||
|
_ = Worker.stop(worker_pid)
|
||||||
|
end
|
||||||
|
|
||||||
|
state
|
||||||
|
end
|
||||||
|
|
||||||
|
defp desired_running?(state, server_id) do
|
||||||
|
case Map.fetch(state.runtime, server_id) do
|
||||||
|
{:ok, runtime} -> runtime.state == :running
|
||||||
|
:error -> false
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp lookup_worker(state, server_id) do
|
||||||
|
case Registry.lookup(state.worker_registry, server_id) do
|
||||||
|
[{pid, _value}] -> pid
|
||||||
|
[] -> nil
|
||||||
|
end
|
||||||
|
catch
|
||||||
|
:exit, _reason -> nil
|
||||||
|
end
|
||||||
|
|
||||||
|
defp via_tuple(server_id, registry) do
|
||||||
|
{:via, Registry, {registry, server_id}}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp merged_server(state, server_id) do
|
||||||
|
state.servers
|
||||||
|
|> Map.fetch!(server_id)
|
||||||
|
|> Map.put(:runtime, Map.fetch!(state.runtime, server_id))
|
||||||
|
end
|
||||||
|
|
||||||
|
defp runtime_stats(runtime) do
|
||||||
|
%{
|
||||||
|
"server_id" => runtime.server_id,
|
||||||
|
"state" => Atom.to_string(runtime.state),
|
||||||
|
"connected" => runtime.connected?,
|
||||||
|
"events_received" => runtime.events_received,
|
||||||
|
"events_accepted" => runtime.events_accepted,
|
||||||
|
"events_duplicate" => runtime.events_duplicate,
|
||||||
|
"events_rejected" => runtime.events_rejected,
|
||||||
|
"query_runs" => runtime.query_runs,
|
||||||
|
"subscription_restarts" => runtime.subscription_restarts,
|
||||||
|
"reconnects" => runtime.reconnects,
|
||||||
|
"last_sync_started_at" => runtime.last_sync_started_at,
|
||||||
|
"last_sync_completed_at" => runtime.last_sync_completed_at,
|
||||||
|
"last_remote_eose_at" => runtime.last_remote_eose_at,
|
||||||
|
"last_error" => runtime.last_error,
|
||||||
|
"cursor_created_at" => runtime.cursor_created_at,
|
||||||
|
"cursor_event_id" => runtime.cursor_event_id
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp aggregate_stats(state) do
|
||||||
|
runtimes = Map.values(state.runtime)
|
||||||
|
|
||||||
|
%{
|
||||||
|
"servers_total" => map_size(state.servers),
|
||||||
|
"servers_enabled" => Enum.count(state.servers, fn {_id, server} -> server.enabled? end),
|
||||||
|
"servers_running" => Enum.count(runtimes, &(&1.state == :running)),
|
||||||
|
"servers_connected" => Enum.count(runtimes, & &1.connected?),
|
||||||
|
"events_received" => Enum.reduce(runtimes, 0, &(&1.events_received + &2)),
|
||||||
|
"events_accepted" => Enum.reduce(runtimes, 0, &(&1.events_accepted + &2)),
|
||||||
|
"events_duplicate" => Enum.reduce(runtimes, 0, &(&1.events_duplicate + &2)),
|
||||||
|
"events_rejected" => Enum.reduce(runtimes, 0, &(&1.events_rejected + &2)),
|
||||||
|
"query_runs" => Enum.reduce(runtimes, 0, &(&1.query_runs + &2)),
|
||||||
|
"subscription_restarts" => Enum.reduce(runtimes, 0, &(&1.subscription_restarts + &2)),
|
||||||
|
"reconnects" => Enum.reduce(runtimes, 0, &(&1.reconnects + &2))
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp health_summary(state) do
|
||||||
|
failing_servers =
|
||||||
|
state.runtime
|
||||||
|
|> Enum.flat_map(fn {server_id, runtime} ->
|
||||||
|
if is_binary(runtime.last_error) and runtime.last_error != "" do
|
||||||
|
[%{"id" => server_id, "reason" => runtime.last_error}]
|
||||||
|
else
|
||||||
|
[]
|
||||||
|
end
|
||||||
|
end)
|
||||||
|
|
||||||
|
%{
|
||||||
|
"status" => if(failing_servers == [], do: "ok", else: "degraded"),
|
||||||
|
"servers_total" => map_size(state.servers),
|
||||||
|
"servers_connected" =>
|
||||||
|
Enum.count(state.runtime, fn {_id, runtime} -> runtime.connected? end),
|
||||||
|
"servers_failing" => failing_servers
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp apply_runtime_event(state, server_id, kind, attrs) do
|
||||||
|
case Map.fetch(state.runtime, server_id) do
|
||||||
|
{:ok, runtime} ->
|
||||||
|
updated_runtime = update_runtime_for_event(runtime, kind, attrs)
|
||||||
|
put_runtime(state, server_id, updated_runtime)
|
||||||
|
|
||||||
|
:error ->
|
||||||
|
state
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp update_runtime_for_event(runtime, :connected, _attrs) do
|
||||||
|
runtime
|
||||||
|
|> Map.put(:state, :running)
|
||||||
|
|> Map.put(:connected?, true)
|
||||||
|
|> Map.put(:last_connected_at, now())
|
||||||
|
|> Map.put(:last_error, nil)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp update_runtime_for_event(runtime, :disconnected, attrs) do
|
||||||
|
reason = format_reason(Map.get(attrs, :reason))
|
||||||
|
|
||||||
|
runtime
|
||||||
|
|> Map.put(:connected?, false)
|
||||||
|
|> Map.put(:last_disconnected_at, now())
|
||||||
|
|> Map.update!(:reconnects, &(&1 + 1))
|
||||||
|
|> Map.put(:last_error, reason)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp update_runtime_for_event(runtime, :error, attrs) do
|
||||||
|
Map.put(runtime, :last_error, format_reason(Map.get(attrs, :reason)))
|
||||||
|
end
|
||||||
|
|
||||||
|
defp update_runtime_for_event(runtime, :sync_started, _attrs) do
|
||||||
|
runtime
|
||||||
|
|> Map.put(:last_sync_started_at, now())
|
||||||
|
|> Map.update!(:query_runs, &(&1 + 1))
|
||||||
|
end
|
||||||
|
|
||||||
|
defp update_runtime_for_event(runtime, :sync_completed, _attrs) do
|
||||||
|
timestamp = now()
|
||||||
|
|
||||||
|
runtime
|
||||||
|
|> Map.put(:last_sync_completed_at, timestamp)
|
||||||
|
|> Map.put(:last_eose_at, timestamp)
|
||||||
|
|> Map.put(:last_remote_eose_at, timestamp)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp update_runtime_for_event(runtime, :subscription_restart, _attrs) do
|
||||||
|
Map.update!(runtime, :subscription_restarts, &(&1 + 1))
|
||||||
|
end
|
||||||
|
|
||||||
|
defp update_runtime_for_event(runtime, :cursor_advanced, attrs) do
|
||||||
|
runtime
|
||||||
|
|> Map.put(:cursor_created_at, Map.get(attrs, :created_at))
|
||||||
|
|> Map.put(:cursor_event_id, Map.get(attrs, :event_id))
|
||||||
|
end
|
||||||
|
|
||||||
|
defp update_runtime_for_event(runtime, :event_result, attrs) do
|
||||||
|
event = Map.get(attrs, :event, %{})
|
||||||
|
result = Map.get(attrs, :result)
|
||||||
|
|
||||||
|
runtime
|
||||||
|
|> Map.update!(:events_received, &(&1 + 1))
|
||||||
|
|> Map.put(:last_event_received_at, now())
|
||||||
|
|> increment_result_counter(result)
|
||||||
|
|> maybe_put_last_error(attrs)
|
||||||
|
|> maybe_advance_runtime_cursor(event, result)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp update_runtime_for_event(runtime, _kind, _attrs), do: runtime
|
||||||
|
|
||||||
|
defp increment_result_counter(runtime, :accepted),
|
||||||
|
do: Map.update!(runtime, :events_accepted, &(&1 + 1))
|
||||||
|
|
||||||
|
defp increment_result_counter(runtime, :duplicate),
|
||||||
|
do: Map.update!(runtime, :events_duplicate, &(&1 + 1))
|
||||||
|
|
||||||
|
defp increment_result_counter(runtime, :rejected),
|
||||||
|
do: Map.update!(runtime, :events_rejected, &(&1 + 1))
|
||||||
|
|
||||||
|
defp increment_result_counter(runtime, _result), do: runtime
|
||||||
|
|
||||||
|
defp maybe_put_last_error(runtime, %{reason: nil}), do: runtime
|
||||||
|
|
||||||
|
defp maybe_put_last_error(runtime, attrs),
|
||||||
|
do: Map.put(runtime, :last_error, format_reason(attrs[:reason]))
|
||||||
|
|
||||||
|
defp maybe_advance_runtime_cursor(runtime, event, result)
|
||||||
|
when result in [:accepted, :duplicate] do
|
||||||
|
created_at = Map.get(event, "created_at")
|
||||||
|
event_id = Map.get(event, "id")
|
||||||
|
|
||||||
|
cond do
|
||||||
|
not is_integer(created_at) or not is_binary(event_id) ->
|
||||||
|
runtime
|
||||||
|
|
||||||
|
is_nil(runtime.cursor_created_at) ->
|
||||||
|
runtime
|
||||||
|
|> Map.put(:cursor_created_at, created_at)
|
||||||
|
|> Map.put(:cursor_event_id, event_id)
|
||||||
|
|
||||||
|
created_at > runtime.cursor_created_at ->
|
||||||
|
runtime
|
||||||
|
|> Map.put(:cursor_created_at, created_at)
|
||||||
|
|> Map.put(:cursor_event_id, event_id)
|
||||||
|
|
||||||
|
created_at == runtime.cursor_created_at and event_id > runtime.cursor_event_id ->
|
||||||
|
runtime
|
||||||
|
|> Map.put(:cursor_created_at, created_at)
|
||||||
|
|> Map.put(:cursor_event_id, event_id)
|
||||||
|
|
||||||
|
true ->
|
||||||
|
runtime
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_advance_runtime_cursor(runtime, _event, _result), do: runtime
|
||||||
|
|
||||||
|
defp format_reason(nil), do: nil
|
||||||
|
defp format_reason(reason) when is_binary(reason), do: reason
|
||||||
|
defp format_reason(reason), do: inspect(reason)
|
||||||
|
|
||||||
|
defp load_state(path) do
|
||||||
|
case File.read(path) do
|
||||||
|
{:ok, payload} ->
|
||||||
|
case decode_persisted_state(payload, path) do
|
||||||
|
{:ok, state} ->
|
||||||
|
state
|
||||||
|
|
||||||
|
{:error, reason} ->
|
||||||
|
Logger.warning("failed to load sync state from #{path}: #{inspect(reason)}")
|
||||||
|
empty_state(path)
|
||||||
|
end
|
||||||
|
|
||||||
|
{:error, :enoent} ->
|
||||||
|
empty_state(path)
|
||||||
|
|
||||||
|
{:error, reason} ->
|
||||||
|
Logger.warning("failed to read sync state from #{path}: #{inspect(reason)}")
|
||||||
|
empty_state(path)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp decode_persisted_state(payload, path) do
|
||||||
|
with {:ok, decoded} <- JSON.decode(payload),
|
||||||
|
{:ok, servers} <- decode_servers(Map.get(decoded, "servers", %{})),
|
||||||
|
{:ok, runtime} <- decode_runtime(Map.get(decoded, "runtime", %{}), servers) do
|
||||||
|
{:ok, %{path: path, servers: servers, runtime: runtime}}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp decode_servers(servers) when is_map(servers) do
|
||||||
|
Enum.reduce_while(servers, {:ok, %{}}, fn {_id, server_payload}, {:ok, acc} ->
|
||||||
|
case normalize_server(server_payload) do
|
||||||
|
{:ok, server} -> {:cont, {:ok, Map.put(acc, server.id, server)}}
|
||||||
|
{:error, reason} -> {:halt, {:error, reason}}
|
||||||
|
end
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp decode_servers(_servers), do: {:error, :invalid_servers_state}
|
||||||
|
|
||||||
|
defp decode_runtime(runtime_payload, servers)
|
||||||
|
when is_map(runtime_payload) and is_map(servers) do
|
||||||
|
runtime =
|
||||||
|
Enum.reduce(servers, %{}, fn {server_id, server}, acc ->
|
||||||
|
decoded_runtime =
|
||||||
|
runtime_payload
|
||||||
|
|> Map.get(server_id)
|
||||||
|
|> normalize_runtime(server)
|
||||||
|
|
||||||
|
Map.put(acc, server_id, decoded_runtime)
|
||||||
|
end)
|
||||||
|
|
||||||
|
{:ok, runtime}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp decode_runtime(_runtime_payload, _servers), do: {:error, :invalid_runtime_state}
|
||||||
|
|
||||||
|
defp normalize_runtime(nil, server), do: default_runtime(server)
|
||||||
|
|
||||||
|
defp normalize_runtime(runtime, server) when is_map(runtime) do
|
||||||
|
%{
|
||||||
|
server_id: server.id,
|
||||||
|
state: normalize_runtime_state(fetch_value(runtime, :state)),
|
||||||
|
connected?: fetch_boolean(runtime, :connected?) || false,
|
||||||
|
last_connected_at: fetch_string_or_nil(runtime, :last_connected_at),
|
||||||
|
last_disconnected_at: fetch_string_or_nil(runtime, :last_disconnected_at),
|
||||||
|
last_sync_started_at: fetch_string_or_nil(runtime, :last_sync_started_at),
|
||||||
|
last_sync_completed_at: fetch_string_or_nil(runtime, :last_sync_completed_at),
|
||||||
|
last_event_received_at: fetch_string_or_nil(runtime, :last_event_received_at),
|
||||||
|
last_eose_at: fetch_string_or_nil(runtime, :last_eose_at),
|
||||||
|
reconnect_attempts: fetch_non_neg_integer(runtime, :reconnect_attempts),
|
||||||
|
last_error: fetch_string_or_nil(runtime, :last_error),
|
||||||
|
events_received: fetch_non_neg_integer(runtime, :events_received),
|
||||||
|
events_accepted: fetch_non_neg_integer(runtime, :events_accepted),
|
||||||
|
events_duplicate: fetch_non_neg_integer(runtime, :events_duplicate),
|
||||||
|
events_rejected: fetch_non_neg_integer(runtime, :events_rejected),
|
||||||
|
query_runs: fetch_non_neg_integer(runtime, :query_runs),
|
||||||
|
subscription_restarts: fetch_non_neg_integer(runtime, :subscription_restarts),
|
||||||
|
reconnects: fetch_non_neg_integer(runtime, :reconnects),
|
||||||
|
last_remote_eose_at: fetch_string_or_nil(runtime, :last_remote_eose_at),
|
||||||
|
cursor_created_at: fetch_optional_integer(runtime, :cursor_created_at),
|
||||||
|
cursor_event_id: fetch_string_or_nil(runtime, :cursor_event_id)
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_runtime(_runtime, server), do: default_runtime(server)
|
||||||
|
|
||||||
|
defp persist_state(%{path: path} = state) do
|
||||||
|
temp_path = path <> ".tmp"
|
||||||
|
|
||||||
|
with :ok <- File.mkdir_p(Path.dirname(path)),
|
||||||
|
:ok <- File.write(temp_path, JSON.encode!(encode_state(state))),
|
||||||
|
:ok <- File.rename(temp_path, path) do
|
||||||
|
:ok
|
||||||
|
else
|
||||||
|
{:error, reason} ->
|
||||||
|
_ = File.rm(temp_path)
|
||||||
|
{:error, reason}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp encode_state(state) do
|
||||||
|
%{
|
||||||
|
"version" => 2,
|
||||||
|
"servers" =>
|
||||||
|
Map.new(state.servers, fn {server_id, server} -> {server_id, encode_server(server)} end),
|
||||||
|
"runtime" =>
|
||||||
|
Map.new(state.runtime, fn {server_id, runtime} -> {server_id, encode_runtime(runtime)} end)
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp encode_server(server) do
|
||||||
|
%{
|
||||||
|
"id" => server.id,
|
||||||
|
"url" => server.url,
|
||||||
|
"enabled?" => server.enabled?,
|
||||||
|
"auth_pubkey" => server.auth_pubkey,
|
||||||
|
"filters" => server.filters,
|
||||||
|
"mode" => Atom.to_string(server.mode),
|
||||||
|
"overlap_window_seconds" => server.overlap_window_seconds,
|
||||||
|
"auth" => %{"type" => Atom.to_string(server.auth.type)},
|
||||||
|
"tls" => %{
|
||||||
|
"mode" => Atom.to_string(server.tls.mode),
|
||||||
|
"hostname" => server.tls.hostname,
|
||||||
|
"pins" =>
|
||||||
|
Enum.map(server.tls.pins, fn pin ->
|
||||||
|
%{
|
||||||
|
"type" => Atom.to_string(pin.type),
|
||||||
|
"value" => pin.value
|
||||||
|
}
|
||||||
|
end)
|
||||||
|
},
|
||||||
|
"metadata" => server.metadata
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp encode_runtime(runtime) do
|
||||||
|
%{
|
||||||
|
"server_id" => runtime.server_id,
|
||||||
|
"state" => Atom.to_string(runtime.state),
|
||||||
|
"connected?" => runtime.connected?,
|
||||||
|
"last_connected_at" => runtime.last_connected_at,
|
||||||
|
"last_disconnected_at" => runtime.last_disconnected_at,
|
||||||
|
"last_sync_started_at" => runtime.last_sync_started_at,
|
||||||
|
"last_sync_completed_at" => runtime.last_sync_completed_at,
|
||||||
|
"last_event_received_at" => runtime.last_event_received_at,
|
||||||
|
"last_eose_at" => runtime.last_eose_at,
|
||||||
|
"reconnect_attempts" => runtime.reconnect_attempts,
|
||||||
|
"last_error" => runtime.last_error,
|
||||||
|
"events_received" => runtime.events_received,
|
||||||
|
"events_accepted" => runtime.events_accepted,
|
||||||
|
"events_duplicate" => runtime.events_duplicate,
|
||||||
|
"events_rejected" => runtime.events_rejected,
|
||||||
|
"query_runs" => runtime.query_runs,
|
||||||
|
"subscription_restarts" => runtime.subscription_restarts,
|
||||||
|
"reconnects" => runtime.reconnects,
|
||||||
|
"last_remote_eose_at" => runtime.last_remote_eose_at,
|
||||||
|
"cursor_created_at" => runtime.cursor_created_at,
|
||||||
|
"cursor_event_id" => runtime.cursor_event_id
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp empty_state(path) do
|
||||||
|
%{path: path, servers: %{}, runtime: %{}}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp default_runtime(server) do
|
||||||
|
%{
|
||||||
|
server_id: server.id,
|
||||||
|
state: if(server.enabled?, do: :running, else: :stopped),
|
||||||
|
connected?: false,
|
||||||
|
last_connected_at: nil,
|
||||||
|
last_disconnected_at: nil,
|
||||||
|
last_sync_started_at: nil,
|
||||||
|
last_sync_completed_at: nil,
|
||||||
|
last_event_received_at: nil,
|
||||||
|
last_eose_at: nil,
|
||||||
|
reconnect_attempts: 0,
|
||||||
|
last_error: nil,
|
||||||
|
events_received: 0,
|
||||||
|
events_accepted: 0,
|
||||||
|
events_duplicate: 0,
|
||||||
|
events_rejected: 0,
|
||||||
|
query_runs: 0,
|
||||||
|
subscription_restarts: 0,
|
||||||
|
reconnects: 0,
|
||||||
|
last_remote_eose_at: nil,
|
||||||
|
cursor_created_at: nil,
|
||||||
|
cursor_event_id: nil
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_server(server) when is_map(server) do
|
||||||
|
with {:ok, id} <- normalize_non_empty_string(fetch_value(server, :id), :invalid_server_id),
|
||||||
|
{:ok, {url, host, scheme}} <- normalize_url(fetch_value(server, :url)),
|
||||||
|
{:ok, enabled?} <- normalize_boolean(fetch_value(server, :enabled?), true),
|
||||||
|
{:ok, auth_pubkey} <- normalize_pubkey(fetch_value(server, :auth_pubkey)),
|
||||||
|
{:ok, filters} <- normalize_filters(fetch_value(server, :filters)),
|
||||||
|
{:ok, mode} <- normalize_mode(fetch_value(server, :mode)),
|
||||||
|
{:ok, overlap_window_seconds} <-
|
||||||
|
normalize_overlap_window(fetch_value(server, :overlap_window_seconds)),
|
||||||
|
{:ok, auth} <- normalize_auth(fetch_value(server, :auth)),
|
||||||
|
{:ok, tls} <- normalize_tls(fetch_value(server, :tls), host, scheme),
|
||||||
|
{:ok, metadata} <- normalize_metadata(fetch_value(server, :metadata)) do
|
||||||
|
{:ok,
|
||||||
|
%{
|
||||||
|
id: id,
|
||||||
|
url: url,
|
||||||
|
enabled?: enabled?,
|
||||||
|
auth_pubkey: auth_pubkey,
|
||||||
|
filters: filters,
|
||||||
|
mode: mode,
|
||||||
|
overlap_window_seconds: overlap_window_seconds,
|
||||||
|
auth: auth,
|
||||||
|
tls: tls,
|
||||||
|
metadata: metadata
|
||||||
|
}}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_server(_server), do: {:error, :invalid_server}
|
||||||
|
|
||||||
|
defp normalize_url(url) when is_binary(url) and url != "" do
|
||||||
|
uri = URI.parse(url)
|
||||||
|
|
||||||
|
if uri.scheme in ["ws", "wss"] and is_binary(uri.host) and uri.host != "" do
|
||||||
|
{:ok, {URI.to_string(uri), uri.host, uri.scheme}}
|
||||||
|
else
|
||||||
|
{:error, :invalid_url}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_url(_url), do: {:error, :invalid_url}
|
||||||
|
|
||||||
|
defp normalize_pubkey(pubkey) when is_binary(pubkey) do
|
||||||
|
normalized = String.downcase(pubkey)
|
||||||
|
|
||||||
|
if String.match?(normalized, @hex64) do
|
||||||
|
{:ok, normalized}
|
||||||
|
else
|
||||||
|
{:error, :invalid_auth_pubkey}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_pubkey(_pubkey), do: {:error, :invalid_auth_pubkey}
|
||||||
|
|
||||||
|
defp normalize_filters(filters) when is_list(filters) do
|
||||||
|
normalized_filters = Enum.map(filters, &normalize_filter_map/1)
|
||||||
|
|
||||||
|
with :ok <- Filter.validate_filters(normalized_filters) do
|
||||||
|
{:ok, normalized_filters}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_filters(_filters), do: {:error, :invalid_filters}
|
||||||
|
|
||||||
|
defp normalize_mode(nil), do: {:ok, @default_mode}
|
||||||
|
defp normalize_mode(:req_stream), do: {:ok, :req_stream}
|
||||||
|
defp normalize_mode("req_stream"), do: {:ok, :req_stream}
|
||||||
|
defp normalize_mode(_mode), do: {:error, :invalid_mode}
|
||||||
|
|
||||||
|
defp normalize_overlap_window(nil), do: {:ok, @default_overlap_window_seconds}
|
||||||
|
|
||||||
|
defp normalize_overlap_window(seconds) when is_integer(seconds) and seconds >= 0,
|
||||||
|
do: {:ok, seconds}
|
||||||
|
|
||||||
|
defp normalize_overlap_window(_seconds), do: {:error, :invalid_overlap_window_seconds}
|
||||||
|
|
||||||
|
defp normalize_auth(nil), do: {:ok, %{type: @default_auth_type}}
|
||||||
|
|
||||||
|
defp normalize_auth(auth) when is_map(auth) do
|
||||||
|
with {:ok, type} <- normalize_auth_type(fetch_value(auth, :type)) do
|
||||||
|
{:ok, %{type: type}}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_auth(_auth), do: {:error, :invalid_auth}
|
||||||
|
|
||||||
|
defp normalize_auth_type(nil), do: {:ok, @default_auth_type}
|
||||||
|
defp normalize_auth_type(:nip42), do: {:ok, :nip42}
|
||||||
|
defp normalize_auth_type("nip42"), do: {:ok, :nip42}
|
||||||
|
defp normalize_auth_type(_type), do: {:error, :invalid_auth_type}
|
||||||
|
|
||||||
|
defp normalize_tls(tls, host, scheme) when is_map(tls) do
|
||||||
|
with {:ok, mode} <- normalize_tls_mode(fetch_value(tls, :mode)),
|
||||||
|
:ok <- validate_tls_mode_against_scheme(mode, scheme),
|
||||||
|
{:ok, hostname} <- normalize_hostname(fetch_value(tls, :hostname) || host),
|
||||||
|
{:ok, pins} <- normalize_tls_pins(mode, fetch_value(tls, :pins)) do
|
||||||
|
{:ok, %{mode: mode, hostname: hostname, pins: pins}}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_tls(_tls, _host, _scheme), do: {:error, :invalid_tls}
|
||||||
|
|
||||||
|
defp normalize_tls_mode(nil), do: {:ok, @default_tls_mode}
|
||||||
|
defp normalize_tls_mode(:required), do: {:ok, :required}
|
||||||
|
defp normalize_tls_mode("required"), do: {:ok, :required}
|
||||||
|
defp normalize_tls_mode(:disabled), do: {:ok, :disabled}
|
||||||
|
defp normalize_tls_mode("disabled"), do: {:ok, :disabled}
|
||||||
|
defp normalize_tls_mode(_mode), do: {:error, :invalid_tls_mode}
|
||||||
|
|
||||||
|
defp validate_tls_mode_against_scheme(:required, "wss"), do: :ok
|
||||||
|
defp validate_tls_mode_against_scheme(:required, _scheme), do: {:error, :invalid_url}
|
||||||
|
defp validate_tls_mode_against_scheme(:disabled, _scheme), do: :ok
|
||||||
|
|
||||||
|
defp normalize_hostname(hostname) when is_binary(hostname) and hostname != "",
|
||||||
|
do: {:ok, hostname}
|
||||||
|
|
||||||
|
defp normalize_hostname(_hostname), do: {:error, :invalid_tls_hostname}
|
||||||
|
|
||||||
|
defp normalize_tls_pins(:disabled, nil), do: {:ok, []}
|
||||||
|
defp normalize_tls_pins(:disabled, pins) when is_list(pins), do: {:ok, []}
|
||||||
|
|
||||||
|
defp normalize_tls_pins(:required, pins) when is_list(pins) and pins != [] do
|
||||||
|
Enum.reduce_while(pins, {:ok, []}, fn pin, {:ok, acc} ->
|
||||||
|
case normalize_tls_pin(pin) do
|
||||||
|
{:ok, normalized_pin} -> {:cont, {:ok, [normalized_pin | acc]}}
|
||||||
|
{:error, reason} -> {:halt, {:error, reason}}
|
||||||
|
end
|
||||||
|
end)
|
||||||
|
|> case do
|
||||||
|
{:ok, normalized_pins} -> {:ok, Enum.reverse(normalized_pins)}
|
||||||
|
error -> error
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_tls_pins(:required, _pins), do: {:error, :invalid_tls_pins}
|
||||||
|
|
||||||
|
defp normalize_tls_pin(pin) when is_map(pin) do
|
||||||
|
with {:ok, type} <- normalize_tls_pin_type(fetch_value(pin, :type)),
|
||||||
|
{:ok, value} <- normalize_non_empty_string(fetch_value(pin, :value), :invalid_tls_pin) do
|
||||||
|
{:ok, %{type: type, value: value}}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_tls_pin(_pin), do: {:error, :invalid_tls_pin}
|
||||||
|
|
||||||
|
defp normalize_tls_pin_type(:spki_sha256), do: {:ok, :spki_sha256}
|
||||||
|
defp normalize_tls_pin_type("spki_sha256"), do: {:ok, :spki_sha256}
|
||||||
|
defp normalize_tls_pin_type(_type), do: {:error, :invalid_tls_pin}
|
||||||
|
|
||||||
|
defp normalize_metadata(nil), do: {:ok, %{}}
|
||||||
|
defp normalize_metadata(metadata) when is_map(metadata), do: {:ok, metadata}
|
||||||
|
defp normalize_metadata(_metadata), do: {:error, :invalid_metadata}
|
||||||
|
|
||||||
|
defp normalize_boolean(nil, default), do: {:ok, default}
|
||||||
|
defp normalize_boolean(value, _default) when is_boolean(value), do: {:ok, value}
|
||||||
|
defp normalize_boolean(_value, _default), do: {:error, :invalid_enabled_flag}
|
||||||
|
|
||||||
|
defp normalize_non_empty_string(value, _reason) when is_binary(value) and value != "",
|
||||||
|
do: {:ok, value}
|
||||||
|
|
||||||
|
defp normalize_non_empty_string(_value, reason), do: {:error, reason}
|
||||||
|
|
||||||
|
defp normalize_filter_map(filter) when is_map(filter) do
|
||||||
|
Map.new(filter, fn
|
||||||
|
{key, value} when is_atom(key) -> {Atom.to_string(key), value}
|
||||||
|
{key, value} -> {key, value}
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_filter_map(filter), do: filter
|
||||||
|
|
||||||
|
defp normalize_runtime_state("running"), do: :running
|
||||||
|
defp normalize_runtime_state(:running), do: :running
|
||||||
|
defp normalize_runtime_state("stopped"), do: :stopped
|
||||||
|
defp normalize_runtime_state(:stopped), do: :stopped
|
||||||
|
defp normalize_runtime_state(_state), do: :stopped
|
||||||
|
|
||||||
|
defp fetch_non_neg_integer(map, key) do
|
||||||
|
case fetch_value(map, key) do
|
||||||
|
value when is_integer(value) and value >= 0 -> value
|
||||||
|
_other -> 0
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp fetch_optional_integer(map, key) do
|
||||||
|
case fetch_value(map, key) do
|
||||||
|
value when is_integer(value) and value >= 0 -> value
|
||||||
|
_other -> nil
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp fetch_boolean(map, key) do
|
||||||
|
case fetch_value(map, key) do
|
||||||
|
value when is_boolean(value) -> value
|
||||||
|
_other -> nil
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp fetch_string_or_nil(map, key) do
|
||||||
|
case fetch_value(map, key) do
|
||||||
|
value when is_binary(value) and value != "" -> value
|
||||||
|
_other -> nil
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp fetch_value(map, key) when is_map(map) do
|
||||||
|
Map.get(map, key) || Map.get(map, Atom.to_string(key))
|
||||||
|
end
|
||||||
|
|
||||||
|
defp config_path do
|
||||||
|
config_value(:path)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp config_value(key, default \\ nil) do
|
||||||
|
:parrhesia
|
||||||
|
|> Application.get_env(:sync, [])
|
||||||
|
|> Keyword.get(key, default)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp now do
|
||||||
|
DateTime.utc_now()
|
||||||
|
|> DateTime.truncate(:second)
|
||||||
|
|> DateTime.to_iso8601()
|
||||||
|
end
|
||||||
|
end
|
||||||
@@ -5,19 +5,6 @@ defmodule Parrhesia.Application do
|
|||||||
|
|
||||||
@impl true
|
@impl true
|
||||||
def start(_type, _args) do
|
def start(_type, _args) do
|
||||||
children = [
|
Parrhesia.Runtime.start_link(name: Parrhesia.Supervisor)
|
||||||
Parrhesia.Telemetry,
|
|
||||||
Parrhesia.Config,
|
|
||||||
Parrhesia.Storage.Supervisor,
|
|
||||||
Parrhesia.Subscriptions.Supervisor,
|
|
||||||
Parrhesia.Auth.Supervisor,
|
|
||||||
Parrhesia.Policy.Supervisor,
|
|
||||||
Parrhesia.Web.Endpoint,
|
|
||||||
Parrhesia.Web.MetricsEndpoint,
|
|
||||||
Parrhesia.Tasks.Supervisor
|
|
||||||
]
|
|
||||||
|
|
||||||
opts = [strategy: :one_for_one, name: Parrhesia.Supervisor]
|
|
||||||
Supervisor.start_link(children, opts)
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|||||||
@@ -3,20 +3,29 @@ defmodule Parrhesia.Auth.Nip98 do
|
|||||||
Minimal NIP-98 HTTP auth validation.
|
Minimal NIP-98 HTTP auth validation.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
alias Parrhesia.Auth.Nip98ReplayCache
|
||||||
alias Parrhesia.Protocol.EventValidator
|
alias Parrhesia.Protocol.EventValidator
|
||||||
|
|
||||||
@max_age_seconds 60
|
@max_age_seconds 60
|
||||||
|
|
||||||
@spec validate_authorization_header(String.t() | nil, String.t(), String.t()) ::
|
@spec validate_authorization_header(String.t() | nil, String.t(), String.t()) ::
|
||||||
{:ok, map()} | {:error, atom()}
|
{:ok, map()} | {:error, atom()}
|
||||||
def validate_authorization_header(nil, _method, _url), do: {:error, :missing_authorization}
|
def validate_authorization_header(authorization, method, url) do
|
||||||
|
validate_authorization_header(authorization, method, url, [])
|
||||||
|
end
|
||||||
|
|
||||||
def validate_authorization_header("Nostr " <> encoded_event, method, url)
|
@spec validate_authorization_header(String.t() | nil, String.t(), String.t(), keyword()) ::
|
||||||
when is_binary(method) and is_binary(url) do
|
{:ok, map()} | {:error, atom()}
|
||||||
|
def validate_authorization_header(nil, _method, _url, _opts),
|
||||||
|
do: {:error, :missing_authorization}
|
||||||
|
|
||||||
|
def validate_authorization_header("Nostr " <> encoded_event, method, url, opts)
|
||||||
|
when is_binary(method) and is_binary(url) and is_list(opts) do
|
||||||
with {:ok, event_json} <- decode_base64(encoded_event),
|
with {:ok, event_json} <- decode_base64(encoded_event),
|
||||||
{:ok, event} <- JSON.decode(event_json),
|
{:ok, event} <- JSON.decode(event_json),
|
||||||
:ok <- validate_event_shape(event),
|
:ok <- validate_event_shape(event, opts),
|
||||||
:ok <- validate_http_binding(event, method, url) do
|
:ok <- validate_http_binding(event, method, url),
|
||||||
|
:ok <- consume_replay_token(event, opts) do
|
||||||
{:ok, event}
|
{:ok, event}
|
||||||
else
|
else
|
||||||
{:error, reason} -> {:error, reason}
|
{:error, reason} -> {:error, reason}
|
||||||
@@ -24,7 +33,8 @@ defmodule Parrhesia.Auth.Nip98 do
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def validate_authorization_header(_header, _method, _url), do: {:error, :invalid_authorization}
|
def validate_authorization_header(_header, _method, _url, _opts),
|
||||||
|
do: {:error, :invalid_authorization}
|
||||||
|
|
||||||
defp decode_base64(encoded_event) do
|
defp decode_base64(encoded_event) do
|
||||||
case Base.decode64(encoded_event) do
|
case Base.decode64(encoded_event) do
|
||||||
@@ -33,33 +43,35 @@ defmodule Parrhesia.Auth.Nip98 do
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
defp validate_event_shape(event) when is_map(event) do
|
defp validate_event_shape(event, opts) when is_map(event) do
|
||||||
with :ok <- EventValidator.validate(event),
|
with :ok <- EventValidator.validate(event),
|
||||||
:ok <- validate_kind(event),
|
:ok <- validate_kind(event),
|
||||||
:ok <- validate_fresh_created_at(event) do
|
:ok <- validate_fresh_created_at(event, opts) do
|
||||||
:ok
|
:ok
|
||||||
else
|
else
|
||||||
:ok -> :ok
|
{:error, :stale_event} -> {:error, :stale_event}
|
||||||
{:error, _reason} -> {:error, :invalid_event}
|
{:error, _reason} -> {:error, :invalid_event}
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
defp validate_event_shape(_event), do: {:error, :invalid_event}
|
defp validate_event_shape(_event, _opts), do: {:error, :invalid_event}
|
||||||
|
|
||||||
defp validate_kind(%{"kind" => 27_235}), do: :ok
|
defp validate_kind(%{"kind" => 27_235}), do: :ok
|
||||||
defp validate_kind(_event), do: {:error, :invalid_event}
|
defp validate_kind(_event), do: {:error, :invalid_event}
|
||||||
|
|
||||||
defp validate_fresh_created_at(%{"created_at" => created_at}) when is_integer(created_at) do
|
defp validate_fresh_created_at(%{"created_at" => created_at}, opts)
|
||||||
|
when is_integer(created_at) do
|
||||||
now = System.system_time(:second)
|
now = System.system_time(:second)
|
||||||
|
max_age_seconds = Keyword.get(opts, :max_age_seconds, @max_age_seconds)
|
||||||
|
|
||||||
if abs(now - created_at) <= @max_age_seconds do
|
if abs(now - created_at) <= max_age_seconds do
|
||||||
:ok
|
:ok
|
||||||
else
|
else
|
||||||
{:error, :stale_event}
|
{:error, :stale_event}
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
defp validate_fresh_created_at(_event), do: {:error, :invalid_event}
|
defp validate_fresh_created_at(_event, _opts), do: {:error, :invalid_event}
|
||||||
|
|
||||||
defp validate_http_binding(event, method, url) do
|
defp validate_http_binding(event, method, url) do
|
||||||
tags = Map.get(event, "tags", [])
|
tags = Map.get(event, "tags", [])
|
||||||
@@ -85,4 +97,14 @@ defmodule Parrhesia.Auth.Nip98 do
|
|||||||
true -> :ok
|
true -> :ok
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp consume_replay_token(%{"id" => event_id, "created_at" => created_at}, opts)
|
||||||
|
when is_binary(event_id) and is_integer(created_at) do
|
||||||
|
case Keyword.get(opts, :replay_cache, Nip98ReplayCache) do
|
||||||
|
nil -> :ok
|
||||||
|
replay_cache -> Nip98ReplayCache.consume(replay_cache, event_id, created_at, opts)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp consume_replay_token(_event, _opts), do: {:error, :invalid_event}
|
||||||
end
|
end
|
||||||
|
|||||||
56
lib/parrhesia/auth/nip98_replay_cache.ex
Normal file
56
lib/parrhesia/auth/nip98_replay_cache.ex
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
defmodule Parrhesia.Auth.Nip98ReplayCache do
|
||||||
|
@moduledoc """
|
||||||
|
Tracks recently accepted NIP-98 auth event ids to prevent replay.
|
||||||
|
"""
|
||||||
|
|
||||||
|
use GenServer
|
||||||
|
|
||||||
|
@default_max_age_seconds 60
|
||||||
|
|
||||||
|
@spec start_link(keyword()) :: GenServer.on_start()
|
||||||
|
def start_link(opts \\ []) do
|
||||||
|
case Keyword.get(opts, :name, __MODULE__) do
|
||||||
|
nil -> GenServer.start_link(__MODULE__, opts)
|
||||||
|
name -> GenServer.start_link(__MODULE__, opts, name: name)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec consume(GenServer.server(), String.t(), integer(), keyword()) ::
|
||||||
|
:ok | {:error, :replayed_auth_event}
|
||||||
|
def consume(server \\ __MODULE__, event_id, created_at, opts \\ [])
|
||||||
|
when is_binary(event_id) and is_integer(created_at) and is_list(opts) do
|
||||||
|
GenServer.call(server, {:consume, event_id, created_at, opts})
|
||||||
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def init(_opts) do
|
||||||
|
{:ok, %{entries: %{}}}
|
||||||
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def handle_call({:consume, event_id, created_at, opts}, _from, state) do
|
||||||
|
now_ms = System.monotonic_time(:millisecond)
|
||||||
|
entries = prune_expired(state.entries, now_ms)
|
||||||
|
|
||||||
|
case Map.has_key?(entries, event_id) do
|
||||||
|
true ->
|
||||||
|
{:reply, {:error, :replayed_auth_event}, %{state | entries: entries}}
|
||||||
|
|
||||||
|
false ->
|
||||||
|
expires_at_ms = replay_expiration_ms(now_ms, created_at, opts)
|
||||||
|
next_entries = Map.put(entries, event_id, expires_at_ms)
|
||||||
|
{:reply, :ok, %{state | entries: next_entries}}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp prune_expired(entries, now_ms) do
|
||||||
|
Map.reject(entries, fn {_event_id, expires_at_ms} -> expires_at_ms <= now_ms end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp replay_expiration_ms(now_ms, created_at, opts) do
|
||||||
|
max_age_seconds = Keyword.get(opts, :max_age_seconds, max_age_seconds())
|
||||||
|
max(now_ms, created_at * 1000) + max_age_seconds * 1000
|
||||||
|
end
|
||||||
|
|
||||||
|
defp max_age_seconds, do: @default_max_age_seconds
|
||||||
|
end
|
||||||
@@ -12,7 +12,9 @@ defmodule Parrhesia.Auth.Supervisor do
|
|||||||
@impl true
|
@impl true
|
||||||
def init(_init_arg) do
|
def init(_init_arg) do
|
||||||
children = [
|
children = [
|
||||||
{Parrhesia.Auth.Challenges, name: Parrhesia.Auth.Challenges}
|
{Parrhesia.Auth.Challenges, name: Parrhesia.Auth.Challenges},
|
||||||
|
{Parrhesia.Auth.Nip98ReplayCache, name: Parrhesia.Auth.Nip98ReplayCache},
|
||||||
|
{Parrhesia.API.Identity.Manager, []}
|
||||||
]
|
]
|
||||||
|
|
||||||
Supervisor.init(children, strategy: :one_for_one)
|
Supervisor.init(children, strategy: :one_for_one)
|
||||||
|
|||||||
@@ -1,6 +1,9 @@
|
|||||||
defmodule Parrhesia.Config do
|
defmodule Parrhesia.Config do
|
||||||
@moduledoc """
|
@moduledoc """
|
||||||
Runtime configuration cache backed by ETS.
|
Runtime configuration cache backed by ETS.
|
||||||
|
|
||||||
|
The application environment is copied into ETS at startup so hot-path reads do not need to
|
||||||
|
traverse the application environment repeatedly.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
use GenServer
|
use GenServer
|
||||||
@@ -8,6 +11,9 @@ defmodule Parrhesia.Config do
|
|||||||
@table __MODULE__
|
@table __MODULE__
|
||||||
@root_key :config
|
@root_key :config
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Starts the config cache server.
|
||||||
|
"""
|
||||||
def start_link(init_arg \\ []) do
|
def start_link(init_arg \\ []) do
|
||||||
GenServer.start_link(__MODULE__, init_arg, name: __MODULE__)
|
GenServer.start_link(__MODULE__, init_arg, name: __MODULE__)
|
||||||
end
|
end
|
||||||
@@ -26,6 +32,9 @@ defmodule Parrhesia.Config do
|
|||||||
{:ok, %{}}
|
{:ok, %{}}
|
||||||
end
|
end
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Returns the cached top-level Parrhesia application config.
|
||||||
|
"""
|
||||||
@spec all() :: map() | keyword()
|
@spec all() :: map() | keyword()
|
||||||
def all do
|
def all do
|
||||||
case :ets.lookup(@table, @root_key) do
|
case :ets.lookup(@table, @root_key) do
|
||||||
@@ -34,6 +43,11 @@ defmodule Parrhesia.Config do
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Reads a nested config value by path.
|
||||||
|
|
||||||
|
The path may traverse maps or keyword lists. Missing paths return `default`.
|
||||||
|
"""
|
||||||
@spec get([atom()], term()) :: term()
|
@spec get([atom()], term()) :: term()
|
||||||
def get(path, default \\ nil) when is_list(path) do
|
def get(path, default \\ nil) when is_list(path) do
|
||||||
case fetch(path) do
|
case fetch(path) do
|
||||||
|
|||||||
89
lib/parrhesia/connection_stats.ex
Normal file
89
lib/parrhesia/connection_stats.ex
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
defmodule Parrhesia.ConnectionStats do
|
||||||
|
@moduledoc """
|
||||||
|
Per-listener connection and subscription counters.
|
||||||
|
|
||||||
|
Tracks active connection and subscription counts per listener and emits
|
||||||
|
`[:parrhesia, :listener, :population]` telemetry events on each change.
|
||||||
|
"""
|
||||||
|
|
||||||
|
use GenServer
|
||||||
|
|
||||||
|
alias Parrhesia.Telemetry
|
||||||
|
|
||||||
|
defstruct connections: %{}, subscriptions: %{}
|
||||||
|
|
||||||
|
@type state :: %__MODULE__{
|
||||||
|
connections: %{(atom() | String.t()) => non_neg_integer()},
|
||||||
|
subscriptions: %{(atom() | String.t()) => non_neg_integer()}
|
||||||
|
}
|
||||||
|
|
||||||
|
@spec start_link(keyword()) :: GenServer.on_start()
|
||||||
|
def start_link(opts \\ []) do
|
||||||
|
name = Keyword.get(opts, :name, __MODULE__)
|
||||||
|
GenServer.start_link(__MODULE__, %__MODULE__{}, name: name)
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec connection_open(atom() | String.t()) :: :ok
|
||||||
|
def connection_open(listener_id), do: cast({:connection_open, listener_id})
|
||||||
|
|
||||||
|
@spec connection_close(atom() | String.t()) :: :ok
|
||||||
|
def connection_close(listener_id), do: cast({:connection_close, listener_id})
|
||||||
|
|
||||||
|
@spec subscriptions_change(atom() | String.t(), integer()) :: :ok
|
||||||
|
def subscriptions_change(listener_id, delta) when is_integer(delta) do
|
||||||
|
cast({:subscriptions_change, listener_id, delta})
|
||||||
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def init(%__MODULE__{} = state), do: {:ok, state}
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def handle_cast({:connection_open, listener_id}, %__MODULE__{} = state) do
|
||||||
|
listener_id = normalize_listener_id(listener_id)
|
||||||
|
next_state = %{state | connections: increment(state.connections, listener_id, 1)}
|
||||||
|
emit_population(listener_id, next_state)
|
||||||
|
{:noreply, next_state}
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_cast({:connection_close, listener_id}, %__MODULE__{} = state) do
|
||||||
|
listener_id = normalize_listener_id(listener_id)
|
||||||
|
next_state = %{state | connections: increment(state.connections, listener_id, -1)}
|
||||||
|
emit_population(listener_id, next_state)
|
||||||
|
{:noreply, next_state}
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_cast({:subscriptions_change, listener_id, delta}, %__MODULE__{} = state) do
|
||||||
|
listener_id = normalize_listener_id(listener_id)
|
||||||
|
next_state = %{state | subscriptions: increment(state.subscriptions, listener_id, delta)}
|
||||||
|
emit_population(listener_id, next_state)
|
||||||
|
{:noreply, next_state}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp cast(message) do
|
||||||
|
GenServer.cast(__MODULE__, message)
|
||||||
|
:ok
|
||||||
|
catch
|
||||||
|
:exit, {:noproc, _details} -> :ok
|
||||||
|
:exit, {:normal, _details} -> :ok
|
||||||
|
end
|
||||||
|
|
||||||
|
defp increment(counts, key, delta) do
|
||||||
|
current = Map.get(counts, key, 0)
|
||||||
|
Map.put(counts, key, max(current + delta, 0))
|
||||||
|
end
|
||||||
|
|
||||||
|
defp emit_population(listener_id, %__MODULE__{} = state) do
|
||||||
|
Telemetry.emit(
|
||||||
|
[:parrhesia, :listener, :population],
|
||||||
|
%{
|
||||||
|
connections: Map.get(state.connections, listener_id, 0),
|
||||||
|
subscriptions: Map.get(state.subscriptions, listener_id, 0)
|
||||||
|
},
|
||||||
|
%{listener_id: listener_id}
|
||||||
|
)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_listener_id(listener_id) when is_atom(listener_id), do: listener_id
|
||||||
|
defp normalize_listener_id(listener_id) when is_binary(listener_id), do: listener_id
|
||||||
|
defp normalize_listener_id(_listener_id), do: :unknown
|
||||||
|
end
|
||||||
46
lib/parrhesia/fanout/dispatcher.ex
Normal file
46
lib/parrhesia/fanout/dispatcher.ex
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
defmodule Parrhesia.Fanout.Dispatcher do
|
||||||
|
@moduledoc """
|
||||||
|
Asynchronous local fanout dispatcher.
|
||||||
|
"""
|
||||||
|
|
||||||
|
use GenServer
|
||||||
|
|
||||||
|
alias Parrhesia.Subscriptions.Index
|
||||||
|
|
||||||
|
@spec start_link(keyword()) :: GenServer.on_start()
|
||||||
|
def start_link(opts \\ []) do
|
||||||
|
name = Keyword.get(opts, :name, __MODULE__)
|
||||||
|
GenServer.start_link(__MODULE__, :ok, name: name)
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec dispatch(map()) :: :ok
|
||||||
|
def dispatch(event), do: dispatch(__MODULE__, event)
|
||||||
|
|
||||||
|
@spec dispatch(GenServer.server(), map()) :: :ok
|
||||||
|
def dispatch(server, event) when is_map(event) do
|
||||||
|
GenServer.cast(server, {:dispatch, event})
|
||||||
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def init(:ok), do: {:ok, %{}}
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def handle_cast({:dispatch, event}, state) do
|
||||||
|
dispatch_to_candidates(event)
|
||||||
|
{:noreply, state}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp dispatch_to_candidates(event) do
|
||||||
|
case Index.candidate_subscription_keys(event) do
|
||||||
|
candidates when is_list(candidates) ->
|
||||||
|
Enum.each(candidates, fn {owner_pid, subscription_id} ->
|
||||||
|
send(owner_pid, {:fanout_event, subscription_id, event})
|
||||||
|
end)
|
||||||
|
|
||||||
|
_other ->
|
||||||
|
:ok
|
||||||
|
end
|
||||||
|
catch
|
||||||
|
:exit, _reason -> :ok
|
||||||
|
end
|
||||||
|
end
|
||||||
@@ -5,7 +5,7 @@ defmodule Parrhesia.Fanout.MultiNode do
|
|||||||
|
|
||||||
use GenServer
|
use GenServer
|
||||||
|
|
||||||
alias Parrhesia.Subscriptions.Index
|
alias Parrhesia.Fanout.Dispatcher
|
||||||
|
|
||||||
@group __MODULE__
|
@group __MODULE__
|
||||||
|
|
||||||
@@ -44,11 +44,7 @@ defmodule Parrhesia.Fanout.MultiNode do
|
|||||||
|
|
||||||
@impl true
|
@impl true
|
||||||
def handle_info({:remote_fanout_event, event}, state) do
|
def handle_info({:remote_fanout_event, event}, state) do
|
||||||
Index.candidate_subscription_keys(event)
|
Dispatcher.dispatch(event)
|
||||||
|> Enum.each(fn {owner_pid, subscription_id} ->
|
|
||||||
send(owner_pid, {:fanout_event, subscription_id, event})
|
|
||||||
end)
|
|
||||||
|
|
||||||
{:noreply, state}
|
{:noreply, state}
|
||||||
end
|
end
|
||||||
|
|
||||||
|
|||||||
@@ -1,52 +1,62 @@
|
|||||||
defmodule Parrhesia.Groups.Flow do
|
defmodule Parrhesia.Groups.Flow do
|
||||||
@moduledoc """
|
@moduledoc """
|
||||||
Minimal group and membership flow handling for NIP-29/NIP-43 related kinds.
|
Relay access membership projection backed by the shared group storage adapter.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
alias Parrhesia.Storage
|
alias Parrhesia.Storage
|
||||||
|
|
||||||
@membership_request_kind 8_000
|
@relay_access_group_id "__relay_access__"
|
||||||
@membership_approval_kind 8_001
|
@add_user_kind 8_000
|
||||||
@relay_metadata_kind 28_934
|
@remove_user_kind 8_001
|
||||||
@relay_admins_kind 28_935
|
@join_request_kind 28_934
|
||||||
@relay_rules_kind 28_936
|
@invite_request_kind 28_935
|
||||||
@membership_event_kind 13_534
|
@leave_request_kind 28_936
|
||||||
|
@membership_list_kind 13_534
|
||||||
|
|
||||||
@spec handle_event(map()) :: :ok | {:error, term()}
|
@spec handle_event(map()) :: :ok | {:error, term()}
|
||||||
def handle_event(event) when is_map(event) do
|
def handle_event(event) when is_map(event) do
|
||||||
case Map.get(event, "kind") do
|
case Map.get(event, "kind") do
|
||||||
@membership_request_kind -> upsert_membership(event, "requested")
|
@join_request_kind -> put_member(event, membership_pubkey_from_event(event))
|
||||||
@membership_approval_kind -> upsert_membership(event, "member")
|
@leave_request_kind -> delete_member(event, membership_pubkey_from_event(event))
|
||||||
@membership_event_kind -> upsert_membership(event, "member")
|
@add_user_kind -> put_member(event, tagged_pubkey(event, "p"))
|
||||||
@relay_metadata_kind -> :ok
|
@remove_user_kind -> delete_member(event, tagged_pubkey(event, "p"))
|
||||||
@relay_admins_kind -> :ok
|
@membership_list_kind -> replace_membership_snapshot(event)
|
||||||
@relay_rules_kind -> :ok
|
@invite_request_kind -> :ok
|
||||||
_other -> :ok
|
_other -> :ok
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
@spec group_related_kind?(non_neg_integer()) :: boolean()
|
@spec relay_access_kind?(non_neg_integer()) :: boolean()
|
||||||
def group_related_kind?(kind)
|
def relay_access_kind?(kind)
|
||||||
when kind in [
|
when kind in [
|
||||||
@membership_request_kind,
|
@add_user_kind,
|
||||||
@membership_approval_kind,
|
@remove_user_kind,
|
||||||
@relay_metadata_kind,
|
@join_request_kind,
|
||||||
@relay_admins_kind,
|
@invite_request_kind,
|
||||||
@relay_rules_kind,
|
@leave_request_kind,
|
||||||
@membership_event_kind
|
@membership_list_kind
|
||||||
],
|
],
|
||||||
do: true
|
do: true
|
||||||
|
|
||||||
def group_related_kind?(_kind), do: false
|
def relay_access_kind?(_kind), do: false
|
||||||
|
|
||||||
defp upsert_membership(event, role) do
|
@spec get_membership(binary()) :: {:ok, map() | nil} | {:error, term()}
|
||||||
with {:ok, group_id} <- group_id_from_event(event),
|
def get_membership(pubkey) when is_binary(pubkey) do
|
||||||
{:ok, pubkey} <- pubkey_from_event(event) do
|
Storage.groups().get_membership(%{}, @relay_access_group_id, pubkey)
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec list_memberships() :: {:ok, [map()]} | {:error, term()}
|
||||||
|
def list_memberships do
|
||||||
|
Storage.groups().list_memberships(%{}, @relay_access_group_id)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp put_member(event, {:ok, pubkey}) do
|
||||||
|
with {:ok, metadata} <- membership_metadata(event) do
|
||||||
Storage.groups().put_membership(%{}, %{
|
Storage.groups().put_membership(%{}, %{
|
||||||
group_id: group_id,
|
group_id: @relay_access_group_id,
|
||||||
pubkey: pubkey,
|
pubkey: pubkey,
|
||||||
role: role,
|
role: "member",
|
||||||
metadata: %{"source_kind" => Map.get(event, "kind")}
|
metadata: metadata
|
||||||
})
|
})
|
||||||
|> case do
|
|> case do
|
||||||
{:ok, _membership} -> :ok
|
{:ok, _membership} -> :ok
|
||||||
@@ -55,21 +65,85 @@ defmodule Parrhesia.Groups.Flow do
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
defp group_id_from_event(event) do
|
defp put_member(_event, {:error, reason}), do: {:error, reason}
|
||||||
group_id =
|
|
||||||
event
|
|
||||||
|> Map.get("tags", [])
|
|
||||||
|> Enum.find_value(fn
|
|
||||||
["h", value | _rest] when is_binary(value) and value != "" -> value
|
|
||||||
_tag -> nil
|
|
||||||
end)
|
|
||||||
|
|
||||||
case group_id do
|
defp delete_member(_event, {:ok, pubkey}) do
|
||||||
nil -> {:error, :missing_group_id}
|
Storage.groups().delete_membership(%{}, @relay_access_group_id, pubkey)
|
||||||
value -> {:ok, value}
|
end
|
||||||
|
|
||||||
|
defp delete_member(_event, {:error, reason}), do: {:error, reason}
|
||||||
|
|
||||||
|
defp replace_membership_snapshot(event) do
|
||||||
|
with {:ok, tagged_members} <- tagged_pubkeys(event, "member"),
|
||||||
|
{:ok, existing_memberships} <- list_memberships() do
|
||||||
|
incoming_pubkeys = MapSet.new(tagged_members)
|
||||||
|
existing_pubkeys = MapSet.new(Enum.map(existing_memberships, & &1.pubkey))
|
||||||
|
|
||||||
|
remove_members =
|
||||||
|
existing_pubkeys
|
||||||
|
|> MapSet.difference(incoming_pubkeys)
|
||||||
|
|> MapSet.to_list()
|
||||||
|
|
||||||
|
add_members =
|
||||||
|
incoming_pubkeys
|
||||||
|
|> MapSet.to_list()
|
||||||
|
|
||||||
|
:ok = remove_memberships(remove_members)
|
||||||
|
add_memberships(event, add_members)
|
||||||
|
else
|
||||||
|
{:error, reason} -> {:error, reason}
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
defp pubkey_from_event(%{"pubkey" => pubkey}) when is_binary(pubkey), do: {:ok, pubkey}
|
defp membership_pubkey_from_event(%{"pubkey" => pubkey}) when is_binary(pubkey),
|
||||||
defp pubkey_from_event(_event), do: {:error, :missing_pubkey}
|
do: {:ok, pubkey}
|
||||||
|
|
||||||
|
defp membership_pubkey_from_event(_event), do: {:error, :missing_pubkey}
|
||||||
|
|
||||||
|
defp tagged_pubkey(event, tag_name) do
|
||||||
|
event
|
||||||
|
|> tagged_pubkeys(tag_name)
|
||||||
|
|> case do
|
||||||
|
{:ok, [pubkey]} -> {:ok, pubkey}
|
||||||
|
{:ok, []} -> {:error, :missing_pubkey}
|
||||||
|
{:ok, _pubkeys} -> {:error, :invalid_pubkey}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp tagged_pubkeys(event, tag_name) do
|
||||||
|
pubkeys =
|
||||||
|
event
|
||||||
|
|> Map.get("tags", [])
|
||||||
|
|> Enum.flat_map(fn
|
||||||
|
[^tag_name, pubkey | _rest] when is_binary(pubkey) and pubkey != "" -> [pubkey]
|
||||||
|
_tag -> []
|
||||||
|
end)
|
||||||
|
|
||||||
|
{:ok, Enum.uniq(pubkeys)}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp membership_metadata(event) do
|
||||||
|
{:ok,
|
||||||
|
%{
|
||||||
|
"source_kind" => Map.get(event, "kind"),
|
||||||
|
"source_event_id" => Map.get(event, "id")
|
||||||
|
}}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp remove_memberships(pubkeys) when is_list(pubkeys) do
|
||||||
|
Enum.each(pubkeys, fn pubkey ->
|
||||||
|
:ok = Storage.groups().delete_membership(%{}, @relay_access_group_id, pubkey)
|
||||||
|
end)
|
||||||
|
|
||||||
|
:ok
|
||||||
|
end
|
||||||
|
|
||||||
|
defp add_memberships(event, pubkeys) when is_list(pubkeys) do
|
||||||
|
Enum.reduce_while(pubkeys, :ok, fn pubkey, :ok ->
|
||||||
|
case put_member(event, {:ok, pubkey}) do
|
||||||
|
:ok -> {:cont, :ok}
|
||||||
|
{:error, _reason} = error -> {:halt, error}
|
||||||
|
end
|
||||||
|
end)
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|||||||
48
lib/parrhesia/http.ex
Normal file
48
lib/parrhesia/http.ex
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
defmodule Parrhesia.HTTP do
|
||||||
|
@moduledoc false
|
||||||
|
|
||||||
|
alias Parrhesia.Metadata
|
||||||
|
|
||||||
|
@default_headers [{"user-agent", Metadata.user_agent()}]
|
||||||
|
|
||||||
|
@spec default_headers() :: [{String.t(), String.t()}]
|
||||||
|
def default_headers, do: @default_headers
|
||||||
|
|
||||||
|
@spec get(Keyword.t()) :: {:ok, Req.Response.t()} | {:error, Exception.t()}
|
||||||
|
def get(options) when is_list(options) do
|
||||||
|
Req.get(put_default_headers(options))
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec post(Keyword.t()) :: {:ok, Req.Response.t()} | {:error, Exception.t()}
|
||||||
|
def post(options) when is_list(options) do
|
||||||
|
Req.post(put_default_headers(options))
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec put_default_headers(Keyword.t()) :: Keyword.t()
|
||||||
|
def put_default_headers(options) when is_list(options) do
|
||||||
|
Keyword.update(options, :headers, @default_headers, &merge_headers(&1, @default_headers))
|
||||||
|
end
|
||||||
|
|
||||||
|
defp merge_headers(headers, defaults) do
|
||||||
|
existing_names =
|
||||||
|
headers
|
||||||
|
|> List.wrap()
|
||||||
|
|> Enum.reduce(MapSet.new(), fn
|
||||||
|
{name, _value}, acc -> MapSet.put(acc, normalize_header_name(name))
|
||||||
|
_other, acc -> acc
|
||||||
|
end)
|
||||||
|
|
||||||
|
headers ++
|
||||||
|
Enum.reject(defaults, fn {name, _value} ->
|
||||||
|
MapSet.member?(existing_names, normalize_header_name(name))
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_header_name(name) when is_atom(name) do
|
||||||
|
name
|
||||||
|
|> Atom.to_string()
|
||||||
|
|> String.downcase()
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_header_name(name) when is_binary(name), do: String.downcase(name)
|
||||||
|
end
|
||||||
29
lib/parrhesia/metadata.ex
Normal file
29
lib/parrhesia/metadata.ex
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
defmodule Parrhesia.Metadata do
|
||||||
|
@moduledoc false
|
||||||
|
|
||||||
|
@metadata Application.compile_env(:parrhesia, :metadata, [])
|
||||||
|
@name Keyword.get(@metadata, :name, "Parrhesia")
|
||||||
|
@version Keyword.get(@metadata, :version, "0.0.0")
|
||||||
|
@hide_version? Keyword.get(@metadata, :hide_version?, true)
|
||||||
|
|
||||||
|
@spec name() :: String.t()
|
||||||
|
def name, do: @name
|
||||||
|
|
||||||
|
@spec version() :: String.t()
|
||||||
|
def version, do: @version
|
||||||
|
|
||||||
|
@spec hide_version?() :: boolean()
|
||||||
|
def hide_version?, do: @hide_version?
|
||||||
|
|
||||||
|
@spec name_and_version() :: String.t()
|
||||||
|
def name_and_version, do: "#{@name}/#{@version}"
|
||||||
|
|
||||||
|
@spec user_agent() :: String.t()
|
||||||
|
def user_agent do
|
||||||
|
if hide_version?() do
|
||||||
|
name()
|
||||||
|
else
|
||||||
|
name_and_version()
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
136
lib/parrhesia/negentropy/engine.ex
Normal file
136
lib/parrhesia/negentropy/engine.ex
Normal file
@@ -0,0 +1,136 @@
|
|||||||
|
defmodule Parrhesia.Negentropy.Engine do
|
||||||
|
@moduledoc """
|
||||||
|
Relay/client-agnostic negentropy reconciliation engine.
|
||||||
|
"""
|
||||||
|
|
||||||
|
alias Parrhesia.Negentropy.Message
|
||||||
|
|
||||||
|
@default_id_list_threshold 32
|
||||||
|
|
||||||
|
@type item :: Message.item()
|
||||||
|
|
||||||
|
@spec initial_message([item()], keyword()) :: binary()
|
||||||
|
def initial_message(items, opts \\ []) when is_list(opts) do
|
||||||
|
normalized_items = normalize_items(items)
|
||||||
|
|
||||||
|
Message.encode([
|
||||||
|
describe_range(normalized_items, :infinity, id_list_threshold(opts))
|
||||||
|
])
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec answer([item()], binary(), keyword()) :: {:ok, binary()} | {:error, term()}
|
||||||
|
def answer(items, incoming_message, opts \\ [])
|
||||||
|
when is_binary(incoming_message) and is_list(opts) do
|
||||||
|
normalized_items = normalize_items(items)
|
||||||
|
threshold = id_list_threshold(opts)
|
||||||
|
|
||||||
|
case Message.decode(incoming_message) do
|
||||||
|
{:ok, ranges} ->
|
||||||
|
response_ranges =
|
||||||
|
respond_to_ranges(normalized_items, ranges, Message.initial_lower_bound(), threshold)
|
||||||
|
|
||||||
|
{:ok, Message.encode(response_ranges)}
|
||||||
|
|
||||||
|
{:unsupported_version, _supported_version} ->
|
||||||
|
{:ok, Message.supported_version_message()}
|
||||||
|
|
||||||
|
{:error, reason} ->
|
||||||
|
{:error, reason}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp respond_to_ranges(_items, [], _lower_bound, _threshold), do: []
|
||||||
|
|
||||||
|
defp respond_to_ranges(items, [range | rest], lower_bound, threshold) do
|
||||||
|
upper_bound = Map.fetch!(range, :upper_bound)
|
||||||
|
|
||||||
|
items_in_range =
|
||||||
|
Enum.filter(items, fn item ->
|
||||||
|
Message.item_in_range?(item, lower_bound, upper_bound)
|
||||||
|
end)
|
||||||
|
|
||||||
|
response =
|
||||||
|
case range.mode do
|
||||||
|
:skip ->
|
||||||
|
[%{upper_bound: upper_bound, mode: :skip, payload: nil}]
|
||||||
|
|
||||||
|
:fingerprint ->
|
||||||
|
respond_to_fingerprint_range(items_in_range, upper_bound, range.payload, threshold)
|
||||||
|
|
||||||
|
:id_list ->
|
||||||
|
respond_to_id_list_range(items_in_range, upper_bound, range.payload, threshold)
|
||||||
|
end
|
||||||
|
|
||||||
|
response ++ respond_to_ranges(items, rest, upper_bound, threshold)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp respond_to_fingerprint_range(items, upper_bound, remote_fingerprint, threshold) do
|
||||||
|
if Message.fingerprint(items) == remote_fingerprint do
|
||||||
|
[%{upper_bound: upper_bound, mode: :skip, payload: nil}]
|
||||||
|
else
|
||||||
|
mismatch_response(items, upper_bound, threshold)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp respond_to_id_list_range(items, upper_bound, remote_ids, threshold) do
|
||||||
|
if Enum.map(items, & &1.id) == remote_ids do
|
||||||
|
[%{upper_bound: upper_bound, mode: :skip, payload: nil}]
|
||||||
|
else
|
||||||
|
mismatch_response(items, upper_bound, threshold)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp mismatch_response(items, upper_bound, threshold) do
|
||||||
|
if length(items) <= threshold do
|
||||||
|
[%{upper_bound: upper_bound, mode: :id_list, payload: Enum.map(items, & &1.id)}]
|
||||||
|
else
|
||||||
|
split_response(items, upper_bound, threshold)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp split_response(items, upper_bound, threshold) do
|
||||||
|
midpoint = div(length(items), 2)
|
||||||
|
left_items = Enum.take(items, midpoint)
|
||||||
|
right_items = Enum.drop(items, midpoint)
|
||||||
|
|
||||||
|
boundary =
|
||||||
|
left_items
|
||||||
|
|> List.last()
|
||||||
|
|> then(&Message.split_bound(&1, hd(right_items)))
|
||||||
|
|
||||||
|
[
|
||||||
|
describe_range(left_items, boundary, threshold),
|
||||||
|
describe_range(right_items, upper_bound, threshold)
|
||||||
|
]
|
||||||
|
end
|
||||||
|
|
||||||
|
defp describe_range(items, upper_bound, threshold) do
|
||||||
|
if length(items) <= threshold do
|
||||||
|
%{upper_bound: upper_bound, mode: :id_list, payload: Enum.map(items, & &1.id)}
|
||||||
|
else
|
||||||
|
%{upper_bound: upper_bound, mode: :fingerprint, payload: Message.fingerprint(items)}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_items(items) do
|
||||||
|
items
|
||||||
|
|> Enum.map(&normalize_item/1)
|
||||||
|
|> Enum.sort(&(Message.compare_items(&1, &2) != :gt))
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_item(%{created_at: created_at, id: id})
|
||||||
|
when is_integer(created_at) and created_at >= 0 and is_binary(id) and byte_size(id) == 32 do
|
||||||
|
%{created_at: created_at, id: id}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_item(item) do
|
||||||
|
raise ArgumentError, "invalid negentropy item: #{inspect(item)}"
|
||||||
|
end
|
||||||
|
|
||||||
|
defp id_list_threshold(opts) do
|
||||||
|
case Keyword.get(opts, :id_list_threshold, @default_id_list_threshold) do
|
||||||
|
threshold when is_integer(threshold) and threshold > 0 -> threshold
|
||||||
|
_other -> @default_id_list_threshold
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
349
lib/parrhesia/negentropy/message.ex
Normal file
349
lib/parrhesia/negentropy/message.ex
Normal file
@@ -0,0 +1,349 @@
|
|||||||
|
defmodule Parrhesia.Negentropy.Message do
|
||||||
|
@moduledoc """
|
||||||
|
NIP-77 negentropy message codec and helpers.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import Bitwise
|
||||||
|
|
||||||
|
@protocol_version 0x61
|
||||||
|
@id_size 32
|
||||||
|
@fingerprint_size 16
|
||||||
|
@u256_mod 1 <<< 256
|
||||||
|
@zero_id <<0::size(256)>>
|
||||||
|
|
||||||
|
@type item :: %{created_at: non_neg_integer(), id: binary()}
|
||||||
|
@type bound :: :infinity | {non_neg_integer(), binary()}
|
||||||
|
@type range ::
|
||||||
|
%{
|
||||||
|
upper_bound: bound(),
|
||||||
|
mode: :skip | :fingerprint | :id_list,
|
||||||
|
payload: nil | binary() | [binary()]
|
||||||
|
}
|
||||||
|
|
||||||
|
@spec protocol_version() :: byte()
|
||||||
|
def protocol_version, do: @protocol_version
|
||||||
|
|
||||||
|
@spec supported_version_message() :: binary()
|
||||||
|
def supported_version_message, do: <<@protocol_version>>
|
||||||
|
|
||||||
|
@spec decode(binary()) :: {:ok, [range()]} | {:unsupported_version, byte()} | {:error, term()}
|
||||||
|
def decode(<<version, _rest::binary>>) when version != @protocol_version,
|
||||||
|
do: {:unsupported_version, @protocol_version}
|
||||||
|
|
||||||
|
def decode(<<@protocol_version, rest::binary>>) do
|
||||||
|
decode_ranges(rest, 0, initial_lower_bound(), [])
|
||||||
|
end
|
||||||
|
|
||||||
|
def decode(_message), do: {:error, :invalid_message}
|
||||||
|
|
||||||
|
@spec encode([range()]) :: binary()
|
||||||
|
def encode(ranges) when is_list(ranges) do
|
||||||
|
ranges
|
||||||
|
|> drop_trailing_skip_ranges()
|
||||||
|
|> Enum.reduce({[@protocol_version], 0}, fn range, {acc, previous_timestamp} ->
|
||||||
|
{encoded_range, next_timestamp} = encode_range(range, previous_timestamp)
|
||||||
|
{[acc, encoded_range], next_timestamp}
|
||||||
|
end)
|
||||||
|
|> elem(0)
|
||||||
|
|> IO.iodata_to_binary()
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec fingerprint([item()]) :: binary()
|
||||||
|
def fingerprint(items) when is_list(items) do
|
||||||
|
sum =
|
||||||
|
Enum.reduce(items, 0, fn %{id: id}, acc ->
|
||||||
|
<<id_integer::unsigned-little-size(256)>> = id
|
||||||
|
rem(acc + id_integer, @u256_mod)
|
||||||
|
end)
|
||||||
|
|
||||||
|
payload = [<<sum::unsigned-little-size(256)>>, encode_varint(length(items))]
|
||||||
|
|
||||||
|
payload
|
||||||
|
|> IO.iodata_to_binary()
|
||||||
|
|> then(&:crypto.hash(:sha256, &1))
|
||||||
|
|> binary_part(0, @fingerprint_size)
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec compare_items(item(), item()) :: :lt | :eq | :gt
|
||||||
|
def compare_items(left, right) do
|
||||||
|
cond do
|
||||||
|
left.created_at < right.created_at -> :lt
|
||||||
|
left.created_at > right.created_at -> :gt
|
||||||
|
left.id < right.id -> :lt
|
||||||
|
left.id > right.id -> :gt
|
||||||
|
true -> :eq
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec compare_bound(bound(), bound()) :: :lt | :eq | :gt
|
||||||
|
def compare_bound(:infinity, :infinity), do: :eq
|
||||||
|
def compare_bound(:infinity, _other), do: :gt
|
||||||
|
def compare_bound(_other, :infinity), do: :lt
|
||||||
|
|
||||||
|
def compare_bound({left_timestamp, left_id}, {right_timestamp, right_id}) do
|
||||||
|
cond do
|
||||||
|
left_timestamp < right_timestamp -> :lt
|
||||||
|
left_timestamp > right_timestamp -> :gt
|
||||||
|
left_id < right_id -> :lt
|
||||||
|
left_id > right_id -> :gt
|
||||||
|
true -> :eq
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec item_in_range?(item(), bound(), bound()) :: boolean()
|
||||||
|
def item_in_range?(item, lower_bound, upper_bound) do
|
||||||
|
compare_item_to_bound(item, lower_bound) != :lt and
|
||||||
|
compare_item_to_bound(item, upper_bound) == :lt
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec initial_lower_bound() :: bound()
|
||||||
|
def initial_lower_bound, do: {0, @zero_id}
|
||||||
|
|
||||||
|
@spec zero_id() :: binary()
|
||||||
|
def zero_id, do: @zero_id
|
||||||
|
|
||||||
|
@spec split_bound(item(), item()) :: bound()
|
||||||
|
def split_bound(previous_item, next_item)
|
||||||
|
when is_map(previous_item) and is_map(next_item) do
|
||||||
|
cond do
|
||||||
|
previous_item.created_at < next_item.created_at ->
|
||||||
|
{next_item.created_at, @zero_id}
|
||||||
|
|
||||||
|
previous_item.created_at == next_item.created_at ->
|
||||||
|
prefix_length = shared_prefix_length(previous_item.id, next_item.id) + 1
|
||||||
|
<<prefix::binary-size(prefix_length), _rest::binary>> = next_item.id
|
||||||
|
{next_item.created_at, prefix <> :binary.copy(<<0>>, @id_size - prefix_length)}
|
||||||
|
|
||||||
|
true ->
|
||||||
|
raise ArgumentError, "split_bound/2 requires previous_item <= next_item"
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp decode_ranges(<<>>, _previous_timestamp, _lower_bound, ranges),
|
||||||
|
do: {:ok, Enum.reverse(ranges)}
|
||||||
|
|
||||||
|
defp decode_ranges(binary, previous_timestamp, lower_bound, ranges) do
|
||||||
|
with {:ok, upper_bound, rest, next_timestamp} <- decode_bound(binary, previous_timestamp),
|
||||||
|
:ok <- validate_upper_bound(lower_bound, upper_bound),
|
||||||
|
{:ok, mode, payload, tail} <- decode_payload(rest) do
|
||||||
|
next_ranges = [%{upper_bound: upper_bound, mode: mode, payload: payload} | ranges]
|
||||||
|
|
||||||
|
if upper_bound == :infinity and tail != <<>> do
|
||||||
|
{:error, :invalid_message}
|
||||||
|
else
|
||||||
|
decode_ranges(tail, next_timestamp, upper_bound, next_ranges)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_upper_bound(lower_bound, upper_bound) do
|
||||||
|
if compare_bound(lower_bound, upper_bound) == :lt do
|
||||||
|
:ok
|
||||||
|
else
|
||||||
|
{:error, :invalid_message}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp decode_bound(binary, previous_timestamp) do
|
||||||
|
with {:ok, encoded_timestamp, rest} <- decode_varint(binary),
|
||||||
|
{:ok, length, tail} <- decode_varint(rest),
|
||||||
|
:ok <- validate_bound_prefix_length(length),
|
||||||
|
{:ok, prefix, remainder} <- decode_prefix(tail, length) do
|
||||||
|
decode_bound_value(encoded_timestamp, length, prefix, remainder, previous_timestamp)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp decode_payload(binary) do
|
||||||
|
with {:ok, mode_value, rest} <- decode_varint(binary) do
|
||||||
|
case mode_value do
|
||||||
|
0 ->
|
||||||
|
{:ok, :skip, nil, rest}
|
||||||
|
|
||||||
|
1 ->
|
||||||
|
decode_fingerprint_payload(rest)
|
||||||
|
|
||||||
|
2 ->
|
||||||
|
decode_id_list_payload(rest)
|
||||||
|
|
||||||
|
_other ->
|
||||||
|
{:error, :invalid_message}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp decode_varint(binary), do: decode_varint(binary, 0)
|
||||||
|
|
||||||
|
defp decode_varint(<<>>, _acc), do: {:error, :invalid_message}
|
||||||
|
|
||||||
|
defp decode_varint(<<byte, rest::binary>>, acc) do
|
||||||
|
value = acc * 128 + band(byte, 0x7F)
|
||||||
|
|
||||||
|
if band(byte, 0x80) == 0 do
|
||||||
|
{:ok, value, rest}
|
||||||
|
else
|
||||||
|
decode_varint(rest, value)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp encode_range(range, previous_timestamp) do
|
||||||
|
{encoded_bound, next_timestamp} = encode_bound(range.upper_bound, previous_timestamp)
|
||||||
|
{mode, payload} = encode_payload(range)
|
||||||
|
{[encoded_bound, mode, payload], next_timestamp}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp encode_bound(:infinity, previous_timestamp),
|
||||||
|
do: {[encode_varint(0), encode_varint(0)], previous_timestamp}
|
||||||
|
|
||||||
|
defp encode_bound({timestamp, id}, previous_timestamp) do
|
||||||
|
prefix_length = id_prefix_length(id)
|
||||||
|
<<prefix::binary-size(prefix_length), _rest::binary>> = id
|
||||||
|
|
||||||
|
{
|
||||||
|
[encode_varint(timestamp - previous_timestamp + 1), encode_varint(prefix_length), prefix],
|
||||||
|
timestamp
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp encode_payload(%{mode: :skip}) do
|
||||||
|
{encode_varint(0), <<>>}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp encode_payload(%{mode: :fingerprint, payload: fingerprint})
|
||||||
|
when is_binary(fingerprint) and byte_size(fingerprint) == @fingerprint_size do
|
||||||
|
{encode_varint(1), fingerprint}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp encode_payload(%{mode: :id_list, payload: ids}) when is_list(ids) do
|
||||||
|
encoded_ids = Enum.map(ids, fn id -> validate_id!(id) end)
|
||||||
|
{encode_varint(2), [encode_varint(length(encoded_ids)), encoded_ids]}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp encode_varint(value) when is_integer(value) and value >= 0 do
|
||||||
|
digits = collect_base128_digits(value, [])
|
||||||
|
last_index = length(digits) - 1
|
||||||
|
|
||||||
|
digits
|
||||||
|
|> Enum.with_index()
|
||||||
|
|> Enum.map(fn {digit, index} ->
|
||||||
|
if index == last_index do
|
||||||
|
digit
|
||||||
|
else
|
||||||
|
digit + 128
|
||||||
|
end
|
||||||
|
end)
|
||||||
|
|> :erlang.list_to_binary()
|
||||||
|
end
|
||||||
|
|
||||||
|
defp collect_base128_digits(value, acc) do
|
||||||
|
quotient = div(value, 128)
|
||||||
|
remainder = rem(value, 128)
|
||||||
|
|
||||||
|
if quotient == 0 do
|
||||||
|
[remainder | acc]
|
||||||
|
else
|
||||||
|
collect_base128_digits(quotient, [remainder | acc])
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp unpack_ids(binary), do: unpack_ids(binary, [])
|
||||||
|
|
||||||
|
defp unpack_ids(<<>>, acc), do: Enum.reverse(acc)
|
||||||
|
|
||||||
|
defp unpack_ids(<<id::binary-size(@id_size), rest::binary>>, acc),
|
||||||
|
do: unpack_ids(rest, [id | acc])
|
||||||
|
|
||||||
|
defp decode_prefix(binary, length) when byte_size(binary) >= length do
|
||||||
|
<<prefix::binary-size(length), rest::binary>> = binary
|
||||||
|
{:ok, prefix, rest}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp decode_prefix(_binary, _length), do: {:error, :invalid_message}
|
||||||
|
|
||||||
|
defp decode_bound_value(0, 0, _prefix, remainder, previous_timestamp),
|
||||||
|
do: {:ok, :infinity, remainder, previous_timestamp}
|
||||||
|
|
||||||
|
defp decode_bound_value(0, _length, _prefix, _remainder, _previous_timestamp),
|
||||||
|
do: {:error, :invalid_message}
|
||||||
|
|
||||||
|
defp decode_bound_value(encoded_timestamp, length, prefix, remainder, previous_timestamp) do
|
||||||
|
timestamp = previous_timestamp + encoded_timestamp - 1
|
||||||
|
id = prefix <> :binary.copy(<<0>>, @id_size - length)
|
||||||
|
{:ok, {timestamp, id}, remainder, timestamp}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp decode_fingerprint_payload(<<fingerprint::binary-size(@fingerprint_size), tail::binary>>),
|
||||||
|
do: {:ok, :fingerprint, fingerprint, tail}
|
||||||
|
|
||||||
|
defp decode_fingerprint_payload(_payload), do: {:error, :invalid_message}
|
||||||
|
|
||||||
|
defp decode_id_list_payload(rest) do
|
||||||
|
with {:ok, count, tail} <- decode_varint(rest),
|
||||||
|
{:ok, ids, remainder} <- decode_id_list_bytes(tail, count) do
|
||||||
|
{:ok, :id_list, ids, remainder}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp decode_id_list_bytes(tail, count) do
|
||||||
|
expected_bytes = count * @id_size
|
||||||
|
|
||||||
|
if byte_size(tail) >= expected_bytes do
|
||||||
|
<<ids::binary-size(expected_bytes), remainder::binary>> = tail
|
||||||
|
{:ok, unpack_ids(ids), remainder}
|
||||||
|
else
|
||||||
|
{:error, :invalid_message}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_bound_prefix_length(length)
|
||||||
|
when is_integer(length) and length >= 0 and length <= @id_size,
|
||||||
|
do: :ok
|
||||||
|
|
||||||
|
defp validate_bound_prefix_length(_length), do: {:error, :invalid_message}
|
||||||
|
|
||||||
|
defp id_prefix_length(id) do
|
||||||
|
id
|
||||||
|
|> validate_id!()
|
||||||
|
|> :binary.bin_to_list()
|
||||||
|
|> Enum.reverse()
|
||||||
|
|> Enum.drop_while(&(&1 == 0))
|
||||||
|
|> length()
|
||||||
|
end
|
||||||
|
|
||||||
|
defp shared_prefix_length(left_id, right_id) do
|
||||||
|
left_id = validate_id!(left_id)
|
||||||
|
right_id = validate_id!(right_id)
|
||||||
|
|
||||||
|
left_id
|
||||||
|
|> :binary.bin_to_list()
|
||||||
|
|> Enum.zip(:binary.bin_to_list(right_id))
|
||||||
|
|> Enum.reduce_while(0, fn
|
||||||
|
{left_byte, right_byte}, acc when left_byte == right_byte -> {:cont, acc + 1}
|
||||||
|
_pair, acc -> {:halt, acc}
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp drop_trailing_skip_ranges(ranges) do
|
||||||
|
ranges
|
||||||
|
|> Enum.reverse()
|
||||||
|
|> Enum.drop_while(fn range -> range.mode == :skip end)
|
||||||
|
|> Enum.reverse()
|
||||||
|
end
|
||||||
|
|
||||||
|
defp compare_item_to_bound(_item, :infinity), do: :lt
|
||||||
|
|
||||||
|
defp compare_item_to_bound(item, {timestamp, id}) do
|
||||||
|
cond do
|
||||||
|
item.created_at < timestamp -> :lt
|
||||||
|
item.created_at > timestamp -> :gt
|
||||||
|
item.id < id -> :lt
|
||||||
|
item.id > id -> :gt
|
||||||
|
true -> :eq
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_id!(id) when is_binary(id) and byte_size(id) == @id_size, do: id
|
||||||
|
|
||||||
|
defp validate_id!(_id) do
|
||||||
|
raise ArgumentError, "negentropy ids must be 32-byte binaries"
|
||||||
|
end
|
||||||
|
end
|
||||||
@@ -1,10 +1,13 @@
|
|||||||
defmodule Parrhesia.Negentropy.Sessions do
|
defmodule Parrhesia.Negentropy.Sessions do
|
||||||
@moduledoc """
|
@moduledoc """
|
||||||
In-memory NEG-* session tracking.
|
In-memory NIP-77 session tracking over bounded local event snapshots.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
use GenServer
|
use GenServer
|
||||||
|
|
||||||
|
alias Parrhesia.Negentropy.Engine
|
||||||
|
alias Parrhesia.Storage
|
||||||
|
|
||||||
@type session_key :: {pid(), String.t()}
|
@type session_key :: {pid(), String.t()}
|
||||||
|
|
||||||
@default_max_payload_bytes 4096
|
@default_max_payload_bytes 4096
|
||||||
@@ -12,6 +15,8 @@ defmodule Parrhesia.Negentropy.Sessions do
|
|||||||
@default_max_total_sessions 10_000
|
@default_max_total_sessions 10_000
|
||||||
@default_max_idle_seconds 60
|
@default_max_idle_seconds 60
|
||||||
@default_sweep_interval_seconds 10
|
@default_sweep_interval_seconds 10
|
||||||
|
@default_max_items_per_session 50_000
|
||||||
|
@default_id_list_threshold 32
|
||||||
@sweep_idle_sessions :sweep_idle_sessions
|
@sweep_idle_sessions :sweep_idle_sessions
|
||||||
|
|
||||||
@spec start_link(keyword()) :: GenServer.on_start()
|
@spec start_link(keyword()) :: GenServer.on_start()
|
||||||
@@ -20,16 +25,19 @@ defmodule Parrhesia.Negentropy.Sessions do
|
|||||||
GenServer.start_link(__MODULE__, opts, name: name)
|
GenServer.start_link(__MODULE__, opts, name: name)
|
||||||
end
|
end
|
||||||
|
|
||||||
@spec open(GenServer.server(), pid(), String.t(), map()) :: {:ok, map()} | {:error, term()}
|
@spec open(GenServer.server(), pid(), String.t(), map(), binary(), keyword()) ::
|
||||||
def open(server \\ __MODULE__, owner_pid, subscription_id, params)
|
{:ok, binary()} | {:error, term()}
|
||||||
when is_pid(owner_pid) and is_binary(subscription_id) and is_map(params) do
|
def open(server \\ __MODULE__, owner_pid, subscription_id, filter, message, opts \\ [])
|
||||||
GenServer.call(server, {:open, owner_pid, subscription_id, params})
|
when is_pid(owner_pid) and is_binary(subscription_id) and is_map(filter) and
|
||||||
|
is_binary(message) and is_list(opts) do
|
||||||
|
GenServer.call(server, {:open, owner_pid, subscription_id, filter, message, opts})
|
||||||
end
|
end
|
||||||
|
|
||||||
@spec message(GenServer.server(), pid(), String.t(), map()) :: {:ok, map()} | {:error, term()}
|
@spec message(GenServer.server(), pid(), String.t(), binary()) ::
|
||||||
def message(server \\ __MODULE__, owner_pid, subscription_id, payload)
|
{:ok, binary()} | {:error, term()}
|
||||||
when is_pid(owner_pid) and is_binary(subscription_id) and is_map(payload) do
|
def message(server \\ __MODULE__, owner_pid, subscription_id, message)
|
||||||
GenServer.call(server, {:message, owner_pid, subscription_id, payload})
|
when is_pid(owner_pid) and is_binary(subscription_id) and is_binary(message) do
|
||||||
|
GenServer.call(server, {:message, owner_pid, subscription_id, message})
|
||||||
end
|
end
|
||||||
|
|
||||||
@spec close(GenServer.server(), pid(), String.t()) :: :ok
|
@spec close(GenServer.server(), pid(), String.t()) :: :ok
|
||||||
@@ -63,7 +71,17 @@ defmodule Parrhesia.Negentropy.Sessions do
|
|||||||
max_total_sessions:
|
max_total_sessions:
|
||||||
normalize_positive_integer(Keyword.get(opts, :max_total_sessions), max_total_sessions()),
|
normalize_positive_integer(Keyword.get(opts, :max_total_sessions), max_total_sessions()),
|
||||||
max_idle_ms: max_idle_ms,
|
max_idle_ms: max_idle_ms,
|
||||||
sweep_interval_ms: sweep_interval_ms
|
sweep_interval_ms: sweep_interval_ms,
|
||||||
|
max_items_per_session:
|
||||||
|
normalize_positive_integer(
|
||||||
|
Keyword.get(opts, :max_items_per_session),
|
||||||
|
max_items_per_session()
|
||||||
|
),
|
||||||
|
id_list_threshold:
|
||||||
|
normalize_positive_integer(
|
||||||
|
Keyword.get(opts, :id_list_threshold),
|
||||||
|
id_list_threshold()
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
:ok = schedule_idle_sweep(sweep_interval_ms)
|
:ok = schedule_idle_sweep(sweep_interval_ms)
|
||||||
@@ -72,16 +90,19 @@ defmodule Parrhesia.Negentropy.Sessions do
|
|||||||
end
|
end
|
||||||
|
|
||||||
@impl true
|
@impl true
|
||||||
def handle_call({:open, owner_pid, subscription_id, params}, _from, state) do
|
def handle_call({:open, owner_pid, subscription_id, filter, message, opts}, _from, state) do
|
||||||
key = {owner_pid, subscription_id}
|
key = {owner_pid, subscription_id}
|
||||||
|
|
||||||
with :ok <- validate_payload_size(params, state.max_payload_bytes),
|
with :ok <- validate_payload_size(filter, message, state.max_payload_bytes),
|
||||||
:ok <- enforce_session_limits(state, owner_pid, key) do
|
:ok <- enforce_session_limits(state, owner_pid, key),
|
||||||
|
{:ok, refs} <- fetch_event_refs(filter, opts, state.max_items_per_session),
|
||||||
|
{:ok, response} <-
|
||||||
|
Engine.answer(refs, message, id_list_threshold: state.id_list_threshold) do
|
||||||
now_ms = System.monotonic_time(:millisecond)
|
now_ms = System.monotonic_time(:millisecond)
|
||||||
|
|
||||||
session = %{
|
session = %{
|
||||||
cursor: 0,
|
filter: filter,
|
||||||
params: params,
|
refs: refs,
|
||||||
opened_at: System.system_time(:second),
|
opened_at: System.system_time(:second),
|
||||||
last_active_at_ms: now_ms
|
last_active_at_ms: now_ms
|
||||||
}
|
}
|
||||||
@@ -91,14 +112,14 @@ defmodule Parrhesia.Negentropy.Sessions do
|
|||||||
|> ensure_monitor(owner_pid)
|
|> ensure_monitor(owner_pid)
|
||||||
|> put_in([:sessions, key], session)
|
|> put_in([:sessions, key], session)
|
||||||
|
|
||||||
{:reply, {:ok, %{"status" => "open", "cursor" => 0}}, state}
|
{:reply, {:ok, response}, state}
|
||||||
else
|
else
|
||||||
{:error, reason} ->
|
{:error, reason} ->
|
||||||
{:reply, {:error, reason}, state}
|
{:reply, {:error, reason}, state}
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
def handle_call({:message, owner_pid, subscription_id, payload}, _from, state) do
|
def handle_call({:message, owner_pid, subscription_id, message}, _from, state) do
|
||||||
key = {owner_pid, subscription_id}
|
key = {owner_pid, subscription_id}
|
||||||
|
|
||||||
case Map.get(state.sessions, key) do
|
case Map.get(state.sessions, key) do
|
||||||
@@ -106,20 +127,18 @@ defmodule Parrhesia.Negentropy.Sessions do
|
|||||||
{:reply, {:error, :unknown_session}, state}
|
{:reply, {:error, :unknown_session}, state}
|
||||||
|
|
||||||
session ->
|
session ->
|
||||||
case validate_payload_size(payload, state.max_payload_bytes) do
|
with :ok <- validate_payload_size(session.filter, message, state.max_payload_bytes),
|
||||||
:ok ->
|
{:ok, response} <-
|
||||||
cursor = session.cursor + 1
|
Engine.answer(session.refs, message, id_list_threshold: state.id_list_threshold) do
|
||||||
|
next_session = %{
|
||||||
|
session
|
||||||
|
| last_active_at_ms: System.monotonic_time(:millisecond)
|
||||||
|
}
|
||||||
|
|
||||||
next_session = %{
|
state = put_in(state, [:sessions, key], next_session)
|
||||||
session
|
|
||||||
| cursor: cursor,
|
|
||||||
last_active_at_ms: System.monotonic_time(:millisecond)
|
|
||||||
}
|
|
||||||
|
|
||||||
state = put_in(state, [:sessions, key], next_session)
|
|
||||||
|
|
||||||
{:reply, {:ok, %{"status" => "ack", "cursor" => cursor}}, state}
|
|
||||||
|
|
||||||
|
{:reply, {:ok, response}, state}
|
||||||
|
else
|
||||||
{:error, reason} ->
|
{:error, reason} ->
|
||||||
{:reply, {:error, reason}, state}
|
{:reply, {:error, reason}, state}
|
||||||
end
|
end
|
||||||
@@ -185,6 +204,21 @@ defmodule Parrhesia.Negentropy.Sessions do
|
|||||||
|
|
||||||
def handle_info(_message, state), do: {:noreply, state}
|
def handle_info(_message, state), do: {:noreply, state}
|
||||||
|
|
||||||
|
defp fetch_event_refs(filter, opts, max_items_per_session) do
|
||||||
|
query_opts =
|
||||||
|
opts
|
||||||
|
|> Keyword.take([:now, :requester_pubkeys])
|
||||||
|
|> Keyword.put(:limit, max_items_per_session + 1)
|
||||||
|
|
||||||
|
with {:ok, refs} <- Storage.events().query_event_refs(%{}, [filter], query_opts) do
|
||||||
|
if length(refs) > max_items_per_session do
|
||||||
|
{:error, :query_too_big}
|
||||||
|
else
|
||||||
|
{:ok, refs}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
defp clear_monitors_without_sessions(state, owner_pids) do
|
defp clear_monitors_without_sessions(state, owner_pids) do
|
||||||
Enum.reduce(Map.keys(state.monitors), state, fn owner_pid, acc ->
|
Enum.reduce(Map.keys(state.monitors), state, fn owner_pid, acc ->
|
||||||
if MapSet.member?(owner_pids, owner_pid) do
|
if MapSet.member?(owner_pids, owner_pid) do
|
||||||
@@ -203,8 +237,8 @@ defmodule Parrhesia.Negentropy.Sessions do
|
|||||||
end)
|
end)
|
||||||
end
|
end
|
||||||
|
|
||||||
defp validate_payload_size(payload, max_payload_bytes) do
|
defp validate_payload_size(filter, message, max_payload_bytes) do
|
||||||
if :erlang.external_size(payload) <= max_payload_bytes do
|
if :erlang.external_size({filter, message}) <= max_payload_bytes do
|
||||||
:ok
|
:ok
|
||||||
else
|
else
|
||||||
{:error, :payload_too_large}
|
{:error, :payload_too_large}
|
||||||
@@ -296,6 +330,18 @@ defmodule Parrhesia.Negentropy.Sessions do
|
|||||||
|> Keyword.get(:negentropy_session_sweep_interval_seconds, @default_sweep_interval_seconds)
|
|> Keyword.get(:negentropy_session_sweep_interval_seconds, @default_sweep_interval_seconds)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp max_items_per_session do
|
||||||
|
:parrhesia
|
||||||
|
|> Application.get_env(:limits, [])
|
||||||
|
|> Keyword.get(:max_negentropy_items_per_session, @default_max_items_per_session)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp id_list_threshold do
|
||||||
|
:parrhesia
|
||||||
|
|> Application.get_env(:limits, [])
|
||||||
|
|> Keyword.get(:negentropy_id_list_threshold, @default_id_list_threshold)
|
||||||
|
end
|
||||||
|
|
||||||
defp normalize_positive_integer(value, _default) when is_integer(value) and value > 0,
|
defp normalize_positive_integer(value, _default) when is_integer(value) and value > 0,
|
||||||
do: value
|
do: value
|
||||||
|
|
||||||
|
|||||||
389
lib/parrhesia/nip43.ex
Normal file
389
lib/parrhesia/nip43.ex
Normal file
@@ -0,0 +1,389 @@
|
|||||||
|
defmodule Parrhesia.NIP43 do
|
||||||
|
@moduledoc false
|
||||||
|
|
||||||
|
alias Parrhesia.API.Events
|
||||||
|
alias Parrhesia.API.Identity
|
||||||
|
alias Parrhesia.API.RequestContext
|
||||||
|
alias Parrhesia.Groups.Flow
|
||||||
|
alias Parrhesia.Protocol
|
||||||
|
alias Parrhesia.Protocol.Filter
|
||||||
|
|
||||||
|
@join_request_kind 28_934
|
||||||
|
@invite_request_kind 28_935
|
||||||
|
@leave_request_kind 28_936
|
||||||
|
@add_user_kind 8_000
|
||||||
|
@remove_user_kind 8_001
|
||||||
|
@membership_list_kind 13_534
|
||||||
|
@claim_token_kind 31_943
|
||||||
|
@default_invite_ttl_seconds 900
|
||||||
|
|
||||||
|
@type publish_state ::
|
||||||
|
:ok
|
||||||
|
| %{action: :join, duplicate?: boolean(), message: String.t()}
|
||||||
|
| %{action: :leave, duplicate?: boolean(), message: String.t()}
|
||||||
|
|
||||||
|
@spec enabled?(keyword()) :: boolean()
|
||||||
|
def enabled?(opts \\ []) do
|
||||||
|
config(opts)
|
||||||
|
|> Keyword.get(:enabled, true)
|
||||||
|
|> Kernel.==(true)
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec prepare_publish(map(), keyword()) :: {:ok, publish_state()} | {:error, term()}
|
||||||
|
def prepare_publish(event, opts \\ []) when is_map(event) and is_list(opts) do
|
||||||
|
if enabled?(opts) do
|
||||||
|
prepare_enabled_publish(event, opts)
|
||||||
|
else
|
||||||
|
prepare_disabled_publish(event)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec finalize_publish(map(), publish_state(), keyword()) :: :ok | {:ok, String.t()}
|
||||||
|
def finalize_publish(event, publish_state, opts \\ [])
|
||||||
|
|
||||||
|
def finalize_publish(event, :ok, _opts) when is_map(event) do
|
||||||
|
case Map.get(event, "kind") do
|
||||||
|
kind when kind in [@add_user_kind, @remove_user_kind, @membership_list_kind] ->
|
||||||
|
Flow.handle_event(event)
|
||||||
|
|
||||||
|
_other ->
|
||||||
|
:ok
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def finalize_publish(event, %{action: :join, duplicate?: true, message: message}, _opts)
|
||||||
|
when is_map(event) do
|
||||||
|
{:ok, message}
|
||||||
|
end
|
||||||
|
|
||||||
|
def finalize_publish(event, %{action: :join, duplicate?: false, message: message}, opts)
|
||||||
|
when is_map(event) do
|
||||||
|
opts = Keyword.put_new(opts, :now, Map.get(event, "created_at"))
|
||||||
|
:ok = Flow.handle_event(event)
|
||||||
|
publish_membership_events(Map.get(event, "pubkey"), :add, opts)
|
||||||
|
{:ok, message}
|
||||||
|
end
|
||||||
|
|
||||||
|
def finalize_publish(event, %{action: :leave, duplicate?: true, message: message}, _opts)
|
||||||
|
when is_map(event) do
|
||||||
|
{:ok, message}
|
||||||
|
end
|
||||||
|
|
||||||
|
def finalize_publish(event, %{action: :leave, duplicate?: false, message: message}, opts)
|
||||||
|
when is_map(event) do
|
||||||
|
opts = Keyword.put_new(opts, :now, Map.get(event, "created_at"))
|
||||||
|
:ok = Flow.handle_event(event)
|
||||||
|
publish_membership_events(Map.get(event, "pubkey"), :remove, opts)
|
||||||
|
{:ok, message}
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec dynamic_events([map()], keyword()) :: [map()]
|
||||||
|
def dynamic_events(filters, opts \\ []) when is_list(filters) and is_list(opts) do
|
||||||
|
if enabled?(opts) and requests_invite?(filters) do
|
||||||
|
filters
|
||||||
|
|> build_invite_event(opts)
|
||||||
|
|> maybe_wrap_event()
|
||||||
|
else
|
||||||
|
[]
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec dynamic_count([map()], keyword()) :: non_neg_integer()
|
||||||
|
def dynamic_count(filters, opts \\ []) do
|
||||||
|
filters
|
||||||
|
|> dynamic_events(opts)
|
||||||
|
|> length()
|
||||||
|
end
|
||||||
|
|
||||||
|
defp prepare_enabled_publish(%{"kind" => @join_request_kind, "pubkey" => pubkey} = event, opts)
|
||||||
|
when is_binary(pubkey) do
|
||||||
|
with {:ok, _claim} <- validate_claim_from_event(event),
|
||||||
|
{:ok, membership} <- Flow.get_membership(pubkey) do
|
||||||
|
if membership_active?(membership) do
|
||||||
|
{:ok,
|
||||||
|
%{
|
||||||
|
action: :join,
|
||||||
|
duplicate?: true,
|
||||||
|
message: "duplicate: you are already a member of this relay."
|
||||||
|
}}
|
||||||
|
else
|
||||||
|
{:ok,
|
||||||
|
%{
|
||||||
|
action: :join,
|
||||||
|
duplicate?: false,
|
||||||
|
message: "info: welcome to #{relay_url(opts)}!"
|
||||||
|
}}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp prepare_enabled_publish(%{"kind" => @leave_request_kind, "pubkey" => pubkey}, _opts)
|
||||||
|
when is_binary(pubkey) do
|
||||||
|
with {:ok, membership} <- Flow.get_membership(pubkey) do
|
||||||
|
if membership_active?(membership) do
|
||||||
|
{:ok, %{action: :leave, duplicate?: false, message: "info: membership revoked."}}
|
||||||
|
else
|
||||||
|
{:ok,
|
||||||
|
%{
|
||||||
|
action: :leave,
|
||||||
|
duplicate?: true,
|
||||||
|
message: "duplicate: you are not a member of this relay."
|
||||||
|
}}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp prepare_enabled_publish(%{"kind" => @invite_request_kind}, _opts) do
|
||||||
|
{:error, "restricted: kind 28935 invite claims are generated via REQ"}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp prepare_enabled_publish(%{"kind" => kind, "pubkey" => pubkey}, _opts)
|
||||||
|
when kind in [@add_user_kind, @remove_user_kind, @membership_list_kind] and
|
||||||
|
is_binary(pubkey) do
|
||||||
|
case relay_pubkey() do
|
||||||
|
{:ok, ^pubkey} -> {:ok, :ok}
|
||||||
|
{:ok, _other} -> {:error, "restricted: relay access metadata must be relay-signed"}
|
||||||
|
{:error, _reason} -> {:error, "error: relay identity unavailable"}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp prepare_enabled_publish(_event, _opts), do: {:ok, :ok}
|
||||||
|
|
||||||
|
defp prepare_disabled_publish(%{"kind" => kind})
|
||||||
|
when kind in [
|
||||||
|
@join_request_kind,
|
||||||
|
@invite_request_kind,
|
||||||
|
@leave_request_kind,
|
||||||
|
@add_user_kind,
|
||||||
|
@remove_user_kind,
|
||||||
|
@membership_list_kind
|
||||||
|
] do
|
||||||
|
{:error, "blocked: NIP-43 relay access requests are disabled"}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp prepare_disabled_publish(_event), do: {:ok, :ok}
|
||||||
|
|
||||||
|
defp build_invite_event(filters, opts) do
|
||||||
|
now = Keyword.get(opts, :now, System.system_time(:second))
|
||||||
|
identity_opts = identity_opts(opts)
|
||||||
|
|
||||||
|
with {:ok, claim} <- issue_claim(now, opts),
|
||||||
|
{:ok, signed_event} <-
|
||||||
|
%{
|
||||||
|
"created_at" => now,
|
||||||
|
"kind" => @invite_request_kind,
|
||||||
|
"tags" => [["-"], ["claim", claim]],
|
||||||
|
"content" => ""
|
||||||
|
}
|
||||||
|
|> Identity.sign_event(identity_opts),
|
||||||
|
true <- Filter.matches_any?(signed_event, filters) do
|
||||||
|
{:ok, signed_event}
|
||||||
|
else
|
||||||
|
_other -> :error
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_wrap_event({:ok, event}), do: [event]
|
||||||
|
defp maybe_wrap_event(_other), do: []
|
||||||
|
|
||||||
|
defp requests_invite?(filters) do
|
||||||
|
Enum.any?(filters, fn filter ->
|
||||||
|
case Map.get(filter, "kinds") do
|
||||||
|
kinds when is_list(kinds) -> @invite_request_kind in kinds
|
||||||
|
_other -> false
|
||||||
|
end
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp issue_claim(now, opts) do
|
||||||
|
ttl_seconds =
|
||||||
|
config(opts)
|
||||||
|
|> Keyword.get(:invite_ttl_seconds, @default_invite_ttl_seconds)
|
||||||
|
|> normalize_positive_integer(@default_invite_ttl_seconds)
|
||||||
|
|
||||||
|
identity_opts = identity_opts(opts)
|
||||||
|
|
||||||
|
token_event = %{
|
||||||
|
"created_at" => now,
|
||||||
|
"kind" => @claim_token_kind,
|
||||||
|
"tags" => [["exp", Integer.to_string(now + ttl_seconds)]],
|
||||||
|
"content" => Base.encode16(:crypto.strong_rand_bytes(16), case: :lower)
|
||||||
|
}
|
||||||
|
|
||||||
|
with {:ok, signed_token} <- Identity.sign_event(token_event, identity_opts) do
|
||||||
|
signed_token
|
||||||
|
|> JSON.encode!()
|
||||||
|
|> Base.url_encode64(padding: false)
|
||||||
|
|> then(&{:ok, &1})
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_claim_from_event(event) do
|
||||||
|
claim =
|
||||||
|
event
|
||||||
|
|> Map.get("tags", [])
|
||||||
|
|> Enum.find_value(fn
|
||||||
|
["claim", value | _rest] when is_binary(value) and value != "" -> value
|
||||||
|
_tag -> nil
|
||||||
|
end)
|
||||||
|
|
||||||
|
case claim do
|
||||||
|
nil -> {:error, "restricted: that is an invalid invite code."}
|
||||||
|
value -> validate_claim(value)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_claim(claim) when is_binary(claim) do
|
||||||
|
with {:ok, payload} <- Base.url_decode64(claim, padding: false),
|
||||||
|
{:ok, decoded} <- JSON.decode(payload),
|
||||||
|
:ok <- Protocol.validate_event(decoded),
|
||||||
|
:ok <- validate_claim_token(decoded) do
|
||||||
|
{:ok, decoded}
|
||||||
|
else
|
||||||
|
{:error, :expired_claim} ->
|
||||||
|
{:error, "restricted: that invite code is expired."}
|
||||||
|
|
||||||
|
_other ->
|
||||||
|
{:error, "restricted: that is an invalid invite code."}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_claim(_claim), do: {:error, "restricted: that is an invalid invite code."}
|
||||||
|
|
||||||
|
defp validate_claim_token(%{
|
||||||
|
"kind" => @claim_token_kind,
|
||||||
|
"pubkey" => pubkey,
|
||||||
|
"tags" => tags
|
||||||
|
}) do
|
||||||
|
with {:ok, relay_pubkey} <- relay_pubkey(),
|
||||||
|
true <- pubkey == relay_pubkey,
|
||||||
|
{:ok, expires_at} <- fetch_expiration(tags),
|
||||||
|
true <- expires_at >= System.system_time(:second) do
|
||||||
|
:ok
|
||||||
|
else
|
||||||
|
false -> {:error, :invalid_claim}
|
||||||
|
{:error, _reason} -> {:error, :invalid_claim}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_claim_token(_event), do: {:error, :invalid_claim}
|
||||||
|
|
||||||
|
defp fetch_expiration(tags) when is_list(tags) do
|
||||||
|
case Enum.find(tags, &match?(["exp", _value | _rest], &1)) do
|
||||||
|
["exp", value | _rest] ->
|
||||||
|
parse_expiration(value)
|
||||||
|
|
||||||
|
_other ->
|
||||||
|
{:error, :invalid_claim}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp parse_expiration(value) when is_binary(value) do
|
||||||
|
case Integer.parse(value) do
|
||||||
|
{expires_at, ""} when expires_at > 0 -> validate_expiration(expires_at)
|
||||||
|
_other -> {:error, :invalid_claim}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp parse_expiration(_value), do: {:error, :invalid_claim}
|
||||||
|
|
||||||
|
defp validate_expiration(expires_at) when is_integer(expires_at) do
|
||||||
|
if expires_at >= System.system_time(:second) do
|
||||||
|
{:ok, expires_at}
|
||||||
|
else
|
||||||
|
{:error, :expired_claim}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_expiration(_expires_at), do: {:error, :expired_claim}
|
||||||
|
|
||||||
|
defp publish_membership_events(member_pubkey, action, opts) when is_binary(member_pubkey) do
|
||||||
|
now = Keyword.get(opts, :now, System.system_time(:second))
|
||||||
|
identity_opts = identity_opts(opts)
|
||||||
|
context = Keyword.get(opts, :context, %RequestContext{})
|
||||||
|
|
||||||
|
action
|
||||||
|
|> build_membership_delta_event(member_pubkey, now)
|
||||||
|
|> sign_and_publish(context, identity_opts)
|
||||||
|
|
||||||
|
current_membership_snapshot(now)
|
||||||
|
|> sign_and_publish(context, identity_opts)
|
||||||
|
|
||||||
|
:ok
|
||||||
|
end
|
||||||
|
|
||||||
|
defp build_membership_delta_event(:add, member_pubkey, now) do
|
||||||
|
%{
|
||||||
|
"created_at" => now,
|
||||||
|
"kind" => @add_user_kind,
|
||||||
|
"tags" => [["-"], ["p", member_pubkey]],
|
||||||
|
"content" => ""
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp build_membership_delta_event(:remove, member_pubkey, now) do
|
||||||
|
%{
|
||||||
|
"created_at" => now,
|
||||||
|
"kind" => @remove_user_kind,
|
||||||
|
"tags" => [["-"], ["p", member_pubkey]],
|
||||||
|
"content" => ""
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp current_membership_snapshot(now) do
|
||||||
|
tags =
|
||||||
|
case Flow.list_memberships() do
|
||||||
|
{:ok, memberships} ->
|
||||||
|
[["-"] | Enum.map(memberships, &["member", &1.pubkey])]
|
||||||
|
|
||||||
|
{:error, _reason} ->
|
||||||
|
[["-"]]
|
||||||
|
end
|
||||||
|
|
||||||
|
%{
|
||||||
|
"created_at" => now,
|
||||||
|
"kind" => @membership_list_kind,
|
||||||
|
"tags" => tags,
|
||||||
|
"content" => ""
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp sign_and_publish(unsigned_event, context, identity_opts) do
|
||||||
|
with {:ok, signed_event} <- Identity.sign_event(unsigned_event, identity_opts),
|
||||||
|
{:ok, %{accepted: true}} <- Events.publish(signed_event, context: context) do
|
||||||
|
:ok
|
||||||
|
else
|
||||||
|
_other -> :ok
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp membership_active?(nil), do: false
|
||||||
|
defp membership_active?(%{role: "member"}), do: true
|
||||||
|
defp membership_active?(_membership), do: false
|
||||||
|
|
||||||
|
defp relay_pubkey do
|
||||||
|
case Identity.get() do
|
||||||
|
{:ok, %{pubkey: pubkey}} when is_binary(pubkey) -> {:ok, pubkey}
|
||||||
|
{:error, reason} -> {:error, reason}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp relay_url(opts) do
|
||||||
|
Keyword.get(opts, :relay_url, Application.get_env(:parrhesia, :relay_url))
|
||||||
|
end
|
||||||
|
|
||||||
|
defp identity_opts(opts) do
|
||||||
|
opts
|
||||||
|
|> Keyword.take([:path, :private_key, :configured_private_key])
|
||||||
|
end
|
||||||
|
|
||||||
|
defp config(opts) do
|
||||||
|
case Keyword.get(opts, :config) do
|
||||||
|
config when is_list(config) -> config
|
||||||
|
_other -> Application.get_env(:parrhesia, :nip43, [])
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_positive_integer(value, _default) when is_integer(value) and value > 0, do: value
|
||||||
|
defp normalize_positive_integer(_value, default), do: default
|
||||||
|
end
|
||||||
400
lib/parrhesia/nip66.ex
Normal file
400
lib/parrhesia/nip66.ex
Normal file
@@ -0,0 +1,400 @@
|
|||||||
|
defmodule Parrhesia.NIP66 do
  @moduledoc false

  # NIP-66 relay monitoring: periodically publishes a monitor announcement
  # (kind 10166) and one relay-discovery event (kind 30166) per configured
  # target, based on probe results from Parrhesia.NIP66.Probe.

  alias Parrhesia.API.Events
  alias Parrhesia.API.Identity
  alias Parrhesia.API.RequestContext
  alias Parrhesia.NIP66.Probe
  alias Parrhesia.Web.Listener
  alias Parrhesia.Web.RelayInfo

  # How often snapshots are published when the config does not say otherwise.
  @default_publish_interval_seconds 900
  # Per-check probe timeout fallback.
  @default_timeout_ms 5_000
  # Probe checks run when the config does not restrict them.
  @default_checks [:open, :read, :nip11]
  # Only these requirement names may appear in "R" tags.
  @allowed_requirement_keys MapSet.new(~w[auth writes pow payment])

  # True when NIP-66 publishing is configured on AND at least one target
  # resolves to an enabled listener with a valid relay URL.
  @spec enabled?(keyword()) :: boolean()
  def enabled?(opts \\ []) do
    config = config(opts)
    config_enabled?(config) and active_targets(config, listeners(opts)) != []
  end

  # Publishes one full NIP-66 snapshot (announcement + discovery events).
  # Options: :probe_fun, :context, :now, :config, :listeners, plus the
  # identity options forwarded by identity_opts/1. Always returns
  # {:ok, published_events}; an empty list when disabled or no targets.
  @spec publish_snapshot(keyword()) :: {:ok, [map()]}
  def publish_snapshot(opts \\ []) when is_list(opts) do
    config = config(opts)
    targets = active_targets(config, listeners(opts))

    if config_enabled?(config) and targets != [] do
      # Injectable probe function for tests; defaults to the real prober.
      probe_fun = Keyword.get(opts, :probe_fun, &Probe.probe/3)
      context = Keyword.get(opts, :context, %RequestContext{})
      now = Keyword.get(opts, :now, System.system_time(:second))
      identity_opts = identity_opts(opts)

      events =
        maybe_publish_monitor_announcement(config, now, context, identity_opts)
        |> Kernel.++(
          publish_discovery_events(targets, config, probe_fun, now, context, identity_opts)
        )

      {:ok, events}
    else
      {:ok, []}
    end
  end

  # Publish cadence in milliseconds, sanitized to a positive integer.
  @spec publish_interval_ms(keyword()) :: pos_integer()
  def publish_interval_ms(opts \\ []) when is_list(opts) do
    config = config(opts)

    config
    |> Keyword.get(:publish_interval_seconds, @default_publish_interval_seconds)
    |> normalize_positive_integer(@default_publish_interval_seconds)
    |> Kernel.*(1_000)
  end

  # Returns the published announcement wrapped in a list, or [] when the
  # announcement is disabled or publishing failed.
  defp maybe_publish_monitor_announcement(config, now, context, identity_opts) do
    if Keyword.get(config, :publish_monitor_announcement?, true) do
      config
      |> build_monitor_announcement(now)
      |> sign_and_publish(context, identity_opts)
      |> maybe_wrap_event()
    else
      []
    end
  end

  # Probes each target and publishes a kind-30166 discovery event for it.
  # Probe failures fall back to an empty probe result rather than aborting.
  defp publish_discovery_events(targets, config, probe_fun, now, context, identity_opts) do
    probe_opts = [
      timeout_ms:
        config
        |> Keyword.get(:timeout_ms, @default_timeout_ms)
        |> normalize_positive_integer(@default_timeout_ms),
      checks: normalize_checks(Keyword.get(config, :checks, @default_checks))
    ]

    Enum.flat_map(targets, fn target ->
      probe_result =
        case probe_fun.(target, probe_opts, identity_opts) do
          {:ok, result} when is_map(result) -> result
          _other -> %{checks: [], metrics: %{}, relay_info: nil, relay_info_body: nil}
        end

      target
      |> build_discovery_event(now, probe_result, identity_opts)
      |> sign_and_publish(context, identity_opts)
      |> maybe_wrap_event()
    end)
  end

  # Signs and publishes an event; {:ok, signed_event} on acceptance,
  # :error on any signing or publish failure (no reason preserved).
  defp sign_and_publish(event, context, identity_opts) do
    with {:ok, signed_event} <- Identity.sign_event(event, identity_opts),
         {:ok, %{accepted: true}} <- Events.publish(signed_event, context: context) do
      {:ok, signed_event}
    else
      _other -> :error
    end
  end

  # Lifts a sign_and_publish/3 result into a (possibly empty) event list.
  defp maybe_wrap_event({:ok, event}), do: [event]
  defp maybe_wrap_event(_other), do: []

  # Builds the unsigned kind-10166 monitor announcement: frequency,
  # per-check timeouts, advertised checks, and an optional geohash tag.
  defp build_monitor_announcement(config, now) do
    checks = normalize_checks(Keyword.get(config, :checks, @default_checks))
    timeout_ms = Keyword.get(config, :timeout_ms, @default_timeout_ms)
    frequency = Keyword.get(config, :publish_interval_seconds, @default_publish_interval_seconds)

    tags =
      [
        [
          "frequency",
          Integer.to_string(
            normalize_positive_integer(frequency, @default_publish_interval_seconds)
          )
        ]
      ] ++
        Enum.map(checks, fn check ->
          ["timeout", Atom.to_string(check), Integer.to_string(timeout_ms)]
        end) ++
        Enum.map(checks, fn check -> ["c", Atom.to_string(check)] end) ++
        maybe_geohash_tag(config)

    %{
      "created_at" => now,
      "kind" => 10_166,
      "tags" => tags,
      "content" => ""
    }
  end

  # Builds the unsigned kind-30166 discovery event for one target.
  # Content is the probed NIP-11 body when available, otherwise a locally
  # generated relay-info document.
  defp build_discovery_event(target, now, probe_result, identity_opts) do
    relay_info = probe_result[:relay_info] || local_relay_info(target.listener, identity_opts)
    content = probe_result[:relay_info_body] || JSON.encode!(relay_info)

    tags =
      [["d", target.relay_url]]
      |> append_network_tag(target)
      |> append_relay_type_tag(target)
      |> append_geohash_tag(target)
      |> append_topic_tags(target)
      |> Kernel.++(nip_tags(relay_info))
      |> Kernel.++(requirement_tags(relay_info))
      |> Kernel.++(rtt_tags(probe_result[:metrics] || %{}))

    %{
      "created_at" => now,
      "kind" => 30_166,
      "tags" => tags,
      "content" => content
    }
  end

  # One "N" tag per supported NIP number from the relay-info document.
  defp nip_tags(relay_info) do
    relay_info
    |> Map.get("supported_nips", [])
    |> Enum.map(&["N", Integer.to_string(&1)])
  end

  # "R" requirement tags derived from the NIP-11 "limitation" object.
  # A met requirement is tagged by name; an unmet one is prefixed with "!".
  # Only the names in @allowed_requirement_keys survive the filter.
  defp requirement_tags(relay_info) do
    limitation = Map.get(relay_info, "limitation", %{})

    [
      requirement_value("auth", Map.get(limitation, "auth_required", false)),
      requirement_value("writes", Map.get(limitation, "restricted_writes", false)),
      requirement_value("pow", Map.get(limitation, "min_pow_difficulty", 0) > 0),
      requirement_value("payment", Map.get(limitation, "payment_required", false))
    ]
    |> Enum.filter(&MapSet.member?(@allowed_requirement_keys, String.trim_leading(&1, "!")))
    |> Enum.map(&["R", &1])
  end

  defp requirement_value(name, true), do: name
  defp requirement_value(name, false), do: "!" <> name

  # Round-trip-time tags for whichever metrics the probe produced.
  defp rtt_tags(metrics) when is_map(metrics) do
    []
    |> maybe_put_metric_tag("rtt-open", Map.get(metrics, :rtt_open_ms))
    |> maybe_put_metric_tag("rtt-read", Map.get(metrics, :rtt_read_ms))
    |> maybe_put_metric_tag("rtt-write", Map.get(metrics, :rtt_write_ms))
  end

  defp append_network_tag(tags, target) do
    case target.network do
      nil -> tags
      value -> tags ++ [["n", value]]
    end
  end

  defp append_relay_type_tag(tags, target) do
    case target.relay_type do
      nil -> tags
      value -> tags ++ [["T", value]]
    end
  end

  defp append_geohash_tag(tags, target) do
    case target.geohash do
      nil -> tags
      value -> tags ++ [["g", value]]
    end
  end

  defp append_topic_tags(tags, target) do
    tags ++ Enum.map(target.topics, &["t", &1])
  end

  # Only non-negative integer metric values are emitted as tags.
  defp maybe_put_metric_tag(tags, _name, nil), do: tags

  defp maybe_put_metric_tag(tags, name, value) when is_integer(value) and value >= 0 do
    tags ++ [[name, Integer.to_string(value)]]
  end

  defp maybe_put_metric_tag(tags, _name, _value), do: tags

  # Local fallback relay-info document, stamped with our own pubkey when
  # the identity is available.
  defp local_relay_info(listener, identity_opts) do
    relay_info = RelayInfo.document(listener)

    case Identity.get(identity_opts) do
      {:ok, %{pubkey: pubkey}} ->
        relay_info
        |> Map.put("pubkey", pubkey)
        |> Map.put("self", pubkey)

      {:error, _reason} ->
        relay_info
    end
  end

  # Optional "g" (geohash) tag for the monitor announcement.
  defp maybe_geohash_tag(config) do
    case fetch_value(config, :geohash) do
      value when is_binary(value) and value != "" -> [["g", value]]
      _other -> []
    end
  end

  # Resolves configured targets against the known listeners, dropping any
  # that fail normalization. An empty :targets config means "probe the
  # default public target".
  defp active_targets(config, listeners) do
    listeners_by_id = Map.new(listeners, &{&1.id, &1})

    raw_targets =
      case Keyword.get(config, :targets, []) do
        [] -> [default_target()]
        targets when is_list(targets) -> targets
        _other -> []
      end

    Enum.flat_map(raw_targets, fn raw_target ->
      case normalize_target(raw_target, listeners_by_id) do
        {:ok, target} -> [target]
        :error -> []
      end
    end)
  end

  # Validates one raw target: its listener must exist, be enabled with the
  # :nostr feature, and its relay URL must normalize. Returns :error when
  # any of these fail.
  defp normalize_target(target, listeners_by_id) when is_map(target) or is_list(target) do
    listener_id = fetch_value(target, :listener) || :public
    relay_url = fetch_value(target, :relay_url) || Application.get_env(:parrhesia, :relay_url)

    with %{} = listener <- Map.get(listeners_by_id, normalize_listener_id(listener_id)),
         true <- listener.enabled and Listener.feature_enabled?(listener, :nostr),
         {:ok, normalized_relay_url} <- normalize_relay_url(relay_url) do
      {:ok,
       %{
         listener: listener,
         relay_url: normalized_relay_url,
         network: normalize_network(fetch_value(target, :network), normalized_relay_url),
         relay_type: normalize_optional_string(fetch_value(target, :relay_type)),
         geohash: normalize_optional_string(fetch_value(target, :geohash)),
         topics: normalize_string_list(fetch_value(target, :topics))
       }}
    else
      _other -> :error
    end
  end

  defp normalize_target(_target, _listeners_by_id), do: :error

  # Canonicalizes a ws/wss URL: lowercase scheme/host, "/" path fallback,
  # default ports elided, query/fragment stripped. :error for anything
  # that is not a ws(s) URL with a host.
  defp normalize_relay_url(relay_url) when is_binary(relay_url) and relay_url != "" do
    case URI.parse(relay_url) do
      %URI{scheme: scheme, host: host} = uri
      when scheme in ["ws", "wss"] and is_binary(host) and host != "" ->
        normalized_uri = %URI{
          uri
          | scheme: String.downcase(scheme),
            host: String.downcase(host),
            path: normalize_path(uri.path),
            query: nil,
            fragment: nil,
            port: normalize_port(uri.port, scheme)
        }

        {:ok, URI.to_string(normalized_uri)}

      _other ->
        :error
    end
  end

  defp normalize_relay_url(_relay_url), do: :error

  defp normalize_path(nil), do: "/"
  defp normalize_path(""), do: "/"
  defp normalize_path(path), do: path

  # Default ports are dropped so equivalent URLs compare equal.
  defp normalize_port(80, "ws"), do: nil
  defp normalize_port(443, "wss"), do: nil
  defp normalize_port(port, _scheme), do: port

  # Accepts an explicitly configured network name, otherwise infers it
  # from the relay URL's host suffix.
  defp normalize_network(value, _relay_url)
       when is_binary(value) and value in ["clearnet", "tor", "i2p", "loki"],
       do: value

  defp normalize_network(_value, relay_url) do
    relay_url
    |> URI.parse()
    |> Map.get(:host)
    |> infer_network()
  end

  defp infer_network(host) when is_binary(host) do
    cond do
      String.ends_with?(host, ".onion") -> "tor"
      String.ends_with?(host, ".i2p") -> "i2p"
      true -> "clearnet"
    end
  end

  defp infer_network(_host), do: "clearnet"

  # Sanitizes the configured check list: unknown entries dropped,
  # duplicates removed; non-lists reset to the defaults.
  defp normalize_checks(checks) when is_list(checks) do
    checks
    |> Enum.map(&normalize_check/1)
    |> Enum.reject(&is_nil/1)
    |> Enum.uniq()
  end

  defp normalize_checks(_checks), do: @default_checks

  # Atom or string spellings are accepted; anything else maps to nil.
  defp normalize_check(:open), do: :open
  defp normalize_check("open"), do: :open
  defp normalize_check(:read), do: :read
  defp normalize_check("read"), do: :read
  defp normalize_check(:nip11), do: :nip11
  defp normalize_check("nip11"), do: :nip11
  defp normalize_check(_check), do: nil

  # Listener list override (for tests), defaulting to all known listeners.
  defp listeners(opts) do
    case Keyword.get(opts, :listeners) do
      listeners when is_list(listeners) -> listeners
      _other -> Listener.all()
    end
  end

  # Identity-layer options forwarded from the caller.
  defp identity_opts(opts) do
    opts
    |> Keyword.take([:path, :private_key, :configured_private_key])
  end

  # Explicit :config option wins; otherwise the :nip66 app environment.
  defp config(opts) do
    case Keyword.get(opts, :config) do
      config when is_list(config) -> config
      _other -> Application.get_env(:parrhesia, :nip66, [])
    end
  end

  defp config_enabled?(config), do: Keyword.get(config, :enabled, true)

  defp default_target do
    %{listener: :public, relay_url: Application.get_env(:parrhesia, :relay_url)}
  end

  # Listener ids may arrive as atoms or strings; unknown strings fall back
  # to :public. to_existing_atom avoids minting atoms from config input.
  defp normalize_listener_id(value) when is_atom(value), do: value

  defp normalize_listener_id(value) when is_binary(value) do
    String.to_existing_atom(value)
  rescue
    ArgumentError -> :public
  end

  defp normalize_listener_id(_value), do: :public

  defp normalize_positive_integer(value, _default) when is_integer(value) and value > 0, do: value
  defp normalize_positive_integer(_value, default), do: default

  defp normalize_optional_string(value) when is_binary(value) and value != "", do: value
  defp normalize_optional_string(_value), do: nil

  defp normalize_string_list(values) when is_list(values) do
    Enum.filter(values, &(is_binary(&1) and &1 != ""))
  end

  defp normalize_string_list(_values), do: []

  # Looks a key up in a map (atom key, then string key) or keyword list;
  # nil for anything else.
  defp fetch_value(map, key) when is_map(map) do
    Map.get(map, key) || Map.get(map, Atom.to_string(key))
  end

  defp fetch_value(list, key) when is_list(list) do
    if Keyword.keyword?(list), do: Keyword.get(list, key), else: nil
  end

  defp fetch_value(_container, _key), do: nil
end
|
||||||
218
lib/parrhesia/nip66/probe.ex
Normal file
218
lib/parrhesia/nip66/probe.ex
Normal file
@@ -0,0 +1,218 @@
|
|||||||
|
defmodule Parrhesia.NIP66.Probe do
  @moduledoc false

  # Runs the NIP-66 health checks (:open, :read, :nip11) against a relay
  # target and folds the outcomes into one result map. Websocket traffic is
  # delivered to the calling process's mailbox as {:sync_transport, ...}
  # messages, so the receive blocks below assume this function owns them.

  alias Parrhesia.HTTP
  alias Parrhesia.Sync.Transport.WebSockexClient

  @type result :: %{
          checks: [atom()],
          metrics: map(),
          relay_info: map() | nil,
          relay_info_body: String.t() | nil
        }

  # Probes `target` (a map with at least :relay_url, and :listener for the
  # :read check). `publish_opts` is accepted for interface compatibility
  # but unused here. Always returns {:ok, result} — individual check
  # failures are dropped, never surfaced.
  @spec probe(map(), keyword(), keyword()) :: {:ok, result()}
  def probe(target, opts \\ [], publish_opts \\ [])

  def probe(target, opts, _publish_opts) when is_map(target) and is_list(opts) do
    timeout_ms = Keyword.get(opts, :timeout_ms, 5_000)
    checks = normalize_checks(Keyword.get(opts, :checks, [:open, :read, :nip11]))

    initial = %{checks: [], metrics: %{}, relay_info: nil, relay_info_body: nil}

    result =
      Enum.reduce(checks, initial, fn check, acc ->
        merge_probe_result(acc, check_result(check, target, timeout_ms))
      end)

    {:ok, result}
  end

  # Non-map target / non-list opts: empty result instead of raising.
  def probe(_target, _opts, _publish_opts),
    do: {:ok, %{checks: [], metrics: %{}, relay_info: nil, relay_info_body: nil}}

  # Metric-shaped check outcome: record the check and its measured value.
  # Note: checks are prepended, so the final list is in reverse run order.
  defp merge_probe_result(acc, %{check: check, metric_key: metric_key, metric_value: metric_value}) do
    acc
    |> Map.update!(:checks, &[check | &1])
    |> Map.update!(:metrics, &Map.put(&1, metric_key, metric_value))
  end

  # NIP-11-shaped outcome: record the check and stash the decoded document
  # plus its raw body.
  defp merge_probe_result(acc, %{
         check: check,
         relay_info: relay_info,
         relay_info_body: relay_info_body
       }) do
    acc
    |> Map.update!(:checks, &[check | &1])
    |> Map.put(:relay_info, relay_info)
    |> Map.put(:relay_info_body, relay_info_body)
  end

  # Skipped or failed checks leave the accumulator untouched.
  defp merge_probe_result(acc, :skip), do: acc
  defp merge_probe_result(acc, {:error, _reason}), do: acc

  # :open — time a bare websocket connect.
  defp check_result(:open, target, timeout_ms) do
    case measure_websocket_connect(Map.fetch!(target, :relay_url), timeout_ms) do
      {:ok, metric_value} ->
        %{check: :open, metric_key: :rtt_open_ms, metric_value: metric_value}

      {:error, reason} ->
        {:error, reason}
    end
  end

  # :read — skipped entirely when the listener mandates NIP-42 auth, since
  # an unauthenticated read could not succeed anyway.
  defp check_result(:read, %{listener: listener} = target, timeout_ms) do
    if listener.auth.nip42_required do
      :skip
    else
      case measure_websocket_read(Map.fetch!(target, :relay_url), timeout_ms) do
        {:ok, metric_value} ->
          %{check: :read, metric_key: :rtt_read_ms, metric_value: metric_value}

        {:error, reason} ->
          {:error, reason}
      end
    end
  end

  # :nip11 — fetch the relay information document over HTTP.
  defp check_result(:nip11, target, timeout_ms) do
    case fetch_nip11(Map.fetch!(target, :relay_url), timeout_ms) do
      {:ok, relay_info, relay_info_body, _metric_value} ->
        %{check: :nip11, relay_info: relay_info, relay_info_body: relay_info_body}

      {:error, reason} ->
        {:error, reason}
    end
  end

  defp check_result(_check, _target, _timeout_ms), do: :skip

  # Connect, measure time-to-connected, then close. The unmatched `with`
  # failure (an {:error, _}) is returned as-is; note the socket is only
  # closed on the success path.
  defp measure_websocket_connect(relay_url, timeout_ms) do
    with {:ok, websocket} <- connect(relay_url, timeout_ms),
         {:ok, metric_value} <- await_connected(websocket, timeout_ms) do
      :ok = WebSockexClient.close(websocket)
      {:ok, metric_value}
    end
  end

  # Connect, send a COUNT request, and measure until its response arrives.
  # The RTT is measured from connect start, not from the COUNT send.
  defp measure_websocket_read(relay_url, timeout_ms) do
    with {:ok, websocket} <- connect(relay_url, timeout_ms),
         {:ok, started_at} <- await_connected_started_at(websocket, timeout_ms),
         :ok <- WebSockexClient.send_json(websocket, ["COUNT", "nip66-probe", %{"kinds" => [1]}]),
         {:ok, metric_value} <- await_count_response(websocket, timeout_ms, started_at) do
      :ok = WebSockexClient.close(websocket)
      {:ok, metric_value}
    end
  end

  # Opens the websocket with transport events delivered to this process.
  defp connect(relay_url, timeout_ms) do
    server = %{url: relay_url, tls: tls_config(relay_url)}

    WebSockexClient.connect(self(), server, websocket_opts: [timeout: timeout_ms, protocols: nil])
  end

  defp await_connected(websocket, timeout_ms) do
    with {:ok, started_at} <- await_connected_started_at(websocket, timeout_ms) do
      {:ok, monotonic_duration_ms(started_at)}
    end
  end

  # Blocks until the :connected transport message for this websocket, and
  # returns the monotonic timestamp taken just before waiting.
  defp await_connected_started_at(websocket, timeout_ms) do
    started_at = System.monotonic_time()

    receive do
      {:sync_transport, ^websocket, :connected, _metadata} -> {:ok, started_at}
      {:sync_transport, ^websocket, :disconnected, reason} -> {:error, reason}
    after
      timeout_ms -> {:error, :timeout}
    end
  end

  # Waits for the COUNT response frame (subscription id "nip66-probe"),
  # converting CLOSED frames and disconnects into errors.
  defp await_count_response(websocket, timeout_ms, started_at) do
    receive do
      {:sync_transport, ^websocket, :frame, ["COUNT", "nip66-probe", _payload]} ->
        {:ok, monotonic_duration_ms(started_at)}

      {:sync_transport, ^websocket, :frame, ["CLOSED", "nip66-probe", _message]} ->
        {:error, :closed}

      {:sync_transport, ^websocket, :disconnected, reason} ->
        {:error, reason}
    after
      timeout_ms -> {:error, :timeout}
    end
  end

  # GETs the NIP-11 document (Accept: application/nostr+json) and decodes
  # it; only a 200 response with a JSON object body is a success.
  defp fetch_nip11(relay_url, timeout_ms) do
    started_at = System.monotonic_time()

    case HTTP.get(
           url: relay_info_url(relay_url),
           headers: [{"accept", "application/nostr+json"}],
           decode_body: false,
           connect_options: [timeout: timeout_ms],
           receive_timeout: timeout_ms
         ) do
      {:ok, %Req.Response{status: 200, body: body}} when is_binary(body) ->
        case JSON.decode(body) do
          {:ok, relay_info} when is_map(relay_info) ->
            {:ok, relay_info, body, monotonic_duration_ms(started_at)}

          {:error, reason} ->
            {:error, reason}

          # Valid JSON that is not an object (array, scalar) is rejected.
          _other ->
            {:error, :invalid_relay_info}
        end

      {:ok, %Req.Response{status: status}} ->
        {:error, {:relay_info_request_failed, status}}

      {:error, reason} ->
        {:error, reason}
    end
  end

  # ws -> http / wss -> https; any other scheme raises (FunctionClauseError),
  # which is safe because callers only pass ws(s) relay URLs.
  defp relay_info_url(relay_url) do
    relay_url
    |> URI.parse()
    |> Map.update!(:scheme, fn
      "wss" -> "https"
      "ws" -> "http"
    end)
    |> URI.to_string()
  end

  # TLS is required (with SNI hostname) for wss URLs, disabled otherwise.
  defp tls_config(relay_url) do
    case URI.parse(relay_url) do
      %URI{scheme: "wss", host: host} when is_binary(host) and host != "" ->
        %{mode: :required, hostname: host, pins: []}

      _other ->
        %{mode: :disabled}
    end
  end

  # Drops unknown checks and duplicates; non-lists become no checks at all
  # (unlike Parrhesia.NIP66, which falls back to its defaults).
  defp normalize_checks(checks) when is_list(checks) do
    checks
    |> Enum.map(&normalize_check/1)
    |> Enum.reject(&is_nil/1)
    |> Enum.uniq()
  end

  defp normalize_checks(_checks), do: []

  defp normalize_check(:open), do: :open
  defp normalize_check("open"), do: :open
  defp normalize_check(:read), do: :read
  defp normalize_check("read"), do: :read
  defp normalize_check(:nip11), do: :nip11
  defp normalize_check("nip11"), do: :nip11
  defp normalize_check(_check), do: nil

  # Elapsed wall time since `started_at` in whole milliseconds.
  defp monotonic_duration_ms(started_at) do
    System.monotonic_time()
    |> Kernel.-(started_at)
    |> System.convert_time_unit(:native, :millisecond)
  end
end
|
||||||
68
lib/parrhesia/policy/connection_policy.ex
Normal file
68
lib/parrhesia/policy/connection_policy.ex
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
defmodule Parrhesia.Policy.ConnectionPolicy do
  @moduledoc """
  Connection/session-level policy checks shared by websocket and management entrypoints.
  """

  alias Parrhesia.Storage

  @doc """
  Checks whether `remote_ip` (an `:inet` 4- or 8-tuple, a string, or nil)
  is on the moderation IP blocklist.

  Unrecognizable or missing addresses are allowed through (`:ok`) rather
  than rejected; storage errors also fail open.
  """
  @spec authorize_remote_ip(tuple() | String.t() | nil) :: :ok | {:error, :ip_blocked}
  def authorize_remote_ip(remote_ip) do
    case normalize_ip(remote_ip) do
      nil ->
        :ok

      normalized_ip ->
        case Storage.moderation().ip_blocked?(%{}, normalized_ip) do
          {:ok, true} -> {:error, :ip_blocked}
          _other -> :ok
        end
    end
  end

  @doc """
  Checks a single authenticated pubkey against the allowlist.

  When no allowlist is configured (or storage fails), every pubkey is
  allowed; when one is active, only listed pubkeys pass.
  """
  @spec authorize_authenticated_pubkey(String.t()) :: :ok | {:error, :pubkey_not_allowed}
  def authorize_authenticated_pubkey(pubkey) when is_binary(pubkey) do
    if allowlist_active?() do
      case Storage.moderation().pubkey_allowed?(%{}, pubkey) do
        {:ok, true} -> :ok
        _other -> {:error, :pubkey_not_allowed}
      end
    else
      :ok
    end
  end

  @doc """
  Checks a set of authenticated pubkeys against the allowlist.

  With an active allowlist: an empty set means the caller never
  authenticated (`:auth_required`); otherwise at least one pubkey must be
  allowlisted. With no allowlist, always `:ok`.
  """
  @spec authorize_authenticated_pubkeys(MapSet.t(String.t())) ::
          :ok | {:error, :auth_required | :pubkey_not_allowed}
  def authorize_authenticated_pubkeys(authenticated_pubkeys) do
    if allowlist_active?() do
      cond do
        MapSet.size(authenticated_pubkeys) == 0 ->
          {:error, :auth_required}

        Enum.any?(authenticated_pubkeys, &(authorize_authenticated_pubkey(&1) == :ok)) ->
          :ok

        true ->
          {:error, :pubkey_not_allowed}
      end
    else
      :ok
    end
  end

  # An allowlist is active when at least one allowed pubkey is stored.
  # Storage errors count as "no allowlist" (fail open).
  defp allowlist_active? do
    case Storage.moderation().has_allowed_pubkeys?(%{}) do
      {:ok, true} -> true
      _other -> false
    end
  end

  # Normalizes the remote address to a string for the blocklist lookup.
  # Pipes now start from the bare value (the originals started mid-chain
  # with `:inet.ntoa(x) |> to_string()`, an Elixir pipe anti-idiom).
  # NOTE(review): :inet.ntoa/1 returns {:error, :einval} for tuples whose
  # elements are not valid address components, and to_string/1 would then
  # raise — callers are assumed to pass well-formed :inet tuples.
  defp normalize_ip(nil), do: nil
  defp normalize_ip({_, _, _, _} = remote_ip), do: remote_ip |> :inet.ntoa() |> to_string()

  defp normalize_ip({_, _, _, _, _, _, _, _} = remote_ip),
    do: remote_ip |> :inet.ntoa() |> to_string()

  defp normalize_ip(remote_ip) when is_binary(remote_ip), do: remote_ip
  defp normalize_ip(_remote_ip), do: nil
end
|
||||||
@@ -3,11 +3,17 @@ defmodule Parrhesia.Policy.EventPolicy do
|
|||||||
Write/read policy checks for relay operations.
|
Write/read policy checks for relay operations.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
alias Parrhesia.API.ACL
|
||||||
|
alias Parrhesia.API.RequestContext
|
||||||
|
alias Parrhesia.Policy.ConnectionPolicy
|
||||||
alias Parrhesia.Storage
|
alias Parrhesia.Storage
|
||||||
|
|
||||||
@type policy_error ::
|
@type policy_error ::
|
||||||
:auth_required
|
:auth_required
|
||||||
|
| :pubkey_not_allowed
|
||||||
| :restricted_giftwrap
|
| :restricted_giftwrap
|
||||||
|
| :sync_read_not_allowed
|
||||||
|
| :sync_write_not_allowed
|
||||||
| :marmot_group_h_tag_required
|
| :marmot_group_h_tag_required
|
||||||
| :marmot_group_h_values_exceeded
|
| :marmot_group_h_values_exceeded
|
||||||
| :marmot_group_filter_window_too_wide
|
| :marmot_group_filter_window_too_wide
|
||||||
@@ -33,15 +39,31 @@ defmodule Parrhesia.Policy.EventPolicy do
|
|||||||
|
|
||||||
@spec authorize_read([map()], MapSet.t(String.t())) :: :ok | {:error, policy_error()}
|
@spec authorize_read([map()], MapSet.t(String.t())) :: :ok | {:error, policy_error()}
|
||||||
def authorize_read(filters, authenticated_pubkeys) when is_list(filters) do
|
def authorize_read(filters, authenticated_pubkeys) when is_list(filters) do
|
||||||
|
authorize_read(filters, authenticated_pubkeys, request_context(authenticated_pubkeys))
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec authorize_read([map()], MapSet.t(String.t()), RequestContext.t()) ::
|
||||||
|
:ok | {:error, policy_error()}
|
||||||
|
def authorize_read(filters, authenticated_pubkeys, %RequestContext{} = context)
|
||||||
|
when is_list(filters) do
|
||||||
auth_required? = config_bool([:policies, :auth_required_for_reads], false)
|
auth_required? = config_bool([:policies, :auth_required_for_reads], false)
|
||||||
|
|
||||||
cond do
|
cond do
|
||||||
|
match?(
|
||||||
|
{:error, _reason},
|
||||||
|
ConnectionPolicy.authorize_authenticated_pubkeys(authenticated_pubkeys)
|
||||||
|
) ->
|
||||||
|
ConnectionPolicy.authorize_authenticated_pubkeys(authenticated_pubkeys)
|
||||||
|
|
||||||
auth_required? and MapSet.size(authenticated_pubkeys) == 0 ->
|
auth_required? and MapSet.size(authenticated_pubkeys) == 0 ->
|
||||||
{:error, :auth_required}
|
{:error, :auth_required}
|
||||||
|
|
||||||
giftwrap_restricted?(filters, authenticated_pubkeys) ->
|
giftwrap_restricted?(filters, authenticated_pubkeys) ->
|
||||||
{:error, :restricted_giftwrap}
|
{:error, :restricted_giftwrap}
|
||||||
|
|
||||||
|
match?({:error, _reason}, authorize_sync_reads(filters, context)) ->
|
||||||
|
authorize_sync_reads(filters, context)
|
||||||
|
|
||||||
true ->
|
true ->
|
||||||
enforce_marmot_group_read_guardrails(filters)
|
enforce_marmot_group_read_guardrails(filters)
|
||||||
end
|
end
|
||||||
@@ -49,8 +71,17 @@ defmodule Parrhesia.Policy.EventPolicy do
|
|||||||
|
|
||||||
@spec authorize_write(map(), MapSet.t(String.t())) :: :ok | {:error, policy_error()}
|
@spec authorize_write(map(), MapSet.t(String.t())) :: :ok | {:error, policy_error()}
|
||||||
def authorize_write(event, authenticated_pubkeys) when is_map(event) do
|
def authorize_write(event, authenticated_pubkeys) when is_map(event) do
|
||||||
|
authorize_write(event, authenticated_pubkeys, request_context(authenticated_pubkeys))
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec authorize_write(map(), MapSet.t(String.t()), RequestContext.t()) ::
|
||||||
|
:ok | {:error, policy_error()}
|
||||||
|
def authorize_write(event, authenticated_pubkeys, %RequestContext{} = context)
|
||||||
|
when is_map(event) do
|
||||||
checks = [
|
checks = [
|
||||||
|
fn -> ConnectionPolicy.authorize_authenticated_pubkeys(authenticated_pubkeys) end,
|
||||||
fn -> maybe_require_auth_for_write(authenticated_pubkeys) end,
|
fn -> maybe_require_auth_for_write(authenticated_pubkeys) end,
|
||||||
|
fn -> authorize_sync_write(event, context) end,
|
||||||
fn -> reject_if_pubkey_banned(event) end,
|
fn -> reject_if_pubkey_banned(event) end,
|
||||||
fn -> reject_if_event_banned(event) end,
|
fn -> reject_if_event_banned(event) end,
|
||||||
fn -> enforce_pow(event) end,
|
fn -> enforce_pow(event) end,
|
||||||
@@ -69,10 +100,17 @@ defmodule Parrhesia.Policy.EventPolicy do
|
|||||||
|
|
||||||
@spec error_message(policy_error()) :: String.t()
|
@spec error_message(policy_error()) :: String.t()
|
||||||
def error_message(:auth_required), do: "auth-required: authentication required"
|
def error_message(:auth_required), do: "auth-required: authentication required"
|
||||||
|
def error_message(:pubkey_not_allowed), do: "restricted: authenticated pubkey is not allowed"
|
||||||
|
|
||||||
def error_message(:restricted_giftwrap),
|
def error_message(:restricted_giftwrap),
|
||||||
do: "restricted: giftwrap access requires recipient authentication"
|
do: "restricted: giftwrap access requires recipient authentication"
|
||||||
|
|
||||||
|
def error_message(:sync_read_not_allowed),
|
||||||
|
do: "restricted: sync read not allowed for authenticated pubkey"
|
||||||
|
|
||||||
|
def error_message(:sync_write_not_allowed),
|
||||||
|
do: "restricted: sync write not allowed for authenticated pubkey"
|
||||||
|
|
||||||
def error_message(:marmot_group_h_tag_required),
|
def error_message(:marmot_group_h_tag_required),
|
||||||
do: "restricted: kind 445 queries must include a #h tag"
|
do: "restricted: kind 445 queries must include a #h tag"
|
||||||
|
|
||||||
@@ -143,6 +181,19 @@ defmodule Parrhesia.Policy.EventPolicy do
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp authorize_sync_reads(filters, %RequestContext{} = context) do
|
||||||
|
Enum.reduce_while(filters, :ok, fn filter, :ok ->
|
||||||
|
case ACL.check(:sync_read, filter, context: context) do
|
||||||
|
:ok -> {:cont, :ok}
|
||||||
|
{:error, reason} -> {:halt, {:error, reason}}
|
||||||
|
end
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp authorize_sync_write(event, %RequestContext{} = context) do
|
||||||
|
ACL.check(:sync_write, event, context: context)
|
||||||
|
end
|
||||||
|
|
||||||
defp giftwrap_restricted?(filters, authenticated_pubkeys) do
|
defp giftwrap_restricted?(filters, authenticated_pubkeys) do
|
||||||
if MapSet.size(authenticated_pubkeys) == 0 do
|
if MapSet.size(authenticated_pubkeys) == 0 do
|
||||||
any_filter_targets_giftwrap?(filters)
|
any_filter_targets_giftwrap?(filters)
|
||||||
@@ -635,19 +686,29 @@ defmodule Parrhesia.Policy.EventPolicy do
|
|||||||
_tag -> false
|
_tag -> false
|
||||||
end)
|
end)
|
||||||
|
|
||||||
if protected? do
|
cond do
|
||||||
pubkey = Map.get(event, "pubkey")
|
not protected? ->
|
||||||
|
:ok
|
||||||
|
|
||||||
cond do
|
nip43_relay_access_kind?(Map.get(event, "kind")) ->
|
||||||
MapSet.size(authenticated_pubkeys) == 0 -> {:error, :protected_event_requires_auth}
|
:ok
|
||||||
MapSet.member?(authenticated_pubkeys, pubkey) -> :ok
|
|
||||||
true -> {:error, :protected_event_pubkey_mismatch}
|
true ->
|
||||||
end
|
pubkey = Map.get(event, "pubkey")
|
||||||
else
|
|
||||||
:ok
|
cond do
|
||||||
|
MapSet.size(authenticated_pubkeys) == 0 -> {:error, :protected_event_requires_auth}
|
||||||
|
MapSet.member?(authenticated_pubkeys, pubkey) -> :ok
|
||||||
|
true -> {:error, :protected_event_pubkey_mismatch}
|
||||||
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp nip43_relay_access_kind?(kind) when kind in [8_000, 8_001, 13_534, 28_934, 28_935, 28_936],
|
||||||
|
do: true
|
||||||
|
|
||||||
|
defp nip43_relay_access_kind?(_kind), do: false
|
||||||
|
|
||||||
defp config_bool([scope, key], default) do
|
defp config_bool([scope, key], default) do
|
||||||
case Application.get_env(:parrhesia, scope, []) |> Keyword.get(key, default) do
|
case Application.get_env(:parrhesia, scope, []) |> Keyword.get(key, default) do
|
||||||
true -> true
|
true -> true
|
||||||
@@ -672,4 +733,8 @@ defmodule Parrhesia.Policy.EventPolicy do
|
|||||||
default
|
default
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp request_context(authenticated_pubkeys) do
|
||||||
|
%RequestContext{authenticated_pubkeys: authenticated_pubkeys}
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|||||||
73
lib/parrhesia/postgres_repos.ex
Normal file
73
lib/parrhesia/postgres_repos.ex
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
defmodule Parrhesia.PostgresRepos do
|
||||||
|
@moduledoc false
|
||||||
|
|
||||||
|
alias Parrhesia.Config
|
||||||
|
alias Parrhesia.ReadRepo
|
||||||
|
alias Parrhesia.Repo
|
||||||
|
|
||||||
|
@spec write() :: module()
|
||||||
|
def write, do: Repo
|
||||||
|
|
||||||
|
@spec read() :: module()
|
||||||
|
def read do
|
||||||
|
if separate_read_pool_enabled?() and is_pid(Process.whereis(ReadRepo)) do
|
||||||
|
ReadRepo
|
||||||
|
else
|
||||||
|
Repo
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec started_repos() :: [module()]
|
||||||
|
def started_repos do
|
||||||
|
cond do
|
||||||
|
not postgres_enabled?() ->
|
||||||
|
[]
|
||||||
|
|
||||||
|
separate_read_pool_enabled?() ->
|
||||||
|
[Repo, ReadRepo]
|
||||||
|
|
||||||
|
true ->
|
||||||
|
[Repo]
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec postgres_enabled?() :: boolean()
|
||||||
|
def postgres_enabled? do
|
||||||
|
case Process.whereis(Config) do
|
||||||
|
pid when is_pid(pid) ->
|
||||||
|
Config.get([:storage, :backend], storage_backend_default()) == :postgres
|
||||||
|
|
||||||
|
nil ->
|
||||||
|
storage_backend_default() == :postgres
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec separate_read_pool_enabled?() :: boolean()
|
||||||
|
def separate_read_pool_enabled? do
|
||||||
|
case {postgres_enabled?(), Process.whereis(Config)} do
|
||||||
|
{false, _pid} ->
|
||||||
|
false
|
||||||
|
|
||||||
|
{true, pid} when is_pid(pid) ->
|
||||||
|
Config.get(
|
||||||
|
[:database, :separate_read_pool?],
|
||||||
|
application_default(:separate_read_pool?, false)
|
||||||
|
)
|
||||||
|
|
||||||
|
{true, nil} ->
|
||||||
|
application_default(:separate_read_pool?, false)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp application_default(key, default) do
|
||||||
|
:parrhesia
|
||||||
|
|> Application.get_env(:database, [])
|
||||||
|
|> Keyword.get(key, default)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp storage_backend_default do
|
||||||
|
:parrhesia
|
||||||
|
|> Application.get_env(:storage, [])
|
||||||
|
|> Keyword.get(:backend, :postgres)
|
||||||
|
end
|
||||||
|
end
|
||||||
@@ -1 +1,4 @@
|
|||||||
Postgrex.Types.define(Parrhesia.PostgresTypes, [], json: JSON)
|
Postgrex.Types.define(Parrhesia.PostgresTypes, [],
|
||||||
|
json: JSON,
|
||||||
|
moduledoc: "Custom Postgrex type definitions used by `Parrhesia.Repo` and `Parrhesia.ReadRepo`."
|
||||||
|
)
|
||||||
|
|||||||
@@ -1,6 +1,15 @@
|
|||||||
defmodule Parrhesia.Protocol do
|
defmodule Parrhesia.Protocol do
|
||||||
@moduledoc """
|
@moduledoc """
|
||||||
Nostr protocol message decode/encode helpers.
|
Nostr protocol message decode/encode helpers.
|
||||||
|
|
||||||
|
This module is transport-oriented: it turns websocket payloads into structured tuples and
|
||||||
|
back again.
|
||||||
|
|
||||||
|
For programmatic API calls inside the application, prefer the `Parrhesia.API.*` modules.
|
||||||
|
In particular:
|
||||||
|
|
||||||
|
- `validate_event/1` returns user-facing error strings
|
||||||
|
- `Parrhesia.API.Auth.validate_event/1` returns machine-friendly validator atoms
|
||||||
"""
|
"""
|
||||||
|
|
||||||
alias Parrhesia.Protocol.EventValidator
|
alias Parrhesia.Protocol.EventValidator
|
||||||
@@ -14,8 +23,8 @@ defmodule Parrhesia.Protocol do
|
|||||||
| {:close, String.t()}
|
| {:close, String.t()}
|
||||||
| {:auth, event()}
|
| {:auth, event()}
|
||||||
| {:count, String.t(), [filter()], map()}
|
| {:count, String.t(), [filter()], map()}
|
||||||
| {:neg_open, String.t(), map()}
|
| {:neg_open, String.t(), filter(), binary()}
|
||||||
| {:neg_msg, String.t(), map()}
|
| {:neg_msg, String.t(), binary()}
|
||||||
| {:neg_close, String.t()}
|
| {:neg_close, String.t()}
|
||||||
|
|
||||||
@type relay_message ::
|
@type relay_message ::
|
||||||
@@ -26,7 +35,8 @@ defmodule Parrhesia.Protocol do
|
|||||||
| {:event, String.t(), event()}
|
| {:event, String.t(), event()}
|
||||||
| {:auth, String.t()}
|
| {:auth, String.t()}
|
||||||
| {:count, String.t(), map()}
|
| {:count, String.t(), map()}
|
||||||
| {:neg_msg, String.t(), map()}
|
| {:neg_msg, String.t(), String.t()}
|
||||||
|
| {:neg_err, String.t(), String.t()}
|
||||||
|
|
||||||
@type decode_error ::
|
@type decode_error ::
|
||||||
:invalid_json
|
:invalid_json
|
||||||
@@ -40,6 +50,9 @@ defmodule Parrhesia.Protocol do
|
|||||||
|
|
||||||
@count_options_keys MapSet.new(["hll", "approximate"])
|
@count_options_keys MapSet.new(["hll", "approximate"])
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Decodes a client websocket payload into a structured protocol tuple.
|
||||||
|
"""
|
||||||
@spec decode_client(binary()) :: {:ok, client_message()} | {:error, decode_error()}
|
@spec decode_client(binary()) :: {:ok, client_message()} | {:error, decode_error()}
|
||||||
def decode_client(payload) when is_binary(payload) do
|
def decode_client(payload) when is_binary(payload) do
|
||||||
with {:ok, decoded} <- decode_json(payload) do
|
with {:ok, decoded} <- decode_json(payload) do
|
||||||
@@ -47,6 +60,9 @@ defmodule Parrhesia.Protocol do
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Validates an event and returns relay-facing error strings.
|
||||||
|
"""
|
||||||
@spec validate_event(event()) :: :ok | {:error, String.t()}
|
@spec validate_event(event()) :: :ok | {:error, String.t()}
|
||||||
def validate_event(event) do
|
def validate_event(event) do
|
||||||
case EventValidator.validate(event) do
|
case EventValidator.validate(event) do
|
||||||
@@ -55,6 +71,9 @@ defmodule Parrhesia.Protocol do
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Encodes a relay message tuple into the JSON frame sent to clients.
|
||||||
|
"""
|
||||||
@spec encode_relay(relay_message()) :: binary()
|
@spec encode_relay(relay_message()) :: binary()
|
||||||
def encode_relay(message) do
|
def encode_relay(message) do
|
||||||
message
|
message
|
||||||
@@ -62,6 +81,9 @@ defmodule Parrhesia.Protocol do
|
|||||||
|> JSON.encode!()
|
|> JSON.encode!()
|
||||||
end
|
end
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Converts a decode error into the relay notice string that should be sent to a client.
|
||||||
|
"""
|
||||||
@spec decode_error_notice(decode_error()) :: String.t()
|
@spec decode_error_notice(decode_error()) :: String.t()
|
||||||
def decode_error_notice(reason) do
|
def decode_error_notice(reason) do
|
||||||
case reason do
|
case reason do
|
||||||
@@ -122,21 +144,25 @@ defmodule Parrhesia.Protocol do
|
|||||||
|
|
||||||
defp decode_message(["AUTH", _invalid]), do: {:error, :invalid_auth}
|
defp decode_message(["AUTH", _invalid]), do: {:error, :invalid_auth}
|
||||||
|
|
||||||
defp decode_message(["NEG-OPEN", subscription_id, payload])
|
defp decode_message(["NEG-OPEN", subscription_id, filter, initial_message])
|
||||||
when is_binary(subscription_id) and is_map(payload) do
|
when is_binary(subscription_id) and is_map(filter) and is_binary(initial_message) do
|
||||||
if valid_subscription_id?(subscription_id) do
|
with true <- valid_subscription_id?(subscription_id),
|
||||||
{:ok, {:neg_open, subscription_id, payload}}
|
{:ok, decoded_message} <- decode_negentropy_hex(initial_message) do
|
||||||
|
{:ok, {:neg_open, subscription_id, filter, decoded_message}}
|
||||||
else
|
else
|
||||||
{:error, :invalid_subscription_id}
|
false -> {:error, :invalid_subscription_id}
|
||||||
|
{:error, _reason} -> {:error, :invalid_negentropy}
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
defp decode_message(["NEG-MSG", subscription_id, payload])
|
defp decode_message(["NEG-MSG", subscription_id, payload])
|
||||||
when is_binary(subscription_id) and is_map(payload) do
|
when is_binary(subscription_id) and is_binary(payload) do
|
||||||
if valid_subscription_id?(subscription_id) do
|
with true <- valid_subscription_id?(subscription_id),
|
||||||
{:ok, {:neg_msg, subscription_id, payload}}
|
{:ok, decoded_payload} <- decode_negentropy_hex(payload) do
|
||||||
|
{:ok, {:neg_msg, subscription_id, decoded_payload}}
|
||||||
else
|
else
|
||||||
{:error, :invalid_subscription_id}
|
false -> {:error, :invalid_subscription_id}
|
||||||
|
{:error, _reason} -> {:error, :invalid_negentropy}
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
@@ -215,7 +241,19 @@ defmodule Parrhesia.Protocol do
|
|||||||
defp relay_frame({:neg_msg, subscription_id, payload}),
|
defp relay_frame({:neg_msg, subscription_id, payload}),
|
||||||
do: ["NEG-MSG", subscription_id, payload]
|
do: ["NEG-MSG", subscription_id, payload]
|
||||||
|
|
||||||
|
defp relay_frame({:neg_err, subscription_id, reason}),
|
||||||
|
do: ["NEG-ERR", subscription_id, reason]
|
||||||
|
|
||||||
defp valid_subscription_id?(subscription_id) do
|
defp valid_subscription_id?(subscription_id) do
|
||||||
subscription_id != "" and String.length(subscription_id) <= 64
|
subscription_id != "" and String.length(subscription_id) <= 64
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp decode_negentropy_hex(payload) when is_binary(payload) and payload != "" do
|
||||||
|
case Base.decode16(payload, case: :mixed) do
|
||||||
|
{:ok, decoded} when decoded != <<>> -> {:ok, decoded}
|
||||||
|
_other -> {:error, :invalid_negentropy}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp decode_negentropy_hex(_payload), do: {:error, :invalid_negentropy}
|
||||||
end
|
end
|
||||||
|
|||||||
@@ -6,6 +6,14 @@ defmodule Parrhesia.Protocol.EventValidator do
|
|||||||
@required_fields ~w[id pubkey created_at kind tags content sig]
|
@required_fields ~w[id pubkey created_at kind tags content sig]
|
||||||
@max_kind 65_535
|
@max_kind 65_535
|
||||||
@default_max_event_future_skew_seconds 900
|
@default_max_event_future_skew_seconds 900
|
||||||
|
@default_max_tags_per_event 256
|
||||||
|
@default_nip43_request_max_age_seconds 300
|
||||||
|
@verify_event_signatures_locked Application.compile_env(
|
||||||
|
:parrhesia,
|
||||||
|
[:features, :verify_event_signatures_locked?],
|
||||||
|
false
|
||||||
|
)
|
||||||
|
|
||||||
@supported_mls_ciphersuites MapSet.new(~w[0x0001 0x0002 0x0003 0x0004 0x0005 0x0006 0x0007])
|
@supported_mls_ciphersuites MapSet.new(~w[0x0001 0x0002 0x0003 0x0004 0x0005 0x0006 0x0007])
|
||||||
@required_mls_extensions MapSet.new(["0xf2ee", "0x000a"])
|
@required_mls_extensions MapSet.new(["0xf2ee", "0x000a"])
|
||||||
@supported_keypackage_ref_sizes [32, 48, 64]
|
@supported_keypackage_ref_sizes [32, 48, 64]
|
||||||
@@ -17,6 +25,7 @@ defmodule Parrhesia.Protocol.EventValidator do
|
|||||||
| :invalid_created_at
|
| :invalid_created_at
|
||||||
| :created_at_too_far_in_future
|
| :created_at_too_far_in_future
|
||||||
| :invalid_kind
|
| :invalid_kind
|
||||||
|
| :too_many_tags
|
||||||
| :invalid_tags
|
| :invalid_tags
|
||||||
| :invalid_content
|
| :invalid_content
|
||||||
| :invalid_sig
|
| :invalid_sig
|
||||||
@@ -44,6 +53,22 @@ defmodule Parrhesia.Protocol.EventValidator do
|
|||||||
| :missing_marmot_group_tag
|
| :missing_marmot_group_tag
|
||||||
| :invalid_marmot_group_tag
|
| :invalid_marmot_group_tag
|
||||||
| :invalid_marmot_group_content
|
| :invalid_marmot_group_content
|
||||||
|
| :missing_nip66_d_tag
|
||||||
|
| :invalid_nip66_d_tag
|
||||||
|
| :invalid_nip66_discovery_tag
|
||||||
|
| :missing_nip66_frequency_tag
|
||||||
|
| :invalid_nip66_frequency_tag
|
||||||
|
| :invalid_nip66_timeout_tag
|
||||||
|
| :invalid_nip66_check_tag
|
||||||
|
| :missing_nip43_protected_tag
|
||||||
|
| :missing_nip43_claim_tag
|
||||||
|
| :invalid_nip43_claim_tag
|
||||||
|
| :missing_nip43_member_tag
|
||||||
|
| :invalid_nip43_member_tag
|
||||||
|
| :missing_nip43_pubkey_tag
|
||||||
|
| :invalid_nip43_pubkey_tag
|
||||||
|
| :stale_nip43_join_request
|
||||||
|
| :stale_nip43_leave_request
|
||||||
|
|
||||||
@spec validate(map()) :: :ok | {:error, error_reason()}
|
@spec validate(map()) :: :ok | {:error, error_reason()}
|
||||||
def validate(event) when is_map(event) do
|
def validate(event) when is_map(event) do
|
||||||
@@ -87,6 +112,7 @@ defmodule Parrhesia.Protocol.EventValidator do
|
|||||||
created_at_too_far_in_future:
|
created_at_too_far_in_future:
|
||||||
"invalid: event creation date is too far off from the current time",
|
"invalid: event creation date is too far off from the current time",
|
||||||
invalid_kind: "invalid: kind must be an integer between 0 and 65535",
|
invalid_kind: "invalid: kind must be an integer between 0 and 65535",
|
||||||
|
too_many_tags: "invalid: event tags exceed configured limit",
|
||||||
invalid_tags: "invalid: tags must be an array of non-empty string arrays",
|
invalid_tags: "invalid: tags must be an array of non-empty string arrays",
|
||||||
invalid_content: "invalid: content must be a string",
|
invalid_content: "invalid: content must be a string",
|
||||||
invalid_sig: "invalid: sig must be 64-byte lowercase hex",
|
invalid_sig: "invalid: sig must be 64-byte lowercase hex",
|
||||||
@@ -127,7 +153,35 @@ defmodule Parrhesia.Protocol.EventValidator do
|
|||||||
missing_marmot_group_tag: "invalid: kind 445 must include at least one h tag with a group id",
|
missing_marmot_group_tag: "invalid: kind 445 must include at least one h tag with a group id",
|
||||||
invalid_marmot_group_tag:
|
invalid_marmot_group_tag:
|
||||||
"invalid: kind 445 h tags must contain 32-byte lowercase hex group ids",
|
"invalid: kind 445 h tags must contain 32-byte lowercase hex group ids",
|
||||||
invalid_marmot_group_content: "invalid: kind 445 content must be non-empty base64"
|
invalid_marmot_group_content: "invalid: kind 445 content must be non-empty base64",
|
||||||
|
missing_nip66_d_tag:
|
||||||
|
"invalid: kind 30166 must include a single [\"d\", <normalized ws/wss url or relay pubkey>] tag",
|
||||||
|
invalid_nip66_d_tag:
|
||||||
|
"invalid: kind 30166 must include a single [\"d\", <normalized ws/wss url or relay pubkey>] tag",
|
||||||
|
invalid_nip66_discovery_tag: "invalid: kind 30166 includes malformed NIP-66 discovery tags",
|
||||||
|
missing_nip66_frequency_tag:
|
||||||
|
"invalid: kind 10166 must include a single [\"frequency\", <seconds>] tag",
|
||||||
|
invalid_nip66_frequency_tag:
|
||||||
|
"invalid: kind 10166 must include a single [\"frequency\", <seconds>] tag",
|
||||||
|
invalid_nip66_timeout_tag:
|
||||||
|
"invalid: kind 10166 timeout tags must be [\"timeout\", <check>, <ms>]",
|
||||||
|
invalid_nip66_check_tag: "invalid: kind 10166 c tags must contain lowercase check names",
|
||||||
|
missing_nip43_protected_tag:
|
||||||
|
"invalid: NIP-43 events must include a NIP-70 protected [\"-\"] tag",
|
||||||
|
missing_nip43_claim_tag:
|
||||||
|
"invalid: kinds 28934 and 28935 must include a single [\"claim\", <invite code>] tag",
|
||||||
|
invalid_nip43_claim_tag:
|
||||||
|
"invalid: kinds 28934 and 28935 must include a single [\"claim\", <invite code>] tag",
|
||||||
|
missing_nip43_member_tag:
|
||||||
|
"invalid: kind 13534 must include at least one [\"member\", <hex pubkey>] tag",
|
||||||
|
invalid_nip43_member_tag:
|
||||||
|
"invalid: kind 13534 member tags must contain lowercase hex pubkeys",
|
||||||
|
missing_nip43_pubkey_tag:
|
||||||
|
"invalid: kinds 8000 and 8001 must include a single [\"p\", <hex pubkey>] tag",
|
||||||
|
invalid_nip43_pubkey_tag:
|
||||||
|
"invalid: kinds 8000 and 8001 must include a single [\"p\", <hex pubkey>] tag",
|
||||||
|
stale_nip43_join_request: "invalid: kind 28934 created_at must be recent",
|
||||||
|
stale_nip43_leave_request: "invalid: kind 28936 created_at must be recent"
|
||||||
}
|
}
|
||||||
|
|
||||||
@spec error_message(error_reason()) :: String.t()
|
@spec error_message(error_reason()) :: String.t()
|
||||||
@@ -169,16 +223,25 @@ defmodule Parrhesia.Protocol.EventValidator do
|
|||||||
defp validate_kind(kind) when is_integer(kind) and kind >= 0 and kind <= @max_kind, do: :ok
|
defp validate_kind(kind) when is_integer(kind) and kind >= 0 and kind <= @max_kind, do: :ok
|
||||||
defp validate_kind(_kind), do: {:error, :invalid_kind}
|
defp validate_kind(_kind), do: {:error, :invalid_kind}
|
||||||
|
|
||||||
defp validate_tags(tags) when is_list(tags) do
|
defp validate_tags(tags) when is_list(tags), do: validate_tags(tags, max_tags_per_event(), 0)
|
||||||
if Enum.all?(tags, &valid_tag?/1) do
|
|
||||||
:ok
|
|
||||||
else
|
|
||||||
{:error, :invalid_tags}
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
defp validate_tags(_tags), do: {:error, :invalid_tags}
|
defp validate_tags(_tags), do: {:error, :invalid_tags}
|
||||||
|
|
||||||
|
defp validate_tags([], _max_tags, _count), do: :ok
|
||||||
|
|
||||||
|
defp validate_tags([tag | rest], max_tags, count) do
|
||||||
|
cond do
|
||||||
|
count + 1 > max_tags ->
|
||||||
|
{:error, :too_many_tags}
|
||||||
|
|
||||||
|
valid_tag?(tag) ->
|
||||||
|
validate_tags(rest, max_tags, count + 1)
|
||||||
|
|
||||||
|
true ->
|
||||||
|
{:error, :invalid_tags}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
defp validate_content(content) when is_binary(content), do: :ok
|
defp validate_content(content) when is_binary(content), do: :ok
|
||||||
defp validate_content(_content), do: {:error, :invalid_content}
|
defp validate_content(_content), do: {:error, :invalid_content}
|
||||||
|
|
||||||
@@ -197,7 +260,7 @@ defmodule Parrhesia.Protocol.EventValidator do
|
|||||||
end
|
end
|
||||||
|
|
||||||
defp validate_signature(event) do
|
defp validate_signature(event) do
|
||||||
if verify_event_signatures?() do
|
if @verify_event_signatures_locked or verify_event_signatures?() do
|
||||||
verify_signature(event)
|
verify_signature(event)
|
||||||
else
|
else
|
||||||
:ok
|
:ok
|
||||||
@@ -240,6 +303,27 @@ defmodule Parrhesia.Protocol.EventValidator do
|
|||||||
defp validate_kind_specific(%{"kind" => 1059} = event),
|
defp validate_kind_specific(%{"kind" => 1059} = event),
|
||||||
do: validate_giftwrap_event(event)
|
do: validate_giftwrap_event(event)
|
||||||
|
|
||||||
|
defp validate_kind_specific(%{"kind" => 30_166} = event),
|
||||||
|
do: validate_nip66_discovery_event(event)
|
||||||
|
|
||||||
|
defp validate_kind_specific(%{"kind" => 10_166} = event),
|
||||||
|
do: validate_nip66_monitor_announcement(event)
|
||||||
|
|
||||||
|
defp validate_kind_specific(%{"kind" => 13_534} = event),
|
||||||
|
do: validate_nip43_membership_list(event)
|
||||||
|
|
||||||
|
defp validate_kind_specific(%{"kind" => kind} = event) when kind in [8_000, 8_001],
|
||||||
|
do: validate_nip43_membership_delta(event)
|
||||||
|
|
||||||
|
defp validate_kind_specific(%{"kind" => 28_934} = event),
|
||||||
|
do: validate_nip43_join_request(event)
|
||||||
|
|
||||||
|
defp validate_kind_specific(%{"kind" => 28_935} = event),
|
||||||
|
do: validate_nip43_invite_response(event)
|
||||||
|
|
||||||
|
defp validate_kind_specific(%{"kind" => 28_936} = event),
|
||||||
|
do: validate_nip43_leave_request(event)
|
||||||
|
|
||||||
defp validate_kind_specific(_event), do: :ok
|
defp validate_kind_specific(_event), do: :ok
|
||||||
|
|
||||||
defp validate_marmot_keypackage_event(event) do
|
defp validate_marmot_keypackage_event(event) do
|
||||||
@@ -313,6 +397,184 @@ defmodule Parrhesia.Protocol.EventValidator do
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp validate_nip66_discovery_event(event) do
|
||||||
|
tags = Map.get(event, "tags", [])
|
||||||
|
|
||||||
|
with :ok <- validate_nip66_d_tag(tags),
|
||||||
|
:ok <-
|
||||||
|
validate_optional_single_string_tag_with_predicate(
|
||||||
|
tags,
|
||||||
|
"n",
|
||||||
|
:invalid_nip66_discovery_tag,
|
||||||
|
&(&1 in ["clearnet", "tor", "i2p", "loki"])
|
||||||
|
),
|
||||||
|
:ok <-
|
||||||
|
validate_optional_single_string_tag_with_predicate(
|
||||||
|
tags,
|
||||||
|
"T",
|
||||||
|
:invalid_nip66_discovery_tag,
|
||||||
|
&valid_pascal_case?/1
|
||||||
|
),
|
||||||
|
:ok <-
|
||||||
|
validate_optional_single_string_tag_with_predicate(
|
||||||
|
tags,
|
||||||
|
"g",
|
||||||
|
:invalid_nip66_discovery_tag,
|
||||||
|
&non_empty_string?/1
|
||||||
|
),
|
||||||
|
:ok <-
|
||||||
|
validate_optional_repeated_tag(
|
||||||
|
tags,
|
||||||
|
"N",
|
||||||
|
&positive_integer_string?/1,
|
||||||
|
:invalid_nip66_discovery_tag
|
||||||
|
),
|
||||||
|
:ok <-
|
||||||
|
validate_optional_repeated_tag(
|
||||||
|
tags,
|
||||||
|
"R",
|
||||||
|
&valid_nip66_requirement_value?/1,
|
||||||
|
:invalid_nip66_discovery_tag
|
||||||
|
),
|
||||||
|
:ok <-
|
||||||
|
validate_optional_repeated_tag(
|
||||||
|
tags,
|
||||||
|
"k",
|
||||||
|
&valid_nip66_kind_value?/1,
|
||||||
|
:invalid_nip66_discovery_tag
|
||||||
|
),
|
||||||
|
:ok <-
|
||||||
|
validate_optional_repeated_tag(
|
||||||
|
tags,
|
||||||
|
"t",
|
||||||
|
&non_empty_string?/1,
|
||||||
|
:invalid_nip66_discovery_tag
|
||||||
|
),
|
||||||
|
:ok <-
|
||||||
|
validate_optional_single_string_tag_with_predicate(
|
||||||
|
tags,
|
||||||
|
"rtt-open",
|
||||||
|
:invalid_nip66_discovery_tag,
|
||||||
|
&positive_integer_string?/1
|
||||||
|
),
|
||||||
|
:ok <-
|
||||||
|
validate_optional_single_string_tag_with_predicate(
|
||||||
|
tags,
|
||||||
|
"rtt-read",
|
||||||
|
:invalid_nip66_discovery_tag,
|
||||||
|
&positive_integer_string?/1
|
||||||
|
) do
|
||||||
|
validate_optional_single_string_tag_with_predicate(
|
||||||
|
tags,
|
||||||
|
"rtt-write",
|
||||||
|
:invalid_nip66_discovery_tag,
|
||||||
|
&positive_integer_string?/1
|
||||||
|
)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_nip66_monitor_announcement(event) do
|
||||||
|
tags = Map.get(event, "tags", [])
|
||||||
|
|
||||||
|
with :ok <-
|
||||||
|
validate_single_string_tag_with_predicate(
|
||||||
|
tags,
|
||||||
|
"frequency",
|
||||||
|
:missing_nip66_frequency_tag,
|
||||||
|
:invalid_nip66_frequency_tag,
|
||||||
|
&positive_integer_string?/1
|
||||||
|
),
|
||||||
|
:ok <- validate_optional_repeated_timeout_tags(tags),
|
||||||
|
:ok <-
|
||||||
|
validate_optional_repeated_tag(
|
||||||
|
tags,
|
||||||
|
"c",
|
||||||
|
&valid_nip66_check_name?/1,
|
||||||
|
:invalid_nip66_check_tag
|
||||||
|
) do
|
||||||
|
validate_optional_single_string_tag_with_predicate(
|
||||||
|
tags,
|
||||||
|
"g",
|
||||||
|
:invalid_nip66_discovery_tag,
|
||||||
|
&non_empty_string?/1
|
||||||
|
)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_nip43_membership_list(event) do
|
||||||
|
tags = Map.get(event, "tags", [])
|
||||||
|
|
||||||
|
case validate_protected_tag(tags) do
|
||||||
|
:ok -> validate_optional_repeated_pubkey_tag(tags, "member", :invalid_nip43_member_tag)
|
||||||
|
{:error, _reason} = error -> error
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_nip43_membership_delta(event) do
|
||||||
|
tags = Map.get(event, "tags", [])
|
||||||
|
|
||||||
|
case validate_protected_tag(tags) do
|
||||||
|
:ok ->
|
||||||
|
validate_single_pubkey_tag(
|
||||||
|
tags,
|
||||||
|
"p",
|
||||||
|
:missing_nip43_pubkey_tag,
|
||||||
|
:invalid_nip43_pubkey_tag
|
||||||
|
)
|
||||||
|
|
||||||
|
{:error, _reason} = error ->
|
||||||
|
error
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_nip43_join_request(event) do
|
||||||
|
tags = Map.get(event, "tags", [])
|
||||||
|
|
||||||
|
case validate_protected_tag(tags) do
|
||||||
|
:ok ->
|
||||||
|
with :ok <-
|
||||||
|
validate_single_string_tag_with_predicate(
|
||||||
|
tags,
|
||||||
|
"claim",
|
||||||
|
:missing_nip43_claim_tag,
|
||||||
|
:invalid_nip43_claim_tag,
|
||||||
|
&non_empty_string?/1
|
||||||
|
) do
|
||||||
|
validate_recent_created_at(event, :stale_nip43_join_request)
|
||||||
|
end
|
||||||
|
|
||||||
|
{:error, _reason} = error ->
|
||||||
|
error
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_nip43_invite_response(event) do
|
||||||
|
tags = Map.get(event, "tags", [])
|
||||||
|
|
||||||
|
case validate_protected_tag(tags) do
|
||||||
|
:ok ->
|
||||||
|
validate_single_string_tag_with_predicate(
|
||||||
|
tags,
|
||||||
|
"claim",
|
||||||
|
:missing_nip43_claim_tag,
|
||||||
|
:invalid_nip43_claim_tag,
|
||||||
|
&non_empty_string?/1
|
||||||
|
)
|
||||||
|
|
||||||
|
{:error, _reason} = error ->
|
||||||
|
error
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_nip43_leave_request(event) do
|
||||||
|
tags = Map.get(event, "tags", [])
|
||||||
|
|
||||||
|
case validate_protected_tag(tags) do
|
||||||
|
:ok -> validate_recent_created_at(event, :stale_nip43_leave_request)
|
||||||
|
{:error, _reason} = error -> error
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
defp validate_non_empty_base64_content(event),
|
defp validate_non_empty_base64_content(event),
|
||||||
do: validate_non_empty_base64_content(event, :invalid_marmot_keypackage_content)
|
do: validate_non_empty_base64_content(event, :invalid_marmot_keypackage_content)
|
||||||
|
|
||||||
@@ -394,6 +656,25 @@ defmodule Parrhesia.Protocol.EventValidator do
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp validate_optional_single_string_tag_with_predicate(
|
||||||
|
tags,
|
||||||
|
tag_name,
|
||||||
|
invalid_error,
|
||||||
|
predicate
|
||||||
|
)
|
||||||
|
when is_function(predicate, 1) do
|
||||||
|
case Enum.filter(tags, &match_tag_name?(&1, tag_name)) do
|
||||||
|
[] ->
|
||||||
|
:ok
|
||||||
|
|
||||||
|
[[^tag_name, value]] ->
|
||||||
|
if predicate.(value), do: :ok, else: {:error, invalid_error}
|
||||||
|
|
||||||
|
_other ->
|
||||||
|
{:error, invalid_error}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
defp validate_mls_extensions_tag(tags) do
|
defp validate_mls_extensions_tag(tags) do
|
||||||
with {:ok, ["mls_extensions" | extensions]} <-
|
with {:ok, ["mls_extensions" | extensions]} <-
|
||||||
fetch_single_tag(tags, "mls_extensions", :missing_marmot_extensions_tag),
|
fetch_single_tag(tags, "mls_extensions", :missing_marmot_extensions_tag),
|
||||||
@@ -432,6 +713,89 @@ defmodule Parrhesia.Protocol.EventValidator do
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp validate_nip66_d_tag(tags) do
|
||||||
|
with {:ok, ["d", value]} <- fetch_single_tag(tags, "d", :missing_nip66_d_tag),
|
||||||
|
true <- valid_websocket_url?(value) or lowercase_hex?(value, 32) do
|
||||||
|
:ok
|
||||||
|
else
|
||||||
|
{:ok, _invalid_tag_shape} -> {:error, :invalid_nip66_d_tag}
|
||||||
|
false -> {:error, :invalid_nip66_d_tag}
|
||||||
|
{:error, _reason} = error -> error
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_optional_repeated_timeout_tags(tags) do
|
||||||
|
timeout_tags = Enum.filter(tags, &match_tag_name?(&1, "timeout"))
|
||||||
|
|
||||||
|
if Enum.all?(timeout_tags, &valid_nip66_timeout_tag?/1) do
|
||||||
|
:ok
|
||||||
|
else
|
||||||
|
{:error, :invalid_nip66_timeout_tag}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_optional_repeated_tag(tags, tag_name, predicate, invalid_error)
|
||||||
|
when is_function(predicate, 1) do
|
||||||
|
tags
|
||||||
|
|> Enum.filter(&match_tag_name?(&1, tag_name))
|
||||||
|
|> Enum.reduce_while(:ok, fn
|
||||||
|
[^tag_name, value], :ok ->
|
||||||
|
if predicate.(value), do: {:cont, :ok}, else: {:halt, {:error, invalid_error}}
|
||||||
|
|
||||||
|
_other, :ok ->
|
||||||
|
{:halt, {:error, invalid_error}}
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_protected_tag(tags) do
|
||||||
|
if Enum.any?(tags, &match?(["-"], &1)) do
|
||||||
|
:ok
|
||||||
|
else
|
||||||
|
{:error, :missing_nip43_protected_tag}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_single_pubkey_tag(tags, tag_name, missing_error, invalid_error) do
|
||||||
|
case fetch_single_tag(tags, tag_name, missing_error) do
|
||||||
|
{:ok, [^tag_name, value]} ->
|
||||||
|
if lowercase_hex?(value, 32) do
|
||||||
|
:ok
|
||||||
|
else
|
||||||
|
{:error, invalid_error}
|
||||||
|
end
|
||||||
|
|
||||||
|
{:ok, _invalid_tag_shape} ->
|
||||||
|
{:error, invalid_error}
|
||||||
|
|
||||||
|
{:error, _reason} = error ->
|
||||||
|
error
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_optional_repeated_pubkey_tag(tags, tag_name, invalid_error) do
|
||||||
|
matching_tags = Enum.filter(tags, &match_tag_name?(&1, tag_name))
|
||||||
|
|
||||||
|
if Enum.all?(matching_tags, fn
|
||||||
|
[^tag_name, pubkey | _rest] -> lowercase_hex?(pubkey, 32)
|
||||||
|
_other -> false
|
||||||
|
end) do
|
||||||
|
:ok
|
||||||
|
else
|
||||||
|
{:error, invalid_error}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_recent_created_at(%{"created_at" => created_at}, error_reason)
|
||||||
|
when is_integer(created_at) do
|
||||||
|
if created_at >= System.system_time(:second) - nip43_request_max_age_seconds() do
|
||||||
|
:ok
|
||||||
|
else
|
||||||
|
{:error, error_reason}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp validate_recent_created_at(_event, error_reason), do: {:error, error_reason}
|
||||||
|
|
||||||
defp fetch_single_tag(tags, tag_name, missing_error) do
|
defp fetch_single_tag(tags, tag_name, missing_error) do
|
||||||
case Enum.filter(tags, &match_tag_name?(&1, tag_name)) do
|
case Enum.filter(tags, &match_tag_name?(&1, tag_name)) do
|
||||||
[tag] -> {:ok, tag}
|
[tag] -> {:ok, tag}
|
||||||
@@ -488,6 +852,49 @@ defmodule Parrhesia.Protocol.EventValidator do
|
|||||||
|
|
||||||
defp valid_websocket_url?(_url), do: false
|
defp valid_websocket_url?(_url), do: false
|
||||||
|
|
||||||
|
defp valid_nip66_timeout_tag?(["timeout", milliseconds]),
|
||||||
|
do: positive_integer_string?(milliseconds)
|
||||||
|
|
||||||
|
defp valid_nip66_timeout_tag?(["timeout", check, milliseconds]) do
|
||||||
|
valid_nip66_check_name?(check) and positive_integer_string?(milliseconds)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp valid_nip66_timeout_tag?(_tag), do: false
|
||||||
|
|
||||||
|
defp valid_nip66_requirement_value?(value) when is_binary(value) do
|
||||||
|
normalized = String.trim_leading(value, "!")
|
||||||
|
normalized in ["auth", "writes", "pow", "payment"]
|
||||||
|
end
|
||||||
|
|
||||||
|
defp valid_nip66_requirement_value?(_value), do: false
|
||||||
|
|
||||||
|
defp valid_nip66_kind_value?(<<"!", rest::binary>>), do: positive_integer_string?(rest)
|
||||||
|
defp valid_nip66_kind_value?(value), do: positive_integer_string?(value)
|
||||||
|
|
||||||
|
defp valid_nip66_check_name?(value) when is_binary(value) do
|
||||||
|
String.match?(value, ~r/^[a-z0-9-]+$/)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp valid_nip66_check_name?(_value), do: false
|
||||||
|
|
||||||
|
defp valid_pascal_case?(value) when is_binary(value) do
|
||||||
|
String.match?(value, ~r/^[A-Z][A-Za-z0-9]*$/)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp valid_pascal_case?(_value), do: false
|
||||||
|
|
||||||
|
defp positive_integer_string?(value) when is_binary(value) do
|
||||||
|
case Integer.parse(value) do
|
||||||
|
{integer, ""} when integer >= 0 -> true
|
||||||
|
_other -> false
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp positive_integer_string?(_value), do: false
|
||||||
|
|
||||||
|
defp non_empty_string?(value) when is_binary(value), do: value != ""
|
||||||
|
defp non_empty_string?(_value), do: false
|
||||||
|
|
||||||
defp valid_keypackage_ref?(value) when is_binary(value) do
|
defp valid_keypackage_ref?(value) when is_binary(value) do
|
||||||
Enum.any?(@supported_keypackage_ref_sizes, &lowercase_hex?(value, &1))
|
Enum.any?(@supported_keypackage_ref_sizes, &lowercase_hex?(value, &1))
|
||||||
end
|
end
|
||||||
@@ -510,4 +917,17 @@ defmodule Parrhesia.Protocol.EventValidator do
|
|||||||
|> Application.get_env(:limits, [])
|
|> Application.get_env(:limits, [])
|
||||||
|> Keyword.get(:max_event_future_skew_seconds, @default_max_event_future_skew_seconds)
|
|> Keyword.get(:max_event_future_skew_seconds, @default_max_event_future_skew_seconds)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp max_tags_per_event do
|
||||||
|
case Application.get_env(:parrhesia, :limits, []) |> Keyword.get(:max_tags_per_event) do
|
||||||
|
value when is_integer(value) and value > 0 -> value
|
||||||
|
_other -> @default_max_tags_per_event
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp nip43_request_max_age_seconds do
|
||||||
|
:parrhesia
|
||||||
|
|> Application.get_env(:nip43, [])
|
||||||
|
|> Keyword.get(:request_max_age_seconds, @default_nip43_request_max_age_seconds)
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ defmodule Parrhesia.Protocol.Filter do
|
|||||||
|
|
||||||
@max_kind 65_535
|
@max_kind 65_535
|
||||||
@default_max_filters_per_req 16
|
@default_max_filters_per_req 16
|
||||||
|
@default_max_tag_values_per_filter 128
|
||||||
|
|
||||||
@type validation_error ::
|
@type validation_error ::
|
||||||
:invalid_filters
|
:invalid_filters
|
||||||
@@ -19,6 +20,7 @@ defmodule Parrhesia.Protocol.Filter do
|
|||||||
| :invalid_until
|
| :invalid_until
|
||||||
| :invalid_limit
|
| :invalid_limit
|
||||||
| :invalid_search
|
| :invalid_search
|
||||||
|
| :too_many_tag_values
|
||||||
| :invalid_tag_filter
|
| :invalid_tag_filter
|
||||||
|
|
||||||
@allowed_keys MapSet.new(["ids", "authors", "kinds", "since", "until", "limit", "search"])
|
@allowed_keys MapSet.new(["ids", "authors", "kinds", "since", "until", "limit", "search"])
|
||||||
@@ -36,6 +38,7 @@ defmodule Parrhesia.Protocol.Filter do
|
|||||||
invalid_until: "invalid: until must be a non-negative integer",
|
invalid_until: "invalid: until must be a non-negative integer",
|
||||||
invalid_limit: "invalid: limit must be a positive integer",
|
invalid_limit: "invalid: limit must be a positive integer",
|
||||||
invalid_search: "invalid: search must be a non-empty string",
|
invalid_search: "invalid: search must be a non-empty string",
|
||||||
|
too_many_tag_values: "invalid: tag filters exceed configured value limit",
|
||||||
invalid_tag_filter:
|
invalid_tag_filter:
|
||||||
"invalid: tag filters must use #<single-letter> with non-empty string arrays"
|
"invalid: tag filters must use #<single-letter> with non-empty string arrays"
|
||||||
}
|
}
|
||||||
@@ -178,19 +181,33 @@ defmodule Parrhesia.Protocol.Filter do
|
|||||||
filter
|
filter
|
||||||
|> Enum.filter(fn {key, _value} -> valid_tag_filter_key?(key) end)
|
|> Enum.filter(fn {key, _value} -> valid_tag_filter_key?(key) end)
|
||||||
|> Enum.reduce_while(:ok, fn {_key, values}, :ok ->
|
|> Enum.reduce_while(:ok, fn {_key, values}, :ok ->
|
||||||
if valid_tag_filter_values?(values) do
|
case validate_tag_filter_values(values) do
|
||||||
{:cont, :ok}
|
:ok -> {:cont, :ok}
|
||||||
else
|
{:error, reason} -> {:halt, {:error, reason}}
|
||||||
{:halt, {:error, :invalid_tag_filter}}
|
|
||||||
end
|
end
|
||||||
end)
|
end)
|
||||||
end
|
end
|
||||||
|
|
||||||
defp valid_tag_filter_values?(values) when is_list(values) do
|
defp validate_tag_filter_values(values) when is_list(values),
|
||||||
values != [] and Enum.all?(values, &is_binary/1)
|
do: validate_tag_filter_values(values, max_tag_values_per_filter(), 0)
|
||||||
end
|
|
||||||
|
|
||||||
defp valid_tag_filter_values?(_values), do: false
|
defp validate_tag_filter_values(_values), do: {:error, :invalid_tag_filter}
|
||||||
|
|
||||||
|
defp validate_tag_filter_values([], _max_values, 0), do: {:error, :invalid_tag_filter}
|
||||||
|
defp validate_tag_filter_values([], _max_values, _count), do: :ok
|
||||||
|
|
||||||
|
defp validate_tag_filter_values([value | rest], max_values, count) do
|
||||||
|
cond do
|
||||||
|
count + 1 > max_values ->
|
||||||
|
{:error, :too_many_tag_values}
|
||||||
|
|
||||||
|
is_binary(value) ->
|
||||||
|
validate_tag_filter_values(rest, max_values, count + 1)
|
||||||
|
|
||||||
|
true ->
|
||||||
|
{:error, :invalid_tag_filter}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
defp filter_predicates(event, filter) do
|
defp filter_predicates(event, filter) do
|
||||||
[
|
[
|
||||||
@@ -278,4 +295,12 @@ defmodule Parrhesia.Protocol.Filter do
|
|||||||
|> Application.get_env(:limits, [])
|
|> Application.get_env(:limits, [])
|
||||||
|> Keyword.get(:max_filters_per_req, @default_max_filters_per_req)
|
|> Keyword.get(:max_filters_per_req, @default_max_filters_per_req)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp max_tag_values_per_filter do
|
||||||
|
case Application.get_env(:parrhesia, :limits, [])
|
||||||
|
|> Keyword.get(:max_tag_values_per_filter) do
|
||||||
|
value when is_integer(value) and value > 0 -> value
|
||||||
|
_other -> @default_max_tag_values_per_filter
|
||||||
|
end
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|||||||
9
lib/parrhesia/read_repo.ex
Normal file
9
lib/parrhesia/read_repo.ex
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
defmodule Parrhesia.ReadRepo do
|
||||||
|
@moduledoc """
|
||||||
|
PostgreSQL repository dedicated to read-heavy workloads when a separate read pool is enabled.
|
||||||
|
"""
|
||||||
|
|
||||||
|
use Ecto.Repo,
|
||||||
|
otp_app: :parrhesia,
|
||||||
|
adapter: Ecto.Adapters.Postgres
|
||||||
|
end
|
||||||
@@ -1,10 +1,18 @@
|
|||||||
defmodule Parrhesia.Release do
|
defmodule Parrhesia.Release do
|
||||||
@moduledoc """
|
@moduledoc """
|
||||||
Helpers for running Ecto tasks from a production release.
|
Helpers for running Ecto tasks from a production release.
|
||||||
|
|
||||||
|
Intended for use from a release `eval` command where Mix is not available:
|
||||||
|
|
||||||
|
bin/parrhesia eval "Parrhesia.Release.migrate()"
|
||||||
|
bin/parrhesia eval "Parrhesia.Release.rollback(Parrhesia.Repo, 20260101000000)"
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@app :parrhesia
|
@app :parrhesia
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Runs all pending Ecto migrations for every configured repo.
|
||||||
|
"""
|
||||||
def migrate do
|
def migrate do
|
||||||
load_app()
|
load_app()
|
||||||
|
|
||||||
@@ -16,6 +24,9 @@ defmodule Parrhesia.Release do
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Rolls back the given `repo` to the specified migration `version`.
|
||||||
|
"""
|
||||||
def rollback(repo, version) when is_atom(repo) and is_integer(version) do
|
def rollback(repo, version) when is_atom(repo) and is_integer(version) do
|
||||||
load_app()
|
load_app()
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,9 @@
|
|||||||
defmodule Parrhesia.Repo do
|
defmodule Parrhesia.Repo do
|
||||||
@moduledoc """
|
@moduledoc """
|
||||||
PostgreSQL repository for storage adapter persistence.
|
PostgreSQL repository for write traffic and storage adapter persistence.
|
||||||
|
|
||||||
|
Separated from `Parrhesia.ReadRepo` so that ingest writes and read-heavy
|
||||||
|
queries use independent connection pools.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
use Ecto.Repo,
|
use Ecto.Repo,
|
||||||
|
|||||||
52
lib/parrhesia/runtime.ex
Normal file
52
lib/parrhesia/runtime.ex
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
defmodule Parrhesia.Runtime do
|
||||||
|
@moduledoc """
|
||||||
|
Top-level Parrhesia supervisor.
|
||||||
|
|
||||||
|
In normal standalone use, the `:parrhesia` application starts this supervisor automatically.
|
||||||
|
Host applications can also embed it directly under their own supervision tree:
|
||||||
|
|
||||||
|
children = [
|
||||||
|
{Parrhesia.Runtime, name: Parrhesia.Supervisor}
|
||||||
|
]
|
||||||
|
|
||||||
|
Parrhesia currently assumes a single runtime per BEAM node and uses globally registered
|
||||||
|
process names for core services.
|
||||||
|
"""
|
||||||
|
|
||||||
|
use Supervisor
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Starts the Parrhesia runtime supervisor.
|
||||||
|
|
||||||
|
Accepts a `:name` option (defaults to `Parrhesia.Supervisor`).
|
||||||
|
"""
|
||||||
|
def start_link(opts \\ []) do
|
||||||
|
name = Keyword.get(opts, :name, Parrhesia.Supervisor)
|
||||||
|
Supervisor.start_link(__MODULE__, opts, name: name)
|
||||||
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def init(_opts) do
|
||||||
|
Supervisor.init(children(), strategy: :one_for_one)
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Returns the list of child specifications started by the runtime supervisor.
|
||||||
|
"""
|
||||||
|
def children do
|
||||||
|
[
|
||||||
|
Parrhesia.Telemetry,
|
||||||
|
Parrhesia.ConnectionStats,
|
||||||
|
Parrhesia.Config,
|
||||||
|
Parrhesia.Web.EventIngestLimiter,
|
||||||
|
Parrhesia.Web.IPEventIngestLimiter,
|
||||||
|
Parrhesia.Storage.Supervisor,
|
||||||
|
Parrhesia.Subscriptions.Supervisor,
|
||||||
|
Parrhesia.Auth.Supervisor,
|
||||||
|
Parrhesia.Sync.Supervisor,
|
||||||
|
Parrhesia.Policy.Supervisor,
|
||||||
|
Parrhesia.Web.Endpoint,
|
||||||
|
Parrhesia.Tasks.Supervisor
|
||||||
|
]
|
||||||
|
end
|
||||||
|
end
|
||||||
@@ -4,24 +4,46 @@ defmodule Parrhesia.Storage do
|
|||||||
|
|
||||||
Domain/runtime code should resolve behavior modules through this module instead of
|
Domain/runtime code should resolve behavior modules through this module instead of
|
||||||
depending on concrete adapter implementations directly.
|
depending on concrete adapter implementations directly.
|
||||||
|
|
||||||
|
Each accessor validates that the configured module is loaded and declares the expected
|
||||||
|
behaviour before returning it.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@default_modules [
|
@default_modules [
|
||||||
events: Parrhesia.Storage.Adapters.Postgres.Events,
|
events: Parrhesia.Storage.Adapters.Postgres.Events,
|
||||||
|
acl: Parrhesia.Storage.Adapters.Postgres.ACL,
|
||||||
moderation: Parrhesia.Storage.Adapters.Postgres.Moderation,
|
moderation: Parrhesia.Storage.Adapters.Postgres.Moderation,
|
||||||
groups: Parrhesia.Storage.Adapters.Postgres.Groups,
|
groups: Parrhesia.Storage.Adapters.Postgres.Groups,
|
||||||
admin: Parrhesia.Storage.Adapters.Postgres.Admin
|
admin: Parrhesia.Storage.Adapters.Postgres.Admin
|
||||||
]
|
]
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Returns the configured events storage module.
|
||||||
|
"""
|
||||||
@spec events() :: module()
|
@spec events() :: module()
|
||||||
def events, do: fetch_module!(:events, Parrhesia.Storage.Events)
|
def events, do: fetch_module!(:events, Parrhesia.Storage.Events)
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Returns the configured moderation storage module.
|
||||||
|
"""
|
||||||
@spec moderation() :: module()
|
@spec moderation() :: module()
|
||||||
def moderation, do: fetch_module!(:moderation, Parrhesia.Storage.Moderation)
|
def moderation, do: fetch_module!(:moderation, Parrhesia.Storage.Moderation)
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Returns the configured ACL storage module.
|
||||||
|
"""
|
||||||
|
@spec acl() :: module()
|
||||||
|
def acl, do: fetch_module!(:acl, Parrhesia.Storage.ACL)
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Returns the configured groups storage module.
|
||||||
|
"""
|
||||||
@spec groups() :: module()
|
@spec groups() :: module()
|
||||||
def groups, do: fetch_module!(:groups, Parrhesia.Storage.Groups)
|
def groups, do: fetch_module!(:groups, Parrhesia.Storage.Groups)
|
||||||
|
|
||||||
|
@doc """
|
||||||
|
Returns the configured admin storage module.
|
||||||
|
"""
|
||||||
@spec admin() :: module()
|
@spec admin() :: module()
|
||||||
def admin, do: fetch_module!(:admin, Parrhesia.Storage.Admin)
|
def admin, do: fetch_module!(:admin, Parrhesia.Storage.Admin)
|
||||||
|
|
||||||
|
|||||||
14
lib/parrhesia/storage/acl.ex
Normal file
14
lib/parrhesia/storage/acl.ex
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
defmodule Parrhesia.Storage.ACL do
|
||||||
|
@moduledoc """
|
||||||
|
Storage callbacks for persisted ACL rules.
|
||||||
|
"""
|
||||||
|
|
||||||
|
@type context :: map()
|
||||||
|
@type rule :: map()
|
||||||
|
@type opts :: keyword()
|
||||||
|
@type reason :: term()
|
||||||
|
|
||||||
|
@callback put_rule(context(), rule()) :: {:ok, rule()} | {:error, reason()}
|
||||||
|
@callback delete_rule(context(), map()) :: :ok | {:error, reason()}
|
||||||
|
@callback list_rules(context(), opts()) :: {:ok, [rule()]} | {:error, reason()}
|
||||||
|
end
|
||||||
157
lib/parrhesia/storage/adapters/memory/acl.ex
Normal file
157
lib/parrhesia/storage/adapters/memory/acl.ex
Normal file
@@ -0,0 +1,157 @@
|
|||||||
|
defmodule Parrhesia.Storage.Adapters.Memory.ACL do
|
||||||
|
@moduledoc """
|
||||||
|
In-memory prototype adapter for `Parrhesia.Storage.ACL`.
|
||||||
|
"""
|
||||||
|
|
||||||
|
alias Parrhesia.Storage.Adapters.Memory.Store
|
||||||
|
|
||||||
|
@behaviour Parrhesia.Storage.ACL
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def put_rule(_context, rule) when is_map(rule) do
|
||||||
|
with {:ok, normalized_rule} <- normalize_rule(rule) do
|
||||||
|
Store.get_and_update(fn state -> put_rule_in_state(state, normalized_rule) end)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def put_rule(_context, _rule), do: {:error, :invalid_acl_rule}
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def delete_rule(_context, selector) when is_map(selector) do
|
||||||
|
case normalize_delete_selector(selector) do
|
||||||
|
{:ok, {:id, id}} ->
|
||||||
|
Store.update(fn state ->
|
||||||
|
%{state | acl_rules: Enum.reject(state.acl_rules, &(&1.id == id))}
|
||||||
|
end)
|
||||||
|
|
||||||
|
:ok
|
||||||
|
|
||||||
|
{:ok, {:exact, rule}} ->
|
||||||
|
Store.update(fn state ->
|
||||||
|
%{state | acl_rules: Enum.reject(state.acl_rules, &same_rule?(&1, rule))}
|
||||||
|
end)
|
||||||
|
|
||||||
|
:ok
|
||||||
|
|
||||||
|
{:error, reason} ->
|
||||||
|
{:error, reason}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def delete_rule(_context, _selector), do: {:error, :invalid_acl_rule}
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def list_rules(_context, opts) when is_list(opts) do
|
||||||
|
rules =
|
||||||
|
Store.get(fn state -> Enum.reverse(state.acl_rules) end)
|
||||||
|
|> Enum.filter(fn rule ->
|
||||||
|
matches_principal_type?(rule, Keyword.get(opts, :principal_type)) and
|
||||||
|
matches_principal?(rule, Keyword.get(opts, :principal)) and
|
||||||
|
matches_capability?(rule, Keyword.get(opts, :capability))
|
||||||
|
end)
|
||||||
|
|
||||||
|
{:ok, rules}
|
||||||
|
end
|
||||||
|
|
||||||
|
def list_rules(_context, _opts), do: {:error, :invalid_opts}
|
||||||
|
|
||||||
|
defp put_rule_in_state(state, normalized_rule) do
|
||||||
|
case Enum.find(state.acl_rules, &same_rule?(&1, normalized_rule)) do
|
||||||
|
nil ->
|
||||||
|
next_id = state.next_acl_rule_id
|
||||||
|
persisted_rule = Map.put(normalized_rule, :id, next_id)
|
||||||
|
|
||||||
|
{{:ok, persisted_rule},
|
||||||
|
%{
|
||||||
|
state
|
||||||
|
| acl_rules: [persisted_rule | state.acl_rules],
|
||||||
|
next_acl_rule_id: next_id + 1
|
||||||
|
}}
|
||||||
|
|
||||||
|
existing_rule ->
|
||||||
|
{{:ok, existing_rule}, state}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp matches_principal_type?(_rule, nil), do: true
|
||||||
|
defp matches_principal_type?(rule, principal_type), do: rule.principal_type == principal_type
|
||||||
|
|
||||||
|
defp matches_principal?(_rule, nil), do: true
|
||||||
|
defp matches_principal?(rule, principal), do: rule.principal == principal
|
||||||
|
|
||||||
|
defp matches_capability?(_rule, nil), do: true
|
||||||
|
defp matches_capability?(rule, capability), do: rule.capability == capability
|
||||||
|
|
||||||
|
defp same_rule?(left, right) do
|
||||||
|
left.principal_type == right.principal_type and
|
||||||
|
left.principal == right.principal and
|
||||||
|
left.capability == right.capability and
|
||||||
|
left.match == right.match
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_delete_selector(%{"id" => id}), do: normalize_delete_selector(%{id: id})
|
||||||
|
|
||||||
|
defp normalize_delete_selector(%{id: id}) when is_integer(id) and id > 0,
|
||||||
|
do: {:ok, {:id, id}}
|
||||||
|
|
||||||
|
defp normalize_delete_selector(selector) do
|
||||||
|
case normalize_rule(selector) do
|
||||||
|
{:ok, rule} -> {:ok, {:exact, rule}}
|
||||||
|
{:error, reason} -> {:error, reason}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_rule(rule) when is_map(rule) do
|
||||||
|
with {:ok, principal_type} <- normalize_principal_type(fetch(rule, :principal_type)),
|
||||||
|
{:ok, principal} <- normalize_principal(fetch(rule, :principal)),
|
||||||
|
{:ok, capability} <- normalize_capability(fetch(rule, :capability)),
|
||||||
|
{:ok, match} <- normalize_match(fetch(rule, :match)) do
|
||||||
|
{:ok,
|
||||||
|
%{
|
||||||
|
principal_type: principal_type,
|
||||||
|
principal: principal,
|
||||||
|
capability: capability,
|
||||||
|
match: match
|
||||||
|
}}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_rule(_rule), do: {:error, :invalid_acl_rule}
|
||||||
|
|
||||||
|
defp normalize_principal_type(:pubkey), do: {:ok, :pubkey}
|
||||||
|
defp normalize_principal_type("pubkey"), do: {:ok, :pubkey}
|
||||||
|
defp normalize_principal_type(_value), do: {:error, :invalid_acl_principal_type}
|
||||||
|
|
||||||
|
defp normalize_principal(value) when is_binary(value) and byte_size(value) == 64,
|
||||||
|
do: {:ok, String.downcase(value)}
|
||||||
|
|
||||||
|
defp normalize_principal(_value), do: {:error, :invalid_acl_principal}
|
||||||
|
|
||||||
|
defp normalize_capability(:sync_read), do: {:ok, :sync_read}
|
||||||
|
defp normalize_capability(:sync_write), do: {:ok, :sync_write}
|
||||||
|
defp normalize_capability("sync_read"), do: {:ok, :sync_read}
|
||||||
|
defp normalize_capability("sync_write"), do: {:ok, :sync_write}
|
||||||
|
defp normalize_capability(_value), do: {:error, :invalid_acl_capability}
|
||||||
|
|
||||||
|
defp normalize_match(match) when is_map(match) do
|
||||||
|
normalized_match =
|
||||||
|
Enum.reduce(match, %{}, fn
|
||||||
|
{key, values}, acc when is_binary(key) ->
|
||||||
|
Map.put(acc, key, values)
|
||||||
|
|
||||||
|
{key, values}, acc when is_atom(key) ->
|
||||||
|
Map.put(acc, Atom.to_string(key), values)
|
||||||
|
|
||||||
|
_entry, acc ->
|
||||||
|
acc
|
||||||
|
end)
|
||||||
|
|
||||||
|
{:ok, normalized_match}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_match(_match), do: {:error, :invalid_acl_match}
|
||||||
|
|
||||||
|
defp fetch(map, key) do
|
||||||
|
Map.get(map, key) || Map.get(map, Atom.to_string(key))
|
||||||
|
end
|
||||||
|
end
|
||||||
@@ -6,6 +6,9 @@ defmodule Parrhesia.Storage.Adapters.Memory.Admin do
|
|||||||
alias Parrhesia.Storage.Adapters.Memory.Store
|
alias Parrhesia.Storage.Adapters.Memory.Store
|
||||||
|
|
||||||
@behaviour Parrhesia.Storage.Admin
|
@behaviour Parrhesia.Storage.Admin
|
||||||
|
@default_limit 100
|
||||||
|
@max_limit 1_000
|
||||||
|
@max_audit_logs 1_000
|
||||||
|
|
||||||
@impl true
|
@impl true
|
||||||
def execute(_context, method, _params) do
|
def execute(_context, method, _params) do
|
||||||
@@ -17,18 +20,59 @@ defmodule Parrhesia.Storage.Adapters.Memory.Admin do
|
|||||||
|
|
||||||
@impl true
|
@impl true
|
||||||
def append_audit_log(_context, audit_entry) when is_map(audit_entry) do
|
def append_audit_log(_context, audit_entry) when is_map(audit_entry) do
|
||||||
Store.update(fn state -> update_in(state.audit_logs, &[audit_entry | &1]) end)
|
Store.update(fn state ->
|
||||||
|
update_in(state.audit_logs, fn logs ->
|
||||||
|
[audit_entry | logs] |> Enum.take(@max_audit_logs)
|
||||||
|
end)
|
||||||
|
end)
|
||||||
|
|
||||||
:ok
|
:ok
|
||||||
end
|
end
|
||||||
|
|
||||||
def append_audit_log(_context, _audit_entry), do: {:error, :invalid_audit_entry}
|
def append_audit_log(_context, _audit_entry), do: {:error, :invalid_audit_entry}
|
||||||
|
|
||||||
@impl true
|
@impl true
|
||||||
def list_audit_logs(_context, _opts) do
|
def list_audit_logs(_context, opts) when is_list(opts) do
|
||||||
{:ok, Store.get(fn state -> Enum.reverse(state.audit_logs) end)}
|
limit = normalize_limit(Keyword.get(opts, :limit, @default_limit))
|
||||||
|
method = normalize_method_filter(Keyword.get(opts, :method))
|
||||||
|
actor_pubkey = Keyword.get(opts, :actor_pubkey)
|
||||||
|
|
||||||
|
logs =
|
||||||
|
Store.get(fn state ->
|
||||||
|
state.audit_logs
|
||||||
|
|> Enum.filter(&matches_filters?(&1, method, actor_pubkey))
|
||||||
|
|> Enum.take(limit)
|
||||||
|
end)
|
||||||
|
|
||||||
|
{:ok, logs}
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def list_audit_logs(_context, _opts), do: {:error, :invalid_opts}
|
||||||
|
|
||||||
defp normalize_method(method) when is_binary(method), do: method
|
defp normalize_method(method) when is_binary(method), do: method
|
||||||
defp normalize_method(method) when is_atom(method), do: Atom.to_string(method)
|
defp normalize_method(method) when is_atom(method), do: Atom.to_string(method)
|
||||||
defp normalize_method(method), do: inspect(method)
|
defp normalize_method(method), do: inspect(method)
|
||||||
|
|
||||||
|
defp normalize_limit(limit) when is_integer(limit) and limit > 0, do: min(limit, @max_limit)
|
||||||
|
defp normalize_limit(_limit), do: @default_limit
|
||||||
|
|
||||||
|
defp normalize_method_filter(nil), do: nil
|
||||||
|
defp normalize_method_filter(method), do: normalize_method(method)
|
||||||
|
|
||||||
|
defp matches_method?(_entry, nil), do: true
|
||||||
|
|
||||||
|
defp matches_method?(entry, method) do
|
||||||
|
normalize_method(Map.get(entry, :method) || Map.get(entry, "method")) == method
|
||||||
|
end
|
||||||
|
|
||||||
|
defp matches_actor_pubkey?(_entry, nil), do: true
|
||||||
|
|
||||||
|
defp matches_actor_pubkey?(entry, actor_pubkey) do
|
||||||
|
Map.get(entry, :actor_pubkey) == actor_pubkey or
|
||||||
|
Map.get(entry, "actor_pubkey") == actor_pubkey
|
||||||
|
end
|
||||||
|
|
||||||
|
defp matches_filters?(entry, method, actor_pubkey) do
|
||||||
|
matches_method?(entry, method) and matches_actor_pubkey?(entry, actor_pubkey)
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|||||||
@@ -12,53 +12,75 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
|
|||||||
def put_event(_context, event) do
|
def put_event(_context, event) do
|
||||||
event_id = Map.fetch!(event, "id")
|
event_id = Map.fetch!(event, "id")
|
||||||
|
|
||||||
result =
|
case Store.put_event(event_id, event) do
|
||||||
Store.get_and_update(fn state ->
|
:ok -> {:ok, event}
|
||||||
if Map.has_key?(state.events, event_id) do
|
{:error, :duplicate_event} -> {:error, :duplicate_event}
|
||||||
{{:error, :duplicate_event}, state}
|
end
|
||||||
else
|
|
||||||
next_state = put_in(state.events[event_id], event)
|
|
||||||
{{:ok, event}, next_state}
|
|
||||||
end
|
|
||||||
end)
|
|
||||||
|
|
||||||
result
|
|
||||||
end
|
end
|
||||||
|
|
||||||
@impl true
|
@impl true
|
||||||
def get_event(_context, event_id) do
|
def get_event(_context, event_id) do
|
||||||
deleted? = Store.get(fn state -> MapSet.member?(state.deleted, event_id) end)
|
case Store.get_event(event_id) do
|
||||||
|
{:ok, _event, true} -> {:ok, nil}
|
||||||
if deleted? do
|
{:ok, event, false} -> {:ok, event}
|
||||||
{:ok, nil}
|
:error -> {:ok, nil}
|
||||||
else
|
|
||||||
{:ok, Store.get(fn state -> Map.get(state.events, event_id) end)}
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
@impl true
|
@impl true
|
||||||
def query(_context, filters, opts) do
|
def query(_context, filters, opts) do
|
||||||
with :ok <- Filter.validate_filters(filters) do
|
with :ok <- Filter.validate_filters(filters) do
|
||||||
state = Store.get(& &1)
|
|
||||||
requester_pubkeys = Keyword.get(opts, :requester_pubkeys, [])
|
requester_pubkeys = Keyword.get(opts, :requester_pubkeys, [])
|
||||||
|
|
||||||
events =
|
events =
|
||||||
state.events
|
filters
|
||||||
|> Map.values()
|
|> Enum.flat_map(&matching_events_for_filter(&1, requester_pubkeys, opts))
|
||||||
|> Enum.filter(fn event ->
|
|> deduplicate_events()
|
||||||
not MapSet.member?(state.deleted, event["id"]) and
|
|> sort_events()
|
||||||
Filter.matches_any?(event, filters) and
|
|> maybe_apply_query_limit(opts)
|
||||||
giftwrap_visible_to_requester?(event, requester_pubkeys)
|
|
||||||
end)
|
|
||||||
|
|
||||||
{:ok, events}
|
{:ok, events}
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
@impl true
|
@impl true
|
||||||
def count(context, filters, opts) do
|
def query_event_refs(_context, filters, opts) do
|
||||||
with {:ok, events} <- query(context, filters, opts) do
|
with :ok <- Filter.validate_filters(filters) do
|
||||||
{:ok, length(events)}
|
requester_pubkeys = Keyword.get(opts, :requester_pubkeys, [])
|
||||||
|
query_opts = Keyword.put(opts, :apply_filter_limits?, false)
|
||||||
|
|
||||||
|
{_, refs} =
|
||||||
|
reduce_unique_matching_events(
|
||||||
|
filters,
|
||||||
|
requester_pubkeys,
|
||||||
|
query_opts,
|
||||||
|
{MapSet.new(), []},
|
||||||
|
&append_unique_event_ref/2
|
||||||
|
)
|
||||||
|
|
||||||
|
refs =
|
||||||
|
refs |> Enum.sort(&(compare_event_refs(&1, &2) != :gt)) |> maybe_limit_event_refs(opts)
|
||||||
|
|
||||||
|
{:ok, refs}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def count(_context, filters, opts) do
|
||||||
|
with :ok <- Filter.validate_filters(filters) do
|
||||||
|
requester_pubkeys = Keyword.get(opts, :requester_pubkeys, [])
|
||||||
|
query_opts = Keyword.put(opts, :apply_filter_limits?, false)
|
||||||
|
|
||||||
|
{_seen_ids, count} =
|
||||||
|
reduce_unique_matching_events(
|
||||||
|
filters,
|
||||||
|
requester_pubkeys,
|
||||||
|
query_opts,
|
||||||
|
{MapSet.new(), 0},
|
||||||
|
&count_unique_event/2
|
||||||
|
)
|
||||||
|
|
||||||
|
{:ok, count}
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
@@ -89,22 +111,14 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
|
|||||||
end)
|
end)
|
||||||
|
|
||||||
coordinate_delete_ids =
|
coordinate_delete_ids =
|
||||||
Store.get(fn state ->
|
delete_coordinates
|
||||||
state.events
|
|> coordinate_delete_candidates(deleter_pubkey)
|
||||||
|> Map.values()
|
|> Enum.filter(&matches_delete_coordinate?(&1, delete_coordinates, deleter_pubkey))
|
||||||
|> Enum.filter(fn candidate ->
|
|> Enum.map(& &1["id"])
|
||||||
matches_delete_coordinate?(candidate, delete_coordinates, deleter_pubkey)
|
|
||||||
end)
|
|
||||||
|> Enum.map(& &1["id"])
|
|
||||||
end)
|
|
||||||
|
|
||||||
all_delete_ids = Enum.uniq(delete_event_ids ++ coordinate_delete_ids)
|
all_delete_ids = Enum.uniq(delete_event_ids ++ coordinate_delete_ids)
|
||||||
|
|
||||||
Store.update(fn state ->
|
Enum.each(all_delete_ids, &Store.mark_deleted/1)
|
||||||
Enum.reduce(all_delete_ids, state, fn event_id, acc ->
|
|
||||||
update_in(acc.deleted, &MapSet.put(&1, event_id))
|
|
||||||
end)
|
|
||||||
end)
|
|
||||||
|
|
||||||
{:ok, length(all_delete_ids)}
|
{:ok, length(all_delete_ids)}
|
||||||
end
|
end
|
||||||
@@ -114,18 +128,11 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
|
|||||||
pubkey = Map.get(event, "pubkey")
|
pubkey = Map.get(event, "pubkey")
|
||||||
|
|
||||||
deleted_ids =
|
deleted_ids =
|
||||||
Store.get(fn state ->
|
pubkey
|
||||||
state.events
|
|> vanish_candidates(Map.get(event, "created_at"))
|
||||||
|> Map.values()
|
|> Enum.map(& &1["id"])
|
||||||
|> Enum.filter(fn candidate -> candidate["pubkey"] == pubkey end)
|
|
||||||
|> Enum.map(& &1["id"])
|
|
||||||
end)
|
|
||||||
|
|
||||||
Store.update(fn state ->
|
Enum.each(deleted_ids, &Store.mark_deleted/1)
|
||||||
Enum.reduce(deleted_ids, state, fn event_id, acc ->
|
|
||||||
update_in(acc.deleted, &MapSet.put(&1, event_id))
|
|
||||||
end)
|
|
||||||
end)
|
|
||||||
|
|
||||||
{:ok, length(deleted_ids)}
|
{:ok, length(deleted_ids)}
|
||||||
end
|
end
|
||||||
@@ -189,4 +196,328 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
|
|||||||
_tag -> false
|
_tag -> false
|
||||||
end)
|
end)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp compare_event_refs(left, right) do
|
||||||
|
cond do
|
||||||
|
left.created_at < right.created_at -> :lt
|
||||||
|
left.created_at > right.created_at -> :gt
|
||||||
|
left.id < right.id -> :lt
|
||||||
|
left.id > right.id -> :gt
|
||||||
|
true -> :eq
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_limit_event_refs(refs, opts) do
|
||||||
|
case Keyword.get(opts, :limit) do
|
||||||
|
limit when is_integer(limit) and limit > 0 -> Enum.take(refs, limit)
|
||||||
|
_other -> refs
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp matching_events_for_filter(filter, requester_pubkeys, opts) do
|
||||||
|
cond do
|
||||||
|
Map.has_key?(filter, "ids") ->
|
||||||
|
direct_id_lookup_events(filter, requester_pubkeys, opts)
|
||||||
|
|
||||||
|
indexed_candidate_spec(filter) != nil ->
|
||||||
|
indexed_tag_lookup_events(filter, requester_pubkeys, opts)
|
||||||
|
|
||||||
|
true ->
|
||||||
|
scan_filter_matches(filter, requester_pubkeys, opts)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp direct_id_lookup_events(filter, requester_pubkeys, opts) do
|
||||||
|
filter
|
||||||
|
|> Map.get("ids", [])
|
||||||
|
|> Enum.reduce([], fn event_id, acc ->
|
||||||
|
maybe_prepend_direct_lookup_match(acc, event_id, filter, requester_pubkeys)
|
||||||
|
end)
|
||||||
|
|> deduplicate_events()
|
||||||
|
|> sort_events()
|
||||||
|
|> maybe_take_filter_limit(filter, opts)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp scan_filter_matches(filter, requester_pubkeys, opts) do
|
||||||
|
limit =
|
||||||
|
if Keyword.get(opts, :apply_filter_limits?, true) do
|
||||||
|
effective_filter_limit(filter, opts)
|
||||||
|
else
|
||||||
|
nil
|
||||||
|
end
|
||||||
|
|
||||||
|
{matches, _count} =
|
||||||
|
Store.reduce_events_newest(
|
||||||
|
{[], 0},
|
||||||
|
&reduce_scan_match(&1, &2, filter, requester_pubkeys, limit)
|
||||||
|
)
|
||||||
|
|
||||||
|
matches
|
||||||
|
|> Enum.reverse()
|
||||||
|
|> sort_events()
|
||||||
|
end
|
||||||
|
|
||||||
|
defp indexed_tag_lookup_events(filter, requester_pubkeys, opts) do
|
||||||
|
filter
|
||||||
|
|> indexed_candidate_events()
|
||||||
|
|> Enum.filter(&filter_match_visible?(&1, filter, requester_pubkeys))
|
||||||
|
|> maybe_take_filter_limit(filter, opts)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp indexed_tag_filter(filter) do
|
||||||
|
filter
|
||||||
|
|> Enum.filter(fn
|
||||||
|
{"#" <> _tag_name, values} when is_list(values) -> values != []
|
||||||
|
_entry -> false
|
||||||
|
end)
|
||||||
|
|> Enum.sort_by(fn {key, _values} -> key end)
|
||||||
|
|> List.first()
|
||||||
|
|> case do
|
||||||
|
{"#" <> tag_name, values} -> {tag_name, values}
|
||||||
|
nil -> nil
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp indexed_candidate_spec(filter) do
|
||||||
|
authors = Map.get(filter, "authors")
|
||||||
|
kinds = Map.get(filter, "kinds")
|
||||||
|
tag_filter = indexed_tag_filter(filter)
|
||||||
|
|
||||||
|
cond do
|
||||||
|
is_tuple(tag_filter) ->
|
||||||
|
{tag_name, tag_values} = tag_filter
|
||||||
|
{:tag, tag_name, effective_indexed_tag_values(filter, tag_values)}
|
||||||
|
|
||||||
|
is_list(authors) and is_list(kinds) ->
|
||||||
|
{:pubkey_kind, authors, kinds}
|
||||||
|
|
||||||
|
is_list(authors) ->
|
||||||
|
{:pubkey, authors}
|
||||||
|
|
||||||
|
is_list(kinds) ->
|
||||||
|
{:kind, kinds}
|
||||||
|
|
||||||
|
true ->
|
||||||
|
nil
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp indexed_candidate_events(filter) do
|
||||||
|
case indexed_candidate_spec(filter) do
|
||||||
|
{:tag, tag_name, tag_values} ->
|
||||||
|
Store.tagged_events(tag_name, tag_values)
|
||||||
|
|
||||||
|
{:pubkey_kind, authors, kinds} ->
|
||||||
|
Store.events_by_pubkeys_and_kinds(authors, kinds)
|
||||||
|
|
||||||
|
{:pubkey, authors} ->
|
||||||
|
Store.events_by_pubkeys(authors)
|
||||||
|
|
||||||
|
{:kind, kinds} ->
|
||||||
|
Store.events_by_kinds(kinds)
|
||||||
|
|
||||||
|
nil ->
|
||||||
|
[]
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp effective_indexed_tag_values(filter, tag_values) do
|
||||||
|
case Map.get(filter, "limit") do
|
||||||
|
limit when is_integer(limit) and limit == 1 ->
|
||||||
|
Enum.take(tag_values, 1)
|
||||||
|
|
||||||
|
_other ->
|
||||||
|
tag_values
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp filter_match_visible?(event, filter, requester_pubkeys) do
|
||||||
|
Filter.matches_filter?(event, filter) and
|
||||||
|
giftwrap_visible_to_requester?(event, requester_pubkeys)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_prepend_direct_lookup_match(acc, event_id, filter, requester_pubkeys) do
|
||||||
|
case Store.get_event(event_id) do
|
||||||
|
{:ok, event, false} ->
|
||||||
|
if filter_match_visible?(event, filter, requester_pubkeys) do
|
||||||
|
[event | acc]
|
||||||
|
else
|
||||||
|
acc
|
||||||
|
end
|
||||||
|
|
||||||
|
_other ->
|
||||||
|
acc
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp reduce_scan_match(event, {acc, count}, filter, requester_pubkeys, limit) do
|
||||||
|
if filter_match_visible?(event, filter, requester_pubkeys) do
|
||||||
|
maybe_halt_scan([event | acc], count + 1, limit)
|
||||||
|
else
|
||||||
|
{acc, count}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_halt_scan(acc, count, limit) when is_integer(limit) and count >= limit do
|
||||||
|
{:halt, {acc, count}}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_halt_scan(acc, count, _limit), do: {acc, count}
|
||||||
|
|
||||||
|
defp reduce_unique_matching_events(filters, requester_pubkeys, opts, acc, reducer) do
|
||||||
|
Enum.reduce(filters, acc, fn filter, current_acc ->
|
||||||
|
reduce_matching_events_for_filter(filter, requester_pubkeys, opts, current_acc, reducer)
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp reduce_matching_events_for_filter(filter, requester_pubkeys, _opts, acc, reducer) do
|
||||||
|
cond do
|
||||||
|
Map.has_key?(filter, "ids") ->
|
||||||
|
filter
|
||||||
|
|> Map.get("ids", [])
|
||||||
|
|> Enum.reduce(acc, &reduce_event_id_match(&1, filter, requester_pubkeys, &2, reducer))
|
||||||
|
|
||||||
|
indexed_candidate_spec(filter) != nil ->
|
||||||
|
filter
|
||||||
|
|> indexed_candidate_events()
|
||||||
|
|> Enum.reduce(
|
||||||
|
acc,
|
||||||
|
&maybe_reduce_visible_event(&1, filter, requester_pubkeys, &2, reducer)
|
||||||
|
)
|
||||||
|
|
||||||
|
true ->
|
||||||
|
Store.reduce_events_newest(
|
||||||
|
acc,
|
||||||
|
&maybe_reduce_visible_event(&1, filter, requester_pubkeys, &2, reducer)
|
||||||
|
)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp coordinate_delete_candidates(delete_coordinates, deleter_pubkey) do
|
||||||
|
delete_coordinates
|
||||||
|
|> Enum.flat_map(fn coordinate ->
|
||||||
|
cond do
|
||||||
|
coordinate.pubkey != deleter_pubkey ->
|
||||||
|
[]
|
||||||
|
|
||||||
|
addressable_kind?(coordinate.kind) ->
|
||||||
|
Store.events_by_addresses([{coordinate.kind, deleter_pubkey, coordinate.d_tag}])
|
||||||
|
|
||||||
|
replaceable_kind?(coordinate.kind) ->
|
||||||
|
Store.events_by_pubkeys_and_kinds([deleter_pubkey], [coordinate.kind])
|
||||||
|
|
||||||
|
true ->
|
||||||
|
[]
|
||||||
|
end
|
||||||
|
end)
|
||||||
|
|> deduplicate_events()
|
||||||
|
end
|
||||||
|
|
||||||
|
defp vanish_candidates(pubkey, created_at) do
|
||||||
|
own_events =
|
||||||
|
Store.events_by_pubkeys([pubkey])
|
||||||
|
|> Enum.filter(&(&1["created_at"] <= created_at))
|
||||||
|
|
||||||
|
giftwrap_events =
|
||||||
|
Store.tagged_events("p", [pubkey])
|
||||||
|
|> Enum.filter(&(&1["kind"] == 1059 and &1["created_at"] <= created_at))
|
||||||
|
|
||||||
|
deduplicate_events(own_events ++ giftwrap_events)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp event_ref(event) do
|
||||||
|
%{
|
||||||
|
created_at: Map.fetch!(event, "created_at"),
|
||||||
|
id: Base.decode16!(Map.fetch!(event, "id"), case: :mixed)
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp append_unique_event_ref(event, {seen_ids, acc}) do
|
||||||
|
reduce_unique_event(event, {seen_ids, acc}, fn _event_id, next_seen_ids ->
|
||||||
|
{next_seen_ids, [event_ref(event) | acc]}
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp count_unique_event(event, {seen_ids, acc}) do
|
||||||
|
reduce_unique_event(event, {seen_ids, acc}, fn _event_id, next_seen_ids ->
|
||||||
|
{next_seen_ids, acc + 1}
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp reduce_unique_event(event, {seen_ids, acc}, fun) do
|
||||||
|
event_id = Map.fetch!(event, "id")
|
||||||
|
|
||||||
|
if MapSet.member?(seen_ids, event_id) do
|
||||||
|
{seen_ids, acc}
|
||||||
|
else
|
||||||
|
fun.(event_id, MapSet.put(seen_ids, event_id))
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_reduce_visible_event(event, filter, requester_pubkeys, acc, reducer) do
|
||||||
|
if filter_match_visible?(event, filter, requester_pubkeys) do
|
||||||
|
reducer.(event, acc)
|
||||||
|
else
|
||||||
|
acc
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp reduce_event_id_match(event_id, filter, requester_pubkeys, acc, reducer) do
|
||||||
|
case Store.get_event(event_id) do
|
||||||
|
{:ok, event, false} ->
|
||||||
|
maybe_reduce_visible_event(event, filter, requester_pubkeys, acc, reducer)
|
||||||
|
|
||||||
|
_other ->
|
||||||
|
acc
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp deduplicate_events(events) do
|
||||||
|
events
|
||||||
|
|> Enum.reduce(%{}, fn event, acc -> Map.put(acc, event["id"], event) end)
|
||||||
|
|> Map.values()
|
||||||
|
end
|
||||||
|
|
||||||
|
defp sort_events(events) do
|
||||||
|
Enum.sort(events, &chronological_sorter/2)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp chronological_sorter(left, right) do
|
||||||
|
cond do
|
||||||
|
left["created_at"] > right["created_at"] -> true
|
||||||
|
left["created_at"] < right["created_at"] -> false
|
||||||
|
true -> left["id"] < right["id"]
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_apply_query_limit(events, opts) do
|
||||||
|
case Keyword.get(opts, :limit) do
|
||||||
|
limit when is_integer(limit) and limit > 0 -> Enum.take(events, limit)
|
||||||
|
_other -> events
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_take_filter_limit(events, filter, opts) do
|
||||||
|
case effective_filter_limit(filter, opts) do
|
||||||
|
limit when is_integer(limit) and limit > 0 -> Enum.take(events, limit)
|
||||||
|
_other -> events
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp effective_filter_limit(filter, opts) do
|
||||||
|
max_filter_limit = Keyword.get(opts, :max_filter_limit)
|
||||||
|
|
||||||
|
case Map.get(filter, "limit") do
|
||||||
|
limit
|
||||||
|
when is_integer(limit) and limit > 0 and is_integer(max_filter_limit) and
|
||||||
|
max_filter_limit > 0 ->
|
||||||
|
min(limit, max_filter_limit)
|
||||||
|
|
||||||
|
limit when is_integer(limit) and limit > 0 ->
|
||||||
|
limit
|
||||||
|
|
||||||
|
_other ->
|
||||||
|
nil
|
||||||
|
end
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|||||||
@@ -33,6 +33,11 @@ defmodule Parrhesia.Storage.Adapters.Memory.Moderation do
|
|||||||
{:ok, Store.get(fn state -> MapSet.member?(state.allowed_pubkeys, pubkey) end)}
|
{:ok, Store.get(fn state -> MapSet.member?(state.allowed_pubkeys, pubkey) end)}
|
||||||
end
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def has_allowed_pubkeys?(_context) do
|
||||||
|
{:ok, Store.get(fn state -> MapSet.size(state.allowed_pubkeys) > 0 end)}
|
||||||
|
end
|
||||||
|
|
||||||
@impl true
|
@impl true
|
||||||
def ban_event(_context, event_id), do: update_ban_set(:events, event_id, :add)
|
def ban_event(_context, event_id), do: update_ban_set(:events, event_id, :add)
|
||||||
|
|
||||||
|
|||||||
@@ -4,33 +4,160 @@ defmodule Parrhesia.Storage.Adapters.Memory.Store do
|
|||||||
use Agent
|
use Agent
|
||||||
|
|
||||||
@name __MODULE__
|
@name __MODULE__
|
||||||
|
@events_table :parrhesia_memory_events
|
||||||
|
@events_by_time_table :parrhesia_memory_events_by_time
|
||||||
|
@events_by_tag_table :parrhesia_memory_events_by_tag
|
||||||
|
@events_by_pubkey_table :parrhesia_memory_events_by_pubkey
|
||||||
|
@events_by_kind_table :parrhesia_memory_events_by_kind
|
||||||
|
@events_by_pubkey_kind_table :parrhesia_memory_events_by_pubkey_kind
|
||||||
|
@events_by_address_table :parrhesia_memory_events_by_address
|
||||||
|
|
||||||
@initial_state %{
|
@initial_state %{
|
||||||
events: %{},
|
|
||||||
deleted: MapSet.new(),
|
|
||||||
bans: %{pubkeys: MapSet.new(), events: MapSet.new(), ips: MapSet.new()},
|
bans: %{pubkeys: MapSet.new(), events: MapSet.new(), ips: MapSet.new()},
|
||||||
allowed_pubkeys: MapSet.new(),
|
allowed_pubkeys: MapSet.new(),
|
||||||
|
acl_rules: [],
|
||||||
|
next_acl_rule_id: 1,
|
||||||
groups: %{},
|
groups: %{},
|
||||||
roles: %{},
|
roles: %{},
|
||||||
audit_logs: []
|
audit_logs: []
|
||||||
}
|
}
|
||||||
|
|
||||||
def ensure_started do
|
def ensure_started, do: start_store()
|
||||||
if Process.whereis(@name) do
|
|
||||||
|
def put_event(event_id, event) when is_binary(event_id) and is_map(event) do
|
||||||
|
:ok = ensure_started()
|
||||||
|
|
||||||
|
if :ets.insert_new(@events_table, {event_id, event, false}) do
|
||||||
|
true = :ets.insert(@events_by_time_table, {{sort_key(event), event_id}, event_id})
|
||||||
|
index_event_tags(event_id, event)
|
||||||
|
index_event_secondary_keys(event_id, event)
|
||||||
:ok
|
:ok
|
||||||
else
|
else
|
||||||
start_store()
|
{:error, :duplicate_event}
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
defp start_store do
|
def get_event(event_id) when is_binary(event_id) do
|
||||||
case Agent.start_link(fn -> @initial_state end, name: @name) do
|
:ok = ensure_started()
|
||||||
{:ok, _pid} -> :ok
|
|
||||||
{:error, {:already_started, _pid}} -> :ok
|
case :ets.lookup(@events_table, event_id) do
|
||||||
{:error, reason} -> {:error, reason}
|
[{^event_id, event, deleted?}] -> {:ok, event, deleted?}
|
||||||
|
[] -> :error
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
def mark_deleted(event_id) when is_binary(event_id) do
|
||||||
|
:ok = ensure_started()
|
||||||
|
|
||||||
|
case lookup_event(event_id) do
|
||||||
|
{:ok, event, false} ->
|
||||||
|
true = :ets.insert(@events_table, {event_id, event, true})
|
||||||
|
true = :ets.delete(@events_by_time_table, {sort_key(event), event_id})
|
||||||
|
unindex_event_tags(event_id, event)
|
||||||
|
unindex_event_secondary_keys(event_id, event)
|
||||||
|
:ok
|
||||||
|
|
||||||
|
{:ok, _event, true} ->
|
||||||
|
:ok
|
||||||
|
|
||||||
|
:error ->
|
||||||
|
:ok
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def reduce_events(acc, fun) when is_function(fun, 2) do
|
||||||
|
:ok = ensure_started()
|
||||||
|
|
||||||
|
:ets.foldl(
|
||||||
|
fn {_event_id, event, deleted?}, current_acc ->
|
||||||
|
if deleted? do
|
||||||
|
current_acc
|
||||||
|
else
|
||||||
|
fun.(event, current_acc)
|
||||||
|
end
|
||||||
|
end,
|
||||||
|
acc,
|
||||||
|
@events_table
|
||||||
|
)
|
||||||
|
end
|
||||||
|
|
||||||
|
def reduce_events_newest(acc, fun) when is_function(fun, 2) do
|
||||||
|
:ok = ensure_started()
|
||||||
|
reduce_events_newest_from(:ets.first(@events_by_time_table), acc, fun)
|
||||||
|
end
|
||||||
|
|
||||||
|
def tagged_events(tag_name, tag_values) when is_binary(tag_name) and is_list(tag_values) do
|
||||||
|
:ok = ensure_started()
|
||||||
|
|
||||||
|
tag_values
|
||||||
|
|> Enum.flat_map(&indexed_events_for_value(@events_by_tag_table, {tag_name, &1}))
|
||||||
|
|> sort_and_deduplicate_events()
|
||||||
|
end
|
||||||
|
|
||||||
|
def events_by_pubkeys(pubkeys) when is_list(pubkeys) do
|
||||||
|
:ok = ensure_started()
|
||||||
|
|
||||||
|
pubkeys
|
||||||
|
|> Enum.flat_map(&indexed_events_for_value(@events_by_pubkey_table, &1))
|
||||||
|
|> sort_and_deduplicate_events()
|
||||||
|
end
|
||||||
|
|
||||||
|
def events_by_kinds(kinds) when is_list(kinds) do
|
||||||
|
:ok = ensure_started()
|
||||||
|
|
||||||
|
kinds
|
||||||
|
|> Enum.flat_map(&indexed_events_for_value(@events_by_kind_table, &1))
|
||||||
|
|> sort_and_deduplicate_events()
|
||||||
|
end
|
||||||
|
|
||||||
|
def events_by_pubkeys_and_kinds(pubkeys, kinds) when is_list(pubkeys) and is_list(kinds) do
|
||||||
|
:ok = ensure_started()
|
||||||
|
|
||||||
|
pubkeys
|
||||||
|
|> Enum.flat_map(fn pubkey ->
|
||||||
|
kinds
|
||||||
|
|> Enum.flat_map(&indexed_events_for_value(@events_by_pubkey_kind_table, {pubkey, &1}))
|
||||||
|
end)
|
||||||
|
|> sort_and_deduplicate_events()
|
||||||
|
end
|
||||||
|
|
||||||
|
def events_by_addresses(addresses) when is_list(addresses) do
|
||||||
|
:ok = ensure_started()
|
||||||
|
|
||||||
|
addresses
|
||||||
|
|> Enum.flat_map(&indexed_events_for_value(@events_by_address_table, &1))
|
||||||
|
|> sort_and_deduplicate_events()
|
||||||
|
end
|
||||||
|
|
||||||
|
defp reduce_events_newest_from(:"$end_of_table", acc, _fun), do: acc
|
||||||
|
|
||||||
|
defp reduce_events_newest_from(key, acc, fun) do
|
||||||
|
next_key = :ets.next(@events_by_time_table, key)
|
||||||
|
acc = reduce_indexed_event(key, acc, fun)
|
||||||
|
|
||||||
|
case acc do
|
||||||
|
{:halt, final_acc} -> final_acc
|
||||||
|
next_acc -> reduce_events_newest_from(next_key, next_acc, fun)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp reduce_indexed_event(key, acc, fun) do
|
||||||
|
case :ets.lookup(@events_by_time_table, key) do
|
||||||
|
[{^key, event_id}] -> apply_reduce_fun(event_id, acc, fun)
|
||||||
|
[] -> acc
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp apply_reduce_fun(event_id, acc, fun) do
|
||||||
|
case lookup_event(event_id) do
|
||||||
|
{:ok, event, false} -> normalize_reduce_result(fun.(event, acc))
|
||||||
|
_other -> acc
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_reduce_result({:halt, next_acc}), do: {:halt, next_acc}
|
||||||
|
defp normalize_reduce_result(next_acc), do: next_acc
|
||||||
|
|
||||||
def get(fun) do
|
def get(fun) do
|
||||||
:ok = ensure_started()
|
:ok = ensure_started()
|
||||||
Agent.get(@name, fun)
|
Agent.get(@name, fun)
|
||||||
@@ -45,4 +172,208 @@ defmodule Parrhesia.Storage.Adapters.Memory.Store do
|
|||||||
:ok = ensure_started()
|
:ok = ensure_started()
|
||||||
Agent.get_and_update(@name, fun)
|
Agent.get_and_update(@name, fun)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp start_store do
|
||||||
|
case Agent.start_link(&init_state/0, name: @name) do
|
||||||
|
{:ok, _pid} -> :ok
|
||||||
|
{:error, {:already_started, _pid}} -> :ok
|
||||||
|
{:error, reason} -> {:error, reason}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp init_state do
|
||||||
|
ensure_tables_started()
|
||||||
|
|
||||||
|
@initial_state
|
||||||
|
end
|
||||||
|
|
||||||
|
defp ensure_tables_started do
|
||||||
|
ensure_table(@events_table, [
|
||||||
|
:named_table,
|
||||||
|
:public,
|
||||||
|
:set,
|
||||||
|
read_concurrency: true,
|
||||||
|
write_concurrency: true
|
||||||
|
])
|
||||||
|
|
||||||
|
ensure_table(@events_by_time_table, [
|
||||||
|
:named_table,
|
||||||
|
:public,
|
||||||
|
:ordered_set,
|
||||||
|
read_concurrency: true,
|
||||||
|
write_concurrency: true
|
||||||
|
])
|
||||||
|
|
||||||
|
ensure_table(@events_by_tag_table, [
|
||||||
|
:named_table,
|
||||||
|
:public,
|
||||||
|
:bag,
|
||||||
|
read_concurrency: true,
|
||||||
|
write_concurrency: true
|
||||||
|
])
|
||||||
|
|
||||||
|
ensure_table(@events_by_pubkey_table, [
|
||||||
|
:named_table,
|
||||||
|
:public,
|
||||||
|
:bag,
|
||||||
|
read_concurrency: true,
|
||||||
|
write_concurrency: true
|
||||||
|
])
|
||||||
|
|
||||||
|
ensure_table(@events_by_kind_table, [
|
||||||
|
:named_table,
|
||||||
|
:public,
|
||||||
|
:bag,
|
||||||
|
read_concurrency: true,
|
||||||
|
write_concurrency: true
|
||||||
|
])
|
||||||
|
|
||||||
|
ensure_table(@events_by_pubkey_kind_table, [
|
||||||
|
:named_table,
|
||||||
|
:public,
|
||||||
|
:bag,
|
||||||
|
read_concurrency: true,
|
||||||
|
write_concurrency: true
|
||||||
|
])
|
||||||
|
|
||||||
|
ensure_table(@events_by_address_table, [
|
||||||
|
:named_table,
|
||||||
|
:public,
|
||||||
|
:bag,
|
||||||
|
read_concurrency: true,
|
||||||
|
write_concurrency: true
|
||||||
|
])
|
||||||
|
end
|
||||||
|
|
||||||
|
defp ensure_table(name, options) do
|
||||||
|
case :ets.whereis(name) do
|
||||||
|
:undefined -> :ets.new(name, options)
|
||||||
|
_table -> :ok
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp lookup_event(event_id) do
|
||||||
|
case :ets.lookup(@events_table, event_id) do
|
||||||
|
[{^event_id, event, deleted?}] -> {:ok, event, deleted?}
|
||||||
|
[] -> :error
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp index_event_tags(event_id, event) do
|
||||||
|
event
|
||||||
|
|> event_tag_index_entries(event_id)
|
||||||
|
|> Enum.each(fn entry ->
|
||||||
|
true = :ets.insert(@events_by_tag_table, entry)
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp index_event_secondary_keys(event_id, event) do
|
||||||
|
event
|
||||||
|
|> secondary_index_entries(event_id)
|
||||||
|
|> Enum.each(fn {table, entry} ->
|
||||||
|
true = :ets.insert(table, entry)
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp unindex_event_tags(event_id, event) do
|
||||||
|
event
|
||||||
|
|> event_tag_index_entries(event_id)
|
||||||
|
|> Enum.each(&:ets.delete_object(@events_by_tag_table, &1))
|
||||||
|
end
|
||||||
|
|
||||||
|
defp unindex_event_secondary_keys(event_id, event) do
|
||||||
|
event
|
||||||
|
|> secondary_index_entries(event_id)
|
||||||
|
|> Enum.each(fn {table, entry} ->
|
||||||
|
:ets.delete_object(table, entry)
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp event_tag_index_entries(event, event_id) do
|
||||||
|
created_sort_key = sort_key(event)
|
||||||
|
|
||||||
|
event
|
||||||
|
|> Map.get("tags", [])
|
||||||
|
|> Enum.flat_map(fn
|
||||||
|
[tag_name, tag_value | _rest] when is_binary(tag_name) and is_binary(tag_value) ->
|
||||||
|
[{{tag_name, tag_value}, created_sort_key, event_id}]
|
||||||
|
|
||||||
|
_tag ->
|
||||||
|
[]
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp secondary_index_entries(event, event_id) do
|
||||||
|
created_sort_key = sort_key(event)
|
||||||
|
pubkey = Map.get(event, "pubkey")
|
||||||
|
kind = Map.get(event, "kind")
|
||||||
|
|
||||||
|
[]
|
||||||
|
|> maybe_put_secondary_entry(@events_by_pubkey_table, pubkey, created_sort_key, event_id)
|
||||||
|
|> maybe_put_secondary_entry(@events_by_kind_table, kind, created_sort_key, event_id)
|
||||||
|
|> maybe_put_pubkey_kind_entry(pubkey, kind, created_sort_key, event_id)
|
||||||
|
|> maybe_put_address_entry(event, pubkey, kind, event_id)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_put_secondary_entry(entries, _table, key, _created_sort_key, _event_id)
|
||||||
|
when is_nil(key),
|
||||||
|
do: entries
|
||||||
|
|
||||||
|
defp maybe_put_secondary_entry(entries, table, key, created_sort_key, event_id) do
|
||||||
|
[{table, {key, created_sort_key, event_id}} | entries]
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_put_pubkey_kind_entry(entries, pubkey, kind, created_sort_key, event_id)
|
||||||
|
when is_binary(pubkey) and is_integer(kind) do
|
||||||
|
[{@events_by_pubkey_kind_table, {{pubkey, kind}, created_sort_key, event_id}} | entries]
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_put_pubkey_kind_entry(entries, _pubkey, _kind, _created_sort_key, _event_id),
|
||||||
|
do: entries
|
||||||
|
|
||||||
|
defp maybe_put_address_entry(entries, event, pubkey, kind, event_id)
|
||||||
|
when is_binary(pubkey) and is_integer(kind) and kind >= 30_000 and kind < 40_000 do
|
||||||
|
d_tag =
|
||||||
|
event
|
||||||
|
|> Map.get("tags", [])
|
||||||
|
|> Enum.find_value("", fn
|
||||||
|
["d", value | _rest] -> value
|
||||||
|
_tag -> nil
|
||||||
|
end)
|
||||||
|
|
||||||
|
[{@events_by_address_table, {{kind, pubkey, d_tag}, sort_key(event), event_id}} | entries]
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_put_address_entry(entries, _event, _pubkey, _kind, _event_id), do: entries
|
||||||
|
|
||||||
|
defp indexed_events_for_value(_table, value)
|
||||||
|
when not is_binary(value) and not is_integer(value) and not is_tuple(value),
|
||||||
|
do: []
|
||||||
|
|
||||||
|
defp indexed_events_for_value(table, value) do
|
||||||
|
table
|
||||||
|
|> :ets.lookup(value)
|
||||||
|
|> Enum.reduce([], fn {^value, _created_sort_key, event_id}, acc ->
|
||||||
|
case lookup_event(event_id) do
|
||||||
|
{:ok, event, false} -> [event | acc]
|
||||||
|
_other -> acc
|
||||||
|
end
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp sort_and_deduplicate_events(events) do
|
||||||
|
events
|
||||||
|
|> Enum.uniq_by(& &1["id"])
|
||||||
|
|> Enum.sort(&chronological_sorter/2)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp chronological_sorter(left, right) do
|
||||||
|
cond do
|
||||||
|
left["created_at"] > right["created_at"] -> true
|
||||||
|
left["created_at"] < right["created_at"] -> false
|
||||||
|
true -> left["id"] < right["id"]
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp sort_key(event), do: -Map.get(event, "created_at", 0)
|
||||||
end
|
end
|
||||||
|
|||||||
279
lib/parrhesia/storage/adapters/postgres/acl.ex
Normal file
279
lib/parrhesia/storage/adapters/postgres/acl.ex
Normal file
@@ -0,0 +1,279 @@
|
|||||||
|
defmodule Parrhesia.Storage.Adapters.Postgres.ACL do
|
||||||
|
@moduledoc """
|
||||||
|
PostgreSQL-backed implementation for `Parrhesia.Storage.ACL`.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import Ecto.Query
|
||||||
|
|
||||||
|
alias Parrhesia.PostgresRepos
|
||||||
|
alias Parrhesia.Repo
|
||||||
|
|
||||||
|
@behaviour Parrhesia.Storage.ACL
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def put_rule(_context, rule) when is_map(rule) do
|
||||||
|
with {:ok, normalized_rule} <- normalize_rule(rule) do
|
||||||
|
normalized_rule
|
||||||
|
|> find_matching_rule()
|
||||||
|
|> maybe_insert_rule(normalized_rule)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def put_rule(_context, _rule), do: {:error, :invalid_acl_rule}
|
||||||
|
|
||||||
|
defp maybe_insert_rule(nil, normalized_rule), do: insert_rule(normalized_rule)
|
||||||
|
defp maybe_insert_rule(existing_rule, _normalized_rule), do: {:ok, existing_rule}
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def delete_rule(_context, selector) when is_map(selector) do
|
||||||
|
case normalize_delete_selector(selector) do
|
||||||
|
{:ok, {:id, id}} ->
|
||||||
|
query = from(rule in "acl_rules", where: rule.id == ^id)
|
||||||
|
{_deleted, _result} = Repo.delete_all(query)
|
||||||
|
:ok
|
||||||
|
|
||||||
|
{:ok, {:exact, rule}} ->
|
||||||
|
query =
|
||||||
|
from(stored_rule in "acl_rules",
|
||||||
|
where:
|
||||||
|
stored_rule.principal_type == ^rule.principal_type and
|
||||||
|
stored_rule.principal == ^rule.principal and
|
||||||
|
stored_rule.capability == ^rule.capability and
|
||||||
|
stored_rule.match == ^rule.match
|
||||||
|
)
|
||||||
|
|
||||||
|
{_deleted, _result} = Repo.delete_all(query)
|
||||||
|
:ok
|
||||||
|
|
||||||
|
{:error, reason} ->
|
||||||
|
{:error, reason}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def delete_rule(_context, _selector), do: {:error, :invalid_acl_rule}
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def list_rules(_context, opts) when is_list(opts) do
|
||||||
|
query =
|
||||||
|
from(rule in "acl_rules",
|
||||||
|
order_by: [
|
||||||
|
asc: rule.principal_type,
|
||||||
|
asc: rule.principal,
|
||||||
|
asc: rule.capability,
|
||||||
|
asc: rule.id
|
||||||
|
],
|
||||||
|
select: %{
|
||||||
|
id: rule.id,
|
||||||
|
principal_type: rule.principal_type,
|
||||||
|
principal: rule.principal,
|
||||||
|
capability: rule.capability,
|
||||||
|
match: rule.match,
|
||||||
|
inserted_at: rule.inserted_at
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|> maybe_filter_principal_type(Keyword.get(opts, :principal_type))
|
||||||
|
|> maybe_filter_principal(Keyword.get(opts, :principal))
|
||||||
|
|> maybe_filter_capability(Keyword.get(opts, :capability))
|
||||||
|
|
||||||
|
repo = read_repo()
|
||||||
|
{:ok, Enum.map(repo.all(query), &normalize_persisted_rule/1)}
|
||||||
|
end
|
||||||
|
|
||||||
|
def list_rules(_context, _opts), do: {:error, :invalid_opts}
|
||||||
|
|
||||||
|
defp maybe_filter_principal_type(query, nil), do: query
|
||||||
|
|
||||||
|
defp maybe_filter_principal_type(query, principal_type) when is_atom(principal_type) do
|
||||||
|
maybe_filter_principal_type(query, Atom.to_string(principal_type))
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_filter_principal_type(query, principal_type) when is_binary(principal_type) do
|
||||||
|
where(query, [rule], rule.principal_type == ^principal_type)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_filter_principal_type(query, _principal_type), do: query
|
||||||
|
|
||||||
|
defp maybe_filter_principal(query, nil), do: query
|
||||||
|
|
||||||
|
defp maybe_filter_principal(query, principal) when is_binary(principal) do
|
||||||
|
case decode_hex_or_binary(principal, 32, :invalid_acl_principal) do
|
||||||
|
{:ok, decoded_principal} -> where(query, [rule], rule.principal == ^decoded_principal)
|
||||||
|
{:error, _reason} -> where(query, [rule], false)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_filter_principal(query, _principal), do: query
|
||||||
|
|
||||||
|
defp maybe_filter_capability(query, nil), do: query
|
||||||
|
|
||||||
|
defp maybe_filter_capability(query, capability) when is_atom(capability) do
|
||||||
|
maybe_filter_capability(query, Atom.to_string(capability))
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_filter_capability(query, capability) when is_binary(capability) do
|
||||||
|
where(query, [rule], rule.capability == ^capability)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_filter_capability(query, _capability), do: query
|
||||||
|
|
||||||
|
defp find_matching_rule(normalized_rule) do
|
||||||
|
query =
|
||||||
|
from(stored_rule in "acl_rules",
|
||||||
|
where:
|
||||||
|
stored_rule.principal_type == ^normalized_rule.principal_type and
|
||||||
|
stored_rule.principal == ^normalized_rule.principal and
|
||||||
|
stored_rule.capability == ^normalized_rule.capability and
|
||||||
|
stored_rule.match == ^normalized_rule.match,
|
||||||
|
limit: 1,
|
||||||
|
select: %{
|
||||||
|
id: stored_rule.id,
|
||||||
|
principal_type: stored_rule.principal_type,
|
||||||
|
principal: stored_rule.principal,
|
||||||
|
capability: stored_rule.capability,
|
||||||
|
match: stored_rule.match,
|
||||||
|
inserted_at: stored_rule.inserted_at
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
repo = read_repo()
|
||||||
|
|
||||||
|
case repo.one(query) do
|
||||||
|
nil -> nil
|
||||||
|
stored_rule -> normalize_persisted_rule(stored_rule)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp read_repo, do: PostgresRepos.read()
|
||||||
|
|
||||||
|
defp insert_rule(normalized_rule) do
|
||||||
|
now = DateTime.utc_now() |> DateTime.truncate(:microsecond)
|
||||||
|
|
||||||
|
row = %{
|
||||||
|
principal_type: normalized_rule.principal_type,
|
||||||
|
principal: normalized_rule.principal,
|
||||||
|
capability: normalized_rule.capability,
|
||||||
|
match: normalized_rule.match,
|
||||||
|
inserted_at: now
|
||||||
|
}
|
||||||
|
|
||||||
|
case Repo.insert_all("acl_rules", [row], returning: [:id, :inserted_at]) do
|
||||||
|
{1, [inserted_row]} ->
|
||||||
|
{:ok, normalize_persisted_rule(Map.merge(row, Map.new(inserted_row)))}
|
||||||
|
|
||||||
|
_other ->
|
||||||
|
{:error, :acl_rule_insert_failed}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_persisted_rule(rule) do
|
||||||
|
%{
|
||||||
|
id: rule.id,
|
||||||
|
principal_type: normalize_principal_type(rule.principal_type),
|
||||||
|
principal: Base.encode16(rule.principal, case: :lower),
|
||||||
|
capability: normalize_capability(rule.capability),
|
||||||
|
match: normalize_match(rule.match),
|
||||||
|
inserted_at: rule.inserted_at
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_delete_selector(%{"id" => id}), do: normalize_delete_selector(%{id: id})
|
||||||
|
|
||||||
|
defp normalize_delete_selector(%{id: id}) when is_integer(id) and id > 0,
|
||||||
|
do: {:ok, {:id, id}}
|
||||||
|
|
||||||
|
defp normalize_delete_selector(selector) do
|
||||||
|
case normalize_rule(selector) do
|
||||||
|
{:ok, normalized_rule} -> {:ok, {:exact, normalized_rule}}
|
||||||
|
{:error, reason} -> {:error, reason}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_rule(rule) when is_map(rule) do
|
||||||
|
with {:ok, principal_type} <- normalize_principal_type_value(fetch(rule, :principal_type)),
|
||||||
|
{:ok, principal} <-
|
||||||
|
decode_hex_or_binary(fetch(rule, :principal), 32, :invalid_acl_principal),
|
||||||
|
{:ok, capability} <- normalize_capability_value(fetch(rule, :capability)),
|
||||||
|
{:ok, match} <- normalize_match_value(fetch(rule, :match)) do
|
||||||
|
{:ok,
|
||||||
|
%{
|
||||||
|
principal_type: principal_type,
|
||||||
|
principal: principal,
|
||||||
|
capability: capability,
|
||||||
|
match: match
|
||||||
|
}}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_rule(_rule), do: {:error, :invalid_acl_rule}
|
||||||
|
|
||||||
|
defp normalize_principal_type("pubkey"), do: :pubkey
|
||||||
|
defp normalize_principal_type(principal_type), do: principal_type
|
||||||
|
|
||||||
|
defp normalize_capability("sync_read"), do: :sync_read
|
||||||
|
defp normalize_capability("sync_write"), do: :sync_write
|
||||||
|
defp normalize_capability(capability), do: capability
|
||||||
|
|
||||||
|
defp normalize_principal_type_value(:pubkey), do: {:ok, "pubkey"}
|
||||||
|
defp normalize_principal_type_value("pubkey"), do: {:ok, "pubkey"}
|
||||||
|
defp normalize_principal_type_value(_principal_type), do: {:error, :invalid_acl_principal_type}
|
||||||
|
|
||||||
|
defp normalize_capability_value(:sync_read), do: {:ok, "sync_read"}
|
||||||
|
defp normalize_capability_value(:sync_write), do: {:ok, "sync_write"}
|
||||||
|
defp normalize_capability_value("sync_read"), do: {:ok, "sync_read"}
|
||||||
|
defp normalize_capability_value("sync_write"), do: {:ok, "sync_write"}
|
||||||
|
defp normalize_capability_value(_capability), do: {:error, :invalid_acl_capability}
|
||||||
|
|
||||||
|
defp normalize_match_value(match) when is_map(match) do
|
||||||
|
normalized_match =
|
||||||
|
Enum.reduce(match, %{}, fn
|
||||||
|
{key, values}, acc when is_binary(key) ->
|
||||||
|
Map.put(acc, key, values)
|
||||||
|
|
||||||
|
{key, values}, acc when is_atom(key) ->
|
||||||
|
Map.put(acc, Atom.to_string(key), values)
|
||||||
|
|
||||||
|
_entry, acc ->
|
||||||
|
acc
|
||||||
|
end)
|
||||||
|
|
||||||
|
{:ok, normalize_match(normalized_match)}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_match_value(_match), do: {:error, :invalid_acl_match}
|
||||||
|
|
||||||
|
defp normalize_match(match) when is_map(match) do
|
||||||
|
Enum.reduce(match, %{}, fn
|
||||||
|
{key, values}, acc when is_binary(key) and is_list(values) ->
|
||||||
|
Map.put(acc, key, Enum.uniq(values))
|
||||||
|
|
||||||
|
{key, value}, acc when is_binary(key) ->
|
||||||
|
Map.put(acc, key, value)
|
||||||
|
|
||||||
|
_entry, acc ->
|
||||||
|
acc
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp normalize_match(_match), do: %{}
|
||||||
|
|
||||||
|
defp fetch(map, key) do
|
||||||
|
Map.get(map, key) || Map.get(map, Atom.to_string(key))
|
||||||
|
end
|
||||||
|
|
||||||
|
defp decode_hex_or_binary(value, expected_bytes, _reason)
|
||||||
|
when is_binary(value) and byte_size(value) == expected_bytes,
|
||||||
|
do: {:ok, value}
|
||||||
|
|
||||||
|
defp decode_hex_or_binary(value, expected_bytes, reason) when is_binary(value) do
|
||||||
|
if byte_size(value) == expected_bytes * 2 do
|
||||||
|
case Base.decode16(value, case: :mixed) do
|
||||||
|
{:ok, decoded} -> {:ok, decoded}
|
||||||
|
:error -> {:error, reason}
|
||||||
|
end
|
||||||
|
else
|
||||||
|
{:error, reason}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp decode_hex_or_binary(_value, _expected_bytes, reason), do: {:error, reason}
|
||||||
|
end
|
||||||
@@ -5,6 +5,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Admin do
|
|||||||
|
|
||||||
import Ecto.Query
|
import Ecto.Query
|
||||||
|
|
||||||
|
alias Parrhesia.PostgresRepos
|
||||||
alias Parrhesia.Repo
|
alias Parrhesia.Repo
|
||||||
|
|
||||||
@behaviour Parrhesia.Storage.Admin
|
@behaviour Parrhesia.Storage.Admin
|
||||||
@@ -20,6 +21,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Admin do
|
|||||||
case method_name do
|
case method_name do
|
||||||
"ping" -> {:ok, %{"status" => "ok"}}
|
"ping" -> {:ok, %{"status" => "ok"}}
|
||||||
"stats" -> {:ok, relay_stats()}
|
"stats" -> {:ok, relay_stats()}
|
||||||
|
"supportedmethods" -> {:ok, %{"methods" => supported_methods()}}
|
||||||
"list_audit_logs" -> list_audit_logs(%{}, audit_list_opts(params))
|
"list_audit_logs" -> list_audit_logs(%{}, audit_list_opts(params))
|
||||||
_other -> execute_moderation_method(moderation, method_name, params)
|
_other -> execute_moderation_method(moderation, method_name, params)
|
||||||
end
|
end
|
||||||
@@ -72,8 +74,8 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Admin do
|
|||||||
|> maybe_filter_actor_pubkey(Keyword.get(opts, :actor_pubkey))
|
|> maybe_filter_actor_pubkey(Keyword.get(opts, :actor_pubkey))
|
||||||
|
|
||||||
logs =
|
logs =
|
||||||
query
|
read_repo()
|
||||||
|> Repo.all()
|
|> then(fn repo -> repo.all(query) end)
|
||||||
|> Enum.map(&to_audit_log_map/1)
|
|> Enum.map(&to_audit_log_map/1)
|
||||||
|
|
||||||
{:ok, logs}
|
{:ok, logs}
|
||||||
@@ -82,17 +84,39 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Admin do
|
|||||||
def list_audit_logs(_context, _opts), do: {:error, :invalid_opts}
|
def list_audit_logs(_context, _opts), do: {:error, :invalid_opts}
|
||||||
|
|
||||||
defp relay_stats do
|
defp relay_stats do
|
||||||
events_count = Repo.aggregate("events", :count, :id)
|
repo = read_repo()
|
||||||
banned_pubkeys = Repo.aggregate("banned_pubkeys", :count, :pubkey)
|
events_count = repo.aggregate("events", :count, :id)
|
||||||
blocked_ips = Repo.aggregate("blocked_ips", :count, :ip)
|
banned_pubkeys = repo.aggregate("banned_pubkeys", :count, :pubkey)
|
||||||
|
allowed_pubkeys = repo.aggregate("allowed_pubkeys", :count, :pubkey)
|
||||||
|
blocked_ips = repo.aggregate("blocked_ips", :count, :ip)
|
||||||
|
acl_rules = repo.aggregate("acl_rules", :count, :id)
|
||||||
|
|
||||||
%{
|
%{
|
||||||
"events" => events_count,
|
"events" => events_count,
|
||||||
"banned_pubkeys" => banned_pubkeys,
|
"banned_pubkeys" => banned_pubkeys,
|
||||||
|
"allowed_pubkeys" => allowed_pubkeys,
|
||||||
|
"acl_rules" => acl_rules,
|
||||||
"blocked_ips" => blocked_ips
|
"blocked_ips" => blocked_ips
|
||||||
}
|
}
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp supported_methods do
|
||||||
|
[
|
||||||
|
"allow_pubkey",
|
||||||
|
"ban_event",
|
||||||
|
"ban_pubkey",
|
||||||
|
"block_ip",
|
||||||
|
"disallow_pubkey",
|
||||||
|
"list_audit_logs",
|
||||||
|
"ping",
|
||||||
|
"stats",
|
||||||
|
"supportedmethods",
|
||||||
|
"unban_event",
|
||||||
|
"unban_pubkey",
|
||||||
|
"unblock_ip"
|
||||||
|
]
|
||||||
|
end
|
||||||
|
|
||||||
defp execute_moderation_method(moderation, "ban_pubkey", params),
|
defp execute_moderation_method(moderation, "ban_pubkey", params),
|
||||||
do: execute_pubkey_method(fn ctx, value -> moderation.ban_pubkey(ctx, value) end, params)
|
do: execute_pubkey_method(fn ctx, value -> moderation.ban_pubkey(ctx, value) end, params)
|
||||||
|
|
||||||
@@ -212,6 +236,8 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Admin do
|
|||||||
|
|
||||||
defp normalize_pubkey(_value), do: {:error, :invalid_actor_pubkey}
|
defp normalize_pubkey(_value), do: {:error, :invalid_actor_pubkey}
|
||||||
|
|
||||||
|
defp read_repo, do: PostgresRepos.read()
|
||||||
|
|
||||||
defp invalid_key_reason(:params), do: :invalid_params
|
defp invalid_key_reason(:params), do: :invalid_params
|
||||||
defp invalid_key_reason(:result), do: :invalid_result
|
defp invalid_key_reason(:result), do: :invalid_result
|
||||||
|
|
||||||
|
|||||||
@@ -5,10 +5,16 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
|||||||
|
|
||||||
import Ecto.Query
|
import Ecto.Query
|
||||||
|
|
||||||
|
alias Parrhesia.PostgresRepos
|
||||||
alias Parrhesia.Protocol.Filter
|
alias Parrhesia.Protocol.Filter
|
||||||
alias Parrhesia.Repo
|
alias Parrhesia.Repo
|
||||||
|
|
||||||
@behaviour Parrhesia.Storage.Events
|
@behaviour Parrhesia.Storage.Events
|
||||||
|
@trigram_fallback_max_single_term_length 4
|
||||||
|
@trigram_fallback_pattern ~r/[^\p{L}\p{N}\s"]/u
|
||||||
|
@fts_match_fragment "to_tsvector('simple', ?) @@ websearch_to_tsquery('simple', ?)"
|
||||||
|
@fts_rank_fragment "ts_rank_cd(to_tsvector('simple', ?), websearch_to_tsquery('simple', ?))"
|
||||||
|
@trigram_rank_fragment "word_similarity(lower(?), lower(?))"
|
||||||
|
|
||||||
@type normalized_event :: %{
|
@type normalized_event :: %{
|
||||||
id: binary(),
|
id: binary(),
|
||||||
@@ -62,7 +68,9 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
case Repo.one(event_query) do
|
repo = read_repo()
|
||||||
|
|
||||||
|
case repo.one(event_query) do
|
||||||
nil ->
|
nil ->
|
||||||
{:ok, nil}
|
{:ok, nil}
|
||||||
|
|
||||||
@@ -76,16 +84,17 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
|||||||
def query(_context, filters, opts) when is_list(opts) do
|
def query(_context, filters, opts) when is_list(opts) do
|
||||||
with :ok <- Filter.validate_filters(filters) do
|
with :ok <- Filter.validate_filters(filters) do
|
||||||
now = Keyword.get(opts, :now, System.system_time(:second))
|
now = Keyword.get(opts, :now, System.system_time(:second))
|
||||||
|
repo = read_repo()
|
||||||
|
|
||||||
persisted_events =
|
persisted_events =
|
||||||
filters
|
filters
|
||||||
|> Enum.flat_map(fn filter ->
|
|> Enum.flat_map(fn filter ->
|
||||||
filter
|
filter
|
||||||
|> event_query_for_filter(now, opts)
|
|> event_query_for_filter(now, opts)
|
||||||
|> Repo.all()
|
|> repo.all()
|
||||||
end)
|
end)
|
||||||
|> deduplicate_events()
|
|> deduplicate_events()
|
||||||
|> sort_persisted_events()
|
|> sort_persisted_events(filters)
|
||||||
|> maybe_apply_query_limit(opts)
|
|> maybe_apply_query_limit(opts)
|
||||||
|
|
||||||
{:ok, Enum.map(persisted_events, &to_nostr_event/1)}
|
{:ok, Enum.map(persisted_events, &to_nostr_event/1)}
|
||||||
@@ -94,21 +103,21 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
|||||||
|
|
||||||
def query(_context, _filters, _opts), do: {:error, :invalid_opts}
|
def query(_context, _filters, _opts), do: {:error, :invalid_opts}
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def query_event_refs(_context, filters, opts) when is_list(opts) do
|
||||||
|
with :ok <- Filter.validate_filters(filters) do
|
||||||
|
now = Keyword.get(opts, :now, System.system_time(:second))
|
||||||
|
{:ok, fetch_event_refs(filters, now, opts)}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def query_event_refs(_context, _filters, _opts), do: {:error, :invalid_opts}
|
||||||
|
|
||||||
@impl true
|
@impl true
|
||||||
def count(_context, filters, opts) when is_list(opts) do
|
def count(_context, filters, opts) when is_list(opts) do
|
||||||
with :ok <- Filter.validate_filters(filters) do
|
with :ok <- Filter.validate_filters(filters) do
|
||||||
now = Keyword.get(opts, :now, System.system_time(:second))
|
now = Keyword.get(opts, :now, System.system_time(:second))
|
||||||
|
{:ok, count_events(filters, now, opts)}
|
||||||
total_count =
|
|
||||||
filters
|
|
||||||
|> event_id_union_query_for_filters(now, opts)
|
|
||||||
|> subquery()
|
|
||||||
|> then(fn union_query ->
|
|
||||||
from(event in union_query, select: count(event.id, :distinct))
|
|
||||||
end)
|
|
||||||
|> Repo.one()
|
|
||||||
|
|
||||||
{:ok, total_count}
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
@@ -360,30 +369,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
|||||||
|
|
||||||
defp maybe_upsert_replaceable_state(normalized_event, now, deleted_at) do
|
defp maybe_upsert_replaceable_state(normalized_event, now, deleted_at) do
|
||||||
if replaceable_kind?(normalized_event.kind) do
|
if replaceable_kind?(normalized_event.kind) do
|
||||||
lookup_query =
|
upsert_replaceable_state_table(normalized_event, now, deleted_at)
|
||||||
from(state in "replaceable_event_state",
|
|
||||||
where:
|
|
||||||
state.pubkey == ^normalized_event.pubkey and state.kind == ^normalized_event.kind,
|
|
||||||
select: %{event_created_at: state.event_created_at, event_id: state.event_id}
|
|
||||||
)
|
|
||||||
|
|
||||||
update_query =
|
|
||||||
from(state in "replaceable_event_state",
|
|
||||||
where:
|
|
||||||
state.pubkey == ^normalized_event.pubkey and
|
|
||||||
state.kind == ^normalized_event.kind
|
|
||||||
)
|
|
||||||
|
|
||||||
upsert_state_table(
|
|
||||||
"replaceable_event_state",
|
|
||||||
lookup_query,
|
|
||||||
update_query,
|
|
||||||
replaceable_state_row(normalized_event, now),
|
|
||||||
normalized_event,
|
|
||||||
now,
|
|
||||||
deleted_at,
|
|
||||||
:replaceable_state_update_failed
|
|
||||||
)
|
|
||||||
else
|
else
|
||||||
:ok
|
:ok
|
||||||
end
|
end
|
||||||
@@ -391,159 +377,94 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
|||||||
|
|
||||||
defp maybe_upsert_addressable_state(normalized_event, now, deleted_at) do
|
defp maybe_upsert_addressable_state(normalized_event, now, deleted_at) do
|
||||||
if addressable_kind?(normalized_event.kind) do
|
if addressable_kind?(normalized_event.kind) do
|
||||||
lookup_query =
|
upsert_addressable_state_table(normalized_event, now, deleted_at)
|
||||||
from(state in "addressable_event_state",
|
|
||||||
where:
|
|
||||||
state.pubkey == ^normalized_event.pubkey and
|
|
||||||
state.kind == ^normalized_event.kind and
|
|
||||||
state.d_tag == ^normalized_event.d_tag,
|
|
||||||
select: %{event_created_at: state.event_created_at, event_id: state.event_id}
|
|
||||||
)
|
|
||||||
|
|
||||||
update_query =
|
|
||||||
from(state in "addressable_event_state",
|
|
||||||
where:
|
|
||||||
state.pubkey == ^normalized_event.pubkey and
|
|
||||||
state.kind == ^normalized_event.kind and
|
|
||||||
state.d_tag == ^normalized_event.d_tag
|
|
||||||
)
|
|
||||||
|
|
||||||
upsert_state_table(
|
|
||||||
"addressable_event_state",
|
|
||||||
lookup_query,
|
|
||||||
update_query,
|
|
||||||
addressable_state_row(normalized_event, now),
|
|
||||||
normalized_event,
|
|
||||||
now,
|
|
||||||
deleted_at,
|
|
||||||
:addressable_state_update_failed
|
|
||||||
)
|
|
||||||
else
|
else
|
||||||
:ok
|
:ok
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
defp upsert_state_table(
|
defp upsert_replaceable_state_table(normalized_event, now, deleted_at) do
|
||||||
table_name,
|
params = [
|
||||||
lookup_query,
|
normalized_event.pubkey,
|
||||||
update_query,
|
normalized_event.kind,
|
||||||
insert_row,
|
normalized_event.created_at,
|
||||||
normalized_event,
|
normalized_event.id,
|
||||||
now,
|
now,
|
||||||
deleted_at,
|
now
|
||||||
failure_reason
|
]
|
||||||
) do
|
|
||||||
case Repo.one(lookup_query) do
|
|
||||||
nil ->
|
|
||||||
insert_state_or_resolve_race(
|
|
||||||
table_name,
|
|
||||||
lookup_query,
|
|
||||||
update_query,
|
|
||||||
insert_row,
|
|
||||||
normalized_event,
|
|
||||||
now,
|
|
||||||
deleted_at,
|
|
||||||
failure_reason
|
|
||||||
)
|
|
||||||
|
|
||||||
current_state ->
|
case Repo.query(replaceable_state_upsert_sql(), params) do
|
||||||
maybe_update_state(
|
{:ok, %{rows: [row]}} ->
|
||||||
update_query,
|
finalize_state_upsert(row, normalized_event, deleted_at, :replaceable_state_update_failed)
|
||||||
normalized_event,
|
|
||||||
current_state,
|
{:ok, _result} ->
|
||||||
now,
|
Repo.rollback(:replaceable_state_update_failed)
|
||||||
deleted_at,
|
|
||||||
failure_reason
|
{:error, _reason} ->
|
||||||
)
|
Repo.rollback(:replaceable_state_update_failed)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
defp insert_state_or_resolve_race(
|
defp upsert_addressable_state_table(normalized_event, now, deleted_at) do
|
||||||
table_name,
|
params = [
|
||||||
lookup_query,
|
normalized_event.pubkey,
|
||||||
update_query,
|
normalized_event.kind,
|
||||||
insert_row,
|
normalized_event.d_tag,
|
||||||
|
normalized_event.created_at,
|
||||||
|
normalized_event.id,
|
||||||
|
now,
|
||||||
|
now
|
||||||
|
]
|
||||||
|
|
||||||
|
case Repo.query(addressable_state_upsert_sql(), params) do
|
||||||
|
{:ok, %{rows: [row]}} ->
|
||||||
|
finalize_state_upsert(row, normalized_event, deleted_at, :addressable_state_update_failed)
|
||||||
|
|
||||||
|
{:ok, _result} ->
|
||||||
|
Repo.rollback(:addressable_state_update_failed)
|
||||||
|
|
||||||
|
{:error, _reason} ->
|
||||||
|
Repo.rollback(:addressable_state_update_failed)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp finalize_state_upsert(
|
||||||
|
[retired_event_created_at, retired_event_id, winner_event_created_at, winner_event_id],
|
||||||
normalized_event,
|
normalized_event,
|
||||||
now,
|
|
||||||
deleted_at,
|
deleted_at,
|
||||||
failure_reason
|
failure_reason
|
||||||
) do
|
) do
|
||||||
case Repo.insert_all(table_name, [insert_row], on_conflict: :nothing) do
|
case {winner_event_created_at, winner_event_id} do
|
||||||
{1, _result} ->
|
{created_at, event_id}
|
||||||
:ok
|
when created_at == normalized_event.created_at and event_id == normalized_event.id ->
|
||||||
|
maybe_retire_previous_state_event(
|
||||||
{0, _result} ->
|
retired_event_created_at,
|
||||||
resolve_state_race(
|
retired_event_id,
|
||||||
lookup_query,
|
|
||||||
update_query,
|
|
||||||
normalized_event,
|
|
||||||
now,
|
|
||||||
deleted_at,
|
deleted_at,
|
||||||
failure_reason
|
failure_reason
|
||||||
)
|
)
|
||||||
|
|
||||||
{_inserted, _result} ->
|
{_created_at, _event_id} ->
|
||||||
Repo.rollback(failure_reason)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
defp resolve_state_race(
|
|
||||||
lookup_query,
|
|
||||||
update_query,
|
|
||||||
normalized_event,
|
|
||||||
now,
|
|
||||||
deleted_at,
|
|
||||||
failure_reason
|
|
||||||
) do
|
|
||||||
case Repo.one(lookup_query) do
|
|
||||||
nil ->
|
|
||||||
Repo.rollback(failure_reason)
|
|
||||||
|
|
||||||
current_state ->
|
|
||||||
maybe_update_state(
|
|
||||||
update_query,
|
|
||||||
normalized_event,
|
|
||||||
current_state,
|
|
||||||
now,
|
|
||||||
deleted_at,
|
|
||||||
failure_reason
|
|
||||||
)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
defp maybe_update_state(
|
|
||||||
update_query,
|
|
||||||
normalized_event,
|
|
||||||
current_state,
|
|
||||||
now,
|
|
||||||
deleted_at,
|
|
||||||
failure_reason
|
|
||||||
) do
|
|
||||||
if candidate_wins_state?(normalized_event, current_state) do
|
|
||||||
{updated, _result} =
|
|
||||||
Repo.update_all(update_query,
|
|
||||||
set: [
|
|
||||||
event_created_at: normalized_event.created_at,
|
|
||||||
event_id: normalized_event.id,
|
|
||||||
updated_at: now
|
|
||||||
]
|
|
||||||
)
|
|
||||||
|
|
||||||
if updated == 1 do
|
|
||||||
retire_event!(
|
retire_event!(
|
||||||
current_state.event_created_at,
|
normalized_event.created_at,
|
||||||
current_state.event_id,
|
normalized_event.id,
|
||||||
deleted_at,
|
deleted_at,
|
||||||
failure_reason
|
failure_reason
|
||||||
)
|
)
|
||||||
else
|
|
||||||
Repo.rollback(failure_reason)
|
|
||||||
end
|
|
||||||
else
|
|
||||||
retire_event!(normalized_event.created_at, normalized_event.id, deleted_at, failure_reason)
|
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp maybe_retire_previous_state_event(nil, nil, _deleted_at, _failure_reason), do: :ok
|
||||||
|
|
||||||
|
defp maybe_retire_previous_state_event(
|
||||||
|
retired_event_created_at,
|
||||||
|
retired_event_id,
|
||||||
|
deleted_at,
|
||||||
|
failure_reason
|
||||||
|
) do
|
||||||
|
retire_event!(retired_event_created_at, retired_event_id, deleted_at, failure_reason)
|
||||||
|
end
|
||||||
|
|
||||||
defp retire_event!(event_created_at, event_id, deleted_at, failure_reason) do
|
defp retire_event!(event_created_at, event_id, deleted_at, failure_reason) do
|
||||||
{updated, _result} =
|
{updated, _result} =
|
||||||
Repo.update_all(
|
Repo.update_all(
|
||||||
@@ -567,27 +488,147 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
|||||||
|
|
||||||
defp addressable_kind?(kind), do: kind >= 30_000 and kind < 40_000
|
defp addressable_kind?(kind), do: kind >= 30_000 and kind < 40_000
|
||||||
|
|
||||||
defp replaceable_state_row(normalized_event, now) do
|
defp replaceable_state_upsert_sql do
|
||||||
%{
|
"""
|
||||||
pubkey: normalized_event.pubkey,
|
WITH inserted AS (
|
||||||
kind: normalized_event.kind,
|
INSERT INTO replaceable_event_state (
|
||||||
event_created_at: normalized_event.created_at,
|
pubkey,
|
||||||
event_id: normalized_event.id,
|
kind,
|
||||||
inserted_at: now,
|
event_created_at,
|
||||||
updated_at: now
|
event_id,
|
||||||
}
|
inserted_at,
|
||||||
|
updated_at
|
||||||
|
)
|
||||||
|
VALUES ($1, $2, $3, $4, $5, $6)
|
||||||
|
ON CONFLICT (pubkey, kind) DO NOTHING
|
||||||
|
RETURNING
|
||||||
|
NULL::bigint AS retired_event_created_at,
|
||||||
|
NULL::bytea AS retired_event_id,
|
||||||
|
event_created_at AS winner_event_created_at,
|
||||||
|
event_id AS winner_event_id
|
||||||
|
),
|
||||||
|
updated AS (
|
||||||
|
UPDATE replaceable_event_state AS state
|
||||||
|
SET
|
||||||
|
event_created_at = $3,
|
||||||
|
event_id = $4,
|
||||||
|
updated_at = $6
|
||||||
|
FROM (
|
||||||
|
SELECT current.event_created_at, current.event_id
|
||||||
|
FROM replaceable_event_state AS current
|
||||||
|
WHERE current.pubkey = $1 AND current.kind = $2
|
||||||
|
FOR UPDATE
|
||||||
|
) AS previous
|
||||||
|
WHERE
|
||||||
|
NOT EXISTS (SELECT 1 FROM inserted)
|
||||||
|
AND state.pubkey = $1
|
||||||
|
AND state.kind = $2
|
||||||
|
AND (
|
||||||
|
state.event_created_at < $3
|
||||||
|
OR (state.event_created_at = $3 AND state.event_id > $4)
|
||||||
|
)
|
||||||
|
RETURNING
|
||||||
|
previous.event_created_at AS retired_event_created_at,
|
||||||
|
previous.event_id AS retired_event_id,
|
||||||
|
state.event_created_at AS winner_event_created_at,
|
||||||
|
state.event_id AS winner_event_id
|
||||||
|
),
|
||||||
|
current AS (
|
||||||
|
SELECT
|
||||||
|
NULL::bigint AS retired_event_created_at,
|
||||||
|
NULL::bytea AS retired_event_id,
|
||||||
|
state.event_created_at AS winner_event_created_at,
|
||||||
|
state.event_id AS winner_event_id
|
||||||
|
FROM replaceable_event_state AS state
|
||||||
|
WHERE
|
||||||
|
NOT EXISTS (SELECT 1 FROM inserted)
|
||||||
|
AND NOT EXISTS (SELECT 1 FROM updated)
|
||||||
|
AND state.pubkey = $1
|
||||||
|
AND state.kind = $2
|
||||||
|
)
|
||||||
|
SELECT *
|
||||||
|
FROM inserted
|
||||||
|
UNION ALL
|
||||||
|
SELECT *
|
||||||
|
FROM updated
|
||||||
|
UNION ALL
|
||||||
|
SELECT *
|
||||||
|
FROM current
|
||||||
|
LIMIT 1
|
||||||
|
"""
|
||||||
end
|
end
|
||||||
|
|
||||||
defp addressable_state_row(normalized_event, now) do
|
defp addressable_state_upsert_sql do
|
||||||
%{
|
"""
|
||||||
pubkey: normalized_event.pubkey,
|
WITH inserted AS (
|
||||||
kind: normalized_event.kind,
|
INSERT INTO addressable_event_state (
|
||||||
d_tag: normalized_event.d_tag,
|
pubkey,
|
||||||
event_created_at: normalized_event.created_at,
|
kind,
|
||||||
event_id: normalized_event.id,
|
d_tag,
|
||||||
inserted_at: now,
|
event_created_at,
|
||||||
updated_at: now
|
event_id,
|
||||||
}
|
inserted_at,
|
||||||
|
updated_at
|
||||||
|
)
|
||||||
|
VALUES ($1, $2, $3, $4, $5, $6, $7)
|
||||||
|
ON CONFLICT (pubkey, kind, d_tag) DO NOTHING
|
||||||
|
RETURNING
|
||||||
|
NULL::bigint AS retired_event_created_at,
|
||||||
|
NULL::bytea AS retired_event_id,
|
||||||
|
event_created_at AS winner_event_created_at,
|
||||||
|
event_id AS winner_event_id
|
||||||
|
),
|
||||||
|
updated AS (
|
||||||
|
UPDATE addressable_event_state AS state
|
||||||
|
SET
|
||||||
|
event_created_at = $4,
|
||||||
|
event_id = $5,
|
||||||
|
updated_at = $7
|
||||||
|
FROM (
|
||||||
|
SELECT current.event_created_at, current.event_id
|
||||||
|
FROM addressable_event_state AS current
|
||||||
|
WHERE current.pubkey = $1 AND current.kind = $2 AND current.d_tag = $3
|
||||||
|
FOR UPDATE
|
||||||
|
) AS previous
|
||||||
|
WHERE
|
||||||
|
NOT EXISTS (SELECT 1 FROM inserted)
|
||||||
|
AND state.pubkey = $1
|
||||||
|
AND state.kind = $2
|
||||||
|
AND state.d_tag = $3
|
||||||
|
AND (
|
||||||
|
state.event_created_at < $4
|
||||||
|
OR (state.event_created_at = $4 AND state.event_id > $5)
|
||||||
|
)
|
||||||
|
RETURNING
|
||||||
|
previous.event_created_at AS retired_event_created_at,
|
||||||
|
previous.event_id AS retired_event_id,
|
||||||
|
state.event_created_at AS winner_event_created_at,
|
||||||
|
state.event_id AS winner_event_id
|
||||||
|
),
|
||||||
|
current AS (
|
||||||
|
SELECT
|
||||||
|
NULL::bigint AS retired_event_created_at,
|
||||||
|
NULL::bytea AS retired_event_id,
|
||||||
|
state.event_created_at AS winner_event_created_at,
|
||||||
|
state.event_id AS winner_event_id
|
||||||
|
FROM addressable_event_state AS state
|
||||||
|
WHERE
|
||||||
|
NOT EXISTS (SELECT 1 FROM inserted)
|
||||||
|
AND NOT EXISTS (SELECT 1 FROM updated)
|
||||||
|
AND state.pubkey = $1
|
||||||
|
AND state.kind = $2
|
||||||
|
AND state.d_tag = $3
|
||||||
|
)
|
||||||
|
SELECT *
|
||||||
|
FROM inserted
|
||||||
|
UNION ALL
|
||||||
|
SELECT *
|
||||||
|
FROM updated
|
||||||
|
UNION ALL
|
||||||
|
SELECT *
|
||||||
|
FROM current
|
||||||
|
LIMIT 1
|
||||||
|
"""
|
||||||
end
|
end
|
||||||
|
|
||||||
defp event_row(normalized_event, now) do
|
defp event_row(normalized_event, now) do
|
||||||
@@ -607,95 +648,219 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
|||||||
end
|
end
|
||||||
|
|
||||||
defp event_query_for_filter(filter, now, opts) do
|
defp event_query_for_filter(filter, now, opts) do
|
||||||
base_query =
|
search_plan = search_plan(Map.get(filter, "search"))
|
||||||
from(event in "events",
|
{base_query, remaining_tag_filters} = event_source_query(filter, now)
|
||||||
where: is_nil(event.deleted_at) and (is_nil(event.expires_at) or event.expires_at > ^now),
|
|
||||||
order_by: [desc: event.created_at, asc: event.id],
|
|
||||||
select: %{
|
|
||||||
id: event.id,
|
|
||||||
pubkey: event.pubkey,
|
|
||||||
created_at: event.created_at,
|
|
||||||
kind: event.kind,
|
|
||||||
tags: event.tags,
|
|
||||||
content: event.content,
|
|
||||||
sig: event.sig
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
query =
|
base_query
|
||||||
base_query
|
|> apply_common_event_filters(filter, remaining_tag_filters, opts, search_plan)
|
||||||
|> maybe_filter_ids(Map.get(filter, "ids"))
|
|> maybe_order_by_search_rank(search_plan)
|
||||||
|> maybe_filter_authors(Map.get(filter, "authors"))
|
|> select([event: event], %{
|
||||||
|> maybe_filter_kinds(Map.get(filter, "kinds"))
|
id: event.id,
|
||||||
|> maybe_filter_since(Map.get(filter, "since"))
|
pubkey: event.pubkey,
|
||||||
|> maybe_filter_until(Map.get(filter, "until"))
|
created_at: event.created_at,
|
||||||
|> maybe_filter_search(Map.get(filter, "search"))
|
kind: event.kind,
|
||||||
|> filter_by_tags(filter)
|
tags: event.tags,
|
||||||
|> maybe_restrict_giftwrap_access(filter, opts)
|
content: event.content,
|
||||||
|
sig: event.sig
|
||||||
maybe_limit_query(query, effective_filter_limit(filter, opts))
|
})
|
||||||
|
|> maybe_select_search_score(search_plan)
|
||||||
|
|> maybe_limit_query(effective_filter_limit(filter, opts))
|
||||||
end
|
end
|
||||||
|
|
||||||
defp event_id_query_for_filter(filter, now, opts) do
|
defp event_id_query_for_filter(filter, now, opts) do
|
||||||
from(event in "events",
|
search_plan = search_plan(Map.get(filter, "search"))
|
||||||
where: is_nil(event.deleted_at) and (is_nil(event.expires_at) or event.expires_at > ^now),
|
{base_query, remaining_tag_filters} = event_source_query(filter, now)
|
||||||
select: event.id
|
|
||||||
)
|
base_query
|
||||||
|> maybe_filter_ids(Map.get(filter, "ids"))
|
|> apply_common_event_filters(filter, remaining_tag_filters, opts, search_plan)
|
||||||
|> maybe_filter_authors(Map.get(filter, "authors"))
|
|> select([event: event], event.id)
|
||||||
|> maybe_filter_kinds(Map.get(filter, "kinds"))
|
|
||||||
|> maybe_filter_since(Map.get(filter, "since"))
|
|
||||||
|> maybe_filter_until(Map.get(filter, "until"))
|
|
||||||
|> maybe_filter_search(Map.get(filter, "search"))
|
|
||||||
|> filter_by_tags(filter)
|
|
||||||
|> maybe_restrict_giftwrap_access(filter, opts)
|
|
||||||
end
|
end
|
||||||
|
|
||||||
defp event_id_union_query_for_filters([], now, _opts) do
|
defp event_id_distinct_union_query_for_filters([], now, _opts) do
|
||||||
from(event in "events",
|
from(event in "events",
|
||||||
where: event.created_at > ^now and event.created_at < ^now,
|
where: event.created_at > ^now and event.created_at < ^now,
|
||||||
select: event.id
|
select: event.id
|
||||||
)
|
)
|
||||||
end
|
end
|
||||||
|
|
||||||
defp event_id_union_query_for_filters([first_filter | rest_filters], now, opts) do
|
defp event_id_distinct_union_query_for_filters([first_filter | rest_filters], now, opts) do
|
||||||
Enum.reduce(rest_filters, event_id_query_for_filter(first_filter, now, opts), fn filter,
|
Enum.reduce(rest_filters, event_id_query_for_filter(first_filter, now, opts), fn filter,
|
||||||
acc ->
|
acc ->
|
||||||
union_all(acc, ^event_id_query_for_filter(filter, now, opts))
|
union(acc, ^event_id_query_for_filter(filter, now, opts))
|
||||||
end)
|
end)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp event_ref_query_for_filter(filter, now, opts) do
|
||||||
|
search_plan = search_plan(Map.get(filter, "search"))
|
||||||
|
{base_query, remaining_tag_filters} = event_source_query(filter, now)
|
||||||
|
|
||||||
|
base_query
|
||||||
|
|> apply_common_event_filters(filter, remaining_tag_filters, opts, search_plan)
|
||||||
|
|> order_by([event: event], asc: event.created_at, asc: event.id)
|
||||||
|
|> select([event: event], %{
|
||||||
|
created_at: event.created_at,
|
||||||
|
id: event.id
|
||||||
|
})
|
||||||
|
|> maybe_limit_query(effective_filter_limit(filter, opts))
|
||||||
|
end
|
||||||
|
|
||||||
|
defp event_ref_union_query_for_filters([], now, _opts) do
|
||||||
|
from(event in "events",
|
||||||
|
where: event.created_at > ^now and event.created_at < ^now,
|
||||||
|
select: %{created_at: event.created_at, id: event.id}
|
||||||
|
)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp event_ref_union_query_for_filters([first_filter | rest_filters], now, opts) do
|
||||||
|
Enum.reduce(rest_filters, event_ref_query_for_filter(first_filter, now, opts), fn filter,
|
||||||
|
acc ->
|
||||||
|
union_all(acc, ^event_ref_query_for_filter(filter, now, opts))
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp fetch_event_refs([filter], now, opts) do
|
||||||
|
query =
|
||||||
|
filter
|
||||||
|
|> event_ref_query_for_filter(now, opts)
|
||||||
|
|> maybe_limit_query(Keyword.get(opts, :limit))
|
||||||
|
|
||||||
|
read_repo()
|
||||||
|
|> then(fn repo -> repo.all(query) end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp fetch_event_refs(filters, now, opts) do
|
||||||
|
query =
|
||||||
|
filters
|
||||||
|
|> event_ref_union_query_for_filters(now, opts)
|
||||||
|
|> subquery()
|
||||||
|
|> then(fn union_query ->
|
||||||
|
from(ref in union_query,
|
||||||
|
group_by: [ref.created_at, ref.id],
|
||||||
|
order_by: [asc: ref.created_at, asc: ref.id],
|
||||||
|
select: %{created_at: ref.created_at, id: ref.id}
|
||||||
|
)
|
||||||
|
end)
|
||||||
|
|> maybe_limit_query(Keyword.get(opts, :limit))
|
||||||
|
|
||||||
|
read_repo()
|
||||||
|
|> then(fn repo -> repo.all(query) end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp count_events([filter], now, opts) do
|
||||||
|
query =
|
||||||
|
filter
|
||||||
|
|> event_id_query_for_filter(now, opts)
|
||||||
|
|> subquery()
|
||||||
|
|> then(fn query ->
|
||||||
|
from(event in query, select: count())
|
||||||
|
end)
|
||||||
|
|
||||||
|
read_repo()
|
||||||
|
|> then(fn repo -> repo.one(query) end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp count_events(filters, now, opts) do
|
||||||
|
query =
|
||||||
|
filters
|
||||||
|
|> event_id_distinct_union_query_for_filters(now, opts)
|
||||||
|
|> subquery()
|
||||||
|
|> then(fn union_query ->
|
||||||
|
from(event in union_query, select: count())
|
||||||
|
end)
|
||||||
|
|
||||||
|
read_repo()
|
||||||
|
|> then(fn repo -> repo.one(query) end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp event_source_query(filter, now) do
|
||||||
|
tag_filters = tag_filters(filter)
|
||||||
|
|
||||||
|
case primary_tag_filter(tag_filters) do
|
||||||
|
nil ->
|
||||||
|
{from(event in "events",
|
||||||
|
as: :event,
|
||||||
|
where:
|
||||||
|
is_nil(event.deleted_at) and
|
||||||
|
(is_nil(event.expires_at) or event.expires_at > ^now)
|
||||||
|
), []}
|
||||||
|
|
||||||
|
{tag_name, values} = primary_tag_filter ->
|
||||||
|
remaining_tag_filters = List.delete(tag_filters, primary_tag_filter)
|
||||||
|
|
||||||
|
{from(tag in "event_tags",
|
||||||
|
as: :primary_tag,
|
||||||
|
where: tag.name == ^tag_name and tag.value in ^values,
|
||||||
|
join: event in "events",
|
||||||
|
as: :event,
|
||||||
|
on: event.created_at == tag.event_created_at and event.id == tag.event_id,
|
||||||
|
where:
|
||||||
|
is_nil(event.deleted_at) and
|
||||||
|
(is_nil(event.expires_at) or event.expires_at > ^now),
|
||||||
|
distinct: [event.created_at, event.id]
|
||||||
|
), remaining_tag_filters}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp apply_common_event_filters(query, filter, remaining_tag_filters, opts, search_plan) do
|
||||||
|
query
|
||||||
|
|> maybe_filter_ids(Map.get(filter, "ids"))
|
||||||
|
|> maybe_filter_authors(Map.get(filter, "authors"))
|
||||||
|
|> maybe_filter_kinds(Map.get(filter, "kinds"))
|
||||||
|
|> maybe_filter_since(Map.get(filter, "since"))
|
||||||
|
|> maybe_filter_until(Map.get(filter, "until"))
|
||||||
|
|> maybe_filter_search(search_plan)
|
||||||
|
|> filter_by_tag_filters(remaining_tag_filters)
|
||||||
|
|> maybe_restrict_giftwrap_access(filter, opts)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp primary_tag_filter([]), do: nil
|
||||||
|
|
||||||
|
defp primary_tag_filter(tag_filters) do
|
||||||
|
Enum.find(tag_filters, fn {tag_name, _values} -> tag_name in ["h", "i"] end) ||
|
||||||
|
List.first(tag_filters)
|
||||||
|
end
|
||||||
|
|
||||||
defp maybe_filter_ids(query, nil), do: query
|
defp maybe_filter_ids(query, nil), do: query
|
||||||
|
|
||||||
defp maybe_filter_ids(query, ids) do
|
defp maybe_filter_ids(query, ids) do
|
||||||
decoded_ids = decode_hex_list(ids, :lower)
|
decoded_ids = decode_hex_list(ids, :lower)
|
||||||
where(query, [event], event.id in ^decoded_ids)
|
where(query, [event: event], event.id in ^decoded_ids)
|
||||||
end
|
end
|
||||||
|
|
||||||
defp maybe_filter_authors(query, nil), do: query
|
defp maybe_filter_authors(query, nil), do: query
|
||||||
|
|
||||||
defp maybe_filter_authors(query, authors) do
|
defp maybe_filter_authors(query, authors) do
|
||||||
decoded_authors = decode_hex_list(authors, :lower)
|
decoded_authors = decode_hex_list(authors, :lower)
|
||||||
where(query, [event], event.pubkey in ^decoded_authors)
|
where(query, [event: event], event.pubkey in ^decoded_authors)
|
||||||
end
|
end
|
||||||
|
|
||||||
defp maybe_filter_kinds(query, nil), do: query
|
defp maybe_filter_kinds(query, nil), do: query
|
||||||
defp maybe_filter_kinds(query, kinds), do: where(query, [event], event.kind in ^kinds)
|
defp maybe_filter_kinds(query, kinds), do: where(query, [event: event], event.kind in ^kinds)
|
||||||
|
|
||||||
defp maybe_filter_since(query, nil), do: query
|
defp maybe_filter_since(query, nil), do: query
|
||||||
defp maybe_filter_since(query, since), do: where(query, [event], event.created_at >= ^since)
|
|
||||||
|
defp maybe_filter_since(query, since),
|
||||||
|
do: where(query, [event: event], event.created_at >= ^since)
|
||||||
|
|
||||||
defp maybe_filter_until(query, nil), do: query
|
defp maybe_filter_until(query, nil), do: query
|
||||||
defp maybe_filter_until(query, until), do: where(query, [event], event.created_at <= ^until)
|
|
||||||
|
defp maybe_filter_until(query, until),
|
||||||
|
do: where(query, [event: event], event.created_at <= ^until)
|
||||||
|
|
||||||
defp maybe_filter_search(query, nil), do: query
|
defp maybe_filter_search(query, nil), do: query
|
||||||
|
|
||||||
defp maybe_filter_search(query, search) when is_binary(search) and search != "" do
|
defp maybe_filter_search(query, %{mode: :fts, query: search}) do
|
||||||
escaped_search = escape_like_pattern(search)
|
where(
|
||||||
where(query, [event], ilike(event.content, ^"%#{escaped_search}%"))
|
query,
|
||||||
|
[event: event],
|
||||||
|
fragment(@fts_match_fragment, event.content, ^search)
|
||||||
|
)
|
||||||
end
|
end
|
||||||
|
|
||||||
defp maybe_filter_search(query, _search), do: query
|
defp maybe_filter_search(query, %{mode: :trigram, query: search}) do
|
||||||
|
escaped_search = escape_like_pattern(search)
|
||||||
|
where(query, [event: event], ilike(event.content, ^"%#{escaped_search}%"))
|
||||||
|
end
|
||||||
|
|
||||||
defp escape_like_pattern(search) do
|
defp escape_like_pattern(search) do
|
||||||
search
|
search
|
||||||
@@ -704,13 +869,11 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
|||||||
|> String.replace("_", "\\_")
|
|> String.replace("_", "\\_")
|
||||||
end
|
end
|
||||||
|
|
||||||
defp filter_by_tags(query, filter) do
|
defp filter_by_tag_filters(query, tag_filters) do
|
||||||
filter
|
Enum.reduce(tag_filters, query, fn {tag_name, values}, acc ->
|
||||||
|> tag_filters()
|
|
||||||
|> Enum.reduce(query, fn {tag_name, values}, acc ->
|
|
||||||
where(
|
where(
|
||||||
acc,
|
acc,
|
||||||
[event],
|
[event: event],
|
||||||
fragment(
|
fragment(
|
||||||
"EXISTS (SELECT 1 FROM event_tags AS tag WHERE tag.event_created_at = ? AND tag.event_id = ? AND tag.name = ? AND tag.value = ANY(?))",
|
"EXISTS (SELECT 1 FROM event_tags AS tag WHERE tag.event_created_at = ? AND tag.event_id = ? AND tag.name = ? AND tag.value = ANY(?))",
|
||||||
event.created_at,
|
event.created_at,
|
||||||
@@ -740,7 +903,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
|||||||
targets_giftwrap?(filter) and requester_pubkeys != [] ->
|
targets_giftwrap?(filter) and requester_pubkeys != [] ->
|
||||||
where(
|
where(
|
||||||
query,
|
query,
|
||||||
[event],
|
[event: event],
|
||||||
fragment(
|
fragment(
|
||||||
"EXISTS (SELECT 1 FROM event_tags AS tag WHERE tag.event_created_at = ? AND tag.event_id = ? AND tag.name = 'p' AND tag.value = ANY(?))",
|
"EXISTS (SELECT 1 FROM event_tags AS tag WHERE tag.event_created_at = ? AND tag.event_id = ? AND tag.name = 'p' AND tag.value = ANY(?))",
|
||||||
event.created_at,
|
event.created_at,
|
||||||
@@ -750,7 +913,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
|||||||
)
|
)
|
||||||
|
|
||||||
targets_giftwrap?(filter) ->
|
targets_giftwrap?(filter) ->
|
||||||
where(query, [_event], false)
|
where(query, [event: _event], false)
|
||||||
|
|
||||||
true ->
|
true ->
|
||||||
query
|
query
|
||||||
@@ -786,20 +949,90 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
|||||||
defp maybe_limit_query(query, nil), do: query
|
defp maybe_limit_query(query, nil), do: query
|
||||||
defp maybe_limit_query(query, limit), do: limit(query, ^limit)
|
defp maybe_limit_query(query, limit), do: limit(query, ^limit)
|
||||||
|
|
||||||
|
defp maybe_order_by_search_rank(query, nil) do
|
||||||
|
order_by(query, [event: event], desc: event.created_at, asc: event.id)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_order_by_search_rank(query, %{mode: :fts, query: search}) do
|
||||||
|
order_by(
|
||||||
|
query,
|
||||||
|
[event: event],
|
||||||
|
desc: fragment(@fts_rank_fragment, event.content, ^search),
|
||||||
|
desc: event.created_at,
|
||||||
|
asc: event.id
|
||||||
|
)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_order_by_search_rank(query, %{mode: :trigram, query: search}) do
|
||||||
|
order_by(
|
||||||
|
query,
|
||||||
|
[event: event],
|
||||||
|
desc: fragment(@trigram_rank_fragment, ^search, event.content),
|
||||||
|
desc: event.created_at,
|
||||||
|
asc: event.id
|
||||||
|
)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_select_search_score(query, nil), do: query
|
||||||
|
|
||||||
|
defp maybe_select_search_score(query, %{mode: :fts, query: search}) do
|
||||||
|
select_merge(
|
||||||
|
query,
|
||||||
|
[event: event],
|
||||||
|
%{search_score: fragment(@fts_rank_fragment, event.content, ^search)}
|
||||||
|
)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_select_search_score(query, %{mode: :trigram, query: search}) do
|
||||||
|
select_merge(
|
||||||
|
query,
|
||||||
|
[event: event],
|
||||||
|
%{search_score: fragment(@trigram_rank_fragment, ^search, event.content)}
|
||||||
|
)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp search_plan(nil), do: nil
|
||||||
|
|
||||||
|
defp search_plan(search) when is_binary(search) do
|
||||||
|
normalized_search = String.trim(search)
|
||||||
|
|
||||||
|
cond do
|
||||||
|
normalized_search == "" ->
|
||||||
|
nil
|
||||||
|
|
||||||
|
trigram_fallback_search?(normalized_search) ->
|
||||||
|
%{mode: :trigram, query: normalized_search}
|
||||||
|
|
||||||
|
true ->
|
||||||
|
%{mode: :fts, query: normalized_search}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp trigram_fallback_search?(search) do
|
||||||
|
String.match?(search, @trigram_fallback_pattern) or short_single_term_search?(search)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp short_single_term_search?(search) do
|
||||||
|
case String.split(search, ~r/\s+/, trim: true) do
|
||||||
|
[term] -> String.length(term) <= @trigram_fallback_max_single_term_length
|
||||||
|
_other -> false
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
defp deduplicate_events(events) do
|
defp deduplicate_events(events) do
|
||||||
events
|
events
|
||||||
|> Enum.reduce(%{}, fn event, acc -> Map.put_new(acc, event.id, event) end)
|
|> Enum.reduce(%{}, fn event, acc ->
|
||||||
|
Map.update(acc, event.id, event, fn existing -> preferred_event(existing, event) end)
|
||||||
|
end)
|
||||||
|> Map.values()
|
|> Map.values()
|
||||||
end
|
end
|
||||||
|
|
||||||
defp sort_persisted_events(events) do
|
defp sort_persisted_events(events, filters) do
|
||||||
Enum.sort(events, fn left, right ->
|
if Enum.any?(filters, &search_filter?/1) do
|
||||||
cond do
|
Enum.sort(events, &search_result_sorter/2)
|
||||||
left.created_at > right.created_at -> true
|
else
|
||||||
left.created_at < right.created_at -> false
|
Enum.sort(events, &chronological_sorter/2)
|
||||||
true -> left.id < right.id
|
end
|
||||||
end
|
|
||||||
end)
|
|
||||||
end
|
end
|
||||||
|
|
||||||
defp maybe_apply_query_limit(events, opts) do
|
defp maybe_apply_query_limit(events, opts) do
|
||||||
@@ -821,6 +1054,50 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
|||||||
}
|
}
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp preferred_event(existing, candidate) do
|
||||||
|
if search_result_sorter(candidate, existing) do
|
||||||
|
candidate
|
||||||
|
else
|
||||||
|
existing
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp search_filter?(filter) do
|
||||||
|
filter
|
||||||
|
|> Map.get("search")
|
||||||
|
|> search_plan()
|
||||||
|
|> Kernel.!=(nil)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp search_result_sorter(left, right) do
|
||||||
|
left_score = search_score(left)
|
||||||
|
right_score = search_score(right)
|
||||||
|
|
||||||
|
cond do
|
||||||
|
left_score > right_score -> true
|
||||||
|
left_score < right_score -> false
|
||||||
|
true -> chronological_sorter(left, right)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp chronological_sorter(left, right) do
|
||||||
|
cond do
|
||||||
|
left.created_at > right.created_at -> true
|
||||||
|
left.created_at < right.created_at -> false
|
||||||
|
true -> left.id < right.id
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp search_score(event) do
|
||||||
|
event
|
||||||
|
|> Map.get(:search_score, 0.0)
|
||||||
|
|> case do
|
||||||
|
score when is_float(score) -> score
|
||||||
|
score when is_integer(score) -> score / 1
|
||||||
|
_other -> 0.0
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
defp normalize_persisted_tags(tags) when is_list(tags), do: tags
|
defp normalize_persisted_tags(tags) when is_list(tags), do: tags
|
||||||
defp normalize_persisted_tags(_tags), do: []
|
defp normalize_persisted_tags(_tags), do: []
|
||||||
|
|
||||||
@@ -966,4 +1243,6 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
|||||||
end
|
end
|
||||||
|
|
||||||
defp maybe_apply_mls_group_retention(expires_at, _kind, _created_at), do: expires_at
|
defp maybe_apply_mls_group_retention(expires_at, _kind, _created_at), do: expires_at
|
||||||
|
|
||||||
|
defp read_repo, do: PostgresRepos.read()
|
||||||
end
|
end
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Groups do
|
|||||||
|
|
||||||
import Ecto.Query
|
import Ecto.Query
|
||||||
|
|
||||||
|
alias Parrhesia.PostgresRepos
|
||||||
alias Parrhesia.Repo
|
alias Parrhesia.Repo
|
||||||
|
|
||||||
@behaviour Parrhesia.Storage.Groups
|
@behaviour Parrhesia.Storage.Groups
|
||||||
@@ -46,7 +47,9 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Groups do
|
|||||||
limit: 1
|
limit: 1
|
||||||
)
|
)
|
||||||
|
|
||||||
case Repo.one(query) do
|
repo = read_repo()
|
||||||
|
|
||||||
|
case repo.one(query) do
|
||||||
nil ->
|
nil ->
|
||||||
{:ok, nil}
|
{:ok, nil}
|
||||||
|
|
||||||
@@ -94,8 +97,8 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Groups do
|
|||||||
)
|
)
|
||||||
|
|
||||||
memberships =
|
memberships =
|
||||||
query
|
read_repo()
|
||||||
|> Repo.all()
|
|> then(fn repo -> repo.all(query) end)
|
||||||
|> Enum.map(fn membership ->
|
|> Enum.map(fn membership ->
|
||||||
to_membership_map(
|
to_membership_map(
|
||||||
membership.group_id,
|
membership.group_id,
|
||||||
@@ -163,8 +166,8 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Groups do
|
|||||||
)
|
)
|
||||||
|
|
||||||
roles =
|
roles =
|
||||||
query
|
read_repo()
|
||||||
|> Repo.all()
|
|> then(fn repo -> repo.all(query) end)
|
||||||
|> Enum.map(fn role ->
|
|> Enum.map(fn role ->
|
||||||
to_role_map(role.group_id, role.pubkey, role.role, role.metadata)
|
to_role_map(role.group_id, role.pubkey, role.role, role.metadata)
|
||||||
end)
|
end)
|
||||||
@@ -242,6 +245,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Groups do
|
|||||||
|
|
||||||
defp unwrap_transaction_result({:ok, result}), do: {:ok, result}
|
defp unwrap_transaction_result({:ok, result}), do: {:ok, result}
|
||||||
defp unwrap_transaction_result({:error, reason}), do: {:error, reason}
|
defp unwrap_transaction_result({:error, reason}), do: {:error, reason}
|
||||||
|
defp read_repo, do: PostgresRepos.read()
|
||||||
|
|
||||||
defp fetch_required_string(map, key) do
|
defp fetch_required_string(map, key) do
|
||||||
map
|
map
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
|
|||||||
|
|
||||||
import Ecto.Query
|
import Ecto.Query
|
||||||
|
|
||||||
|
alias Parrhesia.PostgresRepos
|
||||||
alias Parrhesia.Repo
|
alias Parrhesia.Repo
|
||||||
|
|
||||||
@behaviour Parrhesia.Storage.Moderation
|
@behaviour Parrhesia.Storage.Moderation
|
||||||
@@ -67,6 +68,11 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def has_allowed_pubkeys?(_context) do
|
||||||
|
{:ok, scope_populated?(:allowed_pubkeys)}
|
||||||
|
end
|
||||||
|
|
||||||
@impl true
|
@impl true
|
||||||
def ban_event(_context, event_id) do
|
def ban_event(_context, event_id) do
|
||||||
with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id),
|
with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id),
|
||||||
@@ -163,6 +169,24 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp scope_populated?(scope) do
|
||||||
|
{table, field} = cache_scope_source!(scope)
|
||||||
|
|
||||||
|
if moderation_cache_enabled?() do
|
||||||
|
case cache_table_ref() do
|
||||||
|
:undefined ->
|
||||||
|
scope_populated_db?(table, field)
|
||||||
|
|
||||||
|
cache_table ->
|
||||||
|
ensure_cache_scope_loaded(scope, cache_table)
|
||||||
|
|
||||||
|
:ets.select_count(cache_table, [{{{:member, scope, :_}, true}, [], [true]}]) > 0
|
||||||
|
end
|
||||||
|
else
|
||||||
|
scope_populated_db?(table, field)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
defp ensure_cache_scope_loaded(scope, table) do
|
defp ensure_cache_scope_loaded(scope, table) do
|
||||||
loaded_key = cache_loaded_key(scope)
|
loaded_key = cache_loaded_key(scope)
|
||||||
|
|
||||||
@@ -189,7 +213,8 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
|
|||||||
select: field(record, ^field)
|
select: field(record, ^field)
|
||||||
)
|
)
|
||||||
|
|
||||||
Repo.all(query)
|
read_repo()
|
||||||
|
|> then(fn repo -> repo.all(query) end)
|
||||||
end
|
end
|
||||||
|
|
||||||
defp cache_put(scope, value) do
|
defp cache_put(scope, value) do
|
||||||
@@ -243,7 +268,22 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
|
|||||||
limit: 1
|
limit: 1
|
||||||
)
|
)
|
||||||
|
|
||||||
Repo.one(query) == 1
|
read_repo()
|
||||||
|
|> then(fn repo -> repo.one(query) end)
|
||||||
|
|> Kernel.==(1)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp scope_populated_db?(table, field) do
|
||||||
|
query =
|
||||||
|
from(record in table,
|
||||||
|
select: field(record, ^field),
|
||||||
|
limit: 1
|
||||||
|
)
|
||||||
|
|
||||||
|
read_repo()
|
||||||
|
|> then(fn repo -> repo.one(query) end)
|
||||||
|
|> is_nil()
|
||||||
|
|> Kernel.not()
|
||||||
end
|
end
|
||||||
|
|
||||||
defp normalize_hex_or_binary(value, expected_bytes, _reason)
|
defp normalize_hex_or_binary(value, expected_bytes, _reason)
|
||||||
@@ -282,4 +322,6 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
|
|||||||
|
|
||||||
defp to_inet({_, _, _, _, _, _, _, _} = ip_tuple),
|
defp to_inet({_, _, _, _, _, _, _, _} = ip_tuple),
|
||||||
do: %Postgrex.INET{address: ip_tuple, netmask: 128}
|
do: %Postgrex.INET{address: ip_tuple, netmask: 128}
|
||||||
|
|
||||||
|
defp read_repo, do: PostgresRepos.read()
|
||||||
end
|
end
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ defmodule Parrhesia.Storage.Events do
|
|||||||
@type event_id :: binary()
|
@type event_id :: binary()
|
||||||
@type event :: map()
|
@type event :: map()
|
||||||
@type filter :: map()
|
@type filter :: map()
|
||||||
|
@type event_ref :: %{created_at: non_neg_integer(), id: binary()}
|
||||||
@type query_opts :: keyword()
|
@type query_opts :: keyword()
|
||||||
@type count_result :: non_neg_integer() | %{optional(atom()) => term()}
|
@type count_result :: non_neg_integer() | %{optional(atom()) => term()}
|
||||||
@type reason :: term()
|
@type reason :: term()
|
||||||
@@ -14,6 +15,8 @@ defmodule Parrhesia.Storage.Events do
|
|||||||
@callback put_event(context(), event()) :: {:ok, event()} | {:error, reason()}
|
@callback put_event(context(), event()) :: {:ok, event()} | {:error, reason()}
|
||||||
@callback get_event(context(), event_id()) :: {:ok, event() | nil} | {:error, reason()}
|
@callback get_event(context(), event_id()) :: {:ok, event() | nil} | {:error, reason()}
|
||||||
@callback query(context(), [filter()], query_opts()) :: {:ok, [event()]} | {:error, reason()}
|
@callback query(context(), [filter()], query_opts()) :: {:ok, [event()]} | {:error, reason()}
|
||||||
|
@callback query_event_refs(context(), [filter()], query_opts()) ::
|
||||||
|
{:ok, [event_ref()]} | {:error, reason()}
|
||||||
@callback count(context(), [filter()], query_opts()) ::
|
@callback count(context(), [filter()], query_opts()) ::
|
||||||
{:ok, count_result()} | {:error, reason()}
|
{:ok, count_result()} | {:error, reason()}
|
||||||
@callback delete_by_request(context(), event()) :: {:ok, non_neg_integer()} | {:error, reason()}
|
@callback delete_by_request(context(), event()) :: {:ok, non_neg_integer()} | {:error, reason()}
|
||||||
|
|||||||
@@ -16,6 +16,7 @@ defmodule Parrhesia.Storage.Moderation do
|
|||||||
@callback allow_pubkey(context(), pubkey()) :: :ok | {:error, reason()}
|
@callback allow_pubkey(context(), pubkey()) :: :ok | {:error, reason()}
|
||||||
@callback disallow_pubkey(context(), pubkey()) :: :ok | {:error, reason()}
|
@callback disallow_pubkey(context(), pubkey()) :: :ok | {:error, reason()}
|
||||||
@callback pubkey_allowed?(context(), pubkey()) :: {:ok, boolean()} | {:error, reason()}
|
@callback pubkey_allowed?(context(), pubkey()) :: {:ok, boolean()} | {:error, reason()}
|
||||||
|
@callback has_allowed_pubkeys?(context()) :: {:ok, boolean()} | {:error, reason()}
|
||||||
|
|
||||||
@callback ban_event(context(), event_id()) :: :ok | {:error, reason()}
|
@callback ban_event(context(), event_id()) :: :ok | {:error, reason()}
|
||||||
@callback unban_event(context(), event_id()) :: :ok | {:error, reason()}
|
@callback unban_event(context(), event_id()) :: :ok | {:error, reason()}
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ defmodule Parrhesia.Storage.Partitions do
|
|||||||
|
|
||||||
import Ecto.Query
|
import Ecto.Query
|
||||||
|
|
||||||
|
alias Parrhesia.PostgresRepos
|
||||||
alias Parrhesia.Repo
|
alias Parrhesia.Repo
|
||||||
|
|
||||||
@identifier_pattern ~r/^[a-zA-Z_][a-zA-Z0-9_]*$/
|
@identifier_pattern ~r/^[a-zA-Z_][a-zA-Z0-9_]*$/
|
||||||
@@ -35,7 +36,8 @@ defmodule Parrhesia.Storage.Partitions do
|
|||||||
order_by: [asc: table.tablename]
|
order_by: [asc: table.tablename]
|
||||||
)
|
)
|
||||||
|
|
||||||
Repo.all(query)
|
read_repo()
|
||||||
|
|> then(fn repo -> repo.all(query) end)
|
||||||
end
|
end
|
||||||
|
|
||||||
@doc """
|
@doc """
|
||||||
@@ -88,7 +90,9 @@ defmodule Parrhesia.Storage.Partitions do
|
|||||||
"""
|
"""
|
||||||
@spec database_size_bytes() :: {:ok, non_neg_integer()} | {:error, term()}
|
@spec database_size_bytes() :: {:ok, non_neg_integer()} | {:error, term()}
|
||||||
def database_size_bytes do
|
def database_size_bytes do
|
||||||
case Repo.query("SELECT pg_database_size(current_database())") do
|
repo = read_repo()
|
||||||
|
|
||||||
|
case repo.query("SELECT pg_database_size(current_database())") do
|
||||||
{:ok, %{rows: [[size]]}} when is_integer(size) and size >= 0 -> {:ok, size}
|
{:ok, %{rows: [[size]]}} when is_integer(size) and size >= 0 -> {:ok, size}
|
||||||
{:ok, _result} -> {:error, :unexpected_result}
|
{:ok, _result} -> {:error, :unexpected_result}
|
||||||
{:error, reason} -> {:error, reason}
|
{:error, reason} -> {:error, reason}
|
||||||
@@ -219,7 +223,9 @@ defmodule Parrhesia.Storage.Partitions do
|
|||||||
LIMIT 1
|
LIMIT 1
|
||||||
"""
|
"""
|
||||||
|
|
||||||
case Repo.query(query, [partition_name, parent_table_name]) do
|
repo = read_repo()
|
||||||
|
|
||||||
|
case repo.query(query, [partition_name, parent_table_name]) do
|
||||||
{:ok, %{rows: [[1]]}} -> true
|
{:ok, %{rows: [[1]]}} -> true
|
||||||
{:ok, %{rows: []}} -> false
|
{:ok, %{rows: []}} -> false
|
||||||
{:ok, _result} -> false
|
{:ok, _result} -> false
|
||||||
@@ -278,6 +284,8 @@ defmodule Parrhesia.Storage.Partitions do
|
|||||||
|> DateTime.to_unix()
|
|> DateTime.to_unix()
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp read_repo, do: PostgresRepos.read()
|
||||||
|
|
||||||
defp month_start(%Date{} = date), do: Date.new!(date.year, date.month, 1)
|
defp month_start(%Date{} = date), do: Date.new!(date.year, date.month, 1)
|
||||||
|
|
||||||
defp shift_month(%Date{} = date, month_delta) when is_integer(month_delta) do
|
defp shift_month(%Date{} = date, month_delta) when is_integer(month_delta) do
|
||||||
|
|||||||
@@ -5,18 +5,28 @@ defmodule Parrhesia.Storage.Supervisor do
|
|||||||
|
|
||||||
use Supervisor
|
use Supervisor
|
||||||
|
|
||||||
|
alias Parrhesia.PostgresRepos
|
||||||
|
|
||||||
def start_link(init_arg \\ []) do
|
def start_link(init_arg \\ []) do
|
||||||
Supervisor.start_link(__MODULE__, init_arg, name: __MODULE__)
|
Supervisor.start_link(__MODULE__, init_arg, name: __MODULE__)
|
||||||
end
|
end
|
||||||
|
|
||||||
@impl true
|
@impl true
|
||||||
def init(_init_arg) do
|
def init(_init_arg) do
|
||||||
children = [
|
children = moderation_cache_children() ++ PostgresRepos.started_repos()
|
||||||
{Parrhesia.Storage.Adapters.Postgres.ModerationCache,
|
|
||||||
name: Parrhesia.Storage.Adapters.Postgres.ModerationCache},
|
|
||||||
Parrhesia.Repo
|
|
||||||
]
|
|
||||||
|
|
||||||
Supervisor.init(children, strategy: :one_for_one)
|
Supervisor.init(children, strategy: :one_for_one)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp moderation_cache_children do
|
||||||
|
if PostgresRepos.postgres_enabled?() and
|
||||||
|
Application.get_env(:parrhesia, :moderation_cache_enabled, true) do
|
||||||
|
[
|
||||||
|
{Parrhesia.Storage.Adapters.Postgres.ModerationCache,
|
||||||
|
name: Parrhesia.Storage.Adapters.Postgres.ModerationCache}
|
||||||
|
]
|
||||||
|
else
|
||||||
|
[]
|
||||||
|
end
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|||||||
@@ -13,7 +13,10 @@ defmodule Parrhesia.Subscriptions.Supervisor do
|
|||||||
def init(_init_arg) do
|
def init(_init_arg) do
|
||||||
children =
|
children =
|
||||||
[
|
[
|
||||||
{Parrhesia.Subscriptions.Index, name: Parrhesia.Subscriptions.Index}
|
{Parrhesia.Subscriptions.Index, name: Parrhesia.Subscriptions.Index},
|
||||||
|
{Parrhesia.Fanout.Dispatcher, name: Parrhesia.Fanout.Dispatcher},
|
||||||
|
{Registry, keys: :unique, name: Parrhesia.API.Stream.Registry},
|
||||||
|
{DynamicSupervisor, strategy: :one_for_one, name: Parrhesia.API.Stream.Supervisor}
|
||||||
] ++
|
] ++
|
||||||
negentropy_children() ++ [{Parrhesia.Fanout.MultiNode, name: Parrhesia.Fanout.MultiNode}]
|
negentropy_children() ++ [{Parrhesia.Fanout.MultiNode, name: Parrhesia.Fanout.MultiNode}]
|
||||||
|
|
||||||
|
|||||||
62
lib/parrhesia/sync/relay_info_client.ex
Normal file
62
lib/parrhesia/sync/relay_info_client.ex
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
defmodule Parrhesia.Sync.RelayInfoClient do
|
||||||
|
@moduledoc false
|
||||||
|
|
||||||
|
alias Parrhesia.HTTP
|
||||||
|
alias Parrhesia.Sync.TLS
|
||||||
|
|
||||||
|
@spec verify_remote_identity(map(), keyword()) :: :ok | {:error, term()}
|
||||||
|
def verify_remote_identity(server, opts \\ []) do
|
||||||
|
request_fun = Keyword.get(opts, :request_fun, &default_request/2)
|
||||||
|
|
||||||
|
with {:ok, response} <- request_fun.(relay_info_url(server.url), request_opts(server)),
|
||||||
|
{:ok, pubkey} <- extract_pubkey(response) do
|
||||||
|
if pubkey == server.auth_pubkey do
|
||||||
|
:ok
|
||||||
|
else
|
||||||
|
{:error, :remote_identity_mismatch}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp default_request(url, opts) do
|
||||||
|
case HTTP.get(
|
||||||
|
url: url,
|
||||||
|
headers: [{"accept", "application/nostr+json"}],
|
||||||
|
decode_body: false,
|
||||||
|
connect_options: Keyword.merge([timeout: 5_000], opts),
|
||||||
|
receive_timeout: 5_000
|
||||||
|
) do
|
||||||
|
{:ok, response} -> {:ok, response}
|
||||||
|
{:error, reason} -> {:error, reason}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp extract_pubkey(%Req.Response{status: 200, body: body}) when is_binary(body) do
|
||||||
|
with {:ok, payload} <- JSON.decode(body),
|
||||||
|
pubkey when is_binary(pubkey) and pubkey != "" <- Map.get(payload, "pubkey") do
|
||||||
|
{:ok, String.downcase(pubkey)}
|
||||||
|
else
|
||||||
|
nil -> {:error, :missing_remote_identity}
|
||||||
|
{:error, reason} -> {:error, reason}
|
||||||
|
_other -> {:error, :missing_remote_identity}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp extract_pubkey(%Req.Response{status: status}),
|
||||||
|
do: {:error, {:relay_info_request_failed, status}}
|
||||||
|
|
||||||
|
defp extract_pubkey(_response), do: {:error, :invalid_relay_info}
|
||||||
|
|
||||||
|
defp request_opts(%{tls: %{mode: :disabled}}), do: []
|
||||||
|
defp request_opts(%{tls: tls}), do: TLS.req_connect_options(tls)
|
||||||
|
|
||||||
|
defp relay_info_url(relay_url) do
|
||||||
|
relay_url
|
||||||
|
|> URI.parse()
|
||||||
|
|> Map.update!(:scheme, fn
|
||||||
|
"wss" -> "https"
|
||||||
|
"ws" -> "http"
|
||||||
|
end)
|
||||||
|
|> URI.to_string()
|
||||||
|
end
|
||||||
|
end
|
||||||
43
lib/parrhesia/sync/supervisor.ex
Normal file
43
lib/parrhesia/sync/supervisor.ex
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
defmodule Parrhesia.Sync.Supervisor do
|
||||||
|
@moduledoc """
|
||||||
|
Supervision entrypoint for sync control-plane processes.
|
||||||
|
"""
|
||||||
|
|
||||||
|
use Supervisor
|
||||||
|
|
||||||
|
def start_link(init_arg \\ []) do
|
||||||
|
name = Keyword.get(init_arg, :name, __MODULE__)
|
||||||
|
Supervisor.start_link(__MODULE__, init_arg, name: name)
|
||||||
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def init(init_arg) do
|
||||||
|
worker_registry = Keyword.get(init_arg, :worker_registry, Parrhesia.Sync.WorkerRegistry)
|
||||||
|
worker_supervisor = Keyword.get(init_arg, :worker_supervisor, Parrhesia.Sync.WorkerSupervisor)
|
||||||
|
manager_name = Keyword.get(init_arg, :manager, Parrhesia.API.Sync.Manager)
|
||||||
|
|
||||||
|
children = [
|
||||||
|
{Registry, keys: :unique, name: worker_registry},
|
||||||
|
{DynamicSupervisor, strategy: :one_for_one, name: worker_supervisor},
|
||||||
|
{Parrhesia.API.Sync.Manager,
|
||||||
|
manager_opts(init_arg, manager_name, worker_registry, worker_supervisor)}
|
||||||
|
]
|
||||||
|
|
||||||
|
Supervisor.init(children, strategy: :one_for_one)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp manager_opts(init_arg, manager_name, worker_registry, worker_supervisor) do
|
||||||
|
[
|
||||||
|
name: manager_name,
|
||||||
|
worker_registry: worker_registry,
|
||||||
|
worker_supervisor: worker_supervisor
|
||||||
|
] ++
|
||||||
|
Keyword.take(init_arg, [
|
||||||
|
:path,
|
||||||
|
:start_workers?,
|
||||||
|
:transport_module,
|
||||||
|
:relay_info_opts,
|
||||||
|
:transport_opts
|
||||||
|
])
|
||||||
|
end
|
||||||
|
end
|
||||||
112
lib/parrhesia/sync/tls.ex
Normal file
112
lib/parrhesia/sync/tls.ex
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
defmodule Parrhesia.Sync.TLS do
|
||||||
|
@moduledoc false
|
||||||
|
|
||||||
|
@type tls_config :: %{
|
||||||
|
mode: :required | :disabled,
|
||||||
|
hostname: String.t(),
|
||||||
|
pins: [%{type: :spki_sha256, value: String.t()}]
|
||||||
|
}
|
||||||
|
|
||||||
|
@spec websocket_options(tls_config()) :: keyword()
|
||||||
|
def websocket_options(%{mode: :disabled}), do: [insecure: true]
|
||||||
|
|
||||||
|
def websocket_options(%{mode: :required} = tls) do
|
||||||
|
[
|
||||||
|
ssl_options: transport_opts(tls)
|
||||||
|
]
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec req_connect_options(tls_config()) :: keyword()
|
||||||
|
def req_connect_options(%{mode: :disabled}), do: []
|
||||||
|
|
||||||
|
def req_connect_options(%{mode: :required} = tls) do
|
||||||
|
[
|
||||||
|
transport_opts: transport_opts(tls)
|
||||||
|
]
|
||||||
|
end
|
||||||
|
|
||||||
|
def transport_opts(%{hostname: hostname, pins: pins}) do
|
||||||
|
[
|
||||||
|
verify: :verify_peer,
|
||||||
|
cacerts: system_cacerts(),
|
||||||
|
server_name_indication: String.to_charlist(hostname),
|
||||||
|
customize_hostname_check: [
|
||||||
|
match_fun: :public_key.pkix_verify_hostname_match_fun(:https)
|
||||||
|
]
|
||||||
|
]
|
||||||
|
|> maybe_put_verify_fun(pins)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_put_verify_fun(options, []), do: options
|
||||||
|
|
||||||
|
defp maybe_put_verify_fun(options, pins) do
|
||||||
|
Keyword.put(
|
||||||
|
options,
|
||||||
|
:verify_fun,
|
||||||
|
{&verify_certificate/3, %{pins: MapSet.new(Enum.map(pins, & &1.value)), matched?: false}}
|
||||||
|
)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp verify_certificate(_cert, :valid_peer, %{matched?: true} = state), do: {:valid, state}
|
||||||
|
defp verify_certificate(_cert, :valid_peer, _state), do: {:fail, :pin_mismatch}
|
||||||
|
|
||||||
|
defp verify_certificate(_cert, {:bad_cert, reason}, _state), do: {:fail, reason}
|
||||||
|
|
||||||
|
defp verify_certificate(cert, _event, state) when is_binary(cert) do
|
||||||
|
matched? = MapSet.member?(state.pins, spki_pin_from_verify(cert))
|
||||||
|
{:valid, %{state | matched?: state.matched? or matched?}}
|
||||||
|
rescue
|
||||||
|
_error -> {:fail, :invalid_certificate}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp verify_certificate({:OTPCertificate, _tbs, _sig_alg, _sig} = cert, _event, state) do
|
||||||
|
matched? = MapSet.member?(state.pins, spki_pin_from_verify(cert))
|
||||||
|
{:valid, %{state | matched?: state.matched? or matched?}}
|
||||||
|
rescue
|
||||||
|
_error -> {:fail, :invalid_certificate}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp verify_certificate({:Certificate, _tbs, _sig_alg, _sig} = cert, _event, state) do
|
||||||
|
matched? = MapSet.member?(state.pins, spki_pin_from_verify(cert))
|
||||||
|
{:valid, %{state | matched?: state.matched? or matched?}}
|
||||||
|
rescue
|
||||||
|
_error -> {:fail, :invalid_certificate}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp verify_certificate(_cert, _event, state), do: {:valid, state}
|
||||||
|
|
||||||
|
defp spki_pin(cert_der) do
|
||||||
|
cert = :public_key.pkix_decode_cert(cert_der, :plain)
|
||||||
|
spki = cert |> elem(1) |> elem(7)
|
||||||
|
|
||||||
|
:public_key.der_encode(:SubjectPublicKeyInfo, spki)
|
||||||
|
|> then(&:crypto.hash(:sha256, &1))
|
||||||
|
|> Base.encode64()
|
||||||
|
end
|
||||||
|
|
||||||
|
defp spki_pin_from_verify(cert) when is_binary(cert), do: spki_pin(cert)
|
||||||
|
|
||||||
|
defp spki_pin_from_verify({:OTPCertificate, _tbs, _sig_alg, _sig} = cert) do
|
||||||
|
cert
|
||||||
|
|> then(&:public_key.pkix_encode(:OTPCertificate, &1, :otp))
|
||||||
|
|> spki_pin()
|
||||||
|
end
|
||||||
|
|
||||||
|
defp spki_pin_from_verify({:Certificate, _tbs, _sig_alg, _sig} = cert) do
|
||||||
|
cert
|
||||||
|
|> then(&:public_key.der_encode(:Certificate, &1))
|
||||||
|
|> spki_pin()
|
||||||
|
end
|
||||||
|
|
||||||
|
defp spki_pin_from_verify(_cert) do
|
||||||
|
raise(ArgumentError, "invalid certificate")
|
||||||
|
end
|
||||||
|
|
||||||
|
defp system_cacerts do
|
||||||
|
if function_exported?(:public_key, :cacerts_get, 0) do
|
||||||
|
:public_key.cacerts_get()
|
||||||
|
else
|
||||||
|
[]
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
7
lib/parrhesia/sync/transport.ex
Normal file
7
lib/parrhesia/sync/transport.ex
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
defmodule Parrhesia.Sync.Transport do
|
||||||
|
@moduledoc false
|
||||||
|
|
||||||
|
@callback connect(pid(), map(), keyword()) :: {:ok, pid()} | {:error, term()}
|
||||||
|
@callback send_json(pid(), term()) :: :ok | {:error, term()}
|
||||||
|
@callback close(pid()) :: :ok
|
||||||
|
end
|
||||||
93
lib/parrhesia/sync/transport/websockex_client.ex
Normal file
93
lib/parrhesia/sync/transport/websockex_client.ex
Normal file
@@ -0,0 +1,93 @@
|
|||||||
|
defmodule Parrhesia.Sync.Transport.WebSockexClient do
|
||||||
|
@moduledoc false
|
||||||
|
|
||||||
|
use WebSockex
|
||||||
|
|
||||||
|
alias Parrhesia.Sync.TLS
|
||||||
|
|
||||||
|
@behaviour Parrhesia.Sync.Transport
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def connect(owner, server, opts \\ []) do
|
||||||
|
state = %{
|
||||||
|
owner: owner,
|
||||||
|
server: server
|
||||||
|
}
|
||||||
|
|
||||||
|
transport_opts =
|
||||||
|
server.tls
|
||||||
|
|> TLS.websocket_options()
|
||||||
|
|> merge_websocket_opts(Keyword.get(opts, :websocket_opts, []))
|
||||||
|
|> Keyword.put(:handle_initial_conn_failure, true)
|
||||||
|
|
||||||
|
WebSockex.start(server.url, __MODULE__, state, transport_opts)
|
||||||
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def send_json(pid, payload) do
|
||||||
|
WebSockex.cast(pid, {:send_json, payload})
|
||||||
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def close(pid) do
|
||||||
|
WebSockex.cast(pid, :close)
|
||||||
|
:ok
|
||||||
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def handle_connect(conn, state) do
|
||||||
|
send(state.owner, {:sync_transport, self(), :connected, %{resp_headers: conn.resp_headers}})
|
||||||
|
{:ok, state}
|
||||||
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def handle_frame({:text, payload}, state) do
|
||||||
|
message =
|
||||||
|
case JSON.decode(payload) do
|
||||||
|
{:ok, frame} -> frame
|
||||||
|
{:error, reason} -> {:decode_error, reason, payload}
|
||||||
|
end
|
||||||
|
|
||||||
|
send(state.owner, {:sync_transport, self(), :frame, message})
|
||||||
|
{:ok, state}
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_frame(frame, state) do
|
||||||
|
send(state.owner, {:sync_transport, self(), :frame, frame})
|
||||||
|
{:ok, state}
|
||||||
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def handle_cast({:send_json, payload}, state) do
|
||||||
|
{:reply, {:text, JSON.encode!(payload)}, state}
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_cast(:close, state) do
|
||||||
|
{:close, state}
|
||||||
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def handle_disconnect(status, state) do
|
||||||
|
send(state.owner, {:sync_transport, self(), :disconnected, status})
|
||||||
|
{:ok, state}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp merge_websocket_opts(base_opts, override_opts) do
|
||||||
|
override_ssl_options = Keyword.get(override_opts, :ssl_options)
|
||||||
|
|
||||||
|
merged_ssl_options =
|
||||||
|
case {Keyword.get(base_opts, :ssl_options), override_ssl_options} do
|
||||||
|
{nil, nil} -> nil
|
||||||
|
{base_ssl, nil} -> base_ssl
|
||||||
|
{nil, override_ssl} -> override_ssl
|
||||||
|
{base_ssl, override_ssl} -> Keyword.merge(base_ssl, override_ssl)
|
||||||
|
end
|
||||||
|
|
||||||
|
base_opts
|
||||||
|
|> Keyword.merge(Keyword.delete(override_opts, :ssl_options))
|
||||||
|
|> maybe_put_ssl_options(merged_ssl_options)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_put_ssl_options(opts, nil), do: opts
|
||||||
|
defp maybe_put_ssl_options(opts, ssl_options), do: Keyword.put(opts, :ssl_options, ssl_options)
|
||||||
|
end
|
||||||
367
lib/parrhesia/sync/worker.ex
Normal file
367
lib/parrhesia/sync/worker.ex
Normal file
@@ -0,0 +1,367 @@
|
|||||||
|
defmodule Parrhesia.Sync.Worker do
|
||||||
|
@moduledoc false
|
||||||
|
|
||||||
|
use GenServer
|
||||||
|
|
||||||
|
alias Parrhesia.API.Events
|
||||||
|
alias Parrhesia.API.Identity
|
||||||
|
alias Parrhesia.API.RequestContext
|
||||||
|
alias Parrhesia.API.Sync.Manager
|
||||||
|
alias Parrhesia.Sync.RelayInfoClient
|
||||||
|
alias Parrhesia.Sync.Transport.WebSockexClient
|
||||||
|
|
||||||
|
@initial_backoff_ms 1_000
|
||||||
|
@max_backoff_ms 30_000
|
||||||
|
@auth_kind 22_242
|
||||||
|
|
||||||
|
defstruct server: nil,
|
||||||
|
manager: nil,
|
||||||
|
transport_module: WebSockexClient,
|
||||||
|
transport_pid: nil,
|
||||||
|
phase: :idle,
|
||||||
|
current_subscription_id: nil,
|
||||||
|
backoff_ms: @initial_backoff_ms,
|
||||||
|
authenticated?: false,
|
||||||
|
auth_event_id: nil,
|
||||||
|
resubscribe_after_auth?: false,
|
||||||
|
cursor_created_at: nil,
|
||||||
|
cursor_event_id: nil,
|
||||||
|
relay_info_opts: [],
|
||||||
|
transport_opts: []
|
||||||
|
|
||||||
|
@type t :: %__MODULE__{}
|
||||||
|
|
||||||
|
def child_spec(opts) do
|
||||||
|
server = Keyword.fetch!(opts, :server)
|
||||||
|
|
||||||
|
%{
|
||||||
|
id: {:sync_worker, server.id},
|
||||||
|
start: {__MODULE__, :start_link, [opts]},
|
||||||
|
restart: :transient
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
def start_link(opts) do
|
||||||
|
name = Keyword.get(opts, :name)
|
||||||
|
GenServer.start_link(__MODULE__, opts, name: name)
|
||||||
|
end
|
||||||
|
|
||||||
|
def sync_now(worker), do: GenServer.cast(worker, :sync_now)
|
||||||
|
def stop(worker), do: GenServer.stop(worker, :normal)
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def init(opts) do
|
||||||
|
server = Keyword.fetch!(opts, :server)
|
||||||
|
runtime = Keyword.get(opts, :runtime, %{})
|
||||||
|
|
||||||
|
state = %__MODULE__{
|
||||||
|
server: server,
|
||||||
|
manager: Keyword.fetch!(opts, :manager),
|
||||||
|
transport_module: Keyword.get(opts, :transport_module, WebSockexClient),
|
||||||
|
cursor_created_at: Map.get(runtime, :cursor_created_at),
|
||||||
|
cursor_event_id: Map.get(runtime, :cursor_event_id),
|
||||||
|
relay_info_opts: Keyword.get(opts, :relay_info_opts, []),
|
||||||
|
transport_opts: Keyword.get(opts, :transport_opts, [])
|
||||||
|
}
|
||||||
|
|
||||||
|
send(self(), :connect)
|
||||||
|
{:ok, state}
|
||||||
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def handle_cast(:sync_now, state) do
|
||||||
|
Manager.runtime_event(state.manager, state.server.id, :subscription_restart)
|
||||||
|
|
||||||
|
next_state =
|
||||||
|
state
|
||||||
|
|> close_subscription()
|
||||||
|
|> issue_subscription()
|
||||||
|
|
||||||
|
{:noreply, next_state}
|
||||||
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def handle_info(:connect, %__MODULE__{transport_pid: nil} = state) do
|
||||||
|
case RelayInfoClient.verify_remote_identity(state.server, state.relay_info_opts) do
|
||||||
|
:ok ->
|
||||||
|
connect_transport(state)
|
||||||
|
|
||||||
|
{:error, reason} ->
|
||||||
|
Manager.runtime_event(state.manager, state.server.id, :disconnected, %{reason: reason})
|
||||||
|
{:noreply, schedule_reconnect(state)}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_info(:connect, state), do: {:noreply, state}
|
||||||
|
|
||||||
|
def handle_info({:sync_transport, transport_pid, :connected, _info}, state) do
|
||||||
|
Manager.runtime_event(state.manager, state.server.id, :connected, %{})
|
||||||
|
|
||||||
|
next_state =
|
||||||
|
state
|
||||||
|
|> Map.put(:transport_pid, transport_pid)
|
||||||
|
|> Map.put(:backoff_ms, @initial_backoff_ms)
|
||||||
|
|> Map.put(:authenticated?, false)
|
||||||
|
|> Map.put(:auth_event_id, nil)
|
||||||
|
|> Map.put(:resubscribe_after_auth?, false)
|
||||||
|
|> issue_subscription()
|
||||||
|
|
||||||
|
{:noreply, next_state}
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_info({:sync_transport, _transport_pid, :frame, frame}, state) do
|
||||||
|
{:noreply, handle_transport_frame(state, frame)}
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_info({:sync_transport, _transport_pid, :disconnected, status}, state) do
|
||||||
|
Manager.runtime_event(state.manager, state.server.id, :disconnected, %{reason: status.reason})
|
||||||
|
|
||||||
|
next_state =
|
||||||
|
state
|
||||||
|
|> Map.put(:transport_pid, nil)
|
||||||
|
|> Map.put(:phase, :idle)
|
||||||
|
|> Map.put(:authenticated?, false)
|
||||||
|
|> Map.put(:auth_event_id, nil)
|
||||||
|
|> Map.put(:resubscribe_after_auth?, false)
|
||||||
|
|> Map.put(:current_subscription_id, nil)
|
||||||
|
|> schedule_reconnect()
|
||||||
|
|
||||||
|
{:noreply, next_state}
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_info(_message, state), do: {:noreply, state}
|
||||||
|
|
||||||
|
defp connect_transport(state) do
|
||||||
|
case state.transport_module.connect(self(), state.server, state.transport_opts) do
|
||||||
|
{:ok, transport_pid} ->
|
||||||
|
{:noreply, %{state | transport_pid: transport_pid, phase: :connecting}}
|
||||||
|
|
||||||
|
{:error, reason} ->
|
||||||
|
Manager.runtime_event(state.manager, state.server.id, :disconnected, %{reason: reason})
|
||||||
|
{:noreply, schedule_reconnect(state)}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp handle_transport_frame(state, ["AUTH", challenge]) when is_binary(challenge) do
|
||||||
|
case send_auth_event(state, challenge) do
|
||||||
|
{:ok, auth_event_id} ->
|
||||||
|
%{state | auth_event_id: auth_event_id, phase: :authenticating}
|
||||||
|
|
||||||
|
{:error, reason} ->
|
||||||
|
Manager.runtime_event(state.manager, state.server.id, :error, %{reason: reason})
|
||||||
|
state
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp handle_transport_frame(state, ["OK", event_id, true, _message])
|
||||||
|
when event_id == state.auth_event_id do
|
||||||
|
next_state = %{state | authenticated?: true, auth_event_id: nil}
|
||||||
|
|
||||||
|
if next_state.resubscribe_after_auth? do
|
||||||
|
next_state
|
||||||
|
|> Map.put(:resubscribe_after_auth?, false)
|
||||||
|
|> issue_subscription()
|
||||||
|
else
|
||||||
|
next_state
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp handle_transport_frame(state, ["OK", event_id, false, message])
|
||||||
|
when event_id == state.auth_event_id do
|
||||||
|
Manager.runtime_event(state.manager, state.server.id, :error, %{reason: message})
|
||||||
|
schedule_reconnect(%{state | auth_event_id: nil, authenticated?: false})
|
||||||
|
end
|
||||||
|
|
||||||
|
defp handle_transport_frame(state, ["EVENT", subscription_id, event])
|
||||||
|
when subscription_id == state.current_subscription_id and is_map(event) do
|
||||||
|
handle_remote_event(state, event)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp handle_transport_frame(state, ["EOSE", subscription_id])
|
||||||
|
when subscription_id == state.current_subscription_id do
|
||||||
|
Manager.runtime_event(state.manager, state.server.id, :sync_completed, %{})
|
||||||
|
%{state | phase: :streaming}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp handle_transport_frame(state, ["CLOSED", subscription_id, message])
|
||||||
|
when subscription_id == state.current_subscription_id do
|
||||||
|
auth_required? = is_binary(message) and String.contains?(String.downcase(message), "auth")
|
||||||
|
|
||||||
|
next_state =
|
||||||
|
state
|
||||||
|
|> Map.put(:current_subscription_id, nil)
|
||||||
|
|> Map.put(:phase, :idle)
|
||||||
|
|
||||||
|
if auth_required? and not state.authenticated? do
|
||||||
|
%{next_state | resubscribe_after_auth?: true}
|
||||||
|
else
|
||||||
|
Manager.runtime_event(state.manager, state.server.id, :error, %{reason: message})
|
||||||
|
schedule_reconnect(next_state)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp handle_transport_frame(state, {:decode_error, reason, _payload}) do
|
||||||
|
Manager.runtime_event(state.manager, state.server.id, :error, %{reason: reason})
|
||||||
|
state
|
||||||
|
end
|
||||||
|
|
||||||
|
defp handle_transport_frame(state, _frame), do: state
|
||||||
|
|
||||||
|
defp issue_subscription(%__MODULE__{transport_pid: nil} = state), do: state
|
||||||
|
|
||||||
|
defp issue_subscription(state) do
|
||||||
|
subscription_id = subscription_id(state.server.id)
|
||||||
|
filters = sync_filters(state)
|
||||||
|
|
||||||
|
:ok =
|
||||||
|
state.transport_module.send_json(state.transport_pid, ["REQ", subscription_id | filters])
|
||||||
|
|
||||||
|
Manager.runtime_event(state.manager, state.server.id, :sync_started, %{})
|
||||||
|
|
||||||
|
%{
|
||||||
|
state
|
||||||
|
| current_subscription_id: subscription_id,
|
||||||
|
phase: :catchup
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp close_subscription(%__MODULE__{transport_pid: nil} = state), do: state
|
||||||
|
defp close_subscription(%__MODULE__{current_subscription_id: nil} = state), do: state
|
||||||
|
|
||||||
|
defp close_subscription(state) do
|
||||||
|
:ok =
|
||||||
|
state.transport_module.send_json(state.transport_pid, [
|
||||||
|
"CLOSE",
|
||||||
|
state.current_subscription_id
|
||||||
|
])
|
||||||
|
|
||||||
|
%{state | current_subscription_id: nil}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp send_auth_event(state, challenge) do
|
||||||
|
event = %{
|
||||||
|
"created_at" => System.system_time(:second),
|
||||||
|
"kind" => @auth_kind,
|
||||||
|
"tags" => [["challenge", challenge], ["relay", state.server.url]],
|
||||||
|
"content" => ""
|
||||||
|
}
|
||||||
|
|
||||||
|
with {:ok, signed_event} <- Identity.sign_event(event) do
|
||||||
|
:ok = state.transport_module.send_json(state.transport_pid, ["AUTH", signed_event])
|
||||||
|
{:ok, signed_event["id"]}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp handle_remote_event(state, event) do
|
||||||
|
context = request_context(state)
|
||||||
|
|
||||||
|
case Events.publish(event, context: context) do
|
||||||
|
{:ok, %{accepted: true}} ->
|
||||||
|
Manager.runtime_event(state.manager, state.server.id, :event_result, %{
|
||||||
|
result: :accepted,
|
||||||
|
event: event
|
||||||
|
})
|
||||||
|
|
||||||
|
advance_cursor(state, event)
|
||||||
|
|
||||||
|
{:ok, %{accepted: false, reason: :duplicate_event}} ->
|
||||||
|
Manager.runtime_event(state.manager, state.server.id, :event_result, %{
|
||||||
|
result: :duplicate,
|
||||||
|
event: event
|
||||||
|
})
|
||||||
|
|
||||||
|
advance_cursor(state, event)
|
||||||
|
|
||||||
|
{:ok, %{accepted: false, reason: reason}} ->
|
||||||
|
Manager.runtime_event(state.manager, state.server.id, :event_result, %{
|
||||||
|
result: :rejected,
|
||||||
|
event: event,
|
||||||
|
reason: reason
|
||||||
|
})
|
||||||
|
|
||||||
|
state
|
||||||
|
|
||||||
|
{:error, reason} ->
|
||||||
|
Manager.runtime_event(state.manager, state.server.id, :event_result, %{
|
||||||
|
result: :rejected,
|
||||||
|
event: event,
|
||||||
|
reason: reason
|
||||||
|
})
|
||||||
|
|
||||||
|
state
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp request_context(state) do
|
||||||
|
%RequestContext{
|
||||||
|
authenticated_pubkeys: MapSet.new([state.server.auth_pubkey]),
|
||||||
|
caller: :sync,
|
||||||
|
subscription_id: state.current_subscription_id,
|
||||||
|
peer_id: state.server.id,
|
||||||
|
metadata: %{
|
||||||
|
sync_server_id: state.server.id,
|
||||||
|
remote_url: state.server.url
|
||||||
|
}
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp advance_cursor(state, event) do
|
||||||
|
created_at = Map.get(event, "created_at")
|
||||||
|
event_id = Map.get(event, "id")
|
||||||
|
|
||||||
|
if newer_cursor?(state.cursor_created_at, state.cursor_event_id, created_at, event_id) do
|
||||||
|
Manager.runtime_event(state.manager, state.server.id, :cursor_advanced, %{
|
||||||
|
created_at: created_at,
|
||||||
|
event_id: event_id
|
||||||
|
})
|
||||||
|
|
||||||
|
%{state | cursor_created_at: created_at, cursor_event_id: event_id}
|
||||||
|
else
|
||||||
|
state
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp newer_cursor?(nil, _cursor_event_id, created_at, event_id),
|
||||||
|
do: is_integer(created_at) and is_binary(event_id)
|
||||||
|
|
||||||
|
defp newer_cursor?(cursor_created_at, cursor_event_id, created_at, event_id) do
|
||||||
|
cond do
|
||||||
|
not is_integer(created_at) or not is_binary(event_id) ->
|
||||||
|
false
|
||||||
|
|
||||||
|
created_at > cursor_created_at ->
|
||||||
|
true
|
||||||
|
|
||||||
|
created_at == cursor_created_at and is_binary(cursor_event_id) and
|
||||||
|
event_id > cursor_event_id ->
|
||||||
|
true
|
||||||
|
|
||||||
|
true ->
|
||||||
|
false
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp sync_filters(state) do
|
||||||
|
Enum.map(state.server.filters, fn filter ->
|
||||||
|
case since_value(state, filter) do
|
||||||
|
nil -> filter
|
||||||
|
since -> Map.put(filter, "since", since)
|
||||||
|
end
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp since_value(%__MODULE__{cursor_created_at: nil}, _filter), do: nil
|
||||||
|
|
||||||
|
defp since_value(state, _filter) do
|
||||||
|
max(state.cursor_created_at - state.server.overlap_window_seconds, 0)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp schedule_reconnect(state) do
|
||||||
|
Process.send_after(self(), :connect, state.backoff_ms)
|
||||||
|
%{state | backoff_ms: min(state.backoff_ms * 2, @max_backoff_ms)}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp subscription_id(server_id) do
|
||||||
|
"sync-#{server_id}-#{System.unique_integer([:positive, :monotonic])}"
|
||||||
|
end
|
||||||
|
end
|
||||||
@@ -30,10 +30,19 @@ defmodule Parrhesia.Tasks.ExpirationWorker do
|
|||||||
def handle_info(:tick, state) do
|
def handle_info(:tick, state) do
|
||||||
started_at = System.monotonic_time()
|
started_at = System.monotonic_time()
|
||||||
|
|
||||||
_result = Storage.events().purge_expired([])
|
purged_events =
|
||||||
|
case Storage.events().purge_expired([]) do
|
||||||
|
{:ok, count} when is_integer(count) and count >= 0 -> count
|
||||||
|
_other -> 0
|
||||||
|
end
|
||||||
|
|
||||||
duration = System.monotonic_time() - started_at
|
duration = System.monotonic_time() - started_at
|
||||||
Telemetry.emit([:parrhesia, :maintenance, :purge_expired, :stop], %{duration: duration}, %{})
|
|
||||||
|
Telemetry.emit(
|
||||||
|
[:parrhesia, :maintenance, :purge_expired, :stop],
|
||||||
|
%{duration: duration, purged_events: purged_events},
|
||||||
|
%{}
|
||||||
|
)
|
||||||
|
|
||||||
schedule_tick(state.interval_ms)
|
schedule_tick(state.interval_ms)
|
||||||
{:noreply, state}
|
{:noreply, state}
|
||||||
|
|||||||
40
lib/parrhesia/tasks/nip66_publisher.ex
Normal file
40
lib/parrhesia/tasks/nip66_publisher.ex
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
defmodule Parrhesia.Tasks.Nip66Publisher do
|
||||||
|
@moduledoc """
|
||||||
|
Periodic worker that publishes NIP-66 monitor and discovery events.
|
||||||
|
"""
|
||||||
|
|
||||||
|
use GenServer
|
||||||
|
|
||||||
|
alias Parrhesia.NIP66
|
||||||
|
|
||||||
|
@spec start_link(keyword()) :: GenServer.on_start()
|
||||||
|
def start_link(opts \\ []) do
|
||||||
|
name = Keyword.get(opts, :name, __MODULE__)
|
||||||
|
GenServer.start_link(__MODULE__, opts, name: name)
|
||||||
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def init(opts) do
|
||||||
|
state = %{
|
||||||
|
interval_ms: Keyword.get(opts, :interval_ms, NIP66.publish_interval_ms()),
|
||||||
|
publish_opts: Keyword.drop(opts, [:name, :interval_ms, :nip66_module]),
|
||||||
|
nip66_module: Keyword.get(opts, :nip66_module, NIP66)
|
||||||
|
}
|
||||||
|
|
||||||
|
schedule_tick(0)
|
||||||
|
{:ok, state}
|
||||||
|
end
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def handle_info(:tick, state) do
|
||||||
|
_result = state.nip66_module.publish_snapshot(state.publish_opts)
|
||||||
|
schedule_tick(state.interval_ms)
|
||||||
|
{:noreply, state}
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_info(_message, state), do: {:noreply, state}
|
||||||
|
|
||||||
|
defp schedule_tick(interval_ms) do
|
||||||
|
Process.send_after(self(), :tick, interval_ms)
|
||||||
|
end
|
||||||
|
end
|
||||||
@@ -11,7 +11,7 @@ defmodule Parrhesia.Tasks.Supervisor do
|
|||||||
|
|
||||||
@impl true
|
@impl true
|
||||||
def init(_init_arg) do
|
def init(_init_arg) do
|
||||||
children = expiration_children() ++ partition_retention_children()
|
children = expiration_children() ++ partition_retention_children() ++ nip66_children()
|
||||||
|
|
||||||
Supervisor.init(children, strategy: :one_for_one)
|
Supervisor.init(children, strategy: :one_for_one)
|
||||||
end
|
end
|
||||||
@@ -25,8 +25,20 @@ defmodule Parrhesia.Tasks.Supervisor do
|
|||||||
end
|
end
|
||||||
|
|
||||||
defp partition_retention_children do
|
defp partition_retention_children do
|
||||||
[
|
if Application.get_env(:parrhesia, :enable_partition_retention_worker, true) do
|
||||||
{Parrhesia.Tasks.PartitionRetentionWorker, name: Parrhesia.Tasks.PartitionRetentionWorker}
|
[
|
||||||
]
|
{Parrhesia.Tasks.PartitionRetentionWorker, name: Parrhesia.Tasks.PartitionRetentionWorker}
|
||||||
|
]
|
||||||
|
else
|
||||||
|
[]
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp nip66_children do
|
||||||
|
if Parrhesia.NIP66.enabled?() do
|
||||||
|
[{Parrhesia.Tasks.Nip66Publisher, name: Parrhesia.Tasks.Nip66Publisher}]
|
||||||
|
else
|
||||||
|
[]
|
||||||
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|||||||
@@ -1,12 +1,17 @@
|
|||||||
defmodule Parrhesia.Telemetry do
|
defmodule Parrhesia.Telemetry do
|
||||||
@moduledoc """
|
@moduledoc """
|
||||||
Supervision entrypoint and helpers for relay telemetry.
|
Supervision entrypoint and helpers for relay telemetry.
|
||||||
|
|
||||||
|
Starts the Prometheus reporter and telemetry poller as supervised children.
|
||||||
|
All relay metrics are namespaced under `parrhesia.*` and exposed through the
|
||||||
|
`/metrics` endpoint in Prometheus exposition format.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
use Supervisor
|
use Supervisor
|
||||||
|
|
||||||
import Telemetry.Metrics
|
import Telemetry.Metrics
|
||||||
|
|
||||||
|
@repo_query_handler_id "parrhesia-repo-query-handler"
|
||||||
@prometheus_reporter __MODULE__.Prometheus
|
@prometheus_reporter __MODULE__.Prometheus
|
||||||
|
|
||||||
@spec start_link(keyword()) :: Supervisor.on_start()
|
@spec start_link(keyword()) :: Supervisor.on_start()
|
||||||
@@ -16,6 +21,8 @@ defmodule Parrhesia.Telemetry do
|
|||||||
|
|
||||||
@impl true
|
@impl true
|
||||||
def init(_init_arg) do
|
def init(_init_arg) do
|
||||||
|
:ok = attach_repo_query_handlers()
|
||||||
|
|
||||||
children = [
|
children = [
|
||||||
{TelemetryMetricsPrometheus.Core, name: @prometheus_reporter, metrics: metrics()},
|
{TelemetryMetricsPrometheus.Core, name: @prometheus_reporter, metrics: metrics()},
|
||||||
{:telemetry_poller, measurements: periodic_measurements(), period: 10_000}
|
{:telemetry_poller, measurements: periodic_measurements(), period: 10_000}
|
||||||
@@ -30,6 +37,12 @@ defmodule Parrhesia.Telemetry do
|
|||||||
@spec metrics() :: [Telemetry.Metrics.t()]
|
@spec metrics() :: [Telemetry.Metrics.t()]
|
||||||
def metrics do
|
def metrics do
|
||||||
[
|
[
|
||||||
|
counter("parrhesia.ingest.events.count",
|
||||||
|
event_name: [:parrhesia, :ingest, :result],
|
||||||
|
measurement: :count,
|
||||||
|
tags: [:traffic_class, :outcome, :reason],
|
||||||
|
tag_values: &ingest_result_tag_values/1
|
||||||
|
),
|
||||||
distribution("parrhesia.ingest.duration.ms",
|
distribution("parrhesia.ingest.duration.ms",
|
||||||
event_name: [:parrhesia, :ingest, :stop],
|
event_name: [:parrhesia, :ingest, :stop],
|
||||||
measurement: :duration,
|
measurement: :duration,
|
||||||
@@ -38,14 +51,27 @@ defmodule Parrhesia.Telemetry do
|
|||||||
tag_values: &traffic_class_tag_values/1,
|
tag_values: &traffic_class_tag_values/1,
|
||||||
reporter_options: [buckets: [1, 5, 10, 25, 50, 100, 250, 500, 1000]]
|
reporter_options: [buckets: [1, 5, 10, 25, 50, 100, 250, 500, 1000]]
|
||||||
),
|
),
|
||||||
|
counter("parrhesia.query.requests.count",
|
||||||
|
event_name: [:parrhesia, :query, :result],
|
||||||
|
measurement: :count,
|
||||||
|
tags: [:traffic_class, :operation, :outcome],
|
||||||
|
tag_values: &query_result_tag_values/1
|
||||||
|
),
|
||||||
distribution("parrhesia.query.duration.ms",
|
distribution("parrhesia.query.duration.ms",
|
||||||
event_name: [:parrhesia, :query, :stop],
|
event_name: [:parrhesia, :query, :stop],
|
||||||
measurement: :duration,
|
measurement: :duration,
|
||||||
unit: {:native, :millisecond},
|
unit: {:native, :millisecond},
|
||||||
tags: [:traffic_class],
|
tags: [:traffic_class, :operation],
|
||||||
tag_values: &traffic_class_tag_values/1,
|
tag_values: &query_stop_tag_values/1,
|
||||||
reporter_options: [buckets: [1, 5, 10, 25, 50, 100, 250, 500, 1000]]
|
reporter_options: [buckets: [1, 5, 10, 25, 50, 100, 250, 500, 1000]]
|
||||||
),
|
),
|
||||||
|
distribution("parrhesia.query.results.count",
|
||||||
|
event_name: [:parrhesia, :query, :stop],
|
||||||
|
measurement: :result_count,
|
||||||
|
tags: [:traffic_class, :operation],
|
||||||
|
tag_values: &query_stop_tag_values/1,
|
||||||
|
reporter_options: [buckets: [0, 1, 5, 10, 25, 50, 100, 250, 500, 1000, 5000]]
|
||||||
|
),
|
||||||
distribution("parrhesia.fanout.duration.ms",
|
distribution("parrhesia.fanout.duration.ms",
|
||||||
event_name: [:parrhesia, :fanout, :stop],
|
event_name: [:parrhesia, :fanout, :stop],
|
||||||
measurement: :duration,
|
measurement: :duration,
|
||||||
@@ -54,6 +80,25 @@ defmodule Parrhesia.Telemetry do
|
|||||||
tag_values: &traffic_class_tag_values/1,
|
tag_values: &traffic_class_tag_values/1,
|
||||||
reporter_options: [buckets: [1, 5, 10, 25, 50, 100, 250, 500, 1000]]
|
reporter_options: [buckets: [1, 5, 10, 25, 50, 100, 250, 500, 1000]]
|
||||||
),
|
),
|
||||||
|
counter("parrhesia.fanout.events_considered.count",
|
||||||
|
event_name: [:parrhesia, :fanout, :stop],
|
||||||
|
measurement: :considered,
|
||||||
|
tags: [:traffic_class],
|
||||||
|
tag_values: &traffic_class_tag_values/1
|
||||||
|
),
|
||||||
|
counter("parrhesia.fanout.events_enqueued.count",
|
||||||
|
event_name: [:parrhesia, :fanout, :stop],
|
||||||
|
measurement: :enqueued,
|
||||||
|
tags: [:traffic_class],
|
||||||
|
tag_values: &traffic_class_tag_values/1
|
||||||
|
),
|
||||||
|
distribution("parrhesia.fanout.batch_size",
|
||||||
|
event_name: [:parrhesia, :fanout, :stop],
|
||||||
|
measurement: :enqueued,
|
||||||
|
tags: [:traffic_class],
|
||||||
|
tag_values: &traffic_class_tag_values/1,
|
||||||
|
reporter_options: [buckets: [0, 1, 5, 10, 25, 50, 100, 250, 500, 1000]]
|
||||||
|
),
|
||||||
last_value("parrhesia.connection.outbound_queue.depth",
|
last_value("parrhesia.connection.outbound_queue.depth",
|
||||||
event_name: [:parrhesia, :connection, :outbound_queue],
|
event_name: [:parrhesia, :connection, :outbound_queue],
|
||||||
measurement: :depth,
|
measurement: :depth,
|
||||||
@@ -80,11 +125,153 @@ defmodule Parrhesia.Telemetry do
|
|||||||
tags: [:traffic_class],
|
tags: [:traffic_class],
|
||||||
tag_values: &traffic_class_tag_values/1
|
tag_values: &traffic_class_tag_values/1
|
||||||
),
|
),
|
||||||
|
counter("parrhesia.connection.outbound_queue.drained_frames.count",
|
||||||
|
event_name: [:parrhesia, :connection, :outbound_queue, :drain],
|
||||||
|
measurement: :count
|
||||||
|
),
|
||||||
|
distribution("parrhesia.connection.outbound_queue.drain_batch_size",
|
||||||
|
event_name: [:parrhesia, :connection, :outbound_queue, :drain],
|
||||||
|
measurement: :count,
|
||||||
|
reporter_options: [buckets: [0, 1, 5, 10, 25, 50, 100, 250]]
|
||||||
|
),
|
||||||
|
counter("parrhesia.connection.outbound_queue.dropped_events.count",
|
||||||
|
event_name: [:parrhesia, :connection, :outbound_queue, :drop],
|
||||||
|
measurement: :count,
|
||||||
|
tags: [:strategy],
|
||||||
|
tag_values: &strategy_tag_values/1
|
||||||
|
),
|
||||||
|
last_value("parrhesia.listener.connections.active",
|
||||||
|
event_name: [:parrhesia, :listener, :population],
|
||||||
|
measurement: :connections,
|
||||||
|
tags: [:listener_id],
|
||||||
|
tag_values: &listener_tag_values/1,
|
||||||
|
reporter_options: [prometheus_type: :gauge]
|
||||||
|
),
|
||||||
|
last_value("parrhesia.listener.subscriptions.active",
|
||||||
|
event_name: [:parrhesia, :listener, :population],
|
||||||
|
measurement: :subscriptions,
|
||||||
|
tags: [:listener_id],
|
||||||
|
tag_values: &listener_tag_values/1,
|
||||||
|
reporter_options: [prometheus_type: :gauge]
|
||||||
|
),
|
||||||
|
counter("parrhesia.rate_limit.hits.count",
|
||||||
|
event_name: [:parrhesia, :rate_limit, :hit],
|
||||||
|
measurement: :count,
|
||||||
|
tags: [:scope, :traffic_class],
|
||||||
|
tag_values: &rate_limit_tag_values/1
|
||||||
|
),
|
||||||
|
last_value("parrhesia.process.mailbox.depth",
|
||||||
|
event_name: [:parrhesia, :process, :mailbox],
|
||||||
|
measurement: :depth,
|
||||||
|
tags: [:process_type],
|
||||||
|
tag_values: &process_tag_values/1,
|
||||||
|
reporter_options: [prometheus_type: :gauge]
|
||||||
|
),
|
||||||
|
counter("parrhesia.db.query.count",
|
||||||
|
event_name: [:parrhesia, :db, :query],
|
||||||
|
measurement: :count,
|
||||||
|
tags: [:repo_role],
|
||||||
|
tag_values: &repo_query_tag_values/1
|
||||||
|
),
|
||||||
|
distribution("parrhesia.db.query.total_time.ms",
|
||||||
|
event_name: [:parrhesia, :db, :query],
|
||||||
|
measurement: :total_time,
|
||||||
|
unit: {:native, :millisecond},
|
||||||
|
tags: [:repo_role],
|
||||||
|
tag_values: &repo_query_tag_values/1,
|
||||||
|
reporter_options: [buckets: [1, 5, 10, 25, 50, 100, 250, 500, 1000]]
|
||||||
|
),
|
||||||
|
distribution("parrhesia.db.query.queue_time.ms",
|
||||||
|
event_name: [:parrhesia, :db, :query],
|
||||||
|
measurement: :queue_time,
|
||||||
|
unit: {:native, :millisecond},
|
||||||
|
tags: [:repo_role],
|
||||||
|
tag_values: &repo_query_tag_values/1,
|
||||||
|
reporter_options: [buckets: [0, 1, 5, 10, 25, 50, 100, 250, 500, 1000]]
|
||||||
|
),
|
||||||
|
distribution("parrhesia.db.query.query_time.ms",
|
||||||
|
event_name: [:parrhesia, :db, :query],
|
||||||
|
measurement: :query_time,
|
||||||
|
unit: {:native, :millisecond},
|
||||||
|
tags: [:repo_role],
|
||||||
|
tag_values: &repo_query_tag_values/1,
|
||||||
|
reporter_options: [buckets: [0, 1, 5, 10, 25, 50, 100, 250, 500, 1000]]
|
||||||
|
),
|
||||||
|
distribution("parrhesia.db.query.decode_time.ms",
|
||||||
|
event_name: [:parrhesia, :db, :query],
|
||||||
|
measurement: :decode_time,
|
||||||
|
unit: {:native, :millisecond},
|
||||||
|
tags: [:repo_role],
|
||||||
|
tag_values: &repo_query_tag_values/1,
|
||||||
|
reporter_options: [buckets: [0, 1, 5, 10, 25, 50, 100, 250, 500, 1000]]
|
||||||
|
),
|
||||||
|
distribution("parrhesia.db.query.idle_time.ms",
|
||||||
|
event_name: [:parrhesia, :db, :query],
|
||||||
|
measurement: :idle_time,
|
||||||
|
unit: {:native, :millisecond},
|
||||||
|
tags: [:repo_role],
|
||||||
|
tag_values: &repo_query_tag_values/1,
|
||||||
|
reporter_options: [buckets: [0, 1, 5, 10, 25, 50, 100, 250, 500, 1000]]
|
||||||
|
),
|
||||||
|
distribution("parrhesia.maintenance.purge_expired.duration.ms",
|
||||||
|
event_name: [:parrhesia, :maintenance, :purge_expired, :stop],
|
||||||
|
measurement: :duration,
|
||||||
|
unit: {:native, :millisecond},
|
||||||
|
reporter_options: [buckets: [1, 5, 10, 25, 50, 100, 250, 500, 1000]]
|
||||||
|
),
|
||||||
|
counter("parrhesia.maintenance.purge_expired.events.count",
|
||||||
|
event_name: [:parrhesia, :maintenance, :purge_expired, :stop],
|
||||||
|
measurement: :purged_events
|
||||||
|
),
|
||||||
|
distribution("parrhesia.maintenance.partition_retention.duration.ms",
|
||||||
|
event_name: [:parrhesia, :maintenance, :partition_retention, :stop],
|
||||||
|
measurement: :duration,
|
||||||
|
unit: {:native, :millisecond},
|
||||||
|
tags: [:status],
|
||||||
|
tag_values: &status_tag_values/1,
|
||||||
|
reporter_options: [buckets: [1, 5, 10, 25, 50, 100, 250, 500, 1000]]
|
||||||
|
),
|
||||||
|
counter("parrhesia.maintenance.partition_retention.dropped_partitions.count",
|
||||||
|
event_name: [:parrhesia, :maintenance, :partition_retention, :stop],
|
||||||
|
measurement: :dropped_partitions,
|
||||||
|
tags: [:status],
|
||||||
|
tag_values: &status_tag_values/1
|
||||||
|
),
|
||||||
last_value("parrhesia.vm.memory.total.bytes",
|
last_value("parrhesia.vm.memory.total.bytes",
|
||||||
event_name: [:parrhesia, :vm, :memory],
|
event_name: [:parrhesia, :vm, :memory],
|
||||||
measurement: :total,
|
measurement: :total,
|
||||||
unit: :byte,
|
unit: :byte,
|
||||||
reporter_options: [prometheus_type: :gauge]
|
reporter_options: [prometheus_type: :gauge]
|
||||||
|
),
|
||||||
|
last_value("parrhesia.vm.memory.processes.bytes",
|
||||||
|
event_name: [:parrhesia, :vm, :memory],
|
||||||
|
measurement: :processes,
|
||||||
|
unit: :byte,
|
||||||
|
reporter_options: [prometheus_type: :gauge]
|
||||||
|
),
|
||||||
|
last_value("parrhesia.vm.memory.system.bytes",
|
||||||
|
event_name: [:parrhesia, :vm, :memory],
|
||||||
|
measurement: :system,
|
||||||
|
unit: :byte,
|
||||||
|
reporter_options: [prometheus_type: :gauge]
|
||||||
|
),
|
||||||
|
last_value("parrhesia.vm.memory.atom.bytes",
|
||||||
|
event_name: [:parrhesia, :vm, :memory],
|
||||||
|
measurement: :atom,
|
||||||
|
unit: :byte,
|
||||||
|
reporter_options: [prometheus_type: :gauge]
|
||||||
|
),
|
||||||
|
last_value("parrhesia.vm.memory.binary.bytes",
|
||||||
|
event_name: [:parrhesia, :vm, :memory],
|
||||||
|
measurement: :binary,
|
||||||
|
unit: :byte,
|
||||||
|
reporter_options: [prometheus_type: :gauge]
|
||||||
|
),
|
||||||
|
last_value("parrhesia.vm.memory.ets.bytes",
|
||||||
|
event_name: [:parrhesia, :vm, :memory],
|
||||||
|
measurement: :ets,
|
||||||
|
unit: :byte,
|
||||||
|
reporter_options: [prometheus_type: :gauge]
|
||||||
)
|
)
|
||||||
]
|
]
|
||||||
end
|
end
|
||||||
@@ -95,6 +282,22 @@ defmodule Parrhesia.Telemetry do
|
|||||||
:telemetry.execute(event_name, measurements, metadata)
|
:telemetry.execute(event_name, measurements, metadata)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
@spec emit_process_mailbox_depth(atom(), map()) :: :ok
|
||||||
|
def emit_process_mailbox_depth(process_type, metadata \\ %{})
|
||||||
|
when is_atom(process_type) and is_map(metadata) do
|
||||||
|
case Process.info(self(), :message_queue_len) do
|
||||||
|
{:message_queue_len, depth} ->
|
||||||
|
emit(
|
||||||
|
[:parrhesia, :process, :mailbox],
|
||||||
|
%{depth: depth},
|
||||||
|
Map.put(metadata, :process_type, process_type)
|
||||||
|
)
|
||||||
|
|
||||||
|
nil ->
|
||||||
|
:ok
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
defp periodic_measurements do
|
defp periodic_measurements do
|
||||||
[
|
[
|
||||||
{__MODULE__, :emit_vm_memory, []}
|
{__MODULE__, :emit_vm_memory, []}
|
||||||
@@ -103,12 +306,119 @@ defmodule Parrhesia.Telemetry do
|
|||||||
|
|
||||||
@doc false
|
@doc false
|
||||||
def emit_vm_memory do
|
def emit_vm_memory do
|
||||||
total = :erlang.memory(:total)
|
emit(
|
||||||
emit([:parrhesia, :vm, :memory], %{total: total}, %{})
|
[:parrhesia, :vm, :memory],
|
||||||
|
%{
|
||||||
|
total: :erlang.memory(:total),
|
||||||
|
processes: :erlang.memory(:processes),
|
||||||
|
system: :erlang.memory(:system),
|
||||||
|
atom: :erlang.memory(:atom),
|
||||||
|
binary: :erlang.memory(:binary),
|
||||||
|
ets: :erlang.memory(:ets)
|
||||||
|
},
|
||||||
|
%{}
|
||||||
|
)
|
||||||
end
|
end
|
||||||
|
|
||||||
defp traffic_class_tag_values(metadata) do
|
defp traffic_class_tag_values(metadata) do
|
||||||
traffic_class = metadata |> Map.get(:traffic_class, :generic) |> to_string()
|
traffic_class = metadata |> Map.get(:traffic_class, :generic) |> to_string()
|
||||||
%{traffic_class: traffic_class}
|
%{traffic_class: traffic_class}
|
||||||
end
|
end
|
||||||
|
|
||||||
|
defp ingest_result_tag_values(metadata) do
|
||||||
|
%{
|
||||||
|
traffic_class: metadata |> Map.get(:traffic_class, :generic) |> to_string(),
|
||||||
|
outcome: metadata |> Map.get(:outcome, :unknown) |> to_string(),
|
||||||
|
reason: metadata |> Map.get(:reason, :unknown) |> to_string()
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp query_stop_tag_values(metadata) do
|
||||||
|
%{
|
||||||
|
traffic_class: metadata |> Map.get(:traffic_class, :generic) |> to_string(),
|
||||||
|
operation: metadata |> Map.get(:operation, :query) |> to_string()
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp query_result_tag_values(metadata) do
|
||||||
|
%{
|
||||||
|
traffic_class: metadata |> Map.get(:traffic_class, :generic) |> to_string(),
|
||||||
|
operation: metadata |> Map.get(:operation, :query) |> to_string(),
|
||||||
|
outcome: metadata |> Map.get(:outcome, :unknown) |> to_string()
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp strategy_tag_values(metadata) do
|
||||||
|
%{strategy: metadata |> Map.get(:strategy, :unknown) |> to_string()}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp listener_tag_values(metadata) do
|
||||||
|
%{listener_id: metadata |> Map.get(:listener_id, :unknown) |> to_string()}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp rate_limit_tag_values(metadata) do
|
||||||
|
%{
|
||||||
|
scope: metadata |> Map.get(:scope, :unknown) |> to_string(),
|
||||||
|
traffic_class: metadata |> Map.get(:traffic_class, :generic) |> to_string()
|
||||||
|
}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp process_tag_values(metadata) do
|
||||||
|
process_type = metadata |> Map.get(:process_type, :unknown) |> to_string()
|
||||||
|
%{process_type: process_type}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp repo_query_tag_values(metadata) do
|
||||||
|
%{repo_role: metadata |> Map.get(:repo_role, :unknown) |> to_string()}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp status_tag_values(metadata) do
|
||||||
|
%{status: metadata |> Map.get(:status, :unknown) |> to_string()}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp attach_repo_query_handlers do
|
||||||
|
:telemetry.detach(@repo_query_handler_id)
|
||||||
|
|
||||||
|
:telemetry.attach_many(
|
||||||
|
@repo_query_handler_id,
|
||||||
|
[[:parrhesia, :repo, :query], [:parrhesia, :read_repo, :query]],
|
||||||
|
&__MODULE__.handle_repo_query_event/4,
|
||||||
|
nil
|
||||||
|
)
|
||||||
|
|
||||||
|
:ok
|
||||||
|
rescue
|
||||||
|
ArgumentError -> :ok
|
||||||
|
end
|
||||||
|
|
||||||
|
@doc false
|
||||||
|
def handle_repo_query_event(event_name, measurements, _metadata, _config) do
|
||||||
|
repo_role =
|
||||||
|
case event_name do
|
||||||
|
[:parrhesia, :read_repo, :query] -> :read
|
||||||
|
[:parrhesia, :repo, :query] -> :write
|
||||||
|
end
|
||||||
|
|
||||||
|
total_time =
|
||||||
|
Map.get(
|
||||||
|
measurements,
|
||||||
|
:total_time,
|
||||||
|
Map.get(measurements, :queue_time, 0) +
|
||||||
|
Map.get(measurements, :query_time, 0) +
|
||||||
|
Map.get(measurements, :decode_time, 0)
|
||||||
|
)
|
||||||
|
|
||||||
|
emit(
|
||||||
|
[:parrhesia, :db, :query],
|
||||||
|
%{
|
||||||
|
count: 1,
|
||||||
|
total_time: total_time,
|
||||||
|
queue_time: Map.get(measurements, :queue_time, 0),
|
||||||
|
query_time: Map.get(measurements, :query_time, 0),
|
||||||
|
decode_time: Map.get(measurements, :decode_time, 0),
|
||||||
|
idle_time: Map.get(measurements, :idle_time, 0)
|
||||||
|
},
|
||||||
|
%{repo_role: repo_role}
|
||||||
|
)
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|||||||
@@ -12,6 +12,9 @@ defmodule Parrhesia.TestSupport.ExpirationStubEvents do
|
|||||||
@impl true
|
@impl true
|
||||||
def query(_context, _filters, _opts), do: {:ok, []}
|
def query(_context, _filters, _opts), do: {:ok, []}
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def query_event_refs(_context, _filters, _opts), do: {:ok, []}
|
||||||
|
|
||||||
@impl true
|
@impl true
|
||||||
def count(_context, _filters, _opts), do: {:ok, 0}
|
def count(_context, _filters, _opts), do: {:ok, 0}
|
||||||
|
|
||||||
|
|||||||
@@ -12,6 +12,9 @@ defmodule Parrhesia.TestSupport.FailingEvents do
|
|||||||
@impl true
|
@impl true
|
||||||
def query(_context, _filters, _opts), do: {:error, :db_down}
|
def query(_context, _filters, _opts), do: {:error, :db_down}
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def query_event_refs(_context, _filters, _opts), do: {:error, :db_down}
|
||||||
|
|
||||||
@impl true
|
@impl true
|
||||||
def count(_context, _filters, _opts), do: {:error, :db_down}
|
def count(_context, _filters, _opts), do: {:error, :db_down}
|
||||||
|
|
||||||
|
|||||||
@@ -21,6 +21,9 @@ defmodule Parrhesia.TestSupport.PermissiveModeration do
|
|||||||
@impl true
|
@impl true
|
||||||
def pubkey_allowed?(_context, _pubkey), do: {:ok, true}
|
def pubkey_allowed?(_context, _pubkey), do: {:ok, true}
|
||||||
|
|
||||||
|
@impl true
|
||||||
|
def has_allowed_pubkeys?(_context), do: {:ok, false}
|
||||||
|
|
||||||
@impl true
|
@impl true
|
||||||
def ban_event(_context, _event_id), do: :ok
|
def ban_event(_context, _event_id), do: :ok
|
||||||
|
|
||||||
|
|||||||
49
lib/parrhesia/test_support/sync_fake_relay/plug.ex
Normal file
49
lib/parrhesia/test_support/sync_fake_relay/plug.ex
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
defmodule Parrhesia.TestSupport.SyncFakeRelay.Plug do
|
||||||
|
@moduledoc false
|
||||||
|
|
||||||
|
import Plug.Conn
|
||||||
|
|
||||||
|
alias Parrhesia.TestSupport.SyncFakeRelay.Server
|
||||||
|
|
||||||
|
def init(opts), do: opts
|
||||||
|
|
||||||
|
def call(conn, opts) do
|
||||||
|
server = Keyword.fetch!(opts, :server)
|
||||||
|
|
||||||
|
cond do
|
||||||
|
conn.request_path == "/relay" and wants_nip11?(conn) ->
|
||||||
|
send_json(conn, 200, Server.document(server))
|
||||||
|
|
||||||
|
conn.request_path == "/relay" ->
|
||||||
|
conn
|
||||||
|
|> WebSockAdapter.upgrade(
|
||||||
|
Parrhesia.TestSupport.SyncFakeRelay.Socket,
|
||||||
|
%{server: server, relay_url: relay_url(conn)},
|
||||||
|
timeout: 60_000
|
||||||
|
)
|
||||||
|
|> halt()
|
||||||
|
|
||||||
|
true ->
|
||||||
|
send_resp(conn, 404, "not found")
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp wants_nip11?(conn) do
|
||||||
|
conn
|
||||||
|
|> get_req_header("accept")
|
||||||
|
|> Enum.any?(&String.contains?(&1, "application/nostr+json"))
|
||||||
|
end
|
||||||
|
|
||||||
|
defp send_json(conn, status, body) do
|
||||||
|
encoded = JSON.encode!(body)
|
||||||
|
|
||||||
|
conn
|
||||||
|
|> put_resp_content_type("application/nostr+json")
|
||||||
|
|> send_resp(status, encoded)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp relay_url(conn) do
|
||||||
|
scheme = if conn.scheme == :https, do: "wss", else: "ws"
|
||||||
|
"#{scheme}://#{conn.host}:#{conn.port}#{conn.request_path}"
|
||||||
|
end
|
||||||
|
end
|
||||||
65
lib/parrhesia/test_support/sync_fake_relay/server.ex
Normal file
65
lib/parrhesia/test_support/sync_fake_relay/server.ex
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
defmodule Parrhesia.TestSupport.SyncFakeRelay.Server do
|
||||||
|
@moduledoc false
|
||||||
|
|
||||||
|
use Agent
|
||||||
|
|
||||||
|
def start_link(opts) do
|
||||||
|
name = Keyword.fetch!(opts, :name)
|
||||||
|
|
||||||
|
initial_state = %{
|
||||||
|
pubkey: Keyword.fetch!(opts, :pubkey),
|
||||||
|
expected_client_pubkey: Keyword.fetch!(opts, :expected_client_pubkey),
|
||||||
|
initial_events: Keyword.get(opts, :initial_events, []),
|
||||||
|
subscribers: %{}
|
||||||
|
}
|
||||||
|
|
||||||
|
Agent.start_link(fn -> initial_state end, name: name)
|
||||||
|
end
|
||||||
|
|
||||||
|
def document(server) do
|
||||||
|
Agent.get(server, fn state ->
|
||||||
|
%{
|
||||||
|
"name" => "Sync Fake Relay",
|
||||||
|
"description" => "test relay",
|
||||||
|
"pubkey" => state.pubkey,
|
||||||
|
"supported_nips" => [1, 11, 42]
|
||||||
|
}
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
def initial_events(server) do
|
||||||
|
Agent.get(server, & &1.initial_events)
|
||||||
|
end
|
||||||
|
|
||||||
|
def expected_client_pubkey(server) do
|
||||||
|
Agent.get(server, & &1.expected_client_pubkey)
|
||||||
|
end
|
||||||
|
|
||||||
|
def register_subscription(server, pid, subscription_id) do
|
||||||
|
Agent.update(server, fn state ->
|
||||||
|
put_in(state, [:subscribers, {pid, subscription_id}], true)
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
def unregister_subscription(server, pid, subscription_id) do
|
||||||
|
Agent.update(server, fn state ->
|
||||||
|
update_in(state.subscribers, &Map.delete(&1, {pid, subscription_id}))
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
def publish_live_event(server, event) do
|
||||||
|
subscribers =
|
||||||
|
Agent.get_and_update(server, fn state ->
|
||||||
|
{
|
||||||
|
Map.keys(state.subscribers),
|
||||||
|
%{state | initial_events: state.initial_events ++ [event]}
|
||||||
|
}
|
||||||
|
end)
|
||||||
|
|
||||||
|
Enum.each(subscribers, fn {pid, subscription_id} ->
|
||||||
|
send(pid, {:sync_fake_relay_event, subscription_id, event})
|
||||||
|
end)
|
||||||
|
|
||||||
|
:ok
|
||||||
|
end
|
||||||
|
end
|
||||||
118
lib/parrhesia/test_support/sync_fake_relay/socket.ex
Normal file
118
lib/parrhesia/test_support/sync_fake_relay/socket.ex
Normal file
@@ -0,0 +1,118 @@
|
|||||||
|
defmodule Parrhesia.TestSupport.SyncFakeRelay.Socket do
|
||||||
|
@moduledoc false
|
||||||
|
|
||||||
|
@behaviour WebSock
|
||||||
|
|
||||||
|
alias Parrhesia.TestSupport.SyncFakeRelay.Server
|
||||||
|
|
||||||
|
def init(state), do: {:ok, Map.put(state, :authenticated?, false)}
|
||||||
|
|
||||||
|
def handle_in({payload, [opcode: :text]}, state) do
|
||||||
|
case JSON.decode(payload) do
|
||||||
|
{:ok, ["REQ", subscription_id | _filters]} ->
|
||||||
|
maybe_authorize_req(state, subscription_id)
|
||||||
|
|
||||||
|
{:ok, ["AUTH", auth_event]} when is_map(auth_event) ->
|
||||||
|
handle_auth(auth_event, state)
|
||||||
|
|
||||||
|
{:ok, ["CLOSE", subscription_id]} ->
|
||||||
|
Server.unregister_subscription(state.server, self(), subscription_id)
|
||||||
|
|
||||||
|
{:push, {:text, JSON.encode!(["CLOSED", subscription_id, "error: subscription closed"])},
|
||||||
|
state}
|
||||||
|
|
||||||
|
_other ->
|
||||||
|
{:ok, state}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_in(_frame, state), do: {:ok, state}
|
||||||
|
|
||||||
|
def handle_info({:sync_fake_relay_event, subscription_id, event}, state) do
|
||||||
|
{:push, {:text, JSON.encode!(["EVENT", subscription_id, event])}, state}
|
||||||
|
end
|
||||||
|
|
||||||
|
def handle_info(_message, state), do: {:ok, state}
|
||||||
|
|
||||||
|
def terminate(_reason, state) do
|
||||||
|
Enum.each(Map.get(state, :subscriptions, []), fn subscription_id ->
|
||||||
|
Server.unregister_subscription(state.server, self(), subscription_id)
|
||||||
|
end)
|
||||||
|
|
||||||
|
:ok
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_authorize_req(%{authenticated?: true} = state, subscription_id) do
|
||||||
|
Server.register_subscription(state.server, self(), subscription_id)
|
||||||
|
|
||||||
|
frames =
|
||||||
|
Server.initial_events(state.server)
|
||||||
|
|> Enum.map(fn event -> {:text, JSON.encode!(["EVENT", subscription_id, event])} end)
|
||||||
|
|> Kernel.++([{:text, JSON.encode!(["EOSE", subscription_id])}])
|
||||||
|
|
||||||
|
next_state =
|
||||||
|
state
|
||||||
|
|> Map.update(:subscriptions, [subscription_id], &[subscription_id | &1])
|
||||||
|
|
||||||
|
{:push, frames, next_state}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp maybe_authorize_req(state, subscription_id) do
|
||||||
|
challenge = Base.encode16(:crypto.strong_rand_bytes(12), case: :lower)
|
||||||
|
|
||||||
|
next_state =
|
||||||
|
state
|
||||||
|
|> Map.put(:challenge, challenge)
|
||||||
|
|> Map.put(:pending_subscription_id, subscription_id)
|
||||||
|
|
||||||
|
{:push,
|
||||||
|
[
|
||||||
|
{:text, JSON.encode!(["AUTH", challenge])},
|
||||||
|
{:text,
|
||||||
|
JSON.encode!(["CLOSED", subscription_id, "auth-required: sync access requires AUTH"])}
|
||||||
|
], next_state}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp handle_auth(auth_event, state) do
|
||||||
|
challenge_ok? = has_tag?(auth_event, "challenge", state.challenge)
|
||||||
|
relay_ok? = has_tag?(auth_event, "relay", state.relay_url)
|
||||||
|
pubkey_ok? = Map.get(auth_event, "pubkey") == Server.expected_client_pubkey(state.server)
|
||||||
|
|
||||||
|
if challenge_ok? and relay_ok? and pubkey_ok? do
|
||||||
|
accepted_state = %{state | authenticated?: true}
|
||||||
|
ok_frame = ["OK", Map.get(auth_event, "id"), true, "ok: auth accepted"]
|
||||||
|
|
||||||
|
if subscription_id = Map.get(accepted_state, :pending_subscription_id) do
|
||||||
|
next_state =
|
||||||
|
accepted_state
|
||||||
|
|> Map.delete(:pending_subscription_id)
|
||||||
|
|> Map.update(:subscriptions, [subscription_id], &[subscription_id | &1])
|
||||||
|
|
||||||
|
Server.register_subscription(state.server, self(), subscription_id)
|
||||||
|
|
||||||
|
{:push,
|
||||||
|
[{:text, JSON.encode!(ok_frame)} | auth_success_frames(accepted_state, subscription_id)],
|
||||||
|
next_state}
|
||||||
|
else
|
||||||
|
{:push, {:text, JSON.encode!(ok_frame)}, accepted_state}
|
||||||
|
end
|
||||||
|
else
|
||||||
|
{:push,
|
||||||
|
{:text, JSON.encode!(["OK", Map.get(auth_event, "id"), false, "invalid: auth rejected"])},
|
||||||
|
state}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp auth_success_frames(state, subscription_id) do
|
||||||
|
Server.initial_events(state.server)
|
||||||
|
|> Enum.map(fn event -> {:text, JSON.encode!(["EVENT", subscription_id, event])} end)
|
||||||
|
|> Kernel.++([{:text, JSON.encode!(["EOSE", subscription_id])}])
|
||||||
|
end
|
||||||
|
|
||||||
|
defp has_tag?(event, name, expected_value) do
|
||||||
|
Enum.any?(Map.get(event, "tags", []), fn
|
||||||
|
[^name, ^expected_value | _rest] -> true
|
||||||
|
_other -> false
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
end
|
||||||
145
lib/parrhesia/test_support/tls_certs.ex
Normal file
145
lib/parrhesia/test_support/tls_certs.ex
Normal file
@@ -0,0 +1,145 @@
|
|||||||
|
defmodule Parrhesia.TestSupport.TLSCerts do
|
||||||
|
@moduledoc false
|
||||||
|
|
||||||
|
@spec create_ca!(String.t(), String.t()) :: map()
|
||||||
|
def create_ca!(dir, name) do
|
||||||
|
keyfile = Path.join(dir, "#{name}-ca.key.pem")
|
||||||
|
certfile = Path.join(dir, "#{name}-ca.cert.pem")
|
||||||
|
|
||||||
|
openssl!([
|
||||||
|
"req",
|
||||||
|
"-x509",
|
||||||
|
"-newkey",
|
||||||
|
"rsa:2048",
|
||||||
|
"-nodes",
|
||||||
|
"-sha256",
|
||||||
|
"-days",
|
||||||
|
"2",
|
||||||
|
"-subj",
|
||||||
|
"/CN=#{name} Test CA",
|
||||||
|
"-keyout",
|
||||||
|
keyfile,
|
||||||
|
"-out",
|
||||||
|
certfile
|
||||||
|
])
|
||||||
|
|
||||||
|
%{keyfile: keyfile, certfile: certfile}
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec issue_server_cert!(String.t(), map(), String.t()) :: map()
|
||||||
|
def issue_server_cert!(dir, ca, name) do
|
||||||
|
issue_cert!(
|
||||||
|
dir,
|
||||||
|
ca,
|
||||||
|
name,
|
||||||
|
"localhost",
|
||||||
|
["DNS:localhost", "IP:127.0.0.1"],
|
||||||
|
"serverAuth"
|
||||||
|
)
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec issue_client_cert!(String.t(), map(), String.t()) :: map()
|
||||||
|
def issue_client_cert!(dir, ca, name) do
|
||||||
|
issue_cert!(dir, ca, name, name, [], "clientAuth")
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec spki_pin!(String.t()) :: String.t()
|
||||||
|
def spki_pin!(certfile) do
|
||||||
|
certfile
|
||||||
|
|> der_cert!()
|
||||||
|
|> spki_pin()
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec cert_sha256!(String.t()) :: String.t()
|
||||||
|
def cert_sha256!(certfile) do
|
||||||
|
certfile
|
||||||
|
|> der_cert!()
|
||||||
|
|> then(&Base.encode64(:crypto.hash(:sha256, &1)))
|
||||||
|
end
|
||||||
|
|
||||||
|
defp issue_cert!(dir, ca, name, common_name, san_entries, extended_key_usage) do
|
||||||
|
keyfile = Path.join(dir, "#{name}.key.pem")
|
||||||
|
csrfile = Path.join(dir, "#{name}.csr.pem")
|
||||||
|
certfile = Path.join(dir, "#{name}.cert.pem")
|
||||||
|
extfile = Path.join(dir, "#{name}.ext.cnf")
|
||||||
|
|
||||||
|
openssl!([
|
||||||
|
"req",
|
||||||
|
"-new",
|
||||||
|
"-newkey",
|
||||||
|
"rsa:2048",
|
||||||
|
"-nodes",
|
||||||
|
"-subj",
|
||||||
|
"/CN=#{common_name}",
|
||||||
|
"-keyout",
|
||||||
|
keyfile,
|
||||||
|
"-out",
|
||||||
|
csrfile
|
||||||
|
])
|
||||||
|
|
||||||
|
File.write!(extfile, extension_config(san_entries, extended_key_usage))
|
||||||
|
|
||||||
|
openssl!([
|
||||||
|
"x509",
|
||||||
|
"-req",
|
||||||
|
"-in",
|
||||||
|
csrfile,
|
||||||
|
"-CA",
|
||||||
|
ca.certfile,
|
||||||
|
"-CAkey",
|
||||||
|
ca.keyfile,
|
||||||
|
"-CAcreateserial",
|
||||||
|
"-out",
|
||||||
|
certfile,
|
||||||
|
"-days",
|
||||||
|
"2",
|
||||||
|
"-sha256",
|
||||||
|
"-extfile",
|
||||||
|
extfile,
|
||||||
|
"-extensions",
|
||||||
|
"v3_req"
|
||||||
|
])
|
||||||
|
|
||||||
|
%{keyfile: keyfile, certfile: certfile}
|
||||||
|
end
|
||||||
|
|
||||||
|
defp extension_config(san_entries, extended_key_usage) do
|
||||||
|
san_block =
|
||||||
|
case san_entries do
|
||||||
|
[] -> ""
|
||||||
|
entries -> "subjectAltName = #{Enum.join(entries, ",")}\n"
|
||||||
|
end
|
||||||
|
|
||||||
|
"""
|
||||||
|
[v3_req]
|
||||||
|
basicConstraints = CA:FALSE
|
||||||
|
keyUsage = digitalSignature,keyEncipherment
|
||||||
|
extendedKeyUsage = #{extended_key_usage}
|
||||||
|
#{san_block}
|
||||||
|
"""
|
||||||
|
end
|
||||||
|
|
||||||
|
defp der_cert!(certfile) do
|
||||||
|
certfile
|
||||||
|
|> File.read!()
|
||||||
|
|> :public_key.pem_decode()
|
||||||
|
|> List.first()
|
||||||
|
|> elem(1)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp spki_pin(cert_der) do
|
||||||
|
cert = :public_key.pkix_decode_cert(cert_der, :plain)
|
||||||
|
spki = cert |> elem(1) |> elem(7)
|
||||||
|
|
||||||
|
:public_key.der_encode(:SubjectPublicKeyInfo, spki)
|
||||||
|
|> then(&:crypto.hash(:sha256, &1))
|
||||||
|
|> Base.encode64()
|
||||||
|
end
|
||||||
|
|
||||||
|
defp openssl!(args) do
|
||||||
|
case System.cmd("/usr/bin/openssl", args, stderr_to_stdout: true) do
|
||||||
|
{output, 0} -> output
|
||||||
|
{output, status} -> raise "openssl failed with status #{status}: #{output}"
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -1,29 +1,102 @@
|
|||||||
defmodule Parrhesia.Web.Endpoint do
|
defmodule Parrhesia.Web.Endpoint do
|
||||||
@moduledoc """
|
@moduledoc """
|
||||||
Supervision entrypoint for WS/HTTP ingress.
|
Supervision entrypoint for configured ingress listeners.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
use Supervisor
|
use Supervisor
|
||||||
|
|
||||||
def start_link(init_arg \\ []) do
|
alias Parrhesia.Web.Listener
|
||||||
Supervisor.start_link(__MODULE__, init_arg, name: __MODULE__)
|
|
||||||
|
def start_link(opts \\ []) do
|
||||||
|
name = Keyword.get(opts, :name, __MODULE__)
|
||||||
|
listeners = Keyword.get(opts, :listeners, :configured)
|
||||||
|
Supervisor.start_link(__MODULE__, listeners, name: name)
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec reload_listener(Supervisor.supervisor(), atom()) :: :ok | {:error, term()}
|
||||||
|
def reload_listener(supervisor \\ __MODULE__, listener_id) when is_atom(listener_id) do
|
||||||
|
with :ok <- Supervisor.terminate_child(supervisor, {:listener, listener_id}),
|
||||||
|
:ok <- clear_pem_cache(),
|
||||||
|
{:ok, _pid} <- Supervisor.restart_child(supervisor, {:listener, listener_id}) do
|
||||||
|
:ok
|
||||||
|
else
|
||||||
|
{:error, :not_found} = error -> error
|
||||||
|
{:error, _reason} = error -> error
|
||||||
|
other -> other
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
@spec reload_all(Supervisor.supervisor()) :: :ok | {:error, term()}
|
||||||
|
def reload_all(supervisor \\ __MODULE__) do
|
||||||
|
listener_ids =
|
||||||
|
supervisor
|
||||||
|
|> Supervisor.which_children()
|
||||||
|
|> Enum.flat_map(fn
|
||||||
|
{{:listener, listener_id}, _pid, _type, _modules} -> [listener_id]
|
||||||
|
_other -> []
|
||||||
|
end)
|
||||||
|
|
||||||
|
with :ok <- terminate_listeners(supervisor, listener_ids),
|
||||||
|
:ok <- clear_pem_cache() do
|
||||||
|
restart_listeners(supervisor, listener_ids)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp terminate_listeners(_supervisor, []), do: :ok
|
||||||
|
|
||||||
|
defp terminate_listeners(supervisor, [listener_id | rest]) do
|
||||||
|
case Supervisor.terminate_child(supervisor, {:listener, listener_id}) do
|
||||||
|
:ok -> terminate_listeners(supervisor, rest)
|
||||||
|
{:error, _reason} = error -> error
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
defp restart_listeners(_supervisor, []), do: :ok
|
||||||
|
|
||||||
|
defp restart_listeners(supervisor, [listener_id | rest]) do
|
||||||
|
case Supervisor.restart_child(supervisor, {:listener, listener_id}) do
|
||||||
|
{:ok, _pid} -> restart_listeners(supervisor, rest)
|
||||||
|
{:error, _reason} = error -> error
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
# OTP's ssl module caches PEM file contents by filename. When cert/key
|
||||||
|
# files are replaced on disk, the cache must be cleared so the restarted
|
||||||
|
# listener reads the updated files.
|
||||||
|
defp clear_pem_cache do
|
||||||
|
:ssl.clear_pem_cache()
|
||||||
|
:ok
|
||||||
end
|
end
|
||||||
|
|
||||||
@impl true
|
@impl true
|
||||||
def init(init_arg) do
|
def init(listeners) do
|
||||||
children = [
|
children =
|
||||||
{Bandit, bandit_options(init_arg)}
|
listeners(listeners)
|
||||||
]
|
|> Enum.map(fn listener ->
|
||||||
|
%{
|
||||||
|
id: {:listener, listener.id},
|
||||||
|
start: {Bandit, :start_link, [Listener.bandit_options(listener)]}
|
||||||
|
}
|
||||||
|
end)
|
||||||
|
|
||||||
Supervisor.init(children, strategy: :one_for_one)
|
Supervisor.init(children, strategy: :one_for_one)
|
||||||
end
|
end
|
||||||
|
|
||||||
defp bandit_options(overrides) do
|
defp listeners(:configured), do: Listener.all()
|
||||||
configured = Application.get_env(:parrhesia, __MODULE__, [])
|
|
||||||
|
|
||||||
configured
|
defp listeners(listeners) when is_list(listeners) do
|
||||||
|> Keyword.merge(overrides)
|
Enum.map(listeners, fn
|
||||||
|> Keyword.put_new(:scheme, :http)
|
{id, listener} when is_atom(id) and is_map(listener) ->
|
||||||
|> Keyword.put_new(:plug, Parrhesia.Web.Router)
|
Listener.from_opts(listener: Map.put_new(listener, :id, id))
|
||||||
|
|
||||||
|
listener ->
|
||||||
|
Listener.from_opts(listener: listener)
|
||||||
|
end)
|
||||||
|
end
|
||||||
|
|
||||||
|
defp listeners(listeners) when is_map(listeners) do
|
||||||
|
listeners
|
||||||
|
|> Enum.map(fn {id, listener} -> {id, listener} end)
|
||||||
|
|> listeners()
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user