19 Commits

Author SHA1 Message Date
e557eba437 Fix connection test harness startup assumptions
Some checks failed
CI / Test (OTP 27.2 / Elixir 1.18.2) (push) Failing after 1s
CI / Test (OTP 28.4 / Elixir 1.19.4 + Marmot E2E) (push) Failing after 1s
Release / Release Gate (push) Failing after 1s
Release / Build and publish image (push) Has been skipped
2026-03-17 03:13:45 +01:00
a652bf3448 Add sync e2e aliases and release checks
Some checks failed
CI / Test (OTP 27.2 / Elixir 1.18.2) (push) Failing after 1s
CI / Test (OTP 28.4 / Elixir 1.19.4 + Marmot E2E) (push) Failing after 1s
Release / Release Gate (push) Failing after 1s
Release / Build and publish image (push) Has been skipped
2026-03-17 03:00:50 +01:00
f518a25cab Add node sync e2e harness and bump 0.5.0
Some checks failed
CI / Test (OTP 27.2 / Elixir 1.18.2) (push) Failing after 1s
CI / Test (OTP 28.4 / Elixir 1.19.4 + Marmot E2E) (push) Failing after 1s
2026-03-17 02:47:42 +01:00
02f2584757 Align websocket and admin APIs with shared surfaces 2026-03-17 01:08:41 +01:00
e8fd6c7328 Add listener TLS support and pinning tests 2026-03-17 00:48:48 +01:00
1f608ee2bd Refactor ingress to listener-based configuration 2026-03-16 23:47:17 +01:00
5f4f086d28 Add outbound sync worker runtime 2026-03-16 21:55:03 +01:00
9be3b6ca52 Add sync control plane API 2026-03-16 21:23:39 +01:00
769177a63e Add shared auth and identity APIs 2026-03-16 21:07:26 +01:00
987415d80c docs: README intro
Some checks failed
CI / Test (OTP 27.2 / Elixir 1.18.2) (push) Failing after 1s
CI / Test (OTP 28.4 / Elixir 1.19.4 + Marmot E2E) (push) Failing after 1s
2026-03-16 20:31:21 +01:00
d119d21d99 Extract API events and stream layers
Some checks failed
CI / Test (OTP 27.2 / Elixir 1.18.2) (push) Failing after 1s
CI / Test (OTP 28.4 / Elixir 1.19.4 + Marmot E2E) (push) Failing after 1s
2026-03-16 20:21:58 +01:00
5d4d181d00 Add trusted proxy IP enforcement tests 2026-03-16 19:09:27 +01:00
fd17026c32 Implement ACL runtime enforcement and management API 2026-03-16 17:49:16 +01:00
14fb0f7ffb docs: Study Khatru 2026-03-16 16:53:55 +01:00
186d0f98ee improve: NIF-77 single-filter fast path 2026-03-16 16:30:07 +01:00
39dbc069a7 feat: NIF-77 negentropy sync 2026-03-16 16:00:15 +01:00
4c2c93deb3 docs: Sketch NIF-77 sync and ACLs 2026-03-16 14:57:08 +01:00
b628770517 docs: Narrow NIP-DBSYNC protocol 2026-03-16 12:58:21 +01:00
705971cbc4 docs: Nostr DB sync proposal 2026-03-16 12:57:22 +01:00
99 changed files with 12383 additions and 954 deletions

View File

@@ -113,5 +113,9 @@ jobs:
- name: Run tests - name: Run tests
run: mix test --color run: mix test --color
- name: Run Node Sync E2E tests
if: ${{ matrix.main }}
run: mix test.node_sync_e2e
- name: Run Marmot E2E tests - name: Run Marmot E2E tests
run: mix test.marmot_e2e run: mix test.marmot_e2e

View File

@@ -89,6 +89,17 @@ jobs:
if: steps.deps-cache.outputs.cache-hit != 'true' if: steps.deps-cache.outputs.cache-hit != 'true'
run: mix deps.get run: mix deps.get
- name: Check tag matches Mix version
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
run: |
TAG_VERSION="${GITHUB_REF_NAME#v}"
MIX_VERSION="$(mix run --no-start -e 'IO.puts(Mix.Project.config()[:version])' | tail -n 1)"
if [ "$TAG_VERSION" != "$MIX_VERSION" ]; then
echo "Tag version $TAG_VERSION does not match mix.exs version $MIX_VERSION"
exit 1
fi
- name: Compile - name: Compile
run: mix compile --warnings-as-errors run: mix compile --warnings-as-errors
@@ -101,6 +112,9 @@ jobs:
- name: Run tests - name: Run tests
run: mix test --color run: mix test --color
- name: Run Node Sync E2E
run: mix test.node_sync_e2e
- name: Run Marmot E2E - name: Run Marmot E2E
run: mix test.marmot_e2e run: mix test.marmot_e2e

View File

@@ -4,12 +4,27 @@
Parrhesia is a Nostr relay server written in Elixir/OTP with PostgreSQL storage. Parrhesia is a Nostr relay server written in Elixir/OTP with PostgreSQL storage.
**ALPHA CONDITION: BREAKING CHANGES MIGHT HAPPEN!**
- Advanced Querying: Full-text search (NIP-50) and COUNT queries (NIP-45).
- Secure Messaging: First-class support for Marmot MLS-encrypted groups and NIP-17/44/59 gift-wrapped DMs.
- Identity & Auth: NIP-42 authentication flows and NIP-86 management API with NIP-98 HTTP auth.
- Data Integrity: Negentropy-based synchronization and NIP-62 vanish flows.
It exposes: It exposes:
- a WebSocket relay endpoint at `/relay`
- listener-configurable WS/HTTP ingress, with a default `public` listener on port `4413`
- a WebSocket relay endpoint at `/relay` on listeners that enable the `nostr` feature
- NIP-11 relay info on `GET /relay` with `Accept: application/nostr+json` - NIP-11 relay info on `GET /relay` with `Accept: application/nostr+json`
- operational HTTP endpoints (`/health`, `/ready`, `/metrics`) - operational HTTP endpoints such as `/health`, `/ready`, and `/metrics` on listeners that enable them
- `/metrics` is restricted by default to private/loopback source IPs - a NIP-86-style management API at `POST /management` on listeners that enable the `admin` feature
- a NIP-86-style management API at `POST /management` (NIP-98 auth)
Listeners can run in plain HTTP, HTTPS, mutual TLS, or proxy-terminated TLS modes. The current TLS implementation supports:
- server TLS on listener sockets
- optional client certificate admission with listener-side client pin checks
- proxy-asserted client TLS identity on trusted proxy hops
- admin-triggered certificate reload by restarting an individual listener from disk
## Supported NIPs ## Supported NIPs
@@ -48,7 +63,7 @@ mix setup
mix run --no-halt mix run --no-halt
``` ```
Server listens on `http://localhost:4413` by default. The default `public` listener binds to `http://localhost:4413`.
WebSocket clients should connect to: WebSocket clients should connect to:
@@ -66,6 +81,28 @@ ws://localhost:4413/relay
--- ---
## Test suites
Primary test entrypoints:
- `mix test` for the ExUnit suite
- `mix test.marmot_e2e` for the Marmot client end-to-end suite
- `mix test.node_sync_e2e` for the two-node relay sync end-to-end suite
- `mix test.node_sync_docker_e2e` for the release-image Docker two-node relay sync suite
The node-sync harnesses are driven by:
- [`scripts/run_node_sync_e2e.sh`](./scripts/run_node_sync_e2e.sh)
- [`scripts/run_node_sync_docker_e2e.sh`](./scripts/run_node_sync_docker_e2e.sh)
- [`scripts/node_sync_e2e.exs`](./scripts/node_sync_e2e.exs)
- [`compose.node-sync-e2e.yaml`](./compose.node-sync-e2e.yaml)
`mix test.node_sync_e2e` runs two real Parrhesia nodes against separate PostgreSQL databases, verifies catch-up and live sync, restarts one node, and verifies persisted resume behavior. `mix test.node_sync_docker_e2e` runs the same scenario against the release Docker image.
GitHub CI currently runs the non-Docker node-sync e2e on the main Linux matrix job. The Docker node-sync e2e remains an explicit/manual check because it depends on release-image build/runtime fidelity and a working Docker host.
---
## Production configuration ## Production configuration
### Minimal setup ### Minimal setup
@@ -75,8 +112,8 @@ Before a Nostr client can publish its first event successfully, make sure these
1. PostgreSQL is reachable from Parrhesia. 1. PostgreSQL is reachable from Parrhesia.
Set `DATABASE_URL` and create/migrate the database with `Parrhesia.Release.migrate()` or `mix ecto.migrate`. Set `DATABASE_URL` and create/migrate the database with `Parrhesia.Release.migrate()` or `mix ecto.migrate`.
2. Parrhesia is reachable behind your reverse proxy. 2. Parrhesia listeners are configured for your deployment.
Parrhesia itself listens on plain HTTP on port `4413`, and the reverse proxy is expected to terminate TLS and forward WebSocket traffic to `/relay`. The default config exposes a `public` listener on plain HTTP port `4413`, and a reverse proxy can terminate TLS and forward WebSocket traffic to `/relay`. Additional listeners can be defined in `config/*.exs`.
3. `:relay_url` matches the public relay URL clients should use. 3. `:relay_url` matches the public relay URL clients should use.
Set `PARRHESIA_RELAY_URL` to the public relay URL exposed by the reverse proxy. Set `PARRHESIA_RELAY_URL` to the public relay URL exposed by the reverse proxy.
@@ -92,7 +129,7 @@ In `prod`, these environment variables are used:
- `DATABASE_URL` (**required**), e.g. `ecto://USER:PASS@HOST/parrhesia_prod` - `DATABASE_URL` (**required**), e.g. `ecto://USER:PASS@HOST/parrhesia_prod`
- `POOL_SIZE` (optional, default `32`) - `POOL_SIZE` (optional, default `32`)
- `PORT` (optional, default `4413`) - `PORT` (optional, default `4413`)
- `PARRHESIA_*` runtime overrides for relay config, limits, policies, metrics, and features - `PARRHESIA_*` runtime overrides for relay config, limits, policies, listener-related metrics helpers, and features
- `PARRHESIA_EXTRA_CONFIG` (optional path to an extra runtime config file) - `PARRHESIA_EXTRA_CONFIG` (optional path to an extra runtime config file)
`config/runtime.exs` reads these values at runtime in production releases. `config/runtime.exs` reads these values at runtime in production releases.
@@ -102,6 +139,7 @@ In `prod`, these environment variables are used:
For runtime overrides, use the `PARRHESIA_...` prefix: For runtime overrides, use the `PARRHESIA_...` prefix:
- `PARRHESIA_RELAY_URL` - `PARRHESIA_RELAY_URL`
- `PARRHESIA_TRUSTED_PROXIES`
- `PARRHESIA_MODERATION_CACHE_ENABLED` - `PARRHESIA_MODERATION_CACHE_ENABLED`
- `PARRHESIA_ENABLE_EXPIRATION_WORKER` - `PARRHESIA_ENABLE_EXPIRATION_WORKER`
- `PARRHESIA_LIMITS_*` - `PARRHESIA_LIMITS_*`
@@ -120,6 +158,8 @@ export PARRHESIA_METRICS_ALLOWED_CIDRS="10.0.0.0/8,192.168.0.0/16"
export PARRHESIA_LIMITS_OUTBOUND_OVERFLOW_STRATEGY=drop_oldest export PARRHESIA_LIMITS_OUTBOUND_OVERFLOW_STRATEGY=drop_oldest
``` ```
Listeners themselves are primarily configured under `config :parrhesia, :listeners, ...`. The current runtime env helpers tune the default public listener and the optional dedicated metrics listener.
For settings that are awkward to express as env vars, mount an extra config file and set `PARRHESIA_EXTRA_CONFIG` to its path inside the container. For settings that are awkward to express as env vars, mount an extra config file and set `PARRHESIA_EXTRA_CONFIG` to its path inside the container.
### Config reference ### Config reference
@@ -135,7 +175,7 @@ CSV env vars use comma-separated values. Boolean env vars accept `1/0`, `true/fa
| `:enable_expiration_worker` | `PARRHESIA_ENABLE_EXPIRATION_WORKER` | `true` | Toggle background expiration worker | | `:enable_expiration_worker` | `PARRHESIA_ENABLE_EXPIRATION_WORKER` | `true` | Toggle background expiration worker |
| `:limits` | `PARRHESIA_LIMITS_*` | see table below | Runtime override group | | `:limits` | `PARRHESIA_LIMITS_*` | see table below | Runtime override group |
| `:policies` | `PARRHESIA_POLICIES_*` | see table below | Runtime override group | | `:policies` | `PARRHESIA_POLICIES_*` | see table below | Runtime override group |
| `:metrics` | `PARRHESIA_METRICS_*` | see table below | Runtime override group | | `:listeners` | config-file driven | see notes below | Ingress listeners with bind, transport, feature, auth, network, and baseline ACL settings |
| `:retention` | `PARRHESIA_RETENTION_*` | see table below | Partition lifecycle and pruning policy | | `:retention` | `PARRHESIA_RETENTION_*` | see table below | Partition lifecycle and pruning policy |
| `:features` | `PARRHESIA_FEATURES_*` | see table below | Runtime override group | | `:features` | `PARRHESIA_FEATURES_*` | see table below | Runtime override group |
| `:storage.events` | `-` | `Parrhesia.Storage.Adapters.Postgres.Events` | Config-file override only | | `:storage.events` | `-` | `Parrhesia.Storage.Adapters.Postgres.Events` | Config-file override only |
@@ -153,19 +193,17 @@ CSV env vars use comma-separated values. Boolean env vars accept `1/0`, `true/fa
| `:queue_interval` | `DB_QUEUE_INTERVAL_MS` | `5000` | Ecto queue interval in ms | | `:queue_interval` | `DB_QUEUE_INTERVAL_MS` | `5000` | Ecto queue interval in ms |
| `:types` | `-` | `Parrhesia.PostgresTypes` | Internal config-file setting | | `:types` | `-` | `Parrhesia.PostgresTypes` | Internal config-file setting |
#### `Parrhesia.Web.Endpoint` #### `:listeners`
| Atom key | ENV | Default | Notes | | Atom key | ENV | Default | Notes |
| --- | --- | --- | --- | | --- | --- | --- | --- |
| `:port` | `PORT` | `4413` | Main HTTP/WebSocket listener | | `:public.bind.port` | `PORT` | `4413` | Default public listener port |
| `:public.proxy.trusted_cidrs` | `PARRHESIA_TRUSTED_PROXIES` | `[]` | Trusted reverse proxies for forwarded IP handling |
| `:public.features.metrics.*` | `PARRHESIA_METRICS_*` | see below | Convenience runtime overrides for metrics on the public listener |
| `:metrics.bind.port` | `PARRHESIA_METRICS_ENDPOINT_PORT` | `9568` | Optional dedicated metrics listener port |
| `:metrics.enabled` | `PARRHESIA_METRICS_ENDPOINT_ENABLED` | `false` | Enables the optional dedicated metrics listener |
#### `Parrhesia.Web.MetricsEndpoint` Listener `transport.tls` supports `:disabled`, `:server`, `:mutual`, and `:proxy_terminated`. For TLS-enabled listeners, the main config-file fields are `certfile`, `keyfile`, optional `cacertfile`, optional `cipher_suite`, optional `client_pins`, and `proxy_headers` for proxy-terminated identity.
| Atom key | ENV | Default | Notes |
| --- | --- | --- | --- |
| `:enabled` | `PARRHESIA_METRICS_ENDPOINT_ENABLED` | `false` | Enables dedicated metrics listener |
| `:ip` | `PARRHESIA_METRICS_ENDPOINT_IP` | `127.0.0.1` | IPv4 only |
| `:port` | `PARRHESIA_METRICS_ENDPOINT_PORT` | `9568` | Dedicated metrics port |
#### `:limits` #### `:limits`
@@ -215,11 +253,11 @@ CSV env vars use comma-separated values. Boolean env vars accept `1/0`, `true/fa
| `:marmot_push_max_server_recipients` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_SERVER_RECIPIENTS` | `1` | | `:marmot_push_max_server_recipients` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_SERVER_RECIPIENTS` | `1` |
| `:management_auth_required` | `PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED` | `true` | | `:management_auth_required` | `PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED` | `true` |
#### `:metrics` #### Listener-related Metrics Helpers
| Atom key | ENV | Default | | Atom key | ENV | Default |
| --- | --- | --- | | --- | --- | --- |
| `:enabled_on_main_endpoint` | `PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT` | `true` | | `:public.features.metrics.enabled` | `PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT` | `true` |
| `:public` | `PARRHESIA_METRICS_PUBLIC` | `false` | | `:public` | `PARRHESIA_METRICS_PUBLIC` | `false` |
| `:private_networks_only` | `PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY` | `true` | | `:private_networks_only` | `PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY` | `true` |
| `:allowed_cidrs` | `PARRHESIA_METRICS_ALLOWED_CIDRS` | `[]` | | `:allowed_cidrs` | `PARRHESIA_METRICS_ALLOWED_CIDRS` | `[]` |
@@ -267,10 +305,10 @@ mix compile
mix release mix release
_build/prod/rel/parrhesia/bin/parrhesia eval "Parrhesia.Release.migrate()" _build/prod/rel/parrhesia/bin/parrhesia eval "Parrhesia.Release.migrate()"
_build/prod/rel/parrhesia/bin/parrhesia foreground _build/prod/rel/parrhesia/bin/parrhesia start
``` ```
For systemd/process managers, run the release command in foreground mode. For systemd/process managers, run the release command with `start`.
### Option B: Nix release package (`default.nix`) ### Option B: Nix release package (`default.nix`)

View File

@@ -0,0 +1,92 @@
services:
db-a:
image: postgres:17
restart: unless-stopped
environment:
POSTGRES_DB: parrhesia_a
POSTGRES_USER: parrhesia
POSTGRES_PASSWORD: parrhesia
healthcheck:
test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
interval: 5s
timeout: 5s
retries: 12
volumes:
- postgres-a-data:/var/lib/postgresql/data
db-b:
image: postgres:17
restart: unless-stopped
environment:
POSTGRES_DB: parrhesia_b
POSTGRES_USER: parrhesia
POSTGRES_PASSWORD: parrhesia
healthcheck:
test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
interval: 5s
timeout: 5s
retries: 12
volumes:
- postgres-b-data:/var/lib/postgresql/data
migrate-a:
image: ${PARRHESIA_IMAGE:-parrhesia:latest}
restart: "no"
depends_on:
db-a:
condition: service_healthy
environment:
DATABASE_URL: ecto://parrhesia:parrhesia@db-a:5432/parrhesia_a
POOL_SIZE: ${POOL_SIZE:-20}
PARRHESIA_ACL_PROTECTED_FILTERS: ${PARRHESIA_ACL_PROTECTED_FILTERS}
command: ["eval", "Parrhesia.Release.migrate()"]
migrate-b:
image: ${PARRHESIA_IMAGE:-parrhesia:latest}
restart: "no"
depends_on:
db-b:
condition: service_healthy
environment:
DATABASE_URL: ecto://parrhesia:parrhesia@db-b:5432/parrhesia_b
POOL_SIZE: ${POOL_SIZE:-20}
PARRHESIA_ACL_PROTECTED_FILTERS: ${PARRHESIA_ACL_PROTECTED_FILTERS}
command: ["eval", "Parrhesia.Release.migrate()"]
parrhesia-a:
image: ${PARRHESIA_IMAGE:-parrhesia:latest}
restart: unless-stopped
depends_on:
db-a:
condition: service_healthy
environment:
DATABASE_URL: ecto://parrhesia:parrhesia@db-a:5432/parrhesia_a
POOL_SIZE: ${POOL_SIZE:-20}
PORT: 4413
PARRHESIA_RELAY_URL: ${PARRHESIA_NODE_A_RELAY_URL:-ws://parrhesia-a:4413/relay}
PARRHESIA_ACL_PROTECTED_FILTERS: ${PARRHESIA_ACL_PROTECTED_FILTERS}
PARRHESIA_IDENTITY_PATH: /tmp/parrhesia-a/server_identity.json
PARRHESIA_SYNC_PATH: /tmp/parrhesia-a/sync_servers.json
ports:
- "${PARRHESIA_NODE_A_HOST_PORT:-45131}:4413"
parrhesia-b:
image: ${PARRHESIA_IMAGE:-parrhesia:latest}
restart: unless-stopped
depends_on:
db-b:
condition: service_healthy
environment:
DATABASE_URL: ecto://parrhesia:parrhesia@db-b:5432/parrhesia_b
POOL_SIZE: ${POOL_SIZE:-20}
PORT: 4413
PARRHESIA_RELAY_URL: ${PARRHESIA_NODE_B_RELAY_URL:-ws://parrhesia-b:4413/relay}
PARRHESIA_ACL_PROTECTED_FILTERS: ${PARRHESIA_ACL_PROTECTED_FILTERS}
PARRHESIA_IDENTITY_PATH: /tmp/parrhesia-b/server_identity.json
PARRHESIA_SYNC_PATH: /tmp/parrhesia-b/sync_servers.json
ports:
- "${PARRHESIA_NODE_B_HOST_PORT:-45132}:4413"
volumes:
postgres-a-data:
postgres-b-data:

View File

@@ -5,6 +5,14 @@ config :postgrex, :json_library, JSON
config :parrhesia, config :parrhesia,
moderation_cache_enabled: true, moderation_cache_enabled: true,
relay_url: "ws://localhost:4413/relay", relay_url: "ws://localhost:4413/relay",
identity: [
path: nil,
private_key: nil
],
sync: [
path: nil,
start_workers?: true
],
limits: [ limits: [
max_frame_bytes: 1_048_576, max_frame_bytes: 1_048_576,
max_event_bytes: 262_144, max_event_bytes: 262_144,
@@ -21,6 +29,8 @@ config :parrhesia,
max_negentropy_payload_bytes: 4096, max_negentropy_payload_bytes: 4096,
max_negentropy_sessions_per_connection: 8, max_negentropy_sessions_per_connection: 8,
max_negentropy_total_sessions: 10_000, max_negentropy_total_sessions: 10_000,
max_negentropy_items_per_session: 50_000,
negentropy_id_list_threshold: 32,
negentropy_session_idle_timeout_seconds: 60, negentropy_session_idle_timeout_seconds: 60,
negentropy_session_sweep_interval_seconds: 10 negentropy_session_sweep_interval_seconds: 10
], ],
@@ -47,13 +57,26 @@ config :parrhesia,
marmot_push_max_server_recipients: 1, marmot_push_max_server_recipients: 1,
management_auth_required: true management_auth_required: true
], ],
metrics: [ listeners: %{
enabled_on_main_endpoint: true, public: %{
public: false, enabled: true,
private_networks_only: true, bind: %{ip: {0, 0, 0, 0}, port: 4413},
allowed_cidrs: [], transport: %{scheme: :http, tls: %{mode: :disabled}},
auth_token: nil proxy: %{trusted_cidrs: [], honor_x_forwarded_for: true},
], network: %{allow_all: true},
features: %{
nostr: %{enabled: true},
admin: %{enabled: true},
metrics: %{
enabled: true,
access: %{private_networks_only: true},
auth_token: nil
}
},
auth: %{nip42_required: false, nip98_required_for_admin: true},
baseline_acl: %{read: [], write: []}
}
},
retention: [ retention: [
check_interval_hours: 24, check_interval_hours: 24,
months_ahead: 2, months_ahead: 2,
@@ -75,13 +98,6 @@ config :parrhesia,
admin: Parrhesia.Storage.Adapters.Postgres.Admin admin: Parrhesia.Storage.Adapters.Postgres.Admin
] ]
config :parrhesia, Parrhesia.Web.Endpoint, port: 4413
config :parrhesia, Parrhesia.Web.MetricsEndpoint,
enabled: false,
ip: {127, 0, 0, 1},
port: 9568
config :parrhesia, Parrhesia.Repo, types: Parrhesia.PostgresTypes config :parrhesia, Parrhesia.Repo, types: Parrhesia.PostgresTypes
config :parrhesia, ecto_repos: [Parrhesia.Repo] config :parrhesia, ecto_repos: [Parrhesia.Repo]

View File

@@ -48,6 +48,25 @@ csv_env = fn name, default ->
end end
end end
json_env = fn name, default ->
case System.get_env(name) do
nil ->
default
"" ->
default
value ->
case JSON.decode(value) do
{:ok, decoded} ->
decoded
{:error, reason} ->
raise "environment variable #{name} must contain valid JSON: #{inspect(reason)}"
end
end
end
infinity_or_int_env = fn name, default -> infinity_or_int_env = fn name, default ->
case System.get_env(name) do case System.get_env(name) do
nil -> nil ->
@@ -121,10 +140,10 @@ if config_env() == :prod do
limits_defaults = Application.get_env(:parrhesia, :limits, []) limits_defaults = Application.get_env(:parrhesia, :limits, [])
policies_defaults = Application.get_env(:parrhesia, :policies, []) policies_defaults = Application.get_env(:parrhesia, :policies, [])
metrics_defaults = Application.get_env(:parrhesia, :metrics, []) listeners_defaults = Application.get_env(:parrhesia, :listeners, %{})
retention_defaults = Application.get_env(:parrhesia, :retention, []) retention_defaults = Application.get_env(:parrhesia, :retention, [])
features_defaults = Application.get_env(:parrhesia, :features, []) features_defaults = Application.get_env(:parrhesia, :features, [])
metrics_endpoint_defaults = Application.get_env(:parrhesia, Parrhesia.Web.MetricsEndpoint, []) acl_defaults = Application.get_env(:parrhesia, :acl, [])
default_pool_size = Keyword.get(repo_defaults, :pool_size, 32) default_pool_size = Keyword.get(repo_defaults, :pool_size, 32)
default_queue_target = Keyword.get(repo_defaults, :queue_target, 1_000) default_queue_target = Keyword.get(repo_defaults, :queue_target, 1_000)
@@ -210,6 +229,16 @@ if config_env() == :prod do
"PARRHESIA_LIMITS_MAX_NEGENTROPY_TOTAL_SESSIONS", "PARRHESIA_LIMITS_MAX_NEGENTROPY_TOTAL_SESSIONS",
Keyword.get(limits_defaults, :max_negentropy_total_sessions, 10_000) Keyword.get(limits_defaults, :max_negentropy_total_sessions, 10_000)
), ),
max_negentropy_items_per_session:
int_env.(
"PARRHESIA_LIMITS_MAX_NEGENTROPY_ITEMS_PER_SESSION",
Keyword.get(limits_defaults, :max_negentropy_items_per_session, 50_000)
),
negentropy_id_list_threshold:
int_env.(
"PARRHESIA_LIMITS_NEGENTROPY_ID_LIST_THRESHOLD",
Keyword.get(limits_defaults, :negentropy_id_list_threshold, 32)
),
negentropy_session_idle_timeout_seconds: negentropy_session_idle_timeout_seconds:
int_env.( int_env.(
"PARRHESIA_LIMITS_NEGENTROPY_SESSION_IDLE_TIMEOUT_SECONDS", "PARRHESIA_LIMITS_NEGENTROPY_SESSION_IDLE_TIMEOUT_SECONDS",
@@ -330,33 +359,170 @@ if config_env() == :prod do
) )
] ]
metrics = [ public_listener_defaults = Map.get(listeners_defaults, :public, %{})
enabled_on_main_endpoint: public_bind_defaults = Map.get(public_listener_defaults, :bind, %{})
bool_env.( public_transport_defaults = Map.get(public_listener_defaults, :transport, %{})
"PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT", public_proxy_defaults = Map.get(public_listener_defaults, :proxy, %{})
Keyword.get(metrics_defaults, :enabled_on_main_endpoint, true) public_network_defaults = Map.get(public_listener_defaults, :network, %{})
), public_features_defaults = Map.get(public_listener_defaults, :features, %{})
public: public_auth_defaults = Map.get(public_listener_defaults, :auth, %{})
bool_env.( public_metrics_defaults = Map.get(public_features_defaults, :metrics, %{})
"PARRHESIA_METRICS_PUBLIC", public_metrics_access_defaults = Map.get(public_metrics_defaults, :access, %{})
Keyword.get(metrics_defaults, :public, false)
), metrics_listener_defaults = Map.get(listeners_defaults, :metrics, %{})
private_networks_only: metrics_listener_bind_defaults = Map.get(metrics_listener_defaults, :bind, %{})
bool_env.( metrics_listener_transport_defaults = Map.get(metrics_listener_defaults, :transport, %{})
"PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY", metrics_listener_network_defaults = Map.get(metrics_listener_defaults, :network, %{})
Keyword.get(metrics_defaults, :private_networks_only, true)
), metrics_listener_metrics_defaults =
allowed_cidrs: metrics_listener_defaults
csv_env.( |> Map.get(:features, %{})
"PARRHESIA_METRICS_ALLOWED_CIDRS", |> Map.get(:metrics, %{})
Keyword.get(metrics_defaults, :allowed_cidrs, [])
), metrics_listener_metrics_access_defaults =
auth_token: Map.get(metrics_listener_metrics_defaults, :access, %{})
string_env.(
"PARRHESIA_METRICS_AUTH_TOKEN", public_listener = %{
Keyword.get(metrics_defaults, :auth_token) enabled: Map.get(public_listener_defaults, :enabled, true),
bind: %{
ip: Map.get(public_bind_defaults, :ip, {0, 0, 0, 0}),
port: int_env.("PORT", Map.get(public_bind_defaults, :port, 4413))
},
transport: %{
scheme: Map.get(public_transport_defaults, :scheme, :http),
tls: Map.get(public_transport_defaults, :tls, %{mode: :disabled})
},
proxy: %{
trusted_cidrs:
csv_env.(
"PARRHESIA_TRUSTED_PROXIES",
Map.get(public_proxy_defaults, :trusted_cidrs, [])
),
honor_x_forwarded_for: Map.get(public_proxy_defaults, :honor_x_forwarded_for, true)
},
network: %{
allow_cidrs: Map.get(public_network_defaults, :allow_cidrs, []),
private_networks_only: Map.get(public_network_defaults, :private_networks_only, false),
public: Map.get(public_network_defaults, :public, false),
allow_all: Map.get(public_network_defaults, :allow_all, true)
},
features: %{
nostr: %{
enabled: public_features_defaults |> Map.get(:nostr, %{}) |> Map.get(:enabled, true)
},
admin: %{
enabled: public_features_defaults |> Map.get(:admin, %{}) |> Map.get(:enabled, true)
},
metrics: %{
enabled:
bool_env.(
"PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT",
Map.get(public_metrics_defaults, :enabled, true)
),
auth_token:
string_env.(
"PARRHESIA_METRICS_AUTH_TOKEN",
Map.get(public_metrics_defaults, :auth_token)
),
access: %{
public:
bool_env.(
"PARRHESIA_METRICS_PUBLIC",
Map.get(public_metrics_access_defaults, :public, false)
),
private_networks_only:
bool_env.(
"PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY",
Map.get(public_metrics_access_defaults, :private_networks_only, true)
),
allow_cidrs:
csv_env.(
"PARRHESIA_METRICS_ALLOWED_CIDRS",
Map.get(public_metrics_access_defaults, :allow_cidrs, [])
),
allow_all: Map.get(public_metrics_access_defaults, :allow_all, true)
}
}
},
auth: %{
nip42_required: Map.get(public_auth_defaults, :nip42_required, false),
nip98_required_for_admin:
bool_env.(
"PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED",
Map.get(public_auth_defaults, :nip98_required_for_admin, true)
)
},
baseline_acl: Map.get(public_listener_defaults, :baseline_acl, %{read: [], write: []})
}
listeners =
if Map.get(metrics_listener_defaults, :enabled, false) or
bool_env.("PARRHESIA_METRICS_ENDPOINT_ENABLED", false) do
Map.put(
%{public: public_listener},
:metrics,
%{
enabled: true,
bind: %{
ip: Map.get(metrics_listener_bind_defaults, :ip, {127, 0, 0, 1}),
port:
int_env.(
"PARRHESIA_METRICS_ENDPOINT_PORT",
Map.get(metrics_listener_bind_defaults, :port, 9568)
)
},
transport: %{
scheme: Map.get(metrics_listener_transport_defaults, :scheme, :http),
tls: Map.get(metrics_listener_transport_defaults, :tls, %{mode: :disabled})
},
network: %{
allow_cidrs: Map.get(metrics_listener_network_defaults, :allow_cidrs, []),
private_networks_only:
Map.get(metrics_listener_network_defaults, :private_networks_only, false),
public: Map.get(metrics_listener_network_defaults, :public, false),
allow_all: Map.get(metrics_listener_network_defaults, :allow_all, true)
},
features: %{
nostr: %{enabled: false},
admin: %{enabled: false},
metrics: %{
enabled: true,
auth_token:
string_env.(
"PARRHESIA_METRICS_AUTH_TOKEN",
Map.get(metrics_listener_metrics_defaults, :auth_token)
),
access: %{
public:
bool_env.(
"PARRHESIA_METRICS_PUBLIC",
Map.get(metrics_listener_metrics_access_defaults, :public, false)
),
private_networks_only:
bool_env.(
"PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY",
Map.get(
metrics_listener_metrics_access_defaults,
:private_networks_only,
true
)
),
allow_cidrs:
csv_env.(
"PARRHESIA_METRICS_ALLOWED_CIDRS",
Map.get(metrics_listener_metrics_access_defaults, :allow_cidrs, [])
),
allow_all: Map.get(metrics_listener_metrics_access_defaults, :allow_all, true)
}
}
},
auth: %{nip42_required: false, nip98_required_for_admin: true},
baseline_acl: %{read: [], write: []}
}
) )
] else
%{public: public_listener}
end
retention = [ retention = [
check_interval_hours: check_interval_hours:
@@ -420,34 +586,34 @@ if config_env() == :prod do
queue_target: queue_target, queue_target: queue_target,
queue_interval: queue_interval queue_interval: queue_interval
config :parrhesia, Parrhesia.Web.Endpoint, port: int_env.("PORT", 4413)
config :parrhesia, Parrhesia.Web.MetricsEndpoint,
enabled:
bool_env.(
"PARRHESIA_METRICS_ENDPOINT_ENABLED",
Keyword.get(metrics_endpoint_defaults, :enabled, false)
),
ip:
ipv4_env.(
"PARRHESIA_METRICS_ENDPOINT_IP",
Keyword.get(metrics_endpoint_defaults, :ip, {127, 0, 0, 1})
),
port:
int_env.(
"PARRHESIA_METRICS_ENDPOINT_PORT",
Keyword.get(metrics_endpoint_defaults, :port, 9568)
)
config :parrhesia, config :parrhesia,
relay_url: string_env.("PARRHESIA_RELAY_URL", relay_url_default), relay_url: string_env.("PARRHESIA_RELAY_URL", relay_url_default),
acl: [
protected_filters:
json_env.(
"PARRHESIA_ACL_PROTECTED_FILTERS",
Keyword.get(acl_defaults, :protected_filters, [])
)
],
identity: [
path: string_env.("PARRHESIA_IDENTITY_PATH", nil),
private_key: string_env.("PARRHESIA_IDENTITY_PRIVATE_KEY", nil)
],
sync: [
path: string_env.("PARRHESIA_SYNC_PATH", nil),
start_workers?:
bool_env.(
"PARRHESIA_SYNC_START_WORKERS",
Keyword.get(Application.get_env(:parrhesia, :sync, []), :start_workers?, true)
)
],
moderation_cache_enabled: moderation_cache_enabled:
bool_env.("PARRHESIA_MODERATION_CACHE_ENABLED", moderation_cache_enabled_default), bool_env.("PARRHESIA_MODERATION_CACHE_ENABLED", moderation_cache_enabled_default),
enable_expiration_worker: enable_expiration_worker:
bool_env.("PARRHESIA_ENABLE_EXPIRATION_WORKER", enable_expiration_worker_default), bool_env.("PARRHESIA_ENABLE_EXPIRATION_WORKER", enable_expiration_worker_default),
listeners: listeners,
limits: limits, limits: limits,
policies: policies, policies: policies,
metrics: metrics,
retention: retention, retention: retention,
features: features features: features

View File

@@ -8,13 +8,33 @@ test_endpoint_port =
value -> String.to_integer(value) value -> String.to_integer(value)
end end
config :parrhesia, Parrhesia.Web.Endpoint, config :parrhesia, :listeners,
port: test_endpoint_port, public: %{
ip: {127, 0, 0, 1} enabled: true,
bind: %{ip: {127, 0, 0, 1}, port: test_endpoint_port},
transport: %{scheme: :http, tls: %{mode: :disabled}},
proxy: %{trusted_cidrs: [], honor_x_forwarded_for: true},
network: %{allow_all: true},
features: %{
nostr: %{enabled: true},
admin: %{enabled: true},
metrics: %{enabled: true, access: %{private_networks_only: true}, auth_token: nil}
},
auth: %{nip42_required: false, nip98_required_for_admin: true},
baseline_acl: %{read: [], write: []}
}
config :parrhesia, config :parrhesia,
enable_expiration_worker: false, enable_expiration_worker: false,
moderation_cache_enabled: false, moderation_cache_enabled: false,
identity: [
path: Path.join(System.tmp_dir!(), "parrhesia_test_identity.json"),
private_key: nil
],
sync: [
path: Path.join(System.tmp_dir!(), "parrhesia_test_sync.json"),
start_workers?: false
],
features: [verify_event_signatures: false] features: [verify_event_signatures: false]
pg_host = System.get_env("PGHOST") pg_host = System.get_env("PGHOST")

View File

@@ -10,7 +10,7 @@
vips, vips,
}: let }: let
pname = "parrhesia"; pname = "parrhesia";
version = "0.4.0"; version = "0.5.0";
beamPackages = beam.packages.erlang_28.extend ( beamPackages = beam.packages.erlang_28.extend (
final: _prev: { final: _prev: {
@@ -48,7 +48,7 @@
beamPackages.fetchMixDeps { beamPackages.fetchMixDeps {
pname = "${pname}-mix-deps"; pname = "${pname}-mix-deps";
inherit version src; inherit version src;
hash = "sha256-I09Q2PG22lOrZjjXoq8Py3P3o5dgaz9LhKJSmP+/r6k="; hash = "sha256-D69wuFnIChQzm1PmpIW+X/1sPpsIcDHe4V5fKmFeJ3k=";
} }
else null; else null;

View File

@@ -68,10 +68,10 @@ Notes:
## 3) System architecture (high level) ## 3) System architecture (high level)
```text ```text
WS/HTTP Edge (Bandit/Plug) Configured WS/HTTP Listeners (Bandit/Plug)
-> Protocol Decoder/Encoder -> Protocol Decoder/Encoder
-> Command Router (EVENT/REQ/CLOSE/AUTH/COUNT/NEG-*) -> Command Router (EVENT/REQ/CLOSE/AUTH/COUNT/NEG-*)
-> Policy Pipeline (validation, auth, ACL, PoW, NIP-70) -> Policy Pipeline (listener baseline, validation, auth, ACL, PoW, NIP-70)
-> Event Service / Query Service -> Event Service / Query Service
-> Storage Port (behavior) -> Storage Port (behavior)
-> Postgres Adapter (Ecto) -> Postgres Adapter (Ecto)
@@ -90,15 +90,24 @@ WS/HTTP Edge (Bandit/Plug)
4. `Parrhesia.Subscriptions.Supervisor` subscription index + fanout workers 4. `Parrhesia.Subscriptions.Supervisor` subscription index + fanout workers
5. `Parrhesia.Auth.Supervisor` AUTH challenge/session tracking 5. `Parrhesia.Auth.Supervisor` AUTH challenge/session tracking
6. `Parrhesia.Policy.Supervisor` rate limiters / ACL caches 6. `Parrhesia.Policy.Supervisor` rate limiters / ACL caches
7. `Parrhesia.Web.Endpoint` WS + HTTP ingress 7. `Parrhesia.Web.Endpoint` supervises configured WS + HTTP listeners
8. `Parrhesia.Tasks.Supervisor` background jobs (expiry purge, maintenance) 8. `Parrhesia.Tasks.Supervisor` background jobs (expiry purge, maintenance)
Failure model: Failure model:
- Connection failures are isolated per socket process. - Connection failures are isolated per socket process.
- Listener failures are isolated per Bandit child and restarted independently.
- Storage outages degrade with explicit `OK/CLOSED` error prefixes (`error:`) per NIP-01. - Storage outages degrade with explicit `OK/CLOSED` error prefixes (`error:`) per NIP-01.
- Non-critical workers are `:transient`; core infra is `:permanent`. - Non-critical workers are `:transient`; core infra is `:permanent`.
Ingress model:
- Ingress is defined through `config :parrhesia, :listeners, ...`.
- Each listener has its own bind/transport settings, TLS mode, proxy trust, network allowlist, enabled features (`nostr`, `admin`, `metrics`), auth requirements, and baseline read/write ACL.
- Listeners can therefore expose different security postures, for example a public relay listener and a VPN-only sync-capable listener.
- TLS-capable listeners support direct server TLS, mutual TLS with optional client pin checks, and proxy-terminated TLS identity on explicitly trusted proxy hops.
- Certificate reload is currently implemented as admin-triggered listener restart from disk rather than background file watching.
## 5) Core runtime components ## 5) Core runtime components
### 5.1 Connection process ### 5.1 Connection process

140
docs/KHATRU.md Normal file
View File

@@ -0,0 +1,140 @@
# Khatru-Inspired Runtime Improvements
This document collects refactoring and extension ideas learned from studying Khatru-style relay design.
It is intentionally **not** about the new public API surface or the sync ACL model. Those live in `docs/slop/LOCAL_API.md` and `docs/SYNC.md`.
The focus here is runtime shape, protocol behavior, and operator-visible relay features.
---
## 1. Why This Matters
Khatru appears mature mainly because it exposes clearer relay pipeline stages.
That gives three practical benefits:
- less policy drift between storage, websocket, and management code,
- easier feature addition without hard-coding more branches into one connection module,
- better composability for relay profiles with different trust and traffic models.
Parrhesia should borrow that clarity without copying Khatru's code-first hook model wholesale.
---
## 2. Proposed Runtime Refactors
### 2.1 Staged policy pipeline
Parrhesia should stop treating policy as one coarse `EventPolicy` module plus scattered special cases.
Recommended internal stages:
1. connection admission
2. authentication challenge and validation
3. publish/write authorization
4. query/count authorization
5. stream subscription authorization
6. negentropy authorization
7. response shaping
8. broadcast/fanout suppression
This is an internal runtime refactor. It does not imply a new public API.
### 2.2 Richer internal request context
The runtime should carry a structured request context through all stages.
Useful fields:
- authenticated pubkeys
- caller kind
- remote IP
- subscription id
- peer id
- negentropy session flag
- internal-call flag
This reduces ad-hoc branching and makes audit/telemetry more coherent.
### 2.3 Separate policy from storage presence tables
Moderation state should remain data.
Runtime enforcement should be a first-class layer that consumes that data, not a side effect of whether a table exists.
This is especially important for:
- blocked IP enforcement,
- pubkey allowlists,
- future kind- or tag-scoped restrictions.
---
## 3. Protocol and Relay Features
### 3.1 Real COUNT sketches
Parrhesia currently returns a synthetic `hll` payload for NIP-45-style count responses.
If approximate count exchange matters, implement a real reusable HLL sketch path instead of hashing `filters + count`.
### 3.2 Relay identity in NIP-11
Once Parrhesia owns a stable server identity, NIP-11 should expose the relay pubkey instead of returning `nil`.
This is useful beyond sync:
- operator visibility,
- relay fingerprinting,
- future trust tooling.
### 3.3 Connection-level IP enforcement
Blocked IP support should be enforced on actual connection admission, not only stored in management tables.
This should happen early, before expensive protocol handling.
### 3.4 Better response shaping
Introduce a narrow internal response shaping layer for cases where returned events or counts need controlled rewriting or suppression.
Examples:
- hide fields for specific relay profiles,
- suppress rebroadcast of locally-ingested remote sync traffic,
- shape relay notices consistently.
This should stay narrow and deterministic. It should not become arbitrary app semantics.
---
## 4. Suggested Extension Points
These should be internal runtime seams, not necessarily public interfaces:
- `ConnectionPolicy`
- `AuthPolicy`
- `ReadPolicy`
- `WritePolicy`
- `NegentropyPolicy`
- `ResponsePolicy`
- `BroadcastPolicy`
They may initially be plain modules with well-defined callbacks or functions.
The point is not pluggability for its own sake. The point is to make policy stages explicit and testable.
---
## 5. Near-Term Priority
Recommended order:
1. enforce blocked IPs and any future connection-gating on the real connection path
2. split the current websocket flow into explicit read/write/negentropy policy stages
3. enrich runtime request context and telemetry metadata
4. expose relay pubkey in NIP-11 once identity lands
5. replace fake HLL payloads with a real approximate-count implementation if NIP-45 support matters operationally
This keeps the runtime improvements incremental and independent from the ongoing API and ACL implementation.

354
docs/NIP-DBSYNC.md Normal file
View File

@@ -0,0 +1,354 @@
# NIP-DBSYNC — Minimal Mutation Events over Nostr
`draft` `optional`
Defines a minimal event format for publishing immutable application mutation events over Nostr.
This draft intentionally standardizes only the wire format for mutation transport. It does **not** standardize database replication strategy, conflict resolution, relay retention, or key derivation.
---
## Abstract
This NIP defines one regular event kind, **5000**, for signed mutation events.
A mutation event identifies:
- the object namespace being mutated,
- the object identifier within that namespace,
- the mutation operation,
- an optional parent mutation event,
- an application-defined payload.
The purpose of this NIP is to make signed mutation logs portable across Nostr clients and relays without requiring relays to implement database-specific behavior.
---
## Motivation
Many applications need a way to distribute signed state changes across multiple publishers, consumers, or services.
Today this can be done with private event kinds, but private schemas make cross-implementation interoperability harder than necessary. This NIP defines a small shared envelope for mutation events while leaving application-specific state semantics in the payload.
This NIP is intended for use cases such as:
- synchronizing object changes between cooperating services,
- publishing auditable mutation logs,
- replaying application events from ordinary Nostr relays,
- bridging non-Nostr systems into a Nostr-based event stream.
This NIP is **not** a consensus protocol. It does not provide:
- total ordering,
- transactional guarantees,
- global conflict resolution,
- authorization rules,
- guaranteed relay retention.
Applications that require those properties MUST define them separately.
---
## Specification
### Event Kind
| Kind | Category | Name |
|------|----------|------|
| 5000 | Regular | Mutation |
Kind `5000` is a regular event. Relays that support this NIP MAY store it like any other regular event.
This NIP does **not** require relays to:
- retain all historical events,
- index any specific tag beyond normal NIP-01 behavior,
- deliver events in causal or chronological order,
- detect or resolve conflicts.
Applications that depend on durable replay or custom indexing MUST choose relays whose policies satisfy those needs.
### Event Structure
```json
{
"id": "<32-byte lowercase hex>",
"pubkey": "<32-byte lowercase hex>",
"created_at": "<unix timestamp, seconds>",
"kind": 5000,
"tags": [
["r", "<resource namespace>"],
["i", "<object identifier>"],
["op", "<mutation operation>"],
["e", "<parent mutation event id>"]
],
"content": "<JSON-encoded application payload>",
"sig": "<64-byte lowercase hex>"
}
```
The `content` field is a JSON-encoded string. Its structure is defined below.
---
## Tags
| Tag | Required | Description |
|-----|----------|-------------|
| `r` | Yes | Stable resource namespace for the mutated object type. Reverse-DNS style names are RECOMMENDED, for example `com.example.accounts.user`. |
| `i` | Yes | Opaque object identifier, unique within the `r` namespace. Consumers MUST treat this as a string. |
| `op` | Yes | Mutation operation. This NIP defines only `upsert` and `delete`. |
| `e` | No | Parent mutation event id, if the publisher wants to express ancestry. At most one `e` tag SHOULD be included in this version of the protocol. |
| `v` | No | Application payload schema version as a string. RECOMMENDED when the payload format may evolve over time. |
### Tag Rules
Publishers:
- MUST include exactly one `r` tag.
- MUST include exactly one `i` tag.
- MUST include exactly one `op` tag.
- MUST set `op` to either `upsert` or `delete`.
- SHOULD include at most one `e` tag.
- MAY include one `v` tag.
Consumers:
- MUST ignore unknown tags.
- MUST NOT assume tag ordering.
- MUST treat the `e` tag as an ancestry hint, not as proof of global ordering.
### Resource Namespaces
The `r` tag identifies an application-level object type.
This NIP does not define a global registry of resource namespaces. To reduce collisions, publishers SHOULD use a stable namespace they control, such as reverse-DNS notation.
Examples:
- `com.example.accounts.user`
- `org.example.inventory.item`
- `net.example.billing.invoice`
Publishers MUST document the payload schema associated with each resource namespace they use.
---
## Content Payload
The `content` field MUST be a JSON-encoded object.
```json
{
"value": {},
"patch": "merge"
}
```
| Field | Required | Description |
|-------|----------|-------------|
| `value` | Yes | Application-defined mutation payload. For `upsert`, this is the state fragment or full post-mutation state being published. For `delete`, this MAY be an empty object or a small reason object. |
| `patch` | No | How `value` should be interpreted. This NIP defines `merge` and `replace`. If omitted, consumers MUST treat it as application-defined. |
### Payload Rules
For `op = upsert`:
- `value` MUST be a JSON object.
- Publishers SHOULD publish either:
- a partial object intended to be merged, or
- a full post-mutation object intended to replace prior state.
- If the interpretation is important for interoperability, publishers SHOULD set `patch` to `merge` or `replace`.
For `op = delete`:
- `value` MAY be `{}`.
- Consumers MUST treat `delete` as an application-level tombstone signal.
- This NIP does not define whether deletion means hard delete, soft delete, archival, or hiding. Applications MUST define that separately.
### Serialization
All payload values MUST be JSON-serializable.
The following representations are RECOMMENDED:
| Type | Representation |
|------|----------------|
| Timestamp / datetime | ISO 8601 string |
| Decimal | String |
| Binary | Base64 string |
| Null | JSON `null` |
Publishers MAY define additional type mappings, but those mappings are application-specific and MUST be documented outside this NIP.
---
## Ancestry and Replay
The optional `e` tag allows a publisher to indicate which prior mutation event it considered the parent when creating a new mutation.
This supports applications that want ancestry hints for:
- local conflict detection,
- replay ordering,
- branch inspection,
- audit tooling.
However:
- the `e` tag does **not** create a global ordering guarantee,
- relays are not required to deliver parents before children,
- consumers MUST be prepared to receive out-of-order events,
- consumers MAY buffer, defer, ignore, or immediately apply parent-missing events according to local policy.
This NIP does not define a merge event format.
This NIP does not define conflict resolution. If two valid mutation events for the same `(r, i)` object are concurrent or incompatible, consumers MUST resolve them using application-specific rules.
---
## Authorization
This NIP does not define who is authorized to publish mutation events for a given resource or object.
Authorization is application-specific.
Consumers MUST NOT assume that a valid Nostr signature alone authorizes a mutation. Consumers MUST apply their own trust policy, which MAY include:
- explicit pubkey allowlists,
- per-resource ACLs,
- external capability documents,
- relay-level write restrictions,
- application-specific verification.
This NIP does not define custodial keys, deterministic key derivation, shared cluster secrets, or delegation schemes.
---
## Relay Behavior
A relay implementing only NIP-01 remains compatible with this NIP.
No new relay messages are required beyond `REQ`, `EVENT`, and `CLOSE`.
Relays:
- MAY index the `r` and `i` tags using existing single-letter tag indexing conventions.
- MAY apply normal retention, rate-limit, and access-control policies.
- MAY reject events that are too large or otherwise violate local policy.
- MUST NOT be expected to validate application payload semantics.
Applications that require stronger guarantees, such as durable retention or strict admission control, MUST obtain those guarantees from relay policy or from a separate protocol profile.
---
## Subscription Filters
This NIP works with ordinary NIP-01 filters.
### All mutations for one resource
```json
{
"kinds": [5000],
"#r": ["com.example.accounts.user"]
}
```
### Mutation history for one object
```json
{
"kinds": [5000],
"#r": ["com.example.accounts.user"],
"#i": ["550e8400-e29b-41d4-a716-446655440000"]
}
```
### Mutations from trusted authors
```json
{
"kinds": [5000],
"authors": [
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
]
}
```
Applications SHOULD prefer narrow subscriptions over broad network-wide firehoses.
---
## Examples
### Upsert with parent
```json
{
"id": "1111111111111111111111111111111111111111111111111111111111111111",
"pubkey": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"created_at": 1710500300,
"kind": 5000,
"tags": [
["r", "com.example.accounts.user"],
["i", "550e8400-e29b-41d4-a716-446655440000"],
["op", "upsert"],
["e", "0000000000000000000000000000000000000000000000000000000000000000"],
["v", "1"]
],
"content": "{\"value\":{\"email\":\"jane.doe@newdomain.com\",\"updated_at\":\"2025-03-15T14:35:00Z\"},\"patch\":\"merge\"}",
"sig": "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"
}
```
### Delete tombstone
```json
{
"id": "2222222222222222222222222222222222222222222222222222222222222222",
"pubkey": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
"created_at": 1710500600,
"kind": 5000,
"tags": [
["r", "com.example.accounts.user"],
["i", "550e8400-e29b-41d4-a716-446655440000"],
["op", "delete"],
["e", "1111111111111111111111111111111111111111111111111111111111111111"],
["v", "1"]
],
"content": "{\"value\":{\"reason\":\"user_requested\"}}",
"sig": "dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"
}
```
---
## Security Considerations
- **Unauthorized writes:** A valid signature proves authorship, not authorization. Consumers MUST enforce their own trust policy.
- **Replay:** Old valid events may be redelivered by relays or attackers. Consumers SHOULD deduplicate by event id and apply local replay policy.
- **Reordering:** Events may arrive out of order. Consumers MUST NOT treat `created_at` or `e` as a guaranteed total order.
- **Conflict flooding:** Multiple valid mutations may target the same object. Consumers SHOULD rate-limit, bound buffering, and define local conflict policy.
- **Sensitive data exposure:** Nostr events are typically widely replicable. Publishers SHOULD NOT put secrets or regulated data in mutation payloads unless they provide application-layer encryption.
- **Relay retention variance:** Some relays will prune history. Applications that depend on full replay MUST choose relays accordingly or maintain an external archive.
---
## Extension Points
Future drafts or companion NIPs may define:
- snapshot events for faster bootstrap,
- object-head or checkpoint events,
- capability or delegation profiles for authorized writers,
- standardized conflict-resolution profiles for specific application classes.
Such extensions SHOULD remain optional and MUST NOT change the meaning of kind `5000` mutation events defined here.
---
## References
- [NIP-01](https://github.com/nostr-protocol/nips/blob/master/01.md) — Basic protocol flow description

417
docs/SYNC.md Normal file
View File

@@ -0,0 +1,417 @@
# Parrhesia Relay Sync
## 1. Purpose
This document defines the Parrhesia proposal for **relay-to-relay event synchronization**.
It is intentionally transport-focused:
- manage remote relay peers,
- catch up on matching events,
- keep a live stream open,
- expose health and basic stats.
It does **not** define application data semantics.
Parrhesia syncs Nostr events. Callers decide which events matter and how to apply them.
---
## 2. Boundary
### Parrhesia is responsible for
- storing and validating events,
- querying and streaming events,
- running outbound sync workers against remote relays,
- tracking peer configuration, worker health, and sync counters,
- exposing peer management through `Parrhesia.API.Sync`.
### Parrhesia is not responsible for
- resource mapping,
- trusted node allowlists for an app profile,
- mutation payload validation beyond normal event validation,
- conflict resolution,
- replay winner selection,
- database upsert/delete semantics.
For Tribes, those remain in `TRIBES-NOSTRSYNC` and `AshNostrSync`.
---
## 3. Security Foundation
### Default posture
The baseline posture for sync traffic is:
- no access to sync events by default,
- no implicit trust from ordinary relay usage,
- no reliance on plaintext confidentiality from public relays.
For the first implementation, Parrhesia should protect sync data primarily with:
- authenticated server identities,
- ACL-gated read and write access,
- TLS with certificate pinning for outbound peers.
### Server identity
Parrhesia owns a low-level server identity used for relay-to-relay authentication.
This identity is separate from:
- TLS endpoint identity,
- application event author pubkeys.
Recommended model:
- Parrhesia has one local server-auth pubkey,
- sync peers authenticate as server-auth pubkeys,
- ACL grants are bound to those authenticated server-auth pubkeys,
- application-level writer trust remains outside Parrhesia.
Identity lifecycle:
1. use configured/imported key if provided,
2. otherwise use persisted local identity,
3. otherwise generate once during initial startup and persist it.
Private key export should not be supported.
### ACLs
Sync traffic should use a real ACL layer, not moderation allowlists.
Current implementation note:
- Parrhesia already has storage-backed moderation state such as `allowed_pubkeys` and `blocked_ips`,
- that is not the sync ACL model,
- sync protection must be enforced in the active websocket/query/count/negentropy/write path, not inferred from management tables alone.
Initial ACL model:
- principal: authenticated pubkey,
- capabilities: `sync_read`, `sync_write`,
- match: event/filter shape such as `kinds: [5000]` and namespace tags.
This is enough for now. We do **not** need separate user and server ACL models yet.
A sync peer is simply an authenticated principal with sync capabilities.
### TLS pinning
Each outbound sync peer must include pinned TLS material.
Recommended pin type:
- SPKI SHA-256 pins
Multiple pins should be allowed to support certificate rotation.
---
## 4. Sync Model
Each configured sync server represents one outbound worker managed by Parrhesia.
Implementation note:
- Khatru-style relay designs benefit from explicit runtime stages,
- Parrhesia sync should therefore plug into clear internal phases for connection admission, auth, query/count, subscription, negentropy, publish, and fanout,
- this should stay a runtime refactor, not become extra sync semantics.
Minimum behavior:
1. connect to the remote relay,
2. run an initial catch-up query for the configured filters,
3. ingest received events into the local relay through the normal API path,
4. switch to a live subscription for the same filters,
5. reconnect with backoff when disconnected.
The worker treats filters as opaque Nostr filters. It does not interpret app payloads.
### Initial implementation mode
Initial implementation should use ordinary NIP-01 behavior:
- catch-up via `REQ`-style query,
- live updates via `REQ` subscription.
This is enough for Tribes and keeps the first version simple.
### NIP-77
Parrhesia now has a real reusable relay-side NIP-77 engine:
- proper `NEG-OPEN` / `NEG-MSG` / `NEG-CLOSE` / `NEG-ERR` framing,
- a reusable negentropy codec and reconciliation engine,
- bounded local `(created_at, id)` snapshot enumeration for matching filters,
- connection/session integration with policy checks and resource limits.
That means NIP-77 can be used for bandwidth-efficient catch-up between trusted nodes.
The first sync worker implementation may still default to ordinary NIP-01 catch-up plus live replay, because that path is operationally simpler and already matches the current Tribes sync profile. `:negentropy` can now be introduced as an optimization mode rather than a future prerequisite.
---
## 5. API Surface
Primary control plane:
- `Parrhesia.API.Identity.get/1`
- `Parrhesia.API.Identity.ensure/1`
- `Parrhesia.API.Identity.import/2`
- `Parrhesia.API.Identity.rotate/1`
- `Parrhesia.API.ACL.grant/2`
- `Parrhesia.API.ACL.revoke/2`
- `Parrhesia.API.ACL.list/1`
- `Parrhesia.API.Sync.put_server/2`
- `Parrhesia.API.Sync.remove_server/2`
- `Parrhesia.API.Sync.get_server/2`
- `Parrhesia.API.Sync.list_servers/1`
- `Parrhesia.API.Sync.start_server/2`
- `Parrhesia.API.Sync.stop_server/2`
- `Parrhesia.API.Sync.sync_now/2`
- `Parrhesia.API.Sync.server_stats/2`
- `Parrhesia.API.Sync.sync_stats/1`
- `Parrhesia.API.Sync.sync_health/1`
These APIs are in-process. HTTP management may expose them through `Parrhesia.API.Admin` or direct routing to `Parrhesia.API.Sync`.
---
## 6. Server Specification
`put_server/2` is an upsert.
Suggested server shape:
```elixir
%{
id: "tribes-primary",
url: "wss://relay-a.example/relay",
enabled?: true,
auth_pubkey: "<remote-server-auth-pubkey>",
mode: :req_stream,
filters: [
%{
"kinds" => [5000],
"authors" => ["<trusted-node-pubkey-a>", "<trusted-node-pubkey-b>"],
"#r" => ["tribes.accounts.user", "tribes.chat.tribe"]
}
],
overlap_window_seconds: 300,
auth: %{
type: :nip42
},
tls: %{
mode: :required,
hostname: "relay-a.example",
pins: [
%{type: :spki_sha256, value: "<pin-a>"},
%{type: :spki_sha256, value: "<pin-b>"}
]
},
metadata: %{}
}
```
Required fields:
- `id`
- `url`
- `auth_pubkey`
- `filters`
- `tls`
Recommended fields:
- `enabled?`
- `mode`
- `overlap_window_seconds`
- `auth`
- `metadata`
Rules:
- `id` must be stable and unique locally.
- `url` is the remote relay websocket URL.
- `auth_pubkey` is the expected remote server-auth pubkey.
- `filters` must be valid NIP-01 filters.
- filters are owned by the caller; Parrhesia only validates filter shape.
- `mode` defaults to `:req_stream`.
- `tls.mode` defaults to `:required`.
- `tls.pins` must be non-empty for synced peers.
---
## 7. Runtime State
Each server should have both configuration and runtime status.
Suggested runtime fields:
```elixir
%{
server_id: "tribes-primary",
state: :running,
connected?: true,
last_connected_at: ~U[2026-03-16 10:00:00Z],
last_disconnected_at: nil,
last_sync_started_at: ~U[2026-03-16 10:00:00Z],
last_sync_completed_at: ~U[2026-03-16 10:00:02Z],
last_event_received_at: ~U[2026-03-16 10:12:45Z],
last_eose_at: ~U[2026-03-16 10:00:02Z],
reconnect_attempts: 0,
last_error: nil
}
```
Parrhesia should keep this state generic. It is about relay sync health, not app state convergence.
---
## 8. Stats and Health
### Per-server stats
`server_stats/2` should return basic counters such as:
- `events_received`
- `events_accepted`
- `events_duplicate`
- `events_rejected`
- `query_runs`
- `subscription_restarts`
- `reconnects`
- `last_remote_eose_at`
- `last_error`
### Aggregate sync stats
`sync_stats/1` should summarize:
- total configured servers,
- enabled servers,
- running servers,
- connected servers,
- aggregate event counters,
- aggregate reconnect count.
### Health
`sync_health/1` should be operator-oriented, for example:
```elixir
%{
"status" => "degraded",
"servers_total" => 3,
"servers_connected" => 2,
"servers_failing" => [
%{"id" => "tribes-secondary", "reason" => "connection_refused"}
]
}
```
This is intentionally simple. It should answer “is sync working?” without pretending to prove application convergence.
---
## 9. Event Ingest Path
Events received from a remote sync worker should enter Parrhesia through the same ingest path as any other accepted event.
That means:
1. validate the event,
2. run normal write policy,
3. persist or reject,
4. fan out locally,
5. rely on duplicate-event behavior for idempotency.
This avoids a second ingest path with divergent behavior.
Before normal event acceptance, the sync worker should enforce:
1. pinned TLS validation for the remote endpoint,
2. remote server-auth identity match,
3. local ACL grant permitting the peer to perform sync reads and/or writes.
The sync worker may attach request-context metadata such as:
```elixir
%Parrhesia.API.RequestContext{
caller: :sync,
peer_id: "tribes-primary",
metadata: %{sync_server_id: "tribes-primary"}
}
```
Recommended additional context when available:
- `remote_ip`
- `subscription_id`
This context is for telemetry, policy, and audit only. It must not become app sync semantics.
---
## 10. Persistence
Parrhesia should persist enough sync control-plane state to survive restart:
- local server identity reference,
- configured ACL rules for sync principals,
- configured servers,
- whether a server is enabled,
- optional catch-up cursor or watermark per server,
- basic last-error and last-success markers.
Parrhesia does not need to persist application replay heads or winner state. That remains in the embedding application.
---
## 11. Relationship to Current Features
### BEAM cluster fanout
`Parrhesia.Fanout.MultiNode` is a separate feature.
It provides best-effort live fanout between connected BEAM nodes. It is not remote relay sync and is not a substitute for `Parrhesia.API.Sync`.
### Management stats
Current admin `stats` is relay-global and minimal.
Sync adds a new dimension:
- peer config,
- worker state,
- per-peer counters,
- sync health summary.
That should be exposed without coupling it to app-specific sync semantics.
---
## 12. Tribes Usage
For Tribes, `AshNostrSync` should be able to:
1. rely on Parrhesia's local server identity,
2. register one or more remote relays with `Parrhesia.API.Sync.put_server/2`,
3. grant sync ACLs for trusted server-auth pubkeys,
4. provide narrow Nostr filters for `kind: 5000`,
5. observe sync health and counters,
6. consume events via the normal local Parrhesia ingest/query/stream surface.
Tribes should not need Parrhesia to know:
- what a resource namespace means,
- which node pubkeys are trusted for Tribes,
- how to resolve conflicts,
- how to apply an upsert or delete.
That is the key boundary.

View File

@@ -1,70 +1,111 @@
# Parrhesia Shared API + Local API Design (Option 1) # Parrhesia Shared API
## 1) Goal ## 1. Goal
Expose a stable in-process API for embedding apps **and** refactor server transports to consume the same API. Expose a stable in-process API that:
Desired end state: - is used by WebSocket, HTTP management, local callers, and sync workers,
- keeps protocol and storage behavior in one place,
- stays neutral about application-level replication semantics.
- WebSocket server, HTTP management, and embedding app all call one shared core API. This document defines the Parrhesia contract. It does **not** define Tribes or Ash sync behavior.
- Transport layers (WS/HTTP/local) only do framing, auth header extraction, and response encoding.
- Policy/storage/fanout/business semantics live in one place.
This keeps everything in the same dependency (`:parrhesia`) and avoids a second package.
--- ---
## 2) Key architectural decision ## 2. Scope
Previous direction: `Parrhesia.Local.*` as primary public API. ### In scope
Updated direction (this doc): - event ingest/query/count parity with WebSocket behavior,
- local subscription APIs,
- NIP-98 validation helpers,
- management/admin helpers,
- remote relay sync worker control and health reporting.
- Introduce **shared core API modules** under `Parrhesia.API.*`. ### Out of scope
- Make server code (`Parrhesia.Web.Connection`, management handlers) delegate to `Parrhesia.API.*`.
- Keep `Parrhesia.Local.*` as optional convenience wrappers over `Parrhesia.API.*`.
This ensures no divergence between local embedding behavior and websocket behavior. - resource registration,
- trusted app writers,
- mutation payload semantics,
- conflict resolution,
- replay winner selection,
- Ash action mapping.
Those belong in app profiles such as `TRIBES-NOSTRSYNC`, not in Parrhesia.
--- ---
## 3) Layered design ## 3. Layering
```text ```text
Transport layer Transport / embedding / background workers
- Parrhesia.Web.Connection (WS) - Parrhesia.Web.Connection
- Parrhesia.Web.Management (HTTP) - Parrhesia.Web.Management
- Parrhesia.Local.* wrappers (in-process) - Parrhesia.Local.*
- Parrhesia.Sync.*
Shared API layer Shared API
- Parrhesia.API.Auth - Parrhesia.API.Auth
- Parrhesia.API.Events - Parrhesia.API.Events
- Parrhesia.API.Stream (optional) - Parrhesia.API.Stream
- Parrhesia.API.Admin (optional, for management methods) - Parrhesia.API.Admin
- Parrhesia.API.Identity
- Parrhesia.API.ACL
- Parrhesia.API.Sync
Domain/runtime dependencies Runtime internals
- Parrhesia.Policy.EventPolicy - Parrhesia.Policy.EventPolicy
- Parrhesia.Storage.* adapters - Parrhesia.Storage.*
- Parrhesia.Groups.Flow - Parrhesia.Groups.Flow
- Parrhesia.Subscriptions.Index - Parrhesia.Subscriptions.Index
- Parrhesia.Fanout.MultiNode - Parrhesia.Fanout.MultiNode
- Parrhesia.Telemetry - Parrhesia.Telemetry
``` ```
Rule: all ingest/query/count decisions happen in `Parrhesia.API.Events`. Rule: transport framing stays at the edge. Business decisions happen in `Parrhesia.API.*`.
Implementation note:
- the runtime beneath `Parrhesia.API.*` should expose clearer internal policy stages than it does today,
- at minimum: connection/auth, publish, query/count, stream subscription, negentropy, response shaping, and broadcast/fanout,
- these are internal runtime seams, not additional public APIs.
--- ---
## 4) Public module plan ## 4. Core Context
## 4.1 `Parrhesia.API.Auth` ```elixir
defmodule Parrhesia.API.RequestContext do
defstruct authenticated_pubkeys: MapSet.new(),
actor: nil,
caller: :local,
remote_ip: nil,
subscription_id: nil,
peer_id: nil,
metadata: %{}
end
```
`caller` is for telemetry and policy parity, for example `:websocket`, `:http`, `:local`, or `:sync`.
Recommended usage:
- `remote_ip` for connection-level policy and audit,
- `subscription_id` for query/stream/negentropy context,
- `peer_id` for trusted sync peer identity when applicable,
- `metadata` for transport-specific details that should not become API fields.
---
## 5. Public Modules
### 5.1 `Parrhesia.API.Auth`
Purpose: Purpose:
- event validation helpers
- NIP-98 verification
- optional embedding account resolution hook
Proposed functions: - event validation helpers,
- NIP-98 verification,
- optional embedding account resolution.
```elixir ```elixir
@spec validate_event(map()) :: :ok | {:error, term()} @spec validate_event(map()) :: :ok | {:error, term()}
Proposed functions:
{:ok, Parrhesia.API.Auth.Context.t()} | {:error, term()} {:ok, Parrhesia.API.Auth.Context.t()} | {:error, term()}
``` ```
`validate_nip98/4` options: ### 5.2 `Parrhesia.API.Events`
```elixir
account_resolver: (pubkey_hex :: String.t(), auth_event :: map() ->
{:ok, account :: term()} | {:error, term()})
```
Context struct:
```elixir
defmodule Parrhesia.API.Auth.Context do
@enforce_keys [:pubkey, :auth_event]
defstruct [:pubkey, :auth_event, :account, claims: %{}]
end
```
---
## 4.2 `Parrhesia.API.Events`
Purpose: Purpose:
- canonical ingress/query/count API used by WS + local + HTTP integrations.
Proposed functions: - canonical ingest/query/count path used by WS, HTTP, local callers, and sync workers.
```elixir ```elixir
@spec publish(map(), keyword()) :: {:ok, Parrhesia.API.Events.PublishResult.t()} | {:error, term()} @spec publish(map(), keyword()) ::
@spec query([map()], keyword()) :: {:ok, [map()]} | {:error, term()} {:ok, Parrhesia.API.Events.PublishResult.t()} | {:error, term()}
@spec count([map()], keyword()) :: {:ok, non_neg_integer() | map()} | {:error, term()}
@spec query([map()], keyword()) ::
{:ok, [map()]} | {:error, term()}
@spec count([map()], keyword()) ::
{:ok, non_neg_integer() | map()} | {:error, term()}
``` ```
Request context: Required options:
```elixir - `:context` - `%Parrhesia.API.RequestContext{}`
defmodule Parrhesia.API.RequestContext do
defstruct authenticated_pubkeys: MapSet.new(),
actor: nil,
metadata: %{}
end
```
Publish result: `publish/2` must preserve current `EVENT` semantics:
```elixir 1. size checks,
defmodule Parrhesia.API.Events.PublishResult do 2. `Protocol.validate_event/1`,
@enforce_keys [:event_id, :accepted, :message] 3. `EventPolicy.authorize_write/2`,
defstruct [:event_id, :accepted, :message] 4. group handling,
end 5. persistence or control-event path,
``` 6. local plus multi-node fanout,
7. telemetry.
### Publish semantics (must match websocket EVENT) Return shape mirrors `OK`:
Pipeline in `publish/2`:
1. frame/event size limits
2. `Parrhesia.Protocol.validate_event/1`
3. `Parrhesia.Policy.EventPolicy.authorize_write/2`
4. group handling (`Parrhesia.Groups.Flow.handle_event/1`)
5. persistence path (`put_event`, deletion, vanish, ephemeral rules)
6. fanout (local + multi-node)
7. telemetry emit
Return shape mirrors Nostr `OK` semantics:
```elixir ```elixir
{:ok, %PublishResult{event_id: id, accepted: true, message: "ok: event stored"}} {:ok, %PublishResult{event_id: id, accepted: true, message: "ok: event stored"}}
{:ok, %PublishResult{event_id: id, accepted: false, message: "blocked: ..."}} {:ok, %PublishResult{event_id: id, accepted: false, message: "blocked: ..."}}
``` ```
### Query/count semantics (must match websocket REQ/COUNT) `query/2` and `count/2` must preserve current `REQ` and `COUNT` behavior, including giftwrap restrictions and server-side filter validation.
`query/2` and `count/2`: ### 5.3 `Parrhesia.API.Stream`
1. validate filters
2. run read policy (`EventPolicy.authorize_read/2`)
3. call storage with `requester_pubkeys` from context
4. return ordered events/count payload
Giftwrap restrictions (`kind 1059`) must remain identical to websocket behavior.
---
## 4.3 `Parrhesia.API.Stream` (optional but recommended)
Purpose: Purpose:
- local in-process subscriptions using same subscription index/fanout model.
Proposed functions: - in-process subscription surface with the same semantics as a WebSocket `REQ`.
This is **required** for embedding and sync consumers.
```elixir ```elixir
@spec subscribe(pid(), String.t(), [map()], keyword()) :: {:ok, reference()} | {:error, term()} @spec subscribe(pid(), String.t(), [map()], keyword()) ::
{:ok, reference()} | {:error, term()}
@spec unsubscribe(reference()) :: :ok @spec unsubscribe(reference()) :: :ok
``` ```
Required options:
- `:context` - `%Parrhesia.API.RequestContext{}`
Subscriber contract: Subscriber contract:
```elixir ```elixir
Subscriber contract:
{:parrhesia, :closed, ref, subscription_id, reason} {:parrhesia, :closed, ref, subscription_id, reason}
``` ```
--- `subscribe/4` must:
## 4.4 `Parrhesia.Local.*` wrappers 1. validate filters,
2. apply read policy,
3. emit initial catch-up events in the same order as `REQ`,
4. emit exactly one `:eose`,
5. register for live fanout until `unsubscribe/1`.
`Parrhesia.Local.*` remain as convenience API for embedding apps, implemented as thin wrappers: This module does **not** know why a caller wants the stream.
- `Parrhesia.Local.Auth` -> delegates to `Parrhesia.API.Auth` ### 5.4 `Parrhesia.API.Admin`
- `Parrhesia.Local.Events` -> delegates to `Parrhesia.API.Events`
- `Parrhesia.Local.Stream` -> delegates to `Parrhesia.API.Stream`
- `Parrhesia.Local.Client` -> use-case helpers (posts + private messages)
No business logic in wrappers. Purpose:
- stable in-process facade for management operations already exposed over HTTP.
```elixir
@spec execute(String.t() | atom(), map(), keyword()) :: {:ok, map()} | {:error, term()}
@spec stats(keyword()) :: {:ok, map()} | {:error, term()}
@spec health(keyword()) :: {:ok, map()} | {:error, term()}
@spec list_audit_logs(keyword()) :: {:ok, [map()]} | {:error, term()}
```
Baseline methods:
- `ping`
- `stats`
- `health`
- moderation methods already supported by the storage admin adapter
`stats/1` is relay-level and cheap. `health/1` is liveness/readiness-oriented and may include worker state.
`API.Admin` is the operator-facing umbrella for management. It may delegate domain-specific work to `API.Identity`, `API.ACL`, and `API.Sync`.
### 5.5 `Parrhesia.API.Identity`
Purpose:
- manage Parrhesia-owned server identity,
- expose public identity metadata,
- support explicit import and rotation,
- keep private key material internal.
Parrhesia owns a low-level server identity used for relay-to-relay auth and other transport-local security features.
```elixir
@spec get(keyword()) :: {:ok, map()} | {:error, term()}
@spec ensure(keyword()) :: {:ok, map()} | {:error, term()}
@spec import(map(), keyword()) :: {:ok, map()} | {:error, term()}
@spec rotate(keyword()) :: {:ok, map()} | {:error, term()}
@spec sign_event(map(), keyword()) :: {:ok, map()} | {:error, term()}
```
Rules:
- private key material must never be returned by API,
- production deployments should be able to import a configured key,
- local/dev deployments may generate on first init if none exists,
- identity creation should be eager and deterministic, not lazy on first sync use.
Recommended boot order:
1. configured/imported key,
2. persisted local identity,
3. generate once and persist.
### 5.6 `Parrhesia.API.ACL`
Purpose:
- enforce event/filter ACLs for authenticated principals,
- support default-deny sync visibility,
- allow dynamic grants for trusted sync peers.
This is a real authorization layer, not a reuse of moderation allowlists.
Current implementation note:
- Parrhesia already has storage-backed moderation presence tables such as `allowed_pubkeys` and `blocked_ips`,
- those are not sufficient for sync ACLs,
- the new ACL layer must be enforced directly in the active read/write/query/negentropy path, not only through management tables.
```elixir
@spec grant(map(), keyword()) :: :ok | {:error, term()}
@spec revoke(map(), keyword()) :: :ok | {:error, term()}
@spec list(keyword()) :: {:ok, [map()]} | {:error, term()}
@spec check(atom(), map(), keyword()) :: :ok | {:error, term()}
```
Suggested rule shape:
```elixir
%{
principal_type: :pubkey,
principal: "<server-auth-pubkey>",
capability: :sync_read,
match: %{
"kinds" => [5000],
"#r" => ["tribes.accounts.user", "tribes.chat.tribe"]
}
}
```
For the first implementation, principals should be authenticated pubkeys only.
We do **not** need a separate user-vs-server ACL model yet. A sync peer is simply a principal with sync capabilities.
Initial required capabilities:
- `:sync_read`
- `:sync_write`
Recommended baseline:
- ordinary events follow existing relay behavior,
- sync traffic is default-deny,
- access is lifted only by explicit ACL grants for authenticated server pubkeys.
### 5.7 `Parrhesia.API.Sync`
Purpose:
- manage remote relay sync workers without embedding app-specific replication semantics.
Parrhesia syncs **events**, not records.
```elixir
@spec put_server(map(), keyword()) ::
{:ok, Parrhesia.API.Sync.Server.t()} | {:error, term()}
@spec remove_server(String.t(), keyword()) :: :ok | {:error, term()}
@spec get_server(String.t(), keyword()) ::
{:ok, Parrhesia.API.Sync.Server.t()} | :error | {:error, term()}
@spec list_servers(keyword()) ::
{:ok, [Parrhesia.API.Sync.Server.t()]} | {:error, term()}
@spec start_server(String.t(), keyword()) :: :ok | {:error, term()}
@spec stop_server(String.t(), keyword()) :: :ok | {:error, term()}
@spec sync_now(String.t(), keyword()) :: :ok | {:error, term()}
@spec server_stats(String.t(), keyword()) ::
{:ok, map()} | :error | {:error, term()}
@spec sync_stats(keyword()) :: {:ok, map()} | {:error, term()}
@spec sync_health(keyword()) :: {:ok, map()} | {:error, term()}
```
`put_server/2` is upsert-style. It covers both add and update.
Minimum server shape:
```elixir
%{
id: "tribes-a",
url: "wss://relay-a.example/relay",
enabled?: true,
auth_pubkey: "<remote-server-auth-pubkey>",
filters: [%{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}],
mode: :req_stream,
auth: %{type: :nip42},
tls: %{
mode: :required,
hostname: "relay-a.example",
pins: [
%{type: :spki_sha256, value: "<base64-sha256-spki-pin>"}
]
}
}
```
Important constraints:
- filters are caller-provided and opaque to Parrhesia,
- Parrhesia does not inspect `kind: 5000` payload semantics,
- Parrhesia may persist peer config and runtime counters,
- Parrhesia may reconnect and resume catch-up using generic event cursors,
- Parrhesia must expose worker health and basic counters,
- remote relay TLS pinning is required,
- sync peer auth is bound to a server-auth pubkey, not inferred from event author pubkeys.
- sync enforcement should reuse the same runtime policy stages as ordinary websocket traffic rather than inventing a parallel trust path.
Server identity model:
- Parrhesia owns its local server-auth identity via `API.Identity`,
- peer config declares the expected remote server-auth pubkey,
- ACL grants are bound to authenticated server-auth pubkeys,
- event author pubkeys remain a separate application concern.
Initial mode should be `:req_stream`:
1. run catch-up with `API.Events.query/2`-equivalent client behavior against the remote relay,
2. switch to a live subscription,
3. ingest received events through local `API.Events.publish/2`.
Future optimization:
- `:negentropy` may be added as an optimization mode on top of the simpler `:req_stream` baseline.
- Parrhesia now has a reusable NIP-77 engine, but a sync worker does not need to depend on it for the first implementation.
--- ---
## 5) Server integration plan (critical) ## 6. Server Integration
## 5.1 WebSocket (`Parrhesia.Web.Connection`) ### WebSocket
After decode:
- `EVENT` -> `Parrhesia.API.Events.publish/2` - `EVENT` -> `Parrhesia.API.Events.publish/2`
- `REQ` -> `Parrhesia.API.Events.query/2` - `REQ` -> `Parrhesia.API.Stream.subscribe/4`
- `COUNT` -> `Parrhesia.API.Events.count/2` - `COUNT` -> `Parrhesia.API.Events.count/2`
- `AUTH` keep transport-specific challenge/session flow, but can use `API.Auth.validate_event/1` internally - `AUTH` stays connection-specific, but validation helpers may move to `API.Auth`
- `NEG-*` maps to the reusable NIP-77 engine and remains exposed through the websocket transport boundary
WebSocket keeps responsibility for: ### HTTP management
- websocket framing
- subscription lifecycle per connection
- AUTH challenge rotation protocol frames
## 5.2 HTTP management (`Parrhesia.Web.Management`) - NIP-98 validation via `Parrhesia.API.Auth.validate_nip98/3`
- management methods via `Parrhesia.API.Admin`
- sync peer CRUD and health endpoints may delegate to `Parrhesia.API.Sync`
- identity and ACL management may delegate to `API.Identity` and `API.ACL`
- NIP-98 header validation via `Parrhesia.API.Auth.validate_nip98/3` ### Local wrappers
- command execution via `Parrhesia.API.Admin` (or existing storage admin adapter via API facade)
`Parrhesia.Local.*` remain thin delegates over `Parrhesia.API.*`.
--- ---
## 6) High-level client helpers for embedding app use case ## 7. Relationship to Sync Profiles
These helpers are optional and live in `Parrhesia.Local.Client`. This document is intentionally lower-level than `TRIBES-NOSTRSYNC` and `SYNC_DB.md`.
## 6.1 Public posts Those documents may require:
```elixir - `Parrhesia.API.Events.publish/2`
@spec publish_post(Parrhesia.API.Auth.Context.t(), String.t(), keyword()) :: - `Parrhesia.API.Events.query/2`
{:ok, Parrhesia.API.Events.PublishResult.t()} | {:error, term()} - `Parrhesia.API.Stream.subscribe/4`
- `Parrhesia.API.Sync.*`
@spec list_posts(keyword()) :: {:ok, [map()]} | {:error, term()} But they must not move application conflict rules or payload semantics into Parrhesia.
@spec stream_posts(pid(), keyword()) :: {:ok, reference()} | {:error, term()}
```
`publish_post/3` options:
- `:tags`
- `:created_at`
- `:signer` callback (required unless fully signed event provided)
Signer contract:
```elixir
(unsigned_event_map -> {:ok, signed_event_map} | {:error, term()})
```
Parrhesia does not store or manage private keys.
## 6.2 Private messages (giftwrap kind 1059)
```elixir
@spec send_private_message(
Parrhesia.API.Auth.Context.t(),
recipient_pubkey :: String.t(),
encrypted_payload :: String.t(),
keyword()
) :: {:ok, Parrhesia.API.Events.PublishResult.t()} | {:error, term()}
@spec inbox(Parrhesia.API.Auth.Context.t(), keyword()) :: {:ok, [map()]} | {:error, term()}
@spec stream_inbox(pid(), Parrhesia.API.Auth.Context.t(), keyword()) :: {:ok, reference()} | {:error, term()}
```
Behavior:
- `send_private_message/4` builds event template with kind `1059` and `p` tag.
- host signer signs template.
- publish through `API.Events.publish/2`.
- `inbox/2` queries `%{"kinds" => [1059], "#p" => [auth.pubkey]}` with authenticated context.
---
## 7) Error model
Shared API should normalize output regardless of transport.
Guideline:
- protocol/policy rejection -> `{:ok, %{accepted: false, message: "..."}}`
- runtime/system failure -> `{:error, term()}`
Common reason mapping:
| Reason | Message prefix |
|---|---|
| `:auth_required` | `auth-required:` |
| `:restricted_giftwrap` | `restricted:` |
| `:invalid_event` | `invalid:` |
| `:duplicate_event` | `duplicate:` |
| `:event_rate_limited` | `rate-limited:` |
---
## 8) Telemetry
Emit shared events in API layer (not transport-specific):
- `[:parrhesia, :api, :publish, :stop]`
- `[:parrhesia, :api, :query, :stop]`
- `[:parrhesia, :api, :count, :stop]`
- `[:parrhesia, :api, :auth, :stop]`
Metadata:
- `traffic_class`
- `caller` (`:websocket | :http | :local | :sync`)
- optional `account_present?`
Transport-level telemetry can remain separate where needed.
---
## 9) Refactor sequence
### Phase 1: Extract shared API
1. Create `Parrhesia.API.Events` with publish/query/count from current `Web.Connection` paths.
2. Create `Parrhesia.API.Auth` wrappers for NIP-98/event validation.
3. Add API-level tests.
### Phase 2: Migrate transports
1. Update `Parrhesia.Web.Connection` to delegate publish/query/count to `API.Events`.
2. Update `Parrhesia.Web.Management` to use `API.Auth`.
3. Keep behavior unchanged.
### Phase 3: Add local wrappers/helpers
1. Implement `Parrhesia.Local.Auth/Events/Stream` as thin delegates.
2. Add `Parrhesia.Local.Client` post/inbox/send helpers.
3. Add embedding documentation.
### Phase 4: Lock parity
1. Add parity tests: WS vs Local API for same inputs and policy outcomes.
2. Add property tests for query/count equivalence where feasible.
---
## 10) Testing requirements
1. **Transport parity tests**
- Same signed event via WS and API => same accepted/message semantics.
2. **Policy parity tests**
- Giftwrap visibility and auth-required behavior identical across WS/API/local.
3. **Auth tests**
- NIP-98 success/failure + account resolver success/failure.
4. **Fanout tests**
- publish via API reaches local stream subscribers and WS subscribers.
5. **Failure tests**
- storage failures surface deterministic errors in all transports.
---
## 11) Backwards compatibility
- No breaking change to websocket protocol.
- No breaking change to management endpoint contract.
- New API modules are additive.
- Existing apps can ignore local API entirely.
---
## 12) Embedding example flow
### 12.1 Login/auth
```elixir
with {:ok, auth} <- Parrhesia.API.Auth.validate_nip98(header, method, url,
account_resolver: &MyApp.Accounts.resolve_nostr_pubkey/2
) do
# use auth.pubkey/auth.account in host session
end
```
### 12.2 Post publish
```elixir
Parrhesia.Local.Client.publish_post(auth, "hello", signer: &MyApp.NostrSigner.sign/1)
```
### 12.3 Private message
```elixir
Parrhesia.Local.Client.send_private_message(
auth,
recipient_pubkey,
encrypted_payload,
signer: &MyApp.NostrSigner.sign/1
)
```
### 12.4 Inbox
```elixir
Parrhesia.Local.Client.inbox(auth, limit: 100)
```
---
## 13) Summary
Yes, this can and should be extracted into a shared API module. The server should consume it too.
That gives:
- one canonical behavior path,
- cleaner embedding,
- easier testing,
- lower long-term maintenance cost.

View File

@@ -45,7 +45,7 @@
config = { config = {
Entrypoint = ["${parrhesia}/bin/parrhesia"]; Entrypoint = ["${parrhesia}/bin/parrhesia"];
Cmd = ["foreground"]; Cmd = ["start"];
ExposedPorts = { ExposedPorts = {
"4413/tcp" = {}; "4413/tcp" = {};
}; };

250
lib/parrhesia/api/acl.ex Normal file
View File

@@ -0,0 +1,250 @@
defmodule Parrhesia.API.ACL do
  @moduledoc """
  Public ACL API and rule matching for protected sync traffic.

  Rules are stored through `Parrhesia.Storage.acl/0` and bind a principal
  (currently an authenticated pubkey — see `list_rules_for_capability/1`) to a
  capability (`:sync_read` or `:sync_write`) plus a filter-shaped `match` map.
  `check/3` only enforces rules for subjects that overlap the configured
  protected filters; everything else is allowed through unchanged.
  """

  alias Parrhesia.API.RequestContext
  alias Parrhesia.Protocol.Filter
  alias Parrhesia.Storage

  @doc """
  Stores (upserts) an ACL rule.

  Binary principals are normalized to lowercase so they compare equal to the
  lowercased authenticated pubkeys used by `check/3` and `list/1`.
  """
  @spec grant(map(), keyword()) :: :ok | {:error, term()}
  def grant(rule, _opts \\ []) do
    with {:ok, _stored_rule} <- Storage.acl().put_rule(%{}, normalize_rule(rule)) do
      :ok
    end
  end

  @doc "Deletes ACL rules matching the given selector map."
  @spec revoke(map(), keyword()) :: :ok | {:error, term()}
  def revoke(rule, _opts \\ []) do
    Storage.acl().delete_rule(%{}, normalize_delete_selector(rule))
  end

  @doc """
  Lists stored ACL rules.

  Supported options: `:principal_type`, `:principal` (lowercased when binary),
  and `:capability`.
  """
  @spec list(keyword()) :: {:ok, [map()]} | {:error, term()}
  def list(opts \\ []) do
    Storage.acl().list_rules(%{}, normalize_list_opts(opts))
  end

  @doc """
  Authorizes a subject: a filter map for `:sync_read`, an event map for
  `:sync_write`.

  Subjects that do not touch any protected filter are allowed. Protected
  subjects require at least one authenticated pubkey in `opts[:context]`
  (a `%Parrhesia.API.RequestContext{}`) holding a covering rule for the
  capability. Any other capability returns `{:error, :invalid_acl_capability}`.
  """
  @spec check(atom(), map(), keyword()) :: :ok | {:error, term()}
  def check(capability, subject, opts \\ [])

  def check(capability, subject, opts)
      when capability in [:sync_read, :sync_write] and is_map(subject) do
    context = Keyword.get(opts, :context, %RequestContext{})

    with {:ok, normalized_capability} <- normalize_capability(capability),
         {:ok, normalized_context} <- normalize_context(context),
         {:ok, protected_filters} <- protected_filters() do
      if protected_subject?(normalized_capability, subject, protected_filters) do
        authorize_subject(normalized_capability, subject, normalized_context)
      else
        :ok
      end
    end
  end

  def check(_capability, _subject, _opts), do: {:error, :invalid_acl_capability}

  @doc "True when the filter overlaps any configured protected filter."
  @spec protected_read?(map()) :: boolean()
  def protected_read?(filter) when is_map(filter) do
    case protected_filters() do
      {:ok, protected_filters} ->
        protected_subject?(:sync_read, filter, protected_filters)

      {:error, _reason} ->
        false
    end
  end

  def protected_read?(_filter), do: false

  @doc "True when the event matches any configured protected filter."
  @spec protected_write?(map()) :: boolean()
  def protected_write?(event) when is_map(event) do
    case protected_filters() do
      {:ok, protected_filters} ->
        protected_subject?(:sync_write, event, protected_filters)

      {:error, _reason} ->
        false
    end
  end

  def protected_write?(_event), do: false

  # Protected subjects are default-deny: an empty authenticated set is
  # rejected before any rule lookup.
  defp authorize_subject(capability, subject, %RequestContext{} = context) do
    if MapSet.size(context.authenticated_pubkeys) == 0 do
      {:error, :auth_required}
    else
      capability
      |> list_rules_for_capability()
      |> authorize_against_rules(capability, context.authenticated_pubkeys, subject)
    end
  end

  defp list_rules_for_capability(capability) do
    Storage.acl().list_rules(%{}, principal_type: :pubkey, capability: capability)
  end

  defp authorize_against_rules({:ok, rules}, capability, authenticated_pubkeys, subject) do
    if Enum.any?(authenticated_pubkeys, &principal_authorized?(&1, subject, rules)) do
      :ok
    else
      {:error, denial_reason(capability)}
    end
  end

  defp authorize_against_rules({:error, reason}, _capability, _authenticated_pubkeys, _subject),
    do: {:error, reason}

  defp principal_authorized?(authenticated_pubkey, subject, rules) do
    Enum.any?(rules, fn rule ->
      rule.principal == authenticated_pubkey and
        rule_covers_subject?(rule.capability, rule.match, subject)
    end)
  end

  # Reads are covered when the requested filter is a subset of the rule match;
  # writes are covered when the event matches the rule match directly.
  defp rule_covers_subject?(:sync_read, rule_match, filter),
    do: filter_within_rule?(filter, rule_match)

  defp rule_covers_subject?(:sync_write, rule_match, event),
    do: Filter.matches_filter?(event, rule_match)

  defp protected_subject?(:sync_read, filter, protected_filters) do
    Enum.any?(protected_filters, &filters_overlap?(filter, &1))
  end

  defp protected_subject?(:sync_write, event, protected_filters) do
    Enum.any?(protected_filters, &Filter.matches_filter?(event, &1))
  end

  # Conservative overlap test: two filters overlap unless some shared
  # constraint key proves them disjoint AND their since/until ranges meet.
  defp filters_overlap?(left, right) when is_map(left) and is_map(right) do
    comparable_keys =
      left
      |> comparable_filter_keys(right)
      |> Enum.reject(&(&1 in ["limit", "search", "since", "until"]))

    Enum.all?(
      comparable_keys,
      &filter_constraint_compatible?(Map.get(left, &1), Map.get(right, &1), &1)
    ) and
      filter_ranges_overlap?(left, right)
  end

  # A missing constraint on either side cannot prove disjointness.
  defp filter_constraint_compatible?(nil, _right, _key), do: true
  defp filter_constraint_compatible?(_left, nil, _key), do: true

  defp filter_constraint_compatible?(left, right, _key) when is_list(left) and is_list(right) do
    not MapSet.disjoint?(MapSet.new(left), MapSet.new(right))
  end

  defp filter_constraint_compatible?(left, right, _key), do: left == right

  # Every rule constraint (ignoring range/limit/search keys) must be present
  # in the requested filter and be a subset of the rule's values.
  defp filter_within_rule?(filter, rule_match) when is_map(filter) and is_map(rule_match) do
    Enum.reject(rule_match, fn {key, _value} -> key in ["since", "until", "limit", "search"] end)
    |> Enum.all?(fn {key, rule_value} ->
      requested_value = Map.get(filter, key)
      requested_constraint_within_rule?(requested_value, rule_value, key)
    end) and filter_range_within_rule?(filter, rule_match)
  end

  # A request that omits a rule-constrained key asks for MORE than the rule
  # allows, so it is not within the rule.
  defp requested_constraint_within_rule?(nil, _rule_value, _key), do: false

  defp requested_constraint_within_rule?(requested_values, rule_values, _key)
       when is_list(requested_values) and is_list(rule_values) do
    requested_values
    |> MapSet.new()
    |> MapSet.subset?(MapSet.new(rule_values))
  end

  defp requested_constraint_within_rule?(requested_value, rule_value, _key),
    do: requested_value == rule_value

  defp denial_reason(:sync_read), do: :sync_read_not_allowed
  defp denial_reason(:sync_write), do: :sync_write_not_allowed

  defp normalize_context(%RequestContext{} = context), do: {:ok, normalize_pubkeys(context)}
  defp normalize_context(_context), do: {:error, :invalid_context}

  defp normalize_pubkeys(%RequestContext{} = context) do
    normalized_pubkeys =
      context.authenticated_pubkeys
      |> Enum.map(&String.downcase/1)
      |> MapSet.new()

    %RequestContext{context | authenticated_pubkeys: normalized_pubkeys}
  end

  # Lowercase the principal on write so it compares equal to the lowercased
  # authenticated pubkeys used by check/3 and list/1. Previously a rule
  # granted with an uppercase hex pubkey could never authorize anyone.
  defp normalize_rule(rule) when is_map(rule) do
    rule
    |> downcase_principal(:principal)
    |> downcase_principal("principal")
  end

  defp normalize_rule(_rule), do: %{}

  defp downcase_principal(rule, key) do
    case Map.get(rule, key) do
      principal when is_binary(principal) -> Map.put(rule, key, String.downcase(principal))
      _other -> rule
    end
  end

  defp normalize_delete_selector(selector) when is_map(selector), do: selector
  defp normalize_delete_selector(_selector), do: %{}

  defp normalize_list_opts(opts) do
    []
    |> maybe_put_opt(:principal_type, Keyword.get(opts, :principal_type))
    |> maybe_put_opt(:principal, normalize_list_principal(Keyword.get(opts, :principal)))
    |> maybe_put_opt(:capability, Keyword.get(opts, :capability))
  end

  defp normalize_list_principal(nil), do: nil

  defp normalize_list_principal(principal) when is_binary(principal),
    do: String.downcase(principal)

  defp normalize_list_principal(principal), do: principal

  defp maybe_put_opt(opts, _key, nil), do: opts
  defp maybe_put_opt(opts, key, value), do: Keyword.put(opts, key, value)

  defp normalize_capability(capability) do
    case capability do
      :sync_read -> {:ok, :sync_read}
      :sync_write -> {:ok, :sync_write}
      _other -> {:error, :invalid_acl_capability}
    end
  end

  # Protected filters come from app config; every entry must be a valid
  # filter map. On invalid config `check/3` propagates the error, while the
  # `protected_*?` helpers treat the subject as not protected.
  defp protected_filters do
    filters =
      :parrhesia
      |> Application.get_env(:acl, [])
      |> Keyword.get(:protected_filters, [])

    if is_list(filters) and
         Enum.all?(filters, &(match?(%{}, &1) and Filter.validate_filter(&1) == :ok)) do
      {:ok, filters}
    else
      {:error, :invalid_protected_filters}
    end
  end

  defp comparable_filter_keys(left, right) do
    Map.keys(left)
    |> Kernel.++(Map.keys(right))
    |> Enum.uniq()
  end

  # Treat missing bounds as open: "since" defaults to 0, "until" to max int64.
  defp filter_ranges_overlap?(left, right) do
    since = max(boundary_value(left, "since", :lower), boundary_value(right, "since", :lower))
    until = min(boundary_value(left, "until", :upper), boundary_value(right, "until", :upper))
    since <= until
  end

  defp filter_range_within_rule?(filter, rule_match) do
    requested_since = Map.get(filter, "since")
    requested_until = Map.get(filter, "until")
    rule_since = Map.get(rule_match, "since")
    rule_until = Map.get(rule_match, "until")

    lower_ok? =
      is_nil(rule_since) or (is_integer(requested_since) and requested_since >= rule_since)

    upper_ok? =
      is_nil(rule_until) or (is_integer(requested_until) and requested_until <= rule_until)

    lower_ok? and upper_ok?
  end

  defp boundary_value(filter, key, :lower), do: Map.get(filter, key, 0)
  defp boundary_value(filter, key, :upper), do: Map.get(filter, key, 9_223_372_036_854_775_807)
end

286
lib/parrhesia/api/admin.ex Normal file
View File

@@ -0,0 +1,286 @@
defmodule Parrhesia.API.Admin do
  @moduledoc """
  Public management API facade.

  Dispatches management methods by name: builtin methods (ACL, identity,
  listener, and sync methods plus stats/health/audit logs) are handled in
  this module; anything else is forwarded to the storage admin adapter via
  `Parrhesia.Storage.admin/0`.
  """

  alias Parrhesia.API.ACL
  alias Parrhesia.API.Identity
  alias Parrhesia.API.Sync
  alias Parrhesia.Storage
  alias Parrhesia.Web.Endpoint

  # Method names handled directly by this module; anything not listed falls
  # through to `Storage.admin().execute/3`.
  @supported_admin_methods ~w(health list_audit_logs stats)
  @supported_acl_methods ~w(acl_grant acl_revoke acl_list)
  @supported_identity_methods ~w(identity_ensure identity_get identity_import identity_rotate)
  @supported_listener_methods ~w(listener_reload)
  @supported_sync_methods ~w(
    sync_get_server
    sync_health
    sync_list_servers
    sync_put_server
    sync_remove_server
    sync_server_stats
    sync_start_server
    sync_stats
    sync_stop_server
    sync_sync_now
  )

  @doc """
  Executes a management method by name (atom or string).

  Builtin methods are handled locally via `execute_builtin/3`; unknown
  methods are forwarded to the storage admin adapter with the raw params.
  """
  @spec execute(String.t() | atom(), map(), keyword()) :: {:ok, map()} | {:error, term()}
  def execute(method, params, opts \\ [])

  def execute(method, params, opts) when is_map(params) do
    method_name = normalize_method_name(method)

    case execute_builtin(method_name, params, opts) do
      {:continue, other_method} -> Storage.admin().execute(%{}, other_method, params)
      result -> result
    end
  end

  # NOTE(review): non-map params are reported as an unsupported method rather
  # than invalid params — confirm callers rely on this error shape.
  def execute(method, _params, _opts),
    do: {:error, {:unsupported_method, normalize_method_name(method)}}

  @doc """
  Relay-level stats from the storage adapter, merged with sync worker stats
  under the `"sync"` key.
  """
  @spec stats(keyword()) :: {:ok, map()} | {:error, term()}
  def stats(opts \\ []) do
    with {:ok, relay_stats} <- relay_stats(),
         {:ok, sync_stats} <- Sync.sync_stats(opts) do
      {:ok, Map.put(relay_stats, "sync", sync_stats)}
    end
  end

  @doc """
  Health report combining an overall `"status"` with the sync worker health
  returned by `Parrhesia.API.Sync.sync_health/1`.
  """
  @spec health(keyword()) :: {:ok, map()} | {:error, term()}
  def health(opts \\ []) do
    with {:ok, sync_health} <- Sync.sync_health(opts) do
      {:ok,
       %{
         "status" => overall_health_status(sync_health),
         "sync" => sync_health
       }}
    end
  end

  @doc "Lists audit log entries via the storage admin adapter."
  @spec list_audit_logs(keyword()) :: {:ok, [map()]} | {:error, term()}
  def list_audit_logs(opts \\ []) do
    Storage.admin().list_audit_logs(%{}, opts)
  end

  # --- ACL method handlers (thin delegates to Parrhesia.API.ACL) ---

  defp acl_grant(params) do
    with :ok <- ACL.grant(params) do
      {:ok, %{"ok" => true}}
    end
  end

  defp acl_revoke(params) do
    with :ok <- ACL.revoke(params) do
      {:ok, %{"ok" => true}}
    end
  end

  defp acl_list(params) do
    with {:ok, rules} <- ACL.list(acl_list_opts(params)) do
      {:ok, %{"rules" => rules}}
    end
  end

  # Translates request params into `Parrhesia.API.ACL.list/1` options.
  defp acl_list_opts(params) do
    []
    |> maybe_put_opt(:principal_type, fetch_value(params, :principal_type))
    |> maybe_put_opt(:principal, fetch_value(params, :principal))
    |> maybe_put_opt(:capability, fetch_value(params, :capability))
  end

  # Union of adapter-reported methods and the builtin method lists above.
  # Adapter failures or unexpected shapes contribute an empty list.
  defp supported_methods do
    storage_supported =
      case Storage.admin().execute(%{}, :supportedmethods, %{}) do
        {:ok, methods} when is_list(methods) -> methods
        {:ok, %{"methods" => methods}} when is_list(methods) -> methods
        _other -> []
      end

    (storage_supported ++
       @supported_admin_methods ++
       @supported_acl_methods ++
       @supported_identity_methods ++ @supported_listener_methods ++ @supported_sync_methods)
    |> Enum.uniq()
    |> Enum.sort()
  end

  # --- Identity method handlers (params ignored except for import) ---

  defp identity_get(_params), do: Identity.get()
  defp identity_ensure(_params), do: Identity.ensure()
  defp identity_rotate(_params), do: Identity.rotate()

  defp identity_import(params) do
    Identity.import(params)
  end

  # --- Builtin admin handlers ---

  defp admin_stats(_params, opts), do: stats(opts)
  defp admin_health(_params, opts), do: health(opts)

  defp admin_list_audit_logs(params, _opts) do
    list_audit_logs(audit_log_opts(params))
  end

  # Reloads one listener (resolved from params `:id`) or all listeners when
  # no id is given; unknown listener ids yield `{:error, :not_found}`.
  defp listener_reload(params) do
    case normalize_listener_id(fetch_value(params, :id)) do
      :all ->
        Endpoint.reload_all()
        |> ok_result()

      {:ok, listener_id} ->
        listener_id
        |> Endpoint.reload_listener()
        |> ok_result()

      :error ->
        {:error, :not_found}
    end
  end

  # --- Sync method handlers (thin delegates to Parrhesia.API.Sync) ---

  defp sync_put_server(params, opts), do: Sync.put_server(params, opts)

  defp sync_remove_server(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         :ok <- Sync.remove_server(server_id, opts) do
      {:ok, %{"ok" => true}}
    end
  end

  # `Sync.get_server/2` returns bare `:error` for a missing server; map it
  # to `{:error, :not_found}` for the management surface.
  defp sync_get_server(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         {:ok, server} <- Sync.get_server(server_id, opts) do
      {:ok, server}
    else
      :error -> {:error, :not_found}
      other -> other
    end
  end

  defp sync_list_servers(_params, opts), do: Sync.list_servers(opts)

  defp sync_start_server(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         :ok <- Sync.start_server(server_id, opts) do
      {:ok, %{"ok" => true}}
    end
  end

  defp sync_stop_server(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         :ok <- Sync.stop_server(server_id, opts) do
      {:ok, %{"ok" => true}}
    end
  end

  defp sync_sync_now(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         :ok <- Sync.sync_now(server_id, opts) do
      {:ok, %{"ok" => true}}
    end
  end

  # Same `:error` -> `:not_found` mapping as sync_get_server/2.
  defp sync_server_stats(params, opts) do
    with {:ok, server_id} <- fetch_required_string(params, :id),
         {:ok, stats} <- Sync.server_stats(server_id, opts) do
      {:ok, stats}
    else
      :error -> {:error, :not_found}
      other -> other
    end
  end

  defp sync_stats(_params, opts), do: Sync.sync_stats(opts)
  defp sync_health(_params, opts), do: Sync.sync_health(opts)

  # --- Builtin dispatch table; clause order is the routing table. ---

  defp execute_builtin("stats", params, opts), do: admin_stats(params, opts)
  defp execute_builtin("health", params, opts), do: admin_health(params, opts)
  defp execute_builtin("list_audit_logs", params, opts), do: admin_list_audit_logs(params, opts)
  defp execute_builtin("acl_grant", params, _opts), do: acl_grant(params)
  defp execute_builtin("acl_revoke", params, _opts), do: acl_revoke(params)
  defp execute_builtin("acl_list", params, _opts), do: acl_list(params)
  defp execute_builtin("identity_get", params, _opts), do: identity_get(params)
  defp execute_builtin("identity_ensure", params, _opts), do: identity_ensure(params)
  defp execute_builtin("identity_import", params, _opts), do: identity_import(params)
  defp execute_builtin("identity_rotate", params, _opts), do: identity_rotate(params)
  defp execute_builtin("listener_reload", params, _opts), do: listener_reload(params)
  defp execute_builtin("sync_put_server", params, opts), do: sync_put_server(params, opts)
  defp execute_builtin("sync_remove_server", params, opts), do: sync_remove_server(params, opts)
  defp execute_builtin("sync_get_server", params, opts), do: sync_get_server(params, opts)
  defp execute_builtin("sync_list_servers", params, opts), do: sync_list_servers(params, opts)
  defp execute_builtin("sync_start_server", params, opts), do: sync_start_server(params, opts)
  defp execute_builtin("sync_stop_server", params, opts), do: sync_stop_server(params, opts)
  defp execute_builtin("sync_sync_now", params, opts), do: sync_sync_now(params, opts)
  defp execute_builtin("sync_server_stats", params, opts), do: sync_server_stats(params, opts)
  defp execute_builtin("sync_stats", params, opts), do: sync_stats(params, opts)
  defp execute_builtin("sync_health", params, opts), do: sync_health(params, opts)

  defp execute_builtin("supportedmethods", _params, _opts),
    do: {:ok, %{"methods" => supported_methods()}}

  # Unknown methods continue to the storage admin adapter.
  defp execute_builtin(other_method, _params, _opts), do: {:continue, other_method}

  # Relay stats from the storage adapter; an adapter without a :stats method
  # contributes an empty map instead of failing the whole stats call.
  defp relay_stats do
    case Storage.admin().execute(%{}, :stats, %{}) do
      {:ok, stats} when is_map(stats) -> {:ok, stats}
      {:error, {:unsupported_method, _method}} -> {:ok, %{}}
      other -> other
    end
  end

  # NOTE(review): only a literal "degraded" sync status degrades overall
  # health — confirm sync health cannot report other failure statuses.
  defp overall_health_status(%{"status" => "degraded"}), do: "degraded"
  defp overall_health_status(_sync_health), do: "ok"

  # Translates request params into audit-log listing options.
  defp audit_log_opts(params) do
    []
    |> maybe_put_opt(:limit, fetch_value(params, :limit))
    |> maybe_put_opt(:method, fetch_value(params, :method))
    |> maybe_put_opt(:actor_pubkey, fetch_value(params, :actor_pubkey))
  end

  defp maybe_put_opt(opts, _key, nil), do: opts
  defp maybe_put_opt(opts, key, value), do: Keyword.put(opts, key, value)

  # Normalizes endpoint reload results: :ok becomes an "ok" payload, errors
  # pass through, anything else is returned as-is.
  defp ok_result(:ok), do: {:ok, %{"ok" => true}}
  defp ok_result({:error, _reason} = error), do: error
  defp ok_result(other), do: other

  # nil means "all listeners" (clause order matters: nil is also an atom);
  # atoms are trusted as listener ids; binaries are resolved against the
  # endpoint supervisor's running listener children.
  defp normalize_listener_id(nil), do: :all

  defp normalize_listener_id(listener_id) when is_atom(listener_id) do
    {:ok, listener_id}
  end

  defp normalize_listener_id(listener_id) when is_binary(listener_id) do
    case Supervisor.which_children(Endpoint) do
      children when is_list(children) ->
        Enum.find_value(children, :error, &match_listener_child(&1, listener_id))

      _other ->
        :error
    end
  end

  defp normalize_listener_id(_listener_id), do: :error

  # Matches a `{{:listener, id}, ...}` supervisor child against a string id;
  # returns false (falsy for Enum.find_value) when it does not match.
  defp match_listener_child({{:listener, id}, _pid, _type, _modules}, listener_id) do
    normalized_id = Atom.to_string(id)
    if normalized_id == listener_id, do: {:ok, id}, else: false
  end

  defp match_listener_child(_child, _listener_id), do: false

  # Fetches a required non-empty string param under an atom or string key.
  defp fetch_required_string(map, key) do
    case fetch_value(map, key) do
      value when is_binary(value) and value != "" -> {:ok, value}
      _other -> {:error, {:missing_param, Atom.to_string(key)}}
    end
  end

  # Accepts atom- or string-keyed params; note `||` also drops `false` values.
  defp fetch_value(map, key), do: Map.get(map, key) || Map.get(map, Atom.to_string(key))

  defp normalize_method_name(method) when is_atom(method), do: Atom.to_string(method)
  defp normalize_method_name(method) when is_binary(method), do: method
  defp normalize_method_name(method), do: inspect(method)
end

48
lib/parrhesia/api/auth.ex Normal file
View File

@@ -0,0 +1,48 @@
defmodule Parrhesia.API.Auth do
  @moduledoc """
  Shared auth and event validation helpers.

  Thin façade over `EventValidator` (event validation / id computation)
  and `Nip98` (HTTP authorization events). On successful NIP-98
  validation it returns a populated `Context` carrying the authenticated
  pubkey and a ready-to-use `RequestContext`.
  """

  alias Parrhesia.API.Auth.Context
  alias Parrhesia.API.RequestContext
  alias Parrhesia.Auth.Nip98
  alias Parrhesia.Protocol.EventValidator

  @spec validate_event(map()) :: :ok | {:error, term()}
  def validate_event(event), do: EventValidator.validate(event)

  @spec compute_event_id(map()) :: String.t()
  def compute_event_id(event), do: EventValidator.compute_id(event)

  @spec validate_nip98(String.t() | nil, String.t(), String.t()) ::
          {:ok, Context.t()} | {:error, term()}
  def validate_nip98(authorization, method, url) do
    validate_nip98(authorization, method, url, [])
  end

  @doc """
  Validates a NIP-98 `Authorization` header for `method`/`url`.

  Returns `{:ok, %Context{}}` whose `request_context` already contains
  the authenticated pubkey (caller tagged `:http`), or `{:error, reason}`.
  """
  @spec validate_nip98(String.t() | nil, String.t(), String.t(), keyword()) ::
          {:ok, Context.t()} | {:error, term()}
  def validate_nip98(authorization, method, url, opts)
      when is_binary(method) and is_binary(url) and is_list(opts) do
    with {:ok, auth_event} <-
           Nip98.validate_authorization_header(authorization, method, url, opts),
         pubkey when is_binary(pubkey) <- Map.get(auth_event, "pubkey") do
      {:ok,
       %Context{
         auth_event: auth_event,
         pubkey: pubkey,
         request_context: %RequestContext{
           authenticated_pubkeys: MapSet.new([pubkey]),
           caller: :http
         },
         metadata: %{
           method: method,
           url: url
         }
       }}
    else
      {:error, reason} ->
        {:error, reason}

      # Missing ("pubkey" => nil) OR malformed (non-binary) pubkey.
      # Fix: the previous else only matched `nil`, so any non-nil,
      # non-binary pubkey raised a WithClauseError instead of returning
      # an error tuple.
      _invalid_pubkey ->
        {:error, :invalid_event}
    end
  end
end

View File

@@ -0,0 +1,19 @@
defmodule Parrhesia.API.Auth.Context do
  @moduledoc """
  Authenticated request details returned by shared auth helpers.
  """

  alias Parrhesia.API.RequestContext

  # auth_event:      the validated NIP-98 authorization event (or nil)
  # pubkey:          hex pubkey extracted from the auth event (or nil)
  # request_context: prebuilt RequestContext carrying the authed pubkey
  # metadata:        free-form extras (e.g. the method/url validated against)
  defstruct auth_event: nil,
            pubkey: nil,
            request_context: %RequestContext{},
            metadata: %{}

  @type t :: %__MODULE__{
          auth_event: map() | nil,
          pubkey: String.t() | nil,
          request_context: RequestContext.t(),
          metadata: map()
        }
end

373
lib/parrhesia/api/events.ex Normal file
View File

@@ -0,0 +1,373 @@
defmodule Parrhesia.API.Events do
  @moduledoc """
  Canonical event publish, query, and count API.

  Every entry point requires a `:context` option holding a
  `Parrhesia.API.RequestContext`; validation, policy checks, storage
  access, fanout, and telemetry are funneled through here so all
  surfaces share identical semantics.
  """

  alias Parrhesia.API.Events.PublishResult
  alias Parrhesia.API.RequestContext
  alias Parrhesia.Fanout.MultiNode
  alias Parrhesia.Groups.Flow
  alias Parrhesia.Policy.EventPolicy
  alias Parrhesia.Protocol
  alias Parrhesia.Protocol.Filter
  alias Parrhesia.Storage
  alias Parrhesia.Subscriptions.Index
  alias Parrhesia.Telemetry

  # Fallback cap on the serialized event size (256 KiB) when no limit
  # is configured or passed in opts.
  @default_max_event_bytes 262_144

  # Kinds associated with Marmot/MLS traffic; used only to tag telemetry
  # with a :marmot vs :generic traffic class.
  @marmot_kinds MapSet.new([
                  443,
                  444,
                  445,
                  446,
                  447,
                  448,
                  449,
                  1059,
                  10_050,
                  10_051
                ])

  @doc """
  Validates, authorizes, persists, and fans out a single event.

  Returns `{:ok, %PublishResult{}}` for both accepted and rejected
  events (rejections carry `accepted: false` plus a reason and a
  relay-style message); only a missing/invalid `:context` or a
  non-map event produces a bare `{:error, _}`.
  """
  @spec publish(map(), keyword()) :: {:ok, PublishResult.t()} | {:error, term()}
  def publish(event, opts \\ [])

  def publish(event, opts) when is_map(event) and is_list(opts) do
    started_at = System.monotonic_time()
    event_id = Map.get(event, "id", "")

    with {:ok, context} <- fetch_context(opts),
         :ok <- validate_event_payload_size(event, max_event_bytes(opts)),
         :ok <- Protocol.validate_event(event),
         :ok <- EventPolicy.authorize_write(event, context.authenticated_pubkeys, context),
         :ok <- maybe_process_group_event(event),
         {:ok, _stored, message} <- persist_event(event) do
      Telemetry.emit(
        [:parrhesia, :ingest, :stop],
        %{duration: System.monotonic_time() - started_at},
        telemetry_metadata_for_event(event)
      )

      # Local subscriber fanout and multi-node replication are
      # best-effort: both swallow exits and never fail the publish.
      fanout_event(event)
      maybe_publish_multi_node(event)

      {:ok,
       %PublishResult{
         event_id: event_id,
         accepted: true,
         message: message,
         reason: nil
       }}
    else
      {:error, :invalid_context} = error ->
        error

      {:error, reason} ->
        {:ok,
         %PublishResult{
           event_id: event_id,
           accepted: false,
           message: error_message_for_publish_failure(reason),
           reason: reason
         }}
    end
  end

  def publish(_event, _opts), do: {:error, :invalid_event}

  @doc """
  Runs NIP-01 filters against storage.

  Options: `:validate_filters?` (default true), `:authorize_read?`
  (default true), `:max_filter_limit` override.
  """
  @spec query([map()], keyword()) :: {:ok, [map()]} | {:error, term()}
  def query(filters, opts \\ [])

  def query(filters, opts) when is_list(filters) and is_list(opts) do
    started_at = System.monotonic_time()

    with {:ok, context} <- fetch_context(opts),
         :ok <- maybe_validate_filters(filters, opts),
         :ok <- maybe_authorize_read(filters, context, opts),
         {:ok, events} <- Storage.events().query(%{}, filters, storage_query_opts(context, opts)) do
      Telemetry.emit(
        [:parrhesia, :query, :stop],
        %{duration: System.monotonic_time() - started_at},
        telemetry_metadata_for_filters(filters)
      )

      {:ok, events}
    end
  end

  def query(_filters, _opts), do: {:error, :invalid_filters}

  @doc """
  Counts events matching the filters (NIP-45).

  When `opts[:options]` is a map, the result is a NIP-45 payload map
  (`"count"`, `"approximate"`, optionally `"hll"`); otherwise the raw
  backend count is returned. Shares the `:query` telemetry event.
  """
  @spec count([map()], keyword()) :: {:ok, non_neg_integer() | map()} | {:error, term()}
  def count(filters, opts \\ [])

  def count(filters, opts) when is_list(filters) and is_list(opts) do
    started_at = System.monotonic_time()

    with {:ok, context} <- fetch_context(opts),
         :ok <- maybe_validate_filters(filters, opts),
         :ok <- maybe_authorize_read(filters, context, opts),
         {:ok, count} <-
           Storage.events().count(%{}, filters, requester_pubkeys: requester_pubkeys(context)),
         {:ok, result} <- maybe_build_count_result(filters, count, Keyword.get(opts, :options)) do
      Telemetry.emit(
        [:parrhesia, :query, :stop],
        %{duration: System.monotonic_time() - started_at},
        telemetry_metadata_for_filters(filters)
      )

      {:ok, result}
    end
  end

  def count(_filters, _opts), do: {:error, :invalid_filters}

  # Filter validation can be skipped by trusted internal callers
  # (e.g. Stream's catch-up query, which validated already).
  defp maybe_validate_filters(filters, opts) do
    if Keyword.get(opts, :validate_filters?, true) do
      Filter.validate_filters(filters)
    else
      :ok
    end
  end

  defp maybe_authorize_read(filters, context, opts) do
    if Keyword.get(opts, :authorize_read?, true) do
      EventPolicy.authorize_read(filters, context.authenticated_pubkeys, context)
    else
      :ok
    end
  end

  defp storage_query_opts(context, opts) do
    [
      max_filter_limit:
        Keyword.get(opts, :max_filter_limit, Parrhesia.Config.get([:limits, :max_filter_limit])),
      requester_pubkeys: requester_pubkeys(context)
    ]
  end

  defp requester_pubkeys(%RequestContext{} = context),
    do: MapSet.to_list(context.authenticated_pubkeys)

  # Integer count + option map -> NIP-45 payload; every other
  # combination (no options, or a non-integer backend count) passes the
  # backend value through unchanged. (A redundant integer-count clause
  # duplicating this fallback was removed.)
  defp maybe_build_count_result(filters, count, options)
       when is_integer(count) and is_map(options) do
    build_count_payload(filters, count, options)
  end

  defp maybe_build_count_result(_filters, count, _options), do: {:ok, count}

  defp build_count_payload(filters, count, options) do
    # `== true` rejects truthy-but-non-boolean client input (e.g.
    # "hll": "yes"), which previously raised BadBooleanError from `and`.
    include_hll? =
      Map.get(options, "hll", false) == true and
        Parrhesia.Config.get([:features, :nip_45_count], true)

    payload = %{"count" => count, "approximate" => false}

    payload =
      if include_hll? do
        Map.put(payload, "hll", generate_hll_payload(filters, count))
      else
        payload
      end

    {:ok, payload}
  end

  # NOTE(review): this is a deterministic digest stand-in, not genuine
  # NIP-45 HyperLogLog registers — confirm consumers don't parse it as HLL.
  defp generate_hll_payload(filters, count) do
    filters
    |> JSON.encode!()
    |> then(&"#{&1}:#{count}")
    |> then(&:crypto.hash(:sha256, &1))
    |> Base.encode64()
  end

  # Group-related kinds are routed through the groups flow before persist.
  defp maybe_process_group_event(event) do
    if Flow.group_related_kind?(Map.get(event, "kind")) do
      Flow.handle_event(event)
    else
      :ok
    end
  end

  # Kind 5 (deletion) and 62 (vanish) are control events; ephemeral
  # kinds are accepted without storage; everything else is stored.
  defp persist_event(event) do
    kind = Map.get(event, "kind")

    cond do
      kind in [5, 62] -> persist_control_event(kind, event)
      ephemeral_kind?(kind) -> persist_ephemeral_event()
      true -> persist_regular_event(event)
    end
  end

  defp persist_control_event(5, event) do
    with {:ok, deleted_count} <- Storage.events().delete_by_request(%{}, event) do
      {:ok, deleted_count, "ok: deletion request processed"}
    end
  end

  defp persist_control_event(62, event) do
    with {:ok, deleted_count} <- Storage.events().vanish(%{}, event) do
      {:ok, deleted_count, "ok: vanish request processed"}
    end
  end

  defp persist_ephemeral_event do
    if accept_ephemeral_events?() do
      {:ok, :ephemeral, "ok: ephemeral event accepted"}
    else
      {:error, :ephemeral_events_disabled}
    end
  end

  defp persist_regular_event(event) do
    case Storage.events().put_event(%{}, event) do
      {:ok, persisted_event} -> {:ok, persisted_event, "ok: event stored"}
      {:error, :duplicate_event} -> {:error, :duplicate_event}
      {:error, reason} -> {:error, reason}
    end
  end

  # Pushes the event to all candidate local subscriptions; an exited
  # subscription index never fails the publish.
  defp fanout_event(event) do
    case Index.candidate_subscription_keys(event) do
      candidates when is_list(candidates) ->
        Enum.each(candidates, fn {owner_pid, subscription_id} ->
          send(owner_pid, {:fanout_event, subscription_id, event})
        end)

      _other ->
        :ok
    end
  catch
    :exit, _reason -> :ok
  end

  defp maybe_publish_multi_node(event) do
    MultiNode.publish(event)
    :ok
  catch
    :exit, _reason -> :ok
  end

  defp telemetry_metadata_for_event(event) do
    %{traffic_class: traffic_class_for_event(event)}
  end

  defp telemetry_metadata_for_filters(filters) do
    %{traffic_class: traffic_class_for_filters(filters)}
  end

  defp traffic_class_for_filters(filters) do
    if Enum.any?(filters, &marmot_filter?/1) do
      :marmot
    else
      :generic
    end
  end

  # A filter is Marmot traffic if it targets a Marmot kind or the
  # "#h"/"#i" tag dimensions.
  defp marmot_filter?(filter) when is_map(filter) do
    has_marmot_kind? =
      case Map.get(filter, "kinds") do
        kinds when is_list(kinds) -> Enum.any?(kinds, &MapSet.member?(@marmot_kinds, &1))
        _other -> false
      end

    has_marmot_kind? or Map.has_key?(filter, "#h") or Map.has_key?(filter, "#i")
  end

  defp marmot_filter?(_filter), do: false

  defp traffic_class_for_event(event) when is_map(event) do
    if MapSet.member?(@marmot_kinds, Map.get(event, "kind")) do
      :marmot
    else
      :generic
    end
  end

  defp traffic_class_for_event(_event), do: :generic

  defp fetch_context(opts) do
    case Keyword.get(opts, :context) do
      %RequestContext{} = context -> {:ok, context}
      _other -> {:error, :invalid_context}
    end
  end

  defp error_message_for_publish_failure(:duplicate_event),
    do: "duplicate: event already stored"

  defp error_message_for_publish_failure(:event_too_large),
    do: "invalid: event exceeds max event size"

  defp error_message_for_publish_failure(:ephemeral_events_disabled),
    do: "blocked: ephemeral events are disabled"

  # Policy-originated rejections get their canonical policy message.
  defp error_message_for_publish_failure(reason)
       when reason in [
              :auth_required,
              :pubkey_not_allowed,
              :restricted_giftwrap,
              :sync_write_not_allowed,
              :protected_event_requires_auth,
              :protected_event_pubkey_mismatch,
              :pow_below_minimum,
              :pubkey_banned,
              :event_banned,
              :media_metadata_tags_exceeded,
              :media_metadata_tag_value_too_large,
              :media_metadata_url_too_long,
              :media_metadata_invalid_url,
              :media_metadata_invalid_hash,
              :media_metadata_invalid_mime,
              :media_metadata_mime_not_allowed,
              :media_metadata_unsupported_version,
              :push_notification_relay_tags_exceeded,
              :push_notification_payload_too_large,
              :push_notification_replay_window_exceeded,
              :push_notification_missing_expiration,
              :push_notification_expiration_too_far,
              :push_notification_server_recipients_exceeded
            ],
       do: EventPolicy.error_message(reason)

  defp error_message_for_publish_failure(reason) when is_binary(reason), do: reason
  defp error_message_for_publish_failure(reason), do: "error: #{inspect(reason)}"

  defp validate_event_payload_size(event, max_event_bytes)
       when is_map(event) and is_integer(max_event_bytes) and max_event_bytes > 0 do
    if byte_size(JSON.encode!(event)) <= max_event_bytes do
      :ok
    else
      {:error, :event_too_large}
    end
  end

  defp validate_event_payload_size(_event, _max_event_bytes), do: :ok

  defp max_event_bytes(opts) do
    opts
    |> Keyword.get(:max_event_bytes, configured_max_event_bytes())
    |> normalize_max_event_bytes()
  end

  defp normalize_max_event_bytes(value) when is_integer(value) and value > 0, do: value
  defp normalize_max_event_bytes(_value), do: configured_max_event_bytes()

  defp configured_max_event_bytes do
    :parrhesia
    |> Application.get_env(:limits, [])
    |> Keyword.get(:max_event_bytes, @default_max_event_bytes)
  end

  # NIP-01: kinds 20000-29999 are ephemeral.
  defp ephemeral_kind?(kind) when is_integer(kind), do: kind >= 20_000 and kind < 30_000
  defp ephemeral_kind?(_kind), do: false

  defp accept_ephemeral_events? do
    :parrhesia
    |> Application.get_env(:policies, [])
    |> Keyword.get(:accept_ephemeral_events, true)
  end
end

View File

@@ -0,0 +1,14 @@
defmodule Parrhesia.API.Events.PublishResult do
  @moduledoc """
  Result shape for event publish attempts.
  """

  # event_id: id of the submitted event ("" when absent from the payload)
  # accepted: whether the relay accepted the event
  # message:  relay-style OK/NOTICE message for the client
  # reason:   machine-readable failure reason (nil on success)
  defstruct event_id: nil, accepted: nil, message: nil, reason: nil

  @type t :: %__MODULE__{
          event_id: String.t(),
          accepted: boolean(),
          message: String.t(),
          reason: term()
        }
end

View File

@@ -0,0 +1,243 @@
defmodule Parrhesia.API.Identity do
  @moduledoc """
  Server-auth identity management.

  Resolves the relay's signing keypair from, in priority order: a
  configured private key, a persisted identity file, or a freshly
  generated key (which is then persisted). Also signs events with the
  resolved key.
  """

  alias Parrhesia.API.Auth

  # `source` records where the key material came from.
  @type identity_metadata :: %{
          pubkey: String.t(),
          source: :configured | :persisted | :generated | :imported
        }

  @doc """
  Returns the existing identity's public metadata; does not create one.
  """
  @spec get(keyword()) :: {:ok, identity_metadata()} | {:error, term()}
  def get(opts \\ []) do
    with {:ok, identity} <- fetch_existing_identity(opts) do
      {:ok, public_identity(identity)}
    end
  end

  @doc """
  Returns the identity, generating and persisting one when absent.
  """
  @spec ensure(keyword()) :: {:ok, identity_metadata()} | {:error, term()}
  def ensure(opts \\ []) do
    with {:ok, identity} <- ensure_identity(opts) do
      {:ok, public_identity(identity)}
    end
  end

  @doc """
  Imports an identity map carrying a hex secret key and persists it.
  """
  @spec import(map(), keyword()) :: {:ok, identity_metadata()} | {:error, term()}
  def import(identity, opts \\ [])

  def import(identity, opts) when is_map(identity) do
    with {:ok, secret_key} <- fetch_secret_key(identity),
         {:ok, normalized_identity} <- build_identity(secret_key, :imported),
         :ok <- persist_identity(normalized_identity, opts) do
      {:ok, public_identity(normalized_identity)}
    end
  end

  def import(_identity, _opts), do: {:error, :invalid_identity}

  @doc """
  Replaces the persisted identity with a freshly generated keypair.
  Refused when the identity is pinned via configuration.
  """
  @spec rotate(keyword()) :: {:ok, identity_metadata()} | {:error, term()}
  def rotate(opts \\ []) do
    with :ok <- ensure_rotation_allowed(opts),
         {:ok, identity} <- generate_identity(:generated),
         :ok <- persist_identity(identity, opts) do
      {:ok, public_identity(identity)}
    end
  end

  @doc """
  Signs an event with the server identity, filling in pubkey, id, and
  sig. Ensures an identity exists first (generating one if needed).
  """
  @spec sign_event(map(), keyword()) :: {:ok, map()} | {:error, term()}
  def sign_event(event, opts \\ [])

  def sign_event(event, opts) when is_map(event) and is_list(opts) do
    with :ok <- validate_signable_event(event),
         {:ok, identity} <- ensure_identity(opts),
         signed_event <- attach_signature(event, identity) do
      {:ok, signed_event}
    end
  end

  def sign_event(_event, _opts), do: {:error, :invalid_event}

  @doc """
  Default on-disk location of the persisted identity file.
  """
  def default_path do
    Path.join([default_data_dir(), "server_identity.json"])
  end

  # Fetches the identity, creating + persisting one only when none exists.
  defp ensure_identity(opts) do
    case fetch_existing_identity(opts) do
      {:ok, identity} ->
        {:ok, identity}

      {:error, :identity_not_found} ->
        with {:ok, identity} <- generate_identity(:generated),
             :ok <- persist_identity(identity, opts) do
          {:ok, identity}
        end

      {:error, reason} ->
        {:error, reason}
    end
  end

  # A configured private key always wins over the persisted file.
  # (The `=` inside `if` is an intentional bind-and-test.)
  defp fetch_existing_identity(opts) do
    if configured_private_key = configured_private_key(opts) do
      build_identity(configured_private_key, :configured)
    else
      read_persisted_identity(opts)
    end
  end

  # Rotation would silently diverge from a config-pinned key, so refuse it.
  defp ensure_rotation_allowed(opts) do
    if configured_private_key(opts) do
      {:error, :configured_identity_cannot_rotate}
    else
      :ok
    end
  end

  # Minimal shape check for an event about to be signed. Note the default
  # "": a *missing* content key is accepted, an explicit non-binary one is not.
  defp validate_signable_event(event) do
    signable =
      is_integer(Map.get(event, "created_at")) and
        is_integer(Map.get(event, "kind")) and
        is_list(Map.get(event, "tags")) and
        is_binary(Map.get(event, "content", ""))

    if signable, do: :ok, else: {:error, :invalid_event}
  end

  # Computes the canonical event id, Schnorr-signs it, attaches both.
  # NOTE(review): a zeroed placeholder sig is in place while the id is
  # computed — this assumes Auth.compute_event_id/1 ignores "sig"
  # (NIP-01 ids are computed without it); confirm.
  defp attach_signature(event, identity) do
    unsigned_event =
      event
      |> Map.put("pubkey", identity.pubkey)
      |> Map.put("sig", String.duplicate("0", 128))

    event_id =
      unsigned_event
      |> Auth.compute_event_id()

    signature =
      event_id
      |> Base.decode16!(case: :lower)
      |> Secp256k1.schnorr_sign(identity.secret_key)
      |> Base.encode16(case: :lower)

    unsigned_event
    |> Map.put("id", event_id)
    |> Map.put("sig", signature)
  end

  # Reads and decodes the persisted identity file; a missing file maps
  # to :identity_not_found so callers can auto-generate.
  defp read_persisted_identity(opts) do
    path = identity_path(opts)

    case File.read(path) do
      {:ok, payload} ->
        with {:ok, decoded} <- JSON.decode(payload),
             {:ok, secret_key} <- fetch_secret_key(decoded),
             {:ok, identity} <- build_identity(secret_key, :persisted) do
          {:ok, identity}
        else
          {:error, reason} -> {:error, reason}
        end

      {:error, :enoent} ->
        {:error, :identity_not_found}

      {:error, reason} ->
        {:error, reason}
    end
  end

  # Write-to-temp + rename for an atomic-ish replace; the temp file is
  # removed on failure. NOTE(review): the file holds a raw secret key but
  # is written with default permissions — consider restricting to 0600.
  defp persist_identity(identity, opts) do
    path = identity_path(opts)
    temp_path = path <> ".tmp"

    with :ok <- File.mkdir_p(Path.dirname(path)),
         :ok <- File.write(temp_path, JSON.encode!(persisted_identity(identity))),
         :ok <- File.rename(temp_path, path) do
      :ok
    else
      {:error, reason} ->
        _ = File.rm(temp_path)
        {:error, reason}
    end
  end

  # On-disk JSON shape (hex-encoded secret key).
  defp persisted_identity(identity) do
    %{
      "secret_key" => Base.encode16(identity.secret_key, case: :lower),
      "pubkey" => identity.pubkey
    }
  end

  # NOTE(review): the broad rescue maps *any* keypair failure (including
  # NIF load problems) to :identity_generation_failed.
  defp generate_identity(source) do
    {secret_key, pubkey} = Secp256k1.keypair(:xonly)

    {:ok,
     %{
       secret_key: secret_key,
       pubkey: Base.encode16(pubkey, case: :lower),
       source: source
     }}
  rescue
    _error -> {:error, :identity_generation_failed}
  end

  # Builds the in-memory identity from a hex secret key, deriving the
  # x-only pubkey. Broad rescue collapses Secp256k1 errors into
  # :invalid_secret_key.
  defp build_identity(secret_key_hex, source) when is_binary(secret_key_hex) do
    with {:ok, secret_key} <- decode_secret_key(secret_key_hex),
         pubkey <- Secp256k1.pubkey(secret_key, :xonly) do
      {:ok,
       %{
         secret_key: secret_key,
         pubkey: Base.encode16(pubkey, case: :lower),
         source: source
       }}
    end
  rescue
    _error -> {:error, :invalid_secret_key}
  end

  # Accepts upper- or lower-case hex; requires exactly 32 key bytes.
  defp decode_secret_key(secret_key_hex) when is_binary(secret_key_hex) do
    normalized = String.downcase(secret_key_hex)

    case Base.decode16(normalized, case: :lower) do
      {:ok, <<_::256>> = secret_key} -> {:ok, secret_key}
      _other -> {:error, :invalid_secret_key}
    end
  end

  # Atom or string key, to tolerate both internal maps and decoded JSON.
  defp fetch_secret_key(identity) when is_map(identity) do
    case Map.get(identity, :secret_key) || Map.get(identity, "secret_key") do
      secret_key when is_binary(secret_key) -> {:ok, secret_key}
      _other -> {:error, :invalid_identity}
    end
  end

  defp configured_private_key(opts) do
    opts[:private_key] || opts[:configured_private_key] || config_value(:private_key)
  end

  defp identity_path(opts) do
    opts[:path] || config_value(:path) || default_path()
  end

  # Strips the secret key before returning identity info to callers.
  defp public_identity(identity) do
    %{
      pubkey: identity.pubkey,
      source: identity.source
    }
  end

  defp config_value(key) do
    :parrhesia
    |> Application.get_env(:identity, [])
    |> Keyword.get(key)
  end

  # XDG data dir (~/.local/share/parrhesia by default).
  defp default_data_dir do
    base_dir =
      System.get_env("XDG_DATA_HOME") ||
        Path.join(System.user_home!(), ".local/share")

    Path.join(base_dir, "parrhesia")
  end
end

View File

@@ -0,0 +1,25 @@
defmodule Parrhesia.API.Identity.Manager do
  @moduledoc false

  use GenServer

  alias Parrhesia.API.Identity

  require Logger

  def start_link(opts \\ []) do
    GenServer.start_link(__MODULE__, opts, name: __MODULE__)
  end

  # Ensures a server identity exists at boot. A failure is logged rather
  # than fatal, so the rest of the system still comes up.
  @impl true
  def init(_opts) do
    with {:error, reason} <- Identity.ensure() do
      Logger.error("failed to ensure server identity: #{inspect(reason)}")
    end

    {:ok, %{}}
  end
end

View File

@@ -0,0 +1,30 @@
defmodule Parrhesia.API.RequestContext do
  @moduledoc """
  Shared request context used across API and policy surfaces.
  """

  # authenticated_pubkeys: hex pubkeys proven by the calling surface
  # actor:                 opaque acting-principal identifier, when known
  # caller:                entry surface tag (defaults to :local)
  # remote_ip/subscription_id/peer_id/transport_identity: transport
  #   details filled in by the owning surface; metadata: free-form extras
  defstruct authenticated_pubkeys: MapSet.new(),
            actor: nil,
            caller: :local,
            remote_ip: nil,
            subscription_id: nil,
            peer_id: nil,
            transport_identity: nil,
            metadata: %{}

  @type t :: %__MODULE__{
          authenticated_pubkeys: MapSet.t(String.t()),
          actor: term(),
          caller: atom(),
          remote_ip: String.t() | nil,
          subscription_id: String.t() | nil,
          peer_id: String.t() | nil,
          transport_identity: map() | nil,
          metadata: map()
        }

  @doc """
  Merges `metadata` into the context's metadata map; on key collisions
  the incoming values win.
  """
  @spec put_metadata(t(), map()) :: t()
  def put_metadata(%__MODULE__{metadata: existing} = context, metadata) when is_map(metadata) do
    %{context | metadata: Map.merge(existing, metadata)}
  end
end

View File

@@ -0,0 +1,97 @@
defmodule Parrhesia.API.Stream do
  @moduledoc """
  In-process subscription API with relay-equivalent catch-up and live fanout semantics.

  `subscribe/4` starts the per-subscription worker FIRST (so fanout
  arriving during catch-up is buffered by the worker) and THEN runs the
  catch-up query, delivering stored events followed by an `:eose` marker.
  """

  alias Parrhesia.API.Events
  alias Parrhesia.API.RequestContext
  alias Parrhesia.API.Stream.Subscription
  alias Parrhesia.Policy.EventPolicy
  alias Parrhesia.Protocol.Filter

  @spec subscribe(pid(), String.t(), [map()], keyword()) :: {:ok, reference()} | {:error, term()}
  def subscribe(subscriber, subscription_id, filters, opts \\ [])

  # Requires a %RequestContext{} under opts[:context]; filters are
  # validated and read-authorized before any worker process is started.
  def subscribe(subscriber, subscription_id, filters, opts)
      when is_pid(subscriber) and is_binary(subscription_id) and is_list(filters) and
             is_list(opts) do
    with {:ok, context} <- fetch_context(opts),
         :ok <- Filter.validate_filters(filters),
         :ok <-
           EventPolicy.authorize_read(
             filters,
             context.authenticated_pubkeys,
             stream_context(context, subscription_id)
           ) do
      ref = make_ref()

      case DynamicSupervisor.start_child(
             Parrhesia.API.Stream.Supervisor,
             {Subscription,
              ref: ref, subscriber: subscriber, subscription_id: subscription_id, filters: filters}
           ) do
        {:ok, pid} ->
          finalize_subscription(pid, ref, filters, stream_context(context, subscription_id))

        {:error, reason} ->
          {:error, reason}
      end
    end
  end

  def subscribe(_subscriber, _subscription_id, _filters, _opts),
    do: {:error, :invalid_subscription}

  # Idempotent: unknown or already-stopped refs return :ok. Stopping
  # with :normal suppresses the :closed notification the worker would
  # otherwise send in terminate/2.
  @spec unsubscribe(reference()) :: :ok
  def unsubscribe(ref) when is_reference(ref) do
    case Registry.lookup(Parrhesia.API.Stream.Registry, ref) do
      [{pid, _value}] ->
        try do
          :ok = GenServer.stop(pid, :normal)
        catch
          :exit, _reason -> :ok
        end

        :ok

      [] ->
        :ok
    end
  end

  def unsubscribe(_ref), do: :ok

  defp fetch_context(opts) do
    case Keyword.get(opts, :context) do
      %RequestContext{} = context -> {:ok, context}
      _other -> {:error, :invalid_context}
    end
  end

  # Runs the catch-up query and hands the batch to the worker. Filter
  # validation/authorization are skipped because subscribe/4 already did
  # both. On failure the half-started worker is torn down.
  # NOTE(review): events stored between worker start and this query can
  # appear both in the batch and in the worker's buffer; no dedup occurs
  # here — confirm at-least-once delivery is acceptable to consumers.
  defp finalize_subscription(pid, ref, filters, context) do
    with {:ok, initial_events} <-
           Events.query(filters,
             context: context,
             validate_filters?: false,
             authorize_read?: false
           ),
         :ok <- Subscription.deliver_initial(pid, initial_events) do
      {:ok, ref}
    else
      {:error, reason} ->
        _ = safe_stop_subscription(pid)
        {:error, reason}
    end
  end

  # Best-effort stop; an already-dead worker is fine.
  defp safe_stop_subscription(pid) do
    GenServer.stop(pid, :shutdown)
    :ok
  catch
    :exit, _reason -> :ok
  end

  # Tags the context with the subscription id for policy/telemetry use.
  defp stream_context(%RequestContext{} = context, subscription_id) do
    %RequestContext{context | subscription_id: subscription_id}
  end
end

View File

@@ -0,0 +1,178 @@
defmodule Parrhesia.API.Stream.Subscription do
  @moduledoc false

  use GenServer

  alias Parrhesia.Protocol.Filter
  alias Parrhesia.Subscriptions.Index

  # One process per in-process subscription. Fanout events arriving
  # before the catch-up batch is delivered are buffered and flushed (in
  # arrival order) right after the initial events.
  defstruct [
    :ref,
    :subscriber,
    :subscriber_monitor_ref,
    :subscription_id,
    :filters,
    :subscription_index,
    ready?: false,
    buffered_events: []
  ]

  @type t :: %__MODULE__{
          ref: reference(),
          subscriber: pid(),
          subscriber_monitor_ref: reference(),
          subscription_id: String.t(),
          filters: [map()],
          subscription_index: pid() | atom() | nil,
          ready?: boolean(),
          buffered_events: [map()]
        }

  @spec start_link(keyword()) :: GenServer.on_start()
  def start_link(opts) when is_list(opts) do
    ref = Keyword.fetch!(opts, :ref)
    GenServer.start_link(__MODULE__, opts, name: via_tuple(ref))
  end

  @doc """
  Delivers the catch-up batch to the subscriber and flips the worker
  into live mode, flushing any fanout events buffered in the meantime.
  """
  @spec deliver_initial(GenServer.server(), [map()]) :: :ok | {:error, term()}
  def deliver_initial(server, initial_events) when is_list(initial_events) do
    GenServer.call(server, {:deliver_initial, initial_events})
  end

  @impl true
  def init(opts) do
    subscription_index = subscription_index(opts)

    with {:ok, subscriber} <- fetch_subscriber(opts),
         {:ok, subscription_id} <- fetch_subscription_id(opts),
         {:ok, filters} <- fetch_filters(opts),
         :ok <- maybe_upsert_index_subscription(subscription_index, subscription_id, filters) do
      monitor_ref = Process.monitor(subscriber)

      {:ok,
       %__MODULE__{
         ref: Keyword.fetch!(opts, :ref),
         subscriber: subscriber,
         subscriber_monitor_ref: monitor_ref,
         subscription_id: subscription_id,
         filters: filters,
         # Remembered so terminate/2 deregisters from the SAME index
         # used for registration (see fix note in terminate/2).
         subscription_index: subscription_index,
         ready?: false,
         buffered_events: []
       }}
    else
      {:error, reason} -> {:stop, reason}
    end
  end

  @impl true
  def handle_call({:deliver_initial, initial_events}, _from, %__MODULE__{} = state) do
    send_initial_events(state, initial_events)

    # Buffered events were prepended as they arrived; reverse restores
    # arrival order before flushing.
    Enum.each(Enum.reverse(state.buffered_events), fn event ->
      send(state.subscriber, {:parrhesia, :event, state.ref, state.subscription_id, event})
    end)

    {:reply, :ok, %__MODULE__{state | ready?: true, buffered_events: []}}
  end

  @impl true
  def handle_info({:fanout_event, subscription_id, event}, %__MODULE__{} = state)
      when is_binary(subscription_id) and is_map(event) do
    handle_fanout_event(state, subscription_id, event)
  end

  # Subscriber process died: tear the subscription down quietly.
  def handle_info({:DOWN, monitor_ref, :process, subscriber, _reason}, %__MODULE__{} = state)
      when monitor_ref == state.subscriber_monitor_ref and subscriber == state.subscriber do
    {:stop, :normal, state}
  end

  def handle_info(_message, %__MODULE__{} = state), do: {:noreply, state}

  @impl true
  def terminate(reason, %__MODULE__{} = state) do
    # Fix: deregister from the same index registered with in init/1.
    # Previously this always targeted the global Index, leaking index
    # entries whenever a custom :subscription_index was injected.
    :ok = maybe_remove_index_subscription(state.subscription_index, state.subscription_id)

    if reason not in [:normal, :shutdown] do
      send(state.subscriber, {:parrhesia, :closed, state.ref, state.subscription_id, reason})
    end

    :ok
  end

  defp send_initial_events(state, events) do
    Enum.each(events, fn event ->
      send(state.subscriber, {:parrhesia, :event, state.ref, state.subscription_id, event})
    end)

    # End-of-stored-events marker, relay-style.
    send(state.subscriber, {:parrhesia, :eose, state.ref, state.subscription_id})
  end

  defp via_tuple(ref), do: {:via, Registry, {Parrhesia.API.Stream.Registry, ref}}

  defp fetch_subscriber(opts) do
    case Keyword.get(opts, :subscriber) do
      subscriber when is_pid(subscriber) -> {:ok, subscriber}
      _other -> {:error, :invalid_subscriber}
    end
  end

  defp fetch_subscription_id(opts) do
    case Keyword.get(opts, :subscription_id) do
      subscription_id when is_binary(subscription_id) -> {:ok, subscription_id}
      _other -> {:error, :invalid_subscription_id}
    end
  end

  defp fetch_filters(opts) do
    case Keyword.get(opts, :filters) do
      filters when is_list(filters) -> {:ok, filters}
      _other -> {:error, :invalid_filters}
    end
  end

  # Index override (for tests); defaults to the global Index.
  defp subscription_index(opts) do
    case Keyword.get(opts, :subscription_index, Index) do
      subscription_index when is_pid(subscription_index) or is_atom(subscription_index) ->
        subscription_index

      _other ->
        nil
    end
  end

  defp maybe_upsert_index_subscription(nil, _subscription_id, _filters),
    do: {:error, :subscription_index_unavailable}

  defp maybe_upsert_index_subscription(subscription_index, subscription_id, filters) do
    case Index.upsert(subscription_index, self(), subscription_id, filters) do
      :ok -> :ok
      {:error, reason} -> {:error, reason}
    end
  catch
    :exit, _reason -> {:error, :subscription_index_unavailable}
  end

  # Defensive nil clause: init/1 rejects a nil index before entering the
  # loop, so normally a real index is present. Exits from a dead index
  # are swallowed — removal is best-effort during teardown.
  defp maybe_remove_index_subscription(nil, _subscription_id), do: :ok

  defp maybe_remove_index_subscription(subscription_index, subscription_id) do
    :ok = Index.remove(subscription_index, self(), subscription_id)
    :ok
  catch
    :exit, _reason -> :ok
  end

  # Live fanout: wrong id or non-matching filter is dropped; delivered
  # immediately once ready, buffered until then.
  defp handle_fanout_event(%__MODULE__{} = state, subscription_id, event) do
    cond do
      subscription_id != state.subscription_id ->
        {:noreply, state}

      not Filter.matches_any?(event, state.filters) ->
        {:noreply, state}

      state.ready? ->
        send(state.subscriber, {:parrhesia, :event, state.ref, state.subscription_id, event})
        {:noreply, state}

      true ->
        {:noreply, %__MODULE__{state | buffered_events: [event | state.buffered_events]}}
    end
  end
end

103
lib/parrhesia/api/sync.ex Normal file
View File

@@ -0,0 +1,103 @@
defmodule Parrhesia.API.Sync do
  @moduledoc """
  Sync server control-plane API.

  Thin client over `Parrhesia.API.Sync.Manager`; pass `:manager` or
  `:name` in `opts` to target a non-default manager process (e.g. tests).
  """

  alias Parrhesia.API.Sync.Manager

  @type server :: map()

  @spec put_server(map(), keyword()) :: {:ok, server()} | {:error, term()}
  def put_server(server, opts \\ [])

  def put_server(server, opts) when is_map(server) and is_list(opts) do
    opts |> manager() |> Manager.put_server(server)
  end

  def put_server(_server, _opts), do: {:error, :invalid_server}

  @spec remove_server(String.t(), keyword()) :: :ok | {:error, term()}
  def remove_server(server_id, opts \\ [])

  def remove_server(server_id, opts) when is_binary(server_id) and is_list(opts) do
    opts |> manager() |> Manager.remove_server(server_id)
  end

  def remove_server(_server_id, _opts), do: {:error, :invalid_server_id}

  @spec get_server(String.t(), keyword()) :: {:ok, server()} | :error | {:error, term()}
  def get_server(server_id, opts \\ [])

  def get_server(server_id, opts) when is_binary(server_id) and is_list(opts) do
    opts |> manager() |> Manager.get_server(server_id)
  end

  def get_server(_server_id, _opts), do: {:error, :invalid_server_id}

  @spec list_servers(keyword()) :: {:ok, [server()]} | {:error, term()}
  def list_servers(opts \\ []) when is_list(opts) do
    opts |> manager() |> Manager.list_servers()
  end

  @spec start_server(String.t(), keyword()) :: :ok | {:error, term()}
  def start_server(server_id, opts \\ [])

  def start_server(server_id, opts) when is_binary(server_id) and is_list(opts) do
    opts |> manager() |> Manager.start_server(server_id)
  end

  def start_server(_server_id, _opts), do: {:error, :invalid_server_id}

  @spec stop_server(String.t(), keyword()) :: :ok | {:error, term()}
  def stop_server(server_id, opts \\ [])

  def stop_server(server_id, opts) when is_binary(server_id) and is_list(opts) do
    opts |> manager() |> Manager.stop_server(server_id)
  end

  def stop_server(_server_id, _opts), do: {:error, :invalid_server_id}

  @spec sync_now(String.t(), keyword()) :: :ok | {:error, term()}
  def sync_now(server_id, opts \\ [])

  def sync_now(server_id, opts) when is_binary(server_id) and is_list(opts) do
    opts |> manager() |> Manager.sync_now(server_id)
  end

  def sync_now(_server_id, _opts), do: {:error, :invalid_server_id}

  @spec server_stats(String.t(), keyword()) :: {:ok, map()} | :error | {:error, term()}
  def server_stats(server_id, opts \\ [])

  def server_stats(server_id, opts) when is_binary(server_id) and is_list(opts) do
    opts |> manager() |> Manager.server_stats(server_id)
  end

  def server_stats(_server_id, _opts), do: {:error, :invalid_server_id}

  @spec sync_stats(keyword()) :: {:ok, map()} | {:error, term()}
  def sync_stats(opts \\ []) when is_list(opts) do
    opts |> manager() |> Manager.sync_stats()
  end

  @spec sync_health(keyword()) :: {:ok, map()} | {:error, term()}
  def sync_health(opts \\ []) when is_list(opts) do
    opts |> manager() |> Manager.sync_health()
  end

  @doc """
  Default on-disk location for the persisted sync server definitions.
  """
  def default_path do
    Path.join([default_data_dir(), "sync_servers.json"])
  end

  # Resolves which manager process to call; defaults to the Manager module name.
  defp manager(opts) do
    opts[:manager] || opts[:name] || Manager
  end

  # XDG data dir (~/.local/share/parrhesia by default).
  defp default_data_dir do
    base_dir =
      System.get_env("XDG_DATA_HOME") ||
        Path.join(System.user_home!(), ".local/share")

    Path.join(base_dir, "parrhesia")
  end
end

View File

@@ -0,0 +1,939 @@
defmodule Parrhesia.API.Sync.Manager do
@moduledoc false
use GenServer
alias Parrhesia.API.Sync
alias Parrhesia.Protocol.Filter
alias Parrhesia.Sync.Transport.WebSockexClient
alias Parrhesia.Sync.Worker
require Logger
@default_overlap_window_seconds 300
@default_mode :req_stream
@default_auth_type :nip42
@default_tls_mode :required
@hex64 ~r/\A[0-9a-f]{64}\z/
# Starts the sync manager; `:name` in opts overrides the registered
# name (used by tests to run isolated instances).
def start_link(opts \\ []) do
  GenServer.start_link(__MODULE__, opts, name: Keyword.get(opts, :name, __MODULE__))
end
# Client API: registers or replaces a server definition (sync call).
def put_server(name, server) do
  GenServer.call(name, {:put_server, server})
end
# Client API: removes a server and its runtime state (sync call).
def remove_server(name, server_id) do
  GenServer.call(name, {:remove_server, server_id})
end
# Client API: fetches one server's merged config + runtime view.
def get_server(name, server_id) do
  GenServer.call(name, {:get_server, server_id})
end
# Client API: lists all servers (merged config + runtime views).
def list_servers(name) do
  GenServer.call(name, :list_servers)
end
# Client API: marks a server running and reconciles its worker.
def start_server(name, server_id) do
  GenServer.call(name, {:start_server, server_id})
end
# Client API: stops a server's worker and marks it stopped.
def stop_server(name, server_id) do
  GenServer.call(name, {:stop_server, server_id})
end
# Client API: triggers an immediate sync for one server.
def sync_now(name, server_id) do
  GenServer.call(name, {:sync_now, server_id})
end
# Client API: runtime stats for one server.
def server_stats(name, server_id) do
  GenServer.call(name, {:server_stats, server_id})
end
# Client API: aggregate stats across all servers.
def sync_stats(name) do
  GenServer.call(name, :sync_stats)
end
# Client API: overall sync health summary.
def sync_health(name) do
  GenServer.call(name, :sync_health)
end
# Async notification from a worker about a runtime state change
# (connects, disconnects, sync progress, errors, ...).
def runtime_event(name, server_id, kind, attrs \\ %{}) do
  message = {:runtime_event, server_id, kind, attrs}
  GenServer.cast(name, message)
end
@impl true
# Loads persisted server definitions from disk and layers the runtime
# configuration on top. Worker startup is deferred to handle_continue/2
# so init/1 stays fast.
def init(opts) do
  path = Keyword.get(opts, :path, config_path() || Sync.default_path())

  runtime_config = %{
    start_workers?: Keyword.get(opts, :start_workers?, config_value(:start_workers?, true)),
    worker_supervisor: Keyword.get(opts, :worker_supervisor, Parrhesia.Sync.WorkerSupervisor),
    worker_registry: Keyword.get(opts, :worker_registry, Parrhesia.Sync.WorkerRegistry),
    transport_module: Keyword.get(opts, :transport_module, WebSockexClient),
    relay_info_opts: Keyword.get(opts, :relay_info_opts, []),
    transport_opts: Keyword.get(opts, :transport_opts, [])
  }

  {:ok, Map.merge(load_state(path), runtime_config), {:continue, :bootstrap}}
end
@impl true
# Post-init bootstrap: starts a worker per known server, unless worker
# startup is disabled (e.g. in tests).
def handle_continue(:bootstrap, state) do
  if state.start_workers? do
    bootstrapped =
      Enum.reduce(Map.keys(state.servers), state, fn server_id, acc ->
        maybe_start_worker(acc, server_id)
      end)

    {:noreply, bootstrapped}
  else
    {:noreply, state}
  end
end
@impl true
def handle_call({:put_server, server}, _from, state) do
case normalize_server(server) do
{:ok, normalized_server} ->
updated_state =
state
|> put_server_state(normalized_server)
|> persist_and_reconcile!(normalized_server.id)
{:reply, {:ok, merged_server(updated_state, normalized_server.id)}, updated_state}
{:error, reason} ->
{:reply, {:error, reason}, state}
end
end
def handle_call({:remove_server, server_id}, _from, state) do
if Map.has_key?(state.servers, server_id) do
next_state =
state
|> stop_worker_if_running(server_id)
|> Map.update!(:servers, &Map.delete(&1, server_id))
|> Map.update!(:runtime, &Map.delete(&1, server_id))
with :ok <- persist_state(next_state) do
{:reply, :ok, next_state}
end
else
{:reply, {:error, :not_found}, state}
end
end
def handle_call({:get_server, server_id}, _from, state) do
case Map.fetch(state.servers, server_id) do
{:ok, _server} -> {:reply, {:ok, merged_server(state, server_id)}, state}
:error -> {:reply, :error, state}
end
end
def handle_call(:list_servers, _from, state) do
servers =
state.servers
|> Map.keys()
|> Enum.sort()
|> Enum.map(&merged_server(state, &1))
{:reply, {:ok, servers}, state}
end
# Marks a server running (clearing any stale error), persists, and starts
# its worker via reconciliation.
def handle_call({:start_server, server_id}, _from, state) do
  case state.runtime do
    %{^server_id => runtime} ->
      next_state =
        state
        |> put_runtime(server_id, %{runtime | state: :running, last_error: nil})
        |> persist_and_reconcile!(server_id)

      {:reply, :ok, next_state}

    _runtime_map ->
      {:reply, {:error, :not_found}, state}
  end
end
# Marks a server stopped, tears down its worker, and persists the change.
#
# Fix: the original `with :ok <- persist_state(next_state)` had no `else`,
# so a persist failure leaked `{:error, reason}` as the handle_call return —
# an invalid GenServer reply tuple that crashed the manager. Persist failures
# are now logged; the in-memory state remains authoritative.
def handle_call({:stop_server, server_id}, _from, state) do
  case Map.fetch(state.runtime, server_id) do
    {:ok, runtime} ->
      next_runtime =
        runtime
        |> Map.put(:state, :stopped)
        |> Map.put(:connected?, false)
        |> Map.put(:last_disconnected_at, now())

      next_state =
        state
        |> stop_worker_if_running(server_id)
        |> put_runtime(server_id, next_runtime)

      case persist_state(next_state) do
        :ok ->
          :ok

        {:error, reason} ->
          Logger.warning("failed to persist sync state for #{server_id}: #{inspect(reason)}")
      end

      {:reply, :ok, next_state}

    :error ->
      {:reply, {:error, :not_found}, state}
  end
end
# Triggers an immediate sync. With workers enabled, either pokes the live
# worker or marks the server running so reconciliation boots one (which
# syncs on start). With workers disabled (test/embedded mode), simulates a
# completed sync so runtime counters and timestamps still advance.
#
# Fix: in the workers-disabled branch the original used
# `with :ok <- persist_state(next_state)` without an `else`, so a persist
# failure returned `{:error, reason}` from handle_call — an invalid
# GenServer reply tuple that crashed the manager. Failures are now logged.
def handle_call({:sync_now, server_id}, _from, state) do
  worker_pid = lookup_worker(state, server_id)

  cond do
    not Map.has_key?(state.runtime, server_id) ->
      {:reply, {:error, :not_found}, state}

    state.start_workers? and is_pid(worker_pid) ->
      Worker.sync_now(worker_pid)
      {:reply, :ok, state}

    state.start_workers? ->
      # No worker registered yet: flip desired state to running and
      # reconcile, which starts a worker that performs an initial sync.
      next_state =
        state
        |> put_in([:runtime, server_id, :state], :running)
        |> persist_and_reconcile!(server_id)

      {:reply, :ok, next_state}

    true ->
      next_state =
        state
        |> apply_runtime_event(server_id, :sync_started, %{})
        |> apply_runtime_event(server_id, :sync_completed, %{})

      case persist_state(next_state) do
        :ok ->
          :ok

        {:error, reason} ->
          Logger.warning("failed to persist sync state for #{server_id}: #{inspect(reason)}")
      end

      {:reply, :ok, next_state}
  end
end
# Per-server stats snapshot; :error when the server is unknown.
def handle_call({:server_stats, server_id}, _from, state) do
  reply =
    case Map.fetch(state.runtime, server_id) do
      {:ok, runtime} -> {:ok, runtime_stats(runtime)}
      :error -> :error
    end

  {:reply, reply, state}
end

# Relay-wide aggregated counters.
def handle_call(:sync_stats, _from, state), do: {:reply, {:ok, aggregate_stats(state)}, state}

# Health summary: ok/degraded plus the list of failing servers.
def handle_call(:sync_health, _from, state), do: {:reply, {:ok, health_summary(state)}, state}

@impl true
# Workers report lifecycle events asynchronously; fold them into the
# runtime map and best-effort persist (failures are logged, never fatal).
def handle_cast({:runtime_event, server_id, kind, attrs}, state) do
  updated =
    state
    |> apply_runtime_event(server_id, kind, attrs)
    |> persist_state_if_known_server(server_id)

  {:noreply, updated}
end
# Best-effort persistence for async runtime events: events for unknown
# servers are ignored, and persist failures are logged while the in-memory
# state is kept (it remains the source of truth).
defp persist_state_if_known_server(state, server_id) do
  with true <- Map.has_key?(state.runtime, server_id),
       {:error, reason} <- persist_state(state) do
    Logger.warning("failed to persist sync runtime for #{server_id}: #{inspect(reason)}")
    state
  else
    _ok_or_unknown -> state
  end
end
# Inserts/updates a server definition, keeping any existing runtime entry
# or seeding a default one for brand-new servers.
defp put_server_state(state, server) do
  runtime = Map.get(state.runtime, server.id) || default_runtime(server)

  state
  |> Map.update!(:servers, &Map.put(&1, server.id, server))
  |> Map.update!(:runtime, &Map.put(&1, server.id, runtime))
end

# Replaces the runtime entry for `server_id`.
defp put_runtime(state, server_id, runtime) do
  Map.update!(state, :runtime, &Map.put(&1, server_id, runtime))
end

# Persists the state (raising on failure — the bang contract) and then
# reconciles the server's worker with its desired state.
defp persist_and_reconcile!(state, server_id) do
  :ok = persist_state(state)
  reconcile_worker(state, server_id)
end
# Brings the worker for `server_id` in line with its desired state: running
# servers get a restarted worker (picking up any config change), everything
# else is torn down. No-op when worker management is disabled.
defp reconcile_worker(state, server_id) do
  if state.start_workers? do
    if desired_running?(state, server_id) do
      state
      |> stop_worker_if_running(server_id)
      |> maybe_start_worker(server_id)
    else
      stop_worker_if_running(state, server_id)
    end
  else
    state
  end
end
# Starts a sync worker for `server_id` under the dynamic supervisor.
# Returns state unchanged in every branch (worker pids are tracked via the
# Registry, not in manager state). No-ops when worker management is
# disabled, the server is not desired running, or a worker is already
# registered.
defp maybe_start_worker(state, server_id) do
cond do
not state.start_workers? ->
state
not desired_running?(state, server_id) ->
state
lookup_worker(state, server_id) != nil ->
state
true ->
server = Map.fetch!(state.servers, server_id)
runtime = Map.fetch!(state.runtime, server_id)
# :transient restart — workers that exit normally stay down; crashes
# are restarted by the DynamicSupervisor.
child_spec = %{
id: {:sync_worker, server_id},
start:
{Worker, :start_link,
[
[
name: via_tuple(server_id, state.worker_registry),
server: server,
runtime: runtime,
manager: self(),
transport_module: state.transport_module,
relay_info_opts: state.relay_info_opts,
transport_opts: state.transport_opts
]
]},
restart: :transient
}
case DynamicSupervisor.start_child(state.worker_supervisor, child_spec) do
{:ok, _pid} ->
state
{:error, {:already_started, _pid}} ->
state
{:error, reason} ->
# Log and continue: one failing server must not crash the manager.
Logger.warning("failed to start sync worker #{server_id}: #{inspect(reason)}")
state
end
end
end
# Stops the worker for `server_id` if one is registered; always returns the
# (unchanged) state so it composes in pipelines.
defp stop_worker_if_running(state, server_id) do
  case lookup_worker(state, server_id) do
    nil -> :ok
    worker_pid -> _ = Worker.stop(worker_pid)
  end

  state
end

# True when the server's desired runtime state is :running.
defp desired_running?(state, server_id) do
  match?({:ok, %{state: :running}}, Map.fetch(state.runtime, server_id))
end

# Resolves the worker pid via the registry; nil when no worker is
# registered or the registry itself is down (the :exit is swallowed).
defp lookup_worker(state, server_id) do
  case Registry.lookup(state.worker_registry, server_id) do
    [{pid, _value}] -> pid
    [] -> nil
  end
catch
  :exit, _reason -> nil
end

# Registry-based process name for a server's worker.
defp via_tuple(server_id, registry), do: {:via, Registry, {registry, server_id}}

# Server definition with its runtime snapshot attached under :runtime.
defp merged_server(state, server_id) do
  server = Map.fetch!(state.servers, server_id)
  Map.put(server, :runtime, Map.fetch!(state.runtime, server_id))
end
# Converts an internal runtime entry into the string-keyed stats payload
# returned by the {:server_stats, id} call (JSON-friendly values only).
defp runtime_stats(runtime) do
%{
"server_id" => runtime.server_id,
"state" => Atom.to_string(runtime.state),
"connected" => runtime.connected?,
"events_received" => runtime.events_received,
"events_accepted" => runtime.events_accepted,
"events_duplicate" => runtime.events_duplicate,
"events_rejected" => runtime.events_rejected,
"query_runs" => runtime.query_runs,
"subscription_restarts" => runtime.subscription_restarts,
"reconnects" => runtime.reconnects,
"last_sync_started_at" => runtime.last_sync_started_at,
"last_sync_completed_at" => runtime.last_sync_completed_at,
"last_remote_eose_at" => runtime.last_remote_eose_at,
"last_error" => runtime.last_error,
"cursor_created_at" => runtime.cursor_created_at,
"cursor_event_id" => runtime.cursor_event_id
}
end
# Relay-wide stats: per-server counters summed across all runtime entries.
defp aggregate_stats(state) do
  runtimes = Map.values(state.runtime)
  total = fn key -> Enum.reduce(runtimes, 0, fn runtime, acc -> acc + Map.fetch!(runtime, key) end) end

  %{
    "servers_total" => map_size(state.servers),
    "servers_enabled" => Enum.count(state.servers, fn {_id, server} -> server.enabled? end),
    "servers_running" => Enum.count(runtimes, &(&1.state == :running)),
    "servers_connected" => Enum.count(runtimes, & &1.connected?),
    "events_received" => total.(:events_received),
    "events_accepted" => total.(:events_accepted),
    "events_duplicate" => total.(:events_duplicate),
    "events_rejected" => total.(:events_rejected),
    "query_runs" => total.(:query_runs),
    "subscription_restarts" => total.(:subscription_restarts),
    "reconnects" => total.(:reconnects)
  }
end

# Health summary: "degraded" when any server carries a non-empty last_error.
defp health_summary(state) do
  failing_servers =
    for {server_id, runtime} <- state.runtime,
        is_binary(runtime.last_error) and runtime.last_error != "" do
      %{"id" => server_id, "reason" => runtime.last_error}
    end

  %{
    "status" => if(failing_servers == [], do: "ok", else: "degraded"),
    "servers_total" => map_size(state.servers),
    "servers_connected" =>
      Enum.count(state.runtime, fn {_id, runtime} -> runtime.connected? end),
    "servers_failing" => failing_servers
  }
end
# Applies a worker lifecycle event to the matching runtime entry; events
# for unknown servers are silently dropped.
defp apply_runtime_event(state, server_id, kind, attrs) do
  if runtime = Map.get(state.runtime, server_id) do
    put_runtime(state, server_id, update_runtime_for_event(runtime, kind, attrs))
  else
    state
  end
end
# Folds one worker lifecycle event into a runtime entry. Unknown event
# kinds fall through to the final catch-all and leave the entry untouched.
defp update_runtime_for_event(runtime, :connected, _attrs) do
  Map.merge(runtime, %{
    state: :running,
    connected?: true,
    last_connected_at: now(),
    last_error: nil
  })
end

defp update_runtime_for_event(runtime, :disconnected, attrs) do
  runtime
  |> Map.update!(:reconnects, &(&1 + 1))
  |> Map.merge(%{
    connected?: false,
    last_disconnected_at: now(),
    last_error: format_reason(Map.get(attrs, :reason))
  })
end

defp update_runtime_for_event(runtime, :error, attrs) do
  Map.put(runtime, :last_error, format_reason(Map.get(attrs, :reason)))
end

defp update_runtime_for_event(runtime, :sync_started, _attrs) do
  runtime
  |> Map.update!(:query_runs, &(&1 + 1))
  |> Map.put(:last_sync_started_at, now())
end

defp update_runtime_for_event(runtime, :sync_completed, _attrs) do
  completed_at = now()

  Map.merge(runtime, %{
    last_sync_completed_at: completed_at,
    last_eose_at: completed_at,
    last_remote_eose_at: completed_at
  })
end

defp update_runtime_for_event(runtime, :subscription_restart, _attrs) do
  Map.update!(runtime, :subscription_restarts, &(&1 + 1))
end

defp update_runtime_for_event(runtime, :cursor_advanced, attrs) do
  Map.merge(runtime, %{
    cursor_created_at: Map.get(attrs, :created_at),
    cursor_event_id: Map.get(attrs, :event_id)
  })
end

defp update_runtime_for_event(runtime, :event_result, attrs) do
  event = Map.get(attrs, :event, %{})
  result = Map.get(attrs, :result)

  runtime
  |> Map.update!(:events_received, &(&1 + 1))
  |> Map.put(:last_event_received_at, now())
  |> increment_result_counter(result)
  |> maybe_put_last_error(attrs)
  |> maybe_advance_runtime_cursor(event, result)
end

defp update_runtime_for_event(runtime, _kind, _attrs), do: runtime
# Bumps the counter matching the ingest result; unknown results are ignored.
defp increment_result_counter(runtime, :accepted),
  do: Map.update!(runtime, :events_accepted, &(&1 + 1))

defp increment_result_counter(runtime, :duplicate),
  do: Map.update!(runtime, :events_duplicate, &(&1 + 1))

defp increment_result_counter(runtime, :rejected),
  do: Map.update!(runtime, :events_rejected, &(&1 + 1))

defp increment_result_counter(runtime, _result), do: runtime

# Records the event's failure reason, if one was provided.
#
# Fix: the original only special-cased `%{reason: nil}`; an attrs map with
# no `:reason` key at all fell through to the catch-all clause and overwrote
# `:last_error` with `format_reason(nil)` (i.e. nil), silently clearing a
# previously recorded error. A missing reason is now treated the same as a
# nil reason: the existing last_error is preserved.
defp maybe_put_last_error(runtime, attrs) do
  case Map.get(attrs, :reason) do
    nil -> runtime
    reason -> Map.put(runtime, :last_error, format_reason(reason))
  end
end
# Advances the cursor when an accepted/duplicate event is strictly newer
# than the current position; ordering is lexicographic on (created_at, id).
# A nil cursor always advances; malformed events never do.
defp maybe_advance_runtime_cursor(runtime, event, result)
     when result in [:accepted, :duplicate] do
  created_at = Map.get(event, "created_at")
  event_id = Map.get(event, "id")

  advance? =
    is_integer(created_at) and is_binary(event_id) and
      (is_nil(runtime.cursor_created_at) or
         created_at > runtime.cursor_created_at or
         (created_at == runtime.cursor_created_at and event_id > runtime.cursor_event_id))

  if advance? do
    runtime
    |> Map.put(:cursor_created_at, created_at)
    |> Map.put(:cursor_event_id, event_id)
  else
    runtime
  end
end

defp maybe_advance_runtime_cursor(runtime, _event, _result), do: runtime
# Normalizes an error reason to a string (or nil): binaries pass through
# verbatim, any other term is rendered with inspect/1.
defp format_reason(nil), do: nil

defp format_reason(reason) do
  if is_binary(reason), do: reason, else: inspect(reason)
end
# Loads persisted sync state from `path`. A missing file is the expected
# first-boot case and yields an empty state silently; read or decode
# failures are logged before falling back to an empty state.
defp load_state(path) do
  case File.read(path) do
    {:ok, payload} ->
      decode_or_empty(payload, path)

    {:error, :enoent} ->
      empty_state(path)

    {:error, reason} ->
      Logger.warning("failed to read sync state from #{path}: #{inspect(reason)}")
      empty_state(path)
  end
end

# Decodes a raw state payload, logging and substituting an empty state on
# any decode/validation failure.
defp decode_or_empty(payload, path) do
  case decode_persisted_state(payload, path) do
    {:ok, state} ->
      state

    {:error, reason} ->
      Logger.warning("failed to load sync state from #{path}: #{inspect(reason)}")
      empty_state(path)
  end
end

# Parses the JSON document and re-validates both the server definitions and
# their runtime entries; any step's {:error, reason} is returned as-is.
defp decode_persisted_state(payload, path) do
  with {:ok, document} <- JSON.decode(payload),
       {:ok, servers} <- decode_servers(Map.get(document, "servers", %{})),
       {:ok, runtime} <- decode_runtime(Map.get(document, "runtime", %{}), servers) do
    {:ok, %{path: path, servers: servers, runtime: runtime}}
  end
end
# Re-validates each persisted server through normalize_server/1, keying the
# result by the validated embedded id (the stored map key is ignored).
# Halts with the first validation error.
defp decode_servers(servers) when is_map(servers) do
  servers
  |> Map.values()
  |> Enum.reduce_while({:ok, %{}}, fn server_payload, {:ok, acc} ->
    case normalize_server(server_payload) do
      {:ok, server} -> {:cont, {:ok, Map.put(acc, server.id, server)}}
      {:error, reason} -> {:halt, {:error, reason}}
    end
  end)
end

defp decode_servers(_servers), do: {:error, :invalid_servers_state}

# Builds one runtime entry per known server; missing or malformed persisted
# entries fall back to defaults (handled inside normalize_runtime/2).
defp decode_runtime(runtime_payload, servers)
     when is_map(runtime_payload) and is_map(servers) do
  runtime =
    Map.new(servers, fn {server_id, server} ->
      {server_id, normalize_runtime(Map.get(runtime_payload, server_id), server)}
    end)

  {:ok, runtime}
end

defp decode_runtime(_runtime_payload, _servers), do: {:error, :invalid_runtime_state}
# Rehydrates a persisted runtime entry. Every field is coerced defensively
# (bad counters become 0, bad strings/booleans become nil/false) so a
# hand-edited or partially corrupted state file cannot crash the manager.
defp normalize_runtime(nil, server), do: default_runtime(server)
defp normalize_runtime(runtime, server) when is_map(runtime) do
%{
server_id: server.id,
state: normalize_runtime_state(fetch_value(runtime, :state)),
connected?: fetch_boolean(runtime, :connected?) || false,
last_connected_at: fetch_string_or_nil(runtime, :last_connected_at),
last_disconnected_at: fetch_string_or_nil(runtime, :last_disconnected_at),
last_sync_started_at: fetch_string_or_nil(runtime, :last_sync_started_at),
last_sync_completed_at: fetch_string_or_nil(runtime, :last_sync_completed_at),
last_event_received_at: fetch_string_or_nil(runtime, :last_event_received_at),
last_eose_at: fetch_string_or_nil(runtime, :last_eose_at),
reconnect_attempts: fetch_non_neg_integer(runtime, :reconnect_attempts),
last_error: fetch_string_or_nil(runtime, :last_error),
events_received: fetch_non_neg_integer(runtime, :events_received),
events_accepted: fetch_non_neg_integer(runtime, :events_accepted),
events_duplicate: fetch_non_neg_integer(runtime, :events_duplicate),
events_rejected: fetch_non_neg_integer(runtime, :events_rejected),
query_runs: fetch_non_neg_integer(runtime, :query_runs),
subscription_restarts: fetch_non_neg_integer(runtime, :subscription_restarts),
reconnects: fetch_non_neg_integer(runtime, :reconnects),
last_remote_eose_at: fetch_string_or_nil(runtime, :last_remote_eose_at),
cursor_created_at: fetch_optional_integer(runtime, :cursor_created_at),
cursor_event_id: fetch_string_or_nil(runtime, :cursor_event_id)
}
end
# Any non-map payload is discarded in favor of a fresh default runtime.
defp normalize_runtime(_runtime, server), do: default_runtime(server)
# Atomically persists state as JSON: write to a sibling ".tmp" file, then
# rename over the target so readers never observe a partial document. The
# temp file is cleaned up on any failure.
defp persist_state(%{path: path} = state) do
  temp_path = path <> ".tmp"

  outcome =
    with :ok <- File.mkdir_p(Path.dirname(path)),
         :ok <- File.write(temp_path, JSON.encode!(encode_state(state))) do
      File.rename(temp_path, path)
    end

  with {:error, reason} <- outcome do
    _ = File.rm(temp_path)
    {:error, reason}
  end
end

# On-disk representation: a versioned document with string-keyed server and
# runtime maps.
defp encode_state(state) do
  %{
    "version" => 2,
    "servers" => Map.new(state.servers, fn {id, server} -> {id, encode_server(server)} end),
    "runtime" => Map.new(state.runtime, fn {id, runtime} -> {id, encode_runtime(runtime)} end)
  }
end
# Serializes a validated server definition into the JSON document shape
# written by persist_state/1 (string keys, atoms rendered as strings).
defp encode_server(server) do
%{
"id" => server.id,
"url" => server.url,
"enabled?" => server.enabled?,
"auth_pubkey" => server.auth_pubkey,
"filters" => server.filters,
"mode" => Atom.to_string(server.mode),
"overlap_window_seconds" => server.overlap_window_seconds,
"auth" => %{"type" => Atom.to_string(server.auth.type)},
"tls" => %{
"mode" => Atom.to_string(server.tls.mode),
"hostname" => server.tls.hostname,
"pins" =>
Enum.map(server.tls.pins, fn pin ->
%{
"type" => Atom.to_string(pin.type),
"value" => pin.value
}
end)
},
"metadata" => server.metadata
}
end
# Serializes a runtime entry; values are already JSON-friendly (strings,
# integers, booleans, nil) except :state, which is an atom.
defp encode_runtime(runtime) do
%{
"server_id" => runtime.server_id,
"state" => Atom.to_string(runtime.state),
"connected?" => runtime.connected?,
"last_connected_at" => runtime.last_connected_at,
"last_disconnected_at" => runtime.last_disconnected_at,
"last_sync_started_at" => runtime.last_sync_started_at,
"last_sync_completed_at" => runtime.last_sync_completed_at,
"last_event_received_at" => runtime.last_event_received_at,
"last_eose_at" => runtime.last_eose_at,
"reconnect_attempts" => runtime.reconnect_attempts,
"last_error" => runtime.last_error,
"events_received" => runtime.events_received,
"events_accepted" => runtime.events_accepted,
"events_duplicate" => runtime.events_duplicate,
"events_rejected" => runtime.events_rejected,
"query_runs" => runtime.query_runs,
"subscription_restarts" => runtime.subscription_restarts,
"reconnects" => runtime.reconnects,
"last_remote_eose_at" => runtime.last_remote_eose_at,
"cursor_created_at" => runtime.cursor_created_at,
"cursor_event_id" => runtime.cursor_event_id
}
end
# Fresh manager state used on first boot or when the state file is
# unreadable/corrupt.
defp empty_state(path) do
%{path: path, servers: %{}, runtime: %{}}
end
# Initial runtime entry for a server: desired state mirrors the enabled?
# flag, all counters start at zero, all timestamps and cursor fields nil.
defp default_runtime(server) do
%{
server_id: server.id,
state: if(server.enabled?, do: :running, else: :stopped),
connected?: false,
last_connected_at: nil,
last_disconnected_at: nil,
last_sync_started_at: nil,
last_sync_completed_at: nil,
last_event_received_at: nil,
last_eose_at: nil,
reconnect_attempts: 0,
last_error: nil,
events_received: 0,
events_accepted: 0,
events_duplicate: 0,
events_rejected: 0,
query_runs: 0,
subscription_restarts: 0,
reconnects: 0,
last_remote_eose_at: nil,
cursor_created_at: nil,
cursor_event_id: nil
}
end
# Validates and canonicalizes a server definition (map with atom or string
# keys, via fetch_value/2). Fields are validated in order; the first
# failure short-circuits with a descriptive {:error, reason} tuple.
defp normalize_server(server) when is_map(server) do
with {:ok, id} <- normalize_non_empty_string(fetch_value(server, :id), :invalid_server_id),
{:ok, {url, host, scheme}} <- normalize_url(fetch_value(server, :url)),
{:ok, enabled?} <- normalize_boolean(fetch_value(server, :enabled?), true),
{:ok, auth_pubkey} <- normalize_pubkey(fetch_value(server, :auth_pubkey)),
{:ok, filters} <- normalize_filters(fetch_value(server, :filters)),
{:ok, mode} <- normalize_mode(fetch_value(server, :mode)),
{:ok, overlap_window_seconds} <-
normalize_overlap_window(fetch_value(server, :overlap_window_seconds)),
{:ok, auth} <- normalize_auth(fetch_value(server, :auth)),
# TLS validation needs the URL host (hostname fallback) and scheme
# (a :required TLS mode is only valid for wss URLs).
{:ok, tls} <- normalize_tls(fetch_value(server, :tls), host, scheme),
{:ok, metadata} <- normalize_metadata(fetch_value(server, :metadata)) do
{:ok,
%{
id: id,
url: url,
enabled?: enabled?,
auth_pubkey: auth_pubkey,
filters: filters,
mode: mode,
overlap_window_seconds: overlap_window_seconds,
auth: auth,
tls: tls,
metadata: metadata
}}
end
end
defp normalize_server(_server), do: {:error, :invalid_server}
# Accepts only ws:// or wss:// URLs with a non-empty host; returns the
# canonical URL string plus host and scheme for later TLS validation.
defp normalize_url(url) when is_binary(url) and url != "" do
  case URI.parse(url) do
    %URI{scheme: scheme, host: host} = uri
    when scheme in ["ws", "wss"] and is_binary(host) and host != "" ->
      {:ok, {URI.to_string(uri), host, scheme}}

    _uri ->
      {:error, :invalid_url}
  end
end

defp normalize_url(_url), do: {:error, :invalid_url}

# Auth pubkeys are lowercased and must match the 64-char hex pattern.
defp normalize_pubkey(pubkey) when is_binary(pubkey) do
  candidate = String.downcase(pubkey)

  if Regex.match?(@hex64, candidate) do
    {:ok, candidate}
  else
    {:error, :invalid_auth_pubkey}
  end
end

defp normalize_pubkey(_pubkey), do: {:error, :invalid_auth_pubkey}

# Filters are converted to string-keyed maps (for JSON round-tripping) and
# validated as NIP-01 filters; validation errors pass through unchanged.
defp normalize_filters(filters) when is_list(filters) do
  normalized = Enum.map(filters, &normalize_filter_map/1)

  case Filter.validate_filters(normalized) do
    :ok -> {:ok, normalized}
    error -> error
  end
end

defp normalize_filters(_filters), do: {:error, :invalid_filters}
# Sync mode: only :req_stream is currently supported; nil takes the default.
defp normalize_mode(mode) do
  case mode do
    nil -> {:ok, @default_mode}
    :req_stream -> {:ok, :req_stream}
    "req_stream" -> {:ok, :req_stream}
    _other -> {:error, :invalid_mode}
  end
end

# Overlap window must be a non-negative integer number of seconds.
defp normalize_overlap_window(nil), do: {:ok, @default_overlap_window_seconds}

defp normalize_overlap_window(seconds) when is_integer(seconds) and seconds >= 0 do
  {:ok, seconds}
end

defp normalize_overlap_window(_seconds), do: {:error, :invalid_overlap_window_seconds}

# Auth config is a map with a :type field; nil selects the default.
defp normalize_auth(nil), do: {:ok, %{type: @default_auth_type}}

defp normalize_auth(auth) when is_map(auth) do
  case normalize_auth_type(fetch_value(auth, :type)) do
    {:ok, type} -> {:ok, %{type: type}}
    error -> error
  end
end

defp normalize_auth(_auth), do: {:error, :invalid_auth}

# Only NIP-42 auth is supported.
defp normalize_auth_type(type) do
  case type do
    nil -> {:ok, @default_auth_type}
    :nip42 -> {:ok, :nip42}
    "nip42" -> {:ok, :nip42}
    _other -> {:error, :invalid_auth_type}
  end
end

# TLS config: the mode must be compatible with the URL scheme, the hostname
# defaults to the URL host, and pins are checked against the mode.
defp normalize_tls(tls, host, scheme) when is_map(tls) do
  with {:ok, mode} <- normalize_tls_mode(fetch_value(tls, :mode)),
       :ok <- validate_tls_mode_against_scheme(mode, scheme),
       {:ok, hostname} <- normalize_hostname(fetch_value(tls, :hostname) || host),
       {:ok, pins} <- normalize_tls_pins(mode, fetch_value(tls, :pins)) do
    {:ok, %{mode: mode, hostname: hostname, pins: pins}}
  end
end

defp normalize_tls(_tls, _host, _scheme), do: {:error, :invalid_tls}

defp normalize_tls_mode(mode) do
  case mode do
    nil -> {:ok, @default_tls_mode}
    :required -> {:ok, :required}
    "required" -> {:ok, :required}
    :disabled -> {:ok, :disabled}
    "disabled" -> {:ok, :disabled}
    _other -> {:error, :invalid_tls_mode}
  end
end

# :required TLS is only valid for wss:// URLs; :disabled allows anything.
defp validate_tls_mode_against_scheme(:required, scheme) do
  if scheme == "wss", do: :ok, else: {:error, :invalid_url}
end

defp validate_tls_mode_against_scheme(:disabled, _scheme), do: :ok

defp normalize_hostname(hostname) when is_binary(hostname) and hostname != "" do
  {:ok, hostname}
end

defp normalize_hostname(_hostname), do: {:error, :invalid_tls_hostname}
# With TLS disabled, any supplied pins are discarded; with TLS required, a
# non-empty pin list must be supplied and every entry must validate.
defp normalize_tls_pins(:disabled, nil), do: {:ok, []}
defp normalize_tls_pins(:disabled, pins) when is_list(pins), do: {:ok, []}

defp normalize_tls_pins(:required, pins) when is_list(pins) and pins != [] do
  pins
  |> Enum.reduce_while({:ok, []}, fn pin, {:ok, acc} ->
    case normalize_tls_pin(pin) do
      {:ok, normalized} -> {:cont, {:ok, [normalized | acc]}}
      {:error, _reason} = error -> {:halt, error}
    end
  end)
  |> then(fn
    {:ok, reversed} -> {:ok, Enum.reverse(reversed)}
    error -> error
  end)
end

defp normalize_tls_pins(:required, _pins), do: {:error, :invalid_tls_pins}

# A pin is a map with a supported :type and a non-empty :value.
defp normalize_tls_pin(pin) when is_map(pin) do
  with {:ok, type} <- normalize_tls_pin_type(fetch_value(pin, :type)),
       {:ok, value} <- normalize_non_empty_string(fetch_value(pin, :value), :invalid_tls_pin) do
    {:ok, %{type: type, value: value}}
  end
end

defp normalize_tls_pin(_pin), do: {:error, :invalid_tls_pin}

# Only SPKI SHA-256 pins are supported.
defp normalize_tls_pin_type(:spki_sha256), do: {:ok, :spki_sha256}
defp normalize_tls_pin_type("spki_sha256"), do: {:ok, :spki_sha256}
defp normalize_tls_pin_type(_type), do: {:error, :invalid_tls_pin}
# Metadata is free-form but must be a map; nil means empty.
defp normalize_metadata(nil), do: {:ok, %{}}
defp normalize_metadata(metadata) when is_map(metadata), do: {:ok, metadata}
defp normalize_metadata(_metadata), do: {:error, :invalid_metadata}

# Booleans pass through; nil takes the default; anything else is rejected.
defp normalize_boolean(nil, default), do: {:ok, default}
defp normalize_boolean(value, _default) when is_boolean(value), do: {:ok, value}
defp normalize_boolean(_value, _default), do: {:error, :invalid_enabled_flag}

# Requires a non-empty binary; otherwise fails with the caller's reason.
defp normalize_non_empty_string(value, reason) do
  if is_binary(value) and value != "" do
    {:ok, value}
  else
    {:error, reason}
  end
end

# Filters are stored with string keys so they round-trip through JSON.
defp normalize_filter_map(filter) when is_map(filter) do
  Map.new(filter, fn {key, value} ->
    {if(is_atom(key), do: Atom.to_string(key), else: key), value}
  end)
end

defp normalize_filter_map(filter), do: filter
# Persisted runtime state strings/atoms map to :running; anything else
# (including unknown values) degrades safely to :stopped.
defp normalize_runtime_state(state) when state in ["running", :running], do: :running
defp normalize_runtime_state(_state), do: :stopped

# Counter fields: non-negative integers, defaulting to 0 on bad data.
defp fetch_non_neg_integer(map, key) do
  case fetch_value(map, key) do
    value when is_integer(value) and value >= 0 -> value
    _other -> 0
  end
end

# Like fetch_non_neg_integer/2, but absent/bad values become nil.
defp fetch_optional_integer(map, key) do
  case fetch_value(map, key) do
    value when is_integer(value) and value >= 0 -> value
    _other -> nil
  end
end

defp fetch_boolean(map, key) do
  case fetch_value(map, key) do
    value when is_boolean(value) -> value
    _other -> nil
  end
end

# Non-empty strings only; "" and non-binaries become nil.
defp fetch_string_or_nil(map, key) do
  case fetch_value(map, key) do
    value when is_binary(value) and value != "" -> value
    _other -> nil
  end
end

# Accepts either atom or string keys (persisted JSON uses string keys,
# in-process callers use atoms).
#
# Fix: the original used `Map.get(map, key) || Map.get(map, string_key)`,
# so an atom-keyed `false` value was treated as missing and fell through to
# the string key — e.g. `%{enabled?: false}` was read as nil and then
# defaulted to true by normalize_boolean/2. `false` is now returned as a
# real value; an explicit nil under the atom key still falls back to the
# string key, preserving the original fallback behavior.
defp fetch_value(map, key) when is_map(map) do
  case Map.fetch(map, key) do
    {:ok, nil} -> Map.get(map, Atom.to_string(key))
    {:ok, value} -> value
    :error -> Map.get(map, Atom.to_string(key))
  end
end
# Configured state-file path, if any (app env :parrhesia / :sync / :path).
defp config_path, do: config_value(:path)

# Reads a key from the :sync app-env keyword list with an optional default.
defp config_value(key, default \\ nil) do
  sync_env = Application.get_env(:parrhesia, :sync, [])
  Keyword.get(sync_env, key, default)
end

# UTC timestamp (second precision, ISO 8601 string) used for all runtime
# bookkeeping fields.
defp now do
  DateTime.utc_now() |> DateTime.truncate(:second) |> DateTime.to_iso8601()
end
end

View File

@@ -11,9 +11,9 @@ defmodule Parrhesia.Application do
Parrhesia.Storage.Supervisor, Parrhesia.Storage.Supervisor,
Parrhesia.Subscriptions.Supervisor, Parrhesia.Subscriptions.Supervisor,
Parrhesia.Auth.Supervisor, Parrhesia.Auth.Supervisor,
Parrhesia.Sync.Supervisor,
Parrhesia.Policy.Supervisor, Parrhesia.Policy.Supervisor,
Parrhesia.Web.Endpoint, Parrhesia.Web.Endpoint,
Parrhesia.Web.MetricsEndpoint,
Parrhesia.Tasks.Supervisor Parrhesia.Tasks.Supervisor
] ]

View File

@@ -9,13 +9,20 @@ defmodule Parrhesia.Auth.Nip98 do
@spec validate_authorization_header(String.t() | nil, String.t(), String.t()) :: @spec validate_authorization_header(String.t() | nil, String.t(), String.t()) ::
{:ok, map()} | {:error, atom()} {:ok, map()} | {:error, atom()}
def validate_authorization_header(nil, _method, _url), do: {:error, :missing_authorization} def validate_authorization_header(authorization, method, url) do
validate_authorization_header(authorization, method, url, [])
end
def validate_authorization_header("Nostr " <> encoded_event, method, url) @spec validate_authorization_header(String.t() | nil, String.t(), String.t(), keyword()) ::
when is_binary(method) and is_binary(url) do {:ok, map()} | {:error, atom()}
def validate_authorization_header(nil, _method, _url, _opts),
do: {:error, :missing_authorization}
def validate_authorization_header("Nostr " <> encoded_event, method, url, opts)
when is_binary(method) and is_binary(url) and is_list(opts) do
with {:ok, event_json} <- decode_base64(encoded_event), with {:ok, event_json} <- decode_base64(encoded_event),
{:ok, event} <- JSON.decode(event_json), {:ok, event} <- JSON.decode(event_json),
:ok <- validate_event_shape(event), :ok <- validate_event_shape(event, opts),
:ok <- validate_http_binding(event, method, url) do :ok <- validate_http_binding(event, method, url) do
{:ok, event} {:ok, event}
else else
@@ -24,7 +31,8 @@ defmodule Parrhesia.Auth.Nip98 do
end end
end end
def validate_authorization_header(_header, _method, _url), do: {:error, :invalid_authorization} def validate_authorization_header(_header, _method, _url, _opts),
do: {:error, :invalid_authorization}
defp decode_base64(encoded_event) do defp decode_base64(encoded_event) do
case Base.decode64(encoded_event) do case Base.decode64(encoded_event) do
@@ -33,33 +41,35 @@ defmodule Parrhesia.Auth.Nip98 do
end end
end end
defp validate_event_shape(event) when is_map(event) do defp validate_event_shape(event, opts) when is_map(event) do
with :ok <- EventValidator.validate(event), with :ok <- EventValidator.validate(event),
:ok <- validate_kind(event), :ok <- validate_kind(event),
:ok <- validate_fresh_created_at(event) do :ok <- validate_fresh_created_at(event, opts) do
:ok :ok
else else
:ok -> :ok {:error, :stale_event} -> {:error, :stale_event}
{:error, _reason} -> {:error, :invalid_event} {:error, _reason} -> {:error, :invalid_event}
end end
end end
defp validate_event_shape(_event), do: {:error, :invalid_event} defp validate_event_shape(_event, _opts), do: {:error, :invalid_event}
defp validate_kind(%{"kind" => 27_235}), do: :ok defp validate_kind(%{"kind" => 27_235}), do: :ok
defp validate_kind(_event), do: {:error, :invalid_event} defp validate_kind(_event), do: {:error, :invalid_event}
defp validate_fresh_created_at(%{"created_at" => created_at}) when is_integer(created_at) do defp validate_fresh_created_at(%{"created_at" => created_at}, opts)
when is_integer(created_at) do
now = System.system_time(:second) now = System.system_time(:second)
max_age_seconds = Keyword.get(opts, :max_age_seconds, @max_age_seconds)
if abs(now - created_at) <= @max_age_seconds do if abs(now - created_at) <= max_age_seconds do
:ok :ok
else else
{:error, :stale_event} {:error, :stale_event}
end end
end end
defp validate_fresh_created_at(_event), do: {:error, :invalid_event} defp validate_fresh_created_at(_event, _opts), do: {:error, :invalid_event}
defp validate_http_binding(event, method, url) do defp validate_http_binding(event, method, url) do
tags = Map.get(event, "tags", []) tags = Map.get(event, "tags", [])

View File

@@ -12,7 +12,8 @@ defmodule Parrhesia.Auth.Supervisor do
@impl true @impl true
def init(_init_arg) do def init(_init_arg) do
children = [ children = [
{Parrhesia.Auth.Challenges, name: Parrhesia.Auth.Challenges} {Parrhesia.Auth.Challenges, name: Parrhesia.Auth.Challenges},
{Parrhesia.API.Identity.Manager, []}
] ]
Supervisor.init(children, strategy: :one_for_one) Supervisor.init(children, strategy: :one_for_one)

View File

@@ -0,0 +1,136 @@
defmodule Parrhesia.Negentropy.Engine do
@moduledoc """
Relay/client-agnostic negentropy reconciliation engine.

Builds and answers NIP-77-style range messages over a sorted item set.
Ranges small enough (at most the id-list threshold) are sent as explicit
id lists; larger ranges are summarized with fingerprints and recursively
split on mismatch.
"""
alias Parrhesia.Negentropy.Message
# Ranges with at most this many items are sent as explicit id lists
# instead of fingerprints.
@default_id_list_threshold 32
@type item :: Message.item()
# Opening message of a reconciliation round: a single range covering
# everything up to :infinity.
@spec initial_message([item()], keyword()) :: binary()
def initial_message(items, opts \\ []) when is_list(opts) do
normalized_items = normalize_items(items)
Message.encode([
describe_range(normalized_items, :infinity, id_list_threshold(opts))
])
end
# Answers a peer's message against our items. Unsupported protocol
# versions are answered with the supported-version message rather than an
# error; undecodable messages return {:error, reason}.
@spec answer([item()], binary(), keyword()) :: {:ok, binary()} | {:error, term()}
def answer(items, incoming_message, opts \\ [])
when is_binary(incoming_message) and is_list(opts) do
normalized_items = normalize_items(items)
threshold = id_list_threshold(opts)
case Message.decode(incoming_message) do
{:ok, ranges} ->
response_ranges =
respond_to_ranges(normalized_items, ranges, Message.initial_lower_bound(), threshold)
{:ok, Message.encode(response_ranges)}
{:unsupported_version, _supported_version} ->
{:ok, Message.supported_version_message()}
{:error, reason} ->
{:error, reason}
end
end
# Walks the incoming ranges in order; each range's upper bound becomes the
# next range's lower bound, mirroring the wire format.
defp respond_to_ranges(_items, [], _lower_bound, _threshold), do: []
defp respond_to_ranges(items, [range | rest], lower_bound, threshold) do
upper_bound = Map.fetch!(range, :upper_bound)
items_in_range =
Enum.filter(items, fn item ->
Message.item_in_range?(item, lower_bound, upper_bound)
end)
response =
case range.mode do
:skip ->
[%{upper_bound: upper_bound, mode: :skip, payload: nil}]
:fingerprint ->
respond_to_fingerprint_range(items_in_range, upper_bound, range.payload, threshold)
:id_list ->
respond_to_id_list_range(items_in_range, upper_bound, range.payload, threshold)
end
response ++ respond_to_ranges(items, rest, upper_bound, threshold)
end
# Matching fingerprints mean both sides agree on this range: skip it.
defp respond_to_fingerprint_range(items, upper_bound, remote_fingerprint, threshold) do
if Message.fingerprint(items) == remote_fingerprint do
[%{upper_bound: upper_bound, mode: :skip, payload: nil}]
else
mismatch_response(items, upper_bound, threshold)
end
end
# Identical (ordered) id lists mean agreement: skip the range.
defp respond_to_id_list_range(items, upper_bound, remote_ids, threshold) do
if Enum.map(items, & &1.id) == remote_ids do
[%{upper_bound: upper_bound, mode: :skip, payload: nil}]
else
mismatch_response(items, upper_bound, threshold)
end
end
# On mismatch: small ranges are answered with the full id list; larger
# ranges are split in half so the peer can narrow the divergence.
defp mismatch_response(items, upper_bound, threshold) do
if length(items) <= threshold do
[%{upper_bound: upper_bound, mode: :id_list, payload: Enum.map(items, & &1.id)}]
else
split_response(items, upper_bound, threshold)
end
end
# Splits a mismatching range at the item midpoint; the boundary between the
# halves is derived from the last left item and first right item.
defp split_response(items, upper_bound, threshold) do
midpoint = div(length(items), 2)
left_items = Enum.take(items, midpoint)
right_items = Enum.drop(items, midpoint)
boundary =
left_items
|> List.last()
|> then(&Message.split_bound(&1, hd(right_items)))
[
describe_range(left_items, boundary, threshold),
describe_range(right_items, upper_bound, threshold)
]
end
# Describes a range as an id list when small, otherwise as a fingerprint.
defp describe_range(items, upper_bound, threshold) do
if length(items) <= threshold do
%{upper_bound: upper_bound, mode: :id_list, payload: Enum.map(items, & &1.id)}
else
%{upper_bound: upper_bound, mode: :fingerprint, payload: Message.fingerprint(items)}
end
end
# Validates item shape and sorts by (created_at, id), the protocol order.
defp normalize_items(items) do
items
|> Enum.map(&normalize_item/1)
|> Enum.sort(&(Message.compare_items(&1, &2) != :gt))
end
# Items must carry a non-negative timestamp and a raw 32-byte id.
defp normalize_item(%{created_at: created_at, id: id})
when is_integer(created_at) and created_at >= 0 and is_binary(id) and byte_size(id) == 32 do
%{created_at: created_at, id: id}
end
defp normalize_item(item) do
raise ArgumentError, "invalid negentropy item: #{inspect(item)}"
end
# Threshold option must be a positive integer; anything else falls back to
# the default.
defp id_list_threshold(opts) do
case Keyword.get(opts, :id_list_threshold, @default_id_list_threshold) do
threshold when is_integer(threshold) and threshold > 0 -> threshold
_other -> @default_id_list_threshold
end
end
end

View File

@@ -0,0 +1,349 @@
defmodule Parrhesia.Negentropy.Message do
  @moduledoc """
  NIP-77 negentropy message codec and helpers.

  A message is a single protocol-version byte followed by a sequence of
  ranges. Each range carries an upper bound (a delta-encoded timestamp
  plus a zero-truncated id prefix) and one of three payload modes:
  `:skip`, `:fingerprint`, or `:id_list`. Integers on the wire are
  base-128 varints (big-endian digit order, high bit as continuation).
  """
  import Bitwise

  # Version byte required at the head of every message.
  @protocol_version 0x61
  # Nostr event ids are 32-byte binaries.
  @id_size 32
  # A fingerprint is the first 16 bytes of a SHA-256 digest.
  @fingerprint_size 16
  # Modulus for the wrapping 256-bit id sum used by fingerprint/1.
  @u256_mod 1 <<< 256
  # All-zero id; used as the id component of the initial lower bound.
  @zero_id <<0::size(256)>>

  # A reconcilable item: event creation time plus 32-byte event id.
  @type item :: %{created_at: non_neg_integer(), id: binary()}
  # A range bound: unbounded above, or a {timestamp, id} pair.
  @type bound :: :infinity | {non_neg_integer(), binary()}
  @type range ::
          %{
            upper_bound: bound(),
            mode: :skip | :fingerprint | :id_list,
            payload: nil | binary() | [binary()]
          }

  @spec protocol_version() :: byte()
  def protocol_version, do: @protocol_version

  @doc """
  A minimal message containing only the supported version byte.
  """
  @spec supported_version_message() :: binary()
  def supported_version_message, do: <<@protocol_version>>

  @doc """
  Decodes a binary negentropy message into an ordered list of ranges.

  On a version mismatch this returns `{:unsupported_version, version}`
  carrying this codec's own supported version byte (so callers can echo
  it back to the peer); malformed input yields `{:error, :invalid_message}`.
  """
  @spec decode(binary()) :: {:ok, [range()]} | {:unsupported_version, byte()} | {:error, term()}
  # NOTE(review): deliberately reports the *local* supported version, not
  # the remote one — pairs with supported_version_message/0 above.
  def decode(<<version, _rest::binary>>) when version != @protocol_version,
    do: {:unsupported_version, @protocol_version}

  def decode(<<@protocol_version, rest::binary>>) do
    decode_ranges(rest, 0, initial_lower_bound(), [])
  end

  def decode(_message), do: {:error, :invalid_message}

  @doc """
  Encodes a list of ranges into a binary message.

  Trailing `:skip` ranges carry no information and are elided before
  encoding; timestamps are delta encoded against the preceding range.
  """
  @spec encode([range()]) :: binary()
  def encode(ranges) when is_list(ranges) do
    ranges
    |> drop_trailing_skip_ranges()
    |> Enum.reduce({[@protocol_version], 0}, fn range, {acc, previous_timestamp} ->
      {encoded_range, next_timestamp} = encode_range(range, previous_timestamp)
      {[acc, encoded_range], next_timestamp}
    end)
    |> elem(0)
    |> IO.iodata_to_binary()
  end

  @doc """
  Computes the 16-byte fingerprint of an item list.

  Sums all ids interpreted as unsigned little-endian 256-bit integers
  (wrapping mod 2^256), appends the varint item count, hashes with
  SHA-256, and keeps the first 16 bytes of the digest.
  """
  @spec fingerprint([item()]) :: binary()
  def fingerprint(items) when is_list(items) do
    sum =
      Enum.reduce(items, 0, fn %{id: id}, acc ->
        <<id_integer::unsigned-little-size(256)>> = id
        rem(acc + id_integer, @u256_mod)
      end)

    payload = [<<sum::unsigned-little-size(256)>>, encode_varint(length(items))]

    payload
    |> IO.iodata_to_binary()
    |> then(&:crypto.hash(:sha256, &1))
    |> binary_part(0, @fingerprint_size)
  end

  @doc """
  Orders items by `created_at`, tie-broken by byte-wise id comparison.
  """
  @spec compare_items(item(), item()) :: :lt | :eq | :gt
  def compare_items(left, right) do
    cond do
      left.created_at < right.created_at -> :lt
      left.created_at > right.created_at -> :gt
      left.id < right.id -> :lt
      left.id > right.id -> :gt
      true -> :eq
    end
  end

  @doc """
  Orders bounds; `:infinity` sorts after every finite bound.
  """
  @spec compare_bound(bound(), bound()) :: :lt | :eq | :gt
  def compare_bound(:infinity, :infinity), do: :eq
  def compare_bound(:infinity, _other), do: :gt
  def compare_bound(_other, :infinity), do: :lt

  def compare_bound({left_timestamp, left_id}, {right_timestamp, right_id}) do
    cond do
      left_timestamp < right_timestamp -> :lt
      left_timestamp > right_timestamp -> :gt
      left_id < right_id -> :lt
      left_id > right_id -> :gt
      true -> :eq
    end
  end

  @doc """
  True when `lower_bound <= item < upper_bound` (half-open interval).
  """
  @spec item_in_range?(item(), bound(), bound()) :: boolean()
  def item_in_range?(item, lower_bound, upper_bound) do
    compare_item_to_bound(item, lower_bound) != :lt and
      compare_item_to_bound(item, upper_bound) == :lt
  end

  @spec initial_lower_bound() :: bound()
  def initial_lower_bound, do: {0, @zero_id}

  @spec zero_id() :: binary()
  def zero_id, do: @zero_id

  @doc """
  Picks a compact bound separating two adjacent sorted items, i.e. a
  bound `b` with `previous_item < b <= next_item`.

  When the timestamps differ, `{next.created_at, zero_id}` suffices; with
  equal timestamps the shortest distinguishing id prefix (zero-padded to
  32 bytes) is used. Raises when `previous_item > next_item`.
  """
  @spec split_bound(item(), item()) :: bound()
  def split_bound(previous_item, next_item)
      when is_map(previous_item) and is_map(next_item) do
    cond do
      previous_item.created_at < next_item.created_at ->
        {next_item.created_at, @zero_id}

      previous_item.created_at == next_item.created_at ->
        # One byte past the shared prefix is enough to separate the ids.
        # NOTE(review): assumes the two items are distinct — identical
        # 32-byte ids would make prefix_length 33 and fail the match below.
        prefix_length = shared_prefix_length(previous_item.id, next_item.id) + 1
        <<prefix::binary-size(prefix_length), _rest::binary>> = next_item.id
        {next_item.created_at, prefix <> :binary.copy(<<0>>, @id_size - prefix_length)}

      true ->
        raise ArgumentError, "split_bound/2 requires previous_item <= next_item"
    end
  end

  # Decodes ranges until the input is exhausted, threading the running
  # timestamp (for delta decoding) and the previous upper bound (bounds
  # must be strictly increasing).
  defp decode_ranges(<<>>, _previous_timestamp, _lower_bound, ranges),
    do: {:ok, Enum.reverse(ranges)}

  defp decode_ranges(binary, previous_timestamp, lower_bound, ranges) do
    with {:ok, upper_bound, rest, next_timestamp} <- decode_bound(binary, previous_timestamp),
         :ok <- validate_upper_bound(lower_bound, upper_bound),
         {:ok, mode, payload, tail} <- decode_payload(rest) do
      next_ranges = [%{upper_bound: upper_bound, mode: mode, payload: payload} | ranges]

      # An :infinity bound must terminate the message; trailing bytes
      # after it are malformed.
      if upper_bound == :infinity and tail != <<>> do
        {:error, :invalid_message}
      else
        decode_ranges(tail, next_timestamp, upper_bound, next_ranges)
      end
    end
  end

  # Each upper bound must be strictly greater than the previous bound.
  defp validate_upper_bound(lower_bound, upper_bound) do
    if compare_bound(lower_bound, upper_bound) == :lt do
      :ok
    else
      {:error, :invalid_message}
    end
  end

  # Bound wire format: varint timestamp delta, varint id-prefix length
  # (0..32), then that many prefix bytes.
  defp decode_bound(binary, previous_timestamp) do
    with {:ok, encoded_timestamp, rest} <- decode_varint(binary),
         {:ok, length, tail} <- decode_varint(rest),
         :ok <- validate_bound_prefix_length(length),
         {:ok, prefix, remainder} <- decode_prefix(tail, length) do
      decode_bound_value(encoded_timestamp, length, prefix, remainder, previous_timestamp)
    end
  end

  # Payload wire format: varint mode tag (0 = skip, 1 = fingerprint,
  # 2 = id list) followed by the mode-specific body.
  defp decode_payload(binary) do
    with {:ok, mode_value, rest} <- decode_varint(binary) do
      case mode_value do
        0 ->
          {:ok, :skip, nil, rest}

        1 ->
          decode_fingerprint_payload(rest)

        2 ->
          decode_id_list_payload(rest)

        _other ->
          {:error, :invalid_message}
      end
    end
  end

  # Big-endian base-128 varint; the high bit of each byte is the
  # continuation flag.
  # NOTE(review): varint length is not capped here, so a hostile message
  # could encode a very large integer — overall message size is presumably
  # bounded by the caller; confirm at the call sites.
  defp decode_varint(binary), do: decode_varint(binary, 0)

  defp decode_varint(<<>>, _acc), do: {:error, :invalid_message}

  defp decode_varint(<<byte, rest::binary>>, acc) do
    value = acc * 128 + band(byte, 0x7F)

    if band(byte, 0x80) == 0 do
      {:ok, value, rest}
    else
      decode_varint(rest, value)
    end
  end

  # Encodes one range as iodata and returns the timestamp to delta
  # against for the next range.
  defp encode_range(range, previous_timestamp) do
    {encoded_bound, next_timestamp} = encode_bound(range.upper_bound, previous_timestamp)
    {mode, payload} = encode_payload(range)
    {[encoded_bound, mode, payload], next_timestamp}
  end

  # Timestamp deltas are offset by 1 on the wire: the encoded value 0
  # (with a zero-length prefix) is reserved for :infinity.
  defp encode_bound(:infinity, previous_timestamp),
    do: {[encode_varint(0), encode_varint(0)], previous_timestamp}

  defp encode_bound({timestamp, id}, previous_timestamp) do
    # Only the non-zero-suffix prefix of the id is transmitted; the
    # decoder zero-pads it back to 32 bytes.
    prefix_length = id_prefix_length(id)
    <<prefix::binary-size(prefix_length), _rest::binary>> = id

    {
      [encode_varint(timestamp - previous_timestamp + 1), encode_varint(prefix_length), prefix],
      timestamp
    }
  end

  defp encode_payload(%{mode: :skip}) do
    {encode_varint(0), <<>>}
  end

  defp encode_payload(%{mode: :fingerprint, payload: fingerprint})
       when is_binary(fingerprint) and byte_size(fingerprint) == @fingerprint_size do
    {encode_varint(1), fingerprint}
  end

  defp encode_payload(%{mode: :id_list, payload: ids}) when is_list(ids) do
    encoded_ids = Enum.map(ids, fn id -> validate_id!(id) end)
    {encode_varint(2), [encode_varint(length(encoded_ids)), encoded_ids]}
  end

  # Emits base-128 digits most-significant first, setting the
  # continuation bit on every byte except the last.
  defp encode_varint(value) when is_integer(value) and value >= 0 do
    digits = collect_base128_digits(value, [])
    last_index = length(digits) - 1

    digits
    |> Enum.with_index()
    |> Enum.map(fn {digit, index} ->
      if index == last_index do
        digit
      else
        digit + 128
      end
    end)
    |> :erlang.list_to_binary()
  end

  defp collect_base128_digits(value, acc) do
    quotient = div(value, 128)
    remainder = rem(value, 128)

    if quotient == 0 do
      [remainder | acc]
    else
      collect_base128_digits(quotient, [remainder | acc])
    end
  end

  # Splits a concatenation of 32-byte ids back into a list.
  defp unpack_ids(binary), do: unpack_ids(binary, [])

  defp unpack_ids(<<>>, acc), do: Enum.reverse(acc)

  defp unpack_ids(<<id::binary-size(@id_size), rest::binary>>, acc),
    do: unpack_ids(rest, [id | acc])

  defp decode_prefix(binary, length) when byte_size(binary) >= length do
    <<prefix::binary-size(length), rest::binary>> = binary
    {:ok, prefix, rest}
  end

  defp decode_prefix(_binary, _length), do: {:error, :invalid_message}

  # Encoded timestamp 0 with an empty prefix means :infinity; 0 with a
  # non-empty prefix is malformed. Otherwise the real timestamp is
  # previous + delta - 1 and the id is the prefix zero-padded to 32 bytes.
  defp decode_bound_value(0, 0, _prefix, remainder, previous_timestamp),
    do: {:ok, :infinity, remainder, previous_timestamp}

  defp decode_bound_value(0, _length, _prefix, _remainder, _previous_timestamp),
    do: {:error, :invalid_message}

  defp decode_bound_value(encoded_timestamp, length, prefix, remainder, previous_timestamp) do
    timestamp = previous_timestamp + encoded_timestamp - 1
    id = prefix <> :binary.copy(<<0>>, @id_size - length)
    {:ok, {timestamp, id}, remainder, timestamp}
  end

  defp decode_fingerprint_payload(<<fingerprint::binary-size(@fingerprint_size), tail::binary>>),
    do: {:ok, :fingerprint, fingerprint, tail}

  defp decode_fingerprint_payload(_payload), do: {:error, :invalid_message}

  # Id-list wire format: varint count followed by count * 32 id bytes.
  defp decode_id_list_payload(rest) do
    with {:ok, count, tail} <- decode_varint(rest),
         {:ok, ids, remainder} <- decode_id_list_bytes(tail, count) do
      {:ok, :id_list, ids, remainder}
    end
  end

  defp decode_id_list_bytes(tail, count) do
    expected_bytes = count * @id_size

    if byte_size(tail) >= expected_bytes do
      <<ids::binary-size(expected_bytes), remainder::binary>> = tail
      {:ok, unpack_ids(ids), remainder}
    else
      {:error, :invalid_message}
    end
  end

  # Id prefixes may be 0..32 bytes long.
  defp validate_bound_prefix_length(length)
       when is_integer(length) and length >= 0 and length <= @id_size,
       do: :ok

  defp validate_bound_prefix_length(_length), do: {:error, :invalid_message}

  # Length of the shortest prefix of `id` that, zero-padded to 32 bytes,
  # reproduces `id` exactly (drops the trailing zero bytes).
  defp id_prefix_length(id) do
    id
    |> validate_id!()
    |> :binary.bin_to_list()
    |> Enum.reverse()
    |> Enum.drop_while(&(&1 == 0))
    |> length()
  end

  # Number of leading bytes the two ids have in common.
  defp shared_prefix_length(left_id, right_id) do
    left_id = validate_id!(left_id)
    right_id = validate_id!(right_id)

    left_id
    |> :binary.bin_to_list()
    |> Enum.zip(:binary.bin_to_list(right_id))
    |> Enum.reduce_while(0, fn
      {left_byte, right_byte}, acc when left_byte == right_byte -> {:cont, acc + 1}
      _pair, acc -> {:halt, acc}
    end)
  end

  # Trailing :skip ranges carry no information, so encode/1 elides them.
  defp drop_trailing_skip_ranges(ranges) do
    ranges
    |> Enum.reverse()
    |> Enum.drop_while(fn range -> range.mode == :skip end)
    |> Enum.reverse()
  end

  # Items compare strictly below :infinity; finite bounds compare like
  # items (timestamp first, then id bytes).
  defp compare_item_to_bound(_item, :infinity), do: :lt

  defp compare_item_to_bound(item, {timestamp, id}) do
    cond do
      item.created_at < timestamp -> :lt
      item.created_at > timestamp -> :gt
      item.id < id -> :lt
      item.id > id -> :gt
      true -> :eq
    end
  end

  defp validate_id!(id) when is_binary(id) and byte_size(id) == @id_size, do: id

  defp validate_id!(_id) do
    raise ArgumentError, "negentropy ids must be 32-byte binaries"
  end
end

View File

@@ -1,10 +1,13 @@
defmodule Parrhesia.Negentropy.Sessions do defmodule Parrhesia.Negentropy.Sessions do
@moduledoc """ @moduledoc """
In-memory NEG-* session tracking. In-memory NIP-77 session tracking over bounded local event snapshots.
""" """
use GenServer use GenServer
alias Parrhesia.Negentropy.Engine
alias Parrhesia.Storage
@type session_key :: {pid(), String.t()} @type session_key :: {pid(), String.t()}
@default_max_payload_bytes 4096 @default_max_payload_bytes 4096
@@ -12,6 +15,8 @@ defmodule Parrhesia.Negentropy.Sessions do
@default_max_total_sessions 10_000 @default_max_total_sessions 10_000
@default_max_idle_seconds 60 @default_max_idle_seconds 60
@default_sweep_interval_seconds 10 @default_sweep_interval_seconds 10
@default_max_items_per_session 50_000
@default_id_list_threshold 32
@sweep_idle_sessions :sweep_idle_sessions @sweep_idle_sessions :sweep_idle_sessions
@spec start_link(keyword()) :: GenServer.on_start() @spec start_link(keyword()) :: GenServer.on_start()
@@ -20,16 +25,19 @@ defmodule Parrhesia.Negentropy.Sessions do
GenServer.start_link(__MODULE__, opts, name: name) GenServer.start_link(__MODULE__, opts, name: name)
end end
@spec open(GenServer.server(), pid(), String.t(), map()) :: {:ok, map()} | {:error, term()} @spec open(GenServer.server(), pid(), String.t(), map(), binary(), keyword()) ::
def open(server \\ __MODULE__, owner_pid, subscription_id, params) {:ok, binary()} | {:error, term()}
when is_pid(owner_pid) and is_binary(subscription_id) and is_map(params) do def open(server \\ __MODULE__, owner_pid, subscription_id, filter, message, opts \\ [])
GenServer.call(server, {:open, owner_pid, subscription_id, params}) when is_pid(owner_pid) and is_binary(subscription_id) and is_map(filter) and
is_binary(message) and is_list(opts) do
GenServer.call(server, {:open, owner_pid, subscription_id, filter, message, opts})
end end
@spec message(GenServer.server(), pid(), String.t(), map()) :: {:ok, map()} | {:error, term()} @spec message(GenServer.server(), pid(), String.t(), binary()) ::
def message(server \\ __MODULE__, owner_pid, subscription_id, payload) {:ok, binary()} | {:error, term()}
when is_pid(owner_pid) and is_binary(subscription_id) and is_map(payload) do def message(server \\ __MODULE__, owner_pid, subscription_id, message)
GenServer.call(server, {:message, owner_pid, subscription_id, payload}) when is_pid(owner_pid) and is_binary(subscription_id) and is_binary(message) do
GenServer.call(server, {:message, owner_pid, subscription_id, message})
end end
@spec close(GenServer.server(), pid(), String.t()) :: :ok @spec close(GenServer.server(), pid(), String.t()) :: :ok
@@ -63,7 +71,17 @@ defmodule Parrhesia.Negentropy.Sessions do
max_total_sessions: max_total_sessions:
normalize_positive_integer(Keyword.get(opts, :max_total_sessions), max_total_sessions()), normalize_positive_integer(Keyword.get(opts, :max_total_sessions), max_total_sessions()),
max_idle_ms: max_idle_ms, max_idle_ms: max_idle_ms,
sweep_interval_ms: sweep_interval_ms sweep_interval_ms: sweep_interval_ms,
max_items_per_session:
normalize_positive_integer(
Keyword.get(opts, :max_items_per_session),
max_items_per_session()
),
id_list_threshold:
normalize_positive_integer(
Keyword.get(opts, :id_list_threshold),
id_list_threshold()
)
} }
:ok = schedule_idle_sweep(sweep_interval_ms) :ok = schedule_idle_sweep(sweep_interval_ms)
@@ -72,16 +90,19 @@ defmodule Parrhesia.Negentropy.Sessions do
end end
@impl true @impl true
def handle_call({:open, owner_pid, subscription_id, params}, _from, state) do def handle_call({:open, owner_pid, subscription_id, filter, message, opts}, _from, state) do
key = {owner_pid, subscription_id} key = {owner_pid, subscription_id}
with :ok <- validate_payload_size(params, state.max_payload_bytes), with :ok <- validate_payload_size(filter, message, state.max_payload_bytes),
:ok <- enforce_session_limits(state, owner_pid, key) do :ok <- enforce_session_limits(state, owner_pid, key),
{:ok, refs} <- fetch_event_refs(filter, opts, state.max_items_per_session),
{:ok, response} <-
Engine.answer(refs, message, id_list_threshold: state.id_list_threshold) do
now_ms = System.monotonic_time(:millisecond) now_ms = System.monotonic_time(:millisecond)
session = %{ session = %{
cursor: 0, filter: filter,
params: params, refs: refs,
opened_at: System.system_time(:second), opened_at: System.system_time(:second),
last_active_at_ms: now_ms last_active_at_ms: now_ms
} }
@@ -91,14 +112,14 @@ defmodule Parrhesia.Negentropy.Sessions do
|> ensure_monitor(owner_pid) |> ensure_monitor(owner_pid)
|> put_in([:sessions, key], session) |> put_in([:sessions, key], session)
{:reply, {:ok, %{"status" => "open", "cursor" => 0}}, state} {:reply, {:ok, response}, state}
else else
{:error, reason} -> {:error, reason} ->
{:reply, {:error, reason}, state} {:reply, {:error, reason}, state}
end end
end end
def handle_call({:message, owner_pid, subscription_id, payload}, _from, state) do def handle_call({:message, owner_pid, subscription_id, message}, _from, state) do
key = {owner_pid, subscription_id} key = {owner_pid, subscription_id}
case Map.get(state.sessions, key) do case Map.get(state.sessions, key) do
@@ -106,20 +127,18 @@ defmodule Parrhesia.Negentropy.Sessions do
{:reply, {:error, :unknown_session}, state} {:reply, {:error, :unknown_session}, state}
session -> session ->
case validate_payload_size(payload, state.max_payload_bytes) do with :ok <- validate_payload_size(session.filter, message, state.max_payload_bytes),
:ok -> {:ok, response} <-
cursor = session.cursor + 1 Engine.answer(session.refs, message, id_list_threshold: state.id_list_threshold) do
next_session = %{
session
| last_active_at_ms: System.monotonic_time(:millisecond)
}
next_session = %{ state = put_in(state, [:sessions, key], next_session)
session
| cursor: cursor,
last_active_at_ms: System.monotonic_time(:millisecond)
}
state = put_in(state, [:sessions, key], next_session)
{:reply, {:ok, %{"status" => "ack", "cursor" => cursor}}, state}
{:reply, {:ok, response}, state}
else
{:error, reason} -> {:error, reason} ->
{:reply, {:error, reason}, state} {:reply, {:error, reason}, state}
end end
@@ -185,6 +204,21 @@ defmodule Parrhesia.Negentropy.Sessions do
def handle_info(_message, state), do: {:noreply, state} def handle_info(_message, state), do: {:noreply, state}
defp fetch_event_refs(filter, opts, max_items_per_session) do
query_opts =
opts
|> Keyword.take([:now, :requester_pubkeys])
|> Keyword.put(:limit, max_items_per_session + 1)
with {:ok, refs} <- Storage.events().query_event_refs(%{}, [filter], query_opts) do
if length(refs) > max_items_per_session do
{:error, :query_too_big}
else
{:ok, refs}
end
end
end
defp clear_monitors_without_sessions(state, owner_pids) do defp clear_monitors_without_sessions(state, owner_pids) do
Enum.reduce(Map.keys(state.monitors), state, fn owner_pid, acc -> Enum.reduce(Map.keys(state.monitors), state, fn owner_pid, acc ->
if MapSet.member?(owner_pids, owner_pid) do if MapSet.member?(owner_pids, owner_pid) do
@@ -203,8 +237,8 @@ defmodule Parrhesia.Negentropy.Sessions do
end) end)
end end
defp validate_payload_size(payload, max_payload_bytes) do defp validate_payload_size(filter, message, max_payload_bytes) do
if :erlang.external_size(payload) <= max_payload_bytes do if :erlang.external_size({filter, message}) <= max_payload_bytes do
:ok :ok
else else
{:error, :payload_too_large} {:error, :payload_too_large}
@@ -296,6 +330,18 @@ defmodule Parrhesia.Negentropy.Sessions do
|> Keyword.get(:negentropy_session_sweep_interval_seconds, @default_sweep_interval_seconds) |> Keyword.get(:negentropy_session_sweep_interval_seconds, @default_sweep_interval_seconds)
end end
defp max_items_per_session do
:parrhesia
|> Application.get_env(:limits, [])
|> Keyword.get(:max_negentropy_items_per_session, @default_max_items_per_session)
end
defp id_list_threshold do
:parrhesia
|> Application.get_env(:limits, [])
|> Keyword.get(:negentropy_id_list_threshold, @default_id_list_threshold)
end
defp normalize_positive_integer(value, _default) when is_integer(value) and value > 0, defp normalize_positive_integer(value, _default) when is_integer(value) and value > 0,
do: value do: value

View File

@@ -0,0 +1,68 @@
defmodule Parrhesia.Policy.ConnectionPolicy do
  @moduledoc """
  Connection/session-level policy checks shared by websocket and management entrypoints.
  """
  alias Parrhesia.Storage

  @doc """
  Rejects connections from blocked remote IPs.

  Accepts `:inet`-style 4/8 tuples, a string address, or `nil` (unknown
  source, allowed). Storage lookup failures fail open and return `:ok`.
  """
  @spec authorize_remote_ip(tuple() | String.t() | nil) :: :ok | {:error, :ip_blocked}
  def authorize_remote_ip(remote_ip) do
    case normalize_ip(remote_ip) do
      nil ->
        :ok

      normalized_ip ->
        case Storage.moderation().ip_blocked?(%{}, normalized_ip) do
          {:ok, true} -> {:error, :ip_blocked}
          _other -> :ok
        end
    end
  end

  @doc """
  Checks a single authenticated pubkey against the pubkey allowlist.

  When no allowlist is configured every pubkey is allowed.
  """
  @spec authorize_authenticated_pubkey(String.t()) :: :ok | {:error, :pubkey_not_allowed}
  def authorize_authenticated_pubkey(pubkey) when is_binary(pubkey) do
    cond do
      not allowlist_active?() -> :ok
      pubkey_on_allowlist?(pubkey) -> :ok
      true -> {:error, :pubkey_not_allowed}
    end
  end

  @doc """
  Checks a set of authenticated pubkeys against the pubkey allowlist.

  With an active allowlist at least one authenticated pubkey must be
  allowed; an empty set yields `{:error, :auth_required}`.
  """
  @spec authorize_authenticated_pubkeys(MapSet.t(String.t())) ::
          :ok | {:error, :auth_required | :pubkey_not_allowed}
  def authorize_authenticated_pubkeys(authenticated_pubkeys) do
    if allowlist_active?() do
      cond do
        MapSet.size(authenticated_pubkeys) == 0 ->
          {:error, :auth_required}

        # Query allowlist membership directly instead of going through
        # authorize_authenticated_pubkey/1, which would re-run the
        # allowlist_active?/0 storage lookup once per pubkey.
        Enum.any?(authenticated_pubkeys, &pubkey_on_allowlist?/1) ->
          :ok

        true ->
          {:error, :pubkey_not_allowed}
      end
    else
      :ok
    end
  end

  # True when at least one allowed pubkey is configured; storage errors
  # are treated as "no allowlist" (fail open).
  defp allowlist_active? do
    case Storage.moderation().has_allowed_pubkeys?(%{}) do
      {:ok, true} -> true
      _other -> false
    end
  end

  # Single allowlist membership lookup, shared by both authorize
  # functions above. Storage errors count as "not allowed".
  defp pubkey_on_allowlist?(pubkey) do
    case Storage.moderation().pubkey_allowed?(%{}, pubkey) do
      {:ok, true} -> true
      _other -> false
    end
  end

  # Normalizes the supported remote-ip representations to a string, or
  # nil when the source address is unknown or unrecognized. Tuples that
  # :inet.ntoa/1 rejects (e.g. out-of-range segments) previously crashed
  # via to_string({:error, :einval}); they now normalize to nil.
  defp normalize_ip(nil), do: nil

  defp normalize_ip(remote_ip) when is_tuple(remote_ip) and tuple_size(remote_ip) in [4, 8] do
    case :inet.ntoa(remote_ip) do
      {:error, _reason} -> nil
      address_chars -> List.to_string(address_chars)
    end
  end

  defp normalize_ip(remote_ip) when is_binary(remote_ip), do: remote_ip
  defp normalize_ip(_remote_ip), do: nil
end

View File

@@ -3,11 +3,17 @@ defmodule Parrhesia.Policy.EventPolicy do
Write/read policy checks for relay operations. Write/read policy checks for relay operations.
""" """
alias Parrhesia.API.ACL
alias Parrhesia.API.RequestContext
alias Parrhesia.Policy.ConnectionPolicy
alias Parrhesia.Storage alias Parrhesia.Storage
@type policy_error :: @type policy_error ::
:auth_required :auth_required
| :pubkey_not_allowed
| :restricted_giftwrap | :restricted_giftwrap
| :sync_read_not_allowed
| :sync_write_not_allowed
| :marmot_group_h_tag_required | :marmot_group_h_tag_required
| :marmot_group_h_values_exceeded | :marmot_group_h_values_exceeded
| :marmot_group_filter_window_too_wide | :marmot_group_filter_window_too_wide
@@ -33,15 +39,31 @@ defmodule Parrhesia.Policy.EventPolicy do
@spec authorize_read([map()], MapSet.t(String.t())) :: :ok | {:error, policy_error()} @spec authorize_read([map()], MapSet.t(String.t())) :: :ok | {:error, policy_error()}
def authorize_read(filters, authenticated_pubkeys) when is_list(filters) do def authorize_read(filters, authenticated_pubkeys) when is_list(filters) do
authorize_read(filters, authenticated_pubkeys, request_context(authenticated_pubkeys))
end
@spec authorize_read([map()], MapSet.t(String.t()), RequestContext.t()) ::
:ok | {:error, policy_error()}
def authorize_read(filters, authenticated_pubkeys, %RequestContext{} = context)
when is_list(filters) do
auth_required? = config_bool([:policies, :auth_required_for_reads], false) auth_required? = config_bool([:policies, :auth_required_for_reads], false)
cond do cond do
match?(
{:error, _reason},
ConnectionPolicy.authorize_authenticated_pubkeys(authenticated_pubkeys)
) ->
ConnectionPolicy.authorize_authenticated_pubkeys(authenticated_pubkeys)
auth_required? and MapSet.size(authenticated_pubkeys) == 0 -> auth_required? and MapSet.size(authenticated_pubkeys) == 0 ->
{:error, :auth_required} {:error, :auth_required}
giftwrap_restricted?(filters, authenticated_pubkeys) -> giftwrap_restricted?(filters, authenticated_pubkeys) ->
{:error, :restricted_giftwrap} {:error, :restricted_giftwrap}
match?({:error, _reason}, authorize_sync_reads(filters, context)) ->
authorize_sync_reads(filters, context)
true -> true ->
enforce_marmot_group_read_guardrails(filters) enforce_marmot_group_read_guardrails(filters)
end end
@@ -49,8 +71,17 @@ defmodule Parrhesia.Policy.EventPolicy do
@spec authorize_write(map(), MapSet.t(String.t())) :: :ok | {:error, policy_error()} @spec authorize_write(map(), MapSet.t(String.t())) :: :ok | {:error, policy_error()}
def authorize_write(event, authenticated_pubkeys) when is_map(event) do def authorize_write(event, authenticated_pubkeys) when is_map(event) do
authorize_write(event, authenticated_pubkeys, request_context(authenticated_pubkeys))
end
@spec authorize_write(map(), MapSet.t(String.t()), RequestContext.t()) ::
:ok | {:error, policy_error()}
def authorize_write(event, authenticated_pubkeys, %RequestContext{} = context)
when is_map(event) do
checks = [ checks = [
fn -> ConnectionPolicy.authorize_authenticated_pubkeys(authenticated_pubkeys) end,
fn -> maybe_require_auth_for_write(authenticated_pubkeys) end, fn -> maybe_require_auth_for_write(authenticated_pubkeys) end,
fn -> authorize_sync_write(event, context) end,
fn -> reject_if_pubkey_banned(event) end, fn -> reject_if_pubkey_banned(event) end,
fn -> reject_if_event_banned(event) end, fn -> reject_if_event_banned(event) end,
fn -> enforce_pow(event) end, fn -> enforce_pow(event) end,
@@ -69,10 +100,17 @@ defmodule Parrhesia.Policy.EventPolicy do
@spec error_message(policy_error()) :: String.t() @spec error_message(policy_error()) :: String.t()
def error_message(:auth_required), do: "auth-required: authentication required" def error_message(:auth_required), do: "auth-required: authentication required"
def error_message(:pubkey_not_allowed), do: "restricted: authenticated pubkey is not allowed"
def error_message(:restricted_giftwrap), def error_message(:restricted_giftwrap),
do: "restricted: giftwrap access requires recipient authentication" do: "restricted: giftwrap access requires recipient authentication"
def error_message(:sync_read_not_allowed),
do: "restricted: sync read not allowed for authenticated pubkey"
def error_message(:sync_write_not_allowed),
do: "restricted: sync write not allowed for authenticated pubkey"
def error_message(:marmot_group_h_tag_required), def error_message(:marmot_group_h_tag_required),
do: "restricted: kind 445 queries must include a #h tag" do: "restricted: kind 445 queries must include a #h tag"
@@ -143,6 +181,19 @@ defmodule Parrhesia.Policy.EventPolicy do
end end
end end
defp authorize_sync_reads(filters, %RequestContext{} = context) do
Enum.reduce_while(filters, :ok, fn filter, :ok ->
case ACL.check(:sync_read, filter, context: context) do
:ok -> {:cont, :ok}
{:error, reason} -> {:halt, {:error, reason}}
end
end)
end
defp authorize_sync_write(event, %RequestContext{} = context) do
ACL.check(:sync_write, event, context: context)
end
defp giftwrap_restricted?(filters, authenticated_pubkeys) do defp giftwrap_restricted?(filters, authenticated_pubkeys) do
if MapSet.size(authenticated_pubkeys) == 0 do if MapSet.size(authenticated_pubkeys) == 0 do
any_filter_targets_giftwrap?(filters) any_filter_targets_giftwrap?(filters)
@@ -672,4 +723,8 @@ defmodule Parrhesia.Policy.EventPolicy do
default default
end end
end end
defp request_context(authenticated_pubkeys) do
%RequestContext{authenticated_pubkeys: authenticated_pubkeys}
end
end end

View File

@@ -14,8 +14,8 @@ defmodule Parrhesia.Protocol do
| {:close, String.t()} | {:close, String.t()}
| {:auth, event()} | {:auth, event()}
| {:count, String.t(), [filter()], map()} | {:count, String.t(), [filter()], map()}
| {:neg_open, String.t(), map()} | {:neg_open, String.t(), filter(), binary()}
| {:neg_msg, String.t(), map()} | {:neg_msg, String.t(), binary()}
| {:neg_close, String.t()} | {:neg_close, String.t()}
@type relay_message :: @type relay_message ::
@@ -26,7 +26,8 @@ defmodule Parrhesia.Protocol do
| {:event, String.t(), event()} | {:event, String.t(), event()}
| {:auth, String.t()} | {:auth, String.t()}
| {:count, String.t(), map()} | {:count, String.t(), map()}
| {:neg_msg, String.t(), map()} | {:neg_msg, String.t(), String.t()}
| {:neg_err, String.t(), String.t()}
@type decode_error :: @type decode_error ::
:invalid_json :invalid_json
@@ -122,21 +123,25 @@ defmodule Parrhesia.Protocol do
defp decode_message(["AUTH", _invalid]), do: {:error, :invalid_auth} defp decode_message(["AUTH", _invalid]), do: {:error, :invalid_auth}
defp decode_message(["NEG-OPEN", subscription_id, payload]) defp decode_message(["NEG-OPEN", subscription_id, filter, initial_message])
when is_binary(subscription_id) and is_map(payload) do when is_binary(subscription_id) and is_map(filter) and is_binary(initial_message) do
if valid_subscription_id?(subscription_id) do with true <- valid_subscription_id?(subscription_id),
{:ok, {:neg_open, subscription_id, payload}} {:ok, decoded_message} <- decode_negentropy_hex(initial_message) do
{:ok, {:neg_open, subscription_id, filter, decoded_message}}
else else
{:error, :invalid_subscription_id} false -> {:error, :invalid_subscription_id}
{:error, _reason} -> {:error, :invalid_negentropy}
end end
end end
defp decode_message(["NEG-MSG", subscription_id, payload]) defp decode_message(["NEG-MSG", subscription_id, payload])
when is_binary(subscription_id) and is_map(payload) do when is_binary(subscription_id) and is_binary(payload) do
if valid_subscription_id?(subscription_id) do with true <- valid_subscription_id?(subscription_id),
{:ok, {:neg_msg, subscription_id, payload}} {:ok, decoded_payload} <- decode_negentropy_hex(payload) do
{:ok, {:neg_msg, subscription_id, decoded_payload}}
else else
{:error, :invalid_subscription_id} false -> {:error, :invalid_subscription_id}
{:error, _reason} -> {:error, :invalid_negentropy}
end end
end end
@@ -215,7 +220,19 @@ defmodule Parrhesia.Protocol do
defp relay_frame({:neg_msg, subscription_id, payload}), defp relay_frame({:neg_msg, subscription_id, payload}),
do: ["NEG-MSG", subscription_id, payload] do: ["NEG-MSG", subscription_id, payload]
defp relay_frame({:neg_err, subscription_id, reason}),
do: ["NEG-ERR", subscription_id, reason]
defp valid_subscription_id?(subscription_id) do defp valid_subscription_id?(subscription_id) do
subscription_id != "" and String.length(subscription_id) <= 64 subscription_id != "" and String.length(subscription_id) <= 64
end end
defp decode_negentropy_hex(payload) when is_binary(payload) and payload != "" do
case Base.decode16(payload, case: :mixed) do
{:ok, decoded} when decoded != <<>> -> {:ok, decoded}
_other -> {:error, :invalid_negentropy}
end
end
defp decode_negentropy_hex(_payload), do: {:error, :invalid_negentropy}
end end

View File

@@ -8,6 +8,7 @@ defmodule Parrhesia.Storage do
@default_modules [ @default_modules [
events: Parrhesia.Storage.Adapters.Postgres.Events, events: Parrhesia.Storage.Adapters.Postgres.Events,
acl: Parrhesia.Storage.Adapters.Postgres.ACL,
moderation: Parrhesia.Storage.Adapters.Postgres.Moderation, moderation: Parrhesia.Storage.Adapters.Postgres.Moderation,
groups: Parrhesia.Storage.Adapters.Postgres.Groups, groups: Parrhesia.Storage.Adapters.Postgres.Groups,
admin: Parrhesia.Storage.Adapters.Postgres.Admin admin: Parrhesia.Storage.Adapters.Postgres.Admin
@@ -19,6 +20,9 @@ defmodule Parrhesia.Storage do
@spec moderation() :: module() @spec moderation() :: module()
def moderation, do: fetch_module!(:moderation, Parrhesia.Storage.Moderation) def moderation, do: fetch_module!(:moderation, Parrhesia.Storage.Moderation)
@spec acl() :: module()
def acl, do: fetch_module!(:acl, Parrhesia.Storage.ACL)
@spec groups() :: module() @spec groups() :: module()
def groups, do: fetch_module!(:groups, Parrhesia.Storage.Groups) def groups, do: fetch_module!(:groups, Parrhesia.Storage.Groups)

View File

@@ -0,0 +1,14 @@
defmodule Parrhesia.Storage.ACL do
  @moduledoc """
  Storage callbacks for persisted ACL rules.
  """

  # Adapter-specific storage context (e.g. connection/repo options).
  @type context :: map()
  # A single ACL rule; the exact key set is adapter-defined.
  @type rule :: map()
  # Adapter-defined listing/filter options.
  @type opts :: keyword()
  @type reason :: term()

  # Persists a rule and returns the stored representation.
  @callback put_rule(context(), rule()) :: {:ok, rule()} | {:error, reason()}
  # Removes rules matching the given selector map.
  @callback delete_rule(context(), map()) :: :ok | {:error, reason()}
  # Lists stored rules, optionally narrowed by opts.
  @callback list_rules(context(), opts()) :: {:ok, [rule()]} | {:error, reason()}
end

View File

@@ -0,0 +1,157 @@
defmodule Parrhesia.Storage.Adapters.Memory.ACL do
@moduledoc """
In-memory prototype adapter for `Parrhesia.Storage.ACL`.
"""
alias Parrhesia.Storage.Adapters.Memory.Store
@behaviour Parrhesia.Storage.ACL
@impl true
# Normalizes and stores a rule; invalid shapes are rejected before any
# state is touched.
def put_rule(_context, rule) when is_map(rule) do
  case normalize_rule(rule) do
    {:ok, normalized_rule} ->
      Store.get_and_update(&put_rule_in_state(&1, normalized_rule))

    {:error, _reason} = error ->
      error
  end
end

def put_rule(_context, _rule), do: {:error, :invalid_acl_rule}
@impl true
# Deletes rules either by their generated integer id or by exact match
# on the normalized rule fields.
def delete_rule(_context, selector) when is_map(selector) do
  case normalize_delete_selector(selector) do
    {:ok, {:id, id}} ->
      reject_stored_rules(&(&1.id == id))

    {:ok, {:exact, rule}} ->
      reject_stored_rules(&same_rule?(&1, rule))

    {:error, reason} ->
      {:error, reason}
  end
end

def delete_rule(_context, _selector), do: {:error, :invalid_acl_rule}

# Drops every stored rule matching the predicate; always returns :ok.
defp reject_stored_rules(reject_fun) do
  Store.update(fn state ->
    %{state | acl_rules: Enum.reject(state.acl_rules, reject_fun)}
  end)

  :ok
end
@impl true
# Lists stored rules in insertion order, optionally narrowed by
# :principal_type, :principal, and :capability (nil means "any").
def list_rules(_context, opts) when is_list(opts) do
  wanted_type = Keyword.get(opts, :principal_type)
  wanted_principal = Keyword.get(opts, :principal)
  wanted_capability = Keyword.get(opts, :capability)

  # Rules are stored newest-first; reverse to insertion order.
  stored_rules = Store.get(fn state -> Enum.reverse(state.acl_rules) end)

  matching =
    Enum.filter(stored_rules, fn rule ->
      matches_principal_type?(rule, wanted_type) and
        matches_principal?(rule, wanted_principal) and
        matches_capability?(rule, wanted_capability)
    end)

  {:ok, matching}
end

def list_rules(_context, _opts), do: {:error, :invalid_opts}
defp put_rule_in_state(state, normalized_rule) do
case Enum.find(state.acl_rules, &same_rule?(&1, normalized_rule)) do
nil ->
next_id = state.next_acl_rule_id
persisted_rule = Map.put(normalized_rule, :id, next_id)
{{:ok, persisted_rule},
%{
state
| acl_rules: [persisted_rule | state.acl_rules],
next_acl_rule_id: next_id + 1
}}
existing_rule ->
{{:ok, existing_rule}, state}
end
end
defp matches_principal_type?(_rule, nil), do: true
defp matches_principal_type?(rule, principal_type), do: rule.principal_type == principal_type
defp matches_principal?(_rule, nil), do: true
defp matches_principal?(rule, principal), do: rule.principal == principal
defp matches_capability?(_rule, nil), do: true
defp matches_capability?(rule, capability), do: rule.capability == capability
defp same_rule?(left, right) do
left.principal_type == right.principal_type and
left.principal == right.principal and
left.capability == right.capability and
left.match == right.match
end
defp normalize_delete_selector(%{"id" => id}), do: normalize_delete_selector(%{id: id})
defp normalize_delete_selector(%{id: id}) when is_integer(id) and id > 0,
do: {:ok, {:id, id}}
defp normalize_delete_selector(selector) do
case normalize_rule(selector) do
{:ok, rule} -> {:ok, {:exact, rule}}
{:error, reason} -> {:error, reason}
end
end
defp normalize_rule(rule) when is_map(rule) do
with {:ok, principal_type} <- normalize_principal_type(fetch(rule, :principal_type)),
{:ok, principal} <- normalize_principal(fetch(rule, :principal)),
{:ok, capability} <- normalize_capability(fetch(rule, :capability)),
{:ok, match} <- normalize_match(fetch(rule, :match)) do
{:ok,
%{
principal_type: principal_type,
principal: principal,
capability: capability,
match: match
}}
end
end
defp normalize_rule(_rule), do: {:error, :invalid_acl_rule}
defp normalize_principal_type(:pubkey), do: {:ok, :pubkey}
defp normalize_principal_type("pubkey"), do: {:ok, :pubkey}
defp normalize_principal_type(_value), do: {:error, :invalid_acl_principal_type}
defp normalize_principal(value) when is_binary(value) and byte_size(value) == 64,
do: {:ok, String.downcase(value)}
defp normalize_principal(_value), do: {:error, :invalid_acl_principal}
defp normalize_capability(:sync_read), do: {:ok, :sync_read}
defp normalize_capability(:sync_write), do: {:ok, :sync_write}
defp normalize_capability("sync_read"), do: {:ok, :sync_read}
defp normalize_capability("sync_write"), do: {:ok, :sync_write}
defp normalize_capability(_value), do: {:error, :invalid_acl_capability}
defp normalize_match(match) when is_map(match) do
normalized_match =
Enum.reduce(match, %{}, fn
{key, values}, acc when is_binary(key) ->
Map.put(acc, key, values)
{key, values}, acc when is_atom(key) ->
Map.put(acc, Atom.to_string(key), values)
_entry, acc ->
acc
end)
{:ok, normalized_match}
end
defp normalize_match(_match), do: {:error, :invalid_acl_match}
defp fetch(map, key) do
Map.get(map, key) || Map.get(map, Atom.to_string(key))
end
end

View File

@@ -55,6 +55,24 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
end end
end end
@impl true
# Returns lightweight %{created_at, id} refs (id as raw bytes) for events
# matching `filters`, sorted ascending by (created_at, id), deduplicated,
# and capped by the optional :limit opt. Dedup mirrors the Postgres
# adapter, which groups its UNION ALL by (created_at, id) so an event
# matched by several overlapping filters appears once.
def query_event_refs(context, filters, opts) do
  with {:ok, events} <- query(context, filters, opts) do
    refs =
      events
      |> Enum.map(fn event ->
        %{
          created_at: Map.fetch!(event, "created_at"),
          id: Base.decode16!(Map.fetch!(event, "id"), case: :mixed)
        }
      end)
      # Drop duplicates before sorting/limiting, in case query/3 returned
      # the same event for more than one filter.
      |> Enum.uniq()
      |> Enum.sort(&(compare_event_refs(&1, &2) != :gt))
      |> maybe_limit_event_refs(opts)

    {:ok, refs}
  end
end
@impl true @impl true
def count(context, filters, opts) do def count(context, filters, opts) do
with {:ok, events} <- query(context, filters, opts) do with {:ok, events} <- query(context, filters, opts) do
@@ -189,4 +207,21 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
_tag -> false _tag -> false
end) end)
end end
# Ascending (created_at, id) ordering for event refs; created_at is the
# primary key of the comparison, id breaks ties.
defp compare_event_refs(left, right) do
  left_key = {left.created_at, left.id}
  right_key = {right.created_at, right.id}

  cond do
    left_key < right_key -> :lt
    left_key > right_key -> :gt
    true -> :eq
  end
end

# Applies the optional :limit opt; anything other than a positive
# integer leaves the list untouched.
defp maybe_limit_event_refs(refs, opts) do
  limit = Keyword.get(opts, :limit)

  if is_integer(limit) and limit > 0 do
    Enum.take(refs, limit)
  else
    refs
  end
end
end end

View File

@@ -33,6 +33,11 @@ defmodule Parrhesia.Storage.Adapters.Memory.Moderation do
{:ok, Store.get(fn state -> MapSet.member?(state.allowed_pubkeys, pubkey) end)} {:ok, Store.get(fn state -> MapSet.member?(state.allowed_pubkeys, pubkey) end)}
end end
@impl true
# True when at least one pubkey is present in the in-memory allow-list.
# NOTE(review): presumably callers use this to decide whether allow-list
# enforcement is active — confirm at the call site.
def has_allowed_pubkeys?(_context) do
  {:ok, Store.get(fn state -> MapSet.size(state.allowed_pubkeys) > 0 end)}
end
@impl true @impl true
def ban_event(_context, event_id), do: update_ban_set(:events, event_id, :add) def ban_event(_context, event_id), do: update_ban_set(:events, event_id, :add)

View File

@@ -10,6 +10,8 @@ defmodule Parrhesia.Storage.Adapters.Memory.Store do
deleted: MapSet.new(), deleted: MapSet.new(),
bans: %{pubkeys: MapSet.new(), events: MapSet.new(), ips: MapSet.new()}, bans: %{pubkeys: MapSet.new(), events: MapSet.new(), ips: MapSet.new()},
allowed_pubkeys: MapSet.new(), allowed_pubkeys: MapSet.new(),
acl_rules: [],
next_acl_rule_id: 1,
groups: %{}, groups: %{},
roles: %{}, roles: %{},
audit_logs: [] audit_logs: []

View File

@@ -0,0 +1,273 @@
defmodule Parrhesia.Storage.Adapters.Postgres.ACL do
  @moduledoc """
  PostgreSQL-backed implementation for `Parrhesia.Storage.ACL`.

  Rules live in the `acl_rules` table. Principals are stored as raw
  32-byte binaries and rendered back as lowercase hex on read; the
  principal_type/capability enums are stored as strings and exposed as
  atoms.
  """

  import Ecto.Query

  alias Parrhesia.Repo

  @behaviour Parrhesia.Storage.ACL

  @impl true
  # Idempotent put: returns the already-stored rule when an identical one
  # exists, otherwise inserts a new row.
  # NOTE(review): find-then-insert is not atomic; concurrent puts of the
  # same rule could create duplicates unless a unique index backs this —
  # confirm the acl_rules schema.
  def put_rule(_context, rule) when is_map(rule) do
    with {:ok, normalized_rule} <- normalize_rule(rule) do
      normalized_rule
      |> find_matching_rule()
      |> maybe_insert_rule(normalized_rule)
    end
  end

  def put_rule(_context, _rule), do: {:error, :invalid_acl_rule}

  defp maybe_insert_rule(nil, normalized_rule), do: insert_rule(normalized_rule)
  defp maybe_insert_rule(existing_rule, _normalized_rule), do: {:ok, existing_rule}

  @impl true
  # Deletes by id or by exact (principal_type, principal, capability,
  # match) equality; returns :ok even when no rows matched.
  def delete_rule(_context, selector) when is_map(selector) do
    case normalize_delete_selector(selector) do
      {:ok, {:id, id}} ->
        query = from(rule in "acl_rules", where: rule.id == ^id)
        {_deleted, _result} = Repo.delete_all(query)
        :ok

      {:ok, {:exact, rule}} ->
        query =
          from(stored_rule in "acl_rules",
            where:
              stored_rule.principal_type == ^rule.principal_type and
                stored_rule.principal == ^rule.principal and
                stored_rule.capability == ^rule.capability and
                stored_rule.match == ^rule.match
          )

        {_deleted, _result} = Repo.delete_all(query)
        :ok

      {:error, reason} ->
        {:error, reason}
    end
  end

  def delete_rule(_context, _selector), do: {:error, :invalid_acl_rule}

  @impl true
  # Lists rules ordered by principal_type/principal/capability/id,
  # optionally narrowed by the :principal_type, :principal and
  # :capability opts.
  def list_rules(_context, opts) when is_list(opts) do
    query =
      from(rule in "acl_rules",
        order_by: [
          asc: rule.principal_type,
          asc: rule.principal,
          asc: rule.capability,
          asc: rule.id
        ],
        select: %{
          id: rule.id,
          principal_type: rule.principal_type,
          principal: rule.principal,
          capability: rule.capability,
          match: rule.match,
          inserted_at: rule.inserted_at
        }
      )
      |> maybe_filter_principal_type(Keyword.get(opts, :principal_type))
      |> maybe_filter_principal(Keyword.get(opts, :principal))
      |> maybe_filter_capability(Keyword.get(opts, :capability))

    {:ok, Enum.map(Repo.all(query), &normalize_persisted_rule/1)}
  end

  def list_rules(_context, _opts), do: {:error, :invalid_opts}

  defp maybe_filter_principal_type(query, nil), do: query

  defp maybe_filter_principal_type(query, principal_type) when is_atom(principal_type) do
    maybe_filter_principal_type(query, Atom.to_string(principal_type))
  end

  defp maybe_filter_principal_type(query, principal_type) when is_binary(principal_type) do
    where(query, [rule], rule.principal_type == ^principal_type)
  end

  defp maybe_filter_principal_type(query, _principal_type), do: query

  defp maybe_filter_principal(query, nil), do: query

  defp maybe_filter_principal(query, principal) when is_binary(principal) do
    # An undecodable principal can match nothing, so force an empty
    # result set rather than erroring out of list_rules.
    case decode_hex_or_binary(principal, 32, :invalid_acl_principal) do
      {:ok, decoded_principal} -> where(query, [rule], rule.principal == ^decoded_principal)
      {:error, _reason} -> where(query, [rule], false)
    end
  end

  defp maybe_filter_principal(query, _principal), do: query

  defp maybe_filter_capability(query, nil), do: query

  defp maybe_filter_capability(query, capability) when is_atom(capability) do
    maybe_filter_capability(query, Atom.to_string(capability))
  end

  defp maybe_filter_capability(query, capability) when is_binary(capability) do
    where(query, [rule], rule.capability == ^capability)
  end

  defp maybe_filter_capability(query, _capability), do: query

  # Looks up a stored rule identical to the normalized one; nil when absent.
  defp find_matching_rule(normalized_rule) do
    query =
      from(stored_rule in "acl_rules",
        where:
          stored_rule.principal_type == ^normalized_rule.principal_type and
            stored_rule.principal == ^normalized_rule.principal and
            stored_rule.capability == ^normalized_rule.capability and
            stored_rule.match == ^normalized_rule.match,
        limit: 1,
        select: %{
          id: stored_rule.id,
          principal_type: stored_rule.principal_type,
          principal: stored_rule.principal,
          capability: stored_rule.capability,
          match: stored_rule.match,
          inserted_at: stored_rule.inserted_at
        }
      )

    case Repo.one(query) do
      nil -> nil
      stored_rule -> normalize_persisted_rule(stored_rule)
    end
  end

  defp insert_rule(normalized_rule) do
    now = DateTime.utc_now() |> DateTime.truncate(:microsecond)

    row = %{
      principal_type: normalized_rule.principal_type,
      principal: normalized_rule.principal,
      capability: normalized_rule.capability,
      match: normalized_rule.match,
      inserted_at: now
    }

    # returning: pulls the generated id (and canonical timestamp) back so
    # the caller sees the persisted shape.
    case Repo.insert_all("acl_rules", [row], returning: [:id, :inserted_at]) do
      {1, [inserted_row]} ->
        {:ok, normalize_persisted_rule(Map.merge(row, Map.new(inserted_row)))}

      _other ->
        {:error, :acl_rule_insert_failed}
    end
  end

  # Converts a stored row into the external shape: atoms for the closed
  # enums, lowercase hex for the principal.
  defp normalize_persisted_rule(rule) do
    %{
      id: rule.id,
      principal_type: normalize_principal_type(rule.principal_type),
      principal: Base.encode16(rule.principal, case: :lower),
      capability: normalize_capability(rule.capability),
      match: normalize_match(rule.match),
      inserted_at: rule.inserted_at
    }
  end

  defp normalize_delete_selector(%{"id" => id}), do: normalize_delete_selector(%{id: id})

  defp normalize_delete_selector(%{id: id}) when is_integer(id) and id > 0,
    do: {:ok, {:id, id}}

  defp normalize_delete_selector(selector) do
    case normalize_rule(selector) do
      {:ok, normalized_rule} -> {:ok, {:exact, normalized_rule}}
      {:error, reason} -> {:error, reason}
    end
  end

  # Validates an incoming rule and converts it to its storage
  # representation (string enums, raw-binary principal).
  defp normalize_rule(rule) when is_map(rule) do
    with {:ok, principal_type} <- normalize_principal_type_value(fetch(rule, :principal_type)),
         {:ok, principal} <-
           decode_hex_or_binary(fetch(rule, :principal), 32, :invalid_acl_principal),
         {:ok, capability} <- normalize_capability_value(fetch(rule, :capability)),
         {:ok, match} <- normalize_match_value(fetch(rule, :match)) do
      {:ok,
       %{
         principal_type: principal_type,
         principal: principal,
         capability: capability,
         match: match
       }}
    end
  end

  defp normalize_rule(_rule), do: {:error, :invalid_acl_rule}

  # Storage (string) -> external (atom) enum mappings; unknown values
  # pass through unchanged.
  defp normalize_principal_type("pubkey"), do: :pubkey
  defp normalize_principal_type(principal_type), do: principal_type

  defp normalize_capability("sync_read"), do: :sync_read
  defp normalize_capability("sync_write"), do: :sync_write
  defp normalize_capability(capability), do: capability

  # External (atom or string) -> storage (string) enum mappings; anything
  # outside the closed set is rejected.
  defp normalize_principal_type_value(:pubkey), do: {:ok, "pubkey"}
  defp normalize_principal_type_value("pubkey"), do: {:ok, "pubkey"}
  defp normalize_principal_type_value(_principal_type), do: {:error, :invalid_acl_principal_type}

  defp normalize_capability_value(:sync_read), do: {:ok, "sync_read"}
  defp normalize_capability_value(:sync_write), do: {:ok, "sync_write"}
  defp normalize_capability_value("sync_read"), do: {:ok, "sync_read"}
  defp normalize_capability_value("sync_write"), do: {:ok, "sync_write"}
  defp normalize_capability_value(_capability), do: {:error, :invalid_acl_capability}

  # Stringifies atom keys and drops entries with other key types, then
  # applies the value-level normalization below.
  defp normalize_match_value(match) when is_map(match) do
    normalized_match =
      Enum.reduce(match, %{}, fn
        {key, values}, acc when is_binary(key) ->
          Map.put(acc, key, values)

        {key, values}, acc when is_atom(key) ->
          Map.put(acc, Atom.to_string(key), values)

        _entry, acc ->
          acc
      end)

    {:ok, normalize_match(normalized_match)}
  end

  defp normalize_match_value(_match), do: {:error, :invalid_acl_match}

  # Dedupes list values per key; non-map input collapses to %{}.
  defp normalize_match(match) when is_map(match) do
    Enum.reduce(match, %{}, fn
      {key, values}, acc when is_binary(key) and is_list(values) ->
        Map.put(acc, key, Enum.uniq(values))

      {key, value}, acc when is_binary(key) ->
        Map.put(acc, key, value)

      _entry, acc ->
        acc
    end)
  end

  defp normalize_match(_match), do: %{}

  # Reads `key` from a map keyed by atoms or strings.
  defp fetch(map, key) do
    Map.get(map, key) || Map.get(map, Atom.to_string(key))
  end

  # Accepts either a raw binary of `expected_bytes` or its hex encoding
  # (any case); returns the raw binary.
  defp decode_hex_or_binary(value, expected_bytes, _reason)
       when is_binary(value) and byte_size(value) == expected_bytes,
       do: {:ok, value}

  defp decode_hex_or_binary(value, expected_bytes, reason) when is_binary(value) do
    if byte_size(value) == expected_bytes * 2 do
      case Base.decode16(value, case: :mixed) do
        {:ok, decoded} -> {:ok, decoded}
        :error -> {:error, reason}
      end
    else
      {:error, reason}
    end
  end

  defp decode_hex_or_binary(_value, _expected_bytes, reason), do: {:error, reason}
end

View File

@@ -20,6 +20,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Admin do
case method_name do case method_name do
"ping" -> {:ok, %{"status" => "ok"}} "ping" -> {:ok, %{"status" => "ok"}}
"stats" -> {:ok, relay_stats()} "stats" -> {:ok, relay_stats()}
"supportedmethods" -> {:ok, %{"methods" => supported_methods()}}
"list_audit_logs" -> list_audit_logs(%{}, audit_list_opts(params)) "list_audit_logs" -> list_audit_logs(%{}, audit_list_opts(params))
_other -> execute_moderation_method(moderation, method_name, params) _other -> execute_moderation_method(moderation, method_name, params)
end end
@@ -84,15 +85,36 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Admin do
defp relay_stats do defp relay_stats do
events_count = Repo.aggregate("events", :count, :id) events_count = Repo.aggregate("events", :count, :id)
banned_pubkeys = Repo.aggregate("banned_pubkeys", :count, :pubkey) banned_pubkeys = Repo.aggregate("banned_pubkeys", :count, :pubkey)
allowed_pubkeys = Repo.aggregate("allowed_pubkeys", :count, :pubkey)
blocked_ips = Repo.aggregate("blocked_ips", :count, :ip) blocked_ips = Repo.aggregate("blocked_ips", :count, :ip)
acl_rules = Repo.aggregate("acl_rules", :count, :id)
%{ %{
"events" => events_count, "events" => events_count,
"banned_pubkeys" => banned_pubkeys, "banned_pubkeys" => banned_pubkeys,
"allowed_pubkeys" => allowed_pubkeys,
"acl_rules" => acl_rules,
"blocked_ips" => blocked_ips "blocked_ips" => blocked_ips
} }
end end
# Every admin method name handled by the dispatch above, alphabetically
# sorted; served to clients via the "supportedmethods" call.
defp supported_methods do
  ~w(
    allow_pubkey
    ban_event
    ban_pubkey
    block_ip
    disallow_pubkey
    list_audit_logs
    ping
    stats
    supportedmethods
    unban_event
    unban_pubkey
    unblock_ip
  )
end
defp execute_moderation_method(moderation, "ban_pubkey", params), defp execute_moderation_method(moderation, "ban_pubkey", params),
do: execute_pubkey_method(fn ctx, value -> moderation.ban_pubkey(ctx, value) end, params) do: execute_pubkey_method(fn ctx, value -> moderation.ban_pubkey(ctx, value) end, params)

View File

@@ -94,21 +94,21 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
def query(_context, _filters, _opts), do: {:error, :invalid_opts} def query(_context, _filters, _opts), do: {:error, :invalid_opts}
@impl true
# Returns sorted %{created_at, id} refs for events matching `filters`.
# The :now opt overrides the expiry cutoff (defaults to the current unix
# time in seconds), which keeps the query deterministic for tests.
def query_event_refs(_context, filters, opts) when is_list(opts) do
  with :ok <- Filter.validate_filters(filters) do
    now = Keyword.get(opts, :now, System.system_time(:second))
    {:ok, fetch_event_refs(filters, now, opts)}
  end
end

def query_event_refs(_context, _filters, _opts), do: {:error, :invalid_opts}
@impl true @impl true
def count(_context, filters, opts) when is_list(opts) do def count(_context, filters, opts) when is_list(opts) do
with :ok <- Filter.validate_filters(filters) do with :ok <- Filter.validate_filters(filters) do
now = Keyword.get(opts, :now, System.system_time(:second)) now = Keyword.get(opts, :now, System.system_time(:second))
{:ok, count_events(filters, now, opts)}
total_count =
filters
|> event_id_union_query_for_filters(now, opts)
|> subquery()
|> then(fn union_query ->
from(event in union_query, select: count(event.id, :distinct))
end)
|> Repo.one()
{:ok, total_count}
end end
end end
@@ -607,92 +607,194 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
end end
defp event_query_for_filter(filter, now, opts) do defp event_query_for_filter(filter, now, opts) do
base_query = {base_query, remaining_tag_filters} = event_source_query(filter, now)
from(event in "events",
where: is_nil(event.deleted_at) and (is_nil(event.expires_at) or event.expires_at > ^now),
order_by: [desc: event.created_at, asc: event.id],
select: %{
id: event.id,
pubkey: event.pubkey,
created_at: event.created_at,
kind: event.kind,
tags: event.tags,
content: event.content,
sig: event.sig
}
)
query = base_query
base_query |> apply_common_event_filters(filter, remaining_tag_filters, opts)
|> maybe_filter_ids(Map.get(filter, "ids")) |> order_by([event: event], desc: event.created_at, asc: event.id)
|> maybe_filter_authors(Map.get(filter, "authors")) |> select([event: event], %{
|> maybe_filter_kinds(Map.get(filter, "kinds")) id: event.id,
|> maybe_filter_since(Map.get(filter, "since")) pubkey: event.pubkey,
|> maybe_filter_until(Map.get(filter, "until")) created_at: event.created_at,
|> maybe_filter_search(Map.get(filter, "search")) kind: event.kind,
|> filter_by_tags(filter) tags: event.tags,
|> maybe_restrict_giftwrap_access(filter, opts) content: event.content,
sig: event.sig
maybe_limit_query(query, effective_filter_limit(filter, opts)) })
|> maybe_limit_query(effective_filter_limit(filter, opts))
end end
defp event_id_query_for_filter(filter, now, opts) do defp event_id_query_for_filter(filter, now, opts) do
from(event in "events", {base_query, remaining_tag_filters} = event_source_query(filter, now)
where: is_nil(event.deleted_at) and (is_nil(event.expires_at) or event.expires_at > ^now),
select: event.id base_query
) |> apply_common_event_filters(filter, remaining_tag_filters, opts)
|> maybe_filter_ids(Map.get(filter, "ids")) |> select([event: event], event.id)
|> maybe_filter_authors(Map.get(filter, "authors"))
|> maybe_filter_kinds(Map.get(filter, "kinds"))
|> maybe_filter_since(Map.get(filter, "since"))
|> maybe_filter_until(Map.get(filter, "until"))
|> maybe_filter_search(Map.get(filter, "search"))
|> filter_by_tags(filter)
|> maybe_restrict_giftwrap_access(filter, opts)
end end
defp event_id_union_query_for_filters([], now, _opts) do defp event_id_distinct_union_query_for_filters([], now, _opts) do
from(event in "events", from(event in "events",
where: event.created_at > ^now and event.created_at < ^now, where: event.created_at > ^now and event.created_at < ^now,
select: event.id select: event.id
) )
end end
defp event_id_union_query_for_filters([first_filter | rest_filters], now, opts) do defp event_id_distinct_union_query_for_filters([first_filter | rest_filters], now, opts) do
Enum.reduce(rest_filters, event_id_query_for_filter(first_filter, now, opts), fn filter, Enum.reduce(rest_filters, event_id_query_for_filter(first_filter, now, opts), fn filter,
acc -> acc ->
union_all(acc, ^event_id_query_for_filter(filter, now, opts)) union(acc, ^event_id_query_for_filter(filter, now, opts))
end) end)
end end
# Per-filter ref query: same filtering pipeline as the full event
# queries, but selects only {created_at, id}, ordered ascending, with
# the filter's own effective limit applied.
defp event_ref_query_for_filter(filter, now, opts) do
  {base_query, remaining_tag_filters} = event_source_query(filter, now)

  base_query
  |> apply_common_event_filters(filter, remaining_tag_filters, opts)
  |> order_by([event: event], asc: event.created_at, asc: event.id)
  |> select([event: event], %{
    created_at: event.created_at,
    id: event.id
  })
  |> maybe_limit_query(effective_filter_limit(filter, opts))
end

# With no filters nothing can match; the contradictory created_at
# condition yields an always-empty query of the right row shape.
defp event_ref_union_query_for_filters([], now, _opts) do
  from(event in "events",
    where: event.created_at > ^now and event.created_at < ^now,
    select: %{created_at: event.created_at, id: event.id}
  )
end

# UNION ALL of the per-filter ref queries; duplicates across filters are
# collapsed later by the group_by in fetch_event_refs/3.
defp event_ref_union_query_for_filters([first_filter | rest_filters], now, opts) do
  Enum.reduce(rest_filters, event_ref_query_for_filter(first_filter, now, opts), fn filter,
                                                                                    acc ->
    union_all(acc, ^event_ref_query_for_filter(filter, now, opts))
  end)
end

# Single filter: no union needed. The outer :limit opt is applied on top
# of the per-filter limit already inside the query.
defp fetch_event_refs([filter], now, opts) do
  filter
  |> event_ref_query_for_filter(now, opts)
  |> maybe_limit_query(Keyword.get(opts, :limit))
  |> Repo.all()
end

# Multiple filters: wrap the UNION ALL in a subquery, then group by
# (created_at, id) so an event matched by several filters appears once,
# re-sort, and apply the overall :limit opt.
defp fetch_event_refs(filters, now, opts) do
  filters
  |> event_ref_union_query_for_filters(now, opts)
  |> subquery()
  |> then(fn union_query ->
    from(ref in union_query,
      group_by: [ref.created_at, ref.id],
      order_by: [asc: ref.created_at, asc: ref.id],
      select: %{created_at: ref.created_at, id: ref.id}
    )
  end)
  |> maybe_limit_query(Keyword.get(opts, :limit))
  |> Repo.all()
end

# COUNT for a single filter runs directly over its id query.
defp count_events([filter], now, opts) do
  filter
  |> event_id_query_for_filter(now, opts)
  |> subquery()
  |> then(fn query ->
    from(event in query, select: count())
  end)
  |> Repo.one()
end

# Multiple filters use UNION (distinct) so an event matched by several
# filters is counted exactly once.
defp count_events(filters, now, opts) do
  filters
  |> event_id_distinct_union_query_for_filters(now, opts)
  |> subquery()
  |> then(fn union_query ->
    from(event in union_query, select: count())
  end)
  |> Repo.one()
end
# Chooses the scan source for a filter. Without tag filters, scan
# "events" directly; with tag filters, drive the query from "event_tags"
# on one chosen tag and return the remaining tag filters for the caller
# to apply as EXISTS conditions. Both branches exclude deleted and
# expired events and bind the events table under the :event alias.
defp event_source_query(filter, now) do
  tag_filters = tag_filters(filter)

  case primary_tag_filter(tag_filters) do
    nil ->
      {from(event in "events",
         as: :event,
         where:
           is_nil(event.deleted_at) and
             (is_nil(event.expires_at) or event.expires_at > ^now)
       ), []}

    {tag_name, values} = primary_tag_filter ->
      remaining_tag_filters = List.delete(tag_filters, primary_tag_filter)

      # distinct guards against an event that carries the same tag name
      # with several matching values being returned more than once.
      {from(tag in "event_tags",
         as: :primary_tag,
         where: tag.name == ^tag_name and tag.value in ^values,
         join: event in "events",
         as: :event,
         on: event.created_at == tag.event_created_at and event.id == tag.event_id,
         where:
           is_nil(event.deleted_at) and
             (is_nil(event.expires_at) or event.expires_at > ^now),
         distinct: [event.created_at, event.id]
       ), remaining_tag_filters}
  end
end

# Shared non-tag filter pipeline applied by every event query variant.
defp apply_common_event_filters(query, filter, remaining_tag_filters, opts) do
  query
  |> maybe_filter_ids(Map.get(filter, "ids"))
  |> maybe_filter_authors(Map.get(filter, "authors"))
  |> maybe_filter_kinds(Map.get(filter, "kinds"))
  |> maybe_filter_since(Map.get(filter, "since"))
  |> maybe_filter_until(Map.get(filter, "until"))
  |> maybe_filter_search(Map.get(filter, "search"))
  |> filter_by_tag_filters(remaining_tag_filters)
  |> maybe_restrict_giftwrap_access(filter, opts)
end

defp primary_tag_filter([]), do: nil

# Prefers "h"/"i" tags as the driving index, otherwise falls back to the
# first tag filter. NOTE(review): presumably h/i are the most selective
# tags for this workload — confirm against the schema/indexes.
defp primary_tag_filter(tag_filters) do
  Enum.find(tag_filters, fn {tag_name, _values} -> tag_name in ["h", "i"] end) ||
    List.first(tag_filters)
end
defp maybe_filter_ids(query, nil), do: query defp maybe_filter_ids(query, nil), do: query
defp maybe_filter_ids(query, ids) do defp maybe_filter_ids(query, ids) do
decoded_ids = decode_hex_list(ids, :lower) decoded_ids = decode_hex_list(ids, :lower)
where(query, [event], event.id in ^decoded_ids) where(query, [event: event], event.id in ^decoded_ids)
end end
defp maybe_filter_authors(query, nil), do: query defp maybe_filter_authors(query, nil), do: query
defp maybe_filter_authors(query, authors) do defp maybe_filter_authors(query, authors) do
decoded_authors = decode_hex_list(authors, :lower) decoded_authors = decode_hex_list(authors, :lower)
where(query, [event], event.pubkey in ^decoded_authors) where(query, [event: event], event.pubkey in ^decoded_authors)
end end
defp maybe_filter_kinds(query, nil), do: query defp maybe_filter_kinds(query, nil), do: query
defp maybe_filter_kinds(query, kinds), do: where(query, [event], event.kind in ^kinds) defp maybe_filter_kinds(query, kinds), do: where(query, [event: event], event.kind in ^kinds)
defp maybe_filter_since(query, nil), do: query defp maybe_filter_since(query, nil), do: query
defp maybe_filter_since(query, since), do: where(query, [event], event.created_at >= ^since)
defp maybe_filter_since(query, since),
do: where(query, [event: event], event.created_at >= ^since)
defp maybe_filter_until(query, nil), do: query defp maybe_filter_until(query, nil), do: query
defp maybe_filter_until(query, until), do: where(query, [event], event.created_at <= ^until)
defp maybe_filter_until(query, until),
do: where(query, [event: event], event.created_at <= ^until)
defp maybe_filter_search(query, nil), do: query defp maybe_filter_search(query, nil), do: query
defp maybe_filter_search(query, search) when is_binary(search) and search != "" do defp maybe_filter_search(query, search) when is_binary(search) and search != "" do
escaped_search = escape_like_pattern(search) escaped_search = escape_like_pattern(search)
where(query, [event], ilike(event.content, ^"%#{escaped_search}%")) where(query, [event: event], ilike(event.content, ^"%#{escaped_search}%"))
end end
defp maybe_filter_search(query, _search), do: query defp maybe_filter_search(query, _search), do: query
@@ -704,13 +806,11 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|> String.replace("_", "\\_") |> String.replace("_", "\\_")
end end
defp filter_by_tags(query, filter) do defp filter_by_tag_filters(query, tag_filters) do
filter Enum.reduce(tag_filters, query, fn {tag_name, values}, acc ->
|> tag_filters()
|> Enum.reduce(query, fn {tag_name, values}, acc ->
where( where(
acc, acc,
[event], [event: event],
fragment( fragment(
"EXISTS (SELECT 1 FROM event_tags AS tag WHERE tag.event_created_at = ? AND tag.event_id = ? AND tag.name = ? AND tag.value = ANY(?))", "EXISTS (SELECT 1 FROM event_tags AS tag WHERE tag.event_created_at = ? AND tag.event_id = ? AND tag.name = ? AND tag.value = ANY(?))",
event.created_at, event.created_at,
@@ -740,7 +840,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
targets_giftwrap?(filter) and requester_pubkeys != [] -> targets_giftwrap?(filter) and requester_pubkeys != [] ->
where( where(
query, query,
[event], [event: event],
fragment( fragment(
"EXISTS (SELECT 1 FROM event_tags AS tag WHERE tag.event_created_at = ? AND tag.event_id = ? AND tag.name = 'p' AND tag.value = ANY(?))", "EXISTS (SELECT 1 FROM event_tags AS tag WHERE tag.event_created_at = ? AND tag.event_id = ? AND tag.name = 'p' AND tag.value = ANY(?))",
event.created_at, event.created_at,
@@ -750,7 +850,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
) )
targets_giftwrap?(filter) -> targets_giftwrap?(filter) ->
where(query, [_event], false) where(query, [event: _event], false)
true -> true ->
query query

View File

@@ -67,6 +67,11 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
end end
end end
@impl true
# True when the allowed_pubkeys scope (cache or DB) holds at least one
# entry.
def has_allowed_pubkeys?(_context) do
  {:ok, scope_populated?(:allowed_pubkeys)}
end
@impl true @impl true
def ban_event(_context, event_id) do def ban_event(_context, event_id) do
with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id), with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id),
@@ -163,6 +168,24 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
end end
end end
# Checks whether any entry exists for `scope`, preferring the ETS
# moderation cache when enabled and available, otherwise asking the DB.
defp scope_populated?(scope) do
  {table, field} = cache_scope_source!(scope)

  if moderation_cache_enabled?() do
    case cache_table_ref() do
      :undefined ->
        # Cache table is not up; fall back to the database.
        scope_populated_db?(table, field)

      cache_table ->
        ensure_cache_scope_loaded(scope, cache_table)
        # Match spec counts {{:member, scope, _}, true} entries; any hit
        # means the scope is populated.
        :ets.select_count(cache_table, [{{{:member, scope, :_}, true}, [], [true]}]) > 0
    end
  else
    scope_populated_db?(table, field)
  end
end
defp ensure_cache_scope_loaded(scope, table) do defp ensure_cache_scope_loaded(scope, table) do
loaded_key = cache_loaded_key(scope) loaded_key = cache_loaded_key(scope)
@@ -246,6 +269,16 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
Repo.one(query) == 1 Repo.one(query) == 1
end end
# True when `table` holds at least one row; fetches a single `field`
# value with LIMIT 1 rather than counting the whole table.
defp scope_populated_db?(table, field) do
  probe = from(row in table, select: field(row, ^field), limit: 1)

  case Repo.one(probe) do
    nil -> false
    _value -> true
  end
end
defp normalize_hex_or_binary(value, expected_bytes, _reason) defp normalize_hex_or_binary(value, expected_bytes, _reason)
when is_binary(value) and byte_size(value) == expected_bytes, when is_binary(value) and byte_size(value) == expected_bytes,
do: {:ok, value} do: {:ok, value}

View File

@@ -7,6 +7,7 @@ defmodule Parrhesia.Storage.Events do
@type event_id :: binary() @type event_id :: binary()
@type event :: map() @type event :: map()
@type filter :: map() @type filter :: map()
@type event_ref :: %{created_at: non_neg_integer(), id: binary()}
@type query_opts :: keyword() @type query_opts :: keyword()
@type count_result :: non_neg_integer() | %{optional(atom()) => term()} @type count_result :: non_neg_integer() | %{optional(atom()) => term()}
@type reason :: term() @type reason :: term()
@@ -14,6 +15,8 @@ defmodule Parrhesia.Storage.Events do
@callback put_event(context(), event()) :: {:ok, event()} | {:error, reason()} @callback put_event(context(), event()) :: {:ok, event()} | {:error, reason()}
@callback get_event(context(), event_id()) :: {:ok, event() | nil} | {:error, reason()} @callback get_event(context(), event_id()) :: {:ok, event() | nil} | {:error, reason()}
@callback query(context(), [filter()], query_opts()) :: {:ok, [event()]} | {:error, reason()} @callback query(context(), [filter()], query_opts()) :: {:ok, [event()]} | {:error, reason()}
@callback query_event_refs(context(), [filter()], query_opts()) ::
{:ok, [event_ref()]} | {:error, reason()}
@callback count(context(), [filter()], query_opts()) :: @callback count(context(), [filter()], query_opts()) ::
{:ok, count_result()} | {:error, reason()} {:ok, count_result()} | {:error, reason()}
@callback delete_by_request(context(), event()) :: {:ok, non_neg_integer()} | {:error, reason()} @callback delete_by_request(context(), event()) :: {:ok, non_neg_integer()} | {:error, reason()}

View File

@@ -16,6 +16,7 @@ defmodule Parrhesia.Storage.Moderation do
@callback allow_pubkey(context(), pubkey()) :: :ok | {:error, reason()} @callback allow_pubkey(context(), pubkey()) :: :ok | {:error, reason()}
@callback disallow_pubkey(context(), pubkey()) :: :ok | {:error, reason()} @callback disallow_pubkey(context(), pubkey()) :: :ok | {:error, reason()}
@callback pubkey_allowed?(context(), pubkey()) :: {:ok, boolean()} | {:error, reason()} @callback pubkey_allowed?(context(), pubkey()) :: {:ok, boolean()} | {:error, reason()}
@callback has_allowed_pubkeys?(context()) :: {:ok, boolean()} | {:error, reason()}
@callback ban_event(context(), event_id()) :: :ok | {:error, reason()} @callback ban_event(context(), event_id()) :: :ok | {:error, reason()}
@callback unban_event(context(), event_id()) :: :ok | {:error, reason()} @callback unban_event(context(), event_id()) :: :ok | {:error, reason()}

View File

@@ -13,7 +13,9 @@ defmodule Parrhesia.Subscriptions.Supervisor do
def init(_init_arg) do def init(_init_arg) do
children = children =
[ [
{Parrhesia.Subscriptions.Index, name: Parrhesia.Subscriptions.Index} {Parrhesia.Subscriptions.Index, name: Parrhesia.Subscriptions.Index},
{Registry, keys: :unique, name: Parrhesia.API.Stream.Registry},
{DynamicSupervisor, strategy: :one_for_one, name: Parrhesia.API.Stream.Supervisor}
] ++ ] ++
negentropy_children() ++ [{Parrhesia.Fanout.MultiNode, name: Parrhesia.Fanout.MultiNode}] negentropy_children() ++ [{Parrhesia.Fanout.MultiNode, name: Parrhesia.Fanout.MultiNode}]

View File

@@ -0,0 +1,60 @@
defmodule Parrhesia.Sync.RelayInfoClient do
  @moduledoc false

  alias Parrhesia.Sync.TLS

  @doc """
  Fetches the remote relay's info document and checks that its
  advertised `pubkey` matches the locally configured `auth_pubkey`.

  `opts[:request_fun]` may inject a 2-arity HTTP client for tests.
  """
  @spec verify_remote_identity(map(), keyword()) :: :ok | {:error, term()}
  def verify_remote_identity(server, opts \\ []) do
    request_fun = Keyword.get(opts, :request_fun, &default_request/2)

    with {:ok, response} <- request_fun.(relay_info_url(server.url), request_opts(server)),
         {:ok, pubkey} <- extract_pubkey(response) do
      # NOTE(review): `pubkey` is lowercased on extraction; this assumes
      # `server.auth_pubkey` is stored lowercase too — confirm upstream.
      if pubkey == server.auth_pubkey do
        :ok
      else
        {:error, :remote_identity_mismatch}
      end
    end
  end

  # Plain GET with the nostr+json accept header; body is left undecoded
  # so extract_pubkey/1 can JSON-decode it itself.
  defp default_request(url, opts) do
    case Req.get(
           url: url,
           headers: [{"accept", "application/nostr+json"}],
           decode_body: false,
           connect_options: opts
         ) do
      {:ok, response} -> {:ok, response}
      {:error, reason} -> {:error, reason}
    end
  end

  defp extract_pubkey(%Req.Response{status: 200, body: body}) when is_binary(body) do
    with {:ok, payload} <- JSON.decode(body),
         pubkey when is_binary(pubkey) and pubkey != "" <- Map.get(payload, "pubkey") do
      {:ok, String.downcase(pubkey)}
    else
      nil -> {:error, :missing_remote_identity}
      {:error, reason} -> {:error, reason}
      _other -> {:error, :missing_remote_identity}
    end
  end

  defp extract_pubkey(%Req.Response{status: status}),
    do: {:error, {:relay_info_request_failed, status}}

  defp extract_pubkey(_response), do: {:error, :invalid_relay_info}

  defp request_opts(%{tls: %{mode: :disabled}}), do: []
  defp request_opts(%{tls: tls}), do: TLS.req_connect_options(tls)

  # Maps the websocket relay URL onto its HTTP counterpart for the info
  # document. http(s) URLs pass through unchanged instead of raising a
  # FunctionClauseError, which the original did for any non-ws scheme.
  defp relay_info_url(relay_url) do
    relay_url
    |> URI.parse()
    |> Map.update!(:scheme, fn
      "wss" -> "https"
      "ws" -> "http"
      "https" -> "https"
      "http" -> "http"
    end)
    |> URI.to_string()
  end
end

View File

@@ -0,0 +1,43 @@
defmodule Parrhesia.Sync.Supervisor do
  @moduledoc """
  Supervision entrypoint for sync control-plane processes.
  """

  use Supervisor

  # Starts the supervisor tree; `:name` in `init_arg` overrides the
  # registered name (defaults to this module).
  def start_link(init_arg \\ []) do
    Supervisor.start_link(__MODULE__, init_arg, name: Keyword.get(init_arg, :name, __MODULE__))
  end

  @impl true
  def init(init_arg) do
    registry = Keyword.get(init_arg, :worker_registry, Parrhesia.Sync.WorkerRegistry)
    dyn_sup = Keyword.get(init_arg, :worker_supervisor, Parrhesia.Sync.WorkerSupervisor)
    manager = Keyword.get(init_arg, :manager, Parrhesia.API.Sync.Manager)

    [
      {Registry, keys: :unique, name: registry},
      {DynamicSupervisor, strategy: :one_for_one, name: dyn_sup},
      {Parrhesia.API.Sync.Manager, manager_opts(init_arg, manager, registry, dyn_sup)}
    ]
    |> Supervisor.init(strategy: :one_for_one)
  end

  # Builds the manager child options: the explicit names first, then any
  # caller-supplied overrides the manager understands.
  defp manager_opts(init_arg, manager_name, registry, dyn_sup) do
    forwarded =
      Keyword.take(init_arg, [
        :path,
        :start_workers?,
        :transport_module,
        :relay_info_opts,
        :transport_opts
      ])

    [name: manager_name, worker_registry: registry, worker_supervisor: dyn_sup] ++ forwarded
  end
end

112
lib/parrhesia/sync/tls.ex Normal file
View File

@@ -0,0 +1,112 @@
defmodule Parrhesia.Sync.TLS do
  @moduledoc false

  # TLS configuration shared by the sync websocket transport and the
  # NIP-11 relay-info HTTP client. `:pins` holds base64 SHA-256 hashes of
  # the peer's SubjectPublicKeyInfo; when non-empty, at least one
  # certificate in the presented chain must match a pin or the handshake
  # is rejected.
  @type tls_config :: %{
          mode: :required | :disabled,
          hostname: String.t(),
          pins: [%{type: :spki_sha256, value: String.t()}]
        }

  # Options for the websocket client: plaintext connections skip
  # verification entirely (insecure: true); TLS connections carry the full
  # :ssl option set under :ssl_options.
  @spec websocket_options(tls_config()) :: keyword()
  def websocket_options(%{mode: :disabled}), do: [insecure: true]

  def websocket_options(%{mode: :required} = tls) do
    [
      ssl_options: transport_opts(tls)
    ]
  end

  # Same :ssl options packaged as Req connect options (:transport_opts).
  @spec req_connect_options(tls_config()) :: keyword()
  def req_connect_options(%{mode: :disabled}), do: []

  def req_connect_options(%{mode: :required} = tls) do
    [
      transport_opts: transport_opts(tls)
    ]
  end

  # Base :ssl options: peer verification against the OS trust store, SNI
  # for the configured hostname, HTTPS-style hostname matching, plus a
  # pin-checking verify_fun when pins are configured.
  def transport_opts(%{hostname: hostname, pins: pins}) do
    [
      verify: :verify_peer,
      cacerts: system_cacerts(),
      server_name_indication: String.to_charlist(hostname),
      customize_hostname_check: [
        match_fun: :public_key.pkix_verify_hostname_match_fun(:https)
      ]
    ]
    |> maybe_put_verify_fun(pins)
  end

  # No pins configured -> plain verify_peer only.
  defp maybe_put_verify_fun(options, []), do: options

  defp maybe_put_verify_fun(options, pins) do
    # The verify_fun state threads through the whole chain; `matched?`
    # flips to true as soon as any certificate's SPKI hash is in the set.
    Keyword.put(
      options,
      :verify_fun,
      {&verify_certificate/3, %{pins: MapSet.new(Enum.map(pins, & &1.value)), matched?: false}}
    )
  end

  # :valid_peer fires for the leaf at the end of chain validation; by then
  # some certificate must have matched a pin, otherwise fail closed.
  defp verify_certificate(_cert, :valid_peer, %{matched?: true} = state), do: {:valid, state}
  defp verify_certificate(_cert, :valid_peer, _state), do: {:fail, :pin_mismatch}
  defp verify_certificate(_cert, {:bad_cert, reason}, _state), do: {:fail, reason}

  # Other events (:valid, extensions): hash the certificate's SPKI and
  # record whether it matches a pin. The cert argument's shape varies with
  # the decode path (raw DER binary, OTP record, plain record) — hence the
  # three structurally identical clauses. Decoding failures fail closed.
  defp verify_certificate(cert, _event, state) when is_binary(cert) do
    matched? = MapSet.member?(state.pins, spki_pin_from_verify(cert))
    {:valid, %{state | matched?: state.matched? or matched?}}
  rescue
    _error -> {:fail, :invalid_certificate}
  end

  defp verify_certificate({:OTPCertificate, _tbs, _sig_alg, _sig} = cert, _event, state) do
    matched? = MapSet.member?(state.pins, spki_pin_from_verify(cert))
    {:valid, %{state | matched?: state.matched? or matched?}}
  rescue
    _error -> {:fail, :invalid_certificate}
  end

  defp verify_certificate({:Certificate, _tbs, _sig_alg, _sig} = cert, _event, state) do
    matched? = MapSet.member?(state.pins, spki_pin_from_verify(cert))
    {:valid, %{state | matched?: state.matched? or matched?}}
  rescue
    _error -> {:fail, :invalid_certificate}
  end

  # Anything unrecognised passes through without touching the pin state.
  defp verify_certificate(_cert, _event, state), do: {:valid, state}

  # base64(sha256(DER-encoded SubjectPublicKeyInfo)) of a DER certificate.
  defp spki_pin(cert_der) do
    cert = :public_key.pkix_decode_cert(cert_der, :plain)
    # elem(1) is the TBSCertificate record; elem(7) of that is taken as
    # the subjectPublicKeyInfo field — NOTE(review): position depends on
    # the OTP :plain record layout, confirm against the OTP version in use.
    spki = cert |> elem(1) |> elem(7)

    :public_key.der_encode(:SubjectPublicKeyInfo, spki)
    |> then(&:crypto.hash(:sha256, &1))
    |> Base.encode64()
  end

  # Normalize whatever form the verify callback handed us back to DER
  # before hashing.
  defp spki_pin_from_verify(cert) when is_binary(cert), do: spki_pin(cert)

  defp spki_pin_from_verify({:OTPCertificate, _tbs, _sig_alg, _sig} = cert) do
    cert
    |> then(&:public_key.pkix_encode(:OTPCertificate, &1, :otp))
    |> spki_pin()
  end

  defp spki_pin_from_verify({:Certificate, _tbs, _sig_alg, _sig} = cert) do
    cert
    |> then(&:public_key.der_encode(:Certificate, &1))
    |> spki_pin()
  end

  defp spki_pin_from_verify(_cert) do
    raise(ArgumentError, "invalid certificate")
  end

  # OS trust store; :public_key.cacerts_get/0 exists from OTP 25 onwards,
  # older releases fall back to an empty CA list.
  defp system_cacerts do
    if function_exported?(:public_key, :cacerts_get, 0) do
      :public_key.cacerts_get()
    else
      []
    end
  end
end

View File

@@ -0,0 +1,7 @@
defmodule Parrhesia.Sync.Transport do
  @moduledoc false

  # Behaviour implemented by outbound sync transports (default:
  # Parrhesia.Sync.Transport.WebSockexClient). `connect/3` receives the
  # owner pid — which is then sent `{:sync_transport, transport_pid,
  # :connected | :frame | :disconnected, payload}` messages — the server
  # configuration map, and transport-specific options.
  @callback connect(pid(), map(), keyword()) :: {:ok, pid()} | {:error, term()}
  @callback send_json(pid(), term()) :: :ok | {:error, term()}
  @callback close(pid()) :: :ok
end

View File

@@ -0,0 +1,93 @@
defmodule Parrhesia.Sync.Transport.WebSockexClient do
  @moduledoc false

  use WebSockex

  alias Parrhesia.Sync.TLS

  @behaviour Parrhesia.Sync.Transport

  # Opens a websocket to `server.url`. The owner pid receives
  # {:sync_transport, transport_pid, :connected | :frame | :disconnected,
  # payload} messages for every lifecycle event.
  @impl true
  def connect(owner, server, opts \\ []) do
    ws_opts =
      server.tls
      |> TLS.websocket_options()
      |> merge_websocket_opts(Keyword.get(opts, :websocket_opts, []))
      |> Keyword.put(:handle_initial_conn_failure, true)

    WebSockex.start(server.url, __MODULE__, %{owner: owner, server: server}, ws_opts)
  end

  @impl true
  def send_json(pid, payload), do: WebSockex.cast(pid, {:send_json, payload})

  @impl true
  def close(pid) do
    WebSockex.cast(pid, :close)
    :ok
  end

  @impl true
  def handle_connect(conn, state) do
    notify(state, :connected, %{resp_headers: conn.resp_headers})
    {:ok, state}
  end

  # Text frames are JSON-decoded before forwarding; a decode failure is
  # forwarded as {:decode_error, reason, raw_payload} so the owner can
  # report it instead of crashing the transport.
  @impl true
  def handle_frame({:text, payload}, state) do
    decoded =
      case JSON.decode(payload) do
        {:ok, frame} -> frame
        {:error, reason} -> {:decode_error, reason, payload}
      end

    notify(state, :frame, decoded)
    {:ok, state}
  end

  # Non-text frames (ping/pong/binary) are forwarded verbatim.
  def handle_frame(frame, state) do
    notify(state, :frame, frame)
    {:ok, state}
  end

  @impl true
  def handle_cast({:send_json, payload}, state) do
    {:reply, {:text, JSON.encode!(payload)}, state}
  end

  def handle_cast(:close, state), do: {:close, state}

  @impl true
  def handle_disconnect(status, state) do
    notify(state, :disconnected, status)
    {:ok, state}
  end

  # Single choke point for owner notifications.
  defp notify(state, event, payload) do
    send(state.owner, {:sync_transport, self(), event, payload})
  end

  # Merges caller-supplied websocket opts over the TLS-derived base.
  # :ssl_options from both sides are merged key-wise rather than letting
  # the override replace the pinned TLS configuration wholesale.
  defp merge_websocket_opts(base_opts, override_opts) do
    merged_ssl =
      case {Keyword.get(base_opts, :ssl_options), Keyword.get(override_opts, :ssl_options)} do
        {nil, nil} -> nil
        {base_ssl, nil} -> base_ssl
        {nil, override_ssl} -> override_ssl
        {base_ssl, override_ssl} -> Keyword.merge(base_ssl, override_ssl)
      end

    merged = Keyword.merge(base_opts, Keyword.delete(override_opts, :ssl_options))

    if merged_ssl, do: Keyword.put(merged, :ssl_options, merged_ssl), else: merged
  end
end

View File

@@ -0,0 +1,367 @@
defmodule Parrhesia.Sync.Worker do
  @moduledoc false

  # One outbound sync connection to a remote relay. Lifecycle:
  #
  #   :connect message -> NIP-11 identity check (RelayInfoClient) ->
  #   websocket transport -> REQ subscription (phase :catchup) ->
  #   EOSE (phase :streaming), with NIP-42 AUTH handled on demand and
  #   exponential-backoff reconnects on any failure.
  #
  # Every state transition is reported to the manager via
  # Manager.runtime_event/3,4; the manager also persists cursor advances.

  use GenServer

  alias Parrhesia.API.Events
  alias Parrhesia.API.Identity
  alias Parrhesia.API.RequestContext
  alias Parrhesia.API.Sync.Manager
  alias Parrhesia.Sync.RelayInfoClient
  alias Parrhesia.Sync.Transport.WebSockexClient

  # Reconnect backoff doubles from 1s up to a 30s cap.
  @initial_backoff_ms 1_000
  @max_backoff_ms 30_000
  # NIP-42 client-authentication event kind.
  @auth_kind 22_242

  # phase: :idle | :connecting | :authenticating | :catchup | :streaming
  # cursor_*: high-water mark of ingested remote events, used to build
  # "since" filters on resubscribe (minus an overlap window).
  defstruct server: nil,
            manager: nil,
            transport_module: WebSockexClient,
            transport_pid: nil,
            phase: :idle,
            current_subscription_id: nil,
            backoff_ms: @initial_backoff_ms,
            authenticated?: false,
            auth_event_id: nil,
            resubscribe_after_auth?: false,
            cursor_created_at: nil,
            cursor_event_id: nil,
            relay_info_opts: [],
            transport_opts: []

  @type t :: %__MODULE__{}

  # :transient restart: crashes are restarted, a :normal stop is final.
  def child_spec(opts) do
    server = Keyword.fetch!(opts, :server)

    %{
      id: {:sync_worker, server.id},
      start: {__MODULE__, :start_link, [opts]},
      restart: :transient
    }
  end

  def start_link(opts) do
    name = Keyword.get(opts, :name)
    GenServer.start_link(__MODULE__, opts, name: name)
  end

  # Forces the current subscription to be closed and reissued.
  def sync_now(worker), do: GenServer.cast(worker, :sync_now)

  def stop(worker), do: GenServer.stop(worker, :normal)

  @impl true
  def init(opts) do
    server = Keyword.fetch!(opts, :server)
    # `runtime` carries persisted state (cursor position) from the manager.
    runtime = Keyword.get(opts, :runtime, %{})

    state = %__MODULE__{
      server: server,
      manager: Keyword.fetch!(opts, :manager),
      transport_module: Keyword.get(opts, :transport_module, WebSockexClient),
      cursor_created_at: Map.get(runtime, :cursor_created_at),
      cursor_event_id: Map.get(runtime, :cursor_event_id),
      relay_info_opts: Keyword.get(opts, :relay_info_opts, []),
      transport_opts: Keyword.get(opts, :transport_opts, [])
    }

    # Kick off the first connection attempt asynchronously so init returns
    # immediately.
    send(self(), :connect)
    {:ok, state}
  end

  @impl true
  def handle_cast(:sync_now, state) do
    Manager.runtime_event(state.manager, state.server.id, :subscription_restart)

    next_state =
      state
      |> close_subscription()
      |> issue_subscription()

    {:noreply, next_state}
  end

  # Connect only when no transport is live; verify the remote relay's
  # NIP-11 identity before opening the websocket.
  @impl true
  def handle_info(:connect, %__MODULE__{transport_pid: nil} = state) do
    case RelayInfoClient.verify_remote_identity(state.server, state.relay_info_opts) do
      :ok ->
        connect_transport(state)

      {:error, reason} ->
        Manager.runtime_event(state.manager, state.server.id, :disconnected, %{reason: reason})
        {:noreply, schedule_reconnect(state)}
    end
  end

  # A stale :connect timer while already connected is a no-op.
  def handle_info(:connect, state), do: {:noreply, state}

  # Transport is up: reset backoff and auth state, then subscribe.
  def handle_info({:sync_transport, transport_pid, :connected, _info}, state) do
    Manager.runtime_event(state.manager, state.server.id, :connected, %{})

    next_state =
      state
      |> Map.put(:transport_pid, transport_pid)
      |> Map.put(:backoff_ms, @initial_backoff_ms)
      |> Map.put(:authenticated?, false)
      |> Map.put(:auth_event_id, nil)
      |> Map.put(:resubscribe_after_auth?, false)
      |> issue_subscription()

    {:noreply, next_state}
  end

  def handle_info({:sync_transport, _transport_pid, :frame, frame}, state) do
    {:noreply, handle_transport_frame(state, frame)}
  end

  # Transport dropped: clear all connection-scoped state and retry with
  # backoff.
  def handle_info({:sync_transport, _transport_pid, :disconnected, status}, state) do
    Manager.runtime_event(state.manager, state.server.id, :disconnected, %{reason: status.reason})

    next_state =
      state
      |> Map.put(:transport_pid, nil)
      |> Map.put(:phase, :idle)
      |> Map.put(:authenticated?, false)
      |> Map.put(:auth_event_id, nil)
      |> Map.put(:resubscribe_after_auth?, false)
      |> Map.put(:current_subscription_id, nil)
      |> schedule_reconnect()

    {:noreply, next_state}
  end

  def handle_info(_message, state), do: {:noreply, state}

  defp connect_transport(state) do
    case state.transport_module.connect(self(), state.server, state.transport_opts) do
      {:ok, transport_pid} ->
        {:noreply, %{state | transport_pid: transport_pid, phase: :connecting}}

      {:error, reason} ->
        Manager.runtime_event(state.manager, state.server.id, :disconnected, %{reason: reason})
        {:noreply, schedule_reconnect(state)}
    end
  end

  # Remote requested NIP-42 auth: sign and send the auth event, remember
  # its id so the matching OK frame can be recognised.
  defp handle_transport_frame(state, ["AUTH", challenge]) when is_binary(challenge) do
    case send_auth_event(state, challenge) do
      {:ok, auth_event_id} ->
        %{state | auth_event_id: auth_event_id, phase: :authenticating}

      {:error, reason} ->
        Manager.runtime_event(state.manager, state.server.id, :error, %{reason: reason})
        state
    end
  end

  # OK for our auth event, accepted: optionally reissue the subscription
  # that was rejected with auth-required earlier.
  defp handle_transport_frame(state, ["OK", event_id, true, _message])
       when event_id == state.auth_event_id do
    next_state = %{state | authenticated?: true, auth_event_id: nil}

    if next_state.resubscribe_after_auth? do
      next_state
      |> Map.put(:resubscribe_after_auth?, false)
      |> issue_subscription()
    else
      next_state
    end
  end

  # OK for our auth event, rejected: report and reconnect from scratch.
  defp handle_transport_frame(state, ["OK", event_id, false, message])
       when event_id == state.auth_event_id do
    Manager.runtime_event(state.manager, state.server.id, :error, %{reason: message})
    schedule_reconnect(%{state | auth_event_id: nil, authenticated?: false})
  end

  # Remote event on the live subscription: ingest locally.
  defp handle_transport_frame(state, ["EVENT", subscription_id, event])
       when subscription_id == state.current_subscription_id and is_map(event) do
    handle_remote_event(state, event)
  end

  # End of stored events: the catch-up phase is complete.
  defp handle_transport_frame(state, ["EOSE", subscription_id])
       when subscription_id == state.current_subscription_id do
    Manager.runtime_event(state.manager, state.server.id, :sync_completed, %{})
    %{state | phase: :streaming}
  end

  # Subscription closed by the remote. An "auth"-flavoured message while
  # unauthenticated means "retry the REQ after AUTH completes"; anything
  # else is an error and triggers a reconnect.
  defp handle_transport_frame(state, ["CLOSED", subscription_id, message])
       when subscription_id == state.current_subscription_id do
    auth_required? = is_binary(message) and String.contains?(String.downcase(message), "auth")

    next_state =
      state
      |> Map.put(:current_subscription_id, nil)
      |> Map.put(:phase, :idle)

    if auth_required? and not state.authenticated? do
      %{next_state | resubscribe_after_auth?: true}
    else
      Manager.runtime_event(state.manager, state.server.id, :error, %{reason: message})
      schedule_reconnect(next_state)
    end
  end

  # Undecodable frame forwarded by the transport: report, keep going.
  defp handle_transport_frame(state, {:decode_error, reason, _payload}) do
    Manager.runtime_event(state.manager, state.server.id, :error, %{reason: reason})
    state
  end

  # Frames for stale subscriptions or unknown shapes are ignored.
  defp handle_transport_frame(state, _frame), do: state

  defp issue_subscription(%__MODULE__{transport_pid: nil} = state), do: state

  defp issue_subscription(state) do
    subscription_id = subscription_id(state.server.id)
    filters = sync_filters(state)

    :ok =
      state.transport_module.send_json(state.transport_pid, ["REQ", subscription_id | filters])

    Manager.runtime_event(state.manager, state.server.id, :sync_started, %{})

    %{
      state
      | current_subscription_id: subscription_id,
        phase: :catchup
    }
  end

  defp close_subscription(%__MODULE__{transport_pid: nil} = state), do: state
  defp close_subscription(%__MODULE__{current_subscription_id: nil} = state), do: state

  defp close_subscription(state) do
    :ok =
      state.transport_module.send_json(state.transport_pid, [
        "CLOSE",
        state.current_subscription_id
      ])

    %{state | current_subscription_id: nil}
  end

  # Builds, signs (via Identity), and sends the NIP-42 kind-22242 auth
  # event carrying the challenge and relay URL tags. Returns the signed
  # event id so the OK response can be matched.
  defp send_auth_event(state, challenge) do
    event = %{
      "created_at" => System.system_time(:second),
      "kind" => @auth_kind,
      "tags" => [["challenge", challenge], ["relay", state.server.url]],
      "content" => ""
    }

    with {:ok, signed_event} <- Identity.sign_event(event) do
      :ok = state.transport_module.send_json(state.transport_pid, ["AUTH", signed_event])
      {:ok, signed_event["id"]}
    end
  end

  # Publishes a remote event into the local relay. Accepted and duplicate
  # events advance the cursor; rejections and errors do not, so they will
  # be retried on the next overlap window.
  defp handle_remote_event(state, event) do
    context = request_context(state)

    case Events.publish(event, context: context) do
      {:ok, %{accepted: true}} ->
        Manager.runtime_event(state.manager, state.server.id, :event_result, %{
          result: :accepted,
          event: event
        })

        advance_cursor(state, event)

      {:ok, %{accepted: false, reason: :duplicate_event}} ->
        Manager.runtime_event(state.manager, state.server.id, :event_result, %{
          result: :duplicate,
          event: event
        })

        advance_cursor(state, event)

      {:ok, %{accepted: false, reason: reason}} ->
        Manager.runtime_event(state.manager, state.server.id, :event_result, %{
          result: :rejected,
          event: event,
          reason: reason
        })

        state

      {:error, reason} ->
        Manager.runtime_event(state.manager, state.server.id, :event_result, %{
          result: :rejected,
          event: event,
          reason: reason
        })

        state
    end
  end

  # Publish context: events arrive as if authenticated as the remote
  # relay's identity pubkey, tagged with the sync server id for auditing.
  defp request_context(state) do
    %RequestContext{
      authenticated_pubkeys: MapSet.new([state.server.auth_pubkey]),
      caller: :sync,
      subscription_id: state.current_subscription_id,
      peer_id: state.server.id,
      metadata: %{
        sync_server_id: state.server.id,
        remote_url: state.server.url
      }
    }
  end

  # Moves the cursor forward (and notifies the manager) only for events
  # strictly newer than the current cursor.
  defp advance_cursor(state, event) do
    created_at = Map.get(event, "created_at")
    event_id = Map.get(event, "id")

    if newer_cursor?(state.cursor_created_at, state.cursor_event_id, created_at, event_id) do
      Manager.runtime_event(state.manager, state.server.id, :cursor_advanced, %{
        created_at: created_at,
        event_id: event_id
      })

      %{state | cursor_created_at: created_at, cursor_event_id: event_id}
    else
      state
    end
  end

  # No cursor yet: any well-formed (integer timestamp + id) event wins.
  defp newer_cursor?(nil, _cursor_event_id, created_at, event_id),
    do: is_integer(created_at) and is_binary(event_id)

  # Ordering: by created_at first, then lexicographically by event id as a
  # tie-breaker for events sharing a timestamp.
  defp newer_cursor?(cursor_created_at, cursor_event_id, created_at, event_id) do
    cond do
      not is_integer(created_at) or not is_binary(event_id) ->
        false

      created_at > cursor_created_at ->
        true

      created_at == cursor_created_at and is_binary(cursor_event_id) and
          event_id > cursor_event_id ->
        true

      true ->
        false
    end
  end

  # Applies the cursor-derived "since" bound to every configured filter.
  defp sync_filters(state) do
    Enum.map(state.server.filters, fn filter ->
      case since_value(state, filter) do
        nil -> filter
        since -> Map.put(filter, "since", since)
      end
    end)
  end

  defp since_value(%__MODULE__{cursor_created_at: nil}, _filter), do: nil

  # Back off the cursor by the configured overlap window so events near
  # the boundary are re-requested (duplicates are filtered on publish).
  defp since_value(state, _filter) do
    max(state.cursor_created_at - state.server.overlap_window_seconds, 0)
  end

  # Schedules the next :connect attempt and doubles the backoff (capped).
  defp schedule_reconnect(state) do
    Process.send_after(self(), :connect, state.backoff_ms)
    %{state | backoff_ms: min(state.backoff_ms * 2, @max_backoff_ms)}
  end

  # Unique per-attempt subscription id so stale frames are identifiable.
  defp subscription_id(server_id) do
    "sync-#{server_id}-#{System.unique_integer([:positive, :monotonic])}"
  end
end

View File

@@ -12,6 +12,9 @@ defmodule Parrhesia.TestSupport.ExpirationStubEvents do
@impl true @impl true
def query(_context, _filters, _opts), do: {:ok, []} def query(_context, _filters, _opts), do: {:ok, []}
@impl true
def query_event_refs(_context, _filters, _opts), do: {:ok, []}
@impl true @impl true
def count(_context, _filters, _opts), do: {:ok, 0} def count(_context, _filters, _opts), do: {:ok, 0}

View File

@@ -12,6 +12,9 @@ defmodule Parrhesia.TestSupport.FailingEvents do
@impl true @impl true
def query(_context, _filters, _opts), do: {:error, :db_down} def query(_context, _filters, _opts), do: {:error, :db_down}
@impl true
def query_event_refs(_context, _filters, _opts), do: {:error, :db_down}
@impl true @impl true
def count(_context, _filters, _opts), do: {:error, :db_down} def count(_context, _filters, _opts), do: {:error, :db_down}

View File

@@ -21,6 +21,9 @@ defmodule Parrhesia.TestSupport.PermissiveModeration do
@impl true @impl true
def pubkey_allowed?(_context, _pubkey), do: {:ok, true} def pubkey_allowed?(_context, _pubkey), do: {:ok, true}
@impl true
def has_allowed_pubkeys?(_context), do: {:ok, false}
@impl true @impl true
def ban_event(_context, _event_id), do: :ok def ban_event(_context, _event_id), do: :ok

View File

@@ -0,0 +1,49 @@
defmodule Parrhesia.TestSupport.SyncFakeRelay.Plug do
  @moduledoc false

  import Plug.Conn

  alias Parrhesia.TestSupport.SyncFakeRelay.Server

  def init(opts), do: opts

  # "/relay" serves the NIP-11 document when the client sends an
  # application/nostr+json accept header, otherwise upgrades the request
  # to the fake relay websocket; every other path is a 404.
  def call(conn, opts) do
    server = Keyword.fetch!(opts, :server)

    case {conn.request_path, wants_nip11?(conn)} do
      {"/relay", true} ->
        send_json(conn, 200, Server.document(server))

      {"/relay", false} ->
        conn
        |> WebSockAdapter.upgrade(
          Parrhesia.TestSupport.SyncFakeRelay.Socket,
          %{server: server, relay_url: relay_url(conn)},
          timeout: 60_000
        )
        |> halt()

      _other ->
        send_resp(conn, 404, "not found")
    end
  end

  defp wants_nip11?(conn) do
    conn
    |> get_req_header("accept")
    |> Enum.any?(&String.contains?(&1, "application/nostr+json"))
  end

  defp send_json(conn, status, body) do
    conn
    |> put_resp_content_type("application/nostr+json")
    |> send_resp(status, JSON.encode!(body))
  end

  # Reconstructs the ws(s) URL the client connected to; the socket uses it
  # to validate the NIP-42 "relay" tag.
  defp relay_url(conn) do
    scheme = if conn.scheme == :https, do: "wss", else: "ws"
    "#{scheme}://#{conn.host}:#{conn.port}#{conn.request_path}"
  end
end

View File

@@ -0,0 +1,65 @@
defmodule Parrhesia.TestSupport.SyncFakeRelay.Server do
  @moduledoc false

  use Agent

  # Starts the fake relay's shared state holder. Required opts: :name,
  # :pubkey, :expected_client_pubkey. Optional: :initial_events (backlog
  # replayed to new subscriptions).
  def start_link(opts) do
    state = %{
      pubkey: Keyword.fetch!(opts, :pubkey),
      expected_client_pubkey: Keyword.fetch!(opts, :expected_client_pubkey),
      initial_events: Keyword.get(opts, :initial_events, []),
      subscribers: %{}
    }

    Agent.start_link(fn -> state end, name: Keyword.fetch!(opts, :name))
  end

  # NIP-11 information document advertised by the fake relay.
  def document(server) do
    relay_pubkey = Agent.get(server, & &1.pubkey)

    %{
      "name" => "Sync Fake Relay",
      "description" => "test relay",
      "pubkey" => relay_pubkey,
      "supported_nips" => [1, 11, 42]
    }
  end

  def initial_events(server), do: Agent.get(server, & &1.initial_events)

  def expected_client_pubkey(server), do: Agent.get(server, & &1.expected_client_pubkey)

  # Tracks a live subscription keyed by {socket_pid, subscription_id}.
  def register_subscription(server, pid, subscription_id) do
    Agent.update(server, fn state ->
      %{state | subscribers: Map.put(state.subscribers, {pid, subscription_id}, true)}
    end)
  end

  def unregister_subscription(server, pid, subscription_id) do
    Agent.update(server, fn state ->
      %{state | subscribers: Map.delete(state.subscribers, {pid, subscription_id})}
    end)
  end

  # Appends the event to the stored backlog and pushes it to every live
  # subscription as a {:sync_fake_relay_event, subscription_id, event}
  # message.
  def publish_live_event(server, event) do
    subscribers =
      Agent.get_and_update(server, fn state ->
        {Map.keys(state.subscribers), %{state | initial_events: state.initial_events ++ [event]}}
      end)

    for {pid, subscription_id} <- subscribers do
      send(pid, {:sync_fake_relay_event, subscription_id, event})
    end

    :ok
  end
end

View File

@@ -0,0 +1,118 @@
defmodule Parrhesia.TestSupport.SyncFakeRelay.Socket do
  @moduledoc false

  # Minimal WebSock handler emulating a NIP-42-gated relay for sync e2e
  # tests: a REQ before AUTH is answered with an AUTH challenge and an
  # "auth-required" CLOSED; after a valid AUTH the pending subscription is
  # replayed (backlog + EOSE). Signature verification is intentionally
  # skipped — only challenge/relay tags and the pubkey are checked.
  @behaviour WebSock

  alias Parrhesia.TestSupport.SyncFakeRelay.Server

  # Upgrade state (from the Plug) carries :server and :relay_url;
  # connections start unauthenticated.
  def init(state), do: {:ok, Map.put(state, :authenticated?, false)}

  def handle_in({payload, [opcode: :text]}, state) do
    case JSON.decode(payload) do
      {:ok, ["REQ", subscription_id | _filters]} ->
        maybe_authorize_req(state, subscription_id)

      {:ok, ["AUTH", auth_event]} when is_map(auth_event) ->
        handle_auth(auth_event, state)

      {:ok, ["CLOSE", subscription_id]} ->
        Server.unregister_subscription(state.server, self(), subscription_id)

        {:push, {:text, JSON.encode!(["CLOSED", subscription_id, "error: subscription closed"])},
         state}

      _other ->
        # Unknown or undecodable frames are silently ignored.
        {:ok, state}
    end
  end

  def handle_in(_frame, state), do: {:ok, state}

  # Live events fanned out by Server.publish_live_event/2.
  def handle_info({:sync_fake_relay_event, subscription_id, event}, state) do
    {:push, {:text, JSON.encode!(["EVENT", subscription_id, event])}, state}
  end

  def handle_info(_message, state), do: {:ok, state}

  # Deregister everything this socket registered with the shared server.
  def terminate(_reason, state) do
    Enum.each(Map.get(state, :subscriptions, []), fn subscription_id ->
      Server.unregister_subscription(state.server, self(), subscription_id)
    end)

    :ok
  end

  # Authenticated REQ: register the subscription, replay the stored
  # backlog, then EOSE.
  defp maybe_authorize_req(%{authenticated?: true} = state, subscription_id) do
    Server.register_subscription(state.server, self(), subscription_id)

    frames =
      Server.initial_events(state.server)
      |> Enum.map(fn event -> {:text, JSON.encode!(["EVENT", subscription_id, event])} end)
      |> Kernel.++([{:text, JSON.encode!(["EOSE", subscription_id])}])

    next_state =
      state
      |> Map.update(:subscriptions, [subscription_id], &[subscription_id | &1])

    {:push, frames, next_state}
  end

  # Unauthenticated REQ: issue an AUTH challenge, remember the
  # subscription id so it can be replayed after auth succeeds, and reject
  # the REQ with an "auth-required" CLOSED.
  defp maybe_authorize_req(state, subscription_id) do
    challenge = Base.encode16(:crypto.strong_rand_bytes(12), case: :lower)

    next_state =
      state
      |> Map.put(:challenge, challenge)
      |> Map.put(:pending_subscription_id, subscription_id)

    {:push,
     [
       {:text, JSON.encode!(["AUTH", challenge])},
       {:text,
        JSON.encode!(["CLOSED", subscription_id, "auth-required: sync access requires AUTH"])}
     ], next_state}
  end

  # Accepts AUTH when the challenge tag, relay-URL tag, and client pubkey
  # all match. NOTE(review): `state.challenge` raises KeyError if AUTH
  # arrives before any REQ triggered a challenge — acceptable for a test
  # harness, but worth confirming no scenario exercises that ordering.
  defp handle_auth(auth_event, state) do
    challenge_ok? = has_tag?(auth_event, "challenge", state.challenge)
    relay_ok? = has_tag?(auth_event, "relay", state.relay_url)
    pubkey_ok? = Map.get(auth_event, "pubkey") == Server.expected_client_pubkey(state.server)

    if challenge_ok? and relay_ok? and pubkey_ok? do
      accepted_state = %{state | authenticated?: true}
      ok_frame = ["OK", Map.get(auth_event, "id"), true, "ok: auth accepted"]

      # Replay the subscription that was parked behind auth, if any.
      if subscription_id = Map.get(accepted_state, :pending_subscription_id) do
        next_state =
          accepted_state
          |> Map.delete(:pending_subscription_id)
          |> Map.update(:subscriptions, [subscription_id], &[subscription_id | &1])

        Server.register_subscription(state.server, self(), subscription_id)

        {:push,
         [{:text, JSON.encode!(ok_frame)} | auth_success_frames(accepted_state, subscription_id)],
         next_state}
      else
        {:push, {:text, JSON.encode!(ok_frame)}, accepted_state}
      end
    else
      {:push,
       {:text, JSON.encode!(["OK", Map.get(auth_event, "id"), false, "invalid: auth rejected"])},
       state}
    end
  end

  # Backlog replay + EOSE for a freshly authorized subscription.
  defp auth_success_frames(state, subscription_id) do
    Server.initial_events(state.server)
    |> Enum.map(fn event -> {:text, JSON.encode!(["EVENT", subscription_id, event])} end)
    |> Kernel.++([{:text, JSON.encode!(["EOSE", subscription_id])}])
  end

  # True when the event carries a tag of shape [name, expected_value | _].
  defp has_tag?(event, name, expected_value) do
    Enum.any?(Map.get(event, "tags", []), fn
      [^name, ^expected_value | _rest] -> true
      _other -> false
    end)
  end
end

View File

@@ -0,0 +1,145 @@
defmodule Parrhesia.TestSupport.TLSCerts do
  @moduledoc false

  # Test helper that shells out to the `openssl` CLI to mint a throwaway
  # CA plus short-lived (2-day) server/client certificates, and to compute
  # pinning material (SPKI and whole-cert SHA-256 hashes) for the TLS
  # pinning tests.

  # Creates a self-signed RSA-2048 CA in `dir`; returns key/cert paths.
  @spec create_ca!(String.t(), String.t()) :: map()
  def create_ca!(dir, name) do
    keyfile = Path.join(dir, "#{name}-ca.key.pem")
    certfile = Path.join(dir, "#{name}-ca.cert.pem")

    openssl!([
      "req",
      "-x509",
      "-newkey",
      "rsa:2048",
      "-nodes",
      "-sha256",
      "-days",
      "2",
      "-subj",
      "/CN=#{name} Test CA",
      "-keyout",
      keyfile,
      "-out",
      certfile
    ])

    %{keyfile: keyfile, certfile: certfile}
  end

  # Server cert for localhost/127.0.0.1 with serverAuth EKU.
  @spec issue_server_cert!(String.t(), map(), String.t()) :: map()
  def issue_server_cert!(dir, ca, name) do
    issue_cert!(
      dir,
      ca,
      name,
      "localhost",
      ["DNS:localhost", "IP:127.0.0.1"],
      "serverAuth"
    )
  end

  # Client cert (CN = name, no SAN) with clientAuth EKU.
  @spec issue_client_cert!(String.t(), map(), String.t()) :: map()
  def issue_client_cert!(dir, ca, name) do
    issue_cert!(dir, ca, name, name, [], "clientAuth")
  end

  # base64(sha256(DER SubjectPublicKeyInfo)) of the PEM cert at `certfile`.
  @spec spki_pin!(String.t()) :: String.t()
  def spki_pin!(certfile) do
    certfile
    |> der_cert!()
    |> spki_pin()
  end

  # base64(sha256(whole DER certificate)) of the PEM cert at `certfile`.
  @spec cert_sha256!(String.t()) :: String.t()
  def cert_sha256!(certfile) do
    certfile
    |> der_cert!()
    |> then(&Base.encode64(:crypto.hash(:sha256, &1)))
  end

  # CSR + CA-signed issuance with a v3_req extension file (SAN + EKU).
  defp issue_cert!(dir, ca, name, common_name, san_entries, extended_key_usage) do
    keyfile = Path.join(dir, "#{name}.key.pem")
    csrfile = Path.join(dir, "#{name}.csr.pem")
    certfile = Path.join(dir, "#{name}.cert.pem")
    extfile = Path.join(dir, "#{name}.ext.cnf")

    openssl!([
      "req",
      "-new",
      "-newkey",
      "rsa:2048",
      "-nodes",
      "-subj",
      "/CN=#{common_name}",
      "-keyout",
      keyfile,
      "-out",
      csrfile
    ])

    File.write!(extfile, extension_config(san_entries, extended_key_usage))

    openssl!([
      "x509",
      "-req",
      "-in",
      csrfile,
      "-CA",
      ca.certfile,
      "-CAkey",
      ca.keyfile,
      "-CAcreateserial",
      "-out",
      certfile,
      "-days",
      "2",
      "-sha256",
      "-extfile",
      extfile,
      "-extensions",
      "v3_req"
    ])

    %{keyfile: keyfile, certfile: certfile}
  end

  defp extension_config(san_entries, extended_key_usage) do
    # Omit the subjectAltName line entirely when no SAN entries are given
    # (client certs).
    san_block =
      case san_entries do
        [] -> ""
        entries -> "subjectAltName = #{Enum.join(entries, ",")}\n"
      end

    """
    [v3_req]
    basicConstraints = CA:FALSE
    keyUsage = digitalSignature,keyEncipherment
    extendedKeyUsage = #{extended_key_usage}
    #{san_block}
    """
  end

  # Reads the first PEM entry and returns its DER bytes.
  defp der_cert!(certfile) do
    certfile
    |> File.read!()
    |> :public_key.pem_decode()
    |> List.first()
    |> elem(1)
  end

  defp spki_pin(cert_der) do
    cert = :public_key.pkix_decode_cert(cert_der, :plain)
    # TBSCertificate record; element 7 is taken as subjectPublicKeyInfo —
    # mirrors the computation in Parrhesia.Sync.TLS.
    spki = cert |> elem(1) |> elem(7)

    :public_key.der_encode(:SubjectPublicKeyInfo, spki)
    |> then(&:crypto.hash(:sha256, &1))
    |> Base.encode64()
  end

  # Resolve the openssl binary from PATH instead of assuming /usr/bin,
  # which breaks on macOS (Homebrew) and Nix-style layouts; keep the old
  # absolute path as a last-resort fallback. Args are passed as a list
  # (no shell), so no quoting/injection concerns.
  defp openssl!(args) do
    openssl = System.find_executable("openssl") || "/usr/bin/openssl"

    case System.cmd(openssl, args, stderr_to_stdout: true) do
      {output, 0} -> output
      {output, status} -> raise "openssl failed with status #{status}: #{output}"
    end
  end
end

File diff suppressed because it is too large Load Diff

View File

@@ -1,29 +1,74 @@
defmodule Parrhesia.Web.Endpoint do defmodule Parrhesia.Web.Endpoint do
@moduledoc """ @moduledoc """
Supervision entrypoint for WS/HTTP ingress. Supervision entrypoint for configured ingress listeners.
""" """
use Supervisor use Supervisor
def start_link(init_arg \\ []) do alias Parrhesia.Web.Listener
Supervisor.start_link(__MODULE__, init_arg, name: __MODULE__)
def start_link(opts \\ []) do
name = Keyword.get(opts, :name, __MODULE__)
listeners = Keyword.get(opts, :listeners, :configured)
Supervisor.start_link(__MODULE__, listeners, name: name)
end
@spec reload_listener(Supervisor.supervisor(), atom()) :: :ok | {:error, term()}
def reload_listener(supervisor \\ __MODULE__, listener_id) when is_atom(listener_id) do
with :ok <- Supervisor.terminate_child(supervisor, {:listener, listener_id}),
{:ok, _pid} <- Supervisor.restart_child(supervisor, {:listener, listener_id}) do
:ok
else
{:error, :not_found} = error -> error
{:error, _reason} = error -> error
other -> other
end
end
@spec reload_all(Supervisor.supervisor()) :: :ok | {:error, term()}
def reload_all(supervisor \\ __MODULE__) do
supervisor
|> Supervisor.which_children()
|> Enum.filter(fn {id, _pid, _type, _modules} ->
match?({:listener, _listener_id}, id)
end)
|> Enum.reduce_while(:ok, fn {{:listener, listener_id}, _pid, _type, _modules}, :ok ->
case reload_listener(supervisor, listener_id) do
:ok -> {:cont, :ok}
{:error, _reason} = error -> {:halt, error}
end
end)
end end
@impl true @impl true
def init(init_arg) do def init(listeners) do
children = [ children =
{Bandit, bandit_options(init_arg)} listeners(listeners)
] |> Enum.map(fn listener ->
%{
id: {:listener, listener.id},
start: {Bandit, :start_link, [Listener.bandit_options(listener)]}
}
end)
Supervisor.init(children, strategy: :one_for_one) Supervisor.init(children, strategy: :one_for_one)
end end
defp bandit_options(overrides) do defp listeners(:configured), do: Listener.all()
configured = Application.get_env(:parrhesia, __MODULE__, [])
configured defp listeners(listeners) when is_list(listeners) do
|> Keyword.merge(overrides) Enum.map(listeners, fn
|> Keyword.put_new(:scheme, :http) {id, listener} when is_atom(id) and is_map(listener) ->
|> Keyword.put_new(:plug, Parrhesia.Web.Router) Listener.from_opts(listener: Map.put_new(listener, :id, id))
listener ->
Listener.from_opts(listener: listener)
end)
end
defp listeners(listeners) when is_map(listeners) do
listeners
|> Enum.map(fn {id, listener} -> {id, listener} end)
|> listeners()
end end
end end

View File

@@ -0,0 +1,662 @@
defmodule Parrhesia.Web.Listener do
@moduledoc false
import Bitwise
alias Parrhesia.Protocol.Filter
alias Parrhesia.Web.TLS
@private_cidrs [
"127.0.0.0/8",
"10.0.0.0/8",
"172.16.0.0/12",
"192.168.0.0/16",
"169.254.0.0/16",
"::1/128",
"fc00::/7",
"fe80::/10"
]
@type t :: %{
id: atom(),
enabled: boolean(),
bind: %{ip: tuple(), port: pos_integer()},
transport: map(),
proxy: map(),
network: map(),
features: map(),
auth: map(),
baseline_acl: map(),
bandit_options: keyword()
}
@spec all() :: [t()]
def all do
:parrhesia
|> Application.get_env(:listeners, %{})
|> normalize_listeners()
|> Enum.filter(& &1.enabled)
end
@spec from_opts(keyword() | map()) :: t()
def from_opts(opts) when is_list(opts) do
opts
|> Keyword.get(:listener, default_listener())
|> normalize_listener()
end
def from_opts(opts) when is_map(opts) do
opts
|> Map.get(:listener, default_listener())
|> normalize_listener()
end
@spec from_conn(Plug.Conn.t()) :: t()
def from_conn(conn) do
conn.private
|> Map.get(:parrhesia_listener, default_listener())
|> normalize_listener()
end
@spec put_conn(Plug.Conn.t(), keyword()) :: Plug.Conn.t()
def put_conn(conn, opts) when is_list(opts) do
Plug.Conn.put_private(conn, :parrhesia_listener, from_opts(opts))
end
@spec feature_enabled?(t(), atom()) :: boolean()
def feature_enabled?(listener, feature) when is_map(listener) and is_atom(feature) do
listener
|> Map.get(:features, %{})
|> Map.get(feature, %{})
|> Map.get(:enabled, false)
end
@spec nip42_required?(t()) :: boolean()
def nip42_required?(listener), do: listener.auth.nip42_required
@spec admin_auth_required?(t()) :: boolean()
def admin_auth_required?(listener), do: listener.auth.nip98_required_for_admin
@spec trusted_proxies(t()) :: [String.t()]
def trusted_proxies(listener) do
listener.proxy.trusted_cidrs
end
@spec trusted_proxy_request?(t(), Plug.Conn.t()) :: boolean()
def trusted_proxy_request?(listener, conn) do
TLS.trusted_proxy_request?(conn, trusted_proxies(listener))
end
@spec remote_ip_allowed?(t(), tuple() | String.t() | nil) :: boolean()
def remote_ip_allowed?(listener, remote_ip) do
access_allowed?(listener.network, remote_ip)
end
@spec authorize_transport_request(t(), Plug.Conn.t()) ::
{:ok, map() | nil} | {:error, atom()}
def authorize_transport_request(listener, conn) do
TLS.authorize_request(listener.transport.tls, conn, trusted_proxy_request?(listener, conn))
end
@spec request_scheme(t(), Plug.Conn.t()) :: :http | :https
def request_scheme(listener, conn) do
TLS.request_scheme(listener.transport.tls, conn, trusted_proxy_request?(listener, conn))
end
@spec request_host(t(), Plug.Conn.t()) :: String.t()
def request_host(listener, conn) do
TLS.request_host(conn, trusted_proxy_request?(listener, conn))
end
@spec request_port(t(), Plug.Conn.t()) :: non_neg_integer()
def request_port(listener, conn) do
scheme = request_scheme(listener, conn)
TLS.request_port(listener.transport.tls, conn, trusted_proxy_request?(listener, conn), scheme)
end
@spec metrics_allowed?(t(), Plug.Conn.t()) :: boolean()
def metrics_allowed?(listener, conn) do
metrics = Map.get(listener.features, :metrics, %{})
feature_enabled?(listener, :metrics) and
access_allowed?(Map.get(metrics, :access, %{}), conn.remote_ip) and
metrics_token_allowed?(metrics, conn)
end
# Builds the websocket URL (ws:// or wss://) for this listener, honoring
# proxy-derived scheme/host/port and omitting scheme-default ports.
@spec relay_url(t(), Plug.Conn.t()) :: String.t()
def relay_url(listener, conn) do
  scheme = request_scheme(listener, conn)
  host = request_host(listener, conn)
  port = request_port(listener, conn)

  websocket_scheme = if scheme == :https, do: "wss", else: "ws"
  port_segment = if default_http_port?(scheme, port), do: "", else: ":#{port}"

  websocket_scheme <> "://" <> host <> port_segment <> conn.request_path
end
# NIP-42: whether clients must AUTH before using the relay on this
# listener (same flag as nip42_required?/1, exposed under the name used
# by the NIP-11 document builder).
@spec relay_auth_required?(t()) :: boolean()
def relay_auth_required?(listener), do: listener.auth.nip42_required
# Evaluates the listener's baseline read ACL against the requested
# filters.
@spec authorize_read(t(), [map()]) :: :ok | {:error, :listener_read_not_allowed}
def authorize_read(listener, filters) when is_list(filters) do
  listener.baseline_acl.read
  |> evaluate_rules(filters, :read)
  |> acl_verdict(:listener_read_not_allowed)
end

# Evaluates the listener's baseline write ACL against a single event.
@spec authorize_write(t(), map()) :: :ok | {:error, :listener_write_not_allowed}
def authorize_write(listener, event) when is_map(event) do
  listener.baseline_acl.write
  |> evaluate_rules(event, :write)
  |> acl_verdict(:listener_write_not_allowed)
end

# Maps an ACL evaluation verdict onto the caller-facing result shape.
defp acl_verdict(:allow, _error_reason), do: :ok
defp acl_verdict(:deny, error_reason), do: {:error, error_reason}
# Builds the Bandit server options for this listener.
#
# When TLS runs in :server or :mutual mode the scheme is forced to
# :https regardless of the configured transport scheme. Note that
# TLS.bandit_options/1 also emits a `scheme:` entry for those modes, so
# the keyword list can contain the key twice — both occurrences agree
# (:https). NOTE(review): assumes Bandit reads the first occurrence;
# confirm if the TLS options ever diverge.
@spec bandit_options(t()) :: keyword()
def bandit_options(listener) do
  scheme =
    case listener.transport.tls.mode do
      mode when mode in [:server, :mutual] -> :https
      _other -> listener.transport.scheme
    end

  [
    ip: listener.bind.ip,
    port: listener.bind.port,
    scheme: scheme,
    plug: {Parrhesia.Web.ListenerPlug, listener: listener}
  ] ++ TLS.bandit_options(listener.transport.tls) ++ listener.bandit_options
end
# Normalizes a configured listener collection. A list may mix bare
# listener maps and `{id, listener}` tuples and keeps its order; a map
# of id => listener is normalized and then sorted by the resulting id.
defp normalize_listeners(listeners) when is_list(listeners) do
  for entry <- listeners do
    case entry do
      {id, listener} when is_atom(id) and is_map(listener) ->
        listener |> Map.put(:id, id) |> normalize_listener()

      listener when is_map(listener) ->
        normalize_listener(listener)
    end
  end
end

defp normalize_listeners(listeners) when is_map(listeners) do
  normalized =
    Enum.map(listeners, fn {id, listener} ->
      listener |> Map.put(:id, id) |> normalize_listener()
    end)

  Enum.sort_by(normalized, fn listener -> listener.id end)
end
# Normalizes a single raw listener map (atom or string keys accepted)
# into the canonical listener shape used throughout this module. Each
# section falls back to a safe default when absent or malformed.
defp normalize_listener(listener) when is_map(listener) do
  id = normalize_atom(fetch_value(listener, :id), :listener)
  enabled = normalize_boolean(fetch_value(listener, :enabled), true)
  bind = normalize_bind(fetch_value(listener, :bind), listener)
  transport = normalize_transport(fetch_value(listener, :transport))
  proxy = normalize_proxy(fetch_value(listener, :proxy))
  network = normalize_access(fetch_value(listener, :network), %{allow_all?: true})
  features = normalize_features(fetch_value(listener, :features))
  auth = normalize_auth(fetch_value(listener, :auth))
  baseline_acl = normalize_baseline_acl(fetch_value(listener, :baseline_acl))
  bandit_options = normalize_bandit_options(fetch_value(listener, :bandit_options))

  %{
    id: id,
    enabled: enabled,
    bind: bind,
    transport: transport,
    proxy: proxy,
    network: network,
    features: features,
    auth: auth,
    baseline_acl: baseline_acl,
    bandit_options: bandit_options
  }
end

# Anything that is not a map collapses to the configured (or hard-coded
# fallback) default listener.
defp normalize_listener(_listener), do: default_listener()
# Normalizes the bind section (listen address + port, default 4413).
defp normalize_bind(bind, listener) when is_map(bind) do
  %{
    ip: normalize_ip(fetch_value(bind, :ip), default_bind_ip(listener)),
    port: normalize_port(fetch_value(bind, :port), 4413)
  }
end

# Without a :bind map, legacy flat :ip/:port keys on the listener itself
# are honored.
defp normalize_bind(_bind, listener) do
  %{
    ip: default_bind_ip(listener),
    port: normalize_port(fetch_value(listener, :port), 4413)
  }
end

# Top-level :ip on the listener, defaulting to all IPv4 interfaces.
defp default_bind_ip(listener) do
  normalize_ip(fetch_value(listener, :ip), {0, 0, 0, 0})
end
# Normalizes the transport section; the TLS sub-config is delegated to
# TLS.normalize_config/2, which derives a default TLS mode from the
# scheme.
defp normalize_transport(transport) when is_map(transport) do
  scheme = normalize_scheme(fetch_value(transport, :scheme), :http)

  %{
    scheme: scheme,
    tls: TLS.normalize_config(fetch_value(transport, :tls), scheme)
  }
end

defp normalize_transport(_transport), do: %{scheme: :http, tls: TLS.default_config()}

# Proxy trust settings. X-Forwarded-For handling defaults to on, but it
# only takes effect for peers inside trusted_cidrs (enforced by the
# RemoteIp plug).
defp normalize_proxy(proxy) when is_map(proxy) do
  %{
    trusted_cidrs: normalize_string_list(fetch_value(proxy, :trusted_cidrs)),
    honor_x_forwarded_for: normalize_boolean(fetch_value(proxy, :honor_x_forwarded_for), true)
  }
end

defp normalize_proxy(_proxy), do: %{trusted_cidrs: [], honor_x_forwarded_for: true}
# Normalizes the feature toggle section. `nostr` and `admin` default to
# enabled; `metrics` defaults to disabled.
defp normalize_features(features) when is_map(features) do
  %{
    nostr: normalize_simple_feature(fetch_value(features, :nostr), true),
    admin: normalize_simple_feature(fetch_value(features, :admin), true),
    metrics: normalize_metrics_feature(fetch_value(features, :metrics))
  }
end

defp normalize_features(_features) do
  %{
    nostr: %{enabled: true},
    admin: %{enabled: true},
    # Fix: include :auth_token so the default metrics map has the same
    # shape as every other path (normalize_metrics_feature/1 and
    # fallback_listener/0 both carry it, and metrics_token_allowed?/2
    # reads metrics.auth_token assertively).
    metrics: %{enabled: false, auth_token: nil, access: default_feature_access()}
  }
end

# A simple feature accepts either a map with an :enabled flag or a bare
# boolean; anything else takes `default_enabled`.
defp normalize_simple_feature(feature, default_enabled) when is_map(feature) do
  %{enabled: normalize_boolean(fetch_value(feature, :enabled), default_enabled)}
end

defp normalize_simple_feature(feature, _default_enabled) when is_boolean(feature) do
  %{enabled: feature}
end

defp normalize_simple_feature(_feature, default_enabled), do: %{enabled: default_enabled}

# The metrics feature additionally carries an optional bearer token and
# an access policy (default: allow all, not restricted to private nets).
defp normalize_metrics_feature(feature) when is_map(feature) do
  %{
    enabled: normalize_boolean(fetch_value(feature, :enabled), false),
    auth_token: normalize_optional_string(fetch_value(feature, :auth_token)),
    access:
      normalize_access(fetch_value(feature, :access), %{
        private_networks_only?: false,
        allow_all?: true
      })
  }
end

defp normalize_metrics_feature(feature) when is_boolean(feature) do
  %{enabled: feature, auth_token: nil, access: default_feature_access()}
end

defp normalize_metrics_feature(_feature),
  do: %{enabled: false, auth_token: nil, access: default_feature_access()}

# Access policy used when the metrics feature does not configure one:
# no token gate by itself, reachable from anywhere.
defp default_feature_access do
  %{public?: false, private_networks_only?: false, allow_cidrs: [], allow_all?: true}
end
# Normalizes the auth section: NIP-42 client auth defaults to off,
# NIP-98 admin auth defaults to on.
defp normalize_auth(auth) when is_map(auth) do
  %{
    nip42_required: normalize_boolean(fetch_value(auth, :nip42_required), false),
    nip98_required_for_admin:
      normalize_boolean(fetch_value(auth, :nip98_required_for_admin), true)
  }
end

defp normalize_auth(_auth), do: %{nip42_required: false, nip98_required_for_admin: true}
# Normalizes the baseline ACL into read/write rule lists (empty lists
# when absent or malformed, which evaluate_rules/3 treats as allow-all).
defp normalize_baseline_acl(acl) when is_map(acl) do
  %{
    read: normalize_baseline_rules(fetch_value(acl, :read)),
    write: normalize_baseline_rules(fetch_value(acl, :write))
  }
end

defp normalize_baseline_acl(_acl), do: %{read: [], write: []}

# Keeps only rules carrying a map-valued :match filter; anything else is
# silently dropped. Filter keys are stringified so they line up with the
# string-keyed filters the evaluation code expects.
defp normalize_baseline_rules(rules) when is_list(rules) do
  Enum.flat_map(rules, fn
    %{match: match} = rule when is_map(match) ->
      [
        %{
          action: normalize_rule_action(fetch_value(rule, :action)),
          match: normalize_filter_map(match)
        }
      ]

    _other ->
      []
  end)
end

defp normalize_baseline_rules(_rules), do: []
# Only :deny / "deny" deny; every other action value (including nil)
# normalizes to :allow.
defp normalize_rule_action(action) when action in [:deny, "deny"], do: :deny
defp normalize_rule_action(_action), do: :allow

# Extra Bandit options must be a keyword-style list; anything else is
# dropped.
defp normalize_bandit_options(options) when is_list(options), do: options
defp normalize_bandit_options(_options), do: []
# Normalizes an access policy map. Both bare (:public) and
# question-mark (:public?) key spellings are accepted — the first key
# actually present wins; `defaults` supplies the per-call fallback
# flags.
defp normalize_access(access, defaults) when is_map(access) do
  %{
    public?:
      normalize_boolean(
        first_present(access, [:public, :public?]),
        Map.get(defaults, :public?, false)
      ),
    private_networks_only?:
      normalize_boolean(
        first_present(access, [:private_networks_only, :private_networks_only?]),
        Map.get(defaults, :private_networks_only?, false)
      ),
    allow_cidrs: normalize_string_list(fetch_value(access, :allow_cidrs)),
    allow_all?:
      normalize_boolean(
        first_present(access, [:allow_all, :allow_all?]),
        Map.get(defaults, :allow_all?, false)
      )
  }
end

# Non-map input: take the defaults wholesale (no CIDR allow-list).
defp normalize_access(_access, defaults) do
  %{
    public?: Map.get(defaults, :public?, false),
    private_networks_only?: Map.get(defaults, :private_networks_only?, false),
    allow_cidrs: [],
    allow_all?: Map.get(defaults, :allow_all?, false)
  }
end
# Access policy evaluation — clause order is significant:
#   1. public? grants everything;
#   2. a non-empty allow_cidrs list is authoritative (it takes
#      precedence over private_networks_only?);
#   3. private_networks_only? restricts to @private_cidrs;
#   4. otherwise allow_all? decides.
# Non-tuple remote IPs (strings, nil) never match a CIDR (ip_to_int
# returns :error for them), so they are only admitted by clauses 1/4.
defp access_allowed?(%{public?: true}, _remote_ip), do: true

defp access_allowed?(%{allow_cidrs: allow_cidrs}, remote_ip) when allow_cidrs != [] do
  Enum.any?(allow_cidrs, &ip_in_cidr?(remote_ip, &1))
end

defp access_allowed?(%{private_networks_only?: true}, remote_ip) do
  Enum.any?(@private_cidrs, &ip_in_cidr?(remote_ip, &1))
end

defp access_allowed?(%{allow_all?: allow_all?}, _remote_ip), do: allow_all?
# Checks the optional bearer-token gate for the metrics feature. With no
# configured auth_token the gate is open; otherwise the request's
# Authorization header must carry the configured token, with or without
# a "Bearer" scheme prefix (scheme names are case-insensitive per
# RFC 7235).
defp metrics_token_allowed?(metrics, conn) do
  case metrics.auth_token do
    nil ->
      true

    token ->
      conn
      |> Plug.Conn.get_req_header("authorization")
      |> List.first()
      |> normalize_authorization_header()
      |> token_matches?(token)
  end
end

# Constant-time token comparison so the configured token cannot be
# recovered through timing differences. :crypto.hash_equals/2 requires
# equal byte sizes, so mismatched lengths are rejected up front.
defp token_matches?(nil, _token), do: false

defp token_matches?(candidate, token) when is_binary(candidate) and is_binary(token) do
  byte_size(candidate) == byte_size(token) and :crypto.hash_equals(candidate, token)
end

# Strips a bearer scheme prefix; a bare header value is treated as the
# token itself, and a missing header becomes nil.
defp normalize_authorization_header("Bearer " <> token), do: token
defp normalize_authorization_header("bearer " <> token), do: token
defp normalize_authorization_header(token) when is_binary(token), do: token
defp normalize_authorization_header(_header), do: nil
# Evaluates baseline ACL rules in order; the first matching rule wins.
# With no rules at all everything is allowed. When no rule matches, the
# presence of any :allow rule flips the default to deny (allow-list
# semantics); a pure deny-list falls through to :allow.
defp evaluate_rules([], _subject, _mode), do: :allow

defp evaluate_rules(rules, subject, mode) do
  has_allow_rules? = Enum.any?(rules, &(&1.action == :allow))

  case Enum.find(rules, &rule_matches?(&1, subject, mode)) do
    %{action: :deny} -> :deny
    %{action: :allow} -> :allow
    nil when has_allow_rules? -> :deny
    nil -> :allow
  end
end

# :read matches when any requested filter overlaps the rule's filter;
# :write matches when the event itself satisfies the rule's filter
# (delegated to Filter.matches_filter?/2).
defp rule_matches?(rule, filters, :read) when is_list(filters) do
  Enum.any?(filters, &filters_overlap?(&1, rule.match))
end

defp rule_matches?(rule, event, :write) when is_map(event) do
  Filter.matches_filter?(event, rule.match)
end

defp rule_matches?(_rule, _subject, _mode), do: false
# True when two filters could both match some event: every shared
# constraint key must be compatible and the since/until windows must
# intersect. limit/search are ignored; since/until are handled by the
# range check instead of key comparison.
defp filters_overlap?(left, right) when is_map(left) and is_map(right) do
  ignored = ["limit", "search", "since", "until"]

  comparable_keys =
    (Map.keys(left) ++ Map.keys(right))
    |> Enum.uniq()
    |> Enum.reject(&(&1 in ignored))

  constraints_compatible? =
    Enum.all?(comparable_keys, fn key ->
      filter_constraint_compatible?(Map.get(left, key), Map.get(right, key))
    end)

  constraints_compatible? and filter_ranges_overlap?(left, right)
end

# A missing constraint is unconstrained; two list constraints must share
# at least one element; scalars must be equal.
defp filter_constraint_compatible?(left, right) do
  cond do
    is_nil(left) or is_nil(right) ->
      true

    is_list(left) and is_list(right) ->
      not MapSet.disjoint?(MapSet.new(left), MapSet.new(right))

    true ->
      left == right
  end
end

# The intersection of both since/until windows must be non-empty;
# missing bounds default to the widest possible range.
defp filter_ranges_overlap?(left, right) do
  max_timestamp = 9_223_372_036_854_775_807

  lower = max(Map.get(left, "since", 0), Map.get(right, "since", 0))
  upper = min(Map.get(left, "until", max_timestamp), Map.get(right, "until", max_timestamp))

  lower <= upper
end
# Default listener for non-map config entries: prefer the configured
# :public listener, then the first configured listener, then the
# hard-coded fallback.
defp default_listener do
  case configured_default_listener() do
    nil -> fallback_listener()
    listener -> normalize_listener(listener)
  end
end

defp configured_default_listener do
  listeners = Application.get_env(:parrhesia, :listeners, %{})

  case fetch_public_listener(listeners) do
    nil -> first_configured_listener(listeners)
    listener -> listener
  end
end

# Looks up the listener registered under the :public id in either the
# map or keyword config form.
defp fetch_public_listener(%{public: listener}) when is_map(listener),
  do: Map.put_new(listener, :id, :public)

defp fetch_public_listener(listeners) when is_list(listeners) do
  case Keyword.fetch(listeners, :public) do
    {:ok, listener} when is_map(listener) -> Map.put_new(listener, :id, :public)
    _other -> nil
  end
end

defp fetch_public_listener(_listeners), do: nil

# First listener by config order (list form) or by sorted key (map form,
# whose native enumeration order is undefined).
defp first_configured_listener(listeners) when is_list(listeners) do
  case listeners do
    [{id, listener} | _rest] when is_atom(id) and is_map(listener) ->
      Map.put_new(listener, :id, id)

    _other ->
      nil
  end
end

defp first_configured_listener(listeners) when is_map(listeners) and map_size(listeners) > 0 do
  {id, listener} = Enum.at(Enum.sort_by(listeners, fn {key, _value} -> key end), 0)
  Map.put_new(listener, :id, id)
end

defp first_configured_listener(_listeners), do: nil
# Hard-coded last-resort listener: plain HTTP on 0.0.0.0:4413 with
# nostr/admin enabled, metrics disabled, allow-all network policy, no
# baseline ACL rules, and NIP-98 required for admin calls.
defp fallback_listener do
  %{
    id: :public,
    enabled: true,
    bind: %{ip: {0, 0, 0, 0}, port: 4413},
    transport: %{scheme: :http, tls: TLS.default_config()},
    proxy: %{trusted_cidrs: [], honor_x_forwarded_for: true},
    network: %{public?: false, private_networks_only?: false, allow_cidrs: [], allow_all?: true},
    features: %{
      nostr: %{enabled: true},
      admin: %{enabled: true},
      metrics: %{enabled: false, auth_token: nil, access: default_feature_access()}
    },
    auth: %{nip42_required: false, nip98_required_for_admin: true},
    baseline_acl: %{read: [], write: []},
    bandit_options: []
  }
end
# Fetches `key` from `map`, falling back to the string spelling of an
# atom key; nil when neither spelling is present.
defp fetch_value(map, key) when is_map(map) do
  case Map.fetch(map, key) do
    {:ok, value} ->
      value

    :error when is_atom(key) ->
      Map.get(map, Atom.to_string(key))

    :error ->
      nil
  end
end

# Returns the value of the first key (atom or string spelling) that is
# actually PRESENT in `map` — even when that value is nil — or nil when
# none of the keys exist.
defp first_present(map, keys) do
  found =
    Enum.find_value(keys, fn key ->
      case Map.fetch(map, key) do
        {:ok, value} ->
          {:present, value}

        :error when is_atom(key) ->
          case Map.fetch(map, Atom.to_string(key)) do
            {:ok, value} -> {:present, value}
            :error -> nil
          end

        :error ->
          nil
      end
    end)

  case found do
    {:present, value} -> value
    nil -> nil
  end
end
# Booleans pass through; anything else (including nil) takes the default.
defp normalize_boolean(value, _default) when is_boolean(value), do: value
defp normalize_boolean(_value, default), do: default

# Non-empty binaries pass through; everything else becomes nil.
defp normalize_optional_string(""), do: nil
defp normalize_optional_string(value) when is_binary(value), do: value
defp normalize_optional_string(_value), do: nil

# Keeps only non-empty binaries; non-list input collapses to [].
defp normalize_string_list(values) when is_list(values) do
  for value <- values, is_binary(value), value != "", do: value
end

defp normalize_string_list(_values), do: []

# Accepts :inet-style IPv4 (4-tuple) or IPv6 (8-tuple) addresses.
defp normalize_ip(ip, _default) when is_tuple(ip) and tuple_size(ip) in [4, 8], do: ip
defp normalize_ip(_ip, default), do: default

# Non-negative integers pass through (0 is explicitly permitted);
# everything else takes the default.
defp normalize_port(port, _default) when is_integer(port) and port >= 0, do: port
defp normalize_port(_port, default), do: default

# Only :https / "https" select HTTPS; everything else takes the default.
defp normalize_scheme(scheme, _default) when scheme in [:https, "https"], do: :https
defp normalize_scheme(_scheme, default), do: default

# Atoms pass through; anything else takes the default.
defp normalize_atom(value, _default) when is_atom(value), do: value
defp normalize_atom(_value, default), do: default

# Stringifies atom keys so filters compare cleanly against the
# string-keyed filters used elsewhere in this module.
defp normalize_filter_map(filter) when is_map(filter) do
  for {key, value} <- filter, into: %{} do
    case key do
      key when is_atom(key) -> {Atom.to_string(key), value}
      key -> {key, value}
    end
  end
end

defp normalize_filter_map(filter), do: filter

# Ports implied by the scheme, which may be omitted from URLs.
defp default_http_port?(scheme, port), do: {scheme, port} in [{:http, 80}, {:https, 443}]
# True when `ip` (an :inet 4- or 8-tuple) lies inside `cidr`; any parse
# failure, out-of-range prefix, or IPv4/IPv6 family mismatch yields
# false.
defp ip_in_cidr?(ip, cidr) do
  with {network, prefix_len} <- parse_cidr(cidr),
       {:ok, ip_size, ip_value} <- ip_to_int(ip),
       {:ok, network_size, network_value} <- ip_to_int(network),
       true <- ip_size == network_size,
       true <- prefix_len >= 0,
       true <- prefix_len <= ip_size do
    mask = network_mask(ip_size, prefix_len)
    (ip_value &&& mask) == (network_value &&& mask)
  else
    _other -> false
  end
end

# Parses "addr/len" into {ip_tuple, len}; a bare address gets the
# full-length prefix (/32 or /128). Returns :error on malformed input.
defp parse_cidr(cidr) when is_binary(cidr) do
  case String.split(cidr, "/", parts: 2) do
    [address, prefix_str] ->
      with {prefix_len, ""} <- Integer.parse(prefix_str),
           {:ok, ip} <- :inet.parse_address(String.to_charlist(address)) do
        {ip, prefix_len}
      else
        _other -> :error
      end

    [address] ->
      case :inet.parse_address(String.to_charlist(address)) do
        {:ok, {_, _, _, _} = ip} -> {ip, 32}
        {:ok, {_, _, _, _, _, _, _, _} = ip} -> {ip, 128}
        _other -> :error
      end

    _other ->
      :error
  end
end

defp parse_cidr(_cidr), do: :error

# Packs an address tuple into {:ok, bit_width, integer}. IPv4 tuples
# hold 8-bit octets; IPv6 tuples hold 16-bit segments, hence the
# different shift strides.
defp ip_to_int({a, b, c, d}) do
  {:ok, 32, (a <<< 24) + (b <<< 16) + (c <<< 8) + d}
end

defp ip_to_int({a, b, c, d, e, f, g, h}) do
  {:ok, 128,
   (a <<< 112) + (b <<< 96) + (c <<< 80) + (d <<< 64) + (e <<< 48) + (f <<< 32) + (g <<< 16) +
     h}
end

defp ip_to_int(_ip), do: :error

# Left-aligned bitmask of `prefix_len` ones in a `size`-bit word; the
# zero-prefix case is special-cased to avoid a negative shift.
defp network_mask(_size, 0), do: 0

defp network_mask(size, prefix_len) do
  all_ones = (1 <<< size) - 1
  all_ones <<< (size - prefix_len)
end
end

View File

@@ -0,0 +1,14 @@
defmodule Parrhesia.Web.ListenerPlug do
  @moduledoc false

  alias Parrhesia.Web.Listener
  alias Parrhesia.Web.Router

  # Plug entry point: options are passed through untouched to call/2.
  def init(opts), do: opts

  # Attaches the listener (from opts) to the conn, then hands the
  # request to the shared router.
  def call(conn, opts) do
    conn_with_listener = Listener.put_conn(conn, opts)
    Router.call(conn_with_listener, [])
  end
end

View File

@@ -5,19 +5,22 @@ defmodule Parrhesia.Web.Management do
import Plug.Conn import Plug.Conn
alias Parrhesia.Auth.Nip98 alias Parrhesia.API.Admin
alias Parrhesia.Storage alias Parrhesia.API.Auth
alias Parrhesia.Web.Listener
@spec handle(Plug.Conn.t()) :: Plug.Conn.t() @spec handle(Plug.Conn.t(), keyword()) :: Plug.Conn.t()
def handle(conn) do def handle(conn, opts \\ []) do
full_url = full_request_url(conn) full_url = full_request_url(conn)
method = conn.method method = conn.method
authorization = get_req_header(conn, "authorization") |> List.first() authorization = get_req_header(conn, "authorization") |> List.first()
auth_required? = admin_auth_required?(opts)
with {:ok, auth_event} <- Nip98.validate_authorization_header(authorization, method, full_url), with {:ok, auth_context} <-
maybe_validate_nip98(auth_required?, authorization, method, full_url),
{:ok, payload} <- parse_payload(conn.body_params), {:ok, payload} <- parse_payload(conn.body_params),
{:ok, result} <- execute_method(payload), {:ok, result} <- execute_method(payload, opts),
:ok <- append_audit_log(auth_event, payload, result) do :ok <- append_audit_log(auth_context, payload, result) do
send_json(conn, 200, %{"ok" => true, "result" => result}) send_json(conn, 200, %{"ok" => true, "result" => result})
else else
{:error, :missing_authorization} -> {:error, :missing_authorization} ->
@@ -46,6 +49,14 @@ defmodule Parrhesia.Web.Management do
end end
end end
defp maybe_validate_nip98(true, authorization, method, url) do
Auth.validate_nip98(authorization, method, url)
end
defp maybe_validate_nip98(false, _authorization, _method, _url) do
{:ok, %{pubkey: nil}}
end
defp parse_payload(%{"method" => method} = payload) when is_binary(method) do defp parse_payload(%{"method" => method} = payload) when is_binary(method) do
params = Map.get(payload, "params", %{}) params = Map.get(payload, "params", %{})
@@ -58,14 +69,14 @@ defmodule Parrhesia.Web.Management do
defp parse_payload(_payload), do: {:error, :invalid_payload} defp parse_payload(_payload), do: {:error, :invalid_payload}
defp execute_method(payload) do defp execute_method(payload, opts) do
Storage.admin().execute(%{}, payload.method, payload.params) Admin.execute(payload.method, payload.params, opts)
end end
defp append_audit_log(auth_event, payload, result) do defp append_audit_log(auth_context, payload, result) do
Storage.admin().append_audit_log(%{}, %{ Parrhesia.Storage.admin().append_audit_log(%{}, %{
method: payload.method, method: payload.method,
actor_pubkey: Map.get(auth_event, "pubkey"), actor_pubkey: auth_context.pubkey,
params: payload.params, params: payload.params,
result: normalize_result(result) result: normalize_result(result)
}) })
@@ -84,14 +95,15 @@ defmodule Parrhesia.Web.Management do
end end
defp full_request_url(conn) do defp full_request_url(conn) do
scheme = Atom.to_string(conn.scheme) listener = Listener.from_conn(conn)
host = conn.host scheme = Listener.request_scheme(listener, conn)
port = conn.port host = Listener.request_host(listener, conn)
port = Listener.request_port(listener, conn)
port_suffix = port_suffix =
cond do cond do
conn.scheme == :http and port == 80 -> "" scheme == :http and port == 80 -> ""
conn.scheme == :https and port == 443 -> "" scheme == :https and port == 443 -> ""
true -> ":#{port}" true -> ":#{port}"
end end
@@ -99,4 +111,13 @@ defmodule Parrhesia.Web.Management do
"#{scheme}://#{host}#{port_suffix}#{conn.request_path}#{query_suffix}" "#{scheme}://#{host}#{port_suffix}#{conn.request_path}#{query_suffix}"
end end
defp admin_auth_required?(opts) do
opts
|> Keyword.get(:listener)
|> case do
%{auth: %{nip98_required_for_admin: value}} -> value
_other -> true
end
end
end end

View File

@@ -4,18 +4,13 @@ defmodule Parrhesia.Web.Metrics do
import Plug.Conn import Plug.Conn
alias Parrhesia.Telemetry alias Parrhesia.Telemetry
alias Parrhesia.Web.MetricsAccess alias Parrhesia.Web.Listener
@spec enabled_on_main_endpoint?() :: boolean()
def enabled_on_main_endpoint? do
:parrhesia
|> Application.get_env(:metrics, [])
|> Keyword.get(:enabled_on_main_endpoint, true)
end
@spec handle(Plug.Conn.t()) :: Plug.Conn.t() @spec handle(Plug.Conn.t()) :: Plug.Conn.t()
def handle(conn) do def handle(conn) do
if MetricsAccess.allowed?(conn) do listener = Listener.from_conn(conn)
if Listener.metrics_allowed?(listener, conn) do
body = TelemetryMetricsPrometheus.Core.scrape(Telemetry.prometheus_reporter()) body = TelemetryMetricsPrometheus.Core.scrape(Telemetry.prometheus_reporter())
conn conn

View File

@@ -3,16 +3,19 @@ defmodule Parrhesia.Web.RelayInfo do
NIP-11 relay information document. NIP-11 relay information document.
""" """
@spec document() :: map() alias Parrhesia.API.Identity
def document do alias Parrhesia.Web.Listener
@spec document(Listener.t()) :: map()
def document(listener) do
%{ %{
"name" => "Parrhesia", "name" => "Parrhesia",
"description" => "Nostr/Marmot relay", "description" => "Nostr/Marmot relay",
"pubkey" => nil, "pubkey" => relay_pubkey(),
"supported_nips" => supported_nips(), "supported_nips" => supported_nips(),
"software" => "https://git.teralink.net/self/parrhesia", "software" => "https://git.teralink.net/self/parrhesia",
"version" => Application.spec(:parrhesia, :vsn) |> to_string(), "version" => Application.spec(:parrhesia, :vsn) |> to_string(),
"limitation" => limitations() "limitation" => limitations(listener)
} }
end end
@@ -29,13 +32,13 @@ defmodule Parrhesia.Web.RelayInfo do
with_negentropy ++ [86, 98] with_negentropy ++ [86, 98]
end end
defp limitations do defp limitations(listener) do
%{ %{
"max_message_length" => Parrhesia.Config.get([:limits, :max_frame_bytes], 1_048_576), "max_message_length" => Parrhesia.Config.get([:limits, :max_frame_bytes], 1_048_576),
"max_subscriptions" => "max_subscriptions" =>
Parrhesia.Config.get([:limits, :max_subscriptions_per_connection], 32), Parrhesia.Config.get([:limits, :max_subscriptions_per_connection], 32),
"max_filters" => Parrhesia.Config.get([:limits, :max_filters_per_req], 16), "max_filters" => Parrhesia.Config.get([:limits, :max_filters_per_req], 16),
"auth_required" => Parrhesia.Config.get([:policies, :auth_required_for_reads], false) "auth_required" => Listener.relay_auth_required?(listener)
} }
end end
@@ -44,4 +47,11 @@ defmodule Parrhesia.Web.RelayInfo do
|> Application.get_env(:features, []) |> Application.get_env(:features, [])
|> Keyword.get(:nip_77_negentropy, true) |> Keyword.get(:nip_77_negentropy, true)
end end
defp relay_pubkey do
case Identity.get() do
{:ok, %{pubkey: pubkey}} -> pubkey
{:error, _reason} -> nil
end
end
end end

View File

@@ -0,0 +1,187 @@
defmodule Parrhesia.Web.RemoteIp do
  @moduledoc false

  import Bitwise

  alias Parrhesia.Web.Listener

  @spec init(term()) :: term()
  def init(opts), do: opts

  # Rewrites conn.remote_ip from forwarding headers, but only when the
  # direct peer is a trusted proxy AND the listener honors
  # X-Forwarded-For; otherwise the socket address is kept untouched.
  @spec call(Plug.Conn.t(), term()) :: Plug.Conn.t()
  def call(conn, _opts) do
    if trusted_proxy?(conn) and honor_x_forwarded_for?(conn) do
      case forwarded_ip(conn) do
        nil -> conn
        forwarded_ip -> %{conn | remote_ip: forwarded_ip}
      end
    else
      conn
    end
  end

  # Header precedence: x-forwarded-for, then RFC 7239 Forwarded, then
  # x-real-ip; nil when none yields a parseable address.
  defp forwarded_ip(conn) do
    conn
    |> x_forwarded_for_ip()
    |> fallback_forwarded_ip(conn)
    |> fallback_real_ip(conn)
  end

  defp x_forwarded_for_ip(conn) do
    conn
    |> Plug.Conn.get_req_header("x-forwarded-for")
    |> List.first()
    |> parse_x_forwarded_for()
  end

  defp fallback_forwarded_ip(nil, conn) do
    conn
    |> Plug.Conn.get_req_header("forwarded")
    |> List.first()
    |> parse_forwarded_header()
  end

  defp fallback_forwarded_ip(ip, _conn), do: ip

  defp fallback_real_ip(nil, conn) do
    conn
    |> Plug.Conn.get_req_header("x-real-ip")
    |> List.first()
    |> parse_ip_string()
  end

  defp fallback_real_ip(ip, _conn), do: ip

  defp trusted_proxy?(conn) do
    Enum.any?(trusted_proxies(conn), &ip_in_cidr?(conn.remote_ip, &1))
  end

  # Listener-level trusted proxies win; the global :trusted_proxies app
  # env is only consulted when the listener configures none.
  defp trusted_proxies(conn) do
    listener = Listener.from_conn(conn)

    case Listener.trusted_proxies(listener) do
      [] ->
        :parrhesia
        |> Application.get_env(:trusted_proxies, [])
        |> Enum.filter(&is_binary/1)

      trusted_proxies ->
        trusted_proxies
    end
  end

  defp honor_x_forwarded_for?(conn) do
    listener = Listener.from_conn(conn)
    listener.proxy.honor_x_forwarded_for
  end

  # First parseable entry of a comma-separated X-Forwarded-For list.
  defp parse_x_forwarded_for(value) when is_binary(value) do
    value
    |> String.split(",")
    |> Enum.map(&String.trim/1)
    |> Enum.find_value(&parse_ip_string/1)
  end

  defp parse_x_forwarded_for(_value), do: nil

  # RFC 7239 Forwarded header: scans elements for a for= pair.
  defp parse_forwarded_header(value) when is_binary(value) do
    value
    |> String.split(",")
    |> Enum.find_value(fn part ->
      part
      |> String.split(";")
      |> Enum.find_value(&forwarded_for_segment/1)
    end)
  end

  defp parse_forwarded_header(_value), do: nil

  # Extracts the address from one `for=...` segment, stripping optional
  # quotes and IPv6 brackets before parsing.
  defp forwarded_for_segment(segment) do
    case String.split(segment, "=", parts: 2) do
      [key, ip] ->
        if String.downcase(String.trim(key)) == "for" do
          ip
          |> String.trim()
          |> String.trim("\"")
          |> String.trim_leading("[")
          |> String.trim_trailing("]")
          |> parse_ip_string()
        end

      _other ->
        nil
    end
  end

  # Parses an IP from header text. The full (trimmed) value is tried
  # first so bare IPv6 addresses such as "2001:db8::1" parse correctly;
  # only when that fails is a trailing ":port" suffix stripped and the
  # remainder retried (the "1.2.3.4:56" form some proxies emit).
  # Fix: the previous version always split at the FIRST colon, which
  # broke every unbracketed IPv6 address.
  defp parse_ip_string(value) when is_binary(value) do
    trimmed = String.trim(value)

    case parse_address(trimmed) do
      nil ->
        trimmed
        |> String.split(":", parts: 2)
        |> List.first()
        |> parse_address()

      ip ->
        ip
    end
  end

  defp parse_ip_string(_value), do: nil

  defp parse_address(value) when is_binary(value) and value != "" do
    case :inet.parse_address(String.to_charlist(value)) do
      {:ok, ip} -> ip
      _other -> nil
    end
  end

  defp parse_address(_value), do: nil

  # --- CIDR helpers (mirrors the listener module's implementation) ---

  # True when `ip` (an :inet 4- or 8-tuple) lies inside `cidr`; parse
  # failures and IPv4/IPv6 family mismatches yield false.
  defp ip_in_cidr?(ip, cidr) do
    with {network, prefix_len} <- parse_cidr(cidr),
         {:ok, ip_size, ip_value} <- ip_to_int(ip),
         {:ok, network_size, network_value} <- ip_to_int(network),
         true <- ip_size == network_size,
         true <- prefix_len >= 0,
         true <- prefix_len <= ip_size do
      mask = network_mask(ip_size, prefix_len)
      (ip_value &&& mask) == (network_value &&& mask)
    else
      _other -> false
    end
  end

  # Parses "addr/len"; a bare address gets the full-length prefix.
  defp parse_cidr(cidr) when is_binary(cidr) do
    case String.split(cidr, "/", parts: 2) do
      [address, prefix_str] ->
        with {prefix_len, ""} <- Integer.parse(prefix_str),
             {:ok, ip} <- :inet.parse_address(String.to_charlist(address)) do
          {ip, prefix_len}
        else
          _other -> :error
        end

      [address] ->
        case :inet.parse_address(String.to_charlist(address)) do
          {:ok, {_, _, _, _} = ip} -> {ip, 32}
          {:ok, {_, _, _, _, _, _, _, _} = ip} -> {ip, 128}
          _other -> :error
        end

      _other ->
        :error
    end
  end

  defp parse_cidr(_cidr), do: :error

  # Packs an address tuple into {:ok, bit_width, integer}.
  defp ip_to_int({a, b, c, d}) do
    {:ok, 32, (a <<< 24) + (b <<< 16) + (c <<< 8) + d}
  end

  defp ip_to_int({a, b, c, d, e, f, g, h}) do
    {:ok, 128,
     (a <<< 112) + (b <<< 96) + (c <<< 80) + (d <<< 64) + (e <<< 48) + (f <<< 32) + (g <<< 16) +
       h}
  end

  defp ip_to_int(_ip), do: :error

  # Left-aligned bitmask of `prefix_len` ones in a `size`-bit word.
  defp network_mask(_size, 0), do: 0

  defp network_mask(size, prefix_len) do
    all_ones = (1 <<< size) - 1
    all_ones <<< (size - prefix_len)
  end
end

View File

@@ -3,17 +3,22 @@ defmodule Parrhesia.Web.Router do
use Plug.Router use Plug.Router
alias Parrhesia.Policy.ConnectionPolicy
alias Parrhesia.Web.Listener
alias Parrhesia.Web.Management alias Parrhesia.Web.Management
alias Parrhesia.Web.Metrics alias Parrhesia.Web.Metrics
alias Parrhesia.Web.Readiness alias Parrhesia.Web.Readiness
alias Parrhesia.Web.RelayInfo alias Parrhesia.Web.RelayInfo
plug(:put_listener)
plug(Plug.Parsers, plug(Plug.Parsers,
parsers: [:json], parsers: [:json],
pass: ["application/json"], pass: ["application/json"],
json_decoder: JSON json_decoder: JSON
) )
plug(Parrhesia.Web.RemoteIp)
plug(:match) plug(:match)
plug(:dispatch) plug(:dispatch)
@@ -30,33 +35,64 @@ defmodule Parrhesia.Web.Router do
end end
get "/metrics" do get "/metrics" do
if Metrics.enabled_on_main_endpoint?() do listener = Listener.from_conn(conn)
Metrics.handle(conn)
if Listener.feature_enabled?(listener, :metrics) do
case authorize_listener_request(conn, listener) do
{:ok, conn} -> Metrics.handle(conn)
{:error, :forbidden} -> send_resp(conn, 403, "forbidden")
end
else else
send_resp(conn, 404, "not found") send_resp(conn, 404, "not found")
end end
end end
post "/management" do post "/management" do
Management.handle(conn) listener = Listener.from_conn(conn)
if Listener.feature_enabled?(listener, :admin) do
case authorize_listener_request(conn, listener) do
{:ok, conn} -> Management.handle(conn, listener: listener)
{:error, :forbidden} -> send_resp(conn, 403, "forbidden")
end
else
send_resp(conn, 404, "not found")
end
end end
get "/relay" do get "/relay" do
if accepts_nip11?(conn) do listener = Listener.from_conn(conn)
body = JSON.encode!(RelayInfo.document())
conn if Listener.feature_enabled?(listener, :nostr) do
|> put_resp_content_type("application/nostr+json") case authorize_listener_request(conn, listener) do
|> send_resp(200, body) {:ok, conn} ->
if accepts_nip11?(conn) do
body = JSON.encode!(RelayInfo.document(listener))
conn
|> put_resp_content_type("application/nostr+json")
|> send_resp(200, body)
else
conn
|> WebSockAdapter.upgrade(
Parrhesia.Web.Connection,
%{
listener: listener,
relay_url: Listener.relay_url(listener, conn),
remote_ip: remote_ip(conn),
transport_identity: transport_identity(conn)
},
timeout: 60_000,
max_frame_size: max_frame_bytes()
)
|> halt()
end
{:error, :forbidden} ->
send_resp(conn, 403, "forbidden")
end
else else
conn send_resp(conn, 404, "not found")
|> WebSockAdapter.upgrade(
Parrhesia.Web.Connection,
%{relay_url: relay_url(conn)},
timeout: 60_000,
max_frame_size: max_frame_bytes()
)
|> halt()
end end
end end
@@ -64,30 +100,54 @@ defmodule Parrhesia.Web.Router do
send_resp(conn, 404, "not found") send_resp(conn, 404, "not found")
end end
defp put_listener(conn, opts) do
case conn.private do
%{parrhesia_listener: _listener} -> conn
_other -> Listener.put_conn(conn, opts)
end
end
defp accepts_nip11?(conn) do defp accepts_nip11?(conn) do
conn conn
|> get_req_header("accept") |> get_req_header("accept")
|> Enum.any?(&String.contains?(&1, "application/nostr+json")) |> Enum.any?(&String.contains?(&1, "application/nostr+json"))
end end
defp relay_url(conn) do
ws_scheme = if conn.scheme == :https, do: "wss", else: "ws"
port_segment =
if default_http_port?(conn.scheme, conn.port) do
""
else
":#{conn.port}"
end
"#{ws_scheme}://#{conn.host}#{port_segment}#{conn.request_path}"
end
defp default_http_port?(:http, 80), do: true
defp default_http_port?(:https, 443), do: true
defp default_http_port?(_scheme, _port), do: false
defp max_frame_bytes do defp max_frame_bytes do
Parrhesia.Config.get([:limits, :max_frame_bytes], 1_048_576) Parrhesia.Config.get([:limits, :max_frame_bytes], 1_048_576)
end end
defp authorize_listener_request(conn, listener) do
with :ok <- authorize_remote_ip(conn),
true <- Listener.remote_ip_allowed?(listener, conn.remote_ip),
{:ok, transport_identity} <- Listener.authorize_transport_request(listener, conn) do
{:ok, maybe_put_transport_identity(conn, transport_identity)}
else
{:error, :ip_blocked} -> {:error, :forbidden}
{:error, _reason} -> {:error, :forbidden}
false -> {:error, :forbidden}
end
end
defp authorize_remote_ip(conn) do
ConnectionPolicy.authorize_remote_ip(conn.remote_ip)
end
defp remote_ip(conn) do
case conn.remote_ip do
{_, _, _, _} = remote_ip -> :inet.ntoa(remote_ip) |> to_string()
{_, _, _, _, _, _, _, _} = remote_ip -> :inet.ntoa(remote_ip) |> to_string()
_other -> nil
end
end
defp maybe_put_transport_identity(conn, nil), do: conn
defp maybe_put_transport_identity(conn, transport_identity) do
Plug.Conn.put_private(conn, :parrhesia_transport_identity, transport_identity)
end
defp transport_identity(conn) do
Map.get(conn.private, :parrhesia_transport_identity)
end
end end

514
lib/parrhesia/web/tls.ex Normal file
View File

@@ -0,0 +1,514 @@
defmodule Parrhesia.Web.TLS do
  @moduledoc false
  # Listener TLS configuration and per-request TLS identity enforcement.
  #
  # Modes:
  #   :disabled         - no TLS handling at this layer.
  #   :server           - server-side TLS only; a client certificate, when
  #                       presented, is surfaced as an identity but never
  #                       required.
  #   :mutual           - mTLS; a verified client certificate is required and
  #                       may additionally be checked against configured pins.
  #   :proxy_terminated - TLS ends at a fronting proxy, which forwards the
  #                       client identity via HTTP headers that are only
  #                       honored for trusted proxy source addresses.

  import Bitwise
  require Record

  # Record accessors for OTP-decoded certificates; used to extract the subject.
  Record.defrecordp(
    :otp_certificate,
    Record.extract(:OTPCertificate, from_lib: "public_key/include/OTP-PUB-KEY.hrl")
  )

  Record.defrecordp(
    :otp_tbs_certificate,
    Record.extract(:OTPTBSCertificate, from_lib: "public_key/include/OTP-PUB-KEY.hrl")
  )

  @default_proxy_headers %{
    enabled: false,
    required: false,
    verify_header: "x-parrhesia-client-cert-verified",
    verified_values: ["1", "true", "success", "verified"],
    cert_sha256_header: "x-parrhesia-client-cert-sha256",
    spki_sha256_header: "x-parrhesia-client-spki-sha256",
    subject_header: "x-parrhesia-client-cert-subject"
  }

  @type pin :: %{
          type: :cert_sha256 | :spki_sha256,
          value: String.t()
        }

  @type config :: %{
          mode: :disabled | :server | :mutual | :proxy_terminated,
          certfile: String.t() | nil,
          keyfile: String.t() | nil,
          cacertfile: String.t() | nil,
          otp_app: atom() | nil,
          cipher_suite: :strong | :compatible | nil,
          client_pins: [pin()],
          proxy_headers: map()
        }

  # Baseline config: TLS disabled, default proxy header names, no pins.
  @spec default_config() :: config()
  def default_config do
    %{
      mode: :disabled,
      certfile: nil,
      keyfile: nil,
      cacertfile: nil,
      otp_app: nil,
      cipher_suite: nil,
      client_pins: [],
      proxy_headers: @default_proxy_headers
    }
  end

  # Normalizes an untrusted/loose TLS config map (atom or string keys, atom or
  # string values) into the canonical `config()` shape. Unknown or invalid
  # values fall back to defaults; the default mode depends on the listener
  # scheme (:https defaults to :server, anything else to :disabled).
  @spec normalize_config(map() | nil, atom()) :: config()
  def normalize_config(tls, scheme)

  def normalize_config(tls, scheme) when is_map(tls) do
    defaults =
      default_config()
      |> Map.put(:mode, default_mode(scheme))

    %{
      mode: normalize_mode(fetch_value(tls, :mode), defaults.mode),
      certfile: normalize_optional_string(fetch_value(tls, :certfile)),
      keyfile: normalize_optional_string(fetch_value(tls, :keyfile)),
      cacertfile: normalize_optional_string(fetch_value(tls, :cacertfile)),
      otp_app: normalize_optional_atom(fetch_value(tls, :otp_app)),
      cipher_suite: normalize_cipher_suite(fetch_value(tls, :cipher_suite)),
      client_pins: normalize_pins(fetch_value(tls, :client_pins)),
      proxy_headers: normalize_proxy_headers(fetch_value(tls, :proxy_headers))
    }
  end

  def normalize_config(_tls, scheme) do
    %{default_config() | mode: default_mode(scheme)}
  end

  # Builds the Bandit listener options for the configured mode. Modes that do
  # not terminate TLS locally contribute no options.
  @spec bandit_options(config()) :: keyword()
  def bandit_options(%{mode: mode}) when mode in [:disabled, :proxy_terminated], do: []

  def bandit_options(%{mode: mode} = tls) when mode in [:server, :mutual] do
    transport_options =
      transport_options(tls)
      |> Keyword.merge(mutual_tls_options(tls))
      |> configure_server_tls()

    [
      scheme: :https,
      thousand_island_options: [transport_options: transport_options]
    ]
  end

  # Resolves the TLS identity for a request (or rejects it) according to the
  # configured mode. Returns {:ok, identity_map | nil} or {:error, reason}.
  @spec authorize_request(config(), Plug.Conn.t(), boolean()) ::
          {:ok, map() | nil} | {:error, atom()}
  def authorize_request(%{mode: :disabled}, _conn, _trusted_proxy?), do: {:ok, nil}

  def authorize_request(%{mode: :server}, conn, _trusted_proxy?) do
    # Client cert is optional in :server mode; surface it when present.
    {:ok, socket_identity(conn)}
  end

  def authorize_request(%{mode: :mutual} = tls, conn, _trusted_proxy?) do
    with %{} = identity <- socket_identity(conn),
         :ok <- verify_pins(identity, tls.client_pins, :client_certificate_pin_mismatch) do
      {:ok, identity}
    else
      nil -> {:error, :client_certificate_required}
      {:error, _reason} = error -> error
    end
  end

  def authorize_request(%{mode: :proxy_terminated} = tls, conn, trusted_proxy?) do
    proxy_headers = tls.proxy_headers

    if proxy_headers.enabled do
      authorize_proxy_identity(conn, proxy_headers, tls.client_pins, trusted_proxy?)
    else
      {:ok, nil}
    end
  end

  # Effective request scheme. x-forwarded-proto is only honored in
  # :proxy_terminated mode from a trusted proxy.
  @spec request_scheme(config(), Plug.Conn.t(), boolean()) :: :http | :https
  def request_scheme(%{mode: :proxy_terminated}, conn, true) do
    case header_value(conn, "x-forwarded-proto") do
      nil ->
        conn.scheme

      value ->
        # Proxies vary in header casing/whitespace; normalize before matching.
        # (Previously "HTTPS" would silently fall back to conn.scheme.)
        case value |> String.trim() |> String.downcase() do
          "https" -> :https
          "http" -> :http
          _other -> conn.scheme
        end
    end
  end

  def request_scheme(_tls, conn, _trusted_proxy?), do: conn.scheme

  # Effective request host; honors x-forwarded-host (first entry of a
  # comma-separated list) only for trusted proxies.
  @spec request_host(Plug.Conn.t(), boolean()) :: String.t()
  def request_host(conn, true) do
    case header_value(conn, "x-forwarded-host") do
      nil -> conn.host
      value -> String.split(value, ",", parts: 2) |> List.first() |> String.trim()
    end
  end

  def request_host(conn, false), do: conn.host

  # Effective request port; honors x-forwarded-port only in :proxy_terminated
  # mode from a trusted proxy, defaulting to the forwarded scheme's well-known
  # port when the header is absent or invalid.
  @spec request_port(config(), Plug.Conn.t(), boolean(), :http | :https) :: non_neg_integer()
  def request_port(%{mode: :proxy_terminated}, conn, true, forwarded_scheme) do
    case header_value(conn, "x-forwarded-port") do
      nil -> default_port(forwarded_scheme)
      value -> parse_port(value, default_port(forwarded_scheme))
    end
  end

  def request_port(_tls, conn, _trusted_proxy?, _scheme), do: conn.port

  # True when the TCP peer address of the connection falls inside one of the
  # configured trusted proxy CIDRs.
  @spec trusted_proxy_request?(Plug.Conn.t(), [String.t()]) :: boolean()
  def trusted_proxy_request?(conn, trusted_cidrs) when is_list(trusted_cidrs) do
    case Plug.Conn.get_peer_data(conn) do
      %{address: address} -> Enum.any?(trusted_cidrs, &ip_in_cidr?(address, &1))
      _other -> false
    end
  end

  def trusted_proxy_request?(_conn, _trusted_cidrs), do: false

  defp transport_options(tls) do
    []
    |> maybe_put_opt(:keyfile, tls.keyfile)
    |> maybe_put_opt(:certfile, tls.certfile)
    |> maybe_put_opt(:cacertfile, tls.cacertfile)
    |> maybe_put_opt(:otp_app, tls.otp_app)
    |> maybe_put_opt(:cipher_suite, tls.cipher_suite)
  end

  # Without an explicit CA file, fall back to the OS trust store for verifying
  # client certificates in :mutual mode.
  defp mutual_tls_options(%{mode: :mutual, cacertfile: nil}) do
    [
      verify: :verify_peer,
      fail_if_no_peer_cert: true,
      cacerts: system_cacerts()
    ]
  end

  defp mutual_tls_options(%{mode: :mutual}) do
    [
      verify: :verify_peer,
      fail_if_no_peer_cert: true
    ]
  end

  defp mutual_tls_options(_tls), do: []

  # Plug.SSL.configure/1 validates the option set and applies TLS hardening;
  # a bad listener config is a boot-time error, so raise.
  defp configure_server_tls(options) do
    case Plug.SSL.configure(options) do
      {:ok, configured} -> configured
      {:error, message} -> raise ArgumentError, "invalid listener TLS config: #{message}"
    end
  end

  defp authorize_proxy_identity(conn, proxy_headers, pins, trusted_proxy?) do
    cond do
      # Identity headers from untrusted sources are never read; fail when an
      # identity is mandatory, otherwise treat the request as anonymous.
      not trusted_proxy? and proxy_headers.required ->
        {:error, :proxy_tls_identity_required}

      not trusted_proxy? ->
        {:ok, nil}

      true ->
        conn
        |> proxy_identity(proxy_headers)
        |> authorize_proxy_identity_value(proxy_headers.required, pins)
    end
  end

  defp authorize_proxy_identity_value(nil, true, _pins),
    do: {:error, :proxy_tls_identity_required}

  defp authorize_proxy_identity_value(nil, false, _pins), do: {:ok, nil}

  defp authorize_proxy_identity_value(%{verified?: false}, _required, _pins) do
    {:error, :proxy_tls_identity_unverified}
  end

  defp authorize_proxy_identity_value(identity, _required, pins) do
    case verify_pins(identity, pins, :proxy_tls_identity_pin_mismatch) do
      :ok -> {:ok, identity}
      {:error, _reason} = error -> error
    end
  end

  # Builds an identity map from the proxy-forwarded headers, or nil when no
  # identity header is present at all.
  defp proxy_identity(conn, proxy_headers) do
    cert_sha256 = header_value(conn, proxy_headers.cert_sha256_header)
    spki_sha256 = header_value(conn, proxy_headers.spki_sha256_header)
    subject = header_value(conn, proxy_headers.subject_header)
    verified? = verified_header?(conn, proxy_headers)

    if cert_sha256 || spki_sha256 || subject do
      %{
        source: :proxy,
        verified?: verified?,
        cert_sha256: cert_sha256,
        spki_sha256: spki_sha256,
        subject: subject
      }
    end
  end

  # Case-insensitive comparison of the proxy's verification header against the
  # configured set of "verified" values.
  defp verified_header?(conn, proxy_headers) do
    case header_value(conn, proxy_headers.verify_header) do
      nil ->
        false

      value ->
        normalized = String.downcase(String.trim(value))
        normalized in Enum.map(proxy_headers.verified_values, &String.downcase/1)
    end
  end

  # Identity from the TLS socket's peer certificate, or nil when absent.
  # get_peer_data can raise on some transports; treat that as "no identity"
  # (boundary-only broad rescue).
  defp socket_identity(conn) do
    case Plug.Conn.get_peer_data(conn) do
      %{ssl_cert: cert_der} when is_binary(cert_der) ->
        certificate_identity(cert_der, :socket)

      _other ->
        nil
    end
  rescue
    _error -> nil
  end

  defp certificate_identity(cert_der, source) do
    %{
      source: source,
      # A socket-provided certificate already passed :verify_peer.
      verified?: true,
      cert_sha256: Base.encode64(:crypto.hash(:sha256, cert_der)),
      spki_sha256: spki_pin(cert_der),
      subject: certificate_subject(cert_der)
    }
  end

  defp certificate_subject(cert_der) do
    cert = :public_key.pkix_decode_cert(cert_der, :otp)

    cert
    |> otp_certificate(:tbsCertificate)
    |> otp_tbs_certificate(:subject)
    |> inspect()
  end

  # No pins configured means any identity is acceptable.
  defp verify_pins(_identity, [], _reason), do: :ok

  defp verify_pins(identity, pins, reason) do
    if Enum.any?(pins, &pin_matches?(identity, &1)) do
      :ok
    else
      {:error, reason}
    end
  end

  defp pin_matches?(identity, %{type: :cert_sha256, value: value}) do
    identity.cert_sha256 == value
  end

  defp pin_matches?(identity, %{type: :spki_sha256, value: value}) do
    identity.spki_sha256 == value
  end

  # RFC 7469-style pin: base64(SHA-256(DER SubjectPublicKeyInfo)).
  # Positional access into the :plain-decoded records: element 1 of the
  # :Certificate record is the TBSCertificate, element 7 of which is the
  # subjectPublicKeyInfo.
  defp spki_pin(cert_der) do
    cert = :public_key.pkix_decode_cert(cert_der, :plain)
    spki = cert |> elem(1) |> elem(7)

    :public_key.der_encode(:SubjectPublicKeyInfo, spki)
    |> then(&:crypto.hash(:sha256, &1))
    |> Base.encode64()
  end

  defp header_value(conn, header) when is_binary(header) and header != "" do
    conn
    |> Plug.Conn.get_req_header(header)
    |> List.first()
  end

  defp header_value(_conn, _header), do: nil

  defp default_mode(:https), do: :server
  defp default_mode(_scheme), do: :disabled

  defp default_port(:https), do: 443
  defp default_port(_scheme), do: 80

  defp parse_port(value, default) when is_binary(value) do
    case Integer.parse(String.trim(value)) do
      # Reject out-of-range values instead of trusting the forwarded header
      # blindly (previously any integer >= 0, e.g. 99999, was accepted).
      {port, ""} when port in 0..65535 -> port
      _other -> default
    end
  end

  defp normalize_mode(:server, _default), do: :server
  defp normalize_mode("server", _default), do: :server
  defp normalize_mode(:mutual, _default), do: :mutual
  defp normalize_mode("mutual", _default), do: :mutual
  defp normalize_mode(:proxy_terminated, _default), do: :proxy_terminated
  defp normalize_mode("proxy_terminated", _default), do: :proxy_terminated
  defp normalize_mode(:disabled, _default), do: :disabled
  defp normalize_mode("disabled", _default), do: :disabled
  defp normalize_mode(_value, default), do: default

  defp normalize_cipher_suite(:strong), do: :strong
  defp normalize_cipher_suite("strong"), do: :strong
  defp normalize_cipher_suite(:compatible), do: :compatible
  defp normalize_cipher_suite("compatible"), do: :compatible
  defp normalize_cipher_suite(_value), do: nil

  defp normalize_proxy_headers(headers) when is_map(headers) do
    # Hoisted: previously the configured list was re-fetched and re-normalized
    # inside the filter closure for every default element.
    custom_values = normalize_string_list(fetch_value(headers, :verified_values))

    %{
      enabled: normalize_boolean(fetch_value(headers, :enabled), false),
      required: normalize_boolean(fetch_value(headers, :required), false),
      verify_header:
        normalize_optional_string(fetch_value(headers, :verify_header)) ||
          @default_proxy_headers.verify_header,
      # Custom values come first; defaults not already listed are appended.
      verified_values:
        custom_values ++
          Enum.reject(@default_proxy_headers.verified_values, &(&1 in custom_values)),
      cert_sha256_header:
        normalize_optional_string(fetch_value(headers, :cert_sha256_header)) ||
          @default_proxy_headers.cert_sha256_header,
      spki_sha256_header:
        normalize_optional_string(fetch_value(headers, :spki_sha256_header)) ||
          @default_proxy_headers.spki_sha256_header,
      subject_header:
        normalize_optional_string(fetch_value(headers, :subject_header)) ||
          @default_proxy_headers.subject_header
    }
  end

  defp normalize_proxy_headers(_headers), do: @default_proxy_headers

  # Keeps only well-formed pins (atom or string keys); anything else is dropped.
  defp normalize_pins(pins) when is_list(pins) do
    Enum.flat_map(pins, fn
      %{type: type, value: value} ->
        case normalize_pin(type, value) do
          nil -> []
          pin -> [pin]
        end

      %{"type" => type, "value" => value} ->
        case normalize_pin(type, value) do
          nil -> []
          pin -> [pin]
        end

      _other ->
        []
    end)
  end

  defp normalize_pins(_pins), do: []

  defp normalize_pin(type, value) do
    with normalized_type when normalized_type in [:cert_sha256, :spki_sha256] <-
           normalize_pin_type(type),
         normalized_value when is_binary(normalized_value) and normalized_value != "" <-
           normalize_optional_string(value) do
      %{type: normalized_type, value: normalized_value}
    else
      _other -> nil
    end
  end

  defp normalize_pin_type(:cert_sha256), do: :cert_sha256
  defp normalize_pin_type("cert_sha256"), do: :cert_sha256
  defp normalize_pin_type(:spki_sha256), do: :spki_sha256
  defp normalize_pin_type("spki_sha256"), do: :spki_sha256
  defp normalize_pin_type(_type), do: nil

  defp maybe_put_opt(options, _key, nil), do: options
  defp maybe_put_opt(options, key, value), do: Keyword.put(options, key, value)

  # Reads `key` from a map with either atom or string keys; atom keys win.
  defp fetch_value(map, key) when is_map(map) do
    cond do
      Map.has_key?(map, key) ->
        Map.get(map, key)

      is_atom(key) and Map.has_key?(map, Atom.to_string(key)) ->
        Map.get(map, Atom.to_string(key))

      true ->
        nil
    end
  end

  defp normalize_optional_string(value) when is_binary(value) and value != "", do: value
  defp normalize_optional_string(_value), do: nil

  defp normalize_optional_atom(value) when is_atom(value), do: value
  defp normalize_optional_atom(_value), do: nil

  defp normalize_boolean(value, _default) when is_boolean(value), do: value
  defp normalize_boolean(nil, default), do: default
  defp normalize_boolean(_value, default), do: default

  defp normalize_string_list(values) when is_list(values) do
    Enum.filter(values, &(is_binary(&1) and &1 != ""))
  end

  defp normalize_string_list(_values), do: []

  # OS trust store via :public_key.cacerts_get/0 (OTP >= 25); empty otherwise.
  defp system_cacerts do
    if function_exported?(:public_key, :cacerts_get, 0) do
      :public_key.cacerts_get()
    else
      []
    end
  end

  # True when `ip` (an :inet tuple) lies within `cidr` ("a.b.c.d/len" or a
  # bare address). Mixed IPv4/IPv6 comparisons are always false.
  defp ip_in_cidr?(ip, cidr) do
    with {network, prefix_len} <- parse_cidr(cidr),
         {:ok, ip_size, ip_value} <- ip_to_int(ip),
         {:ok, network_size, network_value} <- ip_to_int(network),
         true <- ip_size == network_size,
         true <- prefix_len >= 0,
         true <- prefix_len <= ip_size do
      mask = network_mask(ip_size, prefix_len)
      (ip_value &&& mask) == (network_value &&& mask)
    else
      _other -> false
    end
  end

  defp parse_cidr(cidr) when is_binary(cidr) do
    case String.split(cidr, "/", parts: 2) do
      [address, prefix_str] ->
        with {prefix_len, ""} <- Integer.parse(prefix_str),
             {:ok, ip} <- :inet.parse_address(String.to_charlist(address)) do
          {ip, prefix_len}
        else
          _other -> :error
        end

      [address] ->
        # A bare address is treated as a host route (/32 or /128).
        case :inet.parse_address(String.to_charlist(address)) do
          {:ok, {_, _, _, _} = ip} -> {ip, 32}
          {:ok, {_, _, _, _, _, _, _, _} = ip} -> {ip, 128}
          _other -> :error
        end

      _other ->
        :error
    end
  end

  defp parse_cidr(_cidr), do: :error

  defp ip_to_int({a, b, c, d}) do
    {:ok, 32, (a <<< 24) + (b <<< 16) + (c <<< 8) + d}
  end

  # IPv6 :inet tuples hold eight 16-bit groups.
  defp ip_to_int({a, b, c, d, e, f, g, h}) do
    {:ok, 128,
     (a <<< 112) + (b <<< 96) + (c <<< 80) + (d <<< 64) + (e <<< 48) + (f <<< 32) + (g <<< 16) +
       h}
  end

  defp ip_to_int(_ip), do: :error

  defp network_mask(_size, 0), do: 0

  defp network_mask(size, prefix_len) do
    all_ones = (1 <<< size) - 1
    all_ones <<< (size - prefix_len)
  end
end

View File

@@ -4,7 +4,7 @@ defmodule Parrhesia.MixProject do
def project do def project do
[ [
app: :parrhesia, app: :parrhesia,
version: "0.4.0", version: "0.5.0",
elixir: "~> 1.18", elixir: "~> 1.18",
start_permanent: Mix.env() == :prod, start_permanent: Mix.env() == :prod,
deps: deps(), deps: deps(),
@@ -36,6 +36,7 @@ defmodule Parrhesia.MixProject do
# Runtime: storage adapter (Postgres first) # Runtime: storage adapter (Postgres first)
{:ecto_sql, "~> 3.12"}, {:ecto_sql, "~> 3.12"},
{:postgrex, ">= 0.0.0"}, {:postgrex, ">= 0.0.0"},
{:req, "~> 0.5"},
# Runtime: telemetry + prometheus exporter (/metrics) # Runtime: telemetry + prometheus exporter (/metrics)
{:telemetry_metrics, "~> 1.0"}, {:telemetry_metrics, "~> 1.0"},
@@ -44,7 +45,7 @@ defmodule Parrhesia.MixProject do
# Test tooling # Test tooling
{:stream_data, "~> 1.0", only: :test}, {:stream_data, "~> 1.0", only: :test},
{:websockex, "~> 0.4", only: :test}, {:websockex, "~> 0.4"},
# Project tooling # Project tooling
{:credo, "~> 1.7", only: [:dev, :test], runtime: false}, {:credo, "~> 1.7", only: [:dev, :test], runtime: false},
@@ -61,6 +62,8 @@ defmodule Parrhesia.MixProject do
test: ["ecto.create --quiet", "ecto.migrate --quiet", "test"], test: ["ecto.create --quiet", "ecto.migrate --quiet", "test"],
"test.nak_e2e": ["cmd ./scripts/run_nak_e2e.sh"], "test.nak_e2e": ["cmd ./scripts/run_nak_e2e.sh"],
"test.marmot_e2e": ["cmd ./scripts/run_marmot_e2e.sh"], "test.marmot_e2e": ["cmd ./scripts/run_marmot_e2e.sh"],
"test.node_sync_e2e": ["cmd ./scripts/run_node_sync_e2e.sh"],
"test.node_sync_docker_e2e": ["cmd ./scripts/run_node_sync_docker_e2e.sh"],
bench: ["cmd ./scripts/run_bench_compare.sh"], bench: ["cmd ./scripts/run_bench_compare.sh"],
# cov: ["cmd mix coveralls.lcov"], # cov: ["cmd mix coveralls.lcov"],
lint: ["format --check-formatted", "credo"], lint: ["format --check-formatted", "credo"],

View File

@@ -0,0 +1,15 @@
defmodule Parrhesia.Repo.Migrations.AddAclRules do
  use Ecto.Migration

  # Creates the acl_rules table: one capability grant per row, keyed by the
  # principal plus a JSON match scope (empty map = no scoping). Rows are only
  # ever inserted, so no updated_at column is tracked.
  def change do
    create table(:acl_rules) do
      add :principal_type, :string, null: false
      add :principal, :binary, null: false
      add :capability, :string, null: false
      add :match, :map, null: false, default: %{}

      timestamps(updated_at: false, type: :utc_datetime_usec)
    end

    # Lookup index for runtime enforcement queries.
    create index(:acl_rules, [:principal_type, :principal, :capability])
  end
end

847
scripts/node_sync_e2e.exs Normal file
View File

@@ -0,0 +1,847 @@
defmodule NodeSyncE2E.RelayClient do
  @moduledoc false
  # Thin WebSockex client that forwards every relay frame to an owner process
  # as `{:node_sync_e2e_relay_client, pid, ...}` messages. Text frames are
  # JSON-decoded before forwarding; undecodable payloads are passed along as
  # `{:decode_error, reason, payload}` so the owner can fail loudly.
  use WebSockex

  def start_link(url, owner, opts \\ []) do
    # Surface an initial connection refusal as a disconnect message instead
    # of a start_link failure.
    opts = Keyword.put(opts, :handle_initial_conn_failure, true)
    WebSockex.start_link(url, __MODULE__, owner, opts)
  end

  def send_json(pid, payload), do: WebSockex.cast(pid, {:send_json, payload})

  def close(pid), do: WebSockex.cast(pid, :close)

  @impl true
  def handle_connect(_conn, parent) do
    send(parent, {:node_sync_e2e_relay_client, self(), :connected})
    {:ok, parent}
  end

  @impl true
  def handle_frame({:text, payload}, parent) do
    decoded =
      case JSON.decode(payload) do
        {:ok, message} -> message
        {:error, reason} -> {:decode_error, reason, payload}
      end

    send(parent, {:node_sync_e2e_relay_client, self(), :frame, decoded})
    {:ok, parent}
  end

  def handle_frame(other, parent) do
    # Non-text frames (binary, ping, etc.) are forwarded untouched.
    send(parent, {:node_sync_e2e_relay_client, self(), :frame, other})
    {:ok, parent}
  end

  @impl true
  def handle_cast({:send_json, payload}, parent) do
    {:reply, {:text, JSON.encode!(payload)}, parent}
  end

  def handle_cast(:close, parent), do: {:close, parent}

  @impl true
  def handle_disconnect(status, parent) do
    send(parent, {:node_sync_e2e_relay_client, self(), :disconnected, status})
    {:ok, parent}
  end
end
defmodule NodeSyncE2E.Runner do
alias NodeSyncE2E.RelayClient
alias Parrhesia.API.Auth
@kind 5000
@subsystem_tag "node-sync-e2e"
@default_resource "tribes.accounts.user"
@default_server_id "node-a-upstream"
@default_admin_private_key String.duplicate("1", 64)
@default_client_private_key String.duplicate("2", 64)
@frame_timeout_ms 5_000
# Entry point: starts HTTP/websocket client apps, parses the command line,
# loads configuration from the environment, and runs the requested command.
# Prints a summary on success; prints the failure reason and exits 1 otherwise.
def main(argv) do
  with {:ok, _apps} <- Application.ensure_all_started(:req),
       {:ok, _apps} <- Application.ensure_all_started(:websockex),
       {:ok, command, opts} <- parse_args(argv),
       {:ok, config} <- load_config(),
       :ok <- dispatch(command, config, opts) do
    IO.puts("node-sync-e2e #{command} completed")
  else
    {:error, reason} ->
      IO.puts(:stderr, "node-sync-e2e failed: #{format_reason(reason)}")
      System.halt(1)

    # Defensive catch-all: a step leaking an untagged value (e.g. an
    # `{:ok, map}` falling through an inner `with`) previously crashed this
    # `else` with a WithClauseError; report it as a failure instead.
    other ->
      IO.puts(:stderr, "node-sync-e2e failed: #{format_reason({:unexpected_result, other})}")
      System.halt(1)
  end
end
# Parses argv into {command, opts}; exactly one positional command is
# required, and only --state-file is recognized.
defp parse_args(argv) do
  {opts, rest, invalid} = OptionParser.parse(argv, strict: [state_file: :string])

  # Pattern match on the shapes directly instead of the previous
  # `length(rest) != 1` check (O(n) and less declarative).
  case {invalid, rest} do
    {[_ | _], _rest} -> {:error, {:invalid_arguments, invalid}}
    {[], [command]} -> {:ok, command, opts}
    {[], _other} -> {:error, :missing_command}
  end
end
# bootstrap: first-run flow. Waits for both nodes, resolves and cross-checks
# their identities, grants sync ACLs in both directions, publishes a
# "catchup" event BEFORE sync is configured (exercising backfill) and a
# "live" event AFTER (exercising streaming), verifies both replicated to
# node B, then records event/server ids in the state file for later phases.
defp dispatch("bootstrap", config, opts) do
  with {:ok, state_file} <- fetch_state_file(opts),
       :ok <- ensure_nodes_ready(config),
       {:ok, node_a_pubkey} <- fetch_node_pubkey(config, config.node_a),
       {:ok, node_b_pubkey} <- fetch_node_pubkey(config, config.node_b),
       :ok <- ensure_identity_matches(config.node_a, node_a_pubkey, :node_a),
       :ok <- ensure_identity_matches(config.node_b, node_b_pubkey, :node_b),
       # Node B reads from node A; the client may write to A and read from B.
       :ok <- ensure_acl(config, config.node_a, node_b_pubkey, "sync_read", config.filter),
       :ok <-
         ensure_acl(config, config.node_a, config.client_pubkey, "sync_write", config.filter),
       :ok <- ensure_acl(config, config.node_b, node_a_pubkey, "sync_write", config.filter),
       :ok <-
         ensure_acl(config, config.node_b, config.client_pubkey, "sync_read", config.filter),
       {:ok, catchup_event} <- publish_phase_event(config, config.node_a, "catchup"),
       :ok <- configure_sync(config, node_a_pubkey),
       :ok <- wait_for_sync_connected(config, config.node_b, config.server_id),
       :ok <- wait_for_event(config, config.node_b, catchup_event["id"]),
       {:ok, live_event} <- publish_phase_event(config, config.node_a, "live"),
       :ok <- wait_for_event(config, config.node_b, live_event["id"]),
       {:ok, stats} <- fetch_sync_server_stats(config, config.node_b, config.server_id),
       # Both phase events must have been accepted by node B's sync worker.
       :ok <- ensure_minimum_counter(stats, "events_accepted", 2),
       :ok <-
         save_state(state_file, %{
           "run_id" => config.run_id,
           "resource" => config.resource,
           "server_id" => config.server_id,
           "node_a_pubkey" => node_a_pubkey,
           "node_b_pubkey" => node_b_pubkey,
           "catchup_event_id" => catchup_event["id"],
           "live_event_id" => live_event["id"]
         }) do
    :ok
  end
end

# publish-resume: while node B is (presumably) down, publish a "resume"
# event to node A and record its id so verify-resume can check it later.
defp dispatch("publish-resume", config, opts) do
  with {:ok, state_file} <- fetch_state_file(opts),
       # Guard against mixing state files across harness runs.
       :ok <- ensure_run_matches(config, load_state(state_file)),
       {:ok, resume_event} <- publish_phase_event(config, config.node_a, "resume"),
       :ok <-
         save_state(state_file, %{
           "run_id" => config.run_id,
           "resource" => config.resource,
           "server_id" => config.server_id,
           "resume_event_id" => resume_event["id"]
         }) do
    :ok
  end
end

# verify-resume: after node B restarts, confirm the sync worker reconnects,
# re-runs its catch-up query (query_runs >= 2) and picks up the "resume"
# event published while it was away (events_accepted >= 3 across the run).
defp dispatch("verify-resume", config, opts) do
  with {:ok, state_file} <- fetch_state_file(opts),
       state = load_state(state_file),
       :ok <- ensure_run_matches(config, state),
       {:ok, resume_event_id} <- fetch_state_value(state, "resume_event_id"),
       :ok <- ensure_nodes_ready(config),
       :ok <- wait_for_sync_connected(config, config.node_b, config.server_id),
       :ok <- wait_for_event(config, config.node_b, resume_event_id),
       {:ok, stats} <- fetch_sync_server_stats(config, config.node_b, config.server_id),
       :ok <- ensure_minimum_counter(stats, "events_accepted", 3),
       :ok <- ensure_minimum_counter(stats, "query_runs", 2) do
    :ok
  end
end

# Any other command name is an error.
defp dispatch(other, _config, _opts), do: {:error, {:unknown_command, other}}
# Extracts the mandatory --state-file option from the parsed CLI opts.
defp fetch_state_file(opts) do
  if path = Keyword.get(opts, :state_file) do
    {:ok, path}
  else
    {:error, :missing_state_file}
  end
end
# Builds the harness configuration from PARRHESIA_NODE_SYNC_E2E_* environment
# variables, falling back to the module defaults (fixed, well-known test
# keys — for the e2e harness only).
defp load_config do
  resource = System.get_env("PARRHESIA_NODE_SYNC_E2E_RESOURCE", @default_resource)

  admin_private_key =
    System.get_env("PARRHESIA_NODE_SYNC_E2E_ADMIN_PRIVATE_KEY", @default_admin_private_key)

  client_private_key =
    System.get_env("PARRHESIA_NODE_SYNC_E2E_CLIENT_PRIVATE_KEY", @default_client_private_key)

  with {:ok, node_a} <- load_node("A"),
       {:ok, node_b} <- load_node("B"),
       {:ok, client_pubkey} <- derive_pubkey(client_private_key) do
    {:ok,
     %{
       run_id: System.get_env("PARRHESIA_NODE_SYNC_E2E_RUN_ID", default_run_id()),
       resource: resource,
       # Sync filter: this harness's event kind, scoped to the resource tag.
       filter: %{"kinds" => [@kind], "#r" => [resource]},
       admin_private_key: admin_private_key,
       client_private_key: client_private_key,
       client_pubkey: client_pubkey,
       server_id: System.get_env("PARRHESIA_NODE_SYNC_E2E_SERVER_ID", @default_server_id),
       node_a: node_a,
       node_b: node_b
     }}
  end
end
# Reads one node's endpoints from PARRHESIA_NODE_<suffix>_* environment
# variables. HTTP and websocket URLs are mandatory; the relay-auth and sync
# URLs fall back to the websocket URL when unset.
defp load_node(suffix) do
  http_url =
    System.get_env("PARRHESIA_NODE_#{suffix}_HTTP_URL") ||
      System.get_env("PARRHESIA_NODE_#{suffix}_MANAGEMENT_BASE_URL")

  websocket_url = System.get_env("PARRHESIA_NODE_#{suffix}_WS_URL")
  relay_auth_url = System.get_env("PARRHESIA_NODE_#{suffix}_RELAY_AUTH_URL", websocket_url)
  sync_url = System.get_env("PARRHESIA_NODE_#{suffix}_SYNC_URL", relay_auth_url)

  cond do
    http_url in [nil, ""] ->
      {:error, {:missing_env, "PARRHESIA_NODE_#{suffix}_HTTP_URL"}}

    websocket_url in [nil, ""] ->
      {:error, {:missing_env, "PARRHESIA_NODE_#{suffix}_WS_URL"}}

    true ->
      node = %{
        http_url: http_url,
        websocket_url: websocket_url,
        relay_auth_url: relay_auth_url,
        sync_url: sync_url
      }

      {:ok, node}
  end
end
# Both relays must answer /health before any bootstrap/verify step runs.
# Returns node A's error without probing node B when A is unhealthy.
defp ensure_nodes_ready(config) do
  case wait_for_health(config.node_a) do
    :ok -> wait_for_health(config.node_b)
    error -> error
  end
end
# Polls the node's /health endpoint (up to 15s, every 250ms) until it returns
# a literal 200 "ok" body.
defp wait_for_health(node) do
  health_url = node.http_url <> "/health"

  probe = fn ->
    response =
      Req.get(
        url: health_url,
        decode_body: false,
        connect_options: [timeout: 1_000],
        receive_timeout: 1_000
      )

    case response do
      {:ok, %{status: 200, body: "ok"}} -> {:ok, :ready}
      {:ok, %{status: status}} -> {:retry, {:unexpected_status, status}}
      {:error, reason} -> {:retry, reason}
    end
  end

  case wait_until("node health #{node.http_url}", 15_000, 250, probe) do
    {:ok, _ready} -> :ok
    {:error, _reason} = error -> error
  end
end
# Asks the node's management API for its identity pubkey (lowercased hex).
defp fetch_node_pubkey(config, node) do
  case management_call(config, node, "identity_get", %{}) do
    {:ok, %{"pubkey" => pubkey}} when is_binary(pubkey) ->
      {:ok, String.downcase(pubkey)}

    {:ok, unexpected} ->
      {:error, {:unexpected_identity_payload, unexpected}}

    {:error, reason} ->
      {:error, {:identity_get_failed, reason}}
  end
end
# Cross-checks the pubkey advertised in the relay's NIP-11 document against
# the one reported by its management API. NOTE: fetch_nip11_pubkey/1 raises
# (rather than returning an error tuple) when the document can't be fetched.
defp ensure_identity_matches(node, expected_pubkey, label) do
  case fetch_nip11_pubkey(node) do
    ^expected_pubkey -> :ok
    _other -> {:error, {label, :identity_mismatch}}
  end
end
# Fetches the relay's NIP-11 info document and returns its pubkey lowercased.
# Deliberately raises on any failure (transport error, bad status, bad JSON,
# missing pubkey) — callers compare the returned binary directly, so failures
# crash the harness loudly rather than masquerading as a mismatch.
defp fetch_nip11_pubkey(node) do
  relay_url = node.http_url <> "/relay"

  case Req.get(
         url: relay_url,
         headers: [{"accept", "application/nostr+json"}],
         decode_body: false,
         connect_options: [timeout: 1_000],
         receive_timeout: 1_000
       ) do
    {:ok, %{status: 200, body: body}} ->
      case JSON.decode(body) do
        {:ok, %{"pubkey" => pubkey}} when is_binary(pubkey) -> String.downcase(pubkey)
        {:ok, other} -> raise "unexpected relay info payload: #{inspect(other)}"
        {:error, reason} -> raise "relay info JSON decode failed: #{inspect(reason)}"
      end

    {:ok, %{status: status}} ->
      raise "relay info request failed with status #{status}"

    {:error, reason} ->
      raise "relay info request failed: #{inspect(reason)}"
  end
end
# Grants `capability` on `node` to the pubkey `principal`, scoped by the
# given filter match, via the management API.
defp ensure_acl(config, node, principal, capability, match) do
  grant = %{
    "principal_type" => "pubkey",
    "principal" => principal,
    "capability" => capability,
    "match" => match
  }

  case management_call(config, node, "acl_grant", grant) do
    {:ok, %{"ok" => true}} -> :ok
    {:ok, unexpected} -> {:error, {:unexpected_acl_result, unexpected}}
    {:error, reason} -> {:error, {:acl_grant_failed, capability, principal, reason}}
  end
end
# Registers node A as an outbound sync source on node B and starts the sync
# worker. Returns :ok or {:error, reason}.
defp configure_sync(config, node_a_pubkey) do
  params = %{
    "id" => config.server_id,
    "url" => config.node_a.sync_url,
    "enabled?" => true,
    "auth_pubkey" => node_a_pubkey,
    "filters" => [config.filter],
    "tls" => sync_tls_config(config.node_a.sync_url)
  }

  with {:ok, _server} <- management_call(config, config.node_b, "sync_put_server", params),
       {:ok, %{"ok" => true}} <-
         management_call(config, config.node_b, "sync_start_server", %{"id" => config.server_id}) do
    :ok
  else
    {:error, _reason} = error ->
      error

    # Previously an unexpected success payload (e.g. {:ok, %{"ok" => false}})
    # fell through the `with` untagged and broke the caller's `{:error, _}`
    # handling; normalize everything to a tagged error.
    other ->
      {:error, {:sync_configure_failed, other}}
  end
end
# A wss upstream would need certificate pins wired through; refuse loudly
# rather than silently syncing over unpinned TLS.
defp sync_tls_config("wss://" <> _rest) do
  raise "wss sync URLs are not supported by this harness without explicit pin configuration"
end

# Plain ws/http upstream: TLS disabled, no pins.
defp sync_tls_config(_url), do: %{"mode" => "disabled", "pins" => []}
# Publishes one signed phase-marker event (kind @kind, tagged with the
# resource, subsystem, run id, and phase name) to `node` over a short-lived
# websocket connection. Returns {:ok, signed_event} or {:error, reason}.
defp publish_phase_event(config, node, phase) do
  event =
    %{
      "created_at" => System.system_time(:second),
      "kind" => @kind,
      "tags" => [
        ["r", config.resource],
        ["t", @subsystem_tag],
        ["run", config.run_id],
        ["phase", phase]
      ],
      "content" => "#{phase}:#{config.run_id}"
    }
    |> sign_event!(config.client_private_key)

  with {:ok, client} <- RelayClient.start_link(node.websocket_url, self()),
       :ok <- await_client_connect(client) do
    # Always close the socket, even when publishing fails mid-handshake.
    try do
      case publish_event(client, node.relay_auth_url, config.client_private_key, event) do
        :ok -> {:ok, event}
        {:error, reason} -> {:error, reason}
      end
    after
      RelayClient.close(client)
    end
  end
end
# Polls `node` (up to 20s, every 250ms) until the event with `event_id` is
# visible through a filtered REQ — i.e. replication to that node completed.
defp wait_for_event(config, node, event_id) do
  case wait_until("event #{event_id} on #{node.websocket_url}", 20_000, 250, fn ->
         # Narrow the harness filter down to exactly this event.
         filter =
           config.filter
           |> Map.put("ids", [event_id])
           |> Map.put("limit", 1)

         case query_events(node, config.client_private_key, filter) do
           {:ok, events} ->
             if Enum.any?(events, &(&1["id"] == event_id)) do
               {:ok, :replicated}
             else
               {:retry, :missing_event}
             end

           {:error, reason} ->
             # Query errors are retried too — the node may still be starting.
             {:retry, reason}
         end
       end) do
    {:ok, _value} -> :ok
    {:error, reason} -> {:error, reason}
  end
end
# Polls sync_server_stats (up to 20s, every 250ms) until the worker reports a
# live connection and at least one completed catch-up query run.
defp wait_for_sync_connected(config, node, server_id) do
  probe = fn ->
    case management_call(config, node, "sync_server_stats", %{"id" => server_id}) do
      {:ok, %{"connected" => true, "query_runs" => runs} = stats} when runs >= 1 ->
        {:ok, stats}

      {:ok, stats} ->
        {:retry, stats}

      {:error, reason} ->
        {:retry, reason}
    end
  end

  case wait_until("sync connected #{server_id}", 20_000, 250, probe) do
    {:ok, _stats} -> :ok
    {:error, _reason} = error -> error
  end
end
# Single-shot stats read for a sync server; failures get a harness-specific
# tag. Successful {:ok, stats} results pass through the `with` unchanged.
defp fetch_sync_server_stats(config, node, server_id) do
  with {:error, reason} <-
         management_call(config, node, "sync_server_stats", %{"id" => server_id}) do
    {:error, {:sync_server_stats_failed, reason}}
  end
end
# Runs one REQ against `node` on a fresh connection and collects events until
# EOSE. The REQ is sent optimistically; if the relay demands NIP-42 auth, the
# receive loop in authenticated_query/8 authenticates and re-issues the
# subscription (same id, so the relay replaces the first one).
defp query_events(node, private_key, filter) do
  with {:ok, client} <- RelayClient.start_link(node.websocket_url, self()),
       :ok <- await_client_connect(client) do
    subscription_id = "node-sync-e2e-#{System.unique_integer([:positive, :monotonic])}"

    # Always close the socket, whatever the query outcome.
    try do
      :ok = RelayClient.send_json(client, ["REQ", subscription_id, filter])

      authenticated_query(
        client,
        node.relay_auth_url,
        private_key,
        subscription_id,
        [filter],
        [],
        false,
        nil
      )
    after
      RelayClient.close(client)
    end
  end
end
# Receive loop for a REQ-based query, handling the NIP-42 AUTH handshake
# inline. Collects EVENT frames (newest first; reversed at EOSE).
#
# State threaded through the recursion:
#   events         — frames collected so far, in reverse arrival order
#   authenticated? — whether an AUTH event has been accepted by the relay
#   auth_event_id  — id of an in-flight AUTH event awaiting its OK, or nil
defp authenticated_query(
       client,
       relay_auth_url,
       private_key,
       subscription_id,
       filters,
       events,
       authenticated?,
       auth_event_id
     ) do
  receive do
    # Relay issued a challenge: sign and send an AUTH event, then keep
    # looping until its OK arrives.
    {:node_sync_e2e_relay_client, ^client, :frame, ["AUTH", challenge]} ->
      auth_event =
        auth_event(relay_auth_url, challenge)
        |> sign_event!(private_key)

      :ok = RelayClient.send_json(client, ["AUTH", auth_event])

      authenticated_query(
        client,
        relay_auth_url,
        private_key,
        subscription_id,
        filters,
        events,
        authenticated?,
        auth_event["id"]
      )

    # AUTH accepted: re-issue the subscription now that we are authenticated.
    {:node_sync_e2e_relay_client, ^client, :frame, ["OK", event_id, true, _message]}
    when event_id == auth_event_id ->
      :ok = RelayClient.send_json(client, ["REQ", subscription_id | filters])

      authenticated_query(
        client,
        relay_auth_url,
        private_key,
        subscription_id,
        filters,
        events,
        true,
        nil
      )

    # AUTH rejected: surface the relay's message.
    {:node_sync_e2e_relay_client, ^client, :frame, ["OK", event_id, false, message]}
    when event_id == auth_event_id ->
      {:error, {:auth_failed, message}}

    # A matching EVENT for our subscription: accumulate and keep reading.
    {:node_sync_e2e_relay_client, ^client, :frame, ["EVENT", ^subscription_id, event]} ->
      authenticated_query(
        client,
        relay_auth_url,
        private_key,
        subscription_id,
        filters,
        [event | events],
        authenticated?,
        auth_event_id
      )

    # End of stored events: close the subscription and return results in
    # arrival order.
    {:node_sync_e2e_relay_client, ^client, :frame, ["EOSE", ^subscription_id]} ->
      :ok = RelayClient.send_json(client, ["CLOSE", subscription_id])
      {:ok, Enum.reverse(events)}

    # Relay closed the subscription. If it was closed for missing auth while
    # an AUTH event is still in flight, keep waiting for the AUTH OK (which
    # re-issues the REQ); anything else is a hard failure.
    {:node_sync_e2e_relay_client, ^client, :frame, ["CLOSED", ^subscription_id, message]} ->
      cond do
        authenticated? and not auth_required_message?(message) ->
          {:error, {:subscription_closed, message}}

        auth_required_message?(message) and not is_nil(auth_event_id) ->
          authenticated_query(
            client,
            relay_auth_url,
            private_key,
            subscription_id,
            filters,
            events,
            authenticated?,
            auth_event_id
          )

        true ->
          {:error, {:subscription_closed, message}}
      end

    {:node_sync_e2e_relay_client, ^client, :frame, {:decode_error, reason, payload}} ->
      {:error, {:decode_error, reason, payload}}

    {:node_sync_e2e_relay_client, ^client, :disconnected, status} ->
      {:error, {:disconnected, status.reason}}
  after
    # No frame within the window: give up on this attempt.
    @frame_timeout_ms -> {:error, :query_timeout}
  end
end
# Sends the EVENT optimistically; any NIP-42 auth handshake the relay demands
# is handled inside the receive loop, which replays the event afterwards.
defp publish_event(client, relay_auth_url, private_key, event) do
  event_id = Map.fetch!(event, "id")
  :ok = RelayClient.send_json(client, ["EVENT", event])
  do_publish_event(client, relay_auth_url, private_key, event, event_id, false, nil, false)
end
# Receive loop for publishing one event, handling NIP-42 AUTH inline.
#
# State threaded through the recursion:
#   authenticated?       — whether an AUTH event has been accepted
#   auth_event_id        — id of an in-flight AUTH event awaiting its OK
#   replayed_after_auth? — whether the EVENT was re-sent post-authentication
defp do_publish_event(
       client,
       relay_auth_url,
       private_key,
       event,
       published_event_id,
       authenticated?,
       auth_event_id,
       replayed_after_auth?
     ) do
  receive do
    # Relay issued a challenge: sign and send an AUTH event.
    {:node_sync_e2e_relay_client, ^client, :frame, ["AUTH", challenge]} ->
      auth_event =
        auth_event(relay_auth_url, challenge)
        |> sign_event!(private_key)

      :ok = RelayClient.send_json(client, ["AUTH", auth_event])

      do_publish_event(
        client,
        relay_auth_url,
        private_key,
        event,
        published_event_id,
        authenticated?,
        auth_event["id"],
        replayed_after_auth?
      )

    # AUTH accepted: replay the EVENT now that we are authenticated.
    {:node_sync_e2e_relay_client, ^client, :frame, ["OK", event_id, true, _message]}
    when event_id == auth_event_id ->
      :ok = RelayClient.send_json(client, ["EVENT", event])

      do_publish_event(
        client,
        relay_auth_url,
        private_key,
        event,
        published_event_id,
        true,
        nil,
        true
      )

    # AUTH rejected: surface the relay's message.
    {:node_sync_e2e_relay_client, ^client, :frame, ["OK", event_id, false, message]}
    when event_id == auth_event_id ->
      {:error, {:auth_failed, message}}

    # Our event was accepted.
    {:node_sync_e2e_relay_client, ^client, :frame, ["OK", event_id, true, _message]}
    when event_id == published_event_id ->
      :ok

    # Our event was rejected. An auth-required rejection of the optimistic
    # first send is expected — keep looping so the AUTH handshake can finish
    # and the event be replayed; anything else is a hard failure.
    {:node_sync_e2e_relay_client, ^client, :frame, ["OK", event_id, false, message]}
    when event_id == published_event_id ->
      cond do
        authenticated? and replayed_after_auth? and not auth_required_message?(message) ->
          {:error, {:event_rejected, message}}

        auth_required_message?(message) ->
          do_publish_event(
            client,
            relay_auth_url,
            private_key,
            event,
            published_event_id,
            authenticated?,
            auth_event_id,
            replayed_after_auth?
          )

        true ->
          {:error, {:event_rejected, message}}
      end

    {:node_sync_e2e_relay_client, ^client, :frame, {:decode_error, reason, payload}} ->
      {:error, {:decode_error, reason, payload}}

    {:node_sync_e2e_relay_client, ^client, :disconnected, status} ->
      {:error, {:disconnected, status.reason}}
  after
    # No frame within the window: give up on this publish attempt.
    @frame_timeout_ms -> {:error, :publish_timeout}
  end
end
# Blocks until the relay client reports :connected, or fails with the
# disconnect reason / a timeout.
defp await_client_connect(client) do
  receive do
    {:node_sync_e2e_relay_client, ^client, :connected} ->
      :ok

    {:node_sync_e2e_relay_client, ^client, :disconnected, status} ->
      {:error, {:disconnected, status.reason}}
  after
    @frame_timeout_ms ->
      {:error, :connect_timeout}
  end
end
# Issues an authenticated call against a node's /management endpoint.
# The request is authorized with a NIP-98 event signed by the admin key and
# carried as a "Nostr <base64 event>" Authorization header.
defp management_call(config, node, method, params) do
  url = node.http_url <> "/management"

  signed_auth_event = sign_event!(nip98_event("POST", url), config.admin_private_key)
  auth_header = "Nostr " <> Base.encode64(JSON.encode!(signed_auth_event))

  request = [
    url: url,
    headers: [{"authorization", auth_header}],
    json: %{"method" => method, "params" => params},
    # Body decoding is handled by decode_management_response/1.
    decode_body: false,
    connect_options: [timeout: 1_000],
    receive_timeout: 5_000
  ]

  case Req.post(request) do
    {:ok, %{status: 200, body: body}} -> decode_management_response(body)
    {:ok, %{status: status, body: body}} -> {:error, {:management_http_error, status, body}}
    {:error, reason} -> {:error, reason}
  end
end
# Decodes a management API response body.
# %{"ok" => true}  -> {:ok, result}
# %{"ok" => false} -> {:error, {:management_error, error}}
# any other map    -> {:error, {:unexpected_management_response, map}}
# invalid JSON     -> {:error, {:invalid_management_json, reason}}
defp decode_management_response(body) when is_binary(body) do
  case JSON.decode(body) do
    {:ok, %{"ok" => true, "result" => result}} ->
      {:ok, result}

    {:ok, %{"ok" => false, "error" => error}} ->
      {:error, {:management_error, error}}

    {:ok, other} ->
      {:error, {:unexpected_management_response, other}}

    {:error, reason} ->
      {:error, {:invalid_management_json, reason}}
  end
end
# Signs `event` with the hex-encoded Schnorr private key: fills in "pubkey",
# computes the canonical event id, and attaches the hex signature.
# Note: the id is computed over the event with a zeroed placeholder "sig"
# field, mirroring how the relay side hashes events here.
defp sign_event!(event, private_key_hex) do
  {:ok, pubkey} = derive_pubkey(private_key_hex)
  seckey = private_key_hex |> String.downcase() |> Base.decode16!(case: :lower)

  prepared =
    event
    |> Map.put("pubkey", pubkey)
    |> Map.put("sig", String.duplicate("0", 128))

  id = Auth.compute_event_id(prepared)

  signature =
    id
    |> Base.decode16!(case: :lower)
    |> Secp256k1.schnorr_sign(seckey)
    |> Base.encode16(case: :lower)

  prepared
  |> Map.put("id", id)
  |> Map.put("sig", signature)
end
# Derives the x-only public key (lowercase hex) from a 32-byte hex-encoded
# private key. Any malformed key or crypto failure yields
# {:error, {:invalid_private_key, private_key_hex}}.
defp derive_pubkey(private_key_hex) do
  decoded = private_key_hex |> String.downcase() |> Base.decode16(case: :lower)

  with {:ok, <<_::256>> = seckey} <- decoded do
    pubkey =
      seckey
      |> Secp256k1.pubkey(:xonly)
      |> Base.encode16(case: :lower)

    {:ok, pubkey}
  else
    _other -> {:error, {:invalid_private_key, private_key_hex}}
  end
rescue
  # Secp256k1 may raise on out-of-range keys; normalize to the error tuple.
  _error -> {:error, {:invalid_private_key, private_key_hex}}
end
# Builds an unsigned NIP-98 HTTP-auth event (kind 27235) carrying the HTTP
# method and target URL as tags; sign_event!/2 adds pubkey/id/sig.
defp nip98_event(method, url) do
  %{
    "kind" => 27_235,
    "created_at" => System.system_time(:second),
    "content" => "",
    "tags" => [["method", method], ["u", url]]
  }
end
# Builds an unsigned NIP-42 relay-auth event (kind 22242) answering the
# relay's challenge; sign_event!/2 adds pubkey/id/sig.
defp auth_event(relay_auth_url, challenge) do
  %{
    "kind" => 22_242,
    "created_at" => System.system_time(:second),
    "content" => "",
    "tags" => [["challenge", challenge], ["relay", relay_auth_url]]
  }
end
# Polls `fun` (returning {:ok, value} | {:retry, reason}) every `interval_ms`
# until success or until `timeout_ms` has elapsed; `label` tags the timeout.
defp wait_until(label, timeout_ms, interval_ms, fun) do
  do_wait_until(label, timeout_ms, interval_ms, System.monotonic_time(:millisecond), fun)
end
# Recursive worker for wait_until/4: retries `fun` until it succeeds or the
# elapsed time since `started_at` reaches `timeout_ms`.
defp do_wait_until(label, timeout_ms, interval_ms, started_at, fun) do
  case fun.() do
    {:ok, _value} = success ->
      success

    {:retry, reason} ->
      elapsed = System.monotonic_time(:millisecond) - started_at

      if elapsed >= timeout_ms do
        {:error, {:timeout, label, reason}}
      else
        Process.sleep(interval_ms)
        do_wait_until(label, timeout_ms, interval_ms, started_at, fun)
      end
  end
end
# Loads the JSON state file at `path`. A missing file, invalid JSON, or a
# non-map top-level value all fall back to %{}; any other read failure is a
# hard error (raised), since it indicates a broken environment.
defp load_state(path) do
  case File.read(path) do
    {:ok, body} ->
      case JSON.decode(body) do
        {:ok, state} when is_map(state) -> state
        _invalid -> %{}
      end

    {:error, :enoent} ->
      %{}

    {:error, reason} ->
      raise "failed to read state file #{path}: #{inspect(reason)}"
  end
end
# Merges `attrs` over the existing state at `path` and writes the result
# back as JSON, creating parent directories as needed. Returns :ok or the
# first {:error, reason} from mkdir_p/write.
defp save_state(path, attrs) when is_binary(path) and is_map(attrs) do
  merged = Map.merge(load_state(path), attrs)

  with :ok <- File.mkdir_p(Path.dirname(path)) do
    File.write(path, JSON.encode!(merged))
  end
end
# Verifies that a loaded state map belongs to the current run. A state map
# with no "run_id" key (e.g. a fresh file) is accepted as-is.
defp ensure_run_matches(config, state) do
  case Map.fetch(state, "run_id") do
    {:ok, run_id} when run_id == config.run_id -> :ok
    {:ok, run_id} -> {:error, {:run_id_mismatch, run_id, config.run_id}}
    :error -> :ok
  end
end
# Fetches `key` from the state map, tagging a miss with the missing key so
# callers can surface which piece of state is absent.
defp fetch_state_value(state, key) do
  with :error <- Map.fetch(state, key) do
    {:error, {:missing_state_value, key}}
  end
end
# Checks that `stats[key]` is an integer counter of at least `minimum`;
# anything else (missing, non-integer, too small) fails with the full stats
# map for diagnostics.
defp ensure_minimum_counter(stats, key, minimum) do
  counter = Map.get(stats, key)

  if is_integer(counter) and counter >= minimum do
    :ok
  else
    {:error, {:unexpected_sync_stats, stats}}
  end
end
# Heuristic: a relay OK/NOTICE message is treated as auth-related when it
# mentions "auth" in any casing (e.g. "auth-required: ..."). Non-binary
# payloads are never auth-related.
defp auth_required_message?(message) when is_binary(message) do
  message
  |> String.downcase()
  |> String.contains?("auth")
end

defp auth_required_message?(_message), do: false
# Generates a unique, roughly time-ordered run identifier for a fresh run.
defp default_run_id do
  millis = System.system_time(:millisecond)
  unique = System.unique_integer([:positive, :monotonic])
  "run-#{millis}-#{unique}"
end
# Renders runner error reasons as human-readable strings for CLI output.
# Unknown reasons fall through to inspect/1.
defp format_reason({:timeout, label, reason}) do
  "timeout waiting for #{label}: #{inspect(reason)}"
end

defp format_reason({:invalid_arguments, invalid}) do
  "invalid arguments: #{inspect(invalid)}"
end

defp format_reason({:missing_env, env_var}) do
  "missing environment variable #{env_var}"
end

defp format_reason({:unknown_command, command}) do
  "unknown command #{command}"
end

defp format_reason({:run_id_mismatch, stored_run_id, requested_run_id}) do
  "state file run id #{stored_run_id} does not match requested run id #{requested_run_id}"
end

defp format_reason({:missing_state_value, key}) do
  "state file is missing #{key}"
end

defp format_reason(:missing_command) do
  "usage: elixir scripts/node_sync_e2e.exs <bootstrap|publish-resume|verify-resume> --state-file <path>"
end

defp format_reason(:missing_state_file) do
  "--state-file is required"
end

defp format_reason(other_reason), do: inspect(other_reason)
end
# Script entrypoint: dispatch on CLI args when run via `elixir scripts/node_sync_e2e.exs`.
NodeSyncE2E.Runner.main(System.argv())

View File

@@ -0,0 +1,111 @@
#!/usr/bin/env bash
# Docker-based node-sync end-to-end test.
#
# Boots two Parrhesia nodes (A and B) plus their databases with docker
# compose, then drives scripts/node_sync_e2e.exs through three phases:
#   bootstrap      -> both nodes up, baseline established
#   publish-resume -> node B stopped, events published to node A
#   verify-resume  -> node B restarted, verify it catches up
set -euo pipefail

# Resolve the repository root from this script's location and work from there.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$ROOT_DIR"

# Run parameters; each can be overridden via its PARRHESIA_* env var.
RUN_ID="${PARRHESIA_NODE_SYNC_E2E_RUN_ID:-docker-$(date +%s)}"
RESOURCE="${PARRHESIA_NODE_SYNC_E2E_RESOURCE:-tribes.accounts.user}"
RUNNER_MIX_ENV="${PARRHESIA_NODE_SYNC_E2E_RUNNER_MIX_ENV:-test}"
TMP_DIR="${PARRHESIA_NODE_SYNC_E2E_TMP_DIR:-$(mktemp -d "${TMPDIR:-/tmp}/parrhesia-node-sync-docker-e2e.XXXXXX")}"
STATE_FILE="$TMP_DIR/state.json"
COMPOSE_FILE="$ROOT_DIR/compose.node-sync-e2e.yaml"

# Derive a compose project name from the tmp dir so concurrent runs do not
# collide; compose project names must be lowercase alphanumerics/underscores.
COMPOSE_PROJECT_SUFFIX="$(basename "$TMP_DIR" | tr '[:upper:]' '[:lower:]' | tr -c 'a-z0-9' '_')"
COMPOSE_PROJECT_NAME="${COMPOSE_PROJECT_NAME:-parrhesia-node-sync-e2e-${COMPOSE_PROJECT_SUFFIX}}"
export COMPOSE_PROJECT_NAME

# Host-side ports for reaching the nodes from this script; the INTERNAL
# relay URLs are what the containers use on the compose network.
NODE_A_HOST_PORT="${PARRHESIA_NODE_A_HOST_PORT:-45131}"
NODE_B_HOST_PORT="${PARRHESIA_NODE_B_HOST_PORT:-45132}"
NODE_A_HTTP_URL="http://127.0.0.1:${NODE_A_HOST_PORT}"
NODE_B_HTTP_URL="http://127.0.0.1:${NODE_B_HOST_PORT}"
NODE_A_WS_URL="ws://127.0.0.1:${NODE_A_HOST_PORT}/relay"
NODE_B_WS_URL="ws://127.0.0.1:${NODE_B_HOST_PORT}/relay"
NODE_A_INTERNAL_RELAY_URL="${PARRHESIA_NODE_A_INTERNAL_RELAY_URL:-ws://parrhesia-a:4413/relay}"
NODE_B_INTERNAL_RELAY_URL="${PARRHESIA_NODE_B_INTERNAL_RELAY_URL:-ws://parrhesia-b:4413/relay}"

# ACL filter protecting the test resource (kind 5000, "#r" tag = RESOURCE).
printf -v PROTECTED_FILTERS_JSON '[{"kinds":[5000],"#r":["%s"]}]' "$RESOURCE"

# Tear down compose resources and (unless KEEP_TMP=1) the tmp dir on exit.
cleanup() {
  docker compose -f "$COMPOSE_FILE" down -v >/dev/null 2>&1 || true
  if [[ "${PARRHESIA_NODE_SYNC_E2E_KEEP_TMP:-0}" != "1" ]]; then
    rm -rf "$TMP_DIR"
  fi
}
trap cleanup EXIT INT TERM

# Builds and loads the docker image via nix unless PARRHESIA_IMAGE is set.
# The .#dockerImage output is Linux-only, so other hosts must supply one.
load_docker_image() {
  if [[ -n "${PARRHESIA_IMAGE:-}" ]]; then
    return
  fi
  if [[ "$(uname -s)" != "Linux" ]]; then
    echo "PARRHESIA_IMAGE must be set on non-Linux hosts; .#dockerImage is Linux-only." >&2
    exit 1
  fi
  local image_path
  image_path="$(nix build .#dockerImage --print-out-paths --no-link)"
  docker load <"$image_path" >/dev/null
  export PARRHESIA_IMAGE="parrhesia:latest"
}

# Polls $url/health until it responds (150 tries x 0.2s sleep).
wait_for_health() {
  local url="$1"
  local label="$2"
  for _ in {1..150}; do
    if curl -fsS "$url/health" >/dev/null 2>&1; then
      return
    fi
    sleep 0.2
  done
  echo "${label} did not become healthy at ${url}" >&2
  exit 1
}

# Runs the Elixir e2e runner with the compiled build on the code path.
run_runner() {
  ERL_LIBS="_build/${RUNNER_MIX_ENV}/lib" \
    elixir scripts/node_sync_e2e.exs "$@" --state-file "$STATE_FILE"
}

load_docker_image
MIX_ENV="$RUNNER_MIX_ENV" mix compile >/dev/null

# Environment consumed by the compose file.
export PARRHESIA_NODE_A_HOST_PORT
export PARRHESIA_NODE_B_HOST_PORT
export PARRHESIA_NODE_A_RELAY_URL="$NODE_A_INTERNAL_RELAY_URL"
export PARRHESIA_NODE_B_RELAY_URL="$NODE_B_INTERNAL_RELAY_URL"
export PARRHESIA_ACL_PROTECTED_FILTERS="$PROTECTED_FILTERS_JSON"

# Bring up databases, run migrations, then start both nodes.
docker compose -f "$COMPOSE_FILE" up -d db-a db-b
docker compose -f "$COMPOSE_FILE" run -T --rm migrate-a
docker compose -f "$COMPOSE_FILE" run -T --rm migrate-b
docker compose -f "$COMPOSE_FILE" up -d parrhesia-a parrhesia-b
wait_for_health "$NODE_A_HTTP_URL" "Node A"
wait_for_health "$NODE_B_HTTP_URL" "Node B"

# Environment consumed by the Elixir runner.
export PARRHESIA_NODE_SYNC_E2E_RUN_ID="$RUN_ID"
export PARRHESIA_NODE_SYNC_E2E_RESOURCE="$RESOURCE"
export PARRHESIA_NODE_A_HTTP_URL="$NODE_A_HTTP_URL"
export PARRHESIA_NODE_B_HTTP_URL="$NODE_B_HTTP_URL"
export PARRHESIA_NODE_A_WS_URL="$NODE_A_WS_URL"
export PARRHESIA_NODE_B_WS_URL="$NODE_B_WS_URL"
export PARRHESIA_NODE_A_RELAY_AUTH_URL="$NODE_A_WS_URL"
export PARRHESIA_NODE_B_RELAY_AUTH_URL="$NODE_B_WS_URL"
export PARRHESIA_NODE_A_SYNC_URL="$NODE_A_INTERNAL_RELAY_URL"
export PARRHESIA_NODE_B_SYNC_URL="$NODE_B_INTERNAL_RELAY_URL"

# Phase 1: both nodes healthy, establish baseline sync state.
run_runner bootstrap
# Phase 2: stop node B and publish events only node A sees.
docker compose -f "$COMPOSE_FILE" stop parrhesia-b
run_runner publish-resume
# Phase 3: restart node B and verify it resumes and catches up.
docker compose -f "$COMPOSE_FILE" up -d parrhesia-b
wait_for_health "$NODE_B_HTTP_URL" "Node B"
run_runner verify-resume

printf 'node-sync-e2e docker run completed\nstate: %s\n' "$STATE_FILE"

227
scripts/run_node_sync_e2e.sh Executable file
View File

@@ -0,0 +1,227 @@
#!/usr/bin/env bash
# Local (non-docker) node-sync end-to-end test.
#
# Starts two Parrhesia nodes as local `mix run` processes against two
# scratch Postgres databases, then drives scripts/node_sync_e2e.exs through
# bootstrap -> publish-resume (node B down) -> verify-resume (node B back).
set -euo pipefail

# Resolve the repository root from this script's location and work from there.
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
cd "$ROOT_DIR"

# Run parameters; each can be overridden via its PARRHESIA_* env var.
RUN_ID="${PARRHESIA_NODE_SYNC_E2E_RUN_ID:-local-$(date +%s)}"
RESOURCE="${PARRHESIA_NODE_SYNC_E2E_RESOURCE:-tribes.accounts.user}"
RUNNER_MIX_ENV="${PARRHESIA_NODE_SYNC_E2E_RUNNER_MIX_ENV:-test}"
TMP_DIR="${PARRHESIA_NODE_SYNC_E2E_TMP_DIR:-$(mktemp -d "${TMPDIR:-/tmp}/parrhesia-node-sync-e2e.XXXXXX")}"
STATE_FILE="$TMP_DIR/state.json"
LOG_DIR="$TMP_DIR/logs"
mkdir -p "$LOG_DIR"

# Unique database names derived from the tmp dir so concurrent runs do not
# clash in the same Postgres instance.
SUFFIX="$(basename "$TMP_DIR" | tr -c 'a-zA-Z0-9' '_')"
DB_NAME_A="${PARRHESIA_NODE_SYNC_E2E_DB_A:-parrhesia_node_sync_a_${SUFFIX}}"
DB_NAME_B="${PARRHESIA_NODE_SYNC_E2E_DB_B:-parrhesia_node_sync_b_${SUFFIX}}"

# True (exit 0) when a local TCP listener exists on $1; prefers ss, falls
# back to lsof, and aborts the run if neither tool is installed.
port_in_use() {
  local port="$1"
  if command -v ss >/dev/null 2>&1; then
    ss -ltn "( sport = :${port} )" | tail -n +2 | grep -q .
    return
  fi
  if command -v lsof >/dev/null 2>&1; then
    lsof -nP -iTCP:"${port}" -sTCP:LISTEN >/dev/null 2>&1
    return
  fi
  echo "Neither ss nor lsof is available for checking port usage." >&2
  exit 1
}

# Picks a random free port in 40000-49999 (retries until one is unused).
pick_port() {
  local port
  while true; do
    port="$(( (RANDOM % 10000) + 40000 ))"
    if ! port_in_use "$port"; then
      printf '%s\n' "$port"
      return
    fi
  done
}

NODE_A_PORT="${PARRHESIA_NODE_A_PORT:-$(pick_port)}"
NODE_B_PORT="${PARRHESIA_NODE_B_PORT:-$(pick_port)}"
if [[ "$NODE_A_PORT" == "$NODE_B_PORT" ]]; then
  echo "Node A and Node B ports must differ." >&2
  exit 1
fi

# Prints an ecto:// URL for $1 using the standard PG* env vars. A PGHOST
# beginning with "/" is treated as a unix-socket directory.
database_url_for() {
  local database_name="$1"
  local pg_user="${PGUSER:-${USER:-agent}}"
  local pg_host="${PGHOST:-localhost}"
  local pg_port="${PGPORT:-5432}"
  if [[ "$pg_host" == /* ]]; then
    if [[ -n "${PGPASSWORD:-}" ]]; then
      printf 'ecto://%s:%s@localhost/%s?socket_dir=%s&port=%s\n' \
        "$pg_user" "$PGPASSWORD" "$database_name" "$pg_host" "$pg_port"
    else
      printf 'ecto://%s@localhost/%s?socket_dir=%s&port=%s\n' \
        "$pg_user" "$database_name" "$pg_host" "$pg_port"
    fi
  else
    if [[ -n "${PGPASSWORD:-}" ]]; then
      printf 'ecto://%s:%s@%s:%s/%s\n' \
        "$pg_user" "$PGPASSWORD" "$pg_host" "$pg_port" "$database_name"
    else
      printf 'ecto://%s@%s:%s/%s\n' \
        "$pg_user" "$pg_host" "$pg_port" "$database_name"
    fi
  fi
}

DATABASE_URL_A="$(database_url_for "$DB_NAME_A")"
DATABASE_URL_B="$(database_url_for "$DB_NAME_B")"

# ACL filter protecting the test resource (kind 5000, "#r" tag = RESOURCE).
printf -v PROTECTED_FILTERS_JSON '[{"kinds":[5000],"#r":["%s"]}]' "$RESOURCE"

# Kills any running node processes, optionally drops the scratch databases
# (default on), and removes the tmp dir unless KEEP_TMP=1.
cleanup() {
  if [[ -n "${NODE_A_PID:-}" ]] && kill -0 "$NODE_A_PID" 2>/dev/null; then
    kill "$NODE_A_PID" 2>/dev/null || true
    wait "$NODE_A_PID" 2>/dev/null || true
  fi
  if [[ -n "${NODE_B_PID:-}" ]] && kill -0 "$NODE_B_PID" 2>/dev/null; then
    kill "$NODE_B_PID" 2>/dev/null || true
    wait "$NODE_B_PID" 2>/dev/null || true
  fi
  if [[ "${PARRHESIA_NODE_SYNC_E2E_DROP_DB_ON_EXIT:-1}" == "1" ]]; then
    DATABASE_URL="$DATABASE_URL_A" MIX_ENV=prod mix ecto.drop --quiet --force || true
    DATABASE_URL="$DATABASE_URL_B" MIX_ENV=prod mix ecto.drop --quiet --force || true
  fi
  if [[ "${PARRHESIA_NODE_SYNC_E2E_KEEP_TMP:-0}" != "1" ]]; then
    rm -rf "$TMP_DIR"
  fi
}
trap cleanup EXIT INT TERM

# Polls http://127.0.0.1:$1/health until it responds (150 tries x 0.1s).
wait_for_health() {
  local port="$1"
  local label="$2"
  for _ in {1..150}; do
    if curl -fsS "http://127.0.0.1:${port}/health" >/dev/null 2>&1; then
      return
    fi
    sleep 0.1
  done
  echo "${label} did not become healthy on port ${port}" >&2
  exit 1
}

# Recreates a database from scratch: drop (ignoring failure), create, migrate.
setup_database() {
  local database_url="$1"
  DATABASE_URL="$database_url" MIX_ENV=prod mix ecto.drop --quiet --force || true
  DATABASE_URL="$database_url" MIX_ENV=prod mix ecto.create --quiet
  DATABASE_URL="$database_url" MIX_ENV=prod mix ecto.migrate --quiet
}

# Launches one node in the background via `mix run --no-halt`, recording its
# PID in NODE_A_PID / NODE_B_PID so cleanup and restart logic can find it.
start_node() {
  local node_name="$1"
  local port="$2"
  local database_url="$3"
  local relay_url="$4"
  local identity_path="$5"
  local sync_path="$6"
  local log_path="$7"
  DATABASE_URL="$database_url" \
    PORT="$port" \
    PARRHESIA_RELAY_URL="$relay_url" \
    PARRHESIA_ACL_PROTECTED_FILTERS="$PROTECTED_FILTERS_JSON" \
    PARRHESIA_IDENTITY_PATH="$identity_path" \
    PARRHESIA_SYNC_PATH="$sync_path" \
    MIX_ENV=prod \
    mix run --no-halt >"$log_path" 2>&1 &
  if [[ "$node_name" == "a" ]]; then
    NODE_A_PID=$!
  else
    NODE_B_PID=$!
  fi
}

# Runs the Elixir e2e runner with the compiled build on the code path.
run_runner() {
  ERL_LIBS="_build/${RUNNER_MIX_ENV}/lib" \
    elixir scripts/node_sync_e2e.exs "$@" --state-file "$STATE_FILE"
}

export DATABASE_URL="$DATABASE_URL_A"
MIX_ENV=prod mix compile
MIX_ENV="$RUNNER_MIX_ENV" mix compile >/dev/null
setup_database "$DATABASE_URL_A"
setup_database "$DATABASE_URL_B"

NODE_A_HTTP_URL="http://127.0.0.1:${NODE_A_PORT}"
NODE_B_HTTP_URL="http://127.0.0.1:${NODE_B_PORT}"
NODE_A_WS_URL="ws://127.0.0.1:${NODE_A_PORT}/relay"
NODE_B_WS_URL="ws://127.0.0.1:${NODE_B_PORT}/relay"

# Start both nodes and wait until their health endpoints respond.
start_node \
  a \
  "$NODE_A_PORT" \
  "$DATABASE_URL_A" \
  "$NODE_A_WS_URL" \
  "$TMP_DIR/node-a-identity.json" \
  "$TMP_DIR/node-a-sync.json" \
  "$LOG_DIR/node-a.log"
start_node \
  b \
  "$NODE_B_PORT" \
  "$DATABASE_URL_B" \
  "$NODE_B_WS_URL" \
  "$TMP_DIR/node-b-identity.json" \
  "$TMP_DIR/node-b-sync.json" \
  "$LOG_DIR/node-b.log"
wait_for_health "$NODE_A_PORT" "Node A"
wait_for_health "$NODE_B_PORT" "Node B"

# Environment consumed by the Elixir runner.
export PARRHESIA_NODE_SYNC_E2E_RUN_ID="$RUN_ID"
export PARRHESIA_NODE_SYNC_E2E_RESOURCE="$RESOURCE"
export PARRHESIA_NODE_A_HTTP_URL="$NODE_A_HTTP_URL"
export PARRHESIA_NODE_B_HTTP_URL="$NODE_B_HTTP_URL"
export PARRHESIA_NODE_A_WS_URL="$NODE_A_WS_URL"
export PARRHESIA_NODE_B_WS_URL="$NODE_B_WS_URL"
export PARRHESIA_NODE_A_RELAY_AUTH_URL="$NODE_A_WS_URL"
export PARRHESIA_NODE_B_RELAY_AUTH_URL="$NODE_B_WS_URL"
export PARRHESIA_NODE_A_SYNC_URL="$NODE_A_WS_URL"
export PARRHESIA_NODE_B_SYNC_URL="$NODE_B_WS_URL"

# Phase 1: both nodes healthy, establish baseline sync state.
run_runner bootstrap
# Phase 2: take node B down and publish events only node A sees.
kill "$NODE_B_PID"
wait "$NODE_B_PID" 2>/dev/null || true
unset NODE_B_PID
run_runner publish-resume
# Phase 3: restart node B (same identity/sync files) and verify catch-up.
start_node \
  b \
  "$NODE_B_PORT" \
  "$DATABASE_URL_B" \
  "$NODE_B_WS_URL" \
  "$TMP_DIR/node-b-identity.json" \
  "$TMP_DIR/node-b-sync.json" \
  "$LOG_DIR/node-b.log"
wait_for_health "$NODE_B_PORT" "Node B"
run_runner verify-resume

printf 'node-sync-e2e local run completed\nlogs: %s\n' "$LOG_DIR"

View File

@@ -0,0 +1,85 @@
# Tests for Parrhesia.API.ACL: rule CRUD (grant/list/revoke) and capability
# checks for protected sync reads.
defmodule Parrhesia.API.ACLTest do
  use ExUnit.Case, async: false

  alias Ecto.Adapters.SQL.Sandbox
  alias Parrhesia.API.ACL
  alias Parrhesia.API.RequestContext
  alias Parrhesia.Repo

  setup do
    :ok = Sandbox.checkout(Repo)

    # Pin the protected-filter config for this test and restore the previous
    # value afterwards so other suites see their own configuration.
    previous_acl = Application.get_env(:parrhesia, :acl, [])

    Application.put_env(
      :parrhesia,
      :acl,
      protected_filters: [%{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}]
    )

    on_exit(fn ->
      Application.put_env(:parrhesia, :acl, previous_acl)
    end)

    :ok
  end

  test "grant/list/revoke round-trips rules" do
    rule = %{
      principal_type: :pubkey,
      principal: String.duplicate("a", 64),
      capability: :sync_read,
      match: %{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}
    }

    assert :ok = ACL.grant(rule)
    assert {:ok, [stored_rule]} = ACL.list(principal: rule.principal, capability: :sync_read)
    assert stored_rule.match == rule.match
    assert :ok = ACL.revoke(%{id: stored_rule.id})
    assert {:ok, []} = ACL.list(principal: rule.principal)
  end

  test "check/3 requires auth and matching grant for protected sync reads" do
    filter = %{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}
    authenticated_pubkey = String.duplicate("b", 64)

    # No authenticated pubkey at all: rejected before any grant lookup.
    assert {:error, :auth_required} =
             ACL.check(:sync_read, filter, context: %RequestContext{})

    # Authenticated but no matching grant: still denied.
    assert {:error, :sync_read_not_allowed} =
             ACL.check(:sync_read, filter,
               context: %RequestContext{authenticated_pubkeys: MapSet.new([authenticated_pubkey])}
             )

    assert :ok =
             ACL.grant(%{
               principal_type: :pubkey,
               principal: authenticated_pubkey,
               capability: :sync_read,
               match: filter
             })

    # With a matching grant the same check now passes.
    assert :ok =
             ACL.check(:sync_read, filter,
               context: %RequestContext{authenticated_pubkeys: MapSet.new([authenticated_pubkey])}
             )
  end

  test "check/3 rejects broader filters than the granted rule" do
    principal = String.duplicate("c", 64)

    assert :ok =
             ACL.grant(%{
               principal_type: :pubkey,
               principal: principal,
               capability: :sync_read,
               match: %{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}
             })

    # Dropping the "#r" constraint widens the filter beyond the grant.
    assert {:error, :sync_read_not_allowed} =
             ACL.check(:sync_read, %{"kinds" => [5000]},
               context: %RequestContext{authenticated_pubkeys: MapSet.new([principal])}
             )
  end
end

View File

@@ -0,0 +1,62 @@
# Tests for Parrhesia.API.Auth: event validation delegation, event-id
# computation parity with the protocol validator, and NIP-98 header checks.
defmodule Parrhesia.API.AuthTest do
  use ExUnit.Case, async: true

  alias Parrhesia.API.Auth
  alias Parrhesia.Protocol.EventValidator

  test "validate_event delegates to event validation" do
    assert {:error, :invalid_shape} = Auth.validate_event(%{})
  end

  test "compute_event_id matches the protocol event validator" do
    event = %{
      "pubkey" => String.duplicate("a", 64),
      "created_at" => System.system_time(:second),
      "kind" => 1,
      "tags" => [],
      "content" => "hello",
      "sig" => String.duplicate("b", 128)
    }

    assert Auth.compute_event_id(event) == EventValidator.compute_id(event)
  end

  test "validate_nip98 returns shared auth context" do
    url = "http://example.com/management"
    event = nip98_event("POST", url)
    header = "Nostr " <> Base.encode64(JSON.encode!(event))

    assert {:ok, auth_context} = Auth.validate_nip98(header, "POST", url)
    assert auth_context.pubkey == event["pubkey"]
    assert auth_context.auth_event["id"] == event["id"]
    assert auth_context.request_context.caller == :http
    assert MapSet.member?(auth_context.request_context.authenticated_pubkeys, event["pubkey"])
    assert auth_context.metadata == %{method: "POST", url: url}
  end

  test "validate_nip98 accepts custom freshness window" do
    url = "http://example.com/management"
    # 120s-old event: stale under the default window, fresh under 180s.
    event = nip98_event("POST", url, %{"created_at" => System.system_time(:second) - 120})
    header = "Nostr " <> Base.encode64(JSON.encode!(event))

    assert {:error, :stale_event} = Auth.validate_nip98(header, "POST", url)
    assert {:ok, _context} = Auth.validate_nip98(header, "POST", url, max_age_seconds: 180)
  end

  # Builds a NIP-98 event (kind 27235) with a fixed dummy pubkey/sig and a
  # correctly computed id; `overrides` are merged in before the id is computed.
  defp nip98_event(method, url, overrides \\ %{}) do
    now = System.system_time(:second)

    base = %{
      "pubkey" => String.duplicate("a", 64),
      "created_at" => now,
      "kind" => 27_235,
      "tags" => [["method", method], ["u", url]],
      "content" => "",
      "sig" => String.duplicate("b", 128)
    }

    base
    |> Map.merge(overrides)
    |> Map.put("id", EventValidator.compute_id(Map.merge(base, overrides)))
  end
end

View File

@@ -0,0 +1,81 @@
# Tests for Parrhesia.API.Events: publish, duplicate handling, and the
# query/count read surface.
defmodule Parrhesia.API.EventsTest do
  use ExUnit.Case, async: false

  alias Ecto.Adapters.SQL.Sandbox
  alias Parrhesia.API.Events
  alias Parrhesia.API.RequestContext
  alias Parrhesia.Protocol.EventValidator
  alias Parrhesia.Repo

  setup do
    :ok = Sandbox.checkout(Repo)
    :ok
  end

  test "publish stores valid events through the shared API" do
    event = valid_event()

    assert {:ok, result} = Events.publish(event, context: %RequestContext{})
    assert result.accepted
    assert result.event_id == event["id"]
    assert result.message == "ok: event stored"
    assert result.reason == nil
    assert {:ok, stored_event} = Parrhesia.Storage.events().get_event(%{}, event["id"])
    assert stored_event["id"] == event["id"]
  end

  test "publish returns duplicate results without raising transport errors" do
    event = valid_event()

    assert {:ok, first_result} = Events.publish(event, context: %RequestContext{})
    assert first_result.accepted

    # Second publish of the same event is a soft rejection, not an error.
    assert {:ok, second_result} = Events.publish(event, context: %RequestContext{})
    refute second_result.accepted
    assert second_result.reason == :duplicate_event
    assert second_result.message == "duplicate: event already stored"
  end

  test "query and count preserve read semantics through the shared API" do
    now = System.system_time(:second)
    first = valid_event(%{"content" => "first", "created_at" => now})
    second = valid_event(%{"content" => "second", "created_at" => now + 1})

    assert {:ok, %{accepted: true}} = Events.publish(first, context: %RequestContext{})
    assert {:ok, %{accepted: true}} = Events.publish(second, context: %RequestContext{})

    # Query returns newest-first.
    assert {:ok, events} =
             Events.query([%{"kinds" => [1]}], context: %RequestContext{})

    assert Enum.map(events, & &1["id"]) == [second["id"], first["id"]]

    assert {:ok, 2} =
             Events.count([%{"kinds" => [1]}], context: %RequestContext{})

    # With explicit options the count result is the map form.
    assert {:ok, %{"count" => 2, "approximate" => false}} =
             Events.count([%{"kinds" => [1]}],
               context: %RequestContext{},
               options: %{}
             )
  end

  # Builds a kind-1 event with dummy pubkey/sig; `overrides` are merged in
  # before the id is recomputed so the event stays internally consistent.
  defp valid_event(overrides \\ %{}) do
    base_event = %{
      "pubkey" => String.duplicate("1", 64),
      "created_at" => System.system_time(:second),
      "kind" => 1,
      "tags" => [],
      "content" => "hello",
      "sig" => String.duplicate("3", 128)
    }

    base_event
    |> Map.merge(overrides)
    |> recalculate_event_id()
  end

  # Recomputes "id" from the event's current fields.
  defp recalculate_event_id(event) do
    Map.put(event, "id", EventValidator.compute_id(event))
  end
end

View File

@@ -0,0 +1,75 @@
# Tests for Parrhesia.API.Identity: identity generation/persistence, secret
# key import, event signing, and rotation restrictions.
defmodule Parrhesia.API.IdentityTest do
  use ExUnit.Case, async: false

  alias Parrhesia.API.Auth
  alias Parrhesia.API.Identity

  test "ensure generates and persists a server identity" do
    path = unique_identity_path()

    assert {:error, :identity_not_found} = Identity.get(path: path)
    assert {:ok, %{pubkey: pubkey, source: :generated}} = Identity.ensure(path: path)
    assert File.exists?(path)
    # Subsequent reads and ensures reuse the persisted identity.
    assert {:ok, %{pubkey: ^pubkey, source: :persisted}} = Identity.get(path: path)
    assert {:ok, %{pubkey: ^pubkey, source: :persisted}} = Identity.ensure(path: path)
  end

  test "import persists an explicit secret key and sign_event uses it" do
    path = unique_identity_path()
    secret_key = String.duplicate("1", 64)

    # Derive the expected x-only pubkey directly from the secret key.
    expected_pubkey =
      secret_key
      |> Base.decode16!(case: :lower)
      |> Secp256k1.pubkey(:xonly)
      |> Base.encode16(case: :lower)

    assert {:ok, %{pubkey: ^expected_pubkey, source: :imported}} =
             Identity.import(%{secret_key: secret_key}, path: path)

    assert {:ok, %{pubkey: ^expected_pubkey, source: :persisted}} = Identity.get(path: path)

    event = %{
      "created_at" => System.system_time(:second),
      "kind" => 22_242,
      "tags" => [],
      "content" => "identity-auth"
    }

    assert {:ok, signed_event} = Identity.sign_event(event, path: path)
    assert signed_event["pubkey"] == expected_pubkey
    assert signed_event["id"] == Auth.compute_event_id(signed_event)

    # Verify the Schnorr signature against the raw id and pubkey bytes.
    signature = Base.decode16!(signed_event["sig"], case: :lower)
    event_id = Base.decode16!(signed_event["id"], case: :lower)
    pubkey = Base.decode16!(signed_event["pubkey"], case: :lower)
    assert Secp256k1.schnorr_valid?(signature, event_id, pubkey)
  end

  test "rotate rejects configured identities and sign_event validates shape" do
    path = unique_identity_path()
    secret_key = String.duplicate("2", 64)

    assert {:error, :configured_identity_cannot_rotate} =
             Identity.rotate(path: path, configured_private_key: secret_key)

    assert {:error, :invalid_event} = Identity.sign_event(%{"kind" => 1}, path: path)
  end

  # Returns a unique tmp-file path for an identity, removed after the test.
  defp unique_identity_path do
    path =
      Path.join(
        System.tmp_dir!(),
        "parrhesia_identity_#{System.unique_integer([:positive, :monotonic])}.json"
      )

    on_exit(fn ->
      _ = File.rm(path)
    end)

    path
  end
end

View File

@@ -0,0 +1,80 @@
# Tests for Parrhesia.API.Stream: catch-up delivery + EOSE, live fanout
# after EOSE, and subscription teardown.
defmodule Parrhesia.API.StreamTest do
  use ExUnit.Case, async: false

  alias Ecto.Adapters.SQL.Sandbox
  alias Parrhesia.API.Events
  alias Parrhesia.API.RequestContext
  alias Parrhesia.API.Stream
  alias Parrhesia.Protocol.EventValidator
  alias Parrhesia.Repo

  setup do
    :ok = Sandbox.checkout(Repo)
    :ok
  end

  test "subscribe streams catch-up events followed by eose" do
    event = valid_event()
    context = %RequestContext{}

    assert {:ok, %{accepted: true}} = Events.publish(event, context: context)
    assert {:ok, ref} = Stream.subscribe(self(), "sub-1", [%{"kinds" => [1]}], context: context)

    # Stored event arrives first, then the end-of-stored-events marker.
    assert_receive {:parrhesia, :event, ^ref, "sub-1", received_event}
    assert received_event["id"] == event["id"]
    assert_receive {:parrhesia, :eose, ^ref, "sub-1"}
    assert :ok = Stream.unsubscribe(ref)
  end

  test "subscribe receives live fanout events after eose" do
    context = %RequestContext{}
    event = valid_event()

    assert {:ok, ref} =
             Stream.subscribe(self(), "sub-live", [%{"kinds" => [1]}], context: context)

    assert_receive {:parrhesia, :eose, ^ref, "sub-live"}, 1_000

    # Published after EOSE, so delivery must come from live fanout.
    assert {:ok, %{accepted: true}} = Events.publish(event, context: context)
    assert_receive {:parrhesia, :event, ^ref, "sub-live", received_event}, 1_000
    assert received_event["id"] == event["id"]
    assert :ok = Stream.unsubscribe(ref)
  end

  test "unsubscribe stops the subscription bridge" do
    context = %RequestContext{}

    assert {:ok, ref} =
             Stream.subscribe(self(), "sub-stop", [%{"kinds" => [1]}], context: context)

    assert_receive {:parrhesia, :eose, ^ref, "sub-stop"}

    [{stream_pid, _value}] = Registry.lookup(Parrhesia.API.Stream.Registry, ref)
    # Sync with the bridge process before monitoring so the DOWN we receive
    # is caused by the unsubscribe, not an earlier exit.
    _ = :sys.get_state(stream_pid)
    monitor_ref = Process.monitor(stream_pid)
    assert :ok = Stream.unsubscribe(ref)
    assert_receive {:DOWN, ^monitor_ref, :process, ^stream_pid, reason}
    assert reason in [:normal, :noproc]
  end

  # Builds a kind-1 event with dummy pubkey/sig; `overrides` are merged in
  # before the id is recomputed so the event stays internally consistent.
  defp valid_event(overrides \\ %{}) do
    base_event = %{
      "pubkey" => String.duplicate("1", 64),
      "created_at" => System.system_time(:second),
      "kind" => 1,
      "tags" => [],
      "content" => "hello",
      "sig" => String.duplicate("3", 128)
    }

    base_event
    |> Map.merge(overrides)
    |> recalculate_event_id()
  end

  # Recomputes "id" from the event's current fields.
  defp recalculate_event_id(event) do
    Map.put(event, "id", EventValidator.compute_id(event))
  end
end

View File

@@ -0,0 +1,219 @@
# Tests for Parrhesia.API.Sync and its manager: server config normalization
# and persistence, runtime start/stop/sync-now stats, validation of invalid
# shapes, and Admin delegation to an injected sync manager.
defmodule Parrhesia.API.SyncTest do
  use ExUnit.Case, async: false

  alias Ecto.Adapters.SQL.Sandbox
  alias Parrhesia.API.Admin
  alias Parrhesia.API.Sync
  alias Parrhesia.API.Sync.Manager
  alias Parrhesia.Repo

  setup do
    :ok = Sandbox.checkout(Repo)
    :ok
  end

  test "put_server stores normalized config and persists it across restart" do
    {manager, path, pid} = start_sync_manager()

    assert {:ok, stored_server} = Sync.put_server(valid_server(), manager: manager)
    assert stored_server.id == "tribes-primary"
    assert stored_server.mode == :req_stream
    assert stored_server.auth.type == :nip42
    assert stored_server.tls.mode == :required
    assert stored_server.tls.hostname == "relay-a.example"
    assert stored_server.runtime.state == :running
    assert File.exists?(path)

    assert {:ok, fetched_server} = Sync.get_server("tribes-primary", manager: manager)
    assert fetched_server == stored_server
    assert {:ok, [listed_server]} = Sync.list_servers(manager: manager)
    assert listed_server.id == "tribes-primary"

    # Stop the manager; the supervisor restart should reload from `path`.
    monitor_ref = Process.monitor(pid)
    assert :ok = GenServer.stop(pid)
    assert_receive {:DOWN, ^monitor_ref, :process, ^pid, :normal}
    assert {:ok, persisted_server} = wait_for_server(manager, "tribes-primary")
    assert persisted_server.id == "tribes-primary"
    assert persisted_server.tls.hostname == "relay-a.example"
    assert persisted_server.runtime.state == :running
  end

  test "start_server stop_server and sync_now update runtime stats" do
    {manager, _path, _pid} = start_sync_manager()

    # A disabled server starts out stopped.
    disabled_server = valid_server(%{"id" => "tribes-disabled", "enabled?" => false})
    assert {:ok, stored_server} = Sync.put_server(disabled_server, manager: manager)
    assert stored_server.runtime.state == :stopped

    assert :ok = Sync.start_server("tribes-disabled", manager: manager)
    assert {:ok, started_server} = Sync.get_server("tribes-disabled", manager: manager)
    assert started_server.runtime.state == :running

    assert :ok = Sync.sync_now("tribes-disabled", manager: manager)
    assert {:ok, stats} = Sync.server_stats("tribes-disabled", manager: manager)
    assert stats["server_id"] == "tribes-disabled"
    assert stats["state"] == "running"
    assert stats["query_runs"] == 1
    assert is_binary(stats["last_sync_started_at"])
    assert is_binary(stats["last_sync_completed_at"])

    assert :ok = Sync.stop_server("tribes-disabled", manager: manager)
    assert {:ok, stopped_server} = Sync.get_server("tribes-disabled", manager: manager)
    assert stopped_server.runtime.state == :stopped
    assert is_binary(stopped_server.runtime.last_disconnected_at)

    # Aggregate stats reflect the single (now stopped, disabled) server.
    assert {:ok, sync_stats} = Sync.sync_stats(manager: manager)
    assert sync_stats["servers_total"] == 1
    assert sync_stats["servers_enabled"] == 0
    assert sync_stats["servers_running"] == 0
    assert sync_stats["query_runs"] == 1

    assert {:ok, sync_health} = Sync.sync_health(manager: manager)

    assert sync_health == %{
             "status" => "ok",
             "servers_total" => 1,
             "servers_connected" => 0,
             "servers_failing" => []
           }
  end

  test "put_server rejects invalid sync server shapes" do
    {manager, _path, _pid} = start_sync_manager()

    # Sync URLs must be websocket (ws/wss), not https.
    assert {:error, :invalid_url} =
             Sync.put_server(Map.put(valid_server(), "url", "https://relay-a.example"),
               manager: manager
             )

    assert {:error, :empty_filters} =
             Sync.put_server(Map.put(valid_server(), "filters", []), manager: manager)

    assert {:error, :invalid_tls_pins} =
             Sync.put_server(
               put_in(valid_server()["tls"]["pins"], []),
               manager: manager
             )
  end

  test "admin executes sync methods against an injected sync manager" do
    {manager, _path, _pid} = start_sync_manager()

    assert {:ok, created_server} =
             Admin.execute("sync_put_server", valid_server(%{"id" => "tribes-admin"}),
               manager: manager
             )

    assert created_server.id == "tribes-admin"
    assert {:ok, listed_servers} = Admin.execute("sync_list_servers", %{}, manager: manager)
    assert Enum.any?(listed_servers, &(&1.id == "tribes-admin"))

    assert {:ok, %{"ok" => true}} =
             Admin.execute("sync_sync_now", %{"id" => "tribes-admin"}, manager: manager)

    # Stats/health are reachable both via dedicated helpers and execute/3.
    assert {:ok, sync_stats} = Admin.stats(manager: manager)
    assert sync_stats["sync"]["servers_total"] == 1
    assert sync_stats["sync"]["query_runs"] == 1
    assert {:ok, execute_stats} = Admin.execute("stats", %{}, manager: manager)
    assert execute_stats["sync"]["servers_total"] == 1
    assert {:ok, health} = Admin.health(manager: manager)
    assert health["status"] == "ok"
    assert health["sync"]["servers_total"] == 1
    assert {:ok, execute_health} = Admin.execute("health", %{}, manager: manager)
    assert execute_health["status"] == "ok"
    assert execute_health["sync"]["servers_total"] == 1
  end

  # Starts a supervised Manager with a unique global name and persistence
  # path; workers are disabled so no outbound connections are attempted.
  defp start_sync_manager do
    path = unique_sync_path()
    manager = {:global, {:sync_manager, System.unique_integer([:positive, :monotonic])}}
    pid = start_supervised!({Manager, name: manager, path: path, start_workers?: false})
    {manager, path, pid}
  end

  # A minimal valid sync-server config; `overrides` replace top-level keys.
  defp valid_server(overrides \\ %{}) do
    Map.merge(
      %{
        "id" => "tribes-primary",
        "url" => "wss://relay-a.example/relay",
        "enabled?" => true,
        "auth_pubkey" => String.duplicate("a", 64),
        "filters" => [
          %{
            "kinds" => [5000],
            "#r" => ["tribes.accounts.user"]
          }
        ],
        "tls" => %{
          "pins" => [
            %{
              "type" => "spki_sha256",
              "value" => "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA="
            }
          ]
        },
        "metadata" => %{"cluster" => "primary"}
      },
      overrides
    )
  end

  # Returns a unique tmp-file path for sync state, removed after the test.
  defp unique_sync_path do
    path =
      Path.join(
        System.tmp_dir!(),
        "parrhesia_sync_#{System.unique_integer([:positive, :monotonic])}.json"
      )

    on_exit(fn ->
      _ = File.rm(path)
    end)

    path
  end

  # Polls get_server/2 while the restarted manager comes back up; an empty
  # `receive ... after 10` is used as a 10ms sleep between attempts. Exits
  # from calling a not-yet-registered manager are normalized to retries.
  defp wait_for_server(manager, server_id, attempts \\ 10)
  defp wait_for_server(_manager, _server_id, 0), do: :error

  defp wait_for_server(manager, server_id, attempts) do
    result =
      try do
        Sync.get_server(server_id, manager: manager)
      catch
        :exit, _reason -> {:error, :noproc}
      end

    case result do
      {:ok, server} ->
        {:ok, server}

      :error ->
        receive do
        after
          10 -> wait_for_server(manager, server_id, attempts - 1)
        end

      {:error, :noproc} ->
        receive do
        after
          10 -> wait_for_server(manager, server_id, attempts - 1)
        end

      {:error, {:noproc, _details}} ->
        receive do
        after
          10 -> wait_for_server(manager, server_id, attempts - 1)
        end
    end
  end
end

View File

@@ -8,9 +8,9 @@ defmodule Parrhesia.ApplicationTest do
assert is_pid(Process.whereis(Parrhesia.Storage.Supervisor)) assert is_pid(Process.whereis(Parrhesia.Storage.Supervisor))
assert is_pid(Process.whereis(Parrhesia.Subscriptions.Supervisor)) assert is_pid(Process.whereis(Parrhesia.Subscriptions.Supervisor))
assert is_pid(Process.whereis(Parrhesia.Auth.Supervisor)) assert is_pid(Process.whereis(Parrhesia.Auth.Supervisor))
assert is_pid(Process.whereis(Parrhesia.Sync.Supervisor))
assert is_pid(Process.whereis(Parrhesia.Policy.Supervisor)) assert is_pid(Process.whereis(Parrhesia.Policy.Supervisor))
assert is_pid(Process.whereis(Parrhesia.Web.Endpoint)) assert is_pid(Process.whereis(Parrhesia.Web.Endpoint))
assert is_pid(Process.whereis(Parrhesia.Web.MetricsEndpoint))
assert is_pid(Process.whereis(Parrhesia.Tasks.Supervisor)) assert is_pid(Process.whereis(Parrhesia.Tasks.Supervisor))
assert Enum.any?(Supervisor.which_children(Parrhesia.Web.Endpoint), fn {_id, pid, _type, assert Enum.any?(Supervisor.which_children(Parrhesia.Web.Endpoint), fn {_id, pid, _type,
@@ -19,6 +19,8 @@ defmodule Parrhesia.ApplicationTest do
end) end)
assert is_pid(Process.whereis(Parrhesia.Auth.Challenges)) assert is_pid(Process.whereis(Parrhesia.Auth.Challenges))
assert is_pid(Process.whereis(Parrhesia.API.Identity.Manager))
assert is_pid(Process.whereis(Parrhesia.API.Sync.Manager))
if negentropy_enabled?() do if negentropy_enabled?() do
assert is_pid(Process.whereis(Parrhesia.Negentropy.Sessions)) assert is_pid(Process.whereis(Parrhesia.Negentropy.Sessions))

View File

@@ -25,7 +25,18 @@ defmodule Parrhesia.Auth.Nip98Test do
Nip98.validate_authorization_header(header, "POST", "http://example.com/other") Nip98.validate_authorization_header(header, "POST", "http://example.com/other")
end end
defp nip98_event(method, url) do test "supports overriding the freshness window" do
url = "http://example.com/management"
event = nip98_event("POST", url, %{"created_at" => System.system_time(:second) - 120})
header = "Nostr " <> Base.encode64(JSON.encode!(event))
assert {:error, :stale_event} = Nip98.validate_authorization_header(header, "POST", url)
assert {:ok, _event} =
Nip98.validate_authorization_header(header, "POST", url, max_age_seconds: 180)
end
defp nip98_event(method, url, overrides \\ %{}) do
now = System.system_time(:second) now = System.system_time(:second)
base = %{ base = %{
@@ -37,6 +48,7 @@ defmodule Parrhesia.Auth.Nip98Test do
"sig" => String.duplicate("b", 128) "sig" => String.duplicate("b", 128)
} }
Map.put(base, "id", EventValidator.compute_id(base)) event = Map.merge(base, overrides)
Map.put(event, "id", EventValidator.compute_id(event))
end end
end end

View File

@@ -0,0 +1,42 @@
defmodule Parrhesia.Negentropy.EngineTest do
  use ExUnit.Case, async: true

  alias Parrhesia.Negentropy.Engine
  alias Parrhesia.Negentropy.Message

  test "returns exact id list for small mismatched ranges" do
    stored = [
      %{created_at: 10, id: <<1::size(256)>>},
      %{created_at: 11, id: <<2::size(256)>>}
    ]

    # An empty client snapshot forces a full mismatch; a small range is
    # answered with the exact ids rather than a fingerprint.
    empty_client_message = Engine.initial_message([])
    assert {:ok, response} = Engine.answer(stored, empty_client_message)

    assert {:ok, [%{mode: :id_list, payload: ids, upper_bound: :infinity}]} =
             Message.decode(response)

    assert ids == for(item <- stored, do: item.id)
  end

  test "splits large mismatched fingerprint ranges" do
    shared_items =
      for idx <- 1..4 do
        %{created_at: 100 + idx, id: <<idx::size(256)>>}
      end

    # The server holds one extra item the client does not know about.
    stored = shared_items ++ [%{created_at: 200, id: <<99::size(256)>>}]

    opening = Engine.initial_message(shared_items, id_list_threshold: 1)
    assert {:ok, response} = Engine.answer(stored, opening, id_list_threshold: 1)
    assert {:ok, ranges} = Message.decode(response)

    # A threshold of 1 forces the engine to split instead of listing ids.
    assert Enum.all?(ranges, fn range -> range.mode in [:fingerprint, :id_list] end)
    assert length(ranges) >= 2
  end

  test "downgrades unsupported versions" do
    assert {:ok, <<0x61>>} = Engine.answer([], <<0x62>>)
  end
end

View File

@@ -0,0 +1,28 @@
defmodule Parrhesia.Negentropy.MessageTest do
  use ExUnit.Case, async: true

  alias Parrhesia.Negentropy.Message

  test "encodes and decodes mixed range messages" do
    id_a = <<1::size(256)>>
    id_b = <<2::size(256)>>

    # Boundary between two items sharing a timestamp but differing by id.
    split =
      Message.split_bound(%{created_at: 10, id: id_a}, %{created_at: 10, id: id_b})

    ranges = [
      %{upper_bound: split, mode: :fingerprint, payload: <<0::size(128)>>},
      %{upper_bound: {11, Message.zero_id()}, mode: :id_list, payload: [id_b]},
      %{upper_bound: :infinity, mode: :skip, payload: nil}
    ]

    encoded = Message.encode(ranges)
    assert {:ok, round_tripped} = Message.decode(encoded)

    # A trailing skip-to-infinity range is elided on the wire.
    expected =
      Enum.reject(ranges, fn range ->
        range.mode == :skip and range.upper_bound == :infinity
      end)

    assert round_tripped == expected
  end

  test "rejects malformed bounds and payloads" do
    assert {:error, :invalid_message} = Message.decode(<<0x61, 0x00, 0x01, 0x02>>)
  end
end

View File

@@ -1,19 +1,64 @@
defmodule Parrhesia.Negentropy.SessionsTest do defmodule Parrhesia.Negentropy.SessionsTest do
use ExUnit.Case, async: true use ExUnit.Case, async: false
alias Ecto.Adapters.SQL.Sandbox
alias Parrhesia.Negentropy.Engine
alias Parrhesia.Negentropy.Message
alias Parrhesia.Negentropy.Sessions alias Parrhesia.Negentropy.Sessions
alias Parrhesia.Protocol.EventValidator
alias Parrhesia.Repo
alias Parrhesia.Storage.Adapters.Postgres.Events
test "opens, advances and closes sessions" do setup_all do
if is_nil(Process.whereis(Repo)) do
start_supervised!(Repo)
end
Sandbox.mode(Repo, :manual)
:ok
end
setup do
:ok = Sandbox.checkout(Repo)
:ok
end
test "opens, responds, advances and closes sessions" do
server = start_supervised!({Sessions, name: nil}) server = start_supervised!({Sessions, name: nil})
Sandbox.allow(Repo, self(), server)
assert {:ok, %{"status" => "open", "cursor" => 0}} = first =
Sessions.open(server, self(), "sub-neg", %{"cursor" => 0}) persist_event(%{
"created_at" => 1_700_100_000,
"content" => "neg-1"
})
assert {:ok, %{"status" => "ack", "cursor" => 1}} = second =
Sessions.message(server, self(), "sub-neg", %{"delta" => "abc"}) persist_event(%{
"created_at" => 1_700_100_001,
"content" => "neg-2"
})
initial_message = Engine.initial_message([])
assert {:ok, response_message} =
Sessions.open(server, self(), "sub-neg", %{"kinds" => [1]}, initial_message)
assert {:ok, [%{mode: :id_list, payload: ids, upper_bound: :infinity}]} =
Message.decode(response_message)
assert ids == [
Base.decode16!(first["id"], case: :mixed),
Base.decode16!(second["id"], case: :mixed)
]
{:ok, refs} = Events.query_event_refs(%{}, [%{"kinds" => [1]}], [])
matching_message = Engine.initial_message(refs)
assert {:ok, <<0x61>>} = Sessions.message(server, self(), "sub-neg", matching_message)
assert :ok = Sessions.close(server, self(), "sub-neg") assert :ok = Sessions.close(server, self(), "sub-neg")
assert {:error, :unknown_session} = Sessions.message(server, self(), "sub-neg", %{}) assert {:error, :unknown_session} = Sessions.message(server, self(), "sub-neg", <<0x61>>)
end end
test "rejects oversized NEG payloads" do test "rejects oversized NEG payloads" do
@@ -28,8 +73,16 @@ defmodule Parrhesia.Negentropy.SessionsTest do
sweep_interval_seconds: 60} sweep_interval_seconds: 60}
) )
Sandbox.allow(Repo, self(), server)
assert {:error, :payload_too_large} = assert {:error, :payload_too_large} =
Sessions.open(server, self(), "sub-neg", %{"delta" => String.duplicate("a", 256)}) Sessions.open(
server,
self(),
"sub-neg",
%{"kinds" => [1]},
String.duplicate(<<0x61>>, 128)
)
end end
test "enforces per-owner session limits" do test "enforces per-owner session limits" do
@@ -44,10 +97,60 @@ defmodule Parrhesia.Negentropy.SessionsTest do
sweep_interval_seconds: 60} sweep_interval_seconds: 60}
) )
assert {:ok, %{"status" => "open", "cursor" => 0}} = Sandbox.allow(Repo, self(), server)
Sessions.open(server, self(), "sub-1", %{})
assert {:ok, _response} =
Sessions.open(server, self(), "sub-1", %{"kinds" => [1]}, Engine.initial_message([]))
assert {:error, :owner_session_limit_reached} = assert {:error, :owner_session_limit_reached} =
Sessions.open(server, self(), "sub-2", %{}) Sessions.open(server, self(), "sub-2", %{"kinds" => [1]}, Engine.initial_message([]))
end
test "blocks queries larger than the configured session snapshot limit" do
server =
start_supervised!(
{Sessions,
name: nil,
max_payload_bytes: 1024,
max_sessions_per_owner: 8,
max_total_sessions: 16,
max_idle_seconds: 60,
sweep_interval_seconds: 60,
max_items_per_session: 1}
)
Sandbox.allow(Repo, self(), server)
persist_event(%{"created_at" => 1_700_200_000, "content" => "first"})
persist_event(%{"created_at" => 1_700_200_001, "content" => "second"})
assert {:error, :query_too_big} =
Sessions.open(
server,
self(),
"sub-neg",
%{"kinds" => [1]},
Engine.initial_message([])
)
end
defp persist_event(overrides) do
event = build_event(overrides)
assert {:ok, persisted_event} = Events.put_event(%{}, event)
persisted_event
end
defp build_event(overrides) do
base_event = %{
"pubkey" => String.duplicate("1", 64),
"created_at" => System.system_time(:second),
"kind" => 1,
"tags" => [],
"content" => "negentropy-test",
"sig" => String.duplicate("2", 128)
}
event = Map.merge(base_event, overrides)
Map.put(event, "id", EventValidator.compute_id(event))
end end
end end

View File

@@ -41,11 +41,13 @@ defmodule Parrhesia.ProtocolTest do
assert {:ok, {:auth, ^auth_event}} = assert {:ok, {:auth, ^auth_event}} =
Protocol.decode_client(JSON.encode!(["AUTH", auth_event])) Protocol.decode_client(JSON.encode!(["AUTH", auth_event]))
assert {:ok, {:neg_open, "sub-neg", %{"cursor" => 0}}} = assert {:ok, {:neg_open, "sub-neg", %{"kinds" => [1]}, <<0x61>>}} =
Protocol.decode_client(JSON.encode!(["NEG-OPEN", "sub-neg", %{"cursor" => 0}])) Protocol.decode_client(
JSON.encode!(["NEG-OPEN", "sub-neg", %{"kinds" => [1]}, "61"])
)
assert {:ok, {:neg_msg, "sub-neg", %{"delta" => "abc"}}} = assert {:ok, {:neg_msg, "sub-neg", <<0x61, 0x00>>}} =
Protocol.decode_client(JSON.encode!(["NEG-MSG", "sub-neg", %{"delta" => "abc"}])) Protocol.decode_client(JSON.encode!(["NEG-MSG", "sub-neg", "6100"]))
assert {:ok, {:neg_close, "sub-neg"}} = assert {:ok, {:neg_close, "sub-neg"}} =
Protocol.decode_client(JSON.encode!(["NEG-CLOSE", "sub-neg"])) Protocol.decode_client(JSON.encode!(["NEG-CLOSE", "sub-neg"]))
@@ -90,6 +92,12 @@ defmodule Parrhesia.ProtocolTest do
count_frame = Protocol.encode_relay({:count, "sub-1", %{"count" => 1}}) count_frame = Protocol.encode_relay({:count, "sub-1", %{"count" => 1}})
assert JSON.decode!(count_frame) == ["COUNT", "sub-1", %{"count" => 1}] assert JSON.decode!(count_frame) == ["COUNT", "sub-1", %{"count" => 1}]
neg_message_frame = Protocol.encode_relay({:neg_msg, "sub-neg", "61"})
assert JSON.decode!(neg_message_frame) == ["NEG-MSG", "sub-neg", "61"]
neg_error_frame = Protocol.encode_relay({:neg_err, "sub-neg", "closed: too slow"})
assert JSON.decode!(neg_error_frame) == ["NEG-ERR", "sub-neg", "closed: too slow"]
end end
defp valid_event do defp valid_event do

View File

@@ -1,6 +1,7 @@
defmodule Parrhesia.Storage.Adapters.Memory.AdapterTest do defmodule Parrhesia.Storage.Adapters.Memory.AdapterTest do
use ExUnit.Case, async: false use ExUnit.Case, async: false
alias Parrhesia.Storage.Adapters.Memory.ACL
alias Parrhesia.Storage.Adapters.Memory.Admin alias Parrhesia.Storage.Adapters.Memory.Admin
alias Parrhesia.Storage.Adapters.Memory.Events alias Parrhesia.Storage.Adapters.Memory.Events
alias Parrhesia.Storage.Adapters.Memory.Groups alias Parrhesia.Storage.Adapters.Memory.Groups
@@ -8,14 +9,36 @@ defmodule Parrhesia.Storage.Adapters.Memory.AdapterTest do
test "memory adapter supports basic behavior contract operations" do test "memory adapter supports basic behavior contract operations" do
event_id = String.duplicate("a", 64) event_id = String.duplicate("a", 64)
event = %{"id" => event_id, "pubkey" => "pk", "kind" => 1, "tags" => [], "content" => "hello"}
event = %{
"id" => event_id,
"pubkey" => "pk",
"created_at" => 1_700_000_000,
"kind" => 1,
"tags" => [],
"content" => "hello"
}
assert {:ok, _event} = Events.put_event(%{}, event) assert {:ok, _event} = Events.put_event(%{}, event)
assert {:ok, [result]} = Events.query(%{}, [%{"ids" => [event_id]}], []) assert {:ok, [result]} = Events.query(%{}, [%{"ids" => [event_id]}], [])
assert result["id"] == event_id assert result["id"] == event_id
assert {:ok, [%{created_at: 1_700_000_000, id: <<_::size(256)>>}]} =
Events.query_event_refs(%{}, [%{"ids" => [event_id]}], [])
assert :ok = Moderation.ban_pubkey(%{}, "pk") assert :ok = Moderation.ban_pubkey(%{}, "pk")
assert {:ok, true} = Moderation.pubkey_banned?(%{}, "pk") assert {:ok, true} = Moderation.pubkey_banned?(%{}, "pk")
assert {:ok, false} = Moderation.has_allowed_pubkeys?(%{})
assert :ok = Moderation.allow_pubkey(%{}, String.duplicate("f", 64))
assert {:ok, true} = Moderation.has_allowed_pubkeys?(%{})
assert {:ok, %{capability: :sync_read}} =
ACL.put_rule(%{}, %{
principal_type: :pubkey,
principal: String.duplicate("f", 64),
capability: :sync_read,
match: %{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}
})
assert {:ok, membership} = assert {:ok, membership} =
Groups.put_membership(%{}, %{group_id: "g1", pubkey: "pk", role: "member"}) Groups.put_membership(%{}, %{group_id: "g1", pubkey: "pk", role: "member"})

View File

@@ -3,6 +3,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.AdapterContractTest do
alias Ecto.Adapters.SQL.Sandbox alias Ecto.Adapters.SQL.Sandbox
alias Parrhesia.Repo alias Parrhesia.Repo
alias Parrhesia.Storage.Adapters.Postgres.ACL
alias Parrhesia.Storage.Adapters.Postgres.Admin alias Parrhesia.Storage.Adapters.Postgres.Admin
alias Parrhesia.Storage.Adapters.Postgres.Groups alias Parrhesia.Storage.Adapters.Postgres.Groups
alias Parrhesia.Storage.Adapters.Postgres.Moderation alias Parrhesia.Storage.Adapters.Postgres.Moderation
@@ -32,10 +33,13 @@ defmodule Parrhesia.Storage.Adapters.Postgres.AdapterContractTest do
assert {:ok, false} = Moderation.pubkey_banned?(%{}, pubkey) assert {:ok, false} = Moderation.pubkey_banned?(%{}, pubkey)
assert {:ok, false} = Moderation.pubkey_allowed?(%{}, pubkey) assert {:ok, false} = Moderation.pubkey_allowed?(%{}, pubkey)
assert {:ok, false} = Moderation.has_allowed_pubkeys?(%{})
assert :ok = Moderation.allow_pubkey(%{}, pubkey) assert :ok = Moderation.allow_pubkey(%{}, pubkey)
assert {:ok, true} = Moderation.pubkey_allowed?(%{}, pubkey) assert {:ok, true} = Moderation.pubkey_allowed?(%{}, pubkey)
assert {:ok, true} = Moderation.has_allowed_pubkeys?(%{})
assert :ok = Moderation.disallow_pubkey(%{}, pubkey) assert :ok = Moderation.disallow_pubkey(%{}, pubkey)
assert {:ok, false} = Moderation.pubkey_allowed?(%{}, pubkey) assert {:ok, false} = Moderation.pubkey_allowed?(%{}, pubkey)
assert {:ok, false} = Moderation.has_allowed_pubkeys?(%{})
assert {:ok, false} = Moderation.event_banned?(%{}, event_id) assert {:ok, false} = Moderation.event_banned?(%{}, event_id)
assert :ok = Moderation.ban_event(%{}, event_id) assert :ok = Moderation.ban_event(%{}, event_id)
@@ -102,6 +106,28 @@ defmodule Parrhesia.Storage.Adapters.Postgres.AdapterContractTest do
assert {:ok, nil} = Groups.get_membership(%{}, group_id, member_pubkey) assert {:ok, nil} = Groups.get_membership(%{}, group_id, member_pubkey)
end end
test "acl adapter upserts, lists, and deletes rules" do
principal = String.duplicate("f", 64)
rule = %{
principal_type: :pubkey,
principal: principal,
capability: :sync_read,
match: %{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}
}
assert {:ok, stored_rule} = ACL.put_rule(%{}, rule)
assert stored_rule.principal == principal
assert {:ok, [listed_rule]} =
ACL.list_rules(%{}, principal_type: :pubkey, capability: :sync_read)
assert listed_rule.id == stored_rule.id
assert :ok = ACL.delete_rule(%{}, %{id: stored_rule.id})
assert {:ok, []} = ACL.list_rules(%{}, principal: principal)
end
test "admin adapter appends and filters audit logs" do test "admin adapter appends and filters audit logs" do
actor_pubkey = String.duplicate("d", 64) actor_pubkey = String.duplicate("d", 64)
@@ -130,9 +156,19 @@ defmodule Parrhesia.Storage.Adapters.Postgres.AdapterContractTest do
assert {:ok, %{"status" => "ok"}} = Admin.execute(%{}, :ping, %{}) assert {:ok, %{"status" => "ok"}} = Admin.execute(%{}, :ping, %{})
assert {:ok, %{"events" => _events, "banned_pubkeys" => _banned, "blocked_ips" => _ips}} = assert {:ok,
%{
"events" => _events,
"banned_pubkeys" => _banned,
"allowed_pubkeys" => _allowed,
"acl_rules" => _acl_rules,
"blocked_ips" => _ips
}} =
Admin.execute(%{}, :stats, %{}) Admin.execute(%{}, :stats, %{})
assert {:ok, %{"methods" => methods}} = Admin.execute(%{}, :supportedmethods, %{})
assert "allow_pubkey" in methods
assert {:error, {:unsupported_method, "status"}} = Admin.execute(%{}, :status, %{}) assert {:error, {:unsupported_method, "status"}} = Admin.execute(%{}, :status, %{})
end end
end end

View File

@@ -106,6 +106,37 @@ defmodule Parrhesia.Storage.Adapters.Postgres.EventsQueryCountTest do
assert Enum.map(results, & &1["id"]) == [newest["id"], tie_winner_id] assert Enum.map(results, & &1["id"]) == [newest["id"], tie_winner_id]
end end
# Negentropy needs {created_at, raw binary id} pairs ordered by ascending
# created_at, regardless of the order events were written in.
test "query_event_refs/3 returns sorted lightweight refs for negentropy" do
  author = String.duplicate("9", 64)

  # Persist out of chronological order so the sort is actually exercised.
  newer =
    persist_event(%{
      "pubkey" => author,
      "created_at" => 1_700_000_510,
      "kind" => 1,
      "content" => "later"
    })

  older =
    persist_event(%{
      "pubkey" => author,
      "created_at" => 1_700_000_500,
      "kind" => 1,
      "content" => "earlier"
    })

  filters = [%{"authors" => [author], "kinds" => [1]}]
  assert {:ok, refs} = Events.query_event_refs(%{}, filters, [])

  expected =
    for event <- [older, newer] do
      %{created_at: event["created_at"], id: Base.decode16!(event["id"], case: :mixed)}
    end

  assert refs == expected
end
test "count/3 ORs filters, deduplicates matches and respects tag filters" do test "count/3 ORs filters, deduplicates matches and respects tag filters" do
now = 1_700_001_000 now = 1_700_001_000
target_pubkey = String.duplicate("f", 64) target_pubkey = String.duplicate("f", 64)

View File

@@ -3,7 +3,16 @@ defmodule Parrhesia.Storage.BehaviourContractsTest do
test "events behavior exposes expected callbacks" do test "events behavior exposes expected callbacks" do
assert callback_names(Parrhesia.Storage.Events) == assert callback_names(Parrhesia.Storage.Events) ==
[:count, :delete_by_request, :get_event, :purge_expired, :put_event, :query, :vanish] [
:count,
:delete_by_request,
:get_event,
:purge_expired,
:put_event,
:query,
:query_event_refs,
:vanish
]
end end
test "moderation behavior exposes expected callbacks" do test "moderation behavior exposes expected callbacks" do
@@ -15,6 +24,7 @@ defmodule Parrhesia.Storage.BehaviourContractsTest do
:block_ip, :block_ip,
:disallow_pubkey, :disallow_pubkey,
:event_banned?, :event_banned?,
:has_allowed_pubkeys?,
:ip_blocked?, :ip_blocked?,
:pubkey_allowed?, :pubkey_allowed?,
:pubkey_banned?, :pubkey_banned?,
@@ -24,6 +34,15 @@ defmodule Parrhesia.Storage.BehaviourContractsTest do
] ]
end end
# The ACL storage behaviour is the contract every ACL adapter must implement.
test "acl behavior exposes expected callbacks" do
  expected_callbacks = [:delete_rule, :list_rules, :put_rule]

  assert callback_names(Parrhesia.Storage.ACL) == expected_callbacks
end
test "groups behavior exposes expected callbacks" do test "groups behavior exposes expected callbacks" do
assert callback_names(Parrhesia.Storage.Groups) == assert callback_names(Parrhesia.Storage.Groups) ==
[ [

View File

@@ -5,6 +5,7 @@ defmodule Parrhesia.StorageTest do
test "resolves default storage modules" do test "resolves default storage modules" do
assert Storage.events() == Parrhesia.Storage.Adapters.Postgres.Events assert Storage.events() == Parrhesia.Storage.Adapters.Postgres.Events
assert Storage.acl() == Parrhesia.Storage.Adapters.Postgres.ACL
assert Storage.moderation() == Parrhesia.Storage.Adapters.Postgres.Moderation assert Storage.moderation() == Parrhesia.Storage.Adapters.Postgres.Moderation
assert Storage.groups() == Parrhesia.Storage.Adapters.Postgres.Groups assert Storage.groups() == Parrhesia.Storage.Adapters.Postgres.Groups
assert Storage.admin() == Parrhesia.Storage.Adapters.Postgres.Admin assert Storage.admin() == Parrhesia.Storage.Adapters.Postgres.Admin

View File

@@ -0,0 +1,260 @@
# Integration tests for the outbound sync worker runtime: each test boots a
# fake relay (TestSupport.SyncFakeRelay) behind Bandit on an ephemeral port,
# grants sync ACLs, and drives it through the Parrhesia.API.Sync control plane.
defmodule Parrhesia.Sync.WorkerTest do
use ExUnit.Case, async: false
alias Ecto.Adapters.SQL.Sandbox
alias Parrhesia.API.ACL
alias Parrhesia.API.Events
alias Parrhesia.API.Identity
alias Parrhesia.API.RequestContext
alias Parrhesia.API.Sync
alias Parrhesia.Protocol.EventValidator
alias Parrhesia.Repo
alias Parrhesia.Sync.Supervisor
alias Parrhesia.TestSupport.SyncFakeRelay.Plug
alias Parrhesia.TestSupport.SyncFakeRelay.Server
# Shared sandbox mode: sync workers run in their own processes and need to
# reach the DB through the test's checked-out connection.
setup do
:ok = Sandbox.checkout(Repo)
Sandbox.mode(Repo, {:shared, self()})
on_exit(fn ->
Sandbox.mode(Repo, :manual)
end)
:ok
end
test "req_stream worker verifies remote identity, authenticates, syncs catch-up, streams live, and sync_now reruns catch-up" do
# The fake relay will expect this local pubkey during AUTH.
{:ok, %{pubkey: local_pubkey}} = Identity.ensure()
remote_pubkey = String.duplicate("b", 64)
initial_event = valid_sync_event("initial-sync", 1_762_000_000)
live_event = valid_sync_event("live-sync", 1_762_000_100)
relay_server =
start_supervised!(
{Server,
name: unique_name("FakeRelayServer"),
pubkey: remote_pubkey,
expected_client_pubkey: local_pubkey,
initial_events: [initial_event]}
)
# Bind to an OS-chosen free port so concurrent test runs never collide.
port = free_port()
start_supervised!(
{Bandit, plug: {Plug, server: relay_server}, ip: {127, 0, 0, 1}, port: port}
)
relay_url = "ws://127.0.0.1:#{port}/relay"
wait_for_relay(relay_url, remote_pubkey)
# Both directions need explicit grants before the worker may sync.
assert :ok =
ACL.grant(%{
principal_type: :pubkey,
principal: remote_pubkey,
capability: :sync_write,
match: %{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}
})
assert :ok =
ACL.grant(%{
principal_type: :pubkey,
principal: remote_pubkey,
capability: :sync_read,
match: %{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}
})
{manager_name, _supervisor_name} = start_sync_runtime()
assert {:ok, _server} =
Sync.put_server(
%{
"id" => "fake-relay",
"url" => relay_url,
"enabled?" => true,
"auth_pubkey" => remote_pubkey,
"filters" => [%{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}],
"tls" => %{"mode" => "disabled", "pins" => []}
},
manager: manager_name
)
# Catch-up must pull the pre-existing event ...
assert_event_synced(initial_event, remote_pubkey)
# ... and the live subscription must stream a newly published one.
assert :ok = Server.publish_live_event(relay_server, live_event)
assert_event_synced(live_event, remote_pubkey)
assert {:ok, stats_before_sync_now} = Sync.sync_stats(manager: manager_name)
assert stats_before_sync_now["events_accepted"] >= 2
# sync_now should rerun catch-up (query_runs) and restart the subscription.
assert :ok = Sync.sync_now("fake-relay", manager: manager_name)
assert_eventually(fn ->
case Sync.sync_stats(manager: manager_name) do
{:ok, stats} -> stats["query_runs"] >= 2 and stats["subscription_restarts"] >= 1
_other -> false
end
end)
assert {:ok, health} = Sync.sync_health(manager: manager_name)
assert health["status"] == "ok"
assert health["servers_connected"] == 1
end
test "worker marks remote identity mismatches as failing health" do
{:ok, %{pubkey: local_pubkey}} = Identity.ensure()
# The relay announces pubkey "d…" while the server config pins "e…" below,
# so the worker should refuse to sync and report the mismatch.
relay_server =
start_supervised!(
{Server,
name: unique_name("MismatchRelayServer"),
pubkey: String.duplicate("d", 64),
expected_client_pubkey: local_pubkey,
initial_events: []}
)
port = free_port()
start_supervised!(
{Bandit, plug: {Plug, server: relay_server}, ip: {127, 0, 0, 1}, port: port}
)
relay_url = "ws://127.0.0.1:#{port}/relay"
wait_for_relay(relay_url, String.duplicate("d", 64))
{manager_name, _supervisor_name} = start_sync_runtime()
assert {:ok, _server} =
Sync.put_server(
%{
"id" => "mismatch-relay",
"url" => relay_url,
"enabled?" => true,
"auth_pubkey" => String.duplicate("e", 64),
"filters" => [%{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}],
"tls" => %{"mode" => "disabled", "pins" => []}
},
manager: manager_name
)
# Health must degrade with a per-server :remote_identity_mismatch reason.
assert_eventually(fn ->
case Sync.sync_health(manager: manager_name) do
{:ok, %{"status" => "degraded", "servers_failing" => servers}} ->
Enum.any?(
servers,
&(&1["id"] == "mismatch-relay" and &1["reason"] == ":remote_identity_mismatch")
)
_other ->
false
end
end)
end
# Boots an isolated sync supervision tree with uniquely named processes so
# tests never clash with the application's globally registered sync runtime.
defp start_sync_runtime do
manager_name = unique_name("SyncManager")
worker_registry = unique_name("SyncRegistry")
worker_supervisor = unique_name("SyncWorkerSupervisor")
supervisor_name = unique_name("SyncSupervisor")
start_supervised!(
{Supervisor,
name: supervisor_name,
manager: manager_name,
worker_registry: worker_registry,
worker_supervisor: worker_supervisor,
path: unique_sync_path(),
start_workers?: true}
)
{manager_name, supervisor_name}
end
# Polls storage until the event is visible to a reader authenticated as the
# remote pubkey (the query is run with that pubkey in the request context).
defp assert_event_synced(event, remote_pubkey) do
assert_eventually(fn ->
case Events.query(
[%{"ids" => [event["id"]]}],
context: %RequestContext{
authenticated_pubkeys: MapSet.new([remote_pubkey])
}
) do
{:ok, [stored_event]} -> stored_event["id"] == event["id"]
_other -> false
end
end)
end
# Blocks until the fake relay serves its relay-info document (HTTP GET with
# an application/nostr+json accept header) carrying the expected pubkey —
# i.e. until the Bandit listener is actually up.
defp wait_for_relay(relay_url, expected_pubkey) do
info_url =
relay_url
|> String.replace_prefix("ws://", "http://")
|> String.replace_prefix("wss://", "https://")
assert_eventually(fn ->
with {:ok, %{status: 200, body: body}} <-
Req.get(
url: info_url,
headers: [{"accept", "application/nostr+json"}],
decode_body: false
),
{:ok, %{"pubkey" => ^expected_pubkey}} <- JSON.decode(body) do
true
else
_other -> false
end
end)
end
# Builds a kind-5000 sync event with a deterministic id. The sig is a dummy
# constant — NOTE(review): assumes this code path skips signature checks;
# confirm against the fake relay / ingest pipeline.
defp valid_sync_event(content, created_at) do
base_event = %{
"pubkey" => String.duplicate("f", 64),
"created_at" => created_at,
"kind" => 5000,
"tags" => [["r", "tribes.accounts.user"]],
"content" => content,
"sig" => String.duplicate("0", 128)
}
Map.put(base_event, "id", EventValidator.compute_id(base_event))
end
# Asks the OS for a free TCP port by listening on port 0, then releases it.
# (Small inherent race: the port could be grabbed before Bandit binds it.)
defp free_port do
{:ok, socket} = :gen_tcp.listen(0, [:binary, active: false, packet: :raw, reuseaddr: true])
{:ok, port} = :inet.port(socket)
:ok = :gen_tcp.close(socket)
port
end
# Unique process-name atom per call; test-only, so the unbounded atom
# creation is acceptable here.
defp unique_name(prefix) do
:"#{prefix}_#{System.unique_integer([:positive, :monotonic])}"
end
# Temp file for the sync manager's persisted server list; removed on exit.
defp unique_sync_path do
path =
Path.join(
System.tmp_dir!(),
"parrhesia_sync_runtime_#{System.unique_integer([:positive, :monotonic])}.json"
)
on_exit(fn ->
_ = File.rm(path)
end)
path
end
# Retries `fun` up to `attempts` times, pausing 50ms between tries (empty
# receive/after acts as a sleep); flunks when the condition never holds.
defp assert_eventually(fun, attempts \\ 50)
defp assert_eventually(_fun, 0), do: flunk("condition was not met in time")
defp assert_eventually(fun, attempts) do
if fun.() do
:ok
else
receive do
after
50 -> assert_eventually(fun, attempts - 1)
end
end
end
end

View File

@@ -2,6 +2,11 @@ defmodule Parrhesia.Web.ConnectionTest do
use ExUnit.Case, async: false use ExUnit.Case, async: false
alias Ecto.Adapters.SQL.Sandbox alias Ecto.Adapters.SQL.Sandbox
alias Parrhesia.API.ACL
alias Parrhesia.API.Events
alias Parrhesia.API.RequestContext
alias Parrhesia.Negentropy.Engine
alias Parrhesia.Negentropy.Message
alias Parrhesia.Protocol.EventValidator alias Parrhesia.Protocol.EventValidator
alias Parrhesia.Repo alias Parrhesia.Repo
alias Parrhesia.Web.Connection alias Parrhesia.Web.Connection
@@ -105,6 +110,199 @@ defmodule Parrhesia.Web.ConnectionTest do
Enum.find(decoded, fn frame -> List.first(frame) == "OK" end) Enum.find(decoded, fn frame -> List.first(frame) == "OK" end)
end end
test "AUTH rejects pubkeys outside the allowlist" do
assert :ok = Parrhesia.Storage.moderation().allow_pubkey(%{}, String.duplicate("a", 64))
state = connection_state()
auth_event = valid_auth_event(state.auth_challenge)
payload = JSON.encode!(["AUTH", auth_event])
assert {:push, frames, _next_state} = Connection.handle_in({payload, [opcode: :text]}, state)
decoded = Enum.map(frames, fn {:text, frame} -> JSON.decode!(frame) end)
assert ["OK", _, false, "restricted: authenticated pubkey is not allowed"] =
Enum.find(decoded, fn frame -> List.first(frame) == "OK" end)
end
test "connection state keeps transport identity metadata" do
transport_identity = %{
source: :socket,
verified?: true,
spki_sha256: "client-spki-pin"
}
state = connection_state(transport_identity: transport_identity)
assert state.transport_identity == transport_identity
end
test "listener can require NIP-42 for reads and writes" do
listener =
listener(%{
auth: %{nip42_required: true, nip98_required_for_admin: true}
})
state = connection_state(listener: listener)
req_payload = JSON.encode!(["REQ", "sub-auth", %{"kinds" => [1]}])
assert {:push, frames, ^state} = Connection.handle_in({req_payload, [opcode: :text]}, state)
assert Enum.map(frames, fn {:text, frame} -> JSON.decode!(frame) end) == [
["AUTH", state.auth_challenge],
["CLOSED", "sub-auth", "auth-required: authentication required"]
]
event = valid_event(%{"content" => "auth required"})
assert {:push, event_frames, ^state} =
Connection.handle_in({JSON.encode!(["EVENT", event]), [opcode: :text]}, state)
decoded = Enum.map(event_frames, fn {:text, frame} -> JSON.decode!(frame) end)
assert ["AUTH", state.auth_challenge] in decoded
assert ["OK", event["id"], false, "auth-required: authentication required"] in decoded
end
test "listener baseline ACL can deny read and write shapes before sync ACLs" do
listener =
listener(%{
baseline_acl: %{
read: [%{action: :deny, match: %{"kinds" => [5000]}}],
write: [%{action: :deny, match: %{"kinds" => [5000]}}]
}
})
state = connection_state(listener: listener)
req_payload = JSON.encode!(["REQ", "sub-baseline", %{"kinds" => [5000]}])
assert {:push, req_frames, ^state} =
Connection.handle_in({req_payload, [opcode: :text]}, state)
assert Enum.map(req_frames, fn {:text, frame} -> JSON.decode!(frame) end) == [
["AUTH", state.auth_challenge],
["CLOSED", "sub-baseline", "restricted: listener baseline denies requested filters"]
]
event =
valid_event(%{"kind" => 5000, "content" => "baseline blocked"}) |> recalculate_event_id()
assert {:push, {:text, response}, ^state} =
Connection.handle_in({JSON.encode!(["EVENT", event]), [opcode: :text]}, state)
assert JSON.decode!(response) == [
"OK",
event["id"],
false,
"restricted: listener baseline denies event"
]
end
test "protected sync REQ requires matching ACL grant" do
previous_acl = Application.get_env(:parrhesia, :acl, [])
Application.put_env(
:parrhesia,
:acl,
protected_filters: [%{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}]
)
on_exit(fn ->
Application.put_env(:parrhesia, :acl, previous_acl)
end)
state = connection_state()
auth_event = valid_auth_event(state.auth_challenge)
assert {:push, _, authed_state} =
Connection.handle_in({JSON.encode!(["AUTH", auth_event]), [opcode: :text]}, state)
req_payload =
JSON.encode!(["REQ", "sync-sub", %{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}])
assert {:push, denied_frames, ^authed_state} =
Connection.handle_in({req_payload, [opcode: :text]}, authed_state)
assert Enum.map(denied_frames, fn {:text, frame} -> JSON.decode!(frame) end) == [
["AUTH", authed_state.auth_challenge],
["CLOSED", "sync-sub", "restricted: sync read not allowed for authenticated pubkey"]
]
assert :ok =
ACL.grant(%{
principal_type: :pubkey,
principal: auth_event["pubkey"],
capability: :sync_read,
match: %{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}
})
assert {:push, responses, granted_state} =
Connection.handle_in({req_payload, [opcode: :text]}, authed_state)
assert Map.has_key?(granted_state.subscriptions, "sync-sub")
assert List.last(Enum.map(responses, fn {:text, frame} -> JSON.decode!(frame) end)) == [
"EOSE",
"sync-sub"
]
end
test "protected sync EVENT requires matching ACL grant" do
previous_acl = Application.get_env(:parrhesia, :acl, [])
Application.put_env(
:parrhesia,
:acl,
protected_filters: [%{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}]
)
on_exit(fn ->
Application.put_env(:parrhesia, :acl, previous_acl)
end)
state = connection_state()
auth_event = valid_auth_event(state.auth_challenge)
assert {:push, _, authed_state} =
Connection.handle_in({JSON.encode!(["AUTH", auth_event]), [opcode: :text]}, state)
event =
valid_event(%{
"kind" => 5000,
"tags" => [["r", "tribes.accounts.user"]],
"content" => "sync payload"
})
payload = JSON.encode!(["EVENT", event])
assert {:push, {:text, denied_response}, denied_state} =
Connection.handle_in({payload, [opcode: :text]}, authed_state)
assert JSON.decode!(denied_response) == [
"OK",
event["id"],
false,
"restricted: sync write not allowed for authenticated pubkey"
]
assert denied_state.authenticated_pubkeys == authed_state.authenticated_pubkeys
assert :ok =
ACL.grant(%{
principal_type: :pubkey,
principal: auth_event["pubkey"],
capability: :sync_write,
match: %{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}
})
assert {:push, {:text, accepted_response}, _next_state} =
Connection.handle_in({payload, [opcode: :text]}, authed_state)
assert JSON.decode!(accepted_response) == ["OK", event["id"], true, "ok: event stored"]
end
test "protected event is rejected unless authenticated" do test "protected event is rejected unless authenticated" do
state = connection_state() state = connection_state()
@@ -435,37 +633,154 @@ defmodule Parrhesia.Web.ConnectionTest do
] ]
end end
test "NEG sessions open and close" do test "NEG sessions open, return reconciliation payloads and close silently" do
state = connection_state() negentropy_sessions =
start_supervised!(
{Parrhesia.Negentropy.Sessions,
name: nil,
max_payload_bytes: 1024,
max_sessions_per_owner: 8,
max_total_sessions: 16,
max_idle_seconds: 60,
sweep_interval_seconds: 60}
)
open_payload = JSON.encode!(["NEG-OPEN", "neg-1", %{"cursor" => 0}]) Sandbox.allow(Repo, self(), negentropy_sessions)
state = connection_state(negentropy_sessions: negentropy_sessions)
first =
valid_event(%{
"created_at" => 1_700_300_000,
"content" => "neg-a"
})
second =
valid_event(%{
"created_at" => 1_700_300_001,
"content" => "neg-b"
})
assert {:push, {:text, _response}, _next_state} =
Connection.handle_in({JSON.encode!(["EVENT", first]), [opcode: :text]}, state)
assert {:push, {:text, _response}, _next_state} =
Connection.handle_in({JSON.encode!(["EVENT", second]), [opcode: :text]}, state)
open_payload =
JSON.encode!([
"NEG-OPEN",
"neg-1",
%{"kinds" => [1]},
Base.encode16(Engine.initial_message([]), case: :lower)
])
assert {:push, {:text, open_response}, _next_state} = assert {:push, {:text, open_response}, _next_state} =
Connection.handle_in({open_payload, [opcode: :text]}, state) Connection.handle_in({open_payload, [opcode: :text]}, state)
assert ["NEG-MSG", "neg-1", %{"status" => "open", "cursor" => 0}] = assert ["NEG-MSG", "neg-1", response_hex] = JSON.decode!(open_response)
JSON.decode!(open_response)
assert {:ok, [%{mode: :id_list, payload: ids, upper_bound: :infinity}]} =
response_hex |> Base.decode16!(case: :mixed) |> Message.decode()
assert ids == [
Base.decode16!(first["id"], case: :mixed),
Base.decode16!(second["id"], case: :mixed)
]
close_payload = JSON.encode!(["NEG-CLOSE", "neg-1"]) close_payload = JSON.encode!(["NEG-CLOSE", "neg-1"])
assert {:push, {:text, close_response}, _next_state} = assert {:ok, _next_state} =
Connection.handle_in({close_payload, [opcode: :text]}, state) Connection.handle_in({close_payload, [opcode: :text]}, state)
end
assert JSON.decode!(close_response) == ["NEG-MSG", "neg-1", %{"status" => "closed"}] test "NEG sessions return NEG-ERR for oversized snapshots" do
negentropy_sessions =
start_supervised!(
{Parrhesia.Negentropy.Sessions,
name: nil,
max_payload_bytes: 1024,
max_sessions_per_owner: 8,
max_total_sessions: 16,
max_idle_seconds: 60,
sweep_interval_seconds: 60,
max_items_per_session: 1}
)
Sandbox.allow(Repo, self(), negentropy_sessions)
state = connection_state(negentropy_sessions: negentropy_sessions)
first = valid_event(%{"created_at" => 1_700_301_000, "content" => "neg-big-a"})
second = valid_event(%{"created_at" => 1_700_301_001, "content" => "neg-big-b"})
assert {:push, {:text, _response}, _next_state} =
Connection.handle_in({JSON.encode!(["EVENT", first]), [opcode: :text]}, state)
assert {:push, {:text, _response}, _next_state} =
Connection.handle_in({JSON.encode!(["EVENT", second]), [opcode: :text]}, state)
open_payload =
JSON.encode!([
"NEG-OPEN",
"neg-oversized",
%{"kinds" => [1]},
Base.encode16(Engine.initial_message([]), case: :lower)
])
assert {:push, {:text, response}, _next_state} =
Connection.handle_in({open_payload, [opcode: :text]}, state)
assert JSON.decode!(response) == [
"NEG-ERR",
"neg-oversized",
"blocked: negentropy query is too big"
]
end end
test "CLOSE removes subscription and replies with CLOSED" do test "CLOSE removes subscription and replies with CLOSED" do
state = subscribed_connection_state([]) state = subscribed_connection_state([])
subscription = state.subscriptions["sub-1"]
[{stream_pid, _value}] = Registry.lookup(Parrhesia.API.Stream.Registry, subscription.ref)
monitor_ref = Process.monitor(stream_pid)
close_payload = JSON.encode!(["CLOSE", "sub-1"]) close_payload = JSON.encode!(["CLOSE", "sub-1"])
assert {:push, {:text, response}, next_state} = assert {:push, {:text, response}, next_state} =
Connection.handle_in({close_payload, [opcode: :text]}, state) Connection.handle_in({close_payload, [opcode: :text]}, state)
assert_receive {:DOWN, ^monitor_ref, :process, ^stream_pid, :normal}
refute Map.has_key?(next_state.subscriptions, "sub-1") refute Map.has_key?(next_state.subscriptions, "sub-1")
assert JSON.decode!(response) == ["CLOSED", "sub-1", "error: subscription closed"] assert JSON.decode!(response) == ["CLOSED", "sub-1", "error: subscription closed"]
end end
# Verifies the live-delivery path: an event published through Events.publish/2
# is fanned out to the subscription's API.Stream process, arrives here as a
# {:parrhesia, :event, ...} message, is queued by the connection, and is
# drained into a single ["EVENT", sub_id, event] frame.
test "REQ live delivery is bridged through API.Stream" do
  state = subscribed_connection_state([])
  subscription = state.subscriptions["sub-1"]
  subscription_ref = subscription.ref
  event = valid_event(%{"content" => "stream-live"}) |> recalculate_event_id()

  assert {:ok, %{accepted: true}} = Events.publish(event, context: %RequestContext{})

  # The test process owns the stream, so it receives the bridged message.
  assert_receive {:parrhesia, :event, ^subscription_ref, "sub-1", received_event}
  assert received_event["id"] == event["id"]

  # Feeding the message into the connection enqueues it for outbound delivery.
  assert {:ok, queued_state} =
           Connection.handle_info(
             {:parrhesia, :event, subscription_ref, "sub-1", received_event},
             state
           )

  assert queued_state.outbound_queue_size == 1
  assert_receive :drain_outbound_queue

  # Draining empties the queue and pushes the event frame to the client.
  assert {:push, [{:text, payload}], drained_state} =
           Connection.handle_info(:drain_outbound_queue, queued_state)

  assert drained_state.outbound_queue_size == 0
  assert JSON.decode!(payload) == ["EVENT", "sub-1", received_event]
end
test "fanout_event enqueues and drains matching events" do test "fanout_event enqueues and drains matching events" do
state = subscribed_connection_state([]) state = subscribed_connection_state([])
event = live_event("event-1", 1) event = live_event("event-1", 1)
@@ -559,6 +874,27 @@ defmodule Parrhesia.Web.ConnectionTest do
state state
end end
# Builds a listener configuration for connection tests. `overrides` replaces
# top-level keys of the defaults (shallow merge, matching Map.merge/2).
defp listener(overrides) do
  Map.merge(
    %{
      id: :test,
      enabled: true,
      bind: %{ip: {127, 0, 0, 1}, port: 4413},
      transport: %{scheme: :http, tls: %{mode: :disabled}},
      proxy: %{trusted_cidrs: [], honor_x_forwarded_for: true},
      network: %{allow_all: true},
      features: %{
        nostr: %{enabled: true},
        admin: %{enabled: true},
        metrics: %{enabled: false, access: %{allow_all: true}, auth_token: nil}
      },
      auth: %{nip42_required: false, nip98_required_for_admin: true},
      baseline_acl: %{read: [], write: []},
      bandit_options: []
    },
    overrides
  )
end
defp live_event(id, kind) do defp live_event(id, kind) do
%{ %{
"id" => id, "id" => id,

View File

@@ -0,0 +1,105 @@
# End-to-end coverage for X-Forwarded-For handling over a real Bandit server:
# a forwarded client IP is only honored when the direct peer is listed in the
# :trusted_proxies configuration.
defmodule Parrhesia.Web.ProxyIpE2ETest do
  use ExUnit.Case, async: false

  alias __MODULE__.TestClient
  alias Ecto.Adapters.SQL.Sandbox
  alias Parrhesia.Repo

  setup_all do
    # websockex is used as the raw client; make sure its app tree is running.
    {:ok, _apps} = Application.ensure_all_started(:websockex)
    :ok
  end

  setup do
    # Shared sandbox mode so Bandit acceptor processes (which we do not
    # explicitly allow) can piggyback on this test's DB connection.
    :ok = Sandbox.checkout(Repo)
    Sandbox.mode(Repo, {:shared, self()})

    previous_trusted_proxies = Application.get_env(:parrhesia, :trusted_proxies, [])

    on_exit(fn ->
      Application.put_env(:parrhesia, :trusted_proxies, previous_trusted_proxies)
      Sandbox.mode(Repo, :manual)
    end)

    {:ok, port: free_port()}
  end

  test "websocket relay blocks a forwarded client IP from a trusted proxy", %{port: port} do
    # 127.0.0.1 (the direct peer) is trusted, so the forwarded IP is honored
    # and the blocked address is rejected during the upgrade handshake.
    Application.put_env(:parrhesia, :trusted_proxies, ["127.0.0.1/32"])
    assert :ok = Parrhesia.Storage.moderation().block_ip(%{}, "203.0.113.10")

    start_supervised!({Bandit, plug: Parrhesia.Web.Router, ip: {127, 0, 0, 1}, port: port})
    wait_for_server(port)

    assert {:error, %WebSockex.RequestError{code: 403, message: "Forbidden"}} =
             TestClient.start_link(relay_url(port), self(),
               extra_headers: [{"x-forwarded-for", "203.0.113.10"}]
             )
  end

  test "websocket relay ignores forwarded client IPs from untrusted proxies", %{port: port} do
    # With no trusted proxies the X-Forwarded-For header must be ignored,
    # so the connection from 127.0.0.1 succeeds despite the blocked IP.
    Application.put_env(:parrhesia, :trusted_proxies, [])
    assert :ok = Parrhesia.Storage.moderation().block_ip(%{}, "203.0.113.10")

    start_supervised!({Bandit, plug: Parrhesia.Web.Router, ip: {127, 0, 0, 1}, port: port})
    wait_for_server(port)

    assert {:ok, client} =
             TestClient.start_link(relay_url(port), self(),
               extra_headers: [{"x-forwarded-for", "203.0.113.10"}]
             )

    assert_receive :connected

    # `Process.exit(pid, :normal)` is ignored by a process that is not
    # trapping exits, so the previous cleanup never actually stopped the
    # client. Unlink first (start_link linked it to this test process, and a
    # :killed exit signal would otherwise take the test down too), then kill.
    Process.unlink(client)
    Process.exit(client, :kill)
  end

  # Polls GET /health until the server answers 200 "ok", sleeping 50ms between
  # the (up to 50) attempts; flunks the test if the server never becomes ready.
  defp wait_for_server(port) do
    health_url = "http://127.0.0.1:#{port}/health"

    1..50
    |> Enum.reduce_while(:error, fn _attempt, _acc ->
      case Req.get(health_url, receive_timeout: 200, connect_options: [timeout: 200]) do
        {:ok, %{status: 200, body: "ok"}} ->
          {:halt, :ok}

        _other ->
          Process.sleep(50)
          {:cont, :error}
      end
    end)
    |> case do
      :ok -> :ok
      :error -> flunk("server was not ready at #{health_url}")
    end
  end

  defp relay_url(port), do: "ws://127.0.0.1:#{port}/relay"

  # Asks the OS for a free ephemeral port by binding port 0, then releases it.
  defp free_port do
    {:ok, socket} = :gen_tcp.listen(0, [:binary, active: false, packet: :raw, reuseaddr: true])
    {:ok, port} = :inet.port(socket)
    :ok = :gen_tcp.close(socket)
    port
  end

  # Minimal WebSockex client that notifies the parent process on connect.
  defmodule TestClient do
    use WebSockex

    def start_link(url, parent, opts \\ []) do
      WebSockex.start_link(url, __MODULE__, parent, opts)
    end

    @impl true
    def handle_connect(_conn, parent) do
      send(parent, :connected)
      {:ok, parent}
    end

    @impl true
    def handle_disconnect(_disconnect_map, parent) do
      {:ok, parent}
    end
  end
end

View File

@@ -5,8 +5,10 @@ defmodule Parrhesia.Web.RouterTest do
import Plug.Test import Plug.Test
alias Ecto.Adapters.SQL.Sandbox alias Ecto.Adapters.SQL.Sandbox
alias Parrhesia.API.Sync
alias Parrhesia.Protocol.EventValidator alias Parrhesia.Protocol.EventValidator
alias Parrhesia.Repo alias Parrhesia.Repo
alias Parrhesia.Web.Listener
alias Parrhesia.Web.Router alias Parrhesia.Web.Router
setup do setup do
@@ -44,7 +46,13 @@ defmodule Parrhesia.Web.RouterTest do
end end
test "GET /metrics returns prometheus payload for private-network clients" do test "GET /metrics returns prometheus payload for private-network clients" do
conn = conn(:get, "/metrics") |> Router.call([]) conn =
conn(:get, "/metrics")
|> route_conn(
listener(%{
features: %{metrics: %{enabled: true, access: %{private_networks_only: true}}}
})
)
assert conn.status == 200 assert conn.status == 200
assert get_resp_header(conn, "content-type") == ["text/plain; charset=utf-8"] assert get_resp_header(conn, "content-type") == ["text/plain; charset=utf-8"]
@@ -53,6 +61,14 @@ defmodule Parrhesia.Web.RouterTest do
test "GET /metrics denies public-network clients by default" do test "GET /metrics denies public-network clients by default" do
conn = conn(:get, "/metrics") conn = conn(:get, "/metrics")
conn = %{conn | remote_ip: {8, 8, 8, 8}} conn = %{conn | remote_ip: {8, 8, 8, 8}}
test_listener =
listener(%{features: %{metrics: %{enabled: true, access: %{private_networks_only: true}}}})
conn = Listener.put_conn(conn, listener: test_listener)
refute Listener.metrics_allowed?(Listener.from_conn(conn), conn)
conn = Router.call(conn, []) conn = Router.call(conn, [])
assert conn.status == 403 assert conn.status == 403
@@ -60,51 +76,123 @@ defmodule Parrhesia.Web.RouterTest do
end end
test "GET /metrics can be disabled on the main endpoint" do test "GET /metrics can be disabled on the main endpoint" do
previous_metrics = Application.get_env(:parrhesia, :metrics, []) conn =
conn(:get, "/metrics")
Application.put_env( |> route_conn(listener(%{features: %{metrics: %{enabled: false}}}))
:parrhesia,
:metrics,
Keyword.put(previous_metrics, :enabled_on_main_endpoint, false)
)
on_exit(fn ->
Application.put_env(:parrhesia, :metrics, previous_metrics)
end)
conn = conn(:get, "/metrics") |> Router.call([])
assert conn.status == 404 assert conn.status == 404
assert conn.resp_body == "not found" assert conn.resp_body == "not found"
end end
test "GET /metrics accepts bearer auth when configured" do test "GET /metrics accepts bearer auth when configured" do
previous_metrics = Application.get_env(:parrhesia, :metrics, []) test_listener =
listener(%{
features: %{
metrics: %{
enabled: true,
access: %{private_networks_only: false},
auth_token: "secret-token"
}
}
})
Application.put_env( denied_conn = conn(:get, "/metrics") |> route_conn(test_listener)
:parrhesia,
:metrics,
previous_metrics
|> Keyword.put(:private_networks_only, false)
|> Keyword.put(:auth_token, "secret-token")
)
on_exit(fn ->
Application.put_env(:parrhesia, :metrics, previous_metrics)
end)
denied_conn = conn(:get, "/metrics") |> Router.call([])
assert denied_conn.status == 403 assert denied_conn.status == 403
allowed_conn = allowed_conn =
conn(:get, "/metrics") conn(:get, "/metrics")
|> put_req_header("authorization", "Bearer secret-token") |> put_req_header("authorization", "Bearer secret-token")
|> Router.call([]) |> route_conn(test_listener)
assert allowed_conn.status == 200 assert allowed_conn.status == 200
end end
test "GET /relay accepts proxy-asserted TLS identity from trusted proxies" do
test_listener =
listener(%{
transport: %{
scheme: :http,
tls: %{
mode: :proxy_terminated,
proxy_headers: %{enabled: true, required: true}
}
},
proxy: %{trusted_cidrs: ["10.0.0.0/8"], honor_x_forwarded_for: true}
})
conn =
conn(:get, "/relay")
|> put_req_header("accept", "application/nostr+json")
|> put_req_header("x-parrhesia-client-cert-verified", "true")
|> put_req_header("x-parrhesia-client-spki-sha256", "proxy-spki-pin")
|> Plug.Test.put_peer_data(%{
address: {10, 1, 2, 3},
port: 443,
ssl_cert: nil
})
|> route_conn(test_listener)
assert conn.status == 200
end
test "GET /relay rejects missing proxy-asserted TLS identity when required" do
test_listener =
listener(%{
transport: %{
scheme: :http,
tls: %{
mode: :proxy_terminated,
proxy_headers: %{enabled: true, required: true}
}
},
proxy: %{trusted_cidrs: ["10.0.0.0/8"], honor_x_forwarded_for: true}
})
conn =
conn(:get, "/relay")
|> put_req_header("accept", "application/nostr+json")
|> Plug.Test.put_peer_data(%{
address: {10, 1, 2, 3},
port: 443,
ssl_cert: nil
})
|> route_conn(test_listener)
assert conn.status == 403
assert conn.resp_body == "forbidden"
end
test "GET /relay rejects proxy-asserted TLS identity when the pin mismatches" do
test_listener =
listener(%{
transport: %{
scheme: :http,
tls: %{
mode: :proxy_terminated,
client_pins: [%{type: :spki_sha256, value: "expected-spki-pin"}],
proxy_headers: %{enabled: true, required: true}
}
},
proxy: %{trusted_cidrs: ["10.0.0.0/8"], honor_x_forwarded_for: true}
})
conn =
conn(:get, "/relay")
|> put_req_header("accept", "application/nostr+json")
|> put_req_header("x-parrhesia-client-cert-verified", "true")
|> put_req_header("x-parrhesia-client-spki-sha256", "wrong-spki-pin")
|> Plug.Test.put_peer_data(%{
address: {10, 1, 2, 3},
port: 443,
ssl_cert: nil
})
|> route_conn(test_listener)
assert conn.status == 403
assert conn.resp_body == "forbidden"
end
test "POST /management requires authorization" do test "POST /management requires authorization" do
conn = conn =
conn(:post, "/management", JSON.encode!(%{"method" => "ping", "params" => %{}})) conn(:post, "/management", JSON.encode!(%{"method" => "ping", "params" => %{}}))
@@ -135,6 +223,205 @@ defmodule Parrhesia.Web.RouterTest do
} }
end end
# Blocked IPs must be rejected before NIP-98 authorization is even evaluated:
# the request below carries no authorization header yet gets 403, not 401.
test "POST /management denies blocked IPs before auth" do
  assert :ok = Parrhesia.Storage.moderation().block_ip(%{}, "8.8.8.8")

  conn =
    conn(:post, "/management", JSON.encode!(%{"method" => "ping", "params" => %{}}))
    |> put_req_header("content-type", "application/json")
    # Simulate the request arriving from the blocked address.
    |> Map.put(:remote_ip, {8, 8, 8, 8})
    |> Router.call([])

  assert conn.status == 403
  assert conn.resp_body == "forbidden"
end
# The relay endpoint (here queried as NIP-11) also enforces the IP blocklist.
test "GET /relay denies blocked IPs" do
  assert :ok = Parrhesia.Storage.moderation().block_ip(%{}, "8.8.4.4")

  conn =
    conn(:get, "/relay")
    |> put_req_header("accept", "application/nostr+json")
    # Simulate the request arriving from the blocked address.
    |> Map.put(:remote_ip, {8, 8, 4, 4})
    |> Router.call([])

  assert conn.status == 403
  assert conn.resp_body == "forbidden"
end
# Exercises the ACL management surface over NIP-98-authorized requests:
# acl_grant creates a rule, acl_list returns it for the same principal.
test "POST /management supports ACL methods" do
  management_url = "http://www.example.com/management"
  auth_event = nip98_event("POST", management_url)
  # NIP-98 authorization header: "Nostr " + base64 of the signed event JSON.
  authorization = "Nostr " <> Base.encode64(JSON.encode!(auth_event))

  grant_conn =
    conn(
      :post,
      "/management",
      JSON.encode!(%{
        "method" => "acl_grant",
        "params" => %{
          "principal_type" => "pubkey",
          "principal" => String.duplicate("c", 64),
          "capability" => "sync_read",
          "match" => %{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}
        }
      })
    )
    |> put_req_header("content-type", "application/json")
    |> put_req_header("authorization", authorization)
    |> Router.call([])

  assert grant_conn.status == 200

  list_conn =
    conn(
      :post,
      "/management",
      JSON.encode!(%{
        "method" => "acl_list",
        "params" => %{"principal" => String.duplicate("c", 64)}
      })
    )
    |> put_req_header("content-type", "application/json")
    |> put_req_header("authorization", authorization)
    |> Router.call([])

  assert list_conn.status == 200

  # Exactly one rule is listed, carrying the granted capability.
  assert %{
           "ok" => true,
           "result" => %{
             "rules" => [
               %{
                 "principal" => principal,
                 "capability" => "sync_read"
               }
             ]
           }
         } = JSON.decode!(list_conn.resp_body)

  assert principal == String.duplicate("c", 64)
end
# identity_ensure must return the relay identity's public key as a
# 64-character (hex-encoded, presumably — TODO confirm) binary.
test "POST /management supports identity methods" do
  management_url = "http://www.example.com/management"
  auth_event = nip98_event("POST", management_url)
  authorization = "Nostr " <> Base.encode64(JSON.encode!(auth_event))

  conn =
    conn(
      :post,
      "/management",
      JSON.encode!(%{
        "method" => "identity_ensure",
        "params" => %{}
      })
    )
    |> put_req_header("content-type", "application/json")
    |> put_req_header("authorization", authorization)
    |> Router.call([])

  assert conn.status == 200

  assert %{
           "ok" => true,
           "result" => %{
             "pubkey" => pubkey
           }
         } = JSON.decode!(conn.resp_body)

  assert is_binary(pubkey)
  assert byte_size(pubkey) == 64
end
# Both the stats and health management methods must report the sync server
# summary, and they must agree with each other on servers_total.
test "POST /management stats and health include sync summary" do
  management_url = "http://www.example.com/management"
  auth_event = nip98_event("POST", management_url)
  authorization = "Nostr " <> Base.encode64(JSON.encode!(auth_event))

  # Baseline before registering one extra (disabled) sync server.
  initial_total = Sync.sync_stats() |> elem(1) |> Map.fetch!("servers_total")
  server_id = "router-sync-#{System.unique_integer([:positive, :monotonic])}"

  assert {:ok, _server} =
           Sync.put_server(%{
             "id" => server_id,
             "url" => "wss://relay-a.example/relay",
             "enabled?" => false,
             "auth_pubkey" => String.duplicate("a", 64),
             "filters" => [%{"kinds" => [5000], "#r" => ["tribes.accounts.user"]}],
             "tls" => %{
               "pins" => [
                 %{
                   "type" => "spki_sha256",
                   "value" => "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA="
                 }
               ]
             }
           })

  # Sync servers are not sandboxed state — remove explicitly on exit.
  on_exit(fn ->
    _ = Sync.remove_server(server_id)
  end)

  stats_conn =
    conn(
      :post,
      "/management",
      JSON.encode!(%{
        "method" => "stats",
        "params" => %{}
      })
    )
    |> put_req_header("content-type", "application/json")
    |> put_req_header("authorization", authorization)
    |> Router.call([])

  assert stats_conn.status == 200

  assert %{
           "ok" => true,
           "result" => %{
             "sync" => %{"servers_total" => servers_total}
           }
         } = JSON.decode!(stats_conn.resp_body)

  assert servers_total == initial_total + 1

  health_conn =
    conn(
      :post,
      "/management",
      JSON.encode!(%{
        "method" => "health",
        "params" => %{}
      })
    )
    |> put_req_header("content-type", "application/json")
    |> put_req_header("authorization", authorization)
    |> Router.call([])

  assert health_conn.status == 200

  # Health reports the same total (pin match) plus an overall status flag.
  assert %{
           "ok" => true,
           "result" => %{
             "status" => status,
             "sync" => %{"servers_total" => ^servers_total}
           }
         } = JSON.decode!(health_conn.resp_body)

  assert status in ["ok", "degraded"]
end
# Listeners with the admin feature disabled must hide the management route
# entirely (404), rather than rejecting it with an auth error.
test "POST /management returns not found when admin feature is disabled on the listener" do
  conn =
    conn(:post, "/management", JSON.encode!(%{"method" => "ping", "params" => %{}}))
    |> put_req_header("content-type", "application/json")
    |> route_conn(listener(%{features: %{admin: %{enabled: false}}}))

  assert conn.status == 404
end
defp nip98_event(method, url) do defp nip98_event(method, url) do
now = System.system_time(:second) now = System.system_time(:second)
@@ -149,4 +436,41 @@ defmodule Parrhesia.Web.RouterTest do
Map.put(base, "id", EventValidator.compute_id(base)) Map.put(base, "id", EventValidator.compute_id(base))
end end
# Builds a listener configuration for router tests. Nested override maps are
# merged key-by-key into the defaults via deep_merge/2 (not a shallow merge).
defp listener(overrides) do
  defaults = %{
    id: :test,
    enabled: true,
    bind: %{ip: {127, 0, 0, 1}, port: 4413},
    transport: %{scheme: :http, tls: %{mode: :disabled}},
    proxy: %{trusted_cidrs: [], honor_x_forwarded_for: true},
    network: %{allow_all: true},
    features: %{
      nostr: %{enabled: true},
      admin: %{enabled: true},
      metrics: %{enabled: true, access: %{private_networks_only: true}, auth_token: nil}
    },
    auth: %{nip42_required: false, nip98_required_for_admin: true},
    baseline_acl: %{read: [], write: []}
  }

  deep_merge(defaults, overrides)
end
# Recursively merges two maps: when both sides hold a map for the same key
# the maps are merged, otherwise the right-hand value wins.
defp deep_merge(left, right) when is_map(left) and is_map(right) do
  resolver = fn
    _key, %{} = left_value, %{} = right_value -> deep_merge(left_value, right_value)
    _key, _left_value, right_value -> right_value
  end

  Map.merge(left, right, resolver)
end
# Tags the conn with the given listener config, then dispatches it through
# the router as a normal request.
defp route_conn(conn, listener) do
  tagged_conn = Listener.put_conn(conn, listener: listener)
  Router.call(tagged_conn, [])
end
end end

View File

@@ -0,0 +1,343 @@
# End-to-end TLS coverage for the listener-based web endpoint:
#   * HTTPS certificate hot-reload from files on disk,
#   * mutual TLS with SPKI client-certificate pinning,
#   * WSS sync transport with server-certificate pinning.
defmodule Parrhesia.Web.TLSE2ETest do
  use ExUnit.Case, async: false

  alias Ecto.Adapters.SQL.Sandbox
  alias Parrhesia.Repo
  alias Parrhesia.Sync.Transport.WebSockexClient
  alias Parrhesia.TestSupport.TLSCerts
  alias Parrhesia.Web.Endpoint

  setup do
    :ok = Sandbox.checkout(Repo)
    # Shared sandbox mode so endpoint/acceptor processes reuse this connection.
    Sandbox.mode(Repo, {:shared, self()})

    # Unique scratch directory for generated CA and certificate files.
    tmp_dir =
      Path.join(
        System.tmp_dir!(),
        "parrhesia_tls_e2e_#{System.unique_integer([:positive, :monotonic])}"
      )

    File.mkdir_p!(tmp_dir)

    on_exit(fn ->
      Sandbox.mode(Repo, :manual)
      _ = File.rm_rf(tmp_dir)
    end)

    {:ok, tmp_dir: tmp_dir}
  end

  test "HTTPS listener serves NIP-11 and reloads certificate files from disk", %{tmp_dir: tmp_dir} do
    ca = TLSCerts.create_ca!(tmp_dir, "reload")
    server_a = TLSCerts.issue_server_cert!(tmp_dir, ca, "reload-server-a")
    server_b = TLSCerts.issue_server_cert!(tmp_dir, ca, "reload-server-b")

    # The listener always points at these "active" paths; swapping their
    # contents simulates an in-place certificate renewal.
    active_certfile = Path.join(tmp_dir, "active-server.cert.pem")
    active_keyfile = Path.join(tmp_dir, "active-server.key.pem")
    File.cp!(server_a.certfile, active_certfile)
    File.cp!(server_a.keyfile, active_keyfile)

    port = free_port()
    endpoint_name = unique_name("TLSEndpointReload")
    listener_id = :reload_tls

    start_supervised!(
      {Endpoint,
       name: endpoint_name,
       listeners: %{
         listener_id =>
           listener(listener_id, port, %{
             transport: %{
               scheme: :https,
               tls: %{
                 mode: :server,
                 certfile: active_certfile,
                 keyfile: active_keyfile,
                 cipher_suite: :compatible
               }
             }
           })
       }}
    )

    # Wait until the HTTPS listener answers a NIP-11 request.
    assert_eventually(fn ->
      case nip11_request(port, ca.certfile) do
        {:ok, 200} -> true
        _other -> false
      end
    end)

    first_fingerprint = server_cert_fingerprint(port)
    assert first_fingerprint == TLSCerts.cert_sha256!(server_a.certfile)

    # Swap the on-disk files and reload the listener; new handshakes must
    # present server_b's certificate.
    File.cp!(server_b.certfile, active_certfile)
    File.cp!(server_b.keyfile, active_keyfile)
    assert :ok = Endpoint.reload_listener(endpoint_name, listener_id)

    assert_eventually(fn ->
      server_cert_fingerprint(port) == TLSCerts.cert_sha256!(server_b.certfile)
    end)
  end

  test "mutual TLS requires a client certificate and enforces optional client pins", %{
    tmp_dir: tmp_dir
  } do
    ca = TLSCerts.create_ca!(tmp_dir, "mutual")
    server = TLSCerts.issue_server_cert!(tmp_dir, ca, "mutual-server")
    allowed_client = TLSCerts.issue_client_cert!(tmp_dir, ca, "allowed-client")
    other_client = TLSCerts.issue_client_cert!(tmp_dir, ca, "other-client")
    allowed_pin = TLSCerts.spki_pin!(allowed_client.certfile)
    port = free_port()

    start_supervised!(
      {Endpoint,
       name: unique_name("TLSEndpointMutual"),
       listeners: %{
         mutual_tls:
           listener(:mutual_tls, port, %{
             transport: %{
               scheme: :https,
               tls: %{
                 mode: :mutual,
                 certfile: server.certfile,
                 keyfile: server.keyfile,
                 cacertfile: ca.certfile,
                 client_pins: [%{type: :spki_sha256, value: allowed_pin}]
               }
             }
           })
       }}
    )

    # No client certificate at all: the request fails (handshake rejection).
    assert {:error, _reason} = nip11_request(port, ca.certfile)

    # CA-signed client cert whose SPKI does not match the pin: 403.
    assert {:ok, 403} =
             nip11_request(
               port,
               ca.certfile,
               certfile: other_client.certfile,
               keyfile: other_client.keyfile
             )

    # Pinned client certificate: accepted.
    assert {:ok, 200} =
             nip11_request(
               port,
               ca.certfile,
               certfile: allowed_client.certfile,
               keyfile: allowed_client.keyfile
             )
  end

  test "WSS relay accepts a pinned server certificate", %{tmp_dir: tmp_dir} do
    ca = TLSCerts.create_ca!(tmp_dir, "wss")
    server = TLSCerts.issue_server_cert!(tmp_dir, ca, "wss-server")
    server_pin = TLSCerts.spki_pin!(server.certfile)
    port = free_port()

    start_supervised!(
      {Endpoint,
       name: unique_name("TLSEndpointWSS"),
       listeners: %{
         wss_tls:
           listener(:wss_tls, port, %{
             transport: %{
               scheme: :https,
               tls: %{
                 mode: :server,
                 certfile: server.certfile,
                 keyfile: server.keyfile
               }
             }
           })
       }}
    )

    # Sync-client config pinning the server's SPKI hash.
    server_config = %{
      url: "wss://localhost:#{port}/relay",
      tls: %{
        mode: :required,
        hostname: "localhost",
        pins: [%{type: :spki_sha256, value: server_pin}]
      }
    }

    assert {:ok, websocket} =
             WebSockexClient.connect(
               self(),
               server_config,
               websocket_opts: [ssl_options: websocket_ssl_options(ca.certfile)]
             )

    assert_receive {:sync_transport, ^websocket, :connected, _metadata}, 5_000

    # A round-trip COUNT over the pinned connection proves the relay works.
    assert :ok = WebSockexClient.send_json(websocket, ["COUNT", "tls-sub", %{"kinds" => [1]}])
    assert_receive {:sync_transport, ^websocket, :frame, ["COUNT", "tls-sub", payload]}, 5_000
    assert is_map(payload)
    assert Map.has_key?(payload, "count")
  end

  test "WSS relay rejects a mismatched pinned server certificate", %{tmp_dir: tmp_dir} do
    ca = TLSCerts.create_ca!(tmp_dir, "wss-mismatch")
    server = TLSCerts.issue_server_cert!(tmp_dir, ca, "wss-mismatch-server")
    wrong_server = TLSCerts.issue_server_cert!(tmp_dir, ca, "wss-mismatch-other")
    # Pin taken from a different (valid, same-CA) certificate than the one
    # the listener actually serves — the pin check must fail the handshake.
    wrong_pin = TLSCerts.spki_pin!(wrong_server.certfile)
    port = free_port()

    start_supervised!(
      {Endpoint,
       name: unique_name("TLSEndpointWSSMismatch"),
       listeners: %{
         wss_tls_mismatch:
           listener(:wss_tls_mismatch, port, %{
             transport: %{
               scheme: :https,
               tls: %{
                 mode: :server,
                 certfile: server.certfile,
                 keyfile: server.keyfile
               }
             }
           })
       }}
    )

    server_config = %{
      url: "wss://localhost:#{port}/relay",
      tls: %{
        mode: :required,
        hostname: "localhost",
        pins: [%{type: :spki_sha256, value: wrong_pin}]
      }
    }

    assert {:error, %WebSockex.ConnError{original: {:tls_alert, {:handshake_failure, _reason}}}} =
             WebSockexClient.connect(
               self(),
               server_config,
               websocket_opts: [ssl_options: websocket_ssl_options(ca.certfile)]
             )
  end

  # Builds a listener configuration bound to 127.0.0.1:port; nested override
  # maps are merged into the defaults via deep_merge/2.
  defp listener(id, port, overrides) do
    deep_merge(
      %{
        id: id,
        enabled: true,
        bind: %{ip: {127, 0, 0, 1}, port: port},
        transport: %{scheme: :http, tls: %{mode: :disabled}},
        proxy: %{trusted_cidrs: [], honor_x_forwarded_for: true},
        network: %{allow_all: true},
        features: %{
          nostr: %{enabled: true},
          admin: %{enabled: true},
          metrics: %{enabled: false, access: %{allow_all: true}, auth_token: nil}
        },
        auth: %{nip42_required: false, nip98_required_for_admin: true},
        baseline_acl: %{read: [], write: []},
        bandit_options: []
      },
      overrides
    )
  end

  # Performs a NIP-11 HTTPS GET against the local listener, verifying the
  # server against `cacertfile`; optional :certfile/:keyfile opts supply a
  # client certificate for mutual-TLS tests. Returns {:ok, status} | {:error, reason}.
  defp nip11_request(port, cacertfile, opts \\ []) do
    transport_opts =
      [
        mode: :binary,
        verify: :verify_peer,
        cacertfile: String.to_charlist(cacertfile),
        server_name_indication: ~c"localhost",
        customize_hostname_check: [
          match_fun: :public_key.pkix_verify_hostname_match_fun(:https)
        ]
      ]
      |> maybe_put_file_opt(:certfile, Keyword.get(opts, :certfile))
      |> maybe_put_file_opt(:keyfile, Keyword.get(opts, :keyfile))

    case Req.get(
           url: "https://localhost:#{port}/relay",
           headers: [{"accept", "application/nostr+json"}],
           decode_body: false,
           connect_options: [transport_opts: transport_opts]
         ) do
      {:ok, %Req.Response{status: status}} -> {:ok, status}
      {:error, reason} -> {:error, reason}
    end
  end

  # Connects with verification disabled (we only want the presented cert)
  # and returns the base64 SHA-256 of the server certificate's DER encoding.
  defp server_cert_fingerprint(port) do
    {:ok, socket} =
      :ssl.connect(
        ~c"127.0.0.1",
        port,
        [verify: :verify_none, active: false, server_name_indication: ~c"localhost"],
        5_000
      )

    {:ok, cert_der} = :ssl.peercert(socket)
    :ok = :ssl.close(socket)
    Base.encode64(:crypto.hash(:sha256, cert_der))
  end

  # Reads a PEM file and returns the DER payload of every entry, suitable for
  # the ssl `:cacerts` option.
  defp ca_certs(certfile) do
    certfile
    |> File.read!()
    |> :public_key.pem_decode()
    |> Enum.map(&elem(&1, 1))
  end

  # Adds a charlist-valued ssl option only when a path was actually given.
  defp maybe_put_file_opt(options, _key, nil), do: options

  defp maybe_put_file_opt(options, key, value) do
    Keyword.put(options, key, String.to_charlist(value))
  end

  # ssl options for the WSS client: trust the test CA and verify "localhost".
  defp websocket_ssl_options(cacertfile) do
    [
      cacerts: ca_certs(cacertfile),
      server_name_indication: ~c"localhost",
      customize_hostname_check: [
        match_fun: :public_key.pkix_verify_hostname_match_fun(:https)
      ]
    ]
  end

  # Recursively merges two maps: map values merge key-by-key, anything else
  # is replaced by the right-hand value.
  defp deep_merge(left, right) when is_map(left) and is_map(right) do
    Map.merge(left, right, fn _key, left_value, right_value ->
      if is_map(left_value) and is_map(right_value) do
        deep_merge(left_value, right_value)
      else
        right_value
      end
    end)
  end

  # Asks the OS for a free ephemeral port by binding port 0, then releases it.
  defp free_port do
    {:ok, socket} = :gen_tcp.listen(0, [:binary, active: false, packet: :raw, reuseaddr: true])
    {:ok, port} = :inet.port(socket)
    :ok = :gen_tcp.close(socket)
    port
  end

  defp unique_name(prefix) do
    :"#{prefix}_#{System.unique_integer([:positive, :monotonic])}"
  end

  # Polls `fun` up to `attempts` times, 50ms apart (the empty receive/after
  # acts as a sleep); flunks the test if the condition is never true.
  defp assert_eventually(fun, attempts \\ 40)

  defp assert_eventually(_fun, 0), do: flunk("condition was not met in time")

  defp assert_eventually(fun, attempts) do
    if fun.() do
      :ok
    else
      receive do
      after
        50 -> assert_eventually(fun, attempts - 1)
      end
    end
  end
end