Compare commits
12 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 8c8d5a8abb | |||
| 0fbd7008a1 | |||
| bfdb06b203 | |||
| 36365710a8 | |||
| e12085af2f | |||
| 18e429e05a | |||
| c7a9f152f9 | |||
| 238b44ff03 | |||
| 680a73ee33 | |||
| 63d3e7d55f | |||
| 54a54c026b | |||
| d348eab69e |
22
BENCHMARK.md
22
BENCHMARK.md
@@ -1,6 +1,6 @@
|
||||
Running 2 comparison run(s)...
|
||||
Versions:
|
||||
parrhesia 0.2.0
|
||||
parrhesia 0.3.0
|
||||
strfry 1.0.4 (nixpkgs)
|
||||
nostr-rs-relay 0.9.0
|
||||
nostr-bench 0.4.0
|
||||
@@ -16,18 +16,18 @@ Versions:
|
||||
=== Bench comparison (averages) ===
|
||||
metric parrhesia strfry nostr-rs-relay strfry/parrhesia nostr-rs/parrhesia
|
||||
-------------------------- --------- -------- -------------- ---------------- ------------------
|
||||
connect avg latency (ms) ↓ 10.00 3.00 2.50 0.30x 0.25x
|
||||
connect max latency (ms) ↓ 18.50 5.00 4.00 0.27x 0.22x
|
||||
echo throughput (TPS) ↑ 76972.00 68204.50 158779.00 0.89x 2.06x
|
||||
echo throughput (MiB/s) ↑ 42.15 38.15 86.95 0.91x 2.06x
|
||||
event throughput (TPS) ↑ 1749.00 3560.00 787.50 2.04x 0.45x
|
||||
event throughput (MiB/s) ↑ 1.15 2.30 0.50 2.00x 0.43x
|
||||
req throughput (TPS) ↑ 2463.00 1808.00 822.00 0.73x 0.33x
|
||||
req throughput (MiB/s) ↑ 13.00 11.70 2.25 0.90x 0.17x
|
||||
connect avg latency (ms) ↓ 13.50 3.00 2.00 0.22x 0.15x
|
||||
connect max latency (ms) ↓ 22.50 5.50 3.00 0.24x 0.13x
|
||||
echo throughput (TPS) ↑ 80385.00 61673.00 164516.00 0.77x 2.05x
|
||||
echo throughput (MiB/s) ↑ 44.00 34.45 90.10 0.78x 2.05x
|
||||
event throughput (TPS) ↑ 2000.00 3404.50 788.00 1.70x 0.39x
|
||||
event throughput (MiB/s) ↑ 1.30 2.20 0.50 1.69x 0.38x
|
||||
req throughput (TPS) ↑ 3664.00 1808.50 877.50 0.49x 0.24x
|
||||
req throughput (MiB/s) ↑ 20.75 11.75 2.45 0.57x 0.12x
|
||||
|
||||
Legend: ↑ higher is better, ↓ lower is better.
|
||||
Ratio columns are server/parrhesia (for ↓ metrics, <1.00x means that server is faster).
|
||||
|
||||
Run details:
|
||||
run 1: parrhesia(echo_tps=78336, event_tps=1796, req_tps=2493, connect_avg_ms=9) | strfry(echo_tps=70189, event_tps=3567, req_tps=1809, connect_avg_ms=3) | nostr-rs-relay(echo_tps=149317, event_tps=786, req_tps=854, connect_avg_ms=2)
|
||||
run 2: parrhesia(echo_tps=75608, event_tps=1702, req_tps=2433, connect_avg_ms=11) | strfry(echo_tps=66220, event_tps=3553, req_tps=1807, connect_avg_ms=3) | nostr-rs-relay(echo_tps=168241, event_tps=789, req_tps=790, connect_avg_ms=3)
|
||||
run 1: parrhesia(echo_tps=81402, event_tps=1979, req_tps=3639, connect_avg_ms=14) | strfry(echo_tps=61745, event_tps=3457, req_tps=1818, connect_avg_ms=3) | nostr-rs-relay(echo_tps=159974, event_tps=784, req_tps=905, connect_avg_ms=2)
|
||||
run 2: parrhesia(echo_tps=79368, event_tps=2021, req_tps=3689, connect_avg_ms=13) | strfry(echo_tps=61601, event_tps=3352, req_tps=1799, connect_avg_ms=3) | nostr-rs-relay(echo_tps=169058, event_tps=792, req_tps=850, connect_avg_ms=2)
|
||||
|
||||
86
PROGRESS.md
86
PROGRESS.md
@@ -1,86 +0,0 @@
|
||||
# PROGRESS (ephemeral)
|
||||
|
||||
Implementation checklist for Parrhesia relay.
|
||||
|
||||
## Phase 0 — foundation
|
||||
|
||||
- [x] Confirm architecture doc with final NIP scope (`docs/ARCH.md`)
|
||||
- [x] Add core deps (websocket/http server, ecto_sql/postgrex, telemetry, test tooling)
|
||||
- [x] Establish application config structure (limits, policies, feature flags)
|
||||
- [x] Wire initial supervision tree skeleton
|
||||
|
||||
## Phase 1 — protocol core (NIP-01)
|
||||
|
||||
- [x] Implement websocket endpoint + per-connection process
|
||||
- [x] Implement message decode/encode for `EVENT`, `REQ`, `CLOSE`
|
||||
- [x] Implement strict event validation (`id`, `sig`, shape, timestamps)
|
||||
- [x] Implement filter evaluation engine (AND/OR semantics)
|
||||
- [x] Implement subscription lifecycle + `EOSE` behavior
|
||||
- [x] Implement canonical `OK`, `NOTICE`, `CLOSED` responses + prefixes
|
||||
|
||||
## Phase 2 — storage boundary + postgres adapter
|
||||
|
||||
- [x] Define `Parrhesia.Storage.*` behaviors (events/moderation/groups/admin)
|
||||
- [x] Implement Postgres adapter modules behind behaviors
|
||||
- [x] Create migrations for events, tags, moderation, membership
|
||||
- [x] Implement replaceable/addressable semantics at storage layer
|
||||
- [x] Add adapter contract test suite
|
||||
|
||||
## Phase 3 — fanout + performance primitives
|
||||
|
||||
- [x] Build ETS-backed subscription index
|
||||
- [x] Implement candidate narrowing by kind/author/tag
|
||||
- [x] Add bounded outbound queues/backpressure per connection
|
||||
- [x] Add telemetry for ingest/query/fanout latency + queue depth
|
||||
|
||||
## Phase 4 — relay metadata and auth
|
||||
|
||||
- [x] NIP-11 endpoint (`application/nostr+json`)
|
||||
- [x] NIP-42 challenge/auth flow
|
||||
- [x] Enforce NIP-70 protected events (default reject, auth override)
|
||||
- [x] Add auth-required/restricted response paths for writes and reqs
|
||||
|
||||
## Phase 5 — lifecycle and moderation features
|
||||
|
||||
- [x] NIP-09 deletion requests
|
||||
- [x] NIP-40 expiration handling + purge worker
|
||||
- [x] NIP-62 vanish requests (hard delete semantics)
|
||||
- [x] NIP-13 PoW gate (configurable minimum)
|
||||
- [x] Moderation tables + policy hooks (ban/allow/event/ip)
|
||||
|
||||
## Phase 6 — query extensions
|
||||
|
||||
- [x] NIP-45 `COUNT` (exact)
|
||||
- [x] Optional HLL response support
|
||||
- [x] NIP-50 search (`search` filter + ranking)
|
||||
- [x] NIP-77 negentropy (`NEG-OPEN/MSG/CLOSE`)
|
||||
|
||||
## Phase 7 — private messaging, groups, and MLS
|
||||
|
||||
- [x] NIP-17/59 recipient-protected giftwrap read path (`kind:1059`)
|
||||
- [x] NIP-29 group event policy + relay metadata events
|
||||
- [x] NIP-43 membership request flow (`28934/28935/28936`, `8000/8001`, `13534`)
|
||||
- [x] Marmot MIP relay surface: `443`, `445`, `10051` handling
|
||||
- [x] MLS retention policy + tests for commit race edge cases
|
||||
|
||||
## Phase 8 — management API + operations
|
||||
|
||||
- [x] NIP-86 HTTP management endpoint
|
||||
- [x] NIP-98 auth validation for management calls
|
||||
- [x] Implement supported management methods + audit logging
|
||||
- [x] Build health/readiness and Prometheus-compatible `/metrics` endpoints
|
||||
|
||||
## Phase 9 — full test + hardening pass
|
||||
|
||||
- [x] Unit + integration + property test coverage for all critical modules
|
||||
- [x] End-to-end websocket conformance scenarios
|
||||
- [x] Load/soak tests with target p95 latency budgets
|
||||
- [x] Fault-injection tests (DB outages, high churn, restart recovery)
|
||||
- [x] Final precommit run and fix all issues
|
||||
|
||||
## Nice-to-have / backlog
|
||||
|
||||
- [x] Multi-node fanout via PG LISTEN/NOTIFY or external bus
|
||||
- [x] Partitioned event storage + archival strategy
|
||||
- [x] Alternate storage adapter prototype (non-Postgres)
|
||||
- [x] Compatibility mode for Marmot protocol transition (not required per user)
|
||||
@@ -1,61 +0,0 @@
|
||||
# PROGRESS_MARMOT (ephemeral)
|
||||
|
||||
Marmot-specific implementation checklist for Parrhesia relay interoperability.
|
||||
|
||||
Spec source: `~/marmot/README.md` + MIP-00..05.
|
||||
|
||||
## M0 — spec lock + interoperability profile
|
||||
|
||||
- [ ] Freeze target profile to MIP-00..03 (mandatory)
|
||||
- [ ] Keep MIP-04 and MIP-05 behind feature flags (optional)
|
||||
- [ ] Document that legacy NIP-EE is superseded and no dedicated transition compatibility mode is planned
|
||||
- [ ] Publish operator-facing compatibility statement in docs
|
||||
|
||||
## M1 — MIP-00 (credentials + keypackages)
|
||||
|
||||
- [x] Enforce kind `443` required tags and encoding (`encoding=base64`)
|
||||
- [x] Validate `mls_protocol_version`, `mls_ciphersuite`, `mls_extensions`, `relays`, and `i` tag shape
|
||||
- [x] Add efficient `#i` query/index path for KeyPackageRef lookup
|
||||
- [x] Keep replaceable behavior for kind `10051` relay-list events
|
||||
- [x] Add conformance tests for valid/invalid KeyPackage envelopes
|
||||
|
||||
## M2 — MIP-01 (group construction data expectations)
|
||||
|
||||
- [x] Enforce relay-side routing prerequisites for Marmot groups (`#h` query path)
|
||||
- [x] Keep deterministic ordering for group-event catch-up (`created_at` + `id` tie-break)
|
||||
- [x] Add guardrails for group metadata traffic volume and filter windows
|
||||
- [x] Add tests for `#h` routing and ordering invariants
|
||||
|
||||
## M3 — MIP-02 (welcome events)
|
||||
|
||||
- [x] Support wrapped Welcome delivery via NIP-59 (`1059`) recipient-gated reads
|
||||
- [x] Validate relay behavior for unsigned inner Welcome semantics (kind `444` envelope expectations)
|
||||
- [x] Ensure durability/ack semantics support Commit-then-Welcome sequencing requirements
|
||||
- [x] Add negative tests for malformed wrapped Welcome payloads
|
||||
|
||||
## M4 — MIP-03 (group events)
|
||||
|
||||
- [x] Enforce kind `445` envelope validation (`#h` tag presence/shape, base64 content shape)
|
||||
- [x] Keep relay MLS-agnostic (no MLS decrypt/inspect in relay hot path)
|
||||
- [x] Add configurable retention policy for kind `445` traffic
|
||||
- [x] Add tests for high-volume fanout behavior and deterministic query results
|
||||
|
||||
## M5 — optional MIP-04 (encrypted media)
|
||||
|
||||
- [x] Accept/store MIP-04 metadata-bearing events as regular Nostr events
|
||||
- [x] Add policy hooks for media metadata limits and abuse controls
|
||||
- [x] Add tests for search/filter interactions with media metadata tags
|
||||
|
||||
## M6 — optional MIP-05 (push notifications)
|
||||
|
||||
- [x] Accept/store notification coordination events required by enabled profile
|
||||
- [x] Add policy/rate-limit controls for push-related event traffic
|
||||
- [x] Add abuse and replay protection tests for notification trigger paths
|
||||
|
||||
## M7 — hardening + operations
|
||||
|
||||
- [x] Add Marmot-focused telemetry breakdowns (ingest/query/fanout, queue pressure)
|
||||
- [x] Add query-plan regression checks for `#h` and `#i` heavy workloads
|
||||
- [x] Add fault-injection scenarios for relay outage/reordering behavior in group flows
|
||||
- [x] Add docs for operator limits tuned for Marmot traffic patterns
|
||||
- [x] Final `mix precommit` before merge
|
||||
16
README.md
16
README.md
@@ -6,6 +6,7 @@ It exposes:
|
||||
- a WebSocket relay endpoint at `/relay`
|
||||
- NIP-11 relay info on `GET /relay` with `Accept: application/nostr+json`
|
||||
- operational HTTP endpoints (`/health`, `/ready`, `/metrics`)
|
||||
- `/metrics` is restricted by default to private/loopback source IPs
|
||||
- a NIP-86-style management API at `POST /management` (NIP-98 auth)
|
||||
|
||||
## Supported NIPs
|
||||
@@ -56,7 +57,7 @@ ws://localhost:4000/relay
|
||||
|
||||
- `GET /health` -> `ok`
|
||||
- `GET /ready` -> readiness status
|
||||
- `GET /metrics` -> Prometheus metrics
|
||||
- `GET /metrics` -> Prometheus metrics (private/loopback source IPs by default)
|
||||
- `GET /relay` + `Accept: application/nostr+json` -> NIP-11 document
|
||||
- `POST /management` -> management API (requires NIP-98 auth)
|
||||
|
||||
@@ -81,7 +82,20 @@ config :parrhesia, Parrhesia.Web.Endpoint,
|
||||
ip: {0, 0, 0, 0},
|
||||
port: 4000
|
||||
|
||||
# Optional dedicated metrics listener (keep this internal)
|
||||
config :parrhesia, Parrhesia.Web.MetricsEndpoint,
|
||||
enabled: true,
|
||||
ip: {127, 0, 0, 1},
|
||||
port: 9568
|
||||
|
||||
config :parrhesia,
|
||||
metrics: [
|
||||
enabled_on_main_endpoint: false,
|
||||
public: false,
|
||||
private_networks_only: true,
|
||||
allowed_cidrs: [],
|
||||
auth_token: nil
|
||||
],
|
||||
limits: [
|
||||
max_frame_bytes: 1_048_576,
|
||||
max_event_bytes: 262_144,
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
import Config
|
||||
|
||||
config :postgrex, :json_library, JSON
|
||||
|
||||
config :parrhesia,
|
||||
moderation_cache_enabled: true,
|
||||
relay_url: "ws://localhost:4000/relay",
|
||||
limits: [
|
||||
max_frame_bytes: 1_048_576,
|
||||
max_event_bytes: 262_144,
|
||||
@@ -8,9 +12,17 @@ config :parrhesia,
|
||||
max_filter_limit: 500,
|
||||
max_subscriptions_per_connection: 32,
|
||||
max_event_future_skew_seconds: 900,
|
||||
max_event_ingest_per_window: 120,
|
||||
event_ingest_window_seconds: 1,
|
||||
auth_max_age_seconds: 600,
|
||||
max_outbound_queue: 256,
|
||||
outbound_drain_batch_size: 64,
|
||||
outbound_overflow_strategy: :close
|
||||
outbound_overflow_strategy: :close,
|
||||
max_negentropy_payload_bytes: 4096,
|
||||
max_negentropy_sessions_per_connection: 8,
|
||||
max_negentropy_total_sessions: 10_000,
|
||||
negentropy_session_idle_timeout_seconds: 60,
|
||||
negentropy_session_sweep_interval_seconds: 10
|
||||
],
|
||||
policies: [
|
||||
auth_required_for_writes: false,
|
||||
@@ -35,7 +47,15 @@ config :parrhesia,
|
||||
marmot_push_max_server_recipients: 1,
|
||||
management_auth_required: true
|
||||
],
|
||||
metrics: [
|
||||
enabled_on_main_endpoint: true,
|
||||
public: false,
|
||||
private_networks_only: true,
|
||||
allowed_cidrs: [],
|
||||
auth_token: nil
|
||||
],
|
||||
features: [
|
||||
verify_event_signatures: true,
|
||||
nip_45_count: true,
|
||||
nip_50_search: true,
|
||||
nip_77_negentropy: true,
|
||||
@@ -50,6 +70,13 @@ config :parrhesia,
|
||||
|
||||
config :parrhesia, Parrhesia.Web.Endpoint, port: 4000
|
||||
|
||||
config :parrhesia, Parrhesia.Web.MetricsEndpoint,
|
||||
enabled: false,
|
||||
ip: {127, 0, 0, 1},
|
||||
port: 9568
|
||||
|
||||
config :parrhesia, Parrhesia.Repo, types: Parrhesia.PostgresTypes
|
||||
|
||||
config :parrhesia, ecto_repos: [Parrhesia.Repo]
|
||||
|
||||
import_config "#{config_env()}.exs"
|
||||
|
||||
@@ -1,3 +1,8 @@
|
||||
import Config
|
||||
|
||||
config :parrhesia, Parrhesia.Repo,
|
||||
pool_size: 32,
|
||||
queue_target: 1_000,
|
||||
queue_interval: 5_000
|
||||
|
||||
# Production runtime configuration lives in config/runtime.exs.
|
||||
|
||||
@@ -5,9 +5,35 @@ if config_env() == :prod do
|
||||
System.get_env("DATABASE_URL") ||
|
||||
raise "environment variable DATABASE_URL is missing. Example: ecto://USER:PASS@HOST/DATABASE"
|
||||
|
||||
repo_defaults = Application.get_env(:parrhesia, Parrhesia.Repo, [])
|
||||
|
||||
default_pool_size = Keyword.get(repo_defaults, :pool_size, 32)
|
||||
default_queue_target = Keyword.get(repo_defaults, :queue_target, 1_000)
|
||||
default_queue_interval = Keyword.get(repo_defaults, :queue_interval, 5_000)
|
||||
|
||||
pool_size =
|
||||
case System.get_env("POOL_SIZE") do
|
||||
nil -> default_pool_size
|
||||
value -> String.to_integer(value)
|
||||
end
|
||||
|
||||
queue_target =
|
||||
case System.get_env("DB_QUEUE_TARGET_MS") do
|
||||
nil -> default_queue_target
|
||||
value -> String.to_integer(value)
|
||||
end
|
||||
|
||||
queue_interval =
|
||||
case System.get_env("DB_QUEUE_INTERVAL_MS") do
|
||||
nil -> default_queue_interval
|
||||
value -> String.to_integer(value)
|
||||
end
|
||||
|
||||
config :parrhesia, Parrhesia.Repo,
|
||||
url: database_url,
|
||||
pool_size: String.to_integer(System.get_env("POOL_SIZE") || "10")
|
||||
pool_size: pool_size,
|
||||
queue_target: queue_target,
|
||||
queue_interval: queue_interval
|
||||
|
||||
config :parrhesia, Parrhesia.Web.Endpoint,
|
||||
port: String.to_integer(System.get_env("PORT") || "4000")
|
||||
|
||||
@@ -12,7 +12,10 @@ config :parrhesia, Parrhesia.Web.Endpoint,
|
||||
port: test_endpoint_port,
|
||||
ip: {127, 0, 0, 1}
|
||||
|
||||
config :parrhesia, enable_expiration_worker: false
|
||||
config :parrhesia,
|
||||
enable_expiration_worker: false,
|
||||
moderation_cache_enabled: false,
|
||||
features: [verify_event_signatures: false]
|
||||
|
||||
pg_host = System.get_env("PGHOST")
|
||||
|
||||
|
||||
66
default.nix
66
default.nix
@@ -1,11 +1,16 @@
|
||||
{
|
||||
lib,
|
||||
beam,
|
||||
fetchFromGitHub,
|
||||
runCommand,
|
||||
autoconf,
|
||||
automake,
|
||||
libtool,
|
||||
pkg-config,
|
||||
vips,
|
||||
}: let
|
||||
pname = "parrhesia";
|
||||
version = "0.1.0";
|
||||
version = "0.3.0";
|
||||
|
||||
beamPackages = beam.packages.erlang_28.extend (
|
||||
final: _prev: {
|
||||
@@ -43,16 +48,69 @@
|
||||
beamPackages.fetchMixDeps {
|
||||
pname = "${pname}-mix-deps";
|
||||
inherit version src;
|
||||
hash = "sha256-1v2+Q1MHbu09r5OBaLehiR+JfMP0Q5OHaWuwrQDzZJU=";
|
||||
hash = "sha256-0KOyYRbYM0jjmp3tPn64qkp0YkmZKlqkGrlu/wCr4m8=";
|
||||
}
|
||||
else null;
|
||||
|
||||
# lib_secp256k1 is a :make dep and may not be present in fetchMixDeps output.
|
||||
# Inject the Hex package explicitly, then vendor upstream bitcoin-core/secp256k1
|
||||
# sources to avoid build-time network access.
|
||||
libSecp256k1Hex = beamPackages.fetchHex {
|
||||
pkg = "lib_secp256k1";
|
||||
version = "0.7.1";
|
||||
sha256 = "sha256-eL3TZhoXRIr/Wu7FynTI3bwJsB8Oz6O6Gro+iuR6srM=";
|
||||
};
|
||||
|
||||
elixirMakeHex = beamPackages.fetchHex {
|
||||
pkg = "elixir_make";
|
||||
version = "0.9.0";
|
||||
sha256 = "sha256-2yPU/Yt1dGKtAviqc0MaQm/mZxyAsgDZcQyvPR3Q/9s=";
|
||||
};
|
||||
|
||||
secp256k1Src = fetchFromGitHub {
|
||||
owner = "bitcoin-core";
|
||||
repo = "secp256k1";
|
||||
rev = "v0.7.1";
|
||||
hash = "sha256-DnBgetf+98n7B1JGtyTdxyc+yQ51A3+ueTIPPSWCm4E=";
|
||||
};
|
||||
|
||||
patchedMixFodDeps =
|
||||
if mixFodDeps == null
|
||||
then null
|
||||
else
|
||||
runCommand mixFodDeps.name {} ''
|
||||
mkdir -p $out
|
||||
cp -r --no-preserve=mode ${mixFodDeps}/. $out
|
||||
chmod -R u+w $out
|
||||
|
||||
rm -rf $out/lib_secp256k1
|
||||
cp -r ${libSecp256k1Hex} $out/lib_secp256k1
|
||||
chmod -R u+w $out/lib_secp256k1
|
||||
|
||||
rm -rf $out/elixir_make
|
||||
cp -r ${elixirMakeHex} $out/elixir_make
|
||||
|
||||
rm -rf $out/lib_secp256k1/c_src/secp256k1
|
||||
mkdir -p $out/lib_secp256k1/c_src/secp256k1
|
||||
cp -r ${secp256k1Src}/. $out/lib_secp256k1/c_src/secp256k1/
|
||||
chmod -R u+w $out/lib_secp256k1/c_src/secp256k1
|
||||
|
||||
# mixRelease may copy deps without preserving +x bits, so avoid relying
|
||||
# on executable mode for autogen.sh.
|
||||
substituteInPlace $out/lib_secp256k1/Makefile \
|
||||
--replace-fail "./autogen.sh" "sh ./autogen.sh"
|
||||
|
||||
touch $out/lib_secp256k1/c_src/secp256k1/.fetched
|
||||
'';
|
||||
in
|
||||
beamPackages.mixRelease {
|
||||
inherit pname version src mixFodDeps;
|
||||
inherit pname version src;
|
||||
|
||||
mixFodDeps = patchedMixFodDeps;
|
||||
|
||||
mixEnv = "prod";
|
||||
removeCookie = false;
|
||||
nativeBuildInputs = [pkg-config];
|
||||
nativeBuildInputs = [pkg-config autoconf automake libtool];
|
||||
buildInputs = [vips];
|
||||
|
||||
preConfigure = ''
|
||||
|
||||
30
devenv.nix
30
devenv.nix
@@ -75,9 +75,16 @@ in {
|
||||
});
|
||||
nostr-bench = pkgs.callPackage ./nix/nostr-bench.nix {};
|
||||
in
|
||||
with pkgs; [
|
||||
with pkgs;
|
||||
[
|
||||
just
|
||||
gcc
|
||||
git
|
||||
gnumake
|
||||
autoconf
|
||||
automake
|
||||
libtool
|
||||
pkg-config
|
||||
# Nix code formatter
|
||||
alejandra
|
||||
# i18n
|
||||
@@ -94,6 +101,8 @@ in {
|
||||
nostr-bench
|
||||
# Nostr reference servers
|
||||
nostr-rs-relay
|
||||
]
|
||||
++ lib.optionals pkgs.stdenv.hostPlatform.isx86_64 [
|
||||
strfry
|
||||
];
|
||||
|
||||
@@ -121,6 +130,23 @@ in {
|
||||
services.postgres = {
|
||||
enable = true;
|
||||
package = pkgs.postgresql_18;
|
||||
|
||||
# Some tuning for the benchmark
|
||||
settings = {
|
||||
max_connections = 300;
|
||||
shared_buffers = "1GB";
|
||||
effective_cache_size = "3GB";
|
||||
work_mem = "16MB";
|
||||
maintenance_work_mem = "256MB";
|
||||
wal_compression = "on";
|
||||
checkpoint_timeout = "15min";
|
||||
checkpoint_completion_target = 0.9;
|
||||
min_wal_size = "1GB";
|
||||
max_wal_size = "4GB";
|
||||
random_page_cost = 1.1;
|
||||
effective_io_concurrency = 200;
|
||||
};
|
||||
|
||||
initialDatabases = [{name = "parrhesia_dev";} {name = "parrhesia_test";}];
|
||||
initialScript = ''
|
||||
CREATE ROLE dev WITH LOGIN PASSWORD 'dev' SUPERUSER;
|
||||
@@ -129,7 +155,7 @@ in {
|
||||
|
||||
DROP database template1;
|
||||
|
||||
CREATE DATABASE template1 WITH OWNER = agent
|
||||
CREATE DATABASE template1
|
||||
ENCODING = 'UTF8'
|
||||
TABLESPACE = pg_default
|
||||
LC_COLLATE = 'de_DE.UTF-8'
|
||||
|
||||
670
docs/slop/REVIEW.md
Normal file
670
docs/slop/REVIEW.md
Normal file
@@ -0,0 +1,670 @@
|
||||
# Parrhesia Relay — Technical Review
|
||||
|
||||
**Reviewer:** Case, Senior Systems & Protocol Engineer
|
||||
**Date:** 2026-03-14
|
||||
**Commit:** `63d3e7d` (master)
|
||||
**Scope:** Full codebase review against Nostr NIPs, MARMOT specs, and production readiness criteria
|
||||
|
||||
---
|
||||
|
||||
# Executive Summary
|
||||
|
||||
Parrhesia is a well-structured Nostr relay built on Elixir/OTP with PostgreSQL storage. The architecture is clean — clear separation between web, protocol, policy, and storage layers with a pluggable adapter pattern. Code quality is above average: consistent error handling, good use of `with` chains, comprehensive policy enforcement for MARMOT-specific concerns, and thoughtful outbound backpressure management. The developer clearly understands both the BEAM and the Nostr protocol.
|
||||
|
||||
However, the relay has **two critical defects** that make it unsuitable for any deployment beyond trusted local development: (1) **no Schnorr signature verification** — any client can forge events with arbitrary pubkeys, and (2) **lossy tag storage** — events returned from queries have truncated tags, violating NIP-01's data integrity guarantees. Several additional high-severity issues (no ephemeral event handling, missing NIP-42 relay tag validation, SQL LIKE injection vector, no ingest rate limiting) compound the risk.
|
||||
|
||||
**Overall risk rating: Critical**
|
||||
|
||||
This relay is **not production-ready** for any public deployment. It is suitable for local development and internal testing with trusted clients. With the critical and high findings addressed, it could serve as a solid private relay. Public internet deployment requires significant additional hardening.
|
||||
|
||||
---
|
||||
|
||||
# Top Findings
|
||||
|
||||
## [Critical] No Schnorr Signature Verification
|
||||
|
||||
**Area:** protocol correctness, security
|
||||
|
||||
**Why it matters:**
|
||||
NIP-01 mandates that relays MUST verify event signatures using Schnorr signatures over secp256k1. Without signature verification, any client can publish events with any pubkey. This completely breaks the identity and trust model of the Nostr protocol. Authentication (NIP-42), protected events (NIP-70), deletion (NIP-09), replaceable events — all rely on pubkey authenticity.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/protocol/event_validator.ex` validates the event ID hash (`validate_id_hash/1` at line 188) but never verifies the `sig` field against the `pubkey` using Schnorr/secp256k1. A `grep` for `schnorr`, `secp256k1`, `verify`, and `:crypto.verify` across the entire `lib/` directory returns zero results. The `validate_sig/1` function (line 182) only checks that `sig` is a 64-byte (128-character) lowercase hex string — a format check, not a cryptographic verification.
|
||||
|
||||
**Spec reference:**
|
||||
NIP-01: "Each user has a keypair. Signatures, public key, and encodings are done according to the Schnorr signatures standard for the curve secp256k1." The relay is expected to verify signatures to ensure event integrity.
|
||||
|
||||
**Attack scenario:**
|
||||
An unauthenticated client connects and publishes `["EVENT", {"id": "<valid-hash>", "pubkey": "<victim-pubkey>", "sig": "<any-64-byte-hex>", ...}]`. The relay stores and fans out the forged event as if the victim authored it. This enables impersonation, reputation attacks, and poisoning of replaceable events (kind 0 profile, kind 3 contacts, kind 10002 relay lists).
|
||||
|
||||
**Recommended fix:**
|
||||
Add a secp256k1 library dependency (e.g., `lib_secp256k1` — which the project's Nix build already vendors — or `ex_secp256k1`) and add BIP-340 Schnorr verification to `EventValidator.validate/1` after `validate_id_hash/1`. Note that OTP's `:crypto` module does not provide BIP-340 Schnorr signatures, so a NIF-backed secp256k1 library is required. This is the single most important fix.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [Critical] Lossy Tag Storage — Events Returned With Truncated Tags
|
||||
|
||||
**Area:** protocol correctness, database
|
||||
|
||||
**Why it matters:**
|
||||
NIP-01 events have tags with arbitrary numbers of elements (e.g., `["e", "<event-id>", "<relay-url>", "<marker>"]`, `["p", "<pubkey>", "<relay-url>"]`, `["a", "<kind>:<pubkey>:<d-tag>", "<relay-url>"]`). The relay only stores the first two elements (`name` and `value`) of each tag in the `event_tags` table, and single-element tags (like `["-"]` for NIP-70 protected events) are dropped entirely. When events are queried back, the reconstructed tags are truncated.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/storage/adapters/postgres/events.ex`:
|
||||
- `insert_tags!/2` (line 266): pattern matches `[name, value | _rest]` — discards `_rest`, ignores tags with fewer than 2 elements.
|
||||
- `load_tags/1` (line 739): reconstructs tags as `[tag.name, tag.value]` — only 2 elements.
|
||||
- `to_nostr_event/2` (line 763): uses the truncated tags directly.
|
||||
|
||||
The `events` table itself does not store the full tag array. The full tags exist only in the original JSON during ingest, then are lost.
|
||||
|
||||
**Spec reference:**
|
||||
NIP-01: Tags are arrays of arbitrary strings. Relay implementations MUST return events with their complete, unmodified tags. Relay hints in `e`/`p` tags, markers, and other metadata are essential for client operation.
|
||||
|
||||
**Attack/failure scenario:**
|
||||
1. Client publishes event with `["e", "<id>", "wss://relay.example.com", "reply"]`.
|
||||
2. Another client queries and receives `["e", "<id>"]` — relay hint and marker lost.
|
||||
3. Client cannot follow the event reference to the correct relay.
|
||||
4. Protected events with `["-"]` tag lose their protection marker on retrieval, breaking NIP-70 semantics.
|
||||
|
||||
**Recommended fix:**
|
||||
Either (a) store the full tag JSON array in the events table (e.g., a `tags` JSONB column), using `event_tags` only as a query index, or (b) add additional columns to `event_tags` to preserve all elements (e.g., a `rest` text array column or store the full tag as a JSONB column). Option (a) is simpler and more correct.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [High] No Ephemeral Event Handling (Kind 20000–29999)
|
||||
|
||||
**Area:** protocol correctness, performance
|
||||
|
||||
**Why it matters:**
|
||||
NIP-01 defines kinds 20000–29999 as ephemeral events that relays are NOT expected to store. They should be fanned out to matching subscribers but never persisted. The relay currently persists all events regardless of kind, which wastes storage and violates client expectations.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/storage/adapters/postgres/events.ex`:
|
||||
- `replaceable_kind?/1` (line 515): handles kinds 0, 3, 10000–19999.
|
||||
- `addressable_kind?/1` (line 517): handles kinds 30000–39999.
|
||||
- No function checks for ephemeral kinds (20000–29999).
|
||||
- `put_event/2` persists all non-deletion, non-vanish events unconditionally.
|
||||
|
||||
`lib/parrhesia/web/connection.ex`:
|
||||
- `persist_event/1` (line 420): routes kind 5 to deletion, kind 62 to vanish, everything else to `put_event`. No ephemeral bypass.
|
||||
|
||||
The config has `accept_ephemeral_events: true` but it's never checked anywhere.
|
||||
|
||||
**Spec reference:**
|
||||
NIP-01: "Upon receiving an ephemeral event, a relay is NOT expected to store it and SHOULD send it directly to the clients that have matching filters open."
|
||||
|
||||
**Recommended fix:**
|
||||
In `persist_event/1`, check if the event kind is in the ephemeral range. If so, skip DB persistence and only fan out. The `accept_ephemeral_events` config should gate whether ephemeral events are accepted at all.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [High] NIP-42 AUTH Missing Relay Tag Validation
|
||||
|
||||
**Area:** protocol correctness, security
|
||||
|
||||
**Why it matters:**
|
||||
NIP-42 requires AUTH events to include a `["relay", "<relay-url>"]` tag that matches the relay's URL. Without this check, an AUTH event created for relay A can be replayed against relay B, enabling cross-relay authentication bypass.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/web/connection.ex`:
|
||||
- `validate_auth_event/1` (line 573): checks kind 22242 and presence of `challenge` tag.
|
||||
- `validate_auth_challenge/2` (line 590): checks challenge value matches.
|
||||
- **No validation of `relay` tag** anywhere in the auth flow.
|
||||
|
||||
**Spec reference:**
|
||||
NIP-42: AUTH event "MUST include `['relay', '<relay-url>']` tag". The relay MUST verify this tag matches its own URL.
|
||||
|
||||
**Attack scenario:**
|
||||
Attacker obtains an AUTH event from user for relay A (which may be the attacker's relay). Attacker replays this AUTH event against Parrhesia, which accepts it because the challenge is the only thing checked. If the challenge can be predicted or leaked, authentication is fully bypassed.
|
||||
|
||||
**Recommended fix:**
|
||||
Add relay URL validation to `validate_auth_event/1`. The relay should know its own canonical URL (from config or NIP-11 document) and verify the `relay` tag matches.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [High] SQL LIKE Pattern Injection in NIP-50 Search
|
||||
|
||||
**Area:** security, performance
|
||||
|
||||
**Why it matters:**
|
||||
The NIP-50 search implementation uses PostgreSQL `ILIKE` with unsanitized user input interpolated into the pattern. While not traditional SQL injection (the value is parameterized), LIKE metacharacters (`%`, `_`) in the search string alter the matching semantics and can cause catastrophic performance.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/storage/adapters/postgres/events.ex` line 627:
|
||||
```elixir
|
||||
where(query, [event], ilike(event.content, ^"%#{search}%"))
|
||||
```
|
||||
|
||||
The `search` variable is directly interpolated into the LIKE pattern. User-supplied values like `%a%b%c%d%e%f%g%h%i%j%` create pathological multi-wildcard patterns that force PostgreSQL into heavy backtracking (worst-case superlinear in content length) against the full `content` column of every matching row.
|
||||
|
||||
**Attack scenario:**
|
||||
Client sends `["REQ", "sub1", {"search": "%a%b%c%d%e%f%g%h%i%j%k%l%m%n%o%p%q%r%", "kinds": [1]}]`. PostgreSQL executes an expensive sequential scan with pathological multi-wildcard LIKE matching. A handful of concurrent requests with adversarial patterns can saturate the DB connection pool and CPU.
|
||||
|
||||
**Recommended fix:**
|
||||
1. Escape `%` and `_` characters in user search input before interpolation: `search |> String.replace("%", "\\%") |> String.replace("_", "\\_")`.
|
||||
2. Consider PostgreSQL full-text search (`tsvector`/`tsquery`) instead of ILIKE for better performance and correct semantics.
|
||||
3. Add a minimum search term length (e.g., 3 characters).
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [High] No Per-Connection or Per-IP Rate Limiting on Event Ingestion
|
||||
|
||||
**Area:** security, robustness
|
||||
|
||||
**Why it matters:**
|
||||
There is no rate limiting on EVENT submissions. A single client can flood the relay with events at wire speed, consuming DB connections, CPU (for validation), and disk I/O. The outbound queue has backpressure, but the ingest path is completely unbounded.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/web/connection.ex`:
|
||||
- `handle_event_ingest/2` (line 186): processes every EVENT message immediately with no throttle.
|
||||
- No token bucket, sliding window, or any rate-limiting mechanism anywhere in the codebase.
|
||||
- `grep` for `rate.limit`, `throttle`, `rate_limit` across `lib/` returns only error message strings, not actual rate-limiting logic.
|
||||
|
||||
**Attack scenario:**
|
||||
A single WebSocket connection sends 10,000 EVENT messages per second. Each triggers validation, policy checks, and a DB transaction. The DB connection pool (default 32) saturates within milliseconds. All other clients experience timeouts.
|
||||
|
||||
**Recommended fix:**
|
||||
Implement per-connection rate limiting in the WebSocket handler (token bucket per connection state). Consider also per-pubkey and per-IP rate limiting as a separate layer. Start with a simple `{count, window_start}` in connection state.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [High] max_frame_bytes and max_event_bytes Not Enforced
|
||||
|
||||
**Area:** security, robustness
|
||||
|
||||
**Why it matters:**
|
||||
The configuration defines `max_frame_bytes: 1_048_576` and `max_event_bytes: 262_144` but neither value is actually used to limit incoming data. The max_frame_bytes is only reported in the NIP-11 document. An attacker can send arbitrarily large WebSocket frames and events.
|
||||
|
||||
**Evidence:**
|
||||
- `grep` for `max_frame_bytes` in `lib/`: only found in `relay_info.ex` for NIP-11 output.
|
||||
- `grep` for `max_event_bytes` in `lib/`: no results at all.
|
||||
- The Bandit WebSocket upgrade in `router.ex` line 53 passes `timeout: 60_000` but no `max_frame_size` option.
|
||||
- No payload size check in `handle_in/2` before JSON decoding.
|
||||
|
||||
**Attack scenario:**
|
||||
Client sends a 100MB WebSocket frame containing a single event with a massive `content` field or millions of tags. The relay attempts to JSON-decode the entire payload in memory, potentially causing OOM or extreme GC pressure.
|
||||
|
||||
**Recommended fix:**
|
||||
1. Pass `max_frame_size` to Bandit's WebSocket upgrade options.
|
||||
2. Check `byte_size(payload)` in `handle_in/2` before calling `Protocol.decode_client/1`.
|
||||
3. Optionally check individual event size after JSON decoding.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [Medium] NIP-09 Deletion Missing "a" Tag Support for Addressable Events
|
||||
|
||||
**Area:** protocol correctness
|
||||
|
||||
**Why it matters:**
|
||||
NIP-09 specifies that deletion events (kind 5) can reference addressable/replaceable events via `"a"` tags (format: `"<kind>:<pubkey>:<d-tag>"`). The current implementation only handles `"e"` tags.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/storage/adapters/postgres/events.ex`:
|
||||
- `extract_delete_event_ids/1` (line 821): only extracts `["e", event_id | _rest]` tags.
|
||||
- No handling of `["a", ...]` tags.
|
||||
- No query against addressable_event_state or events by kind+pubkey+d_tag.
|
||||
|
||||
**Spec reference:**
|
||||
NIP-09: "The deletion event MAY contain `a` tags pointing to the replaceable/addressable events to be deleted."
|
||||
|
||||
**Recommended fix:**
|
||||
Extract `"a"` tags from the deletion event, parse the `kind:pubkey:d_tag` format, and soft-delete matching events from the addressable/replaceable state tables, ensuring the deleter's pubkey matches.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [Medium] Subscription Index GenServer Is a Single-Point Bottleneck
|
||||
|
||||
**Area:** performance, OTP/design
|
||||
|
||||
**Why it matters:**
|
||||
Every event fanout goes through `Index.candidate_subscription_keys/1`, which is a synchronous `GenServer.call` to a single process. Under load with many connections and high event throughput, this process becomes the serialization point for all fanout operations.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/subscriptions/index.ex`:
|
||||
- `candidate_subscription_keys/2` (line 68): `GenServer.call(server, {:candidate_subscription_keys, event})`
|
||||
- This is called from every connection process for every ingested event (via `fanout_event/1` in `connection.ex` line 688).
|
||||
- The ETS tables are `:protected`, meaning only the owning GenServer can write but any process can read.
|
||||
|
||||
**Recommended fix:**
|
||||
Since the ETS tables are already `:protected` (readable by all processes), make `candidate_subscription_keys/1` read directly from ETS without going through the GenServer. Only mutations (upsert/remove) need to go through the GenServer. This eliminates the serialization bottleneck entirely.
|
||||
|
||||
Because `:protected` tables are readable by any process, refactor `candidate_subscription_keys` to read ETS directly from the caller's process, bypassing the GenServer.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [Medium] Moderation Cache ETS Table Creation Race Condition
|
||||
|
||||
**Area:** robustness, OTP/design
|
||||
|
||||
**Why it matters:**
|
||||
The moderation cache ETS table is lazily created on first access via `cache_table_ref/0`. If two processes simultaneously call a moderation function before the table exists, both will attempt `:ets.new(:parrhesia_moderation_cache, [:named_table, ...])` — one will succeed and the other will hit the rescue clause. While the rescue catches the `ArgumentError`, this is a race-prone pattern.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/storage/adapters/postgres/moderation.ex` lines 211–231:
|
||||
```elixir
|
||||
defp cache_table_ref do
|
||||
case :ets.whereis(@cache_table) do
|
||||
:undefined ->
|
||||
try do
|
||||
:ets.new(@cache_table, [...])
|
||||
rescue
|
||||
ArgumentError -> @cache_table
|
||||
end
|
||||
@cache_table
|
||||
_table_ref ->
|
||||
@cache_table
|
||||
end
|
||||
end
|
||||
```
|
||||
|
||||
Additionally, `ensure_cache_scope_loaded/1` has a TOCTOU race: it checks `ets.member(table, loaded_key)`, then loads from DB and inserts — two processes could both load and insert simultaneously, though this is less harmful (just redundant work).
|
||||
|
||||
**Recommended fix:**
|
||||
Create the ETS table in a supervised process (e.g., in `Parrhesia.Policy.Supervisor` or `Parrhesia.Storage.Supervisor`) at startup, not lazily. This eliminates the race entirely.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [Medium] Archiver SQL Injection
|
||||
|
||||
**Area:** security
|
||||
|
||||
**Why it matters:**
|
||||
The `Parrhesia.Storage.Archiver.archive_sql/2` function directly interpolates arguments into a SQL string without any sanitization or quoting.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/storage/archiver.ex` line 32:
|
||||
```elixir
|
||||
def archive_sql(partition_name, archive_table_name) do
|
||||
"INSERT INTO #{archive_table_name} SELECT * FROM #{partition_name};"
|
||||
end
|
||||
```
|
||||
|
||||
If either argument is derived from user input or external configuration, this is a SQL injection vector.
|
||||
|
||||
**Attack scenario:**
|
||||
If the management API or any admin tool passes user-controlled input to this function (e.g., a partition name from a web request), an attacker could inject: `archive_sql("events_default; DROP TABLE events; --", "archive")`.
|
||||
|
||||
**Recommended fix:**
|
||||
Note that SQL parameter placeholders cannot be used for identifiers, so the robust fix is an allow-list: validate that inputs match the expected partition name pattern (e.g., `~r/^events_\d{6}$/` for `events_YYYYMM`) before interpolation. If arbitrary identifiers must be supported, quote them by wrapping in double quotes and doubling any embedded `"` characters — naive `~s("#{identifier}")` wrapping alone is still injectable via an embedded quote.
|
||||
|
||||
**Confidence:** Medium (depends on whether this function is exposed to external input)
|
||||
|
||||
---
|
||||
|
||||
## [Medium] Count Query Materialises All Matching Event IDs in Memory
|
||||
|
||||
**Area:** performance
|
||||
|
||||
**Why it matters:**
|
||||
The COUNT implementation fetches all matching event IDs into Elixir memory, deduplicates them with `MapSet.new()`, then counts. For large result sets, this is orders of magnitude slower and more memory-intensive than a SQL `COUNT(DISTINCT id)`.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/storage/adapters/postgres/events.ex` lines 111–127:
|
||||
```elixir
|
||||
def count(_context, filters, opts) when is_list(opts) do
|
||||
...
|
||||
total_count =
|
||||
filters
|
||||
|> Enum.flat_map(fn filter ->
|
||||
filter
|
||||
|> event_id_query_for_filter(now, opts)
|
||||
|> Repo.all() # fetches ALL matching IDs
|
||||
end)
|
||||
|> MapSet.new() # deduplicates in memory
|
||||
|> MapSet.size()
|
||||
...
|
||||
end
|
||||
```
|
||||
|
||||
**Attack scenario:**
|
||||
Client sends `["COUNT", "c1", {"kinds": [1]}]` on a relay with 10 million kind-1 events. The relay fetches 10 million binary IDs into memory, builds a MapSet, then counts. This could use hundreds of megabytes of RAM per request.
|
||||
|
||||
**Recommended fix:**
|
||||
For single-filter counts, use `SELECT COUNT(*)` or `SELECT COUNT(DISTINCT id)` directly in SQL. For multi-filter counts where deduplication is needed, use `UNION` in SQL rather than materialising in Elixir.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [Medium] NIP-42 AUTH Does Not Validate created_at Freshness
|
||||
|
||||
**Area:** protocol correctness, security
|
||||
|
||||
**Why it matters:**
|
||||
NIP-42 suggests AUTH events should have a `created_at` close to current time (within ~10 minutes). The relay's AUTH handler validates the event (which includes a future-skew check of 15 minutes) but does not check if the event is too old. An AUTH event from days ago with a matching challenge could be replayed.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/web/connection.ex`:
|
||||
- `handle_auth/2` calls `Protocol.validate_event(auth_event)` which checks future skew but not past staleness.
|
||||
- `validate_auth_event/1` (line 573) only checks kind and challenge tag.
|
||||
- No `created_at` freshness check for AUTH events.
|
||||
|
||||
The NIP-98 implementation (`auth/nip98.ex`) DOES have a 60-second freshness check, but the WebSocket AUTH path does not.
|
||||
|
||||
**Recommended fix:**
|
||||
Add a staleness check: reject AUTH events where `created_at` is more than N seconds in the past (e.g., 600 seconds matching NIP-42 suggestion).
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [Low] NIP-11 Missing CORS Headers
|
||||
|
||||
**Area:** protocol correctness
|
||||
|
||||
**Why it matters:**
|
||||
NIP-11 states relays MUST accept CORS requests by sending appropriate headers. The relay info endpoint does not set CORS headers.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/web/router.ex` line 44–55: the `/relay` GET handler returns NIP-11 JSON but does not set `Access-Control-Allow-Origin`, `Access-Control-Allow-Headers`, or `Access-Control-Allow-Methods` headers. No CORS plug is configured in the router.
|
||||
|
||||
**Recommended fix:**
|
||||
Add CORS headers to the NIP-11 response, at minimum `Access-Control-Allow-Origin: *`.
|
||||
|
||||
**Confidence:** High
|
||||
|
||||
---
|
||||
|
||||
## [Low] Event Query Deduplication Done in Elixir Instead of SQL
|
||||
|
||||
**Area:** performance
|
||||
|
||||
**Why it matters:**
|
||||
When a REQ has multiple filters, each filter runs a separate DB query, results are merged and deduplicated in Elixir using `Map.put_new/3`. This means the relay may fetch duplicate events from the DB and transfer them over the wire from PostgreSQL, only to discard them.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/storage/adapters/postgres/events.ex` lines 85–95:
|
||||
```elixir
|
||||
persisted_events =
|
||||
filters
|
||||
|> Enum.flat_map(fn filter ->
|
||||
filter |> event_query_for_filter(now, opts) |> Repo.all()
|
||||
end)
|
||||
|> deduplicate_events()
|
||||
|> sort_persisted_events()
|
||||
```
|
||||
|
||||
**Recommended fix:**
|
||||
For multiple filters, consider using SQL `UNION` or `UNION ALL` with a final `DISTINCT ON` to push deduplication to the database. Alternatively, for the common case of a single filter (which is the majority of REQ messages), this is fine as-is.
|
||||
|
||||
**Confidence:** Medium
|
||||
|
||||
---
|
||||
|
||||
## [Low] No Validation of Subscription ID Content
|
||||
|
||||
**Area:** robustness
|
||||
|
||||
**Why it matters:**
|
||||
Subscription IDs are validated for non-emptiness and max length (64 chars) but not for content. NIP-01 says subscription IDs are "arbitrary" strings, but allowing control characters, null bytes, or extremely long Unicode sequences could cause issues with logging, telemetry, or downstream systems.
|
||||
|
||||
**Evidence:**
|
||||
`lib/parrhesia/protocol.ex` line 218:
|
||||
```elixir
|
||||
defp valid_subscription_id?(subscription_id) do
|
||||
subscription_id != "" and String.length(subscription_id) <= 64
|
||||
end
|
||||
```
|
||||
|
||||
`String.length/1` counts Unicode graphemes, not bytes. A subscription ID of 64 emoji characters could be hundreds of bytes.
|
||||
|
||||
**Recommended fix:**
|
||||
Consider validating that subscription IDs contain only printable ASCII, or at least limit by byte size rather than grapheme count.
|
||||
|
||||
**Confidence:** Medium
|
||||
|
||||
---
|
||||
|
||||
# Protocol Compliance Review
|
||||
|
||||
## NIPs Implemented
|
||||
- **NIP-01**: Core protocol — substantially implemented. Critical gaps: no signature verification, lossy tags, no ephemeral handling.
|
||||
- **NIP-09**: Event deletion — partially implemented (kind 5 with `e` tags only, missing `a` tag deletion).
|
||||
- **NIP-11**: Relay information — implemented, missing CORS headers.
|
||||
- **NIP-22**: Event `created_at` limits — implemented (future skew check, configurable).
|
||||
- **NIP-40**: Expiration — implemented (storage, query filtering, periodic cleanup). Does not reject already-expired events on publish (SHOULD per spec).
|
||||
- **NIP-42**: Authentication — implemented with challenge-response. Missing relay tag validation, AUTH event staleness check.
|
||||
- **NIP-45**: COUNT — implemented with basic and HLL support. Performance concern with in-memory deduplication.
|
||||
- **NIP-50**: Search — implemented via ILIKE. SQL injection concern. No full-text search.
|
||||
- **NIP-70**: Protected events — implemented (tag check, pubkey match). Note: protected tag `["-"]` is lost on retrieval due to single-element tag storage bug.
|
||||
- **NIP-77**: Negentropy — stub implementation (session tracking only, no actual reconciliation logic).
|
||||
- **NIP-86**: Relay management — implemented with NIP-98 auth and audit logging.
|
||||
- **NIP-98**: HTTP auth — implemented with freshness check.
|
||||
- **MARMOT**: Kinds 443–449, 1059, 10050–10051 — validation and policy enforcement implemented.
|
||||
|
||||
## Non-Compliant Behaviours
|
||||
1. **No signature verification** — violates NIP-01 MUST.
|
||||
2. **Lossy tag storage** — violates NIP-01 data integrity.
|
||||
3. **Ephemeral events persisted** — violates NIP-01 SHOULD NOT store.
|
||||
4. **AUTH missing relay tag check** — violates NIP-42 MUST.
|
||||
5. **NIP-09 missing `a` tag deletion** — partial implementation.
|
||||
6. **NIP-40: expired events accepted on publish** — violates SHOULD reject.
|
||||
7. **NIP-11 missing CORS** — violates MUST.
|
||||
|
||||
## Ambiguous Areas
|
||||
- **NIP-01 replaceable event tie-breaking**: implemented correctly (lowest ID wins).
|
||||
- **Deletion event storage**: kind 5 events are stored (correct — relay SHOULD continue publishing deletion requests).
|
||||
- **NIP-45 HLL**: the HLL payload generation is a placeholder (hash of filter+count), not actual HyperLogLog registers. Clients expecting real HLL data will get nonsense.
|
||||
|
||||
---
|
||||
|
||||
# Robustness Review
|
||||
|
||||
The relay handles several failure modes well:
|
||||
- WebSocket binary frames are rejected with a clear notice.
|
||||
- Invalid JSON returns a structured NOTICE.
|
||||
- GenServer exits are caught with `catch :exit` patterns throughout the connection handler.
|
||||
- Outbound queue has configurable backpressure (close, drop_oldest, drop_newest).
|
||||
- Subscription limits are enforced per connection.
|
||||
- Process monitors clean up subscription index entries when connections die.
|
||||
|
||||
**Key resilience gaps:**
|
||||
1. **No ingest rate limiting** — one client can monopolise the relay.
|
||||
2. **No payload size enforcement** — oversized frames/events are processed.
|
||||
3. **Unbounded tag count** — an event with 100,000 tags will generate 100,000 DB inserts in a single transaction.
|
||||
4. **No filter complexity limits** — a filter with hundreds of tag values generates large `ANY(...)` queries.
|
||||
5. **COUNT query memory explosion** — large counts materialise all IDs in memory.
|
||||
6. **No timeout on DB queries** — a slow query (e.g., adversarial search pattern) blocks the connection process indefinitely.
|
||||
7. **Single-GenServer bottleneck** — Subscription Index serialises all fanout lookups.
|
||||
|
||||
**Can one bad client destabilise the relay?** Yes. Through event spam (no rate limit), adversarial search patterns (LIKE injection), or large COUNT queries (memory exhaustion).
|
||||
|
||||
---
|
||||
|
||||
# Security Review
|
||||
|
||||
**Primary Attack Surfaces:**
|
||||
1. **WebSocket ingress** — unauthenticated by default, no rate limiting, no payload size enforcement.
|
||||
2. **NIP-50 search** — LIKE pattern injection enables CPU/IO exhaustion.
|
||||
3. **NIP-86 management API** — properly gated by NIP-98, but `management_auth_required` is a config flag that defaults to `true`. If misconfigured, management API is open.
|
||||
4. **Event forgery** — no signature verification means complete trust of client-provided pubkeys.
|
||||
|
||||
**DoS Vectors (ranked by impact):**
|
||||
1. Event spam flood (unbounded ingest rate).
|
||||
2. Adversarial ILIKE search patterns (DB CPU exhaustion).
|
||||
3. Large COUNT queries (memory exhaustion).
|
||||
4. Many concurrent subscriptions with broad filters (fanout amplification).
|
||||
5. Oversized events with thousands of tags (transaction bloat).
|
||||
6. Rapid REQ/CLOSE cycling (subscription index churn through single GenServer).
|
||||
|
||||
**Authentication/Authorization:**
|
||||
- NIP-42 AUTH flow works but is weakened by missing relay tag validation.
|
||||
- Protected event enforcement is correct (pubkey match required).
|
||||
- Giftwrap (kind 1059) access control is properly implemented.
|
||||
- Management API NIP-98 auth is solid with freshness check.
|
||||
|
||||
**No dynamic atom creation risks found.** Method names in admin are handled as strings. No `String.to_atom` or unsafe deserialization patterns detected.
|
||||
|
||||
**Information leakage:** Error messages in some paths use `inspect(reason)` which could leak internal Elixir terms to clients (e.g., `connection.ex` line 297, line 353, line 389). Consider sanitising.
|
||||
|
||||
---
|
||||
|
||||
# Performance Review
|
||||
|
||||
**Likely Hotspots:**
|
||||
1. **Event ingest path**: validation → policy check → DB transaction (3 inserts + possible state table upsert). The transaction is the bottleneck — each event requires at minimum 2 DB round-trips (event_ids + events insert), plus tag inserts.
|
||||
2. **Subscription fanout**: `Index.candidate_subscription_keys/1` through GenServer.call — serialisation point.
|
||||
3. **Query path**: per-filter DB queries without UNION, Elixir-side deduplication and sorting.
|
||||
4. **COUNT path**: materialises all matching IDs in memory.
|
||||
5. **Search (ILIKE)**: sequential scan without text search index.
|
||||
|
||||
**Missing Indexes:**
|
||||
- No index on `events.content` for search (NIP-50). ILIKE requires sequential scan.
|
||||
- No composite index on `events (pubkey, kind, created_at)` for replaceable event queries.
|
||||
- The `event_tags` index on `(name, value, event_created_at)` is good for tag queries.
|
||||
|
||||
**Scaling Ceiling:**
|
||||
- **DB-bound** at moderate load (event ingest transactions).
|
||||
- **CPU-bound** at high event rates if signature verification is added.
|
||||
- **Memory-bound** if adversarial COUNT queries are submitted.
|
||||
- **GenServer-bound** on fanout at high subscription counts.
|
||||
|
||||
**Top 3 Performance Improvements by Impact:**
|
||||
1. **Make subscription index reads lock-free** — read ETS directly instead of through GenServer (effort: S, impact: High).
|
||||
2. **Push COUNT to SQL** — `SELECT COUNT(DISTINCT id)` instead of materialising (effort: S, impact: High).
|
||||
3. **Add full-text search index** — `GIN` index on `tsvector` column for NIP-50, replacing ILIKE (effort: M, impact: High).
|
||||
|
||||
---
|
||||
|
||||
# Database and Schema Review
|
||||
|
||||
**Strengths:**
|
||||
- Range partitioning on `events.created_at` — good for time-based queries and partition pruning.
|
||||
- Composite primary key `(created_at, id)` enables partition pruning on most queries.
|
||||
- `event_ids` table for deduplication with `on_conflict: :nothing` (SQL `ON CONFLICT DO NOTHING`) — clean idempotency.
|
||||
- State tables for replaceable/addressable events — correct approach with proper upsert/retire logic.
|
||||
- Partial indexes on `expires_at` and `deleted_at` — avoids indexing NULLs.
|
||||
- FK cascade from `event_tags` to `events` — ensures tag cleanup on delete.
|
||||
|
||||
**Weaknesses:**
|
||||
1. **No unique index on `events.id`** — only a non-unique index. Two events with the same ID but different `created_at` could theoretically exist (the `event_ids` table prevents this at the application level, but there's no DB-level constraint on the events table).
|
||||
2. **`event_tags` stores only name+value** — data loss for multi-element tags (Critical finding above).
|
||||
3. **No `content` index for search** — ILIKE without index = sequential scan.
|
||||
4. **`events.kind` is `integer` (4 bytes)** — NIP-01 allows kinds 0–65535, so `smallint` (2 bytes) would suffice and save space.
|
||||
5. **No retention/partitioning strategy documented** — the default partition catches everything. No automated partition creation or cleanup.
|
||||
6. **`d_tag` column in events table** — redundant with tag storage (but useful for addressable event queries). Not indexed, so no direct benefit. The addressable_event_state table handles this.
|
||||
7. **No index on `events (id, created_at)` for deletion queries** — `delete_by_request` queries by `id` and `pubkey` but the `id` index doesn't include `pubkey`.
|
||||
|
||||
**Missing DB-Level Invariants:**
|
||||
- Events table should have a unique constraint on `id` (across partitions, which is tricky with range partitioning — the `event_ids` table compensates).
|
||||
- No CHECK constraint on `kind >= 0`.
|
||||
- No CHECK constraint on `created_at >= 0`.
|
||||
|
||||
---
|
||||
|
||||
# Test Review
|
||||
|
||||
**Well-Covered Areas:**
|
||||
- Protocol encode/decode (`protocol_test.exs`)
|
||||
- Filter validation and matching, including property-based tests (`filter_test.exs`, `filter_property_test.exs`)
|
||||
- Event validation including MARMOT-specific kinds (`event_validator_marmot_test.exs`)
|
||||
- Policy enforcement (`event_policy_test.exs`)
|
||||
- Storage adapter contract compliance (`adapter_contract_test.exs`, `behaviour_contracts_test.exs`)
|
||||
- PostgreSQL event lifecycle (put, query, delete, replace) (`events_lifecycle_test.exs`)
|
||||
- WebSocket connection lifecycle (`connection_test.exs`)
|
||||
- Auth challenges (`challenges_test.exs`)
|
||||
- NIP-98 HTTP auth (`nip98_test.exs`)
|
||||
- Fault injection (`fault_injection_test.exs`)
|
||||
- Query plan regression (`query_plan_regression_test.exs`) — excellent practice
|
||||
|
||||
**Missing Critical Tests:**
|
||||
1. **No signature verification tests** (because the feature doesn't exist).
|
||||
2. **No test for tag data integrity** — round-trip test that verifies events with multi-element tags are returned unchanged.
|
||||
3. **No ephemeral event test** — verifying kind 20000+ events are not persisted.
|
||||
4. **No NIP-09 `a` tag deletion test**.
|
||||
5. **No adversarial input tests** — LIKE injection patterns, oversized payloads, events with extreme tag counts.
|
||||
6. **No concurrent write tests** — multiple processes writing the same replaceable event simultaneously.
|
||||
7. **No AUTH relay tag validation test**.
|
||||
8. **No test for expired event rejection on publish** (NIP-40).
|
||||
|
||||
**5 Most Valuable Tests to Add:**
|
||||
1. Round-trip tag integrity: publish event with multi-element tags, query back, verify tags are identical.
|
||||
2. Signature verification: publish event with wrong signature, verify rejection.
|
||||
3. Concurrent replaceable event upsert: 10 processes writing same pubkey+kind, verify only one winner.
|
||||
4. Adversarial search pattern: verify ILIKE with `%` metacharacters doesn't cause excessive query time.
|
||||
5. Ingest rate limiting under load: verify relay remains responsive under event flood.
|
||||
|
||||
---
|
||||
|
||||
# Quick Wins
|
||||
|
||||
| Change | Impact | Effort |
|
||||
|--------|--------|--------|
|
||||
| Add Schnorr signature verification | Critical | M |
|
||||
| Store full tags (add `tags` JSONB column to events) | Critical | M |
|
||||
| Escape LIKE metacharacters in search | High | S |
|
||||
| Read subscription index ETS directly (bypass GenServer for reads) | High | S |
|
||||
| Push COUNT to SQL `COUNT(DISTINCT)` | High | S |
|
||||
| Add `max_frame_size` to Bandit WebSocket options | High | S |
|
||||
| Add AUTH relay tag validation | High | S |
|
||||
| Skip persistence for ephemeral events | High | S |
|
||||
| Add payload size check before JSON decode | High | S |
|
||||
| Add CORS headers to NIP-11 endpoint | Low | S |
|
||||
| Create ETS moderation cache table in supervisor | Medium | S |
|
||||
| Add `created_at` staleness check to AUTH handler | Medium | S |
|
||||
|
||||
---
|
||||
|
||||
# Deep Refactor Opportunities
|
||||
|
||||
1. **Full-text search for NIP-50**: Replace ILIKE with PostgreSQL `tsvector`/`tsquery` and a GIN index. This eliminates the LIKE injection vector and dramatically improves search performance. Effort: M. Worth it if search is a used feature.
|
||||
|
||||
2. **SQL UNION for multi-filter queries**: Instead of running N queries and deduplicating in Elixir, build a single SQL query with UNION ALL and DISTINCT. Reduces DB round-trips and pushes deduplication to the engine. Effort: M.
|
||||
|
||||
3. **Per-connection rate limiter**: Add a token-bucket rate limiter to the connection state that throttles EVENT submissions. Consider a pluggable rate-limiting behaviour for flexibility. Effort: M.
|
||||
|
||||
4. **Event partitioning strategy**: Automate partition creation (monthly or weekly) and implement partition detach/archive for old data. The current default partition will accumulate all data forever. Effort: L.
|
||||
|
||||
5. **Batched tag insertion**: Instead of `Repo.insert_all` for tags within the transaction, accumulate tags and use a single multi-row insert with explicit conflict handling. Reduces round-trips for events with many tags. Effort: S.
|
||||
|
||||
---
|
||||
|
||||
# Final Verdict
|
||||
|
||||
**Would I trust this relay:**
|
||||
- **For local development:** Yes, with awareness of the signature bypass.
|
||||
- **For a small private relay (trusted clients):** Conditionally, after fixing lossy tags. The signature gap is tolerable only if all clients are trusted.
|
||||
- **For a medium public relay:** No. Missing rate limiting, signature verification, and the LIKE injection vector make it unsafe.
|
||||
- **For a hostile public internet deployment:** Absolutely not.
|
||||
|
||||
---
|
||||
|
||||
**Ship now?** No.
|
||||
|
||||
**Top blockers before deployment:**
|
||||
|
||||
1. **Add Schnorr signature verification** — without this, the relay has no identity security.
|
||||
2. **Fix lossy tag storage** — store full tag arrays so events survive round-trips intact.
|
||||
3. **Handle ephemeral events** — don't persist kinds 20000–29999.
|
||||
4. **Escape LIKE metacharacters in search** — prevent DoS via adversarial patterns.
|
||||
5. **Enforce payload size limits** — pass `max_frame_size` to Bandit, check payload size before decode.
|
||||
6. **Add basic ingest rate limiting** — per-connection token bucket at minimum.
|
||||
7. **Add AUTH relay tag validation** — prevent cross-relay AUTH replay.
|
||||
|
||||
After these seven fixes, the relay would be suitable for a private deployment with moderate trust. Public deployment would additionally require:
|
||||
- Per-IP rate limiting
|
||||
- Full-text search index (replacing ILIKE)
|
||||
- SQL-based COUNT
|
||||
- Lock-free subscription index reads
|
||||
- NIP-09 `a` tag deletion support for addressable events
|
||||
- Comprehensive adversarial input testing
|
||||
@@ -13,6 +13,7 @@ defmodule Parrhesia.Application do
|
||||
Parrhesia.Auth.Supervisor,
|
||||
Parrhesia.Policy.Supervisor,
|
||||
Parrhesia.Web.Endpoint,
|
||||
Parrhesia.Web.MetricsEndpoint,
|
||||
Parrhesia.Tasks.Supervisor
|
||||
]
|
||||
|
||||
|
||||
@@ -7,10 +7,17 @@ defmodule Parrhesia.Negentropy.Sessions do
|
||||
|
||||
@type session_key :: {pid(), String.t()}
|
||||
|
||||
@default_max_payload_bytes 4096
|
||||
@default_max_sessions_per_owner 8
|
||||
@default_max_total_sessions 10_000
|
||||
@default_max_idle_seconds 60
|
||||
@default_sweep_interval_seconds 10
|
||||
@sweep_idle_sessions :sweep_idle_sessions
|
||||
|
||||
@spec start_link(keyword()) :: GenServer.on_start()
|
||||
def start_link(opts \\ []) do
|
||||
name = Keyword.get(opts, :name, __MODULE__)
|
||||
GenServer.start_link(__MODULE__, :ok, name: name)
|
||||
GenServer.start_link(__MODULE__, opts, name: name)
|
||||
end
|
||||
|
||||
@spec open(GenServer.server(), pid(), String.t(), map()) :: {:ok, map()} | {:error, term()}
|
||||
@@ -32,18 +39,51 @@ defmodule Parrhesia.Negentropy.Sessions do
|
||||
end
|
||||
|
||||
@impl true
|
||||
def init(:ok) do
|
||||
{:ok, %{sessions: %{}, monitors: %{}}}
|
||||
def init(opts) do
|
||||
max_idle_ms =
|
||||
normalize_positive_integer(Keyword.get(opts, :max_idle_seconds), max_idle_seconds()) * 1000
|
||||
|
||||
sweep_interval_ms =
|
||||
normalize_positive_integer(
|
||||
Keyword.get(opts, :sweep_interval_seconds),
|
||||
sweep_interval_seconds()
|
||||
) *
|
||||
1000
|
||||
|
||||
state = %{
|
||||
sessions: %{},
|
||||
monitors: %{},
|
||||
max_payload_bytes:
|
||||
normalize_positive_integer(Keyword.get(opts, :max_payload_bytes), max_payload_bytes()),
|
||||
max_sessions_per_owner:
|
||||
normalize_positive_integer(
|
||||
Keyword.get(opts, :max_sessions_per_owner),
|
||||
max_sessions_per_owner()
|
||||
),
|
||||
max_total_sessions:
|
||||
normalize_positive_integer(Keyword.get(opts, :max_total_sessions), max_total_sessions()),
|
||||
max_idle_ms: max_idle_ms,
|
||||
sweep_interval_ms: sweep_interval_ms
|
||||
}
|
||||
|
||||
:ok = schedule_idle_sweep(sweep_interval_ms)
|
||||
|
||||
{:ok, state}
|
||||
end
|
||||
|
||||
@impl true
|
||||
def handle_call({:open, owner_pid, subscription_id, params}, _from, state) do
|
||||
key = {owner_pid, subscription_id}
|
||||
|
||||
with :ok <- validate_payload_size(params, state.max_payload_bytes),
|
||||
:ok <- enforce_session_limits(state, owner_pid, key) do
|
||||
now_ms = System.monotonic_time(:millisecond)
|
||||
|
||||
session = %{
|
||||
cursor: 0,
|
||||
params: params,
|
||||
opened_at: System.system_time(:second)
|
||||
opened_at: System.system_time(:second),
|
||||
last_active_at_ms: now_ms
|
||||
}
|
||||
|
||||
state =
|
||||
@@ -52,6 +92,10 @@ defmodule Parrhesia.Negentropy.Sessions do
|
||||
|> put_in([:sessions, key], session)
|
||||
|
||||
{:reply, {:ok, %{"status" => "open", "cursor" => 0}}, state}
|
||||
else
|
||||
{:error, reason} ->
|
||||
{:reply, {:error, reason}, state}
|
||||
end
|
||||
end
|
||||
|
||||
def handle_call({:message, owner_pid, subscription_id, payload}, _from, state) do
|
||||
@@ -62,22 +106,68 @@ defmodule Parrhesia.Negentropy.Sessions do
|
||||
{:reply, {:error, :unknown_session}, state}
|
||||
|
||||
session ->
|
||||
case validate_payload_size(payload, state.max_payload_bytes) do
|
||||
:ok ->
|
||||
cursor = session.cursor + 1
|
||||
|
||||
next_session = %{session | cursor: cursor, params: Map.merge(session.params, payload)}
|
||||
next_session = %{
|
||||
session
|
||||
| cursor: cursor,
|
||||
last_active_at_ms: System.monotonic_time(:millisecond)
|
||||
}
|
||||
|
||||
state = put_in(state, [:sessions, key], next_session)
|
||||
|
||||
{:reply, {:ok, %{"status" => "ack", "cursor" => cursor}}, state}
|
||||
|
||||
{:error, reason} ->
|
||||
{:reply, {:error, reason}, state}
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def handle_call({:close, owner_pid, subscription_id}, _from, state) do
|
||||
key = {owner_pid, subscription_id}
|
||||
state = update_in(state.sessions, &Map.delete(&1, key))
|
||||
|
||||
state =
|
||||
state
|
||||
|> update_in([:sessions], &Map.delete(&1, key))
|
||||
|> maybe_remove_monitor_if_owner_has_no_sessions(owner_pid)
|
||||
|
||||
{:reply, :ok, state}
|
||||
end
|
||||
|
||||
@impl true
|
||||
def handle_info(@sweep_idle_sessions, state) do
|
||||
now_ms = System.monotonic_time(:millisecond)
|
||||
|
||||
sessions =
|
||||
Enum.reduce(state.sessions, %{}, fn {key, session}, acc ->
|
||||
idle_ms = now_ms - Map.get(session, :last_active_at_ms, now_ms)
|
||||
|
||||
if idle_ms >= state.max_idle_ms do
|
||||
acc
|
||||
else
|
||||
Map.put(acc, key, session)
|
||||
end
|
||||
end)
|
||||
|
||||
owner_pids =
|
||||
sessions
|
||||
|> Map.keys()
|
||||
|> Enum.map(fn {owner_pid, _subscription_id} -> owner_pid end)
|
||||
|> MapSet.new()
|
||||
|
||||
state =
|
||||
state
|
||||
|> Map.put(:sessions, sessions)
|
||||
|> clear_monitors_without_sessions(owner_pids)
|
||||
|
||||
:ok = schedule_idle_sweep(state.sweep_interval_ms)
|
||||
|
||||
{:noreply, state}
|
||||
end
|
||||
|
||||
def handle_info({:DOWN, monitor_ref, :process, owner_pid, _reason}, state) do
|
||||
case Map.get(state.monitors, owner_pid) do
|
||||
^monitor_ref ->
|
||||
@@ -95,6 +185,16 @@ defmodule Parrhesia.Negentropy.Sessions do
|
||||
|
||||
def handle_info(_message, state), do: {:noreply, state}
|
||||
|
||||
defp clear_monitors_without_sessions(state, owner_pids) do
|
||||
Enum.reduce(Map.keys(state.monitors), state, fn owner_pid, acc ->
|
||||
if MapSet.member?(owner_pids, owner_pid) do
|
||||
acc
|
||||
else
|
||||
maybe_remove_monitor(acc, owner_pid)
|
||||
end
|
||||
end)
|
||||
end
|
||||
|
||||
defp remove_owner_sessions(state, owner_pid) do
|
||||
update_in(state.sessions, fn sessions ->
|
||||
sessions
|
||||
@@ -103,6 +203,39 @@ defmodule Parrhesia.Negentropy.Sessions do
|
||||
end)
|
||||
end
|
||||
|
||||
defp validate_payload_size(payload, max_payload_bytes) do
|
||||
if :erlang.external_size(payload) <= max_payload_bytes do
|
||||
:ok
|
||||
else
|
||||
{:error, :payload_too_large}
|
||||
end
|
||||
end
|
||||
|
||||
defp enforce_session_limits(state, owner_pid, key) do
|
||||
if Map.has_key?(state.sessions, key) do
|
||||
:ok
|
||||
else
|
||||
total_sessions = map_size(state.sessions)
|
||||
|
||||
cond do
|
||||
total_sessions >= state.max_total_sessions ->
|
||||
{:error, :session_limit_reached}
|
||||
|
||||
owner_session_count(state.sessions, owner_pid) >= state.max_sessions_per_owner ->
|
||||
{:error, :owner_session_limit_reached}
|
||||
|
||||
true ->
|
||||
:ok
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
defp owner_session_count(sessions, owner_pid) do
|
||||
Enum.count(sessions, fn {{session_owner, _subscription_id}, _session} ->
|
||||
session_owner == owner_pid
|
||||
end)
|
||||
end
|
||||
|
||||
defp ensure_monitor(state, owner_pid) do
|
||||
case Map.has_key?(state.monitors, owner_pid) do
|
||||
true -> state
|
||||
@@ -110,6 +243,14 @@ defmodule Parrhesia.Negentropy.Sessions do
|
||||
end
|
||||
end
|
||||
|
||||
defp maybe_remove_monitor_if_owner_has_no_sessions(state, owner_pid) do
|
||||
if owner_session_count(state.sessions, owner_pid) == 0 do
|
||||
maybe_remove_monitor(state, owner_pid)
|
||||
else
|
||||
state
|
||||
end
|
||||
end
|
||||
|
||||
defp maybe_remove_monitor(state, owner_pid) do
|
||||
{monitor_ref, monitors} = Map.pop(state.monitors, owner_pid)
|
||||
|
||||
@@ -119,4 +260,44 @@ defmodule Parrhesia.Negentropy.Sessions do
|
||||
|
||||
Map.put(state, :monitors, monitors)
|
||||
end
|
||||
|
||||
defp schedule_idle_sweep(sweep_interval_ms) do
|
||||
_timer_ref = Process.send_after(self(), @sweep_idle_sessions, sweep_interval_ms)
|
||||
:ok
|
||||
end
|
||||
|
||||
defp max_payload_bytes do
|
||||
:parrhesia
|
||||
|> Application.get_env(:limits, [])
|
||||
|> Keyword.get(:max_negentropy_payload_bytes, @default_max_payload_bytes)
|
||||
end
|
||||
|
||||
defp max_sessions_per_owner do
|
||||
:parrhesia
|
||||
|> Application.get_env(:limits, [])
|
||||
|> Keyword.get(:max_negentropy_sessions_per_connection, @default_max_sessions_per_owner)
|
||||
end
|
||||
|
||||
defp max_total_sessions do
|
||||
:parrhesia
|
||||
|> Application.get_env(:limits, [])
|
||||
|> Keyword.get(:max_negentropy_total_sessions, @default_max_total_sessions)
|
||||
end
|
||||
|
||||
defp max_idle_seconds do
|
||||
:parrhesia
|
||||
|> Application.get_env(:limits, [])
|
||||
|> Keyword.get(:negentropy_session_idle_timeout_seconds, @default_max_idle_seconds)
|
||||
end
|
||||
|
||||
defp sweep_interval_seconds do
|
||||
:parrhesia
|
||||
|> Application.get_env(:limits, [])
|
||||
|> Keyword.get(:negentropy_session_sweep_interval_seconds, @default_sweep_interval_seconds)
|
||||
end
|
||||
|
||||
defp normalize_positive_integer(value, _default) when is_integer(value) and value > 0,
|
||||
do: value
|
||||
|
||||
defp normalize_positive_integer(_value, default), do: default
|
||||
end
|
||||
|
||||
1
lib/parrhesia/postgres_types.ex
Normal file
1
lib/parrhesia/postgres_types.ex
Normal file
@@ -0,0 +1 @@
|
||||
Postgrex.Types.define(Parrhesia.PostgresTypes, [], json: JSON)
|
||||
@@ -21,6 +21,7 @@ defmodule Parrhesia.Protocol.EventValidator do
|
||||
| :invalid_content
|
||||
| :invalid_sig
|
||||
| :invalid_id_hash
|
||||
| :invalid_signature
|
||||
| :invalid_marmot_keypackage_content
|
||||
| :missing_marmot_encoding_tag
|
||||
| :invalid_marmot_encoding_tag
|
||||
@@ -54,7 +55,8 @@ defmodule Parrhesia.Protocol.EventValidator do
|
||||
:ok <- validate_tags(event["tags"]),
|
||||
:ok <- validate_content(event["content"]),
|
||||
:ok <- validate_sig(event["sig"]),
|
||||
:ok <- validate_id_hash(event) do
|
||||
:ok <- validate_id_hash(event),
|
||||
:ok <- validate_signature(event) do
|
||||
validate_kind_specific(event)
|
||||
end
|
||||
end
|
||||
@@ -89,6 +91,7 @@ defmodule Parrhesia.Protocol.EventValidator do
|
||||
invalid_content: "invalid: content must be a string",
|
||||
invalid_sig: "invalid: sig must be 64-byte lowercase hex",
|
||||
invalid_id_hash: "invalid: event id does not match serialized event",
|
||||
invalid_signature: "invalid: event signature is invalid",
|
||||
invalid_marmot_keypackage_content: "invalid: kind 443 content must be non-empty base64",
|
||||
missing_marmot_encoding_tag: "invalid: kind 443 must include [\"encoding\", \"base64\"]",
|
||||
invalid_marmot_encoding_tag: "invalid: kind 443 must include [\"encoding\", \"base64\"]",
|
||||
@@ -193,6 +196,29 @@ defmodule Parrhesia.Protocol.EventValidator do
|
||||
end
|
||||
end
|
||||
|
||||
defp validate_signature(event) do
|
||||
if verify_event_signatures?() do
|
||||
verify_signature(event)
|
||||
else
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
defp verify_signature(%{"id" => id, "pubkey" => pubkey, "sig" => sig}) do
|
||||
with {:ok, id_bin} <- Base.decode16(id, case: :lower),
|
||||
{:ok, pubkey_bin} <- Base.decode16(pubkey, case: :lower),
|
||||
{:ok, sig_bin} <- Base.decode16(sig, case: :lower),
|
||||
true <- Secp256k1.schnorr_valid?(sig_bin, id_bin, pubkey_bin) do
|
||||
:ok
|
||||
else
|
||||
_other -> {:error, :invalid_signature}
|
||||
end
|
||||
rescue
|
||||
_error -> {:error, :invalid_signature}
|
||||
end
|
||||
|
||||
defp verify_signature(_event), do: {:error, :invalid_signature}
|
||||
|
||||
defp valid_tag?(tag) when is_list(tag) do
|
||||
tag != [] and Enum.all?(tag, &is_binary/1)
|
||||
end
|
||||
@@ -473,6 +499,12 @@ defmodule Parrhesia.Protocol.EventValidator do
|
||||
match?({:ok, _decoded}, Base.decode16(value, case: :lower))
|
||||
end
|
||||
|
||||
defp verify_event_signatures? do
|
||||
:parrhesia
|
||||
|> Application.get_env(:features, [])
|
||||
|> Keyword.get(:verify_event_signatures, true)
|
||||
end
|
||||
|
||||
defp max_event_future_skew_seconds do
|
||||
:parrhesia
|
||||
|> Application.get_env(:limits, [])
|
||||
|
||||
@@ -64,21 +64,49 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
|
||||
|
||||
@impl true
|
||||
def delete_by_request(_context, event) do
|
||||
delete_ids =
|
||||
deleter_pubkey = Map.get(event, "pubkey")
|
||||
|
||||
delete_event_ids =
|
||||
event
|
||||
|> Map.get("tags", [])
|
||||
|> Enum.flat_map(fn
|
||||
["e", event_id | _rest] -> [event_id]
|
||||
["e", event_id | _rest] when is_binary(event_id) -> [event_id]
|
||||
_tag -> []
|
||||
end)
|
||||
|
||||
delete_coordinates =
|
||||
event
|
||||
|> Map.get("tags", [])
|
||||
|> Enum.flat_map(fn
|
||||
["a", coordinate | _rest] when is_binary(coordinate) ->
|
||||
case parse_delete_coordinate(coordinate) do
|
||||
{:ok, parsed_coordinate} -> [parsed_coordinate]
|
||||
{:error, _reason} -> []
|
||||
end
|
||||
|
||||
_tag ->
|
||||
[]
|
||||
end)
|
||||
|
||||
coordinate_delete_ids =
|
||||
Store.get(fn state ->
|
||||
state.events
|
||||
|> Map.values()
|
||||
|> Enum.filter(fn candidate ->
|
||||
matches_delete_coordinate?(candidate, delete_coordinates, deleter_pubkey)
|
||||
end)
|
||||
|> Enum.map(& &1["id"])
|
||||
end)
|
||||
|
||||
all_delete_ids = Enum.uniq(delete_event_ids ++ coordinate_delete_ids)
|
||||
|
||||
Store.update(fn state ->
|
||||
Enum.reduce(delete_ids, state, fn event_id, acc ->
|
||||
Enum.reduce(all_delete_ids, state, fn event_id, acc ->
|
||||
update_in(acc.deleted, &MapSet.put(&1, event_id))
|
||||
end)
|
||||
end)
|
||||
|
||||
{:ok, length(delete_ids)}
|
||||
{:ok, length(all_delete_ids)}
|
||||
end
|
||||
|
||||
@impl true
|
||||
@@ -105,6 +133,47 @@ defmodule Parrhesia.Storage.Adapters.Memory.Events do
|
||||
@impl true
|
||||
def purge_expired(_opts), do: {:ok, 0}
|
||||
|
||||
defp parse_delete_coordinate(coordinate) do
|
||||
case String.split(coordinate, ":", parts: 3) do
|
||||
[kind_part, pubkey, d_tag] ->
|
||||
case Integer.parse(kind_part) do
|
||||
{kind, ""} when kind >= 0 -> {:ok, %{kind: kind, pubkey: pubkey, d_tag: d_tag}}
|
||||
_other -> {:error, :invalid_coordinate}
|
||||
end
|
||||
|
||||
_other ->
|
||||
{:error, :invalid_coordinate}
|
||||
end
|
||||
end
|
||||
|
||||
defp matches_delete_coordinate?(candidate, delete_coordinates, deleter_pubkey) do
|
||||
Enum.any?(delete_coordinates, fn coordinate ->
|
||||
coordinate.pubkey == deleter_pubkey and
|
||||
candidate["pubkey"] == deleter_pubkey and
|
||||
candidate["kind"] == coordinate.kind and
|
||||
coordinate_match_for_kind?(candidate, coordinate)
|
||||
end)
|
||||
end
|
||||
|
||||
defp coordinate_match_for_kind?(candidate, coordinate) do
|
||||
if addressable_kind?(coordinate.kind) do
|
||||
candidate_d_tag =
|
||||
candidate
|
||||
|> Map.get("tags", [])
|
||||
|> Enum.find_value("", fn
|
||||
["d", value | _rest] -> value
|
||||
_tag -> nil
|
||||
end)
|
||||
|
||||
candidate_d_tag == coordinate.d_tag
|
||||
else
|
||||
replaceable_kind?(coordinate.kind)
|
||||
end
|
||||
end
|
||||
|
||||
defp replaceable_kind?(kind), do: kind in [0, 3] or (kind >= 10_000 and kind < 20_000)
|
||||
defp addressable_kind?(kind), do: kind >= 30_000 and kind < 40_000
|
||||
|
||||
defp giftwrap_visible_to_requester?(%{"kind" => 1059} = event, requester_pubkeys) do
|
||||
requester_pubkeys != [] and
|
||||
event_targets_any_recipient?(event, requester_pubkeys)
|
||||
|
||||
@@ -56,6 +56,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
pubkey: event.pubkey,
|
||||
created_at: event.created_at,
|
||||
kind: event.kind,
|
||||
tags: event.tags,
|
||||
content: event.content,
|
||||
sig: event.sig
|
||||
}
|
||||
@@ -66,13 +67,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
{:ok, nil}
|
||||
|
||||
persisted_event ->
|
||||
tags = load_tags([{persisted_event.created_at, persisted_event.id}])
|
||||
|
||||
{:ok,
|
||||
to_nostr_event(
|
||||
persisted_event,
|
||||
Map.get(tags, {persisted_event.created_at, persisted_event.id}, [])
|
||||
)}
|
||||
{:ok, to_nostr_event(persisted_event)}
|
||||
end
|
||||
end
|
||||
end
|
||||
@@ -93,15 +88,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
|> sort_persisted_events()
|
||||
|> maybe_apply_query_limit(opts)
|
||||
|
||||
event_keys = Enum.map(persisted_events, fn event -> {event.created_at, event.id} end)
|
||||
tags_by_event = load_tags(event_keys)
|
||||
|
||||
nostr_events =
|
||||
Enum.map(persisted_events, fn event ->
|
||||
to_nostr_event(event, Map.get(tags_by_event, {event.created_at, event.id}, []))
|
||||
end)
|
||||
|
||||
{:ok, nostr_events}
|
||||
{:ok, Enum.map(persisted_events, &to_nostr_event/1)}
|
||||
end
|
||||
end
|
||||
|
||||
@@ -114,13 +101,12 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
|
||||
total_count =
|
||||
filters
|
||||
|> Enum.flat_map(fn filter ->
|
||||
filter
|
||||
|> event_id_query_for_filter(now, opts)
|
||||
|> Repo.all()
|
||||
|> event_id_union_query_for_filters(now, opts)
|
||||
|> subquery()
|
||||
|> then(fn union_query ->
|
||||
from(event in union_query, select: count(event.id, :distinct))
|
||||
end)
|
||||
|> MapSet.new()
|
||||
|> MapSet.size()
|
||||
|> Repo.one()
|
||||
|
||||
{:ok, total_count}
|
||||
end
|
||||
@@ -131,7 +117,26 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
@impl true
|
||||
def delete_by_request(_context, event) do
|
||||
with {:ok, deleter_pubkey} <- decode_hex(Map.get(event, "pubkey"), 32, :invalid_pubkey),
|
||||
{:ok, delete_ids} <- extract_delete_event_ids(event) do
|
||||
{:ok, delete_targets} <- extract_delete_targets(event) do
|
||||
deleted_at = System.system_time(:second)
|
||||
|
||||
deleted_by_id_count =
|
||||
delete_targets
|
||||
|> Map.get(:event_ids, [])
|
||||
|> delete_events_by_ids(deleter_pubkey, deleted_at)
|
||||
|
||||
deleted_by_coordinate_count =
|
||||
delete_targets
|
||||
|> Map.get(:coordinates, [])
|
||||
|> delete_events_by_coordinates(deleter_pubkey, deleted_at)
|
||||
|
||||
{:ok, deleted_by_id_count + deleted_by_coordinate_count}
|
||||
end
|
||||
end
|
||||
|
||||
defp delete_events_by_ids([], _deleter_pubkey, _deleted_at), do: 0
|
||||
|
||||
defp delete_events_by_ids(delete_ids, deleter_pubkey, deleted_at) do
|
||||
query =
|
||||
from(stored_event in "events",
|
||||
where:
|
||||
@@ -140,9 +145,55 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
is_nil(stored_event.deleted_at)
|
||||
)
|
||||
|
||||
deleted_at = System.system_time(:second)
|
||||
{count, _result} = Repo.update_all(query, set: [deleted_at: deleted_at])
|
||||
{:ok, count}
|
||||
count
|
||||
end
|
||||
|
||||
defp delete_events_by_coordinates([], _deleter_pubkey, _deleted_at), do: 0
|
||||
|
||||
defp delete_events_by_coordinates(coordinates, deleter_pubkey, deleted_at) do
|
||||
relevant_coordinates =
|
||||
Enum.filter(coordinates, fn coordinate ->
|
||||
coordinate.pubkey == deleter_pubkey and
|
||||
(replaceable_kind?(coordinate.kind) or addressable_kind?(coordinate.kind))
|
||||
end)
|
||||
|
||||
if relevant_coordinates == [] do
|
||||
0
|
||||
else
|
||||
dynamic_conditions =
|
||||
Enum.reduce(relevant_coordinates, dynamic(false), fn coordinate, acc ->
|
||||
coordinate_condition =
|
||||
coordinate_delete_condition(coordinate, deleter_pubkey)
|
||||
|
||||
dynamic([stored_event], ^acc or ^coordinate_condition)
|
||||
end)
|
||||
|
||||
query =
|
||||
from(stored_event in "events",
|
||||
where: is_nil(stored_event.deleted_at)
|
||||
)
|
||||
|> where(^dynamic_conditions)
|
||||
|
||||
{count, _result} = Repo.update_all(query, set: [deleted_at: deleted_at])
|
||||
count
|
||||
end
|
||||
end
|
||||
|
||||
defp coordinate_delete_condition(coordinate, deleter_pubkey) do
|
||||
if addressable_kind?(coordinate.kind) do
|
||||
dynamic(
|
||||
[stored_event],
|
||||
stored_event.kind == ^coordinate.kind and
|
||||
stored_event.pubkey == ^deleter_pubkey and
|
||||
stored_event.d_tag == ^coordinate.d_tag
|
||||
)
|
||||
else
|
||||
dynamic(
|
||||
[stored_event],
|
||||
stored_event.kind == ^coordinate.kind and
|
||||
stored_event.pubkey == ^deleter_pubkey
|
||||
)
|
||||
end
|
||||
end
|
||||
|
||||
@@ -545,6 +596,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
pubkey: normalized_event.pubkey,
|
||||
created_at: normalized_event.created_at,
|
||||
kind: normalized_event.kind,
|
||||
tags: normalized_event.tags,
|
||||
content: normalized_event.content,
|
||||
sig: normalized_event.sig,
|
||||
d_tag: normalized_event.d_tag,
|
||||
@@ -564,6 +616,7 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
pubkey: event.pubkey,
|
||||
created_at: event.created_at,
|
||||
kind: event.kind,
|
||||
tags: event.tags,
|
||||
content: event.content,
|
||||
sig: event.sig
|
||||
}
|
||||
@@ -598,6 +651,20 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
|> maybe_restrict_giftwrap_access(filter, opts)
|
||||
end
|
||||
|
||||
defp event_id_union_query_for_filters([], now, _opts) do
|
||||
from(event in "events",
|
||||
where: event.created_at > ^now and event.created_at < ^now,
|
||||
select: event.id
|
||||
)
|
||||
end
|
||||
|
||||
defp event_id_union_query_for_filters([first_filter | rest_filters], now, opts) do
|
||||
Enum.reduce(rest_filters, event_id_query_for_filter(first_filter, now, opts), fn filter,
|
||||
acc ->
|
||||
union_all(acc, ^event_id_query_for_filter(filter, now, opts))
|
||||
end)
|
||||
end
|
||||
|
||||
defp maybe_filter_ids(query, nil), do: query
|
||||
|
||||
defp maybe_filter_ids(query, ids) do
|
||||
@@ -624,11 +691,19 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
defp maybe_filter_search(query, nil), do: query
|
||||
|
||||
defp maybe_filter_search(query, search) when is_binary(search) and search != "" do
|
||||
where(query, [event], ilike(event.content, ^"%#{search}%"))
|
||||
escaped_search = escape_like_pattern(search)
|
||||
where(query, [event], ilike(event.content, ^"%#{escaped_search}%"))
|
||||
end
|
||||
|
||||
defp maybe_filter_search(query, _search), do: query
|
||||
|
||||
defp escape_like_pattern(search) do
|
||||
search
|
||||
|> String.replace("\\", "\\\\")
|
||||
|> String.replace("%", "\\%")
|
||||
|> String.replace("_", "\\_")
|
||||
end
|
||||
|
||||
defp filter_by_tags(query, filter) do
|
||||
filter
|
||||
|> tag_filters()
|
||||
@@ -734,44 +809,21 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
end
|
||||
end
|
||||
|
||||
defp load_tags([]), do: %{}
|
||||
|
||||
defp load_tags(event_keys) when is_list(event_keys) do
|
||||
created_at_values = Enum.map(event_keys, fn {created_at, _event_id} -> created_at end)
|
||||
event_id_values = Enum.map(event_keys, fn {_created_at, event_id} -> event_id end)
|
||||
|
||||
query =
|
||||
from(tag in "event_tags",
|
||||
where: tag.event_created_at in ^created_at_values and tag.event_id in ^event_id_values,
|
||||
order_by: [asc: tag.idx],
|
||||
select: %{
|
||||
event_created_at: tag.event_created_at,
|
||||
event_id: tag.event_id,
|
||||
name: tag.name,
|
||||
value: tag.value
|
||||
}
|
||||
)
|
||||
|
||||
query
|
||||
|> Repo.all()
|
||||
|> Enum.group_by(
|
||||
fn tag -> {tag.event_created_at, tag.event_id} end,
|
||||
fn tag -> [tag.name, tag.value] end
|
||||
)
|
||||
end
|
||||
|
||||
defp to_nostr_event(persisted_event, tags) do
|
||||
defp to_nostr_event(persisted_event) do
|
||||
%{
|
||||
"id" => Base.encode16(persisted_event.id, case: :lower),
|
||||
"pubkey" => Base.encode16(persisted_event.pubkey, case: :lower),
|
||||
"created_at" => persisted_event.created_at,
|
||||
"kind" => persisted_event.kind,
|
||||
"tags" => tags,
|
||||
"tags" => normalize_persisted_tags(persisted_event.tags),
|
||||
"content" => persisted_event.content,
|
||||
"sig" => Base.encode16(persisted_event.sig, case: :lower)
|
||||
}
|
||||
end
|
||||
|
||||
defp normalize_persisted_tags(tags) when is_list(tags), do: tags
|
||||
defp normalize_persisted_tags(_tags), do: []
|
||||
|
||||
defp decode_hex(value, bytes, reason) when is_binary(value) do
|
||||
if byte_size(value) == bytes * 2 do
|
||||
case Base.decode16(value, case: :mixed) do
|
||||
@@ -818,23 +870,69 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Events do
|
||||
end)
|
||||
end
|
||||
|
||||
defp extract_delete_event_ids(event) do
|
||||
delete_ids =
|
||||
event
|
||||
|> Map.get("tags", [])
|
||||
|> Enum.reduce([], fn
|
||||
["e", event_id | _rest], acc when is_binary(event_id) -> [event_id | acc]
|
||||
_tag, acc -> acc
|
||||
end)
|
||||
|> Enum.uniq()
|
||||
defp extract_delete_targets(event) do
|
||||
with {:ok, targets} <- parse_delete_targets(Map.get(event, "tags", [])) do
|
||||
event_ids = targets.event_ids |> Enum.uniq()
|
||||
coordinates = targets.coordinates |> Enum.uniq()
|
||||
|
||||
if delete_ids == [] do
|
||||
if event_ids == [] and coordinates == [] do
|
||||
{:error, :no_delete_targets}
|
||||
else
|
||||
{:ok, Enum.map(delete_ids, &Base.decode16!(&1, case: :mixed))}
|
||||
{:ok, %{event_ids: event_ids, coordinates: coordinates}}
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
defp parse_delete_targets(tags) when is_list(tags) do
|
||||
Enum.reduce_while(tags, {:ok, %{event_ids: [], coordinates: []}}, fn tag, {:ok, acc} ->
|
||||
case parse_delete_target(tag) do
|
||||
{:ok, {:event_id, event_id}} ->
|
||||
{:cont, {:ok, %{acc | event_ids: [event_id | acc.event_ids]}}}
|
||||
|
||||
{:ok, {:coordinate, coordinate}} ->
|
||||
{:cont, {:ok, %{acc | coordinates: [coordinate | acc.coordinates]}}}
|
||||
|
||||
:ignore ->
|
||||
{:cont, {:ok, acc}}
|
||||
|
||||
{:error, _reason} = error ->
|
||||
{:halt, error}
|
||||
end
|
||||
end)
|
||||
end
|
||||
|
||||
defp parse_delete_targets(_tags), do: {:error, :invalid_delete_target}
|
||||
|
||||
defp parse_delete_target(["e", event_id | _rest]) when is_binary(event_id) do
|
||||
case decode_hex(event_id, 32, :invalid_delete_target) do
|
||||
{:ok, decoded_event_id} -> {:ok, {:event_id, decoded_event_id}}
|
||||
{:error, _reason} -> {:error, :invalid_delete_target}
|
||||
end
|
||||
end
|
||||
|
||||
defp parse_delete_target(["a", coordinate | _rest]) when is_binary(coordinate) do
|
||||
case parse_address_coordinate(coordinate) do
|
||||
{:ok, parsed_coordinate} -> {:ok, {:coordinate, parsed_coordinate}}
|
||||
{:error, _reason} -> {:error, :invalid_delete_target}
|
||||
end
|
||||
end
|
||||
|
||||
defp parse_delete_target(_tag), do: :ignore
|
||||
|
||||
defp parse_address_coordinate(coordinate) do
|
||||
case String.split(coordinate, ":", parts: 3) do
|
||||
[kind_part, pubkey_hex, d_tag] ->
|
||||
with {kind, ""} <- Integer.parse(kind_part),
|
||||
true <- kind >= 0,
|
||||
{:ok, pubkey} <- decode_hex(pubkey_hex, 32, :invalid_delete_target) do
|
||||
{:ok, %{kind: kind, pubkey: pubkey, d_tag: d_tag}}
|
||||
else
|
||||
_other -> {:error, :invalid_delete_target}
|
||||
end
|
||||
|
||||
_other ->
|
||||
{:error, :invalid_delete_target}
|
||||
end
|
||||
rescue
|
||||
ArgumentError -> {:error, :invalid_delete_target}
|
||||
end
|
||||
|
||||
defp extract_expiration(tags) do
|
||||
|
||||
@@ -9,87 +9,111 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
|
||||
|
||||
@behaviour Parrhesia.Storage.Moderation
|
||||
|
||||
@cache_table :parrhesia_moderation_cache
|
||||
@cache_scope_sources %{
|
||||
banned_pubkeys: {"banned_pubkeys", :pubkey},
|
||||
allowed_pubkeys: {"allowed_pubkeys", :pubkey},
|
||||
banned_events: {"banned_events", :event_id},
|
||||
blocked_ips: {"blocked_ips", :ip}
|
||||
}
|
||||
|
||||
@impl true
|
||||
def ban_pubkey(_context, pubkey) do
|
||||
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey) do
|
||||
upsert_presence_table("banned_pubkeys", :pubkey, normalized_pubkey)
|
||||
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey),
|
||||
:ok <- upsert_presence_table("banned_pubkeys", :pubkey, normalized_pubkey) do
|
||||
cache_put(:banned_pubkeys, normalized_pubkey)
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def unban_pubkey(_context, pubkey) do
|
||||
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey) do
|
||||
delete_from_table("banned_pubkeys", :pubkey, normalized_pubkey)
|
||||
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey),
|
||||
:ok <- delete_from_table("banned_pubkeys", :pubkey, normalized_pubkey) do
|
||||
cache_delete(:banned_pubkeys, normalized_pubkey)
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def pubkey_banned?(_context, pubkey) do
|
||||
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey) do
|
||||
{:ok, exists_in_table?("banned_pubkeys", :pubkey, normalized_pubkey)}
|
||||
{:ok, exists_in_scope?(:banned_pubkeys, normalized_pubkey)}
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def allow_pubkey(_context, pubkey) do
|
||||
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey) do
|
||||
upsert_presence_table("allowed_pubkeys", :pubkey, normalized_pubkey)
|
||||
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey),
|
||||
:ok <- upsert_presence_table("allowed_pubkeys", :pubkey, normalized_pubkey) do
|
||||
cache_put(:allowed_pubkeys, normalized_pubkey)
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def disallow_pubkey(_context, pubkey) do
|
||||
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey) do
|
||||
delete_from_table("allowed_pubkeys", :pubkey, normalized_pubkey)
|
||||
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey),
|
||||
:ok <- delete_from_table("allowed_pubkeys", :pubkey, normalized_pubkey) do
|
||||
cache_delete(:allowed_pubkeys, normalized_pubkey)
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def pubkey_allowed?(_context, pubkey) do
|
||||
with {:ok, normalized_pubkey} <- normalize_hex_or_binary(pubkey, 32, :invalid_pubkey) do
|
||||
{:ok, exists_in_table?("allowed_pubkeys", :pubkey, normalized_pubkey)}
|
||||
{:ok, exists_in_scope?(:allowed_pubkeys, normalized_pubkey)}
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def ban_event(_context, event_id) do
|
||||
with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id) do
|
||||
upsert_presence_table("banned_events", :event_id, normalized_event_id)
|
||||
with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id),
|
||||
:ok <- upsert_presence_table("banned_events", :event_id, normalized_event_id) do
|
||||
cache_put(:banned_events, normalized_event_id)
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def unban_event(_context, event_id) do
|
||||
with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id) do
|
||||
delete_from_table("banned_events", :event_id, normalized_event_id)
|
||||
with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id),
|
||||
:ok <- delete_from_table("banned_events", :event_id, normalized_event_id) do
|
||||
cache_delete(:banned_events, normalized_event_id)
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def event_banned?(_context, event_id) do
|
||||
with {:ok, normalized_event_id} <- normalize_hex_or_binary(event_id, 32, :invalid_event_id) do
|
||||
{:ok, exists_in_table?("banned_events", :event_id, normalized_event_id)}
|
||||
{:ok, exists_in_scope?(:banned_events, normalized_event_id)}
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def block_ip(_context, ip_address) do
|
||||
with {:ok, normalized_ip} <- normalize_ip(ip_address) do
|
||||
upsert_presence_table("blocked_ips", :ip, normalized_ip)
|
||||
with {:ok, normalized_ip} <- normalize_ip(ip_address),
|
||||
:ok <- upsert_presence_table("blocked_ips", :ip, normalized_ip) do
|
||||
cache_put(:blocked_ips, normalized_ip)
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def unblock_ip(_context, ip_address) do
|
||||
with {:ok, normalized_ip} <- normalize_ip(ip_address) do
|
||||
delete_from_table("blocked_ips", :ip, normalized_ip)
|
||||
with {:ok, normalized_ip} <- normalize_ip(ip_address),
|
||||
:ok <- delete_from_table("blocked_ips", :ip, normalized_ip) do
|
||||
cache_delete(:blocked_ips, normalized_ip)
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def ip_blocked?(_context, ip_address) do
|
||||
with {:ok, normalized_ip} <- normalize_ip(ip_address) do
|
||||
{:ok, exists_in_table?("blocked_ips", :ip, normalized_ip)}
|
||||
{:ok, exists_in_scope?(:blocked_ips, normalized_ip)}
|
||||
end
|
||||
end
|
||||
|
||||
@@ -122,7 +146,96 @@ defmodule Parrhesia.Storage.Adapters.Postgres.Moderation do
|
||||
:ok
|
||||
end
|
||||
|
||||
defp exists_in_table?(table, field, value) do
|
||||
defp exists_in_scope?(scope, value) do
|
||||
{table, field} = cache_scope_source!(scope)
|
||||
|
||||
if moderation_cache_enabled?() do
|
||||
case cache_table_ref() do
|
||||
:undefined ->
|
||||
exists_in_table_db?(table, field, value)
|
||||
|
||||
cache_table ->
|
||||
ensure_cache_scope_loaded(scope, cache_table)
|
||||
:ets.member(cache_table, cache_member_key(scope, value))
|
||||
end
|
||||
else
|
||||
exists_in_table_db?(table, field, value)
|
||||
end
|
||||
end
|
||||
|
||||
defp ensure_cache_scope_loaded(scope, table) do
|
||||
loaded_key = cache_loaded_key(scope)
|
||||
|
||||
if :ets.member(table, loaded_key) do
|
||||
:ok
|
||||
else
|
||||
{db_table, db_field} = cache_scope_source!(scope)
|
||||
values = load_scope_values(db_table, db_field)
|
||||
|
||||
entries = Enum.map(values, &{cache_member_key(scope, &1), true})
|
||||
|
||||
if entries != [] do
|
||||
true = :ets.insert(table, entries)
|
||||
end
|
||||
|
||||
true = :ets.insert(table, {loaded_key, true})
|
||||
:ok
|
||||
end
|
||||
end
|
||||
|
||||
defp load_scope_values(table, field) do
|
||||
query =
|
||||
from(record in table,
|
||||
select: field(record, ^field)
|
||||
)
|
||||
|
||||
Repo.all(query)
|
||||
end
|
||||
|
||||
defp cache_put(scope, value) do
|
||||
if moderation_cache_enabled?() do
|
||||
case cache_table_ref() do
|
||||
:undefined -> :ok
|
||||
cache_table -> true = :ets.insert(cache_table, {cache_member_key(scope, value), true})
|
||||
end
|
||||
end
|
||||
|
||||
:ok
|
||||
end
|
||||
|
||||
defp cache_delete(scope, value) do
|
||||
if moderation_cache_enabled?() do
|
||||
case cache_table_ref() do
|
||||
:undefined -> :ok
|
||||
cache_table -> true = :ets.delete(cache_table, cache_member_key(scope, value))
|
||||
end
|
||||
end
|
||||
|
||||
:ok
|
||||
end
|
||||
|
||||
defp cache_scope_source!(scope), do: Map.fetch!(@cache_scope_sources, scope)
|
||||
|
||||
defp cache_loaded_key(scope), do: {:loaded, scope}
|
||||
|
||||
defp cache_member_key(scope, value), do: {:member, scope, value}
|
||||
|
||||
defp cache_table_ref do
|
||||
case :ets.whereis(@cache_table) do
|
||||
:undefined -> :undefined
|
||||
_table_ref -> @cache_table
|
||||
end
|
||||
end
|
||||
|
||||
defp moderation_cache_enabled? do
|
||||
case Application.get_env(:parrhesia, :moderation_cache_enabled, true) do
|
||||
true -> true
|
||||
false -> false
|
||||
_other -> true
|
||||
end
|
||||
end
|
||||
|
||||
defp exists_in_table_db?(table, field, value) do
|
||||
query =
|
||||
from(record in table,
|
||||
where: field(record, ^field) == ^value,
|
||||
|
||||
28
lib/parrhesia/storage/adapters/postgres/moderation_cache.ex
Normal file
28
lib/parrhesia/storage/adapters/postgres/moderation_cache.ex
Normal file
@@ -0,0 +1,28 @@
|
||||
defmodule Parrhesia.Storage.Adapters.Postgres.ModerationCache do
|
||||
@moduledoc """
|
||||
ETS owner process for moderation cache tables.
|
||||
"""
|
||||
|
||||
use GenServer
|
||||
|
||||
@cache_table :parrhesia_moderation_cache
|
||||
|
||||
@spec start_link(keyword()) :: GenServer.on_start()
|
||||
def start_link(opts \\ []) do
|
||||
GenServer.start_link(__MODULE__, :ok, opts)
|
||||
end
|
||||
|
||||
@impl true
|
||||
def init(:ok) do
|
||||
_table =
|
||||
:ets.new(@cache_table, [
|
||||
:named_table,
|
||||
:set,
|
||||
:public,
|
||||
read_concurrency: true,
|
||||
write_concurrency: true
|
||||
])
|
||||
|
||||
{:ok, %{}}
|
||||
end
|
||||
end
|
||||
@@ -24,11 +24,28 @@ defmodule Parrhesia.Storage.Archiver do
|
||||
Repo.all(query)
|
||||
end
|
||||
|
||||
@identifier_pattern ~r/^[a-zA-Z_][a-zA-Z0-9_]*$/
|
||||
|
||||
@doc """
|
||||
Generates an archive SQL statement for the given partition.
|
||||
"""
|
||||
@spec archive_sql(String.t(), String.t()) :: String.t()
|
||||
def archive_sql(partition_name, archive_table_name) do
|
||||
"INSERT INTO #{archive_table_name} SELECT * FROM #{partition_name};"
|
||||
quoted_archive_table_name = quote_identifier!(archive_table_name)
|
||||
quoted_partition_name = quote_identifier!(partition_name)
|
||||
|
||||
"INSERT INTO #{quoted_archive_table_name} SELECT * FROM #{quoted_partition_name};"
|
||||
end
|
||||
|
||||
defp quote_identifier!(identifier) when is_binary(identifier) do
|
||||
if Regex.match?(@identifier_pattern, identifier) do
|
||||
~s("#{identifier}")
|
||||
else
|
||||
raise ArgumentError, "invalid SQL identifier: #{inspect(identifier)}"
|
||||
end
|
||||
end
|
||||
|
||||
defp quote_identifier!(identifier) do
|
||||
raise ArgumentError, "invalid SQL identifier: #{inspect(identifier)}"
|
||||
end
|
||||
end
|
||||
|
||||
@@ -12,6 +12,8 @@ defmodule Parrhesia.Storage.Supervisor do
|
||||
@impl true
|
||||
def init(_init_arg) do
|
||||
children = [
|
||||
{Parrhesia.Storage.Adapters.Postgres.ModerationCache,
|
||||
name: Parrhesia.Storage.Adapters.Postgres.ModerationCache},
|
||||
Parrhesia.Repo
|
||||
]
|
||||
|
||||
|
||||
@@ -11,6 +11,13 @@ defmodule Parrhesia.Subscriptions.Index do
|
||||
alias Parrhesia.Protocol.Filter
|
||||
|
||||
@wildcard_key :all
|
||||
@subscriptions_table_name :parrhesia_subscriptions_table
|
||||
@kind_index_table_name :parrhesia_subscription_kind_index
|
||||
@author_index_table_name :parrhesia_subscription_author_index
|
||||
@tag_index_table_name :parrhesia_subscription_tag_index
|
||||
@kind_wildcard_table_name :parrhesia_subscription_kind_wildcard_index
|
||||
@author_wildcard_table_name :parrhesia_subscription_author_wildcard_index
|
||||
@tag_wildcard_table_name :parrhesia_subscription_tag_wildcard_index
|
||||
|
||||
@type subscription_id :: String.t()
|
||||
@type owner :: pid()
|
||||
@@ -20,11 +27,12 @@ defmodule Parrhesia.Subscriptions.Index do
|
||||
@spec start_link(keyword()) :: GenServer.on_start()
|
||||
def start_link(opts \\ []) do
|
||||
name = Keyword.get(opts, :name)
|
||||
init_arg = %{named_tables?: name == __MODULE__}
|
||||
|
||||
if is_nil(name) do
|
||||
GenServer.start_link(__MODULE__, :ok)
|
||||
GenServer.start_link(__MODULE__, init_arg)
|
||||
else
|
||||
GenServer.start_link(__MODULE__, :ok, name: name)
|
||||
GenServer.start_link(__MODULE__, init_arg, name: name)
|
||||
end
|
||||
end
|
||||
|
||||
@@ -65,6 +73,13 @@ defmodule Parrhesia.Subscriptions.Index do
|
||||
end
|
||||
|
||||
@spec candidate_subscription_keys(GenServer.server(), map()) :: [subscription_key()]
|
||||
def candidate_subscription_keys(__MODULE__, event) do
|
||||
case named_tables() do
|
||||
{:ok, tables} -> candidate_subscription_keys_for_tables(tables, event)
|
||||
:error -> GenServer.call(__MODULE__, {:candidate_subscription_keys, event})
|
||||
end
|
||||
end
|
||||
|
||||
def candidate_subscription_keys(server, event) do
|
||||
GenServer.call(server, {:candidate_subscription_keys, event})
|
||||
end
|
||||
@@ -76,20 +91,15 @@ defmodule Parrhesia.Subscriptions.Index do
|
||||
end
|
||||
|
||||
@impl true
|
||||
def init(:ok) do
|
||||
def init(%{named_tables?: named_tables?}) do
|
||||
tables = create_tables(named_tables?)
|
||||
|
||||
{:ok,
|
||||
%{
|
||||
subscriptions_table: :ets.new(:subscriptions_table, [:set, :protected]),
|
||||
kind_index_table: :ets.new(:subscription_kind_index, [:bag, :protected]),
|
||||
author_index_table: :ets.new(:subscription_author_index, [:bag, :protected]),
|
||||
tag_index_table: :ets.new(:subscription_tag_index, [:bag, :protected]),
|
||||
kind_wildcard_table: :ets.new(:subscription_kind_wildcard_index, [:bag, :protected]),
|
||||
author_wildcard_table: :ets.new(:subscription_author_wildcard_index, [:bag, :protected]),
|
||||
tag_wildcard_table: :ets.new(:subscription_tag_wildcard_index, [:bag, :protected]),
|
||||
Map.merge(tables, %{
|
||||
owner_subscriptions: %{},
|
||||
owner_monitors: %{},
|
||||
monitor_owners: %{}
|
||||
}}
|
||||
})}
|
||||
end
|
||||
|
||||
@impl true
|
||||
@@ -128,14 +138,7 @@ defmodule Parrhesia.Subscriptions.Index do
|
||||
end
|
||||
|
||||
def handle_call({:candidate_subscription_keys, event}, _from, state) do
|
||||
candidates =
|
||||
state
|
||||
|> kind_candidates(event)
|
||||
|> MapSet.intersection(author_candidates(state, event))
|
||||
|> MapSet.intersection(tag_candidates(state, event))
|
||||
|> MapSet.to_list()
|
||||
|
||||
{:reply, candidates, state}
|
||||
{:reply, candidate_subscription_keys_for_tables(state, event), state}
|
||||
end
|
||||
|
||||
def handle_call({:fetch_filters, owner_pid, subscription_id}, _from, state) do
|
||||
@@ -371,28 +374,110 @@ defmodule Parrhesia.Subscriptions.Index do
|
||||
|> update_in([:owner_subscriptions], &Map.delete(&1, owner_pid))
|
||||
end
|
||||
|
||||
defp kind_candidates(state, event) do
|
||||
defp create_tables(true) do
|
||||
%{
|
||||
subscriptions_table:
|
||||
:ets.new(@subscriptions_table_name, [
|
||||
:set,
|
||||
:protected,
|
||||
:named_table,
|
||||
read_concurrency: true
|
||||
]),
|
||||
kind_index_table:
|
||||
:ets.new(@kind_index_table_name, [:bag, :protected, :named_table, read_concurrency: true]),
|
||||
author_index_table:
|
||||
:ets.new(@author_index_table_name, [
|
||||
:bag,
|
||||
:protected,
|
||||
:named_table,
|
||||
read_concurrency: true
|
||||
]),
|
||||
tag_index_table:
|
||||
:ets.new(@tag_index_table_name, [:bag, :protected, :named_table, read_concurrency: true]),
|
||||
kind_wildcard_table:
|
||||
:ets.new(@kind_wildcard_table_name, [
|
||||
:bag,
|
||||
:protected,
|
||||
:named_table,
|
||||
read_concurrency: true
|
||||
]),
|
||||
author_wildcard_table:
|
||||
:ets.new(@author_wildcard_table_name, [
|
||||
:bag,
|
||||
:protected,
|
||||
:named_table,
|
||||
read_concurrency: true
|
||||
]),
|
||||
tag_wildcard_table:
|
||||
:ets.new(@tag_wildcard_table_name, [
|
||||
:bag,
|
||||
:protected,
|
||||
:named_table,
|
||||
read_concurrency: true
|
||||
])
|
||||
}
|
||||
end
|
||||
|
||||
defp create_tables(false) do
|
||||
%{
|
||||
subscriptions_table: :ets.new(:subscriptions_table, [:set, :protected]),
|
||||
kind_index_table: :ets.new(:subscription_kind_index, [:bag, :protected]),
|
||||
author_index_table: :ets.new(:subscription_author_index, [:bag, :protected]),
|
||||
tag_index_table: :ets.new(:subscription_tag_index, [:bag, :protected]),
|
||||
kind_wildcard_table: :ets.new(:subscription_kind_wildcard_index, [:bag, :protected]),
|
||||
author_wildcard_table: :ets.new(:subscription_author_wildcard_index, [:bag, :protected]),
|
||||
tag_wildcard_table: :ets.new(:subscription_tag_wildcard_index, [:bag, :protected])
|
||||
}
|
||||
end
|
||||
|
||||
defp named_tables do
|
||||
tables = %{
|
||||
subscriptions_table: :ets.whereis(@subscriptions_table_name),
|
||||
kind_index_table: :ets.whereis(@kind_index_table_name),
|
||||
author_index_table: :ets.whereis(@author_index_table_name),
|
||||
tag_index_table: :ets.whereis(@tag_index_table_name),
|
||||
kind_wildcard_table: :ets.whereis(@kind_wildcard_table_name),
|
||||
author_wildcard_table: :ets.whereis(@author_wildcard_table_name),
|
||||
tag_wildcard_table: :ets.whereis(@tag_wildcard_table_name)
|
||||
}
|
||||
|
||||
if Enum.any?(tables, fn {_key, table_ref} -> table_ref == :undefined end) do
|
||||
:error
|
||||
else
|
||||
{:ok, tables}
|
||||
end
|
||||
end
|
||||
|
||||
defp candidate_subscription_keys_for_tables(tables, event) do
|
||||
tables
|
||||
|> kind_candidates(event)
|
||||
|> MapSet.intersection(author_candidates(tables, event))
|
||||
|> MapSet.intersection(tag_candidates(tables, event))
|
||||
|> MapSet.to_list()
|
||||
end
|
||||
|
||||
defp kind_candidates(tables, event) do
|
||||
event
|
||||
|> Map.get("kind")
|
||||
|> index_candidates_for_value(state.kind_index_table, state.kind_wildcard_table)
|
||||
|> index_candidates_for_value(tables.kind_index_table, tables.kind_wildcard_table)
|
||||
end
|
||||
|
||||
defp author_candidates(state, event) do
|
||||
defp author_candidates(tables, event) do
|
||||
event
|
||||
|> Map.get("pubkey")
|
||||
|> index_candidates_for_value(state.author_index_table, state.author_wildcard_table)
|
||||
|> index_candidates_for_value(tables.author_index_table, tables.author_wildcard_table)
|
||||
end
|
||||
|
||||
defp tag_candidates(state, event) do
|
||||
defp tag_candidates(tables, event) do
|
||||
tag_pairs = event_tag_pairs(Map.get(event, "tags"))
|
||||
wildcard_candidates = lookup_candidates(state.tag_wildcard_table, @wildcard_key)
|
||||
wildcard_candidates = lookup_candidates(tables.tag_wildcard_table, @wildcard_key)
|
||||
|
||||
if MapSet.size(tag_pairs) == 0 do
|
||||
wildcard_candidates
|
||||
else
|
||||
matched_candidates =
|
||||
Enum.reduce(tag_pairs, MapSet.new(), fn {tag_name, value}, acc ->
|
||||
MapSet.union(acc, lookup_candidates(state.tag_index_table, {tag_name, value}))
|
||||
MapSet.union(acc, lookup_candidates(tables.tag_index_table, {tag_name, value}))
|
||||
end)
|
||||
|
||||
MapSet.union(matched_candidates, wildcard_candidates)
|
||||
|
||||
@@ -11,12 +11,26 @@ defmodule Parrhesia.Subscriptions.Supervisor do
|
||||
|
||||
@impl true
|
||||
def init(_init_arg) do
|
||||
children = [
|
||||
{Parrhesia.Subscriptions.Index, name: Parrhesia.Subscriptions.Index},
|
||||
{Parrhesia.Negentropy.Sessions, name: Parrhesia.Negentropy.Sessions},
|
||||
{Parrhesia.Fanout.MultiNode, name: Parrhesia.Fanout.MultiNode}
|
||||
]
|
||||
children =
|
||||
[
|
||||
{Parrhesia.Subscriptions.Index, name: Parrhesia.Subscriptions.Index}
|
||||
] ++
|
||||
negentropy_children() ++ [{Parrhesia.Fanout.MultiNode, name: Parrhesia.Fanout.MultiNode}]
|
||||
|
||||
Supervisor.init(children, strategy: :one_for_one)
|
||||
end
|
||||
|
||||
defp negentropy_children do
|
||||
if negentropy_enabled?() do
|
||||
[{Parrhesia.Negentropy.Sessions, name: Parrhesia.Negentropy.Sessions}]
|
||||
else
|
||||
[]
|
||||
end
|
||||
end
|
||||
|
||||
defp negentropy_enabled? do
|
||||
:parrhesia
|
||||
|> Application.get_env(:features, [])
|
||||
|> Keyword.get(:nip_77_negentropy, true)
|
||||
end
|
||||
end
|
||||
|
||||
@@ -20,7 +20,13 @@ defmodule Parrhesia.Web.Connection do
|
||||
@default_max_outbound_queue 256
|
||||
@default_outbound_drain_batch_size 64
|
||||
@default_outbound_overflow_strategy :close
|
||||
@default_max_frame_bytes 1_048_576
|
||||
@default_max_event_bytes 262_144
|
||||
@default_event_ingest_rate_limit 120
|
||||
@default_event_ingest_window_seconds 1
|
||||
@default_auth_max_age_seconds 600
|
||||
@drain_outbound_queue :drain_outbound_queue
|
||||
@post_ack_ingest :post_ack_ingest
|
||||
@outbound_queue_pressure_threshold 0.75
|
||||
|
||||
@marmot_kinds MapSet.new([
|
||||
@@ -42,13 +48,21 @@ defmodule Parrhesia.Web.Connection do
|
||||
subscription_index: Index,
|
||||
auth_challenges: Challenges,
|
||||
auth_challenge: nil,
|
||||
relay_url: nil,
|
||||
negentropy_sessions: Sessions,
|
||||
outbound_queue: :queue.new(),
|
||||
outbound_queue_size: 0,
|
||||
max_outbound_queue: @default_max_outbound_queue,
|
||||
outbound_overflow_strategy: @default_outbound_overflow_strategy,
|
||||
outbound_drain_batch_size: @default_outbound_drain_batch_size,
|
||||
drain_scheduled?: false
|
||||
drain_scheduled?: false,
|
||||
max_frame_bytes: @default_max_frame_bytes,
|
||||
max_event_bytes: @default_max_event_bytes,
|
||||
max_event_ingest_per_window: @default_event_ingest_rate_limit,
|
||||
event_ingest_window_seconds: @default_event_ingest_window_seconds,
|
||||
event_ingest_window_started_at_ms: 0,
|
||||
event_ingest_count: 0,
|
||||
auth_max_age_seconds: @default_auth_max_age_seconds
|
||||
|
||||
@type overflow_strategy :: :close | :drop_oldest | :drop_newest
|
||||
|
||||
@@ -64,13 +78,21 @@ defmodule Parrhesia.Web.Connection do
|
||||
subscription_index: GenServer.server() | nil,
|
||||
auth_challenges: GenServer.server() | nil,
|
||||
auth_challenge: String.t() | nil,
|
||||
relay_url: String.t() | nil,
|
||||
negentropy_sessions: GenServer.server() | nil,
|
||||
outbound_queue: :queue.queue({String.t(), map()}),
|
||||
outbound_queue_size: non_neg_integer(),
|
||||
max_outbound_queue: pos_integer(),
|
||||
outbound_overflow_strategy: overflow_strategy(),
|
||||
outbound_drain_batch_size: pos_integer(),
|
||||
drain_scheduled?: boolean()
|
||||
drain_scheduled?: boolean(),
|
||||
max_frame_bytes: pos_integer(),
|
||||
max_event_bytes: pos_integer(),
|
||||
max_event_ingest_per_window: pos_integer(),
|
||||
event_ingest_window_seconds: pos_integer(),
|
||||
event_ingest_window_started_at_ms: integer(),
|
||||
event_ingest_count: non_neg_integer(),
|
||||
auth_max_age_seconds: pos_integer()
|
||||
}
|
||||
|
||||
@impl true
|
||||
@@ -82,10 +104,17 @@ defmodule Parrhesia.Web.Connection do
|
||||
subscription_index: subscription_index(opts),
|
||||
auth_challenges: auth_challenges,
|
||||
auth_challenge: maybe_issue_auth_challenge(auth_challenges),
|
||||
relay_url: relay_url(opts),
|
||||
negentropy_sessions: negentropy_sessions(opts),
|
||||
max_outbound_queue: max_outbound_queue(opts),
|
||||
outbound_overflow_strategy: outbound_overflow_strategy(opts),
|
||||
outbound_drain_batch_size: outbound_drain_batch_size(opts)
|
||||
outbound_drain_batch_size: outbound_drain_batch_size(opts),
|
||||
max_frame_bytes: max_frame_bytes(opts),
|
||||
max_event_bytes: max_event_bytes(opts),
|
||||
max_event_ingest_per_window: max_event_ingest_per_window(opts),
|
||||
event_ingest_window_seconds: event_ingest_window_seconds(opts),
|
||||
event_ingest_window_started_at_ms: System.monotonic_time(:millisecond),
|
||||
auth_max_age_seconds: auth_max_age_seconds(opts)
|
||||
}
|
||||
|
||||
{:ok, state}
|
||||
@@ -93,6 +122,15 @@ defmodule Parrhesia.Web.Connection do
|
||||
|
||||
@impl true
|
||||
def handle_in({payload, [opcode: :text]}, %__MODULE__{} = state) do
|
||||
if byte_size(payload) > state.max_frame_bytes do
|
||||
response =
|
||||
Protocol.encode_relay({
|
||||
:notice,
|
||||
"invalid: websocket frame exceeds max frame size"
|
||||
})
|
||||
|
||||
{:push, {:text, response}, state}
|
||||
else
|
||||
case Protocol.decode_client(payload) do
|
||||
{:ok, decoded_message} ->
|
||||
handle_decoded_message(decoded_message, state)
|
||||
@@ -102,6 +140,7 @@ defmodule Parrhesia.Web.Connection do
|
||||
{:push, {:text, response}, state}
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
@impl true
|
||||
def handle_in({_payload, [opcode: :binary]}, %__MODULE__{} = state) do
|
||||
@@ -155,6 +194,12 @@ defmodule Parrhesia.Web.Connection do
|
||||
handle_fanout_events(state, fanout_events)
|
||||
end
|
||||
|
||||
def handle_info({@post_ack_ingest, event}, %__MODULE__{} = state) when is_map(event) do
|
||||
fanout_event(event)
|
||||
maybe_publish_multi_node(event)
|
||||
{:ok, state}
|
||||
end
|
||||
|
||||
def handle_info(@drain_outbound_queue, %__MODULE__{} = state) do
|
||||
{frames, next_state} = drain_outbound_frames(state)
|
||||
|
||||
@@ -180,23 +225,53 @@ defmodule Parrhesia.Web.Connection do
|
||||
started_at = System.monotonic_time()
|
||||
event_id = Map.get(event, "id", "")
|
||||
|
||||
with :ok <- Protocol.validate_event(event),
|
||||
:ok <- EventPolicy.authorize_write(event, state.authenticated_pubkeys),
|
||||
case maybe_allow_event_ingest(state) do
|
||||
{:ok, next_state} ->
|
||||
result =
|
||||
with :ok <- validate_event_payload_size(event, next_state.max_event_bytes),
|
||||
:ok <- Protocol.validate_event(event),
|
||||
:ok <- EventPolicy.authorize_write(event, next_state.authenticated_pubkeys),
|
||||
:ok <- maybe_process_group_event(event),
|
||||
{:ok, _result, message} <- persist_event(event) do
|
||||
{:ok, message}
|
||||
end
|
||||
|
||||
handle_event_ingest_result(result, next_state, event, event_id, started_at)
|
||||
|
||||
{:error, reason} ->
|
||||
ingest_error_response(state, event_id, reason)
|
||||
end
|
||||
end
|
||||
|
||||
defp handle_event_ingest_result(
|
||||
{:ok, message},
|
||||
%__MODULE__{} = state,
|
||||
event,
|
||||
event_id,
|
||||
started_at
|
||||
) do
|
||||
Telemetry.emit(
|
||||
[:parrhesia, :ingest, :stop],
|
||||
%{duration: System.monotonic_time() - started_at},
|
||||
telemetry_metadata_for_event(event)
|
||||
)
|
||||
|
||||
fanout_event(event)
|
||||
maybe_publish_multi_node(event)
|
||||
send(self(), {@post_ack_ingest, event})
|
||||
|
||||
response = Protocol.encode_relay({:ok, event_id, true, message})
|
||||
{:push, {:text, response}, state}
|
||||
else
|
||||
{:error, reason} ->
|
||||
end
|
||||
|
||||
defp handle_event_ingest_result(
|
||||
{:error, reason},
|
||||
%__MODULE__{} = state,
|
||||
_event,
|
||||
event_id,
|
||||
_started_at
|
||||
),
|
||||
do: ingest_error_response(state, event_id, reason)
|
||||
|
||||
defp ingest_error_response(%__MODULE__{} = state, event_id, reason) do
|
||||
message = error_message_for_ingest_failure(reason)
|
||||
response = Protocol.encode_relay({:ok, event_id, false, message})
|
||||
|
||||
@@ -206,7 +281,6 @@ defmodule Parrhesia.Web.Connection do
|
||||
{:push, {:text, response}, state}
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
defp handle_req(%__MODULE__{} = state, subscription_id, filters) do
|
||||
started_at = System.monotonic_time()
|
||||
@@ -353,7 +427,7 @@ defmodule Parrhesia.Web.Connection do
|
||||
event_id = Map.get(auth_event, "id", "")
|
||||
|
||||
with :ok <- Protocol.validate_event(auth_event),
|
||||
:ok <- validate_auth_event(auth_event),
|
||||
:ok <- validate_auth_event(state, auth_event),
|
||||
:ok <- validate_auth_challenge(state, auth_event) do
|
||||
pubkey = Map.get(auth_event, "pubkey")
|
||||
|
||||
@@ -412,29 +486,55 @@ defmodule Parrhesia.Web.Connection do
|
||||
end
|
||||
|
||||
defp persist_event(event) do
|
||||
case Map.get(event, "kind") do
|
||||
5 ->
|
||||
kind = Map.get(event, "kind")
|
||||
|
||||
cond do
|
||||
kind in [5, 62] -> persist_control_event(kind, event)
|
||||
ephemeral_kind?(kind) -> persist_ephemeral_event()
|
||||
true -> persist_regular_event(event)
|
||||
end
|
||||
end
|
||||
|
||||
defp persist_control_event(5, event) do
|
||||
with {:ok, deleted_count} <- Storage.events().delete_by_request(%{}, event) do
|
||||
{:ok, deleted_count, "ok: deletion request processed"}
|
||||
end
|
||||
end
|
||||
|
||||
62 ->
|
||||
defp persist_control_event(62, event) do
|
||||
with {:ok, deleted_count} <- Storage.events().vanish(%{}, event) do
|
||||
{:ok, deleted_count, "ok: vanish request processed"}
|
||||
end
|
||||
end
|
||||
|
||||
_other ->
|
||||
defp persist_ephemeral_event do
|
||||
if accept_ephemeral_events?() do
|
||||
{:ok, :ephemeral, "ok: ephemeral event accepted"}
|
||||
else
|
||||
{:error, :ephemeral_events_disabled}
|
||||
end
|
||||
end
|
||||
|
||||
defp persist_regular_event(event) do
|
||||
case Storage.events().put_event(%{}, event) do
|
||||
{:ok, persisted_event} -> {:ok, persisted_event, "ok: event stored"}
|
||||
{:error, :duplicate_event} -> {:error, :duplicate_event}
|
||||
{:error, reason} -> {:error, reason}
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
defp error_message_for_ingest_failure(:duplicate_event),
|
||||
do: "duplicate: event already stored"
|
||||
|
||||
defp error_message_for_ingest_failure(:event_rate_limited),
|
||||
do: "rate-limited: too many EVENT messages"
|
||||
|
||||
defp error_message_for_ingest_failure(:event_too_large),
|
||||
do: "invalid: event exceeds max event size"
|
||||
|
||||
defp error_message_for_ingest_failure(:ephemeral_events_disabled),
|
||||
do: "blocked: ephemeral events are disabled"
|
||||
|
||||
defp error_message_for_ingest_failure(reason)
|
||||
when reason in [
|
||||
:auth_required,
|
||||
@@ -564,7 +664,7 @@ defmodule Parrhesia.Web.Connection do
|
||||
with_auth_challenge_frame(state, {:push, {:text, response}, state})
|
||||
end
|
||||
|
||||
defp validate_auth_event(%{"kind" => 22_242} = auth_event) do
|
||||
defp validate_auth_event(%__MODULE__{} = state, %{"kind" => 22_242} = auth_event) do
|
||||
tags = Map.get(auth_event, "tags", [])
|
||||
|
||||
challenge_tag? =
|
||||
@@ -573,10 +673,13 @@ defmodule Parrhesia.Web.Connection do
|
||||
_tag -> false
|
||||
end)
|
||||
|
||||
if challenge_tag?, do: :ok, else: {:error, :missing_challenge_tag}
|
||||
with :ok <- maybe_validate(challenge_tag?, :missing_challenge_tag),
|
||||
:ok <- validate_auth_relay_tag(state, tags) do
|
||||
validate_auth_created_at_freshness(auth_event, state.auth_max_age_seconds)
|
||||
end
|
||||
end
|
||||
|
||||
defp validate_auth_event(_auth_event), do: {:error, :invalid_auth_kind}
|
||||
defp validate_auth_event(_state, _auth_event), do: {:error, :invalid_auth_kind}
|
||||
|
||||
defp validate_auth_challenge(%__MODULE__{auth_challenge: nil}, _auth_event),
|
||||
do: {:error, :missing_challenge}
|
||||
@@ -593,8 +696,45 @@ defmodule Parrhesia.Web.Connection do
|
||||
if challenge_tag_matches?, do: :ok, else: {:error, :challenge_mismatch}
|
||||
end
|
||||
|
||||
defp validate_auth_relay_tag(%__MODULE__{relay_url: relay_url}, tags)
|
||||
when is_binary(relay_url) do
|
||||
relay_tag_matches? =
|
||||
Enum.any?(tags, fn
|
||||
["relay", ^relay_url | _rest] -> true
|
||||
_tag -> false
|
||||
end)
|
||||
|
||||
if relay_tag_matches?, do: :ok, else: {:error, :invalid_relay_tag}
|
||||
end
|
||||
|
||||
defp validate_auth_relay_tag(%__MODULE__{relay_url: nil}, _tags),
|
||||
do: {:error, :missing_relay_configuration}
|
||||
|
||||
defp validate_auth_created_at_freshness(auth_event, max_age_seconds)
|
||||
when is_integer(max_age_seconds) and max_age_seconds > 0 do
|
||||
created_at = Map.get(auth_event, "created_at", -1)
|
||||
now = System.system_time(:second)
|
||||
|
||||
if created_at >= now - max_age_seconds do
|
||||
:ok
|
||||
else
|
||||
{:error, :auth_event_too_old}
|
||||
end
|
||||
end
|
||||
|
||||
defp validate_auth_created_at_freshness(_auth_event, _max_age_seconds), do: :ok
|
||||
|
||||
defp maybe_validate(true, _reason), do: :ok
|
||||
defp maybe_validate(false, reason), do: {:error, reason}
|
||||
|
||||
defp auth_error_message(:invalid_auth_kind), do: "invalid: AUTH event kind must be 22242"
|
||||
defp auth_error_message(:missing_challenge_tag), do: "invalid: AUTH event missing challenge tag"
|
||||
defp auth_error_message(:invalid_relay_tag), do: "invalid: AUTH relay tag mismatch"
|
||||
|
||||
defp auth_error_message(:missing_relay_configuration),
|
||||
do: "invalid: relay URL is not configured"
|
||||
|
||||
defp auth_error_message(:auth_event_too_old), do: "invalid: AUTH event is too old"
|
||||
defp auth_error_message(:challenge_mismatch), do: "invalid: AUTH challenge mismatch"
|
||||
defp auth_error_message(:missing_challenge), do: "invalid: AUTH challenge unavailable"
|
||||
defp auth_error_message(reason) when is_binary(reason), do: reason
|
||||
@@ -1015,17 +1155,31 @@ defmodule Parrhesia.Web.Connection do
|
||||
|
||||
defp negentropy_sessions(opts) when is_list(opts) do
|
||||
opts
|
||||
|> Keyword.get(:negentropy_sessions, Sessions)
|
||||
|> Keyword.get(:negentropy_sessions, configured_negentropy_sessions())
|
||||
|> normalize_server_ref()
|
||||
end
|
||||
|
||||
defp negentropy_sessions(opts) when is_map(opts) do
|
||||
opts
|
||||
|> Map.get(:negentropy_sessions, Sessions)
|
||||
|> Map.get(:negentropy_sessions, configured_negentropy_sessions())
|
||||
|> normalize_server_ref()
|
||||
end
|
||||
|
||||
defp negentropy_sessions(_opts), do: Sessions
|
||||
defp negentropy_sessions(_opts), do: configured_negentropy_sessions()
|
||||
|
||||
defp configured_negentropy_sessions do
|
||||
if negentropy_enabled?() do
|
||||
Sessions
|
||||
else
|
||||
nil
|
||||
end
|
||||
end
|
||||
|
||||
defp negentropy_enabled? do
|
||||
:parrhesia
|
||||
|> Application.get_env(:features, [])
|
||||
|> Keyword.get(:nip_77_negentropy, true)
|
||||
end
|
||||
|
||||
defp normalize_server_ref(server_ref) when is_pid(server_ref) or is_atom(server_ref),
|
||||
do: server_ref
|
||||
@@ -1131,4 +1285,200 @@ defmodule Parrhesia.Web.Connection do
|
||||
|> Application.get_env(:limits, [])
|
||||
|> Keyword.get(:outbound_overflow_strategy, @default_outbound_overflow_strategy)
|
||||
end
|
||||
|
||||
defp relay_url(opts) when is_list(opts) do
|
||||
opts
|
||||
|> Keyword.get(:relay_url)
|
||||
|> normalize_relay_url()
|
||||
|> maybe_default_relay_url()
|
||||
end
|
||||
|
||||
defp relay_url(opts) when is_map(opts) do
|
||||
opts
|
||||
|> Map.get(:relay_url)
|
||||
|> normalize_relay_url()
|
||||
|> maybe_default_relay_url()
|
||||
end
|
||||
|
||||
defp relay_url(_opts), do: configured_relay_url()
|
||||
|
||||
defp normalize_relay_url(relay_url) when is_binary(relay_url) and relay_url != "", do: relay_url
|
||||
defp normalize_relay_url(_relay_url), do: nil
|
||||
|
||||
defp maybe_default_relay_url(nil), do: configured_relay_url()
|
||||
defp maybe_default_relay_url(relay_url), do: relay_url
|
||||
|
||||
defp configured_relay_url do
|
||||
:parrhesia
|
||||
|> Application.get_env(:relay_url)
|
||||
|> normalize_relay_url()
|
||||
end
|
||||
|
||||
defp max_frame_bytes(opts) when is_list(opts) do
|
||||
opts
|
||||
|> Keyword.get(:max_frame_bytes)
|
||||
|> normalize_max_frame_bytes()
|
||||
end
|
||||
|
||||
defp max_frame_bytes(opts) when is_map(opts) do
|
||||
opts
|
||||
|> Map.get(:max_frame_bytes)
|
||||
|> normalize_max_frame_bytes()
|
||||
end
|
||||
|
||||
defp max_frame_bytes(_opts), do: configured_max_frame_bytes()
|
||||
|
||||
defp normalize_max_frame_bytes(value) when is_integer(value) and value > 0, do: value
|
||||
defp normalize_max_frame_bytes(_value), do: configured_max_frame_bytes()
|
||||
|
||||
defp configured_max_frame_bytes do
|
||||
:parrhesia
|
||||
|> Application.get_env(:limits, [])
|
||||
|> Keyword.get(:max_frame_bytes, @default_max_frame_bytes)
|
||||
end
|
||||
|
||||
defp max_event_bytes(opts) when is_list(opts) do
|
||||
opts
|
||||
|> Keyword.get(:max_event_bytes)
|
||||
|> normalize_max_event_bytes()
|
||||
end
|
||||
|
||||
defp max_event_bytes(opts) when is_map(opts) do
|
||||
opts
|
||||
|> Map.get(:max_event_bytes)
|
||||
|> normalize_max_event_bytes()
|
||||
end
|
||||
|
||||
defp max_event_bytes(_opts), do: configured_max_event_bytes()
|
||||
|
||||
defp normalize_max_event_bytes(value) when is_integer(value) and value > 0, do: value
|
||||
defp normalize_max_event_bytes(_value), do: configured_max_event_bytes()
|
||||
|
||||
defp configured_max_event_bytes do
|
||||
:parrhesia
|
||||
|> Application.get_env(:limits, [])
|
||||
|> Keyword.get(:max_event_bytes, @default_max_event_bytes)
|
||||
end
|
||||
|
||||
defp max_event_ingest_per_window(opts) when is_list(opts) do
|
||||
opts
|
||||
|> Keyword.get(:max_event_ingest_per_window)
|
||||
|> normalize_max_event_ingest_per_window()
|
||||
end
|
||||
|
||||
defp max_event_ingest_per_window(opts) when is_map(opts) do
|
||||
opts
|
||||
|> Map.get(:max_event_ingest_per_window)
|
||||
|> normalize_max_event_ingest_per_window()
|
||||
end
|
||||
|
||||
defp max_event_ingest_per_window(_opts), do: configured_max_event_ingest_per_window()
|
||||
|
||||
defp normalize_max_event_ingest_per_window(value) when is_integer(value) and value > 0,
|
||||
do: value
|
||||
|
||||
defp normalize_max_event_ingest_per_window(_value),
|
||||
do: configured_max_event_ingest_per_window()
|
||||
|
||||
defp configured_max_event_ingest_per_window do
|
||||
:parrhesia
|
||||
|> Application.get_env(:limits, [])
|
||||
|> Keyword.get(:max_event_ingest_per_window, @default_event_ingest_rate_limit)
|
||||
end
|
||||
|
||||
defp event_ingest_window_seconds(opts) when is_list(opts) do
|
||||
opts
|
||||
|> Keyword.get(:event_ingest_window_seconds)
|
||||
|> normalize_event_ingest_window_seconds()
|
||||
end
|
||||
|
||||
defp event_ingest_window_seconds(opts) when is_map(opts) do
|
||||
opts
|
||||
|> Map.get(:event_ingest_window_seconds)
|
||||
|> normalize_event_ingest_window_seconds()
|
||||
end
|
||||
|
||||
defp event_ingest_window_seconds(_opts), do: configured_event_ingest_window_seconds()
|
||||
|
||||
defp normalize_event_ingest_window_seconds(value) when is_integer(value) and value > 0,
|
||||
do: value
|
||||
|
||||
defp normalize_event_ingest_window_seconds(_value), do: configured_event_ingest_window_seconds()
|
||||
|
||||
defp configured_event_ingest_window_seconds do
|
||||
:parrhesia
|
||||
|> Application.get_env(:limits, [])
|
||||
|> Keyword.get(:event_ingest_window_seconds, @default_event_ingest_window_seconds)
|
||||
end
|
||||
|
||||
defp auth_max_age_seconds(opts) when is_list(opts) do
|
||||
opts
|
||||
|> Keyword.get(:auth_max_age_seconds)
|
||||
|> normalize_auth_max_age_seconds()
|
||||
end
|
||||
|
||||
defp auth_max_age_seconds(opts) when is_map(opts) do
|
||||
opts
|
||||
|> Map.get(:auth_max_age_seconds)
|
||||
|> normalize_auth_max_age_seconds()
|
||||
end
|
||||
|
||||
defp auth_max_age_seconds(_opts), do: configured_auth_max_age_seconds()
|
||||
|
||||
defp normalize_auth_max_age_seconds(value) when is_integer(value) and value > 0, do: value
|
||||
defp normalize_auth_max_age_seconds(_value), do: configured_auth_max_age_seconds()
|
||||
|
||||
defp configured_auth_max_age_seconds do
|
||||
:parrhesia
|
||||
|> Application.get_env(:limits, [])
|
||||
|> Keyword.get(:auth_max_age_seconds, @default_auth_max_age_seconds)
|
||||
end
|
||||
|
||||
defp maybe_allow_event_ingest(
|
||||
%__MODULE__{
|
||||
event_ingest_window_started_at_ms: window_started_at_ms,
|
||||
event_ingest_window_seconds: window_seconds,
|
||||
event_ingest_count: count,
|
||||
max_event_ingest_per_window: max_event_ingest_per_window
|
||||
} = state
|
||||
) do
|
||||
now_ms = System.monotonic_time(:millisecond)
|
||||
window_ms = window_seconds * 1000
|
||||
|
||||
cond do
|
||||
now_ms - window_started_at_ms >= window_ms ->
|
||||
{:ok,
|
||||
%__MODULE__{
|
||||
state
|
||||
| event_ingest_window_started_at_ms: now_ms,
|
||||
event_ingest_count: 1
|
||||
}}
|
||||
|
||||
count < max_event_ingest_per_window ->
|
||||
{:ok, %__MODULE__{state | event_ingest_count: count + 1}}
|
||||
|
||||
true ->
|
||||
{:error, :event_rate_limited}
|
||||
end
|
||||
end
|
||||
|
||||
defp validate_event_payload_size(event, max_event_bytes)
|
||||
when is_map(event) and is_integer(max_event_bytes) and max_event_bytes > 0 do
|
||||
if byte_size(JSON.encode!(event)) <= max_event_bytes do
|
||||
:ok
|
||||
else
|
||||
{:error, :event_too_large}
|
||||
end
|
||||
end
|
||||
|
||||
defp validate_event_payload_size(_event, _max_event_bytes), do: :ok
|
||||
|
||||
defp ephemeral_kind?(kind) when is_integer(kind), do: kind >= 20_000 and kind < 30_000
|
||||
defp ephemeral_kind?(_kind), do: false
|
||||
|
||||
defp accept_ephemeral_events? do
|
||||
:parrhesia
|
||||
|> Application.get_env(:policies, [])
|
||||
|> Keyword.get(:accept_ephemeral_events, true)
|
||||
end
|
||||
end
|
||||
|
||||
28
lib/parrhesia/web/metrics.ex
Normal file
28
lib/parrhesia/web/metrics.ex
Normal file
@@ -0,0 +1,28 @@
|
||||
defmodule Parrhesia.Web.Metrics do
|
||||
@moduledoc false
|
||||
|
||||
import Plug.Conn
|
||||
|
||||
alias Parrhesia.Telemetry
|
||||
alias Parrhesia.Web.MetricsAccess
|
||||
|
||||
@spec enabled_on_main_endpoint?() :: boolean()
|
||||
def enabled_on_main_endpoint? do
|
||||
:parrhesia
|
||||
|> Application.get_env(:metrics, [])
|
||||
|> Keyword.get(:enabled_on_main_endpoint, true)
|
||||
end
|
||||
|
||||
@spec handle(Plug.Conn.t()) :: Plug.Conn.t()
|
||||
def handle(conn) do
|
||||
if MetricsAccess.allowed?(conn) do
|
||||
body = TelemetryMetricsPrometheus.Core.scrape(Telemetry.prometheus_reporter())
|
||||
|
||||
conn
|
||||
|> put_resp_content_type("text/plain")
|
||||
|> send_resp(200, body)
|
||||
else
|
||||
send_resp(conn, 403, "forbidden")
|
||||
end
|
||||
end
|
||||
end
|
||||
138
lib/parrhesia/web/metrics_access.ex
Normal file
138
lib/parrhesia/web/metrics_access.ex
Normal file
@@ -0,0 +1,138 @@
|
||||
defmodule Parrhesia.Web.MetricsAccess do
|
||||
@moduledoc false
|
||||
|
||||
import Plug.Conn
|
||||
import Bitwise
|
||||
|
||||
@private_cidrs [
|
||||
"127.0.0.0/8",
|
||||
"10.0.0.0/8",
|
||||
"172.16.0.0/12",
|
||||
"192.168.0.0/16",
|
||||
"169.254.0.0/16",
|
||||
"::1/128",
|
||||
"fc00::/7",
|
||||
"fe80::/10"
|
||||
]
|
||||
|
||||
@spec allowed?(Plug.Conn.t()) :: boolean()
|
||||
def allowed?(conn) do
|
||||
if metrics_public?() do
|
||||
true
|
||||
else
|
||||
token_allowed?(conn) and network_allowed?(conn)
|
||||
end
|
||||
end
|
||||
|
||||
defp token_allowed?(conn) do
|
||||
case configured_auth_token() do
|
||||
nil ->
|
||||
true
|
||||
|
||||
token ->
|
||||
provided_token(conn) == token
|
||||
end
|
||||
end
|
||||
|
||||
defp provided_token(conn) do
|
||||
conn
|
||||
|> get_req_header("authorization")
|
||||
|> List.first()
|
||||
|> normalize_authorization_header()
|
||||
end
|
||||
|
||||
defp normalize_authorization_header("Bearer " <> token), do: token
|
||||
defp normalize_authorization_header(token) when is_binary(token), do: token
|
||||
defp normalize_authorization_header(_header), do: nil
|
||||
|
||||
defp network_allowed?(conn) do
|
||||
remote_ip = conn.remote_ip
|
||||
|
||||
cond do
|
||||
configured_allowed_cidrs() != [] ->
|
||||
Enum.any?(configured_allowed_cidrs(), &ip_in_cidr?(remote_ip, &1))
|
||||
|
||||
metrics_private_networks_only?() ->
|
||||
Enum.any?(@private_cidrs, &ip_in_cidr?(remote_ip, &1))
|
||||
|
||||
true ->
|
||||
true
|
||||
end
|
||||
end
|
||||
|
||||
defp ip_in_cidr?(ip, cidr) do
|
||||
with {network, prefix_len} <- parse_cidr(cidr),
|
||||
{:ok, ip_size, ip_value} <- ip_to_int(ip),
|
||||
{:ok, network_size, network_value} <- ip_to_int(network),
|
||||
true <- ip_size == network_size,
|
||||
true <- prefix_len >= 0,
|
||||
true <- prefix_len <= ip_size do
|
||||
mask = network_mask(ip_size, prefix_len)
|
||||
(ip_value &&& mask) == (network_value &&& mask)
|
||||
else
|
||||
_other -> false
|
||||
end
|
||||
end
|
||||
|
||||
defp parse_cidr(cidr) when is_binary(cidr) do
|
||||
case String.split(cidr, "/", parts: 2) do
|
||||
[address, prefix_str] ->
|
||||
with {prefix_len, ""} <- Integer.parse(prefix_str),
|
||||
{:ok, ip} <- :inet.parse_address(String.to_charlist(address)) do
|
||||
{ip, prefix_len}
|
||||
else
|
||||
_other -> :error
|
||||
end
|
||||
|
||||
_other ->
|
||||
:error
|
||||
end
|
||||
end
|
||||
|
||||
defp parse_cidr(_cidr), do: :error
|
||||
|
||||
defp ip_to_int({a, b, c, d}) do
|
||||
{:ok, 32, (a <<< 24) + (b <<< 16) + (c <<< 8) + d}
|
||||
end
|
||||
|
||||
defp ip_to_int({a, b, c, d, e, f, g, h}) do
|
||||
{:ok, 128,
|
||||
(a <<< 112) + (b <<< 96) + (c <<< 80) + (d <<< 64) + (e <<< 48) + (f <<< 32) + (g <<< 16) +
|
||||
h}
|
||||
end
|
||||
|
||||
defp ip_to_int(_ip), do: :error
|
||||
|
||||
defp network_mask(_size, 0), do: 0
|
||||
|
||||
defp network_mask(size, prefix_len) do
|
||||
all_ones = (1 <<< size) - 1
|
||||
all_ones <<< (size - prefix_len)
|
||||
end
|
||||
|
||||
defp configured_allowed_cidrs do
|
||||
:parrhesia
|
||||
|> Application.get_env(:metrics, [])
|
||||
|> Keyword.get(:allowed_cidrs, [])
|
||||
|> Enum.filter(&is_binary/1)
|
||||
end
|
||||
|
||||
defp configured_auth_token do
|
||||
case :parrhesia |> Application.get_env(:metrics, []) |> Keyword.get(:auth_token) do
|
||||
token when is_binary(token) and token != "" -> token
|
||||
_other -> nil
|
||||
end
|
||||
end
|
||||
|
||||
defp metrics_public? do
|
||||
:parrhesia
|
||||
|> Application.get_env(:metrics, [])
|
||||
|> Keyword.get(:public, false)
|
||||
end
|
||||
|
||||
defp metrics_private_networks_only? do
|
||||
:parrhesia
|
||||
|> Application.get_env(:metrics, [])
|
||||
|> Keyword.get(:private_networks_only, true)
|
||||
end
|
||||
end
|
||||
34
lib/parrhesia/web/metrics_endpoint.ex
Normal file
34
lib/parrhesia/web/metrics_endpoint.ex
Normal file
@@ -0,0 +1,34 @@
|
||||
defmodule Parrhesia.Web.MetricsEndpoint do
|
||||
@moduledoc """
|
||||
Optional dedicated HTTP listener for Prometheus metrics scraping.
|
||||
"""
|
||||
|
||||
use Supervisor
|
||||
|
||||
def start_link(init_arg \\ []) do
|
||||
Supervisor.start_link(__MODULE__, init_arg, name: __MODULE__)
|
||||
end
|
||||
|
||||
@impl true
|
||||
def init(init_arg) do
|
||||
options = bandit_options(init_arg)
|
||||
|
||||
children =
|
||||
if Keyword.get(options, :enabled, false) do
|
||||
[{Bandit, Keyword.delete(options, :enabled)}]
|
||||
else
|
||||
[]
|
||||
end
|
||||
|
||||
Supervisor.init(children, strategy: :one_for_one)
|
||||
end
|
||||
|
||||
defp bandit_options(overrides) do
|
||||
configured = Application.get_env(:parrhesia, __MODULE__, [])
|
||||
|
||||
configured
|
||||
|> Keyword.merge(overrides)
|
||||
|> Keyword.put_new(:scheme, :http)
|
||||
|> Keyword.put_new(:plug, Parrhesia.Web.MetricsRouter)
|
||||
end
|
||||
end
|
||||
18
lib/parrhesia/web/metrics_router.ex
Normal file
18
lib/parrhesia/web/metrics_router.ex
Normal file
@@ -0,0 +1,18 @@
|
||||
defmodule Parrhesia.Web.MetricsRouter do
|
||||
@moduledoc false
|
||||
|
||||
use Plug.Router
|
||||
|
||||
alias Parrhesia.Web.Metrics
|
||||
|
||||
plug(:match)
|
||||
plug(:dispatch)
|
||||
|
||||
get "/metrics" do
|
||||
Metrics.handle(conn)
|
||||
end
|
||||
|
||||
match _ do
|
||||
send_resp(conn, 404, "not found")
|
||||
end
|
||||
end
|
||||
@@ -5,10 +5,24 @@ defmodule Parrhesia.Web.Readiness do
|
||||
def ready? do
|
||||
process_ready?(Parrhesia.Subscriptions.Index) and
|
||||
process_ready?(Parrhesia.Auth.Challenges) and
|
||||
process_ready?(Parrhesia.Negentropy.Sessions) and
|
||||
negentropy_ready?() and
|
||||
process_ready?(Parrhesia.Repo)
|
||||
end
|
||||
|
||||
defp negentropy_ready? do
|
||||
if negentropy_enabled?() do
|
||||
process_ready?(Parrhesia.Negentropy.Sessions)
|
||||
else
|
||||
true
|
||||
end
|
||||
end
|
||||
|
||||
defp negentropy_enabled? do
|
||||
:parrhesia
|
||||
|> Application.get_env(:features, [])
|
||||
|> Keyword.get(:nip_77_negentropy, true)
|
||||
end
|
||||
|
||||
defp process_ready?(name) do
|
||||
case Process.whereis(name) do
|
||||
pid when is_pid(pid) -> true
|
||||
|
||||
@@ -17,26 +17,16 @@ defmodule Parrhesia.Web.RelayInfo do
|
||||
end
|
||||
|
||||
defp supported_nips do
|
||||
[
|
||||
1,
|
||||
9,
|
||||
11,
|
||||
13,
|
||||
17,
|
||||
40,
|
||||
42,
|
||||
43,
|
||||
44,
|
||||
45,
|
||||
50,
|
||||
59,
|
||||
62,
|
||||
66,
|
||||
70,
|
||||
77,
|
||||
86,
|
||||
98
|
||||
]
|
||||
base = [1, 9, 11, 13, 17, 40, 42, 43, 44, 45, 50, 59, 62, 66, 70]
|
||||
|
||||
with_negentropy =
|
||||
if negentropy_enabled?() do
|
||||
base ++ [77]
|
||||
else
|
||||
base
|
||||
end
|
||||
|
||||
with_negentropy ++ [86, 98]
|
||||
end
|
||||
|
||||
defp limitations do
|
||||
@@ -48,4 +38,10 @@ defmodule Parrhesia.Web.RelayInfo do
|
||||
"auth_required" => Parrhesia.Config.get([:policies, :auth_required_for_reads], false)
|
||||
}
|
||||
end
|
||||
|
||||
defp negentropy_enabled? do
|
||||
:parrhesia
|
||||
|> Application.get_env(:features, [])
|
||||
|> Keyword.get(:nip_77_negentropy, true)
|
||||
end
|
||||
end
|
||||
|
||||
@@ -3,8 +3,8 @@ defmodule Parrhesia.Web.Router do
|
||||
|
||||
use Plug.Router
|
||||
|
||||
alias Parrhesia.Telemetry
|
||||
alias Parrhesia.Web.Management
|
||||
alias Parrhesia.Web.Metrics
|
||||
alias Parrhesia.Web.Readiness
|
||||
alias Parrhesia.Web.RelayInfo
|
||||
|
||||
@@ -30,11 +30,11 @@ defmodule Parrhesia.Web.Router do
|
||||
end
|
||||
|
||||
get "/metrics" do
|
||||
body = TelemetryMetricsPrometheus.Core.scrape(Telemetry.prometheus_reporter())
|
||||
|
||||
conn
|
||||
|> put_resp_content_type("text/plain")
|
||||
|> send_resp(200, body)
|
||||
if Metrics.enabled_on_main_endpoint?() do
|
||||
Metrics.handle(conn)
|
||||
else
|
||||
send_resp(conn, 404, "not found")
|
||||
end
|
||||
end
|
||||
|
||||
post "/management" do
|
||||
@@ -50,7 +50,12 @@ defmodule Parrhesia.Web.Router do
|
||||
|> send_resp(200, body)
|
||||
else
|
||||
conn
|
||||
|> WebSockAdapter.upgrade(Parrhesia.Web.Connection, %{}, timeout: 60_000)
|
||||
|> WebSockAdapter.upgrade(
|
||||
Parrhesia.Web.Connection,
|
||||
%{relay_url: relay_url(conn)},
|
||||
timeout: 60_000,
|
||||
max_frame_size: max_frame_bytes()
|
||||
)
|
||||
|> halt()
|
||||
end
|
||||
end
|
||||
@@ -64,4 +69,25 @@ defmodule Parrhesia.Web.Router do
|
||||
|> get_req_header("accept")
|
||||
|> Enum.any?(&String.contains?(&1, "application/nostr+json"))
|
||||
end
|
||||
|
||||
defp relay_url(conn) do
|
||||
ws_scheme = if conn.scheme == :https, do: "wss", else: "ws"
|
||||
|
||||
port_segment =
|
||||
if default_http_port?(conn.scheme, conn.port) do
|
||||
""
|
||||
else
|
||||
":#{conn.port}"
|
||||
end
|
||||
|
||||
"#{ws_scheme}://#{conn.host}#{port_segment}#{conn.request_path}"
|
||||
end
|
||||
|
||||
defp default_http_port?(:http, 80), do: true
|
||||
defp default_http_port?(:https, 443), do: true
|
||||
defp default_http_port?(_scheme, _port), do: false
|
||||
|
||||
defp max_frame_bytes do
|
||||
Parrhesia.Config.get([:limits, :max_frame_bytes], 1_048_576)
|
||||
end
|
||||
end
|
||||
|
||||
7
mix.exs
7
mix.exs
@@ -4,7 +4,7 @@ defmodule Parrhesia.MixProject do
|
||||
def project do
|
||||
[
|
||||
app: :parrhesia,
|
||||
version: "0.2.0",
|
||||
version: "0.3.0",
|
||||
elixir: "~> 1.19",
|
||||
start_permanent: Mix.env() == :prod,
|
||||
deps: deps(),
|
||||
@@ -31,6 +31,7 @@ defmodule Parrhesia.MixProject do
|
||||
{:bandit, "~> 1.5"},
|
||||
{:plug, "~> 1.15"},
|
||||
{:websock_adapter, "~> 0.5"},
|
||||
{:lib_secp256k1, "~> 0.7"},
|
||||
|
||||
# Runtime: storage adapter (Postgres first)
|
||||
{:ecto_sql, "~> 3.12"},
|
||||
@@ -43,8 +44,6 @@ defmodule Parrhesia.MixProject do
|
||||
|
||||
# Test tooling
|
||||
{:stream_data, "~> 1.0", only: :test},
|
||||
{:mox, "~> 1.1", only: :test},
|
||||
{:bypass, "~> 2.1", only: :test},
|
||||
{:websockex, "~> 0.4", only: :test},
|
||||
|
||||
# Project tooling
|
||||
@@ -71,7 +70,7 @@ defmodule Parrhesia.MixProject do
|
||||
"credo --strict --all",
|
||||
"deps.unlock --unused",
|
||||
"test",
|
||||
"test.nak_e2e",
|
||||
# "test.nak_e2e",
|
||||
"test.marmot_e2e"
|
||||
]
|
||||
]
|
||||
|
||||
5
mix.lock
5
mix.lock
@@ -1,7 +1,6 @@
|
||||
%{
|
||||
"bandit": {:hex, :bandit, "1.10.3", "1e5d168fa79ec8de2860d1b4d878d97d4fbbe2fdbe7b0a7d9315a4359d1d4bb9", [:mix], [{:hpax, "~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}, {:plug, "~> 1.18", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:thousand_island, "~> 1.0", [hex: :thousand_island, repo: "hexpm", optional: false]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "99a52d909c48db65ca598e1962797659e3c0f1d06e825a50c3d75b74a5e2db18"},
|
||||
"bunt": {:hex, :bunt, "1.0.0", "081c2c665f086849e6d57900292b3a161727ab40431219529f13c4ddcf3e7a44", [:mix], [], "hexpm", "dc5f86aa08a5f6fa6b8096f0735c4e76d54ae5c9fa2c143e5a1fc7c1cd9bb6b5"},
|
||||
"bypass": {:hex, :bypass, "2.1.0", "909782781bf8e20ee86a9cabde36b259d44af8b9f38756173e8f5e2e1fabb9b1", [:mix], [{:plug, "~> 1.7", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.0", [hex: :plug_cowboy, repo: "hexpm", optional: false]}, {:ranch, "~> 1.3", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "d9b5df8fa5b7a6efa08384e9bbecfe4ce61c77d28a4282f79e02f1ef78d96b80"},
|
||||
"cowboy": {:hex, :cowboy, "2.14.2", "4008be1df6ade45e4f2a4e9e2d22b36d0b5aba4e20b0a0d7049e28d124e34847", [:make, :rebar3], [{:cowlib, ">= 2.16.0 and < 3.0.0", [hex: :cowlib, repo: "hexpm", optional: false]}, {:ranch, ">= 1.8.0 and < 3.0.0", [hex: :ranch, repo: "hexpm", optional: false]}], "hexpm", "569081da046e7b41b5df36aa359be71a0c8874e5b9cff6f747073fc57baf1ab9"},
|
||||
"cowboy_telemetry": {:hex, :cowboy_telemetry, "0.4.0", "f239f68b588efa7707abce16a84d0d2acf3a0f50571f8bb7f56a15865aae820c", [:rebar3], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "7d98bac1ee4565d31b62d59f8823dfd8356a169e7fcbb83831b8a5397404c9de"},
|
||||
"cowlib": {:hex, :cowlib, "2.16.0", "54592074ebbbb92ee4746c8a8846e5605052f29309d3a873468d76cdf932076f", [:make, :rebar3], [], "hexpm", "7f478d80d66b747344f0ea7708c187645cfcc08b11aa424632f78e25bf05db51"},
|
||||
@@ -11,17 +10,17 @@
|
||||
"deps_changelog": {:hex, :deps_changelog, "0.3.5", "65981997d9bc893b8027a0c03da093a4083328c00b17f562df269c2b61d44073", [:mix], [], "hexpm", "298fcd7794395d8e61dba8d29ce8fcee09f1df4d48adb273a41e8f4a1736491e"},
|
||||
"ecto": {:hex, :ecto, "3.13.5", "9d4a69700183f33bf97208294768e561f5c7f1ecf417e0fa1006e4a91713a834", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "df9efebf70cf94142739ba357499661ef5dbb559ef902b68ea1f3c1fabce36de"},
|
||||
"ecto_sql": {:hex, :ecto_sql, "3.13.5", "2f8282b2ad97bf0f0d3217ea0a6fff320ead9e2f8770f810141189d182dc304e", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.13.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.7", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.19 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "aa36751f4e6a2b56ae79efb0e088042e010ff4935fc8684e74c23b1f49e25fdc"},
|
||||
"elixir_make": {:hex, :elixir_make, "0.9.0", "6484b3cd8c0cee58f09f05ecaf1a140a8c97670671a6a0e7ab4dc326c3109726", [:mix], [], "hexpm", "db23d4fd8b757462ad02f8aa73431a426fe6671c80b200d9710caf3d1dd0ffdb"},
|
||||
"file_system": {:hex, :file_system, "1.1.1", "31864f4685b0148f25bd3fbef2b1228457c0c89024ad67f7a81a3ffbc0bbad3a", [:mix], [], "hexpm", "7a15ff97dfe526aeefb090a7a9d3d03aa907e100e262a0f8f7746b78f8f87a5d"},
|
||||
"finch": {:hex, :finch, "0.21.0", "b1c3b2d48af02d0c66d2a9ebfb5622be5c5ecd62937cf79a88a7f98d48a8290c", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.6.2 or ~> 1.7", [hex: :mint, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.4 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 1.1", [hex: :nimble_pool, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "87dc6e169794cb2570f75841a19da99cfde834249568f2a5b121b809588a4377"},
|
||||
"glob_ex": {:hex, :glob_ex, "0.1.11", "cb50d3f1ef53f6ca04d6252c7fde09fd7a1cf63387714fe96f340a1349e62c93", [:mix], [], "hexpm", "342729363056e3145e61766b416769984c329e4378f1d558b63e341020525de4"},
|
||||
"hpax": {:hex, :hpax, "1.0.3", "ed67ef51ad4df91e75cc6a1494f851850c0bd98ebc0be6e81b026e765ee535aa", [:mix], [], "hexpm", "8eab6e1cfa8d5918c2ce4ba43588e894af35dbd8e91e6e55c817bca5847df34a"},
|
||||
"igniter": {:hex, :igniter, "0.7.4", "b5f9dd512eb1e672f1c141b523142b5b4602fcca231df5b4e362999df4b88e14", [:mix], [{:glob_ex, "~> 0.1.7", [hex: :glob_ex, repo: "hexpm", optional: false]}, {:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}, {:owl, "~> 0.11", [hex: :owl, repo: "hexpm", optional: false]}, {:phx_new, "~> 1.7", [hex: :phx_new, repo: "hexpm", optional: true]}, {:req, "~> 0.5", [hex: :req, repo: "hexpm", optional: false]}, {:rewrite, ">= 1.1.1 and < 2.0.0-0", [hex: :rewrite, repo: "hexpm", optional: false]}, {:sourceror, "~> 1.4", [hex: :sourceror, repo: "hexpm", optional: false]}, {:spitfire, ">= 0.1.3 and < 1.0.0-0", [hex: :spitfire, repo: "hexpm", optional: false]}], "hexpm", "971b240ee916a06b1af56381a262d9eeaff9610eddc299d61a213cd7a9d79efd"},
|
||||
"jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"},
|
||||
"lib_secp256k1": {:hex, :lib_secp256k1, "0.7.1", "53cad778b8da3a29e453a7a477517d99fb5f13f615c8050eb2db8fd1dce7a1db", [:make, :mix], [{:elixir_make, "~> 0.9", [hex: :elixir_make, repo: "hexpm", optional: false]}], "hexpm", "78bdd3661a17448aff5aeec5ca74c8ddbc09b01f0ecfa3ba1aba3e8ae47ab2b3"},
|
||||
"mime": {:hex, :mime, "2.0.7", "b8d739037be7cd402aee1ba0306edfdef982687ee7e9859bee6198c1e7e2f128", [:mix], [], "hexpm", "6171188e399ee16023ffc5b76ce445eb6d9672e2e241d2df6050f3c771e80ccd"},
|
||||
"mint": {:hex, :mint, "1.7.1", "113fdb2b2f3b59e47c7955971854641c61f378549d73e829e1768de90fc1abf1", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:hpax, "~> 0.1.1 or ~> 0.2.0 or ~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}], "hexpm", "fceba0a4d0f24301ddee3024ae116df1c3f4bb7a563a731f45fdfeb9d39a231b"},
|
||||
"mox": {:hex, :mox, "1.2.0", "a2cd96b4b80a3883e3100a221e8adc1b98e4c3a332a8fc434c39526babafd5b3", [:mix], [{:nimble_ownership, "~> 1.0", [hex: :nimble_ownership, repo: "hexpm", optional: false]}], "hexpm", "c7b92b3cc69ee24a7eeeaf944cd7be22013c52fcb580c1f33f50845ec821089a"},
|
||||
"nimble_options": {:hex, :nimble_options, "1.1.1", "e3a492d54d85fc3fd7c5baf411d9d2852922f66e69476317787a7b2bb000a61b", [:mix], [], "hexpm", "821b2470ca9442c4b6984882fe9bb0389371b8ddec4d45a9504f00a66f650b44"},
|
||||
"nimble_ownership": {:hex, :nimble_ownership, "1.0.2", "fa8a6f2d8c592ad4d79b2ca617473c6aefd5869abfa02563a77682038bf916cf", [:mix], [], "hexpm", "098af64e1f6f8609c6672127cfe9e9590a5d3fcdd82bc17a377b8692fd81a879"},
|
||||
"nimble_pool": {:hex, :nimble_pool, "1.1.0", "bf9c29fbdcba3564a8b800d1eeb5a3c58f36e1e11d7b7fb2e084a643f645f06b", [:mix], [], "hexpm", "af2e4e6b34197db81f7aad230c1118eac993acc0dae6bc83bac0126d4ae0813a"},
|
||||
"owl": {:hex, :owl, "0.13.0", "26010e066d5992774268f3163506972ddac0a7e77bfe57fa42a250f24d6b876e", [:mix], [{:ucwidth, "~> 0.2", [hex: :ucwidth, repo: "hexpm", optional: true]}], "hexpm", "59bf9d11ce37a4db98f57cb68fbfd61593bf419ec4ed302852b6683d3d2f7475"},
|
||||
"plug": {:hex, :plug, "1.19.1", "09bac17ae7a001a68ae393658aa23c7e38782be5c5c00c80be82901262c394c0", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "560a0017a8f6d5d30146916862aaf9300b7280063651dd7e532b8be168511e62"},
|
||||
|
||||
@@ -0,0 +1,24 @@
|
||||
defmodule Parrhesia.Repo.Migrations.AddEventsTagsJsonb do
|
||||
use Ecto.Migration
|
||||
|
||||
def up do
|
||||
execute("ALTER TABLE events ADD COLUMN tags jsonb NOT NULL DEFAULT '[]'::jsonb")
|
||||
|
||||
execute("""
|
||||
UPDATE events AS event
|
||||
SET tags = COALESCE(
|
||||
(
|
||||
SELECT jsonb_agg(jsonb_build_array(tag.name, tag.value) ORDER BY tag.idx)
|
||||
FROM event_tags AS tag
|
||||
WHERE tag.event_created_at = event.created_at
|
||||
AND tag.event_id = event.id
|
||||
),
|
||||
'[]'::jsonb
|
||||
)
|
||||
""")
|
||||
end
|
||||
|
||||
def down do
|
||||
execute("ALTER TABLE events DROP COLUMN tags")
|
||||
end
|
||||
end
|
||||
@@ -10,9 +10,9 @@ usage:
|
||||
./scripts/run_bench_compare.sh
|
||||
|
||||
Runs the same nostr-bench suite against:
|
||||
1) Parrhesia (temporary test relay via run_e2e_suite.sh)
|
||||
2) strfry (ephemeral instance)
|
||||
3) nostr-rs-relay (ephemeral sqlite instance)
|
||||
1) Parrhesia (temporary prod relay via run_e2e_suite.sh)
|
||||
2) strfry (ephemeral instance) — optional, skipped if not in PATH
|
||||
3) nostr-rs-relay (ephemeral sqlite instance) — optional, skipped if not in PATH
|
||||
|
||||
Environment:
|
||||
PARRHESIA_BENCH_RUNS Number of comparison runs (default: 2)
|
||||
@@ -50,19 +50,28 @@ if ! command -v nostr-bench >/dev/null 2>&1; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v strfry >/dev/null 2>&1; then
|
||||
echo "strfry not found in PATH. Enter devenv shell first." >&2
|
||||
exit 1
|
||||
# port_listening PORT — cross-platform check (Darwin: lsof, Linux: ss)
|
||||
port_listening() {
|
||||
local port="$1"
|
||||
if command -v ss >/dev/null 2>&1; then
|
||||
ss -ltn | grep -q ":${port} "
|
||||
else
|
||||
lsof -iTCP:"${port}" -sTCP:LISTEN -P -n >/dev/null 2>&1
|
||||
fi
|
||||
}
|
||||
|
||||
HAS_STRFRY=0
|
||||
if command -v strfry >/dev/null 2>&1; then
|
||||
HAS_STRFRY=1
|
||||
else
|
||||
echo "strfry not found in PATH — skipping strfry benchmarks"
|
||||
fi
|
||||
|
||||
if ! command -v nostr-rs-relay >/dev/null 2>&1; then
|
||||
echo "nostr-rs-relay not found in PATH. Enter devenv shell first." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! command -v ss >/dev/null 2>&1; then
|
||||
echo "ss command not found; cannot detect strfry readiness." >&2
|
||||
exit 1
|
||||
HAS_NOSTR_RS=0
|
||||
if command -v nostr-rs-relay >/dev/null 2>&1; then
|
||||
HAS_NOSTR_RS=1
|
||||
else
|
||||
echo "nostr-rs-relay not found in PATH — skipping nostr-rs-relay benchmarks"
|
||||
fi
|
||||
|
||||
RUNS="${PARRHESIA_BENCH_RUNS:-2}"
|
||||
@@ -98,8 +107,16 @@ resolve_strfry_version() {
|
||||
printf '%s\n' "$cli_version"
|
||||
}
|
||||
|
||||
STRFRY_VERSION="$(resolve_strfry_version)"
|
||||
NOSTR_RS_RELAY_VERSION="$(nostr-rs-relay --version 2>/dev/null | head -n 1 | tr -d '\r')"
|
||||
STRFRY_VERSION=""
|
||||
if (( HAS_STRFRY )); then
|
||||
STRFRY_VERSION="$(resolve_strfry_version)"
|
||||
fi
|
||||
|
||||
NOSTR_RS_RELAY_VERSION=""
|
||||
if (( HAS_NOSTR_RS )); then
|
||||
NOSTR_RS_RELAY_VERSION="$(nostr-rs-relay --version 2>/dev/null | head -n 1 | tr -d '\r')"
|
||||
fi
|
||||
|
||||
NOSTR_BENCH_VERSION="$(nostr-bench --version 2>/dev/null | head -n 1 | tr -d '\r')"
|
||||
|
||||
export PARRHESIA_BENCH_CONNECT_COUNT="${PARRHESIA_BENCH_CONNECT_COUNT:-200}"
|
||||
@@ -158,7 +175,7 @@ EOF
|
||||
STRFRY_PID=$!
|
||||
|
||||
for _ in {1..100}; do
|
||||
if ss -ltn | grep -q ":${port} "; then
|
||||
if port_listening "${port}"; then
|
||||
return 0
|
||||
fi
|
||||
sleep 0.1
|
||||
@@ -198,7 +215,7 @@ EOF
|
||||
NOSTR_RS_PID=$!
|
||||
|
||||
for _ in {1..100}; do
|
||||
if ss -ltn | grep -q ":${port} "; then
|
||||
if port_listening "${port}"; then
|
||||
return 0
|
||||
fi
|
||||
sleep 0.1
|
||||
@@ -220,8 +237,12 @@ stop_nostr_rs_relay() {
|
||||
echo "Running ${RUNS} comparison run(s)..."
|
||||
echo "Versions:"
|
||||
echo " parrhesia ${PARRHESIA_VERSION}"
|
||||
echo " ${STRFRY_VERSION}"
|
||||
echo " ${NOSTR_RS_RELAY_VERSION}"
|
||||
if (( HAS_STRFRY )); then
|
||||
echo " ${STRFRY_VERSION}"
|
||||
fi
|
||||
if (( HAS_NOSTR_RS )); then
|
||||
echo " ${NOSTR_RS_RELAY_VERSION}"
|
||||
fi
|
||||
echo " ${NOSTR_BENCH_VERSION}"
|
||||
echo
|
||||
|
||||
@@ -234,6 +255,7 @@ for run in $(seq 1 "$RUNS"); do
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if (( HAS_STRFRY )); then
|
||||
echo "[run ${run}/${RUNS}] strfry"
|
||||
strfry_log="$WORK_DIR/strfry_${run}.log"
|
||||
strfry_port=$((49000 + run))
|
||||
@@ -246,7 +268,9 @@ for run in $(seq 1 "$RUNS"); do
|
||||
exit 1
|
||||
fi
|
||||
stop_strfry
|
||||
fi
|
||||
|
||||
if (( HAS_NOSTR_RS )); then
|
||||
echo "[run ${run}/${RUNS}] nostr-rs-relay"
|
||||
nostr_rs_log="$WORK_DIR/nostr_rs_relay_${run}.log"
|
||||
nostr_rs_port=$((50000 + run))
|
||||
@@ -259,17 +283,20 @@ for run in $(seq 1 "$RUNS"); do
|
||||
exit 1
|
||||
fi
|
||||
stop_nostr_rs_relay
|
||||
fi
|
||||
|
||||
echo
|
||||
|
||||
done
|
||||
|
||||
node - "$WORK_DIR" "$RUNS" <<'NODE'
|
||||
node - "$WORK_DIR" "$RUNS" "$HAS_STRFRY" "$HAS_NOSTR_RS" <<'NODE'
|
||||
const fs = require("node:fs");
|
||||
const path = require("node:path");
|
||||
|
||||
const workDir = process.argv[2];
|
||||
const runs = Number(process.argv[3]);
|
||||
const hasStrfry = process.argv[4] === "1";
|
||||
const hasNostrRs = process.argv[5] === "1";
|
||||
|
||||
function parseLog(filePath) {
|
||||
const content = fs.readFileSync(filePath, "utf8");
|
||||
@@ -337,124 +364,61 @@ function loadRuns(prefix) {
|
||||
}
|
||||
|
||||
const parrhesiaRuns = loadRuns("parrhesia");
|
||||
const strfryRuns = loadRuns("strfry");
|
||||
const nostrRsRuns = loadRuns("nostr_rs_relay");
|
||||
const strfryRuns = hasStrfry ? loadRuns("strfry") : [];
|
||||
const nostrRsRuns = hasNostrRs ? loadRuns("nostr_rs_relay") : [];
|
||||
|
||||
const summary = {
|
||||
parrhesia: {
|
||||
connectAvgMs: mean(parrhesiaRuns.map((m) => m.connectAvgMs)),
|
||||
connectMaxMs: mean(parrhesiaRuns.map((m) => m.connectMaxMs)),
|
||||
echoTps: mean(parrhesiaRuns.map((m) => m.echoTps)),
|
||||
echoSizeMiBS: mean(parrhesiaRuns.map((m) => m.echoSizeMiBS)),
|
||||
eventTps: mean(parrhesiaRuns.map((m) => m.eventTps)),
|
||||
eventSizeMiBS: mean(parrhesiaRuns.map((m) => m.eventSizeMiBS)),
|
||||
reqTps: mean(parrhesiaRuns.map((m) => m.reqTps)),
|
||||
reqSizeMiBS: mean(parrhesiaRuns.map((m) => m.reqSizeMiBS)),
|
||||
},
|
||||
strfry: {
|
||||
connectAvgMs: mean(strfryRuns.map((m) => m.connectAvgMs)),
|
||||
connectMaxMs: mean(strfryRuns.map((m) => m.connectMaxMs)),
|
||||
echoTps: mean(strfryRuns.map((m) => m.echoTps)),
|
||||
echoSizeMiBS: mean(strfryRuns.map((m) => m.echoSizeMiBS)),
|
||||
eventTps: mean(strfryRuns.map((m) => m.eventTps)),
|
||||
eventSizeMiBS: mean(strfryRuns.map((m) => m.eventSizeMiBS)),
|
||||
reqTps: mean(strfryRuns.map((m) => m.reqTps)),
|
||||
reqSizeMiBS: mean(strfryRuns.map((m) => m.reqSizeMiBS)),
|
||||
},
|
||||
nostrRsRelay: {
|
||||
connectAvgMs: mean(nostrRsRuns.map((m) => m.connectAvgMs)),
|
||||
connectMaxMs: mean(nostrRsRuns.map((m) => m.connectMaxMs)),
|
||||
echoTps: mean(nostrRsRuns.map((m) => m.echoTps)),
|
||||
echoSizeMiBS: mean(nostrRsRuns.map((m) => m.echoSizeMiBS)),
|
||||
eventTps: mean(nostrRsRuns.map((m) => m.eventTps)),
|
||||
eventSizeMiBS: mean(nostrRsRuns.map((m) => m.eventSizeMiBS)),
|
||||
reqTps: mean(nostrRsRuns.map((m) => m.reqTps)),
|
||||
reqSizeMiBS: mean(nostrRsRuns.map((m) => m.reqSizeMiBS)),
|
||||
},
|
||||
};
|
||||
const metrics = [
|
||||
"connectAvgMs", "connectMaxMs",
|
||||
"echoTps", "echoSizeMiBS",
|
||||
"eventTps", "eventSizeMiBS",
|
||||
"reqTps", "reqSizeMiBS",
|
||||
];
|
||||
|
||||
function summarise(allRuns) {
|
||||
const out = {};
|
||||
for (const m of metrics) {
|
||||
out[m] = mean(allRuns.map((r) => r[m]));
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
const summary = { parrhesia: summarise(parrhesiaRuns) };
|
||||
if (hasStrfry) summary.strfry = summarise(strfryRuns);
|
||||
if (hasNostrRs) summary.nostrRsRelay = summarise(nostrRsRuns);
|
||||
|
||||
function ratioVsParrhesia(serverKey, metric) {
|
||||
const p = summary.parrhesia[metric];
|
||||
const other = summary[serverKey][metric];
|
||||
const other = summary[serverKey]?.[metric];
|
||||
if (!Number.isFinite(p) || !Number.isFinite(other) || p === 0) return "n/a";
|
||||
return `${(other / p).toFixed(2)}x`;
|
||||
}
|
||||
|
||||
const rows = [
|
||||
[
|
||||
"connect avg latency (ms) ↓",
|
||||
toFixed(summary.parrhesia.connectAvgMs),
|
||||
toFixed(summary.strfry.connectAvgMs),
|
||||
toFixed(summary.nostrRsRelay.connectAvgMs),
|
||||
ratioVsParrhesia("strfry", "connectAvgMs"),
|
||||
ratioVsParrhesia("nostrRsRelay", "connectAvgMs"),
|
||||
],
|
||||
[
|
||||
"connect max latency (ms) ↓",
|
||||
toFixed(summary.parrhesia.connectMaxMs),
|
||||
toFixed(summary.strfry.connectMaxMs),
|
||||
toFixed(summary.nostrRsRelay.connectMaxMs),
|
||||
ratioVsParrhesia("strfry", "connectMaxMs"),
|
||||
ratioVsParrhesia("nostrRsRelay", "connectMaxMs"),
|
||||
],
|
||||
[
|
||||
"echo throughput (TPS) ↑",
|
||||
toFixed(summary.parrhesia.echoTps),
|
||||
toFixed(summary.strfry.echoTps),
|
||||
toFixed(summary.nostrRsRelay.echoTps),
|
||||
ratioVsParrhesia("strfry", "echoTps"),
|
||||
ratioVsParrhesia("nostrRsRelay", "echoTps"),
|
||||
],
|
||||
[
|
||||
"echo throughput (MiB/s) ↑",
|
||||
toFixed(summary.parrhesia.echoSizeMiBS),
|
||||
toFixed(summary.strfry.echoSizeMiBS),
|
||||
toFixed(summary.nostrRsRelay.echoSizeMiBS),
|
||||
ratioVsParrhesia("strfry", "echoSizeMiBS"),
|
||||
ratioVsParrhesia("nostrRsRelay", "echoSizeMiBS"),
|
||||
],
|
||||
[
|
||||
"event throughput (TPS) ↑",
|
||||
toFixed(summary.parrhesia.eventTps),
|
||||
toFixed(summary.strfry.eventTps),
|
||||
toFixed(summary.nostrRsRelay.eventTps),
|
||||
ratioVsParrhesia("strfry", "eventTps"),
|
||||
ratioVsParrhesia("nostrRsRelay", "eventTps"),
|
||||
],
|
||||
[
|
||||
"event throughput (MiB/s) ↑",
|
||||
toFixed(summary.parrhesia.eventSizeMiBS),
|
||||
toFixed(summary.strfry.eventSizeMiBS),
|
||||
toFixed(summary.nostrRsRelay.eventSizeMiBS),
|
||||
ratioVsParrhesia("strfry", "eventSizeMiBS"),
|
||||
ratioVsParrhesia("nostrRsRelay", "eventSizeMiBS"),
|
||||
],
|
||||
[
|
||||
"req throughput (TPS) ↑",
|
||||
toFixed(summary.parrhesia.reqTps),
|
||||
toFixed(summary.strfry.reqTps),
|
||||
toFixed(summary.nostrRsRelay.reqTps),
|
||||
ratioVsParrhesia("strfry", "reqTps"),
|
||||
ratioVsParrhesia("nostrRsRelay", "reqTps"),
|
||||
],
|
||||
[
|
||||
"req throughput (MiB/s) ↑",
|
||||
toFixed(summary.parrhesia.reqSizeMiBS),
|
||||
toFixed(summary.strfry.reqSizeMiBS),
|
||||
toFixed(summary.nostrRsRelay.reqSizeMiBS),
|
||||
ratioVsParrhesia("strfry", "reqSizeMiBS"),
|
||||
ratioVsParrhesia("nostrRsRelay", "reqSizeMiBS"),
|
||||
],
|
||||
const metricLabels = [
|
||||
["connect avg latency (ms) ↓", "connectAvgMs"],
|
||||
["connect max latency (ms) ↓", "connectMaxMs"],
|
||||
["echo throughput (TPS) ↑", "echoTps"],
|
||||
["echo throughput (MiB/s) ↑", "echoSizeMiBS"],
|
||||
["event throughput (TPS) ↑", "eventTps"],
|
||||
["event throughput (MiB/s) ↑", "eventSizeMiBS"],
|
||||
["req throughput (TPS) ↑", "reqTps"],
|
||||
["req throughput (MiB/s) ↑", "reqSizeMiBS"],
|
||||
];
|
||||
|
||||
const headers = [
|
||||
"metric",
|
||||
"parrhesia",
|
||||
"strfry",
|
||||
"nostr-rs-relay",
|
||||
"strfry/parrhesia",
|
||||
"nostr-rs/parrhesia",
|
||||
];
|
||||
const headers = ["metric", "parrhesia"];
|
||||
if (hasStrfry) headers.push("strfry");
|
||||
if (hasNostrRs) headers.push("nostr-rs-relay");
|
||||
if (hasStrfry) headers.push("strfry/parrhesia");
|
||||
if (hasNostrRs) headers.push("nostr-rs/parrhesia");
|
||||
|
||||
const rows = metricLabels.map(([label, key]) => {
|
||||
const row = [label, toFixed(summary.parrhesia[key])];
|
||||
if (hasStrfry) row.push(toFixed(summary.strfry[key]));
|
||||
if (hasNostrRs) row.push(toFixed(summary.nostrRsRelay[key]));
|
||||
if (hasStrfry) row.push(ratioVsParrhesia("strfry", key));
|
||||
if (hasNostrRs) row.push(ratioVsParrhesia("nostrRsRelay", key));
|
||||
return row;
|
||||
});
|
||||
|
||||
const widths = headers.map((h, i) => Math.max(h.length, ...rows.map((r) => r[i].length)));
|
||||
|
||||
function fmtRow(cols) {
|
||||
@@ -471,18 +435,25 @@ for (const row of rows) {
|
||||
}
|
||||
|
||||
console.log("\nLegend: ↑ higher is better, ↓ lower is better.");
|
||||
console.log("Ratio columns are server/parrhesia (for ↓ metrics, <1.00x means that server is faster).\n");
|
||||
if (hasStrfry || hasNostrRs) {
|
||||
console.log("Ratio columns are server/parrhesia (for ↓ metrics, <1.00x means that server is faster).\n");
|
||||
} else {
|
||||
console.log("");
|
||||
}
|
||||
|
||||
console.log("Run details:");
|
||||
for (let i = 0; i < runs; i += 1) {
|
||||
const p = parrhesiaRuns[i];
|
||||
let line = ` run ${i + 1}: ` +
|
||||
`parrhesia(echo_tps=${toFixed(p.echoTps, 0)}, event_tps=${toFixed(p.eventTps, 0)}, req_tps=${toFixed(p.reqTps, 0)}, connect_avg_ms=${toFixed(p.connectAvgMs, 0)})`;
|
||||
if (hasStrfry) {
|
||||
const s = strfryRuns[i];
|
||||
line += ` | strfry(echo_tps=${toFixed(s.echoTps, 0)}, event_tps=${toFixed(s.eventTps, 0)}, req_tps=${toFixed(s.reqTps, 0)}, connect_avg_ms=${toFixed(s.connectAvgMs, 0)})`;
|
||||
}
|
||||
if (hasNostrRs) {
|
||||
const n = nostrRsRuns[i];
|
||||
console.log(
|
||||
` run ${i + 1}: ` +
|
||||
`parrhesia(echo_tps=${toFixed(p.echoTps, 0)}, event_tps=${toFixed(p.eventTps, 0)}, req_tps=${toFixed(p.reqTps, 0)}, connect_avg_ms=${toFixed(p.connectAvgMs, 0)}) | ` +
|
||||
`strfry(echo_tps=${toFixed(s.echoTps, 0)}, event_tps=${toFixed(s.eventTps, 0)}, req_tps=${toFixed(s.reqTps, 0)}, connect_avg_ms=${toFixed(s.connectAvgMs, 0)}) | ` +
|
||||
`nostr-rs-relay(echo_tps=${toFixed(n.echoTps, 0)}, event_tps=${toFixed(n.eventTps, 0)}, req_tps=${toFixed(n.reqTps, 0)}, connect_avg_ms=${toFixed(n.connectAvgMs, 0)})`
|
||||
);
|
||||
line += ` | nostr-rs-relay(echo_tps=${toFixed(n.echoTps, 0)}, event_tps=${toFixed(n.eventTps, 0)}, req_tps=${toFixed(n.reqTps, 0)}, connect_avg_ms=${toFixed(n.connectAvgMs, 0)})`;
|
||||
}
|
||||
console.log(line);
|
||||
}
|
||||
NODE
|
||||
|
||||
@@ -12,7 +12,12 @@ shift
|
||||
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||
cd "$ROOT_DIR"
|
||||
|
||||
export MIX_ENV=test
|
||||
MIX_ENV="${PARRHESIA_E2E_MIX_ENV:-test}"
|
||||
if [[ "$MIX_ENV" != "test" && "$MIX_ENV" != "prod" ]]; then
|
||||
echo "PARRHESIA_E2E_MIX_ENV must be test or prod, got: $MIX_ENV" >&2
|
||||
exit 1
|
||||
fi
|
||||
export MIX_ENV
|
||||
|
||||
SUITE_SLUG="$(printf '%s' "$SUITE_NAME" | tr '[:upper:]' '[:lower:]' | tr -c 'a-z0-9' '_')"
|
||||
SUITE_UPPER="$(printf '%s' "$SUITE_SLUG" | tr '[:lower:]' '[:upper:]')"
|
||||
@@ -26,12 +31,40 @@ printf -v "$PORT_ENV_VAR" '%s' "$TEST_HTTP_PORT"
|
||||
export "$PORT_ENV_VAR"
|
||||
|
||||
if [[ -z "${PGDATABASE:-}" ]]; then
|
||||
export PGDATABASE="parrhesia_${SUITE_SLUG}_test"
|
||||
export PGDATABASE="parrhesia_${SUITE_SLUG}_${MIX_ENV}"
|
||||
fi
|
||||
|
||||
PARRHESIA_TEST_HTTP_PORT=0 mix ecto.drop --quiet || true
|
||||
PARRHESIA_TEST_HTTP_PORT=0 mix ecto.create --quiet
|
||||
PARRHESIA_TEST_HTTP_PORT=0 mix ecto.migrate --quiet
|
||||
if [[ -z "${DATABASE_URL:-}" ]]; then
|
||||
PGUSER_EFFECTIVE="${PGUSER:-${USER:-agent}}"
|
||||
PGHOST_EFFECTIVE="${PGHOST:-localhost}"
|
||||
PGPORT_EFFECTIVE="${PGPORT:-5432}"
|
||||
|
||||
# Ecto requires a URL host to be present. For unix sockets we keep a dummy
|
||||
# TCP host and pass the socket directory as query option.
|
||||
if [[ "$PGHOST_EFFECTIVE" == /* ]]; then
|
||||
if [[ -n "${PGPASSWORD:-}" ]]; then
|
||||
export DATABASE_URL="ecto://${PGUSER_EFFECTIVE}:${PGPASSWORD}@localhost/${PGDATABASE}?socket_dir=${PGHOST_EFFECTIVE}&port=${PGPORT_EFFECTIVE}"
|
||||
else
|
||||
export DATABASE_URL="ecto://${PGUSER_EFFECTIVE}@localhost/${PGDATABASE}?socket_dir=${PGHOST_EFFECTIVE}&port=${PGPORT_EFFECTIVE}"
|
||||
fi
|
||||
else
|
||||
if [[ -n "${PGPASSWORD:-}" ]]; then
|
||||
export DATABASE_URL="ecto://${PGUSER_EFFECTIVE}:${PGPASSWORD}@${PGHOST_EFFECTIVE}:${PGPORT_EFFECTIVE}/${PGDATABASE}"
|
||||
else
|
||||
export DATABASE_URL="ecto://${PGUSER_EFFECTIVE}@${PGHOST_EFFECTIVE}:${PGPORT_EFFECTIVE}/${PGDATABASE}"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "$MIX_ENV" == "test" ]]; then
|
||||
PARRHESIA_TEST_HTTP_PORT=0 mix ecto.drop --quiet --force || true
|
||||
PARRHESIA_TEST_HTTP_PORT=0 mix ecto.create --quiet
|
||||
PARRHESIA_TEST_HTTP_PORT=0 mix ecto.migrate --quiet
|
||||
else
|
||||
mix ecto.drop --quiet --force || true
|
||||
mix ecto.create --quiet
|
||||
mix ecto.migrate --quiet
|
||||
fi
|
||||
|
||||
SERVER_LOG="${ROOT_DIR}/.${SUITE_SLUG}-e2e-server.log"
|
||||
: > "$SERVER_LOG"
|
||||
@@ -41,6 +74,14 @@ cleanup() {
|
||||
kill "$SERVER_PID" 2>/dev/null || true
|
||||
wait "$SERVER_PID" 2>/dev/null || true
|
||||
fi
|
||||
|
||||
if [[ "${PARRHESIA_E2E_DROP_DB_ON_EXIT:-0}" == "1" ]]; then
|
||||
if [[ "$MIX_ENV" == "test" ]]; then
|
||||
PARRHESIA_TEST_HTTP_PORT=0 mix ecto.drop --quiet --force || true
|
||||
else
|
||||
mix ecto.drop --quiet --force || true
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
trap cleanup EXIT INT TERM
|
||||
@@ -50,7 +91,11 @@ if ss -ltn "( sport = :${TEST_HTTP_PORT} )" | tail -n +2 | grep -q .; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
PARRHESIA_TEST_HTTP_PORT="$TEST_HTTP_PORT" mix run --no-halt >"$SERVER_LOG" 2>&1 &
|
||||
if [[ "$MIX_ENV" == "test" ]]; then
|
||||
PARRHESIA_TEST_HTTP_PORT="$TEST_HTTP_PORT" mix run --no-halt >"$SERVER_LOG" 2>&1 &
|
||||
else
|
||||
PORT="$TEST_HTTP_PORT" mix run --no-halt >"$SERVER_LOG" 2>&1 &
|
||||
fi
|
||||
SERVER_PID=$!
|
||||
|
||||
READY=0
|
||||
@@ -68,4 +113,8 @@ if [[ "$READY" -ne 1 ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
PARRHESIA_TEST_HTTP_PORT=0 "$@"
|
||||
if [[ "$MIX_ENV" == "test" ]]; then
|
||||
PARRHESIA_TEST_HTTP_PORT=0 "$@"
|
||||
else
|
||||
"$@"
|
||||
fi
|
||||
|
||||
@@ -2,6 +2,24 @@
|
||||
set -euo pipefail
|
||||
|
||||
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||
|
||||
# Check if the marmot-ts submodule is initialised
|
||||
if [[ ! -f "$ROOT_DIR/marmot-ts/package.json" ]]; then
|
||||
echo "marmot-ts submodule is not initialised." >&2
|
||||
if [[ -t 0 ]]; then
|
||||
read -rp "Initialise it now? [y/N] " answer
|
||||
if [[ "$answer" =~ ^[Yy]$ ]]; then
|
||||
git -C "$ROOT_DIR" submodule update --init marmot-ts
|
||||
else
|
||||
echo "Skipping marmot e2e tests."
|
||||
exit 0
|
||||
fi
|
||||
else
|
||||
echo "Run 'git submodule update --init marmot-ts' to initialise it." >&2
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
cd "$ROOT_DIR/marmot-ts"
|
||||
|
||||
if [[ ! -d node_modules ]]; then
|
||||
|
||||
@@ -10,9 +10,18 @@ usage:
|
||||
./scripts/run_nostr_bench.sh [all]
|
||||
./scripts/run_nostr_bench.sh <connect|echo|event|req> [nostr-bench options...]
|
||||
|
||||
Runs nostr-bench against a temporary Parrhesia test server started via
|
||||
Runs nostr-bench against a temporary Parrhesia prod server started via
|
||||
./scripts/run_e2e_suite.sh.
|
||||
|
||||
Pool tuning:
|
||||
POOL_SIZE optional override for prod pool size
|
||||
DB_QUEUE_TARGET_MS optional Repo queue target override
|
||||
DB_QUEUE_INTERVAL_MS optional Repo queue interval override
|
||||
|
||||
Database lifecycle:
|
||||
PGDATABASE optional override (auto-generated by default)
|
||||
PARRHESIA_E2E_DROP_DB_ON_EXIT=1 drop benchmark DB on exit (default: 1)
|
||||
|
||||
Examples:
|
||||
./scripts/run_nostr_bench.sh
|
||||
./scripts/run_nostr_bench.sh connect -c 500 -r 100
|
||||
@@ -54,6 +63,13 @@ if [[ "$MODE" == "all" && $# -gt 0 ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "${PGDATABASE:-}" ]]; then
|
||||
export PGDATABASE="parrhesia_bench_prod_$(date +%s)_$RANDOM"
|
||||
fi
|
||||
|
||||
export PARRHESIA_E2E_DROP_DB_ON_EXIT="${PARRHESIA_E2E_DROP_DB_ON_EXIT:-1}"
|
||||
|
||||
PARRHESIA_E2E_MIX_ENV="prod" \
|
||||
exec ./scripts/run_e2e_suite.sh \
|
||||
bench \
|
||||
bash -lc '
|
||||
|
||||
@@ -350,7 +350,7 @@ async function requestGiftWrapsWithAuth({ relayUrl, relayHttpUrl, signer, recipi
|
||||
created_at: unixNow(),
|
||||
tags: [
|
||||
["challenge", challenge],
|
||||
["relay", relayHttpUrl],
|
||||
["relay", relayUrl],
|
||||
],
|
||||
content: "",
|
||||
});
|
||||
|
||||
@@ -10,6 +10,7 @@ defmodule Parrhesia.ApplicationTest do
|
||||
assert is_pid(Process.whereis(Parrhesia.Auth.Supervisor))
|
||||
assert is_pid(Process.whereis(Parrhesia.Policy.Supervisor))
|
||||
assert is_pid(Process.whereis(Parrhesia.Web.Endpoint))
|
||||
assert is_pid(Process.whereis(Parrhesia.Web.MetricsEndpoint))
|
||||
assert is_pid(Process.whereis(Parrhesia.Tasks.Supervisor))
|
||||
|
||||
assert Enum.any?(Supervisor.which_children(Parrhesia.Web.Endpoint), fn {_id, pid, _type,
|
||||
@@ -18,6 +19,15 @@ defmodule Parrhesia.ApplicationTest do
|
||||
end)
|
||||
|
||||
assert is_pid(Process.whereis(Parrhesia.Auth.Challenges))
|
||||
|
||||
if negentropy_enabled?() do
|
||||
assert is_pid(Process.whereis(Parrhesia.Negentropy.Sessions))
|
||||
end
|
||||
end
|
||||
|
||||
defp negentropy_enabled? do
|
||||
:parrhesia
|
||||
|> Application.get_env(:features, [])
|
||||
|> Keyword.get(:nip_77_negentropy, true)
|
||||
end
|
||||
end
|
||||
|
||||
@@ -5,12 +5,17 @@ defmodule Parrhesia.ConfigTest do
|
||||
assert Parrhesia.Config.get([:limits, :max_frame_bytes]) == 1_048_576
|
||||
assert Parrhesia.Config.get([:limits, :max_event_bytes]) == 262_144
|
||||
assert Parrhesia.Config.get([:limits, :max_event_future_skew_seconds]) == 900
|
||||
assert Parrhesia.Config.get([:limits, :max_event_ingest_per_window]) == 120
|
||||
assert Parrhesia.Config.get([:limits, :event_ingest_window_seconds]) == 1
|
||||
assert Parrhesia.Config.get([:limits, :auth_max_age_seconds]) == 600
|
||||
assert Parrhesia.Config.get([:limits, :max_outbound_queue]) == 256
|
||||
assert Parrhesia.Config.get([:limits, :max_filter_limit]) == 500
|
||||
assert Parrhesia.Config.get([:relay_url]) == "ws://localhost:4000/relay"
|
||||
assert Parrhesia.Config.get([:policies, :auth_required_for_writes]) == false
|
||||
assert Parrhesia.Config.get([:policies, :marmot_media_max_imeta_tags_per_event]) == 8
|
||||
assert Parrhesia.Config.get([:policies, :marmot_media_reject_mip04_v1]) == true
|
||||
assert Parrhesia.Config.get([:policies, :marmot_push_max_trigger_age_seconds]) == 120
|
||||
assert Parrhesia.Config.get([:features, :verify_event_signatures]) == false
|
||||
assert Parrhesia.Config.get([:features, :nip_50_search]) == true
|
||||
assert Parrhesia.Config.get([:features, :marmot_push_notifications]) == false
|
||||
end
|
||||
|
||||
@@ -43,7 +43,7 @@ defmodule Parrhesia.FaultInjectionGroupFlowTest do
|
||||
|
||||
payload = JSON.encode!(["EVENT", group_event])
|
||||
|
||||
assert {:push, {:text, error_response}, ^state} =
|
||||
assert {:push, {:text, error_response}, _next_state} =
|
||||
Connection.handle_in({payload, [opcode: :text]}, state)
|
||||
|
||||
assert JSON.decode!(error_response) == ["OK", group_event["id"], false, "error: :db_down"]
|
||||
@@ -54,7 +54,7 @@ defmodule Parrhesia.FaultInjectionGroupFlowTest do
|
||||
previous_storage |> Keyword.put(:moderation, PermissiveModeration)
|
||||
)
|
||||
|
||||
assert {:push, {:text, ok_response}, ^state} =
|
||||
assert {:push, {:text, ok_response}, _next_state} =
|
||||
Connection.handle_in({payload, [opcode: :text]}, state)
|
||||
|
||||
assert JSON.decode!(ok_response) == ["OK", group_event["id"], true, "ok: event stored"]
|
||||
@@ -87,7 +87,7 @@ defmodule Parrhesia.FaultInjectionGroupFlowTest do
|
||||
"content" => Base.encode64("newer")
|
||||
})
|
||||
|
||||
assert {:push, {:text, outage_response}, ^state} =
|
||||
assert {:push, {:text, outage_response}, _next_state} =
|
||||
Connection.handle_in(
|
||||
{JSON.encode!(["EVENT", older_event]), [opcode: :text]},
|
||||
state
|
||||
@@ -101,7 +101,7 @@ defmodule Parrhesia.FaultInjectionGroupFlowTest do
|
||||
previous_storage |> Keyword.put(:moderation, PermissiveModeration)
|
||||
)
|
||||
|
||||
assert {:push, {:text, newer_response}, ^state} =
|
||||
assert {:push, {:text, newer_response}, _next_state} =
|
||||
Connection.handle_in(
|
||||
{JSON.encode!(["EVENT", newer_event]), [opcode: :text]},
|
||||
state
|
||||
@@ -109,7 +109,7 @@ defmodule Parrhesia.FaultInjectionGroupFlowTest do
|
||||
|
||||
assert JSON.decode!(newer_response) == ["OK", newer_event["id"], true, "ok: event stored"]
|
||||
|
||||
assert {:push, {:text, older_response}, ^state} =
|
||||
assert {:push, {:text, older_response}, _next_state} =
|
||||
Connection.handle_in(
|
||||
{JSON.encode!(["EVENT", older_event]), [opcode: :text]},
|
||||
state
|
||||
|
||||
@@ -29,7 +29,7 @@ defmodule Parrhesia.FaultInjectionTest do
|
||||
{:ok, state} = Connection.init(subscription_index: nil)
|
||||
event = valid_event()
|
||||
|
||||
assert {:push, {:text, response}, ^state} =
|
||||
assert {:push, {:text, response}, _next_state} =
|
||||
Connection.handle_in({JSON.encode!(["EVENT", event]), [opcode: :text]}, state)
|
||||
|
||||
assert JSON.decode!(response) == ["OK", event["id"], false, "error: :db_down"]
|
||||
|
||||
@@ -15,4 +15,39 @@ defmodule Parrhesia.Negentropy.SessionsTest do
|
||||
assert :ok = Sessions.close(server, self(), "sub-neg")
|
||||
assert {:error, :unknown_session} = Sessions.message(server, self(), "sub-neg", %{})
|
||||
end
|
||||
|
||||
test "rejects oversized NEG payloads" do
|
||||
server =
|
||||
start_supervised!(
|
||||
{Sessions,
|
||||
name: nil,
|
||||
max_payload_bytes: 32,
|
||||
max_sessions_per_owner: 8,
|
||||
max_total_sessions: 16,
|
||||
max_idle_seconds: 60,
|
||||
sweep_interval_seconds: 60}
|
||||
)
|
||||
|
||||
assert {:error, :payload_too_large} =
|
||||
Sessions.open(server, self(), "sub-neg", %{"delta" => String.duplicate("a", 256)})
|
||||
end
|
||||
|
||||
test "enforces per-owner session limits" do
|
||||
server =
|
||||
start_supervised!(
|
||||
{Sessions,
|
||||
name: nil,
|
||||
max_payload_bytes: 1024,
|
||||
max_sessions_per_owner: 1,
|
||||
max_total_sessions: 16,
|
||||
max_idle_seconds: 60,
|
||||
sweep_interval_seconds: 60}
|
||||
)
|
||||
|
||||
assert {:ok, %{"status" => "open", "cursor" => 0}} =
|
||||
Sessions.open(server, self(), "sub-1", %{})
|
||||
|
||||
assert {:error, :owner_session_limit_reached} =
|
||||
Sessions.open(server, self(), "sub-2", %{})
|
||||
end
|
||||
end
|
||||
|
||||
63
test/parrhesia/protocol/event_validator_signature_test.exs
Normal file
63
test/parrhesia/protocol/event_validator_signature_test.exs
Normal file
@@ -0,0 +1,63 @@
|
||||
defmodule Parrhesia.Protocol.EventValidatorSignatureTest do
|
||||
use ExUnit.Case, async: true
|
||||
|
||||
alias Parrhesia.Protocol.EventValidator
|
||||
|
||||
test "accepts valid Schnorr signatures when verification is enabled" do
|
||||
previous_features = Application.get_env(:parrhesia, :features, [])
|
||||
|
||||
Application.put_env(
|
||||
:parrhesia,
|
||||
:features,
|
||||
Keyword.put(previous_features, :verify_event_signatures, true)
|
||||
)
|
||||
|
||||
on_exit(fn ->
|
||||
Application.put_env(:parrhesia, :features, previous_features)
|
||||
end)
|
||||
|
||||
event = signed_event()
|
||||
|
||||
assert :ok = EventValidator.validate(event)
|
||||
end
|
||||
|
||||
test "rejects invalid Schnorr signatures when verification is enabled" do
|
||||
previous_features = Application.get_env(:parrhesia, :features, [])
|
||||
|
||||
Application.put_env(
|
||||
:parrhesia,
|
||||
:features,
|
||||
Keyword.put(previous_features, :verify_event_signatures, true)
|
||||
)
|
||||
|
||||
on_exit(fn ->
|
||||
Application.put_env(:parrhesia, :features, previous_features)
|
||||
end)
|
||||
|
||||
event =
|
||||
signed_event()
|
||||
|> Map.put("sig", String.duplicate("0", 128))
|
||||
|
||||
assert {:error, :invalid_signature} = EventValidator.validate(event)
|
||||
end
|
||||
|
||||
defp signed_event do
|
||||
{seckey, pubkey} = Secp256k1.keypair(:xonly)
|
||||
|
||||
event = %{
|
||||
"pubkey" => Base.encode16(pubkey, case: :lower),
|
||||
"created_at" => System.system_time(:second),
|
||||
"kind" => 1,
|
||||
"tags" => [["e", String.duplicate("a", 64), "wss://relay.example", "reply"]],
|
||||
"content" => "signed"
|
||||
}
|
||||
|
||||
id = EventValidator.compute_id(event)
|
||||
{:ok, id_bin} = Base.decode16(id, case: :lower)
|
||||
sig = Secp256k1.schnorr_sign(id_bin, seckey)
|
||||
|
||||
event
|
||||
|> Map.put("id", id)
|
||||
|> Map.put("sig", Base.encode16(sig, case: :lower))
|
||||
end
|
||||
end
|
||||
@@ -11,6 +11,24 @@ defmodule Parrhesia.Storage.Adapters.Postgres.EventsLifecycleTest do
|
||||
:ok
|
||||
end
|
||||
|
||||
test "event tags round-trip without truncation" do
|
||||
tagged_event =
|
||||
event(%{
|
||||
"kind" => 1,
|
||||
"tags" => [
|
||||
["e", String.duplicate("a", 64), "wss://relay.example", "reply"],
|
||||
["-"],
|
||||
["p", String.duplicate("b", 64), "wss://hint.example"]
|
||||
],
|
||||
"content" => "tag-roundtrip"
|
||||
})
|
||||
|
||||
assert {:ok, _event} = Events.put_event(%{}, tagged_event)
|
||||
assert {:ok, persisted_tagged_event} = Events.get_event(%{}, tagged_event["id"])
|
||||
|
||||
assert persisted_tagged_event["tags"] == tagged_event["tags"]
|
||||
end
|
||||
|
||||
test "delete_by_request tombstones owned target events" do
|
||||
target = event(%{"kind" => 1, "content" => "target"})
|
||||
assert {:ok, _event} = Events.put_event(%{}, target)
|
||||
@@ -26,6 +44,31 @@ defmodule Parrhesia.Storage.Adapters.Postgres.EventsLifecycleTest do
|
||||
assert {:ok, nil} = Events.get_event(%{}, target["id"])
|
||||
end
|
||||
|
||||
test "delete_by_request tombstones addressable targets referenced via a tags" do
|
||||
author = String.duplicate("4", 64)
|
||||
|
||||
target =
|
||||
event(%{
|
||||
"pubkey" => author,
|
||||
"kind" => 30_023,
|
||||
"tags" => [["d", "topic"]],
|
||||
"content" => "addressable-target"
|
||||
})
|
||||
|
||||
assert {:ok, _event} = Events.put_event(%{}, target)
|
||||
|
||||
delete_request =
|
||||
event(%{
|
||||
"pubkey" => author,
|
||||
"kind" => 5,
|
||||
"tags" => [["a", "30023:#{author}:topic"]],
|
||||
"content" => "delete-addressable"
|
||||
})
|
||||
|
||||
assert {:ok, 1} = Events.delete_by_request(%{}, delete_request)
|
||||
assert {:ok, nil} = Events.get_event(%{}, target["id"])
|
||||
end
|
||||
|
||||
test "vanish hard-deletes events authored by pubkey" do
|
||||
author = String.duplicate("3", 64)
|
||||
|
||||
|
||||
@@ -248,6 +248,26 @@ defmodule Parrhesia.Storage.Adapters.Postgres.EventsQueryCountTest do
|
||||
assert {:ok, 0} = Events.count(%{}, filters, requester_pubkeys: [])
|
||||
end
|
||||
|
||||
test "search treats % and _ as literals" do
|
||||
matching =
|
||||
persist_event(%{
|
||||
"kind" => 1,
|
||||
"content" => "literal 100%_match value"
|
||||
})
|
||||
|
||||
_other =
|
||||
persist_event(%{
|
||||
"kind" => 1,
|
||||
"content" => "literal 100Xmatch value"
|
||||
})
|
||||
|
||||
filters = [%{"kinds" => [1], "search" => "100%_match"}]
|
||||
|
||||
assert {:ok, [result]} = Events.query(%{}, filters, [])
|
||||
assert result["id"] == matching["id"]
|
||||
assert {:ok, 1} = Events.count(%{}, filters, [])
|
||||
end
|
||||
|
||||
test "query/3 combines search and media metadata tag filters" do
|
||||
media_hash = String.duplicate("a", 64)
|
||||
|
||||
|
||||
@@ -17,6 +17,12 @@ defmodule Parrhesia.Storage.ArchiverTest do
|
||||
|
||||
test "archive_sql builds insert-select statement" do
|
||||
assert Archiver.archive_sql("events_2026_03", "events_archive") ==
|
||||
"INSERT INTO events_archive SELECT * FROM events_2026_03;"
|
||||
~s(INSERT INTO "events_archive" SELECT * FROM "events_2026_03";)
|
||||
end
|
||||
|
||||
test "archive_sql rejects invalid SQL identifiers" do
|
||||
assert_raise ArgumentError, fn ->
|
||||
Archiver.archive_sql("events_default; DROP TABLE events", "events_archive")
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
@@ -37,7 +37,7 @@ defmodule Parrhesia.Web.ConformanceTest do
|
||||
|
||||
event = valid_event()
|
||||
|
||||
assert {:push, {:text, frame}, ^state} =
|
||||
assert {:push, {:text, frame}, _next_state} =
|
||||
Connection.handle_in({JSON.encode!(["EVENT", event]), [opcode: :text]}, state)
|
||||
|
||||
assert JSON.decode!(frame) == ["OK", event["id"], true, "ok: event stored"]
|
||||
@@ -54,7 +54,7 @@ defmodule Parrhesia.Web.ConformanceTest do
|
||||
"content" => "encrypted-welcome-payload"
|
||||
})
|
||||
|
||||
assert {:push, {:text, ok_frame}, ^state} =
|
||||
assert {:push, {:text, ok_frame}, _next_state} =
|
||||
Connection.handle_in(
|
||||
{JSON.encode!(["EVENT", wrapped_welcome]), [opcode: :text]},
|
||||
state
|
||||
@@ -64,7 +64,7 @@ defmodule Parrhesia.Web.ConformanceTest do
|
||||
|
||||
req_payload = JSON.encode!(["REQ", "sub-welcome", %{"kinds" => [1059], "#p" => [recipient]}])
|
||||
|
||||
assert {:push, restricted_frames, ^state} =
|
||||
assert {:push, restricted_frames, _next_state} =
|
||||
Connection.handle_in({req_payload, [opcode: :text]}, state)
|
||||
|
||||
decoded_restricted =
|
||||
@@ -106,7 +106,7 @@ defmodule Parrhesia.Web.ConformanceTest do
|
||||
"content" => Base.encode64("commit-envelope")
|
||||
})
|
||||
|
||||
assert {:push, {:text, commit_ok_frame}, ^state} =
|
||||
assert {:push, {:text, commit_ok_frame}, _next_state} =
|
||||
Connection.handle_in(
|
||||
{JSON.encode!(["EVENT", commit_event]), [opcode: :text]},
|
||||
state
|
||||
@@ -124,7 +124,7 @@ defmodule Parrhesia.Web.ConformanceTest do
|
||||
"content" => "encrypted-welcome-payload"
|
||||
})
|
||||
|
||||
assert {:push, {:text, welcome_ok_frame}, ^state} =
|
||||
assert {:push, {:text, welcome_ok_frame}, _next_state} =
|
||||
Connection.handle_in(
|
||||
{JSON.encode!(["EVENT", wrapped_welcome]), [opcode: :text]},
|
||||
state
|
||||
@@ -187,7 +187,7 @@ defmodule Parrhesia.Web.ConformanceTest do
|
||||
"content" => "encrypted-push"
|
||||
})
|
||||
|
||||
assert {:push, {:text, relay_ok_frame}, ^state} =
|
||||
assert {:push, {:text, relay_ok_frame}, _next_state} =
|
||||
Connection.handle_in(
|
||||
{JSON.encode!(["EVENT", relay_list_event]), [opcode: :text]},
|
||||
state
|
||||
@@ -200,7 +200,7 @@ defmodule Parrhesia.Web.ConformanceTest do
|
||||
"ok: event stored"
|
||||
]
|
||||
|
||||
assert {:push, {:text, trigger_ok_frame}, ^state} =
|
||||
assert {:push, {:text, trigger_ok_frame}, _next_state} =
|
||||
Connection.handle_in(
|
||||
{JSON.encode!(["EVENT", push_trigger]), [opcode: :text]},
|
||||
state
|
||||
@@ -232,11 +232,13 @@ defmodule Parrhesia.Web.ConformanceTest do
|
||||
end
|
||||
|
||||
defp valid_auth_event(challenge, pubkey) do
|
||||
relay_url = Parrhesia.Config.get([:relay_url])
|
||||
|
||||
event = %{
|
||||
"pubkey" => pubkey,
|
||||
"created_at" => System.system_time(:second),
|
||||
"kind" => 22_242,
|
||||
"tags" => [["challenge", challenge]],
|
||||
"tags" => [["challenge", challenge], ["relay", relay_url]],
|
||||
"content" => "",
|
||||
"sig" => String.duplicate("8", 128)
|
||||
}
|
||||
|
||||
@@ -34,7 +34,7 @@ defmodule Parrhesia.Web.ConnectionTest do
|
||||
|
||||
payload = JSON.encode!(["COUNT", "sub-count", %{"kinds" => [1]}])
|
||||
|
||||
assert {:push, {:text, response}, ^state} =
|
||||
assert {:push, {:text, response}, _next_state} =
|
||||
Connection.handle_in({payload, [opcode: :text]}, state)
|
||||
|
||||
assert ["COUNT", "sub-count", payload] = JSON.decode!(response)
|
||||
@@ -62,7 +62,7 @@ defmodule Parrhesia.Web.ConnectionTest do
|
||||
auth_event = valid_auth_event("wrong-challenge")
|
||||
payload = JSON.encode!(["AUTH", auth_event])
|
||||
|
||||
assert {:push, frames, ^state} = Connection.handle_in({payload, [opcode: :text]}, state)
|
||||
assert {:push, frames, _next_state} = Connection.handle_in({payload, [opcode: :text]}, state)
|
||||
|
||||
decoded = Enum.map(frames, fn {:text, frame} -> JSON.decode!(frame) end)
|
||||
|
||||
@@ -73,6 +73,38 @@ defmodule Parrhesia.Web.ConnectionTest do
|
||||
end)
|
||||
end
|
||||
|
||||
test "AUTH rejects relay tag mismatch" do
|
||||
state = connection_state(relay_url: "ws://localhost:4000/relay")
|
||||
|
||||
auth_event = valid_auth_event(state.auth_challenge, relay_url: "ws://attacker.example/relay")
|
||||
payload = JSON.encode!(["AUTH", auth_event])
|
||||
|
||||
assert {:push, frames, _next_state} = Connection.handle_in({payload, [opcode: :text]}, state)
|
||||
|
||||
decoded = Enum.map(frames, fn {:text, frame} -> JSON.decode!(frame) end)
|
||||
|
||||
assert ["OK", _, false, "invalid: AUTH relay tag mismatch"] =
|
||||
Enum.find(decoded, fn frame -> List.first(frame) == "OK" end)
|
||||
end
|
||||
|
||||
test "AUTH rejects stale events" do
|
||||
state = connection_state(auth_max_age_seconds: 600)
|
||||
|
||||
stale_auth_event =
|
||||
valid_auth_event(state.auth_challenge,
|
||||
created_at: System.system_time(:second) - 601
|
||||
)
|
||||
|
||||
payload = JSON.encode!(["AUTH", stale_auth_event])
|
||||
|
||||
assert {:push, frames, _next_state} = Connection.handle_in({payload, [opcode: :text]}, state)
|
||||
|
||||
decoded = Enum.map(frames, fn {:text, frame} -> JSON.decode!(frame) end)
|
||||
|
||||
assert ["OK", _, false, "invalid: AUTH event is too old"] =
|
||||
Enum.find(decoded, fn frame -> List.first(frame) == "OK" end)
|
||||
end
|
||||
|
||||
test "protected event is rejected unless authenticated" do
|
||||
state = connection_state()
|
||||
|
||||
@@ -83,7 +115,7 @@ defmodule Parrhesia.Web.ConnectionTest do
|
||||
|
||||
payload = JSON.encode!(["EVENT", event])
|
||||
|
||||
assert {:push, frames, ^state} = Connection.handle_in({payload, [opcode: :text]}, state)
|
||||
assert {:push, frames, _next_state} = Connection.handle_in({payload, [opcode: :text]}, state)
|
||||
|
||||
decoded = Enum.map(frames, fn {:text, frame} -> JSON.decode!(frame) end)
|
||||
|
||||
@@ -98,7 +130,8 @@ defmodule Parrhesia.Web.ConnectionTest do
|
||||
|
||||
req_payload = JSON.encode!(["REQ", "sub-445", %{"kinds" => [445]}])
|
||||
|
||||
assert {:push, frames, ^state} = Connection.handle_in({req_payload, [opcode: :text]}, state)
|
||||
assert {:push, frames, _next_state} =
|
||||
Connection.handle_in({req_payload, [opcode: :text]}, state)
|
||||
|
||||
decoded = Enum.map(frames, fn {:text, frame} -> JSON.decode!(frame) end)
|
||||
|
||||
@@ -112,19 +145,99 @@ defmodule Parrhesia.Web.ConnectionTest do
|
||||
event = valid_event()
|
||||
payload = JSON.encode!(["EVENT", event])
|
||||
|
||||
assert {:push, {:text, response}, ^state} =
|
||||
assert {:push, {:text, response}, _next_state} =
|
||||
Connection.handle_in({payload, [opcode: :text]}, state)
|
||||
|
||||
assert JSON.decode!(response) == ["OK", event["id"], true, "ok: event stored"]
|
||||
end
|
||||
|
||||
test "ephemeral events are accepted without persistence" do
|
||||
previous_policies = Application.get_env(:parrhesia, :policies, [])
|
||||
|
||||
Application.put_env(
|
||||
:parrhesia,
|
||||
:policies,
|
||||
Keyword.put(previous_policies, :accept_ephemeral_events, true)
|
||||
)
|
||||
|
||||
on_exit(fn ->
|
||||
Application.put_env(:parrhesia, :policies, previous_policies)
|
||||
end)
|
||||
|
||||
state = connection_state()
|
||||
|
||||
event = valid_event() |> Map.put("kind", 20_001) |> recalculate_event_id()
|
||||
payload = JSON.encode!(["EVENT", event])
|
||||
|
||||
assert {:push, {:text, response}, _next_state} =
|
||||
Connection.handle_in({payload, [opcode: :text]}, state)
|
||||
|
||||
assert JSON.decode!(response) == ["OK", event["id"], true, "ok: ephemeral event accepted"]
|
||||
assert {:ok, nil} = Parrhesia.Storage.events().get_event(%{}, event["id"])
|
||||
end
|
||||
|
||||
test "EVENT ingest enforces per-connection rate limits" do
|
||||
state = connection_state(max_event_ingest_per_window: 1, event_ingest_window_seconds: 60)
|
||||
|
||||
first_event = valid_event(%{"content" => "first"})
|
||||
second_event = valid_event(%{"content" => "second"})
|
||||
|
||||
assert {:push, {:text, first_response}, next_state} =
|
||||
Connection.handle_in({JSON.encode!(["EVENT", first_event]), [opcode: :text]}, state)
|
||||
|
||||
assert JSON.decode!(first_response) == ["OK", first_event["id"], true, "ok: event stored"]
|
||||
|
||||
assert {:push, {:text, second_response}, ^next_state} =
|
||||
Connection.handle_in(
|
||||
{JSON.encode!(["EVENT", second_event]), [opcode: :text]},
|
||||
next_state
|
||||
)
|
||||
|
||||
assert JSON.decode!(second_response) == [
|
||||
"OK",
|
||||
second_event["id"],
|
||||
false,
|
||||
"rate-limited: too many EVENT messages"
|
||||
]
|
||||
end
|
||||
|
||||
test "EVENT ingest enforces max event bytes" do
|
||||
state = connection_state(max_event_bytes: 128)
|
||||
|
||||
large_event =
|
||||
valid_event(%{"content" => String.duplicate("x", 256)})
|
||||
|> recalculate_event_id()
|
||||
|
||||
assert {:push, {:text, response}, _next_state} =
|
||||
Connection.handle_in({JSON.encode!(["EVENT", large_event]), [opcode: :text]}, state)
|
||||
|
||||
assert JSON.decode!(response) == [
|
||||
"OK",
|
||||
large_event["id"],
|
||||
false,
|
||||
"invalid: event exceeds max event size"
|
||||
]
|
||||
end
|
||||
|
||||
test "text frame size is rejected before JSON decoding" do
|
||||
state = connection_state(max_frame_bytes: 16)
|
||||
|
||||
assert {:push, {:text, response}, _next_state} =
|
||||
Connection.handle_in({String.duplicate("x", 17), [opcode: :text]}, state)
|
||||
|
||||
assert JSON.decode!(response) == [
|
||||
"NOTICE",
|
||||
"invalid: websocket frame exceeds max frame size"
|
||||
]
|
||||
end
|
||||
|
||||
test "invalid EVENT replies with OK false invalid prefix" do
|
||||
state = connection_state()
|
||||
|
||||
event = valid_event() |> Map.put("sig", "nope")
|
||||
payload = JSON.encode!(["EVENT", event])
|
||||
|
||||
assert {:push, {:text, response}, ^state} =
|
||||
assert {:push, {:text, response}, _next_state} =
|
||||
Connection.handle_in({payload, [opcode: :text]}, state)
|
||||
|
||||
assert JSON.decode!(response) == [
|
||||
@@ -147,7 +260,7 @@ defmodule Parrhesia.Web.ConnectionTest do
|
||||
|
||||
payload = JSON.encode!(["EVENT", event])
|
||||
|
||||
assert {:push, {:text, response}, ^state} =
|
||||
assert {:push, {:text, response}, _next_state} =
|
||||
Connection.handle_in({payload, [opcode: :text]}, state)
|
||||
|
||||
assert JSON.decode!(response) == [
|
||||
@@ -170,7 +283,7 @@ defmodule Parrhesia.Web.ConnectionTest do
|
||||
|
||||
payload = JSON.encode!(["EVENT", event])
|
||||
|
||||
assert {:push, {:text, response}, ^state} =
|
||||
assert {:push, {:text, response}, _next_state} =
|
||||
Connection.handle_in({payload, [opcode: :text]}, state)
|
||||
|
||||
assert JSON.decode!(response) == [
|
||||
@@ -204,7 +317,7 @@ defmodule Parrhesia.Web.ConnectionTest do
|
||||
|
||||
payload = JSON.encode!(["EVENT", event])
|
||||
|
||||
assert {:push, {:text, response}, ^state} =
|
||||
assert {:push, {:text, response}, _next_state} =
|
||||
Connection.handle_in({payload, [opcode: :text]}, state)
|
||||
|
||||
assert JSON.decode!(response) == [
|
||||
@@ -255,7 +368,7 @@ defmodule Parrhesia.Web.ConnectionTest do
|
||||
|
||||
payload = JSON.encode!(["EVENT", event])
|
||||
|
||||
assert {:push, {:text, response}, ^state} =
|
||||
assert {:push, {:text, response}, _next_state} =
|
||||
Connection.handle_in({payload, [opcode: :text]}, state)
|
||||
|
||||
assert JSON.decode!(response) == [
|
||||
@@ -306,12 +419,12 @@ defmodule Parrhesia.Web.ConnectionTest do
|
||||
|
||||
payload = JSON.encode!(["EVENT", event])
|
||||
|
||||
assert {:push, {:text, first_response}, ^state} =
|
||||
assert {:push, {:text, first_response}, _next_state} =
|
||||
Connection.handle_in({payload, [opcode: :text]}, state)
|
||||
|
||||
assert JSON.decode!(first_response) == ["OK", event["id"], true, "ok: event stored"]
|
||||
|
||||
assert {:push, {:text, second_response}, ^state} =
|
||||
assert {:push, {:text, second_response}, _next_state} =
|
||||
Connection.handle_in({payload, [opcode: :text]}, state)
|
||||
|
||||
assert JSON.decode!(second_response) == [
|
||||
@@ -327,7 +440,7 @@ defmodule Parrhesia.Web.ConnectionTest do
|
||||
|
||||
open_payload = JSON.encode!(["NEG-OPEN", "neg-1", %{"cursor" => 0}])
|
||||
|
||||
assert {:push, {:text, open_response}, ^state} =
|
||||
assert {:push, {:text, open_response}, _next_state} =
|
||||
Connection.handle_in({open_payload, [opcode: :text]}, state)
|
||||
|
||||
assert ["NEG-MSG", "neg-1", %{"status" => "open", "cursor" => 0}] =
|
||||
@@ -335,7 +448,7 @@ defmodule Parrhesia.Web.ConnectionTest do
|
||||
|
||||
close_payload = JSON.encode!(["NEG-CLOSE", "neg-1"])
|
||||
|
||||
assert {:push, {:text, close_response}, ^state} =
|
||||
assert {:push, {:text, close_response}, _next_state} =
|
||||
Connection.handle_in({close_payload, [opcode: :text]}, state)
|
||||
|
||||
assert JSON.decode!(close_response) == ["NEG-MSG", "neg-1", %{"status" => "closed"}]
|
||||
@@ -470,14 +583,15 @@ defmodule Parrhesia.Web.ConnectionTest do
|
||||
}
|
||||
end
|
||||
|
||||
defp valid_auth_event(challenge) do
|
||||
now = System.system_time(:second)
|
||||
defp valid_auth_event(challenge, opts \\ []) do
|
||||
now = Keyword.get(opts, :created_at, System.system_time(:second))
|
||||
relay_url = Keyword.get(opts, :relay_url, Parrhesia.Config.get([:relay_url]))
|
||||
|
||||
base = %{
|
||||
"pubkey" => String.duplicate("9", 64),
|
||||
"created_at" => now,
|
||||
"kind" => 22_242,
|
||||
"tags" => [["challenge", challenge]],
|
||||
"tags" => [["challenge", challenge], ["relay", relay_url]],
|
||||
"content" => "",
|
||||
"sig" => String.duplicate("8", 128)
|
||||
}
|
||||
@@ -510,7 +624,7 @@ defmodule Parrhesia.Web.ConnectionTest do
|
||||
end
|
||||
end
|
||||
|
||||
defp valid_event do
|
||||
defp valid_event(overrides \\ %{}) do
|
||||
base_event = %{
|
||||
"pubkey" => String.duplicate("1", 64),
|
||||
"created_at" => System.system_time(:second),
|
||||
@@ -520,6 +634,12 @@ defmodule Parrhesia.Web.ConnectionTest do
|
||||
"sig" => String.duplicate("3", 128)
|
||||
}
|
||||
|
||||
Map.put(base_event, "id", EventValidator.compute_id(base_event))
|
||||
base_event
|
||||
|> Map.merge(overrides)
|
||||
|> recalculate_event_id()
|
||||
end
|
||||
|
||||
defp recalculate_event_id(event) do
|
||||
Map.put(event, "id", EventValidator.compute_id(event))
|
||||
end
|
||||
end
|
||||
|
||||
@@ -43,13 +43,68 @@ defmodule Parrhesia.Web.RouterTest do
|
||||
assert 11 in body["supported_nips"]
|
||||
end
|
||||
|
||||
test "GET /metrics returns prometheus payload" do
|
||||
test "GET /metrics returns prometheus payload for private-network clients" do
|
||||
conn = conn(:get, "/metrics") |> Router.call([])
|
||||
|
||||
assert conn.status == 200
|
||||
assert get_resp_header(conn, "content-type") == ["text/plain; charset=utf-8"]
|
||||
end
|
||||
|
||||
test "GET /metrics denies public-network clients by default" do
|
||||
conn = conn(:get, "/metrics")
|
||||
conn = %{conn | remote_ip: {8, 8, 8, 8}}
|
||||
conn = Router.call(conn, [])
|
||||
|
||||
assert conn.status == 403
|
||||
assert conn.resp_body == "forbidden"
|
||||
end
|
||||
|
||||
test "GET /metrics can be disabled on the main endpoint" do
|
||||
previous_metrics = Application.get_env(:parrhesia, :metrics, [])
|
||||
|
||||
Application.put_env(
|
||||
:parrhesia,
|
||||
:metrics,
|
||||
Keyword.put(previous_metrics, :enabled_on_main_endpoint, false)
|
||||
)
|
||||
|
||||
on_exit(fn ->
|
||||
Application.put_env(:parrhesia, :metrics, previous_metrics)
|
||||
end)
|
||||
|
||||
conn = conn(:get, "/metrics") |> Router.call([])
|
||||
|
||||
assert conn.status == 404
|
||||
assert conn.resp_body == "not found"
|
||||
end
|
||||
|
||||
test "GET /metrics accepts bearer auth when configured" do
|
||||
previous_metrics = Application.get_env(:parrhesia, :metrics, [])
|
||||
|
||||
Application.put_env(
|
||||
:parrhesia,
|
||||
:metrics,
|
||||
previous_metrics
|
||||
|> Keyword.put(:private_networks_only, false)
|
||||
|> Keyword.put(:auth_token, "secret-token")
|
||||
)
|
||||
|
||||
on_exit(fn ->
|
||||
Application.put_env(:parrhesia, :metrics, previous_metrics)
|
||||
end)
|
||||
|
||||
denied_conn = conn(:get, "/metrics") |> Router.call([])
|
||||
|
||||
assert denied_conn.status == 403
|
||||
|
||||
allowed_conn =
|
||||
conn(:get, "/metrics")
|
||||
|> put_req_header("authorization", "Bearer secret-token")
|
||||
|> Router.call([])
|
||||
|
||||
assert allowed_conn.status == 200
|
||||
end
|
||||
|
||||
test "POST /management requires authorization" do
|
||||
conn =
|
||||
conn(:post, "/management", JSON.encode!(%{"method" => "ping", "params" => %{}}))
|
||||
|
||||
Reference in New Issue
Block a user