Compare commits
8 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| b86b5db78c | |||
| 5577445e80 | |||
| 1a4572013d | |||
| 5c2fadc28e | |||
| 7faf8c84c8 | |||
| 889d630c12 | |||
| 19664ac56c | |||
| 708e26e4f4 |
20
.env.example
Normal file
20
.env.example
Normal file
@@ -0,0 +1,20 @@
|
||||
PARRHESIA_IMAGE=parrhesia:latest
|
||||
PARRHESIA_HOST_PORT=4000
|
||||
|
||||
POSTGRES_DB=parrhesia
|
||||
POSTGRES_USER=parrhesia
|
||||
POSTGRES_PASSWORD=parrhesia
|
||||
|
||||
DATABASE_URL=ecto://parrhesia:parrhesia@db:5432/parrhesia
|
||||
POOL_SIZE=20
|
||||
|
||||
# Optional runtime overrides:
|
||||
# PARRHESIA_RELAY_URL=ws://localhost:4000/relay
|
||||
# PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES=false
|
||||
# PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_READS=false
|
||||
# PARRHESIA_POLICIES_MIN_POW_DIFFICULTY=0
|
||||
# PARRHESIA_FEATURES_VERIFY_EVENT_SIGNATURES=true
|
||||
# PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT=true
|
||||
# PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY=true
|
||||
# PARRHESIA_METRICS_AUTH_TOKEN=
|
||||
# PARRHESIA_EXTRA_CONFIG=/config/parrhesia.runtime.exs
|
||||
117
.github/workflows/ci.yaml
vendored
Normal file
117
.github/workflows/ci.yaml
vendored
Normal file
@@ -0,0 +1,117 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: ["**"]
|
||||
pull_request:
|
||||
branches: ["**"]
|
||||
|
||||
env:
|
||||
MIX_ENV: test
|
||||
MIX_OS_DEPS_COMPILE_PARTITION_COUNT: 8
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
test:
|
||||
name: ${{ matrix.name }}
|
||||
runs-on: ubuntu-24.04
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- name: Test (OTP 27.2 / Elixir 1.18.2)
|
||||
otp: "27.2"
|
||||
elixir: "1.18.2"
|
||||
main: false
|
||||
- name: Test (OTP 28.4 / Elixir 1.19.4 + Marmot E2E)
|
||||
otp: "28.4"
|
||||
elixir: "1.19.4"
|
||||
main: true
|
||||
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:16-alpine
|
||||
env:
|
||||
POSTGRES_USER: postgres
|
||||
POSTGRES_PASSWORD: postgres
|
||||
POSTGRES_DB: app_test
|
||||
ports:
|
||||
- 5432:5432
|
||||
options: >-
|
||||
--health-cmd "pg_isready -U postgres"
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 5
|
||||
|
||||
env:
|
||||
PGHOST: localhost
|
||||
PGPORT: 5432
|
||||
PGUSER: postgres
|
||||
PGPASSWORD: postgres
|
||||
PGDATABASE: app_test
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Set up Elixir + OTP
|
||||
uses: erlef/setup-beam@v1
|
||||
with:
|
||||
otp-version: ${{ matrix.otp }}
|
||||
elixir-version: ${{ matrix.elixir }}
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 24
|
||||
|
||||
# Cache deps/ directory — keyed on mix.lock
|
||||
- name: Cache Mix deps
|
||||
uses: actions/cache@v4
|
||||
id: deps-cache
|
||||
with:
|
||||
path: deps
|
||||
key: ${{ runner.os }}-mix-deps-${{ hashFiles('mix.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-mix-deps-
|
||||
|
||||
# Cache _build/ — keyed on mix.lock + OTP/Elixir versions
|
||||
- name: Cache _build
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: _build
|
||||
key: ${{ runner.os }}-mix-build-${{ matrix.otp }}-${{ matrix.elixir }}-${{ hashFiles('mix.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-mix-build-${{ matrix.otp }}-${{ matrix.elixir }}-
|
||||
|
||||
- name: Install Mix dependencies
|
||||
if: steps.deps-cache.outputs.cache-hit != 'true'
|
||||
run: mix deps.get
|
||||
|
||||
- name: Compile (warnings as errors)
|
||||
if: ${{ matrix.main }}
|
||||
run: mix compile --warnings-as-errors
|
||||
|
||||
- name: Check formatting
|
||||
if: ${{ matrix.main }}
|
||||
run: mix format --check-formatted
|
||||
|
||||
- name: Credo
|
||||
if: ${{ matrix.main }}
|
||||
run: mix credo --strict --all
|
||||
|
||||
- name: Check for unused locked deps
|
||||
if: ${{ matrix.main }}
|
||||
run: |
|
||||
mix deps.unlock --unused
|
||||
git diff --exit-code -- mix.lock
|
||||
|
||||
- name: Run tests
|
||||
run: mix test --color
|
||||
|
||||
- name: Run Marmot E2E tests
|
||||
run: mix test.marmot_e2e
|
||||
171
.github/workflows/release.yaml
vendored
Normal file
171
.github/workflows/release.yaml
vendored
Normal file
@@ -0,0 +1,171 @@
|
||||
name: Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "v*.*.*"
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
push:
|
||||
description: "Push image to GHCR?"
|
||||
required: false
|
||||
default: "true"
|
||||
type: choice
|
||||
options: ["true", "false"]
|
||||
|
||||
env:
|
||||
REGISTRY: ghcr.io
|
||||
IMAGE_NAME: ${{ github.repository }}
|
||||
FLAKE_OUTPUT: packages.x86_64-linux.dockerImage
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
|
||||
jobs:
|
||||
test:
|
||||
name: Release Gate
|
||||
runs-on: ubuntu-24.04
|
||||
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:16-alpine
|
||||
env:
|
||||
POSTGRES_USER: postgres
|
||||
POSTGRES_PASSWORD: postgres
|
||||
POSTGRES_DB: app_test
|
||||
ports:
|
||||
- 5432:5432
|
||||
options: >-
|
||||
--health-cmd "pg_isready -U postgres"
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 5
|
||||
|
||||
env:
|
||||
MIX_ENV: test
|
||||
PGHOST: localhost
|
||||
PGPORT: 5432
|
||||
PGUSER: postgres
|
||||
PGPASSWORD: postgres
|
||||
PGDATABASE: app_test
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
submodules: recursive
|
||||
|
||||
- name: Set up Elixir + OTP
|
||||
uses: erlef/setup-beam@v1
|
||||
with:
|
||||
otp-version: "28.4"
|
||||
elixir-version: "1.19.4"
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 24
|
||||
|
||||
- name: Cache Mix deps
|
||||
uses: actions/cache@v4
|
||||
id: deps-cache
|
||||
with:
|
||||
path: deps
|
||||
key: ${{ runner.os }}-mix-deps-${{ hashFiles('mix.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-mix-deps-
|
||||
|
||||
- name: Cache _build
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: _build
|
||||
key: ${{ runner.os }}-mix-build-28.4-1.19.4-${{ hashFiles('mix.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-mix-build-28.4-1.19.4-
|
||||
|
||||
- name: Install Mix dependencies
|
||||
if: steps.deps-cache.outputs.cache-hit != 'true'
|
||||
run: mix deps.get
|
||||
|
||||
- name: Compile
|
||||
run: mix compile --warnings-as-errors
|
||||
|
||||
- name: Check formatting
|
||||
run: mix format --check-formatted
|
||||
|
||||
- name: Credo
|
||||
run: mix credo --strict --all
|
||||
|
||||
- name: Run tests
|
||||
run: mix test --color
|
||||
|
||||
- name: Run Marmot E2E
|
||||
run: mix test.marmot_e2e
|
||||
|
||||
- name: Check for unused locked deps
|
||||
run: |
|
||||
mix deps.unlock --unused
|
||||
git diff --exit-code -- mix.lock
|
||||
|
||||
build-and-push:
|
||||
name: Build and publish image
|
||||
runs-on: ubuntu-24.04
|
||||
needs: test
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install Nix
|
||||
uses: DeterminateSystems/nix-installer-action@main
|
||||
with:
|
||||
extra-conf: |
|
||||
experimental-features = nix-command flakes
|
||||
substituters = https://cache.nixos.org
|
||||
trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=
|
||||
|
||||
- name: Magic Nix Cache
|
||||
uses: DeterminateSystems/magic-nix-cache-action@main
|
||||
|
||||
- name: Extract image metadata
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
tags: |
|
||||
type=semver,pattern={{version}}
|
||||
type=semver,pattern={{major}}.{{minor}}
|
||||
type=raw,value=latest,enable={{is_default_branch}}
|
||||
type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/v') }}
|
||||
type=sha,prefix=sha-,format=short
|
||||
|
||||
- name: Build Docker image with Nix
|
||||
id: build
|
||||
run: |
|
||||
nix build .#${{ env.FLAKE_OUTPUT }} --out-link ./docker-image-result
|
||||
echo "archive_path=$(readlink -f ./docker-image-result)" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Push image to GHCR
|
||||
env:
|
||||
TAGS: ${{ steps.meta.outputs.tags }}
|
||||
SHOULD_PUSH: ${{ github.event.inputs.push != 'false' }}
|
||||
ARCHIVE_PATH: ${{ steps.build.outputs.archive_path }}
|
||||
run: |
|
||||
if [ "$SHOULD_PUSH" != "true" ]; then
|
||||
echo "Skipping push"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
IMAGE_ARCHIVE="docker-archive:${ARCHIVE_PATH}"
|
||||
|
||||
while IFS= read -r TAG; do
|
||||
if [ -n "$TAG" ]; then
|
||||
echo "Pushing $TAG"
|
||||
nix run nixpkgs#skopeo -- copy \
|
||||
--dest-creds "${{ github.actor }}:${{ secrets.GITHUB_TOKEN }}" \
|
||||
"$IMAGE_ARCHIVE" \
|
||||
"docker://$TAG"
|
||||
fi
|
||||
done <<< "$TAGS"
|
||||
22
BENCHMARK.md
22
BENCHMARK.md
@@ -1,6 +1,6 @@
|
||||
Running 2 comparison run(s)...
|
||||
Versions:
|
||||
parrhesia 0.3.0
|
||||
parrhesia 0.4.0
|
||||
strfry 1.0.4 (nixpkgs)
|
||||
nostr-rs-relay 0.9.0
|
||||
nostr-bench 0.4.0
|
||||
@@ -16,18 +16,18 @@ Versions:
|
||||
=== Bench comparison (averages) ===
|
||||
metric parrhesia strfry nostr-rs-relay strfry/parrhesia nostr-rs/parrhesia
|
||||
-------------------------- --------- -------- -------------- ---------------- ------------------
|
||||
connect avg latency (ms) ↓ 13.50 3.00 2.00 0.22x 0.15x
|
||||
connect max latency (ms) ↓ 22.50 5.50 3.00 0.24x 0.13x
|
||||
echo throughput (TPS) ↑ 80385.00 61673.00 164516.00 0.77x 2.05x
|
||||
echo throughput (MiB/s) ↑ 44.00 34.45 90.10 0.78x 2.05x
|
||||
event throughput (TPS) ↑ 2000.00 3404.50 788.00 1.70x 0.39x
|
||||
event throughput (MiB/s) ↑ 1.30 2.20 0.50 1.69x 0.38x
|
||||
req throughput (TPS) ↑ 3664.00 1808.50 877.50 0.49x 0.24x
|
||||
req throughput (MiB/s) ↑ 20.75 11.75 2.45 0.57x 0.12x
|
||||
connect avg latency (ms) ↓ 10.50 4.00 3.00 0.38x 0.29x
|
||||
connect max latency (ms) ↓ 19.50 7.50 4.00 0.38x 0.21x
|
||||
echo throughput (TPS) ↑ 78520.00 60353.00 164420.50 0.77x 2.09x
|
||||
echo throughput (MiB/s) ↑ 43.00 33.75 90.05 0.78x 2.09x
|
||||
event throughput (TPS) ↑ 1919.50 3520.50 781.00 1.83x 0.41x
|
||||
event throughput (MiB/s) ↑ 1.25 2.25 0.50 1.80x 0.40x
|
||||
req throughput (TPS) ↑ 4608.50 1809.50 875.50 0.39x 0.19x
|
||||
req throughput (MiB/s) ↑ 26.20 11.75 2.40 0.45x 0.09x
|
||||
|
||||
Legend: ↑ higher is better, ↓ lower is better.
|
||||
Ratio columns are server/parrhesia (for ↓ metrics, <1.00x means that server is faster).
|
||||
|
||||
Run details:
|
||||
run 1: parrhesia(echo_tps=81402, event_tps=1979, req_tps=3639, connect_avg_ms=14) | strfry(echo_tps=61745, event_tps=3457, req_tps=1818, connect_avg_ms=3) | nostr-rs-relay(echo_tps=159974, event_tps=784, req_tps=905, connect_avg_ms=2)
|
||||
run 2: parrhesia(echo_tps=79368, event_tps=2021, req_tps=3689, connect_avg_ms=13) | strfry(echo_tps=61601, event_tps=3352, req_tps=1799, connect_avg_ms=3) | nostr-rs-relay(echo_tps=169058, event_tps=792, req_tps=850, connect_avg_ms=2)
|
||||
run 1: parrhesia(echo_tps=78892, event_tps=1955, req_tps=4671, connect_avg_ms=10) | strfry(echo_tps=59132, event_tps=3462, req_tps=1806, connect_avg_ms=4) | nostr-rs-relay(echo_tps=159714, event_tps=785, req_tps=873, connect_avg_ms=3)
|
||||
run 2: parrhesia(echo_tps=78148, event_tps=1884, req_tps=4546, connect_avg_ms=11) | strfry(echo_tps=61574, event_tps=3579, req_tps=1813, connect_avg_ms=4) | nostr-rs-relay(echo_tps=169127, event_tps=777, req_tps=878, connect_avg_ms=3)
|
||||
|
||||
350
README.md
350
README.md
@@ -1,5 +1,7 @@
|
||||
# Parrhesia
|
||||
|
||||
<img alt="Parrhesia Logo" src="./docs/logo.svg" width="150" align="right">
|
||||
|
||||
Parrhesia is a Nostr relay server written in Elixir/OTP with PostgreSQL storage.
|
||||
|
||||
It exposes:
|
||||
@@ -20,6 +22,7 @@ Current `supported_nips` list:
|
||||
- Elixir `~> 1.19`
|
||||
- Erlang/OTP 28
|
||||
- PostgreSQL (18 used in the dev environment; 16+ recommended)
|
||||
- Docker or Podman plus Docker Compose support if you want to run the published container image
|
||||
|
||||
---
|
||||
|
||||
@@ -45,12 +48,12 @@ mix setup
|
||||
mix run --no-halt
|
||||
```
|
||||
|
||||
Server listens on `http://localhost:4000` by default.
|
||||
Server listens on `http://localhost:4413` by default.
|
||||
|
||||
WebSocket clients should connect to:
|
||||
|
||||
```text
|
||||
ws://localhost:4000/relay
|
||||
ws://localhost:4413/relay
|
||||
```
|
||||
|
||||
### Useful endpoints
|
||||
@@ -65,78 +68,189 @@ ws://localhost:4000/relay
|
||||
|
||||
## Production configuration
|
||||
|
||||
### Minimal setup
|
||||
|
||||
Before a Nostr client can publish its first event successfully, make sure these pieces are in place:
|
||||
|
||||
1. PostgreSQL is reachable from Parrhesia.
|
||||
Set `DATABASE_URL` and create/migrate the database with `Parrhesia.Release.migrate()` or `mix ecto.migrate`.
|
||||
|
||||
2. Parrhesia is reachable behind your reverse proxy.
|
||||
Parrhesia itself listens on plain HTTP on port `4413`, and the reverse proxy is expected to terminate TLS and forward WebSocket traffic to `/relay`.
|
||||
|
||||
3. `:relay_url` matches the public relay URL clients should use.
|
||||
Set `PARRHESIA_RELAY_URL` to the public relay URL exposed by the reverse proxy.
|
||||
In the normal deployment model, this should be your public `wss://.../relay` URL.
|
||||
|
||||
4. The database schema is migrated before starting normal traffic.
|
||||
The app image does not auto-run migrations on boot.
|
||||
|
||||
That is the actual minimum. With default policy settings, writes do not require auth, event signatures are verified, and no extra Nostr-specific bootstrap step is needed before posting ordinary events.
|
||||
|
||||
In `prod`, these environment variables are used:
|
||||
|
||||
- `DATABASE_URL` (**required**), e.g. `ecto://USER:PASS@HOST/parrhesia_prod`
|
||||
- `POOL_SIZE` (optional, default `10`)
|
||||
- `PORT` (optional, default `4000`)
|
||||
- `POOL_SIZE` (optional, default `32`)
|
||||
- `PORT` (optional, default `4413`)
|
||||
- `PARRHESIA_*` runtime overrides for relay config, limits, policies, metrics, and features
|
||||
- `PARRHESIA_EXTRA_CONFIG` (optional path to an extra runtime config file)
|
||||
|
||||
`config/runtime.exs` reads these values at runtime in production releases.
|
||||
|
||||
### Typical relay config
|
||||
### Runtime env naming
|
||||
|
||||
Add/override in config files (for example in `config/prod.exs` or a `config/runtime.exs`):
|
||||
For runtime overrides, use the `PARRHESIA_...` prefix:
|
||||
|
||||
```elixir
|
||||
config :parrhesia, Parrhesia.Web.Endpoint,
|
||||
ip: {0, 0, 0, 0},
|
||||
port: 4000
|
||||
- `PARRHESIA_RELAY_URL`
|
||||
- `PARRHESIA_MODERATION_CACHE_ENABLED`
|
||||
- `PARRHESIA_ENABLE_EXPIRATION_WORKER`
|
||||
- `PARRHESIA_LIMITS_*`
|
||||
- `PARRHESIA_POLICIES_*`
|
||||
- `PARRHESIA_METRICS_*`
|
||||
- `PARRHESIA_RETENTION_*`
|
||||
- `PARRHESIA_FEATURES_*`
|
||||
- `PARRHESIA_METRICS_ENDPOINT_*`
|
||||
|
||||
# Optional dedicated metrics listener (keep this internal)
|
||||
config :parrhesia, Parrhesia.Web.MetricsEndpoint,
|
||||
enabled: true,
|
||||
ip: {127, 0, 0, 1},
|
||||
port: 9568
|
||||
Examples:
|
||||
|
||||
config :parrhesia,
|
||||
metrics: [
|
||||
enabled_on_main_endpoint: false,
|
||||
public: false,
|
||||
private_networks_only: true,
|
||||
allowed_cidrs: [],
|
||||
auth_token: nil
|
||||
],
|
||||
limits: [
|
||||
max_frame_bytes: 1_048_576,
|
||||
max_event_bytes: 262_144,
|
||||
max_filters_per_req: 16,
|
||||
max_filter_limit: 500,
|
||||
max_subscriptions_per_connection: 32,
|
||||
max_event_future_skew_seconds: 900,
|
||||
max_outbound_queue: 256,
|
||||
outbound_drain_batch_size: 64,
|
||||
outbound_overflow_strategy: :close
|
||||
],
|
||||
policies: [
|
||||
auth_required_for_writes: false,
|
||||
auth_required_for_reads: false,
|
||||
min_pow_difficulty: 0,
|
||||
accept_ephemeral_events: true,
|
||||
mls_group_event_ttl_seconds: 300,
|
||||
marmot_require_h_for_group_queries: true,
|
||||
marmot_group_max_h_values_per_filter: 32,
|
||||
marmot_group_max_query_window_seconds: 2_592_000,
|
||||
marmot_media_max_imeta_tags_per_event: 8,
|
||||
marmot_media_max_field_value_bytes: 1024,
|
||||
marmot_media_max_url_bytes: 2048,
|
||||
marmot_media_allowed_mime_prefixes: [],
|
||||
marmot_media_reject_mip04_v1: true,
|
||||
marmot_push_server_pubkeys: [],
|
||||
marmot_push_max_relay_tags: 16,
|
||||
marmot_push_max_payload_bytes: 65_536,
|
||||
marmot_push_max_trigger_age_seconds: 120,
|
||||
marmot_push_require_expiration: true,
|
||||
marmot_push_max_expiration_window_seconds: 120,
|
||||
marmot_push_max_server_recipients: 1
|
||||
],
|
||||
features: [
|
||||
nip_45_count: true,
|
||||
nip_50_search: true,
|
||||
nip_77_negentropy: true,
|
||||
marmot_push_notifications: false
|
||||
]
|
||||
```bash
|
||||
export PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES=true
|
||||
export PARRHESIA_FEATURES_VERIFY_EVENT_SIGNATURES=true
|
||||
export PARRHESIA_METRICS_ALLOWED_CIDRS="10.0.0.0/8,192.168.0.0/16"
|
||||
export PARRHESIA_LIMITS_OUTBOUND_OVERFLOW_STRATEGY=drop_oldest
|
||||
```
|
||||
|
||||
For settings that are awkward to express as env vars, mount an extra config file and set `PARRHESIA_EXTRA_CONFIG` to its path inside the container.
|
||||
|
||||
### Config reference
|
||||
|
||||
CSV env vars use comma-separated values. Boolean env vars accept `1/0`, `true/false`, `yes/no`, or `on/off`.
|
||||
|
||||
#### Top-level `:parrhesia`
|
||||
|
||||
| Atom key | ENV | Default | Notes |
|
||||
| --- | --- | --- | --- |
|
||||
| `:relay_url` | `PARRHESIA_RELAY_URL` | `ws://localhost:4413/relay` | Advertised relay URL and auth relay tag target |
|
||||
| `:moderation_cache_enabled` | `PARRHESIA_MODERATION_CACHE_ENABLED` | `true` | Toggle moderation cache |
|
||||
| `:enable_expiration_worker` | `PARRHESIA_ENABLE_EXPIRATION_WORKER` | `true` | Toggle background expiration worker |
|
||||
| `:limits` | `PARRHESIA_LIMITS_*` | see table below | Runtime override group |
|
||||
| `:policies` | `PARRHESIA_POLICIES_*` | see table below | Runtime override group |
|
||||
| `:metrics` | `PARRHESIA_METRICS_*` | see table below | Runtime override group |
|
||||
| `:retention` | `PARRHESIA_RETENTION_*` | see table below | Partition lifecycle and pruning policy |
|
||||
| `:features` | `PARRHESIA_FEATURES_*` | see table below | Runtime override group |
|
||||
| `:storage.events` | `-` | `Parrhesia.Storage.Adapters.Postgres.Events` | Config-file override only |
|
||||
| `:storage.moderation` | `-` | `Parrhesia.Storage.Adapters.Postgres.Moderation` | Config-file override only |
|
||||
| `:storage.groups` | `-` | `Parrhesia.Storage.Adapters.Postgres.Groups` | Config-file override only |
|
||||
| `:storage.admin` | `-` | `Parrhesia.Storage.Adapters.Postgres.Admin` | Config-file override only |
|
||||
|
||||
#### `Parrhesia.Repo`
|
||||
|
||||
| Atom key | ENV | Default | Notes |
|
||||
| --- | --- | --- | --- |
|
||||
| `:url` | `DATABASE_URL` | required | Example: `ecto://USER:PASS@HOST/DATABASE` |
|
||||
| `:pool_size` | `POOL_SIZE` | `32` | DB connection pool size |
|
||||
| `:queue_target` | `DB_QUEUE_TARGET_MS` | `1000` | Ecto queue target in ms |
|
||||
| `:queue_interval` | `DB_QUEUE_INTERVAL_MS` | `5000` | Ecto queue interval in ms |
|
||||
| `:types` | `-` | `Parrhesia.PostgresTypes` | Internal config-file setting |
|
||||
|
||||
#### `Parrhesia.Web.Endpoint`
|
||||
|
||||
| Atom key | ENV | Default | Notes |
|
||||
| --- | --- | --- | --- |
|
||||
| `:port` | `PORT` | `4413` | Main HTTP/WebSocket listener |
|
||||
|
||||
#### `Parrhesia.Web.MetricsEndpoint`
|
||||
|
||||
| Atom key | ENV | Default | Notes |
|
||||
| --- | --- | --- | --- |
|
||||
| `:enabled` | `PARRHESIA_METRICS_ENDPOINT_ENABLED` | `false` | Enables dedicated metrics listener |
|
||||
| `:ip` | `PARRHESIA_METRICS_ENDPOINT_IP` | `127.0.0.1` | IPv4 only |
|
||||
| `:port` | `PARRHESIA_METRICS_ENDPOINT_PORT` | `9568` | Dedicated metrics port |
|
||||
|
||||
#### `:limits`
|
||||
|
||||
| Atom key | ENV | Default |
|
||||
| --- | --- | --- |
|
||||
| `:max_frame_bytes` | `PARRHESIA_LIMITS_MAX_FRAME_BYTES` | `1048576` |
|
||||
| `:max_event_bytes` | `PARRHESIA_LIMITS_MAX_EVENT_BYTES` | `262144` |
|
||||
| `:max_filters_per_req` | `PARRHESIA_LIMITS_MAX_FILTERS_PER_REQ` | `16` |
|
||||
| `:max_filter_limit` | `PARRHESIA_LIMITS_MAX_FILTER_LIMIT` | `500` |
|
||||
| `:max_subscriptions_per_connection` | `PARRHESIA_LIMITS_MAX_SUBSCRIPTIONS_PER_CONNECTION` | `32` |
|
||||
| `:max_event_future_skew_seconds` | `PARRHESIA_LIMITS_MAX_EVENT_FUTURE_SKEW_SECONDS` | `900` |
|
||||
| `:max_event_ingest_per_window` | `PARRHESIA_LIMITS_MAX_EVENT_INGEST_PER_WINDOW` | `120` |
|
||||
| `:event_ingest_window_seconds` | `PARRHESIA_LIMITS_EVENT_INGEST_WINDOW_SECONDS` | `1` |
|
||||
| `:auth_max_age_seconds` | `PARRHESIA_LIMITS_AUTH_MAX_AGE_SECONDS` | `600` |
|
||||
| `:max_outbound_queue` | `PARRHESIA_LIMITS_MAX_OUTBOUND_QUEUE` | `256` |
|
||||
| `:outbound_drain_batch_size` | `PARRHESIA_LIMITS_OUTBOUND_DRAIN_BATCH_SIZE` | `64` |
|
||||
| `:outbound_overflow_strategy` | `PARRHESIA_LIMITS_OUTBOUND_OVERFLOW_STRATEGY` | `:close` |
|
||||
| `:max_negentropy_payload_bytes` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_PAYLOAD_BYTES` | `4096` |
|
||||
| `:max_negentropy_sessions_per_connection` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_SESSIONS_PER_CONNECTION` | `8` |
|
||||
| `:max_negentropy_total_sessions` | `PARRHESIA_LIMITS_MAX_NEGENTROPY_TOTAL_SESSIONS` | `10000` |
|
||||
| `:negentropy_session_idle_timeout_seconds` | `PARRHESIA_LIMITS_NEGENTROPY_SESSION_IDLE_TIMEOUT_SECONDS` | `60` |
|
||||
| `:negentropy_session_sweep_interval_seconds` | `PARRHESIA_LIMITS_NEGENTROPY_SESSION_SWEEP_INTERVAL_SECONDS` | `10` |
|
||||
|
||||
#### `:policies`
|
||||
|
||||
| Atom key | ENV | Default |
|
||||
| --- | --- | --- |
|
||||
| `:auth_required_for_writes` | `PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES` | `false` |
|
||||
| `:auth_required_for_reads` | `PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_READS` | `false` |
|
||||
| `:min_pow_difficulty` | `PARRHESIA_POLICIES_MIN_POW_DIFFICULTY` | `0` |
|
||||
| `:accept_ephemeral_events` | `PARRHESIA_POLICIES_ACCEPT_EPHEMERAL_EVENTS` | `true` |
|
||||
| `:mls_group_event_ttl_seconds` | `PARRHESIA_POLICIES_MLS_GROUP_EVENT_TTL_SECONDS` | `300` |
|
||||
| `:marmot_require_h_for_group_queries` | `PARRHESIA_POLICIES_MARMOT_REQUIRE_H_FOR_GROUP_QUERIES` | `true` |
|
||||
| `:marmot_group_max_h_values_per_filter` | `PARRHESIA_POLICIES_MARMOT_GROUP_MAX_H_VALUES_PER_FILTER` | `32` |
|
||||
| `:marmot_group_max_query_window_seconds` | `PARRHESIA_POLICIES_MARMOT_GROUP_MAX_QUERY_WINDOW_SECONDS` | `2592000` |
|
||||
| `:marmot_media_max_imeta_tags_per_event` | `PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_IMETA_TAGS_PER_EVENT` | `8` |
|
||||
| `:marmot_media_max_field_value_bytes` | `PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_FIELD_VALUE_BYTES` | `1024` |
|
||||
| `:marmot_media_max_url_bytes` | `PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_URL_BYTES` | `2048` |
|
||||
| `:marmot_media_allowed_mime_prefixes` | `PARRHESIA_POLICIES_MARMOT_MEDIA_ALLOWED_MIME_PREFIXES` | `[]` |
|
||||
| `:marmot_media_reject_mip04_v1` | `PARRHESIA_POLICIES_MARMOT_MEDIA_REJECT_MIP04_V1` | `true` |
|
||||
| `:marmot_push_server_pubkeys` | `PARRHESIA_POLICIES_MARMOT_PUSH_SERVER_PUBKEYS` | `[]` |
|
||||
| `:marmot_push_max_relay_tags` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_RELAY_TAGS` | `16` |
|
||||
| `:marmot_push_max_payload_bytes` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_PAYLOAD_BYTES` | `65536` |
|
||||
| `:marmot_push_max_trigger_age_seconds` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_TRIGGER_AGE_SECONDS` | `120` |
|
||||
| `:marmot_push_require_expiration` | `PARRHESIA_POLICIES_MARMOT_PUSH_REQUIRE_EXPIRATION` | `true` |
|
||||
| `:marmot_push_max_expiration_window_seconds` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_EXPIRATION_WINDOW_SECONDS` | `120` |
|
||||
| `:marmot_push_max_server_recipients` | `PARRHESIA_POLICIES_MARMOT_PUSH_MAX_SERVER_RECIPIENTS` | `1` |
|
||||
| `:management_auth_required` | `PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED` | `true` |
|
||||
|
||||
#### `:metrics`
|
||||
|
||||
| Atom key | ENV | Default |
|
||||
| --- | --- | --- |
|
||||
| `:enabled_on_main_endpoint` | `PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT` | `true` |
|
||||
| `:public` | `PARRHESIA_METRICS_PUBLIC` | `false` |
|
||||
| `:private_networks_only` | `PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY` | `true` |
|
||||
| `:allowed_cidrs` | `PARRHESIA_METRICS_ALLOWED_CIDRS` | `[]` |
|
||||
| `:auth_token` | `PARRHESIA_METRICS_AUTH_TOKEN` | `nil` |
|
||||
|
||||
#### `:retention`
|
||||
|
||||
| Atom key | ENV | Default | Notes |
|
||||
| --- | --- | --- | --- |
|
||||
| `:check_interval_hours` | `PARRHESIA_RETENTION_CHECK_INTERVAL_HOURS` | `24` | Partition maintenance + pruning cadence |
|
||||
| `:months_ahead` | `PARRHESIA_RETENTION_MONTHS_AHEAD` | `2` | Pre-create current month plus N future monthly partitions for `events` and `event_tags` |
|
||||
| `:max_db_bytes` | `PARRHESIA_RETENTION_MAX_DB_BYTES` | `:infinity` | Interpreted as GiB threshold; accepts integer or `infinity` |
|
||||
| `:max_months_to_keep` | `PARRHESIA_RETENTION_MAX_MONTHS_TO_KEEP` | `:infinity` | Keep at most N months (including current month); accepts integer or `infinity` |
|
||||
| `:max_partitions_to_drop_per_run` | `PARRHESIA_RETENTION_MAX_PARTITIONS_TO_DROP_PER_RUN` | `1` | Safety cap for each maintenance run |
|
||||
|
||||
#### `:features`
|
||||
|
||||
| Atom key | ENV | Default |
|
||||
| --- | --- | --- |
|
||||
| `:verify_event_signatures` | `PARRHESIA_FEATURES_VERIFY_EVENT_SIGNATURES` | `true` |
|
||||
| `:nip_45_count` | `PARRHESIA_FEATURES_NIP_45_COUNT` | `true` |
|
||||
| `:nip_50_search` | `PARRHESIA_FEATURES_NIP_50_SEARCH` | `true` |
|
||||
| `:nip_77_negentropy` | `PARRHESIA_FEATURES_NIP_77_NEGENTROPY` | `true` |
|
||||
| `:marmot_push_notifications` | `PARRHESIA_FEATURES_MARMOT_PUSH_NOTIFICATIONS` | `false` |
|
||||
|
||||
#### Extra runtime config
|
||||
|
||||
| Atom key | ENV | Default | Notes |
|
||||
| --- | --- | --- | --- |
|
||||
| extra runtime config file | `PARRHESIA_EXTRA_CONFIG` | unset | Imports an additional runtime `.exs` file |
|
||||
|
||||
---
|
||||
|
||||
## Deploy
|
||||
@@ -150,15 +264,15 @@ export POOL_SIZE=20
|
||||
|
||||
mix deps.get --only prod
|
||||
mix compile
|
||||
mix ecto.migrate
|
||||
mix release
|
||||
|
||||
_build/prod/rel/parrhesia/bin/parrhesia eval "Parrhesia.Release.migrate()"
|
||||
_build/prod/rel/parrhesia/bin/parrhesia foreground
|
||||
```
|
||||
|
||||
For systemd/process managers, run the release command in foreground mode.
|
||||
|
||||
### Option B: Nix package (`default.nix`)
|
||||
### Option B: Nix release package (`default.nix`)
|
||||
|
||||
Build:
|
||||
|
||||
@@ -168,6 +282,110 @@ nix-build
|
||||
|
||||
Run the built release from `./result/bin/parrhesia` (release command interface).
|
||||
|
||||
### Option C: Docker image via Nix flake
|
||||
|
||||
Build the image tarball:
|
||||
|
||||
```bash
|
||||
nix build .#dockerImage
|
||||
# or with explicit build target:
|
||||
nix build .#packages.x86_64-linux.dockerImage
|
||||
```
|
||||
|
||||
Load it into Docker:
|
||||
|
||||
```bash
|
||||
docker load < result
|
||||
```
|
||||
|
||||
Run database migrations:
|
||||
|
||||
```bash
|
||||
docker run --rm \
|
||||
-e DATABASE_URL="ecto://USER:PASS@HOST/parrhesia_prod" \
|
||||
parrhesia:latest \
|
||||
eval "Parrhesia.Release.migrate()"
|
||||
```
|
||||
|
||||
Start the relay:
|
||||
|
||||
```bash
|
||||
docker run --rm \
|
||||
-p 4413:4413 \
|
||||
-e DATABASE_URL="ecto://USER:PASS@HOST/parrhesia_prod" \
|
||||
-e POOL_SIZE=20 \
|
||||
parrhesia:latest
|
||||
```
|
||||
|
||||
### Option D: Docker Compose with PostgreSQL
|
||||
|
||||
The repo includes [`compose.yaml`](./compose.yaml) and [`.env.example`](./.env.example) so Docker users can run Postgres and Parrhesia together.
|
||||
|
||||
Set up the environment file:
|
||||
|
||||
```bash
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
If you are building locally from source, build and load the image first:
|
||||
|
||||
```bash
|
||||
nix build .#dockerImage
|
||||
docker load < result
|
||||
```
|
||||
|
||||
Then start the stack:
|
||||
|
||||
```bash
|
||||
docker compose up -d db
|
||||
docker compose run --rm migrate
|
||||
docker compose up -d parrhesia
|
||||
```
|
||||
|
||||
The relay will be available on:
|
||||
|
||||
```text
|
||||
ws://localhost:4413/relay
|
||||
```
|
||||
|
||||
Notes:
|
||||
|
||||
- `compose.yaml` keeps PostgreSQL in a separate container; the Parrhesia image only runs the app release.
|
||||
- The container listens on port `4413`; use `PARRHESIA_HOST_PORT` if you want a different published host port.
|
||||
- Migrations are run explicitly through the one-shot `migrate` service instead of on every app boot.
|
||||
- Common runtime overrides can go straight into `.env`; see [`.env.example`](./.env.example) for examples.
|
||||
- For more specialized overrides, mount a file and set `PARRHESIA_EXTRA_CONFIG=/path/in/container/runtime.exs`.
|
||||
- When a GHCR image is published, set `PARRHESIA_IMAGE=ghcr.io/<owner>/parrhesia:<tag>` in `.env` and reuse the same compose flow.
|
||||
|
||||
---
|
||||
|
||||
## Benchmark
|
||||
|
||||
The benchmark compares Parrhesia against [`strfry`](https://github.com/hoytech/strfry) and [`nostr-rs-relay`](https://sr.ht/~gheartsfield/nostr-rs-relay/) using [`nostr-bench`](https://github.com/rnostr/nostr-bench).
|
||||
|
||||
Run it with:
|
||||
|
||||
```bash
|
||||
mix bench
|
||||
```
|
||||
|
||||
Current comparison results from [BENCHMARK.md](./BENCHMARK.md):
|
||||
|
||||
| metric | parrhesia | strfry | nostr-rs-relay | strfry/parrhesia | nostr-rs/parrhesia |
|
||||
| --- | ---: | ---: | ---: | ---: | ---: |
|
||||
| connect avg latency (ms) ↓ | 13.50 | 3.00 | 2.00 | **0.22x** | **0.15x** |
|
||||
| connect max latency (ms) ↓ | 22.50 | 5.50 | 3.00 | **0.24x** | **0.13x** |
|
||||
| echo throughput (TPS) ↑ | 80385.00 | 61673.00 | 164516.00 | 0.77x | **2.05x** |
|
||||
| echo throughput (MiB/s) ↑ | 44.00 | 34.45 | 90.10 | 0.78x | **2.05x** |
|
||||
| event throughput (TPS) ↑ | 2000.00 | 3404.50 | 788.00 | **1.70x** | 0.39x |
|
||||
| event throughput (MiB/s) ↑ | 1.30 | 2.20 | 0.50 | **1.69x** | 0.38x |
|
||||
| req throughput (TPS) ↑ | 3664.00 | 1808.50 | 877.50 | 0.49x | 0.24x |
|
||||
| req throughput (MiB/s) ↑ | 20.75 | 11.75 | 2.45 | 0.57x | 0.12x |
|
||||
|
||||
Higher is better for `↑` metrics. Lower is better for `↓` metrics.
|
||||
|
||||
(Results from a Linux container on a 6-core Intel i5-8400T with NVMe drive, PostgreSQL 18)
|
||||
|
||||
---
|
||||
|
||||
## Development quality checks
|
||||
@@ -178,13 +396,13 @@ Before opening a PR:
|
||||
mix precommit
|
||||
```
|
||||
|
||||
For external CLI end-to-end checks with `nak`:
|
||||
Additional external CLI end-to-end checks with `nak`:
|
||||
|
||||
```bash
|
||||
mix test.nak_e2e
|
||||
```
|
||||
|
||||
For Marmot client end-to-end checks (TypeScript/Node suite using `marmot-ts`):
|
||||
For Marmot client end-to-end checks (TypeScript/Node suite using `marmot-ts`, included in `precommit`):
|
||||
|
||||
```bash
|
||||
mix test.marmot_e2e
|
||||
|
||||
42
compose.yaml
Normal file
42
compose.yaml
Normal file
@@ -0,0 +1,42 @@
|
||||
services:
|
||||
db:
|
||||
image: postgres:17
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
POSTGRES_DB: ${POSTGRES_DB:-parrhesia}
|
||||
POSTGRES_USER: ${POSTGRES_USER:-parrhesia}
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-parrhesia}
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 12
|
||||
volumes:
|
||||
- postgres-data:/var/lib/postgresql/data
|
||||
|
||||
migrate:
|
||||
image: ${PARRHESIA_IMAGE:-parrhesia:latest}
|
||||
profiles: ["tools"]
|
||||
restart: "no"
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
DATABASE_URL: ${DATABASE_URL:-ecto://parrhesia:parrhesia@db:5432/parrhesia}
|
||||
POOL_SIZE: ${POOL_SIZE:-20}
|
||||
command: ["eval", "Parrhesia.Release.migrate()"]
|
||||
|
||||
parrhesia:
|
||||
image: ${PARRHESIA_IMAGE:-parrhesia:latest}
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
DATABASE_URL: ${DATABASE_URL:-ecto://parrhesia:parrhesia@db:5432/parrhesia}
|
||||
POOL_SIZE: ${POOL_SIZE:-20}
|
||||
ports:
|
||||
- "${PARRHESIA_HOST_PORT:-4413}:4413"
|
||||
|
||||
volumes:
|
||||
postgres-data:
|
||||
@@ -4,7 +4,7 @@ config :postgrex, :json_library, JSON
|
||||
|
||||
config :parrhesia,
|
||||
moderation_cache_enabled: true,
|
||||
relay_url: "ws://localhost:4000/relay",
|
||||
relay_url: "ws://localhost:4413/relay",
|
||||
limits: [
|
||||
max_frame_bytes: 1_048_576,
|
||||
max_event_bytes: 262_144,
|
||||
@@ -54,6 +54,13 @@ config :parrhesia,
|
||||
allowed_cidrs: [],
|
||||
auth_token: nil
|
||||
],
|
||||
retention: [
|
||||
check_interval_hours: 24,
|
||||
months_ahead: 2,
|
||||
max_db_bytes: :infinity,
|
||||
max_months_to_keep: :infinity,
|
||||
max_partitions_to_drop_per_run: 1
|
||||
],
|
||||
features: [
|
||||
verify_event_signatures: true,
|
||||
nip_45_count: true,
|
||||
@@ -68,7 +75,7 @@ config :parrhesia,
|
||||
admin: Parrhesia.Storage.Adapters.Postgres.Admin
|
||||
]
|
||||
|
||||
config :parrhesia, Parrhesia.Web.Endpoint, port: 4000
|
||||
config :parrhesia, Parrhesia.Web.Endpoint, port: 4413
|
||||
|
||||
config :parrhesia, Parrhesia.Web.MetricsEndpoint,
|
||||
enabled: false,
|
||||
|
||||
@@ -1,33 +1,418 @@
|
||||
import Config
|
||||
|
||||
string_env = fn name, default ->
|
||||
case System.get_env(name) do
|
||||
nil -> default
|
||||
"" -> default
|
||||
value -> value
|
||||
end
|
||||
end
|
||||
|
||||
int_env = fn name, default ->
|
||||
case System.get_env(name) do
|
||||
nil -> default
|
||||
value -> String.to_integer(value)
|
||||
end
|
||||
end
|
||||
|
||||
bool_env = fn name, default ->
|
||||
case System.get_env(name) do
|
||||
nil ->
|
||||
default
|
||||
|
||||
value ->
|
||||
case String.downcase(value) do
|
||||
"1" -> true
|
||||
"true" -> true
|
||||
"yes" -> true
|
||||
"on" -> true
|
||||
"0" -> false
|
||||
"false" -> false
|
||||
"no" -> false
|
||||
"off" -> false
|
||||
_other -> raise "environment variable #{name} must be a boolean value"
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
csv_env = fn name, default ->
|
||||
case System.get_env(name) do
|
||||
nil ->
|
||||
default
|
||||
|
||||
value ->
|
||||
value
|
||||
|> String.split(",", trim: true)
|
||||
|> Enum.map(&String.trim/1)
|
||||
|> Enum.reject(&(&1 == ""))
|
||||
end
|
||||
end
|
||||
|
||||
infinity_or_int_env = fn name, default ->
|
||||
case System.get_env(name) do
|
||||
nil ->
|
||||
default
|
||||
|
||||
value ->
|
||||
normalized = value |> String.trim() |> String.downcase()
|
||||
|
||||
if normalized == "infinity" do
|
||||
:infinity
|
||||
else
|
||||
String.to_integer(value)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
outbound_overflow_strategy_env = fn name, default ->
|
||||
case System.get_env(name) do
|
||||
nil ->
|
||||
default
|
||||
|
||||
"close" ->
|
||||
:close
|
||||
|
||||
"drop_oldest" ->
|
||||
:drop_oldest
|
||||
|
||||
"drop_newest" ->
|
||||
:drop_newest
|
||||
|
||||
_other ->
|
||||
raise "environment variable #{name} must be one of: close, drop_oldest, drop_newest"
|
||||
end
|
||||
end
|
||||
|
||||
ipv4_env = fn name, default ->
|
||||
case System.get_env(name) do
|
||||
nil ->
|
||||
default
|
||||
|
||||
value ->
|
||||
case String.split(value, ".", parts: 4) do
|
||||
[a, b, c, d] ->
|
||||
octets = Enum.map([a, b, c, d], &String.to_integer/1)
|
||||
|
||||
if Enum.all?(octets, &(&1 >= 0 and &1 <= 255)) do
|
||||
List.to_tuple(octets)
|
||||
else
|
||||
raise "environment variable #{name} must be a valid IPv4 address"
|
||||
end
|
||||
|
||||
_other ->
|
||||
raise "environment variable #{name} must be a valid IPv4 address"
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
if config_env() == :prod do
|
||||
database_url =
|
||||
System.get_env("DATABASE_URL") ||
|
||||
raise "environment variable DATABASE_URL is missing. Example: ecto://USER:PASS@HOST/DATABASE"
|
||||
|
||||
repo_defaults = Application.get_env(:parrhesia, Parrhesia.Repo, [])
|
||||
relay_url_default = Application.get_env(:parrhesia, :relay_url)
|
||||
|
||||
moderation_cache_enabled_default =
|
||||
Application.get_env(:parrhesia, :moderation_cache_enabled, true)
|
||||
|
||||
enable_expiration_worker_default =
|
||||
Application.get_env(:parrhesia, :enable_expiration_worker, true)
|
||||
|
||||
limits_defaults = Application.get_env(:parrhesia, :limits, [])
|
||||
policies_defaults = Application.get_env(:parrhesia, :policies, [])
|
||||
metrics_defaults = Application.get_env(:parrhesia, :metrics, [])
|
||||
retention_defaults = Application.get_env(:parrhesia, :retention, [])
|
||||
features_defaults = Application.get_env(:parrhesia, :features, [])
|
||||
metrics_endpoint_defaults = Application.get_env(:parrhesia, Parrhesia.Web.MetricsEndpoint, [])
|
||||
|
||||
default_pool_size = Keyword.get(repo_defaults, :pool_size, 32)
|
||||
default_queue_target = Keyword.get(repo_defaults, :queue_target, 1_000)
|
||||
default_queue_interval = Keyword.get(repo_defaults, :queue_interval, 5_000)
|
||||
|
||||
pool_size =
|
||||
case System.get_env("POOL_SIZE") do
|
||||
nil -> default_pool_size
|
||||
value -> String.to_integer(value)
|
||||
end
|
||||
pool_size = int_env.("POOL_SIZE", default_pool_size)
|
||||
queue_target = int_env.("DB_QUEUE_TARGET_MS", default_queue_target)
|
||||
queue_interval = int_env.("DB_QUEUE_INTERVAL_MS", default_queue_interval)
|
||||
|
||||
queue_target =
|
||||
case System.get_env("DB_QUEUE_TARGET_MS") do
|
||||
nil -> default_queue_target
|
||||
value -> String.to_integer(value)
|
||||
end
|
||||
limits = [
|
||||
max_frame_bytes:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_FRAME_BYTES",
|
||||
Keyword.get(limits_defaults, :max_frame_bytes, 1_048_576)
|
||||
),
|
||||
max_event_bytes:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_EVENT_BYTES",
|
||||
Keyword.get(limits_defaults, :max_event_bytes, 262_144)
|
||||
),
|
||||
max_filters_per_req:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_FILTERS_PER_REQ",
|
||||
Keyword.get(limits_defaults, :max_filters_per_req, 16)
|
||||
),
|
||||
max_filter_limit:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_FILTER_LIMIT",
|
||||
Keyword.get(limits_defaults, :max_filter_limit, 500)
|
||||
),
|
||||
max_subscriptions_per_connection:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_SUBSCRIPTIONS_PER_CONNECTION",
|
||||
Keyword.get(limits_defaults, :max_subscriptions_per_connection, 32)
|
||||
),
|
||||
max_event_future_skew_seconds:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_EVENT_FUTURE_SKEW_SECONDS",
|
||||
Keyword.get(limits_defaults, :max_event_future_skew_seconds, 900)
|
||||
),
|
||||
max_event_ingest_per_window:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_EVENT_INGEST_PER_WINDOW",
|
||||
Keyword.get(limits_defaults, :max_event_ingest_per_window, 120)
|
||||
),
|
||||
event_ingest_window_seconds:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_EVENT_INGEST_WINDOW_SECONDS",
|
||||
Keyword.get(limits_defaults, :event_ingest_window_seconds, 1)
|
||||
),
|
||||
auth_max_age_seconds:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_AUTH_MAX_AGE_SECONDS",
|
||||
Keyword.get(limits_defaults, :auth_max_age_seconds, 600)
|
||||
),
|
||||
max_outbound_queue:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_OUTBOUND_QUEUE",
|
||||
Keyword.get(limits_defaults, :max_outbound_queue, 256)
|
||||
),
|
||||
outbound_drain_batch_size:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_OUTBOUND_DRAIN_BATCH_SIZE",
|
||||
Keyword.get(limits_defaults, :outbound_drain_batch_size, 64)
|
||||
),
|
||||
outbound_overflow_strategy:
|
||||
outbound_overflow_strategy_env.(
|
||||
"PARRHESIA_LIMITS_OUTBOUND_OVERFLOW_STRATEGY",
|
||||
Keyword.get(limits_defaults, :outbound_overflow_strategy, :close)
|
||||
),
|
||||
max_negentropy_payload_bytes:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_NEGENTROPY_PAYLOAD_BYTES",
|
||||
Keyword.get(limits_defaults, :max_negentropy_payload_bytes, 4096)
|
||||
),
|
||||
max_negentropy_sessions_per_connection:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_NEGENTROPY_SESSIONS_PER_CONNECTION",
|
||||
Keyword.get(limits_defaults, :max_negentropy_sessions_per_connection, 8)
|
||||
),
|
||||
max_negentropy_total_sessions:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_MAX_NEGENTROPY_TOTAL_SESSIONS",
|
||||
Keyword.get(limits_defaults, :max_negentropy_total_sessions, 10_000)
|
||||
),
|
||||
negentropy_session_idle_timeout_seconds:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_NEGENTROPY_SESSION_IDLE_TIMEOUT_SECONDS",
|
||||
Keyword.get(limits_defaults, :negentropy_session_idle_timeout_seconds, 60)
|
||||
),
|
||||
negentropy_session_sweep_interval_seconds:
|
||||
int_env.(
|
||||
"PARRHESIA_LIMITS_NEGENTROPY_SESSION_SWEEP_INTERVAL_SECONDS",
|
||||
Keyword.get(limits_defaults, :negentropy_session_sweep_interval_seconds, 10)
|
||||
)
|
||||
]
|
||||
|
||||
queue_interval =
|
||||
case System.get_env("DB_QUEUE_INTERVAL_MS") do
|
||||
nil -> default_queue_interval
|
||||
value -> String.to_integer(value)
|
||||
end
|
||||
policies = [
|
||||
auth_required_for_writes:
|
||||
bool_env.(
|
||||
"PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_WRITES",
|
||||
Keyword.get(policies_defaults, :auth_required_for_writes, false)
|
||||
),
|
||||
auth_required_for_reads:
|
||||
bool_env.(
|
||||
"PARRHESIA_POLICIES_AUTH_REQUIRED_FOR_READS",
|
||||
Keyword.get(policies_defaults, :auth_required_for_reads, false)
|
||||
),
|
||||
min_pow_difficulty:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MIN_POW_DIFFICULTY",
|
||||
Keyword.get(policies_defaults, :min_pow_difficulty, 0)
|
||||
),
|
||||
accept_ephemeral_events:
|
||||
bool_env.(
|
||||
"PARRHESIA_POLICIES_ACCEPT_EPHEMERAL_EVENTS",
|
||||
Keyword.get(policies_defaults, :accept_ephemeral_events, true)
|
||||
),
|
||||
mls_group_event_ttl_seconds:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MLS_GROUP_EVENT_TTL_SECONDS",
|
||||
Keyword.get(policies_defaults, :mls_group_event_ttl_seconds, 300)
|
||||
),
|
||||
marmot_require_h_for_group_queries:
|
||||
bool_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_REQUIRE_H_FOR_GROUP_QUERIES",
|
||||
Keyword.get(policies_defaults, :marmot_require_h_for_group_queries, true)
|
||||
),
|
||||
marmot_group_max_h_values_per_filter:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_GROUP_MAX_H_VALUES_PER_FILTER",
|
||||
Keyword.get(policies_defaults, :marmot_group_max_h_values_per_filter, 32)
|
||||
),
|
||||
marmot_group_max_query_window_seconds:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_GROUP_MAX_QUERY_WINDOW_SECONDS",
|
||||
Keyword.get(policies_defaults, :marmot_group_max_query_window_seconds, 2_592_000)
|
||||
),
|
||||
marmot_media_max_imeta_tags_per_event:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_IMETA_TAGS_PER_EVENT",
|
||||
Keyword.get(policies_defaults, :marmot_media_max_imeta_tags_per_event, 8)
|
||||
),
|
||||
marmot_media_max_field_value_bytes:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_FIELD_VALUE_BYTES",
|
||||
Keyword.get(policies_defaults, :marmot_media_max_field_value_bytes, 1024)
|
||||
),
|
||||
marmot_media_max_url_bytes:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_MEDIA_MAX_URL_BYTES",
|
||||
Keyword.get(policies_defaults, :marmot_media_max_url_bytes, 2048)
|
||||
),
|
||||
marmot_media_allowed_mime_prefixes:
|
||||
csv_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_MEDIA_ALLOWED_MIME_PREFIXES",
|
||||
Keyword.get(policies_defaults, :marmot_media_allowed_mime_prefixes, [])
|
||||
),
|
||||
marmot_media_reject_mip04_v1:
|
||||
bool_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_MEDIA_REJECT_MIP04_V1",
|
||||
Keyword.get(policies_defaults, :marmot_media_reject_mip04_v1, true)
|
||||
),
|
||||
marmot_push_server_pubkeys:
|
||||
csv_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_PUSH_SERVER_PUBKEYS",
|
||||
Keyword.get(policies_defaults, :marmot_push_server_pubkeys, [])
|
||||
),
|
||||
marmot_push_max_relay_tags:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_PUSH_MAX_RELAY_TAGS",
|
||||
Keyword.get(policies_defaults, :marmot_push_max_relay_tags, 16)
|
||||
),
|
||||
marmot_push_max_payload_bytes:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_PUSH_MAX_PAYLOAD_BYTES",
|
||||
Keyword.get(policies_defaults, :marmot_push_max_payload_bytes, 65_536)
|
||||
),
|
||||
marmot_push_max_trigger_age_seconds:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_PUSH_MAX_TRIGGER_AGE_SECONDS",
|
||||
Keyword.get(policies_defaults, :marmot_push_max_trigger_age_seconds, 120)
|
||||
),
|
||||
marmot_push_require_expiration:
|
||||
bool_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_PUSH_REQUIRE_EXPIRATION",
|
||||
Keyword.get(policies_defaults, :marmot_push_require_expiration, true)
|
||||
),
|
||||
marmot_push_max_expiration_window_seconds:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_PUSH_MAX_EXPIRATION_WINDOW_SECONDS",
|
||||
Keyword.get(policies_defaults, :marmot_push_max_expiration_window_seconds, 120)
|
||||
),
|
||||
marmot_push_max_server_recipients:
|
||||
int_env.(
|
||||
"PARRHESIA_POLICIES_MARMOT_PUSH_MAX_SERVER_RECIPIENTS",
|
||||
Keyword.get(policies_defaults, :marmot_push_max_server_recipients, 1)
|
||||
),
|
||||
management_auth_required:
|
||||
bool_env.(
|
||||
"PARRHESIA_POLICIES_MANAGEMENT_AUTH_REQUIRED",
|
||||
Keyword.get(policies_defaults, :management_auth_required, true)
|
||||
)
|
||||
]
|
||||
|
||||
metrics = [
|
||||
enabled_on_main_endpoint:
|
||||
bool_env.(
|
||||
"PARRHESIA_METRICS_ENABLED_ON_MAIN_ENDPOINT",
|
||||
Keyword.get(metrics_defaults, :enabled_on_main_endpoint, true)
|
||||
),
|
||||
public:
|
||||
bool_env.(
|
||||
"PARRHESIA_METRICS_PUBLIC",
|
||||
Keyword.get(metrics_defaults, :public, false)
|
||||
),
|
||||
private_networks_only:
|
||||
bool_env.(
|
||||
"PARRHESIA_METRICS_PRIVATE_NETWORKS_ONLY",
|
||||
Keyword.get(metrics_defaults, :private_networks_only, true)
|
||||
),
|
||||
allowed_cidrs:
|
||||
csv_env.(
|
||||
"PARRHESIA_METRICS_ALLOWED_CIDRS",
|
||||
Keyword.get(metrics_defaults, :allowed_cidrs, [])
|
||||
),
|
||||
auth_token:
|
||||
string_env.(
|
||||
"PARRHESIA_METRICS_AUTH_TOKEN",
|
||||
Keyword.get(metrics_defaults, :auth_token)
|
||||
)
|
||||
]
|
||||
|
||||
retention = [
|
||||
check_interval_hours:
|
||||
int_env.(
|
||||
"PARRHESIA_RETENTION_CHECK_INTERVAL_HOURS",
|
||||
Keyword.get(retention_defaults, :check_interval_hours, 24)
|
||||
),
|
||||
months_ahead:
|
||||
int_env.(
|
||||
"PARRHESIA_RETENTION_MONTHS_AHEAD",
|
||||
Keyword.get(retention_defaults, :months_ahead, 2)
|
||||
),
|
||||
max_db_bytes:
|
||||
infinity_or_int_env.(
|
||||
"PARRHESIA_RETENTION_MAX_DB_BYTES",
|
||||
Keyword.get(retention_defaults, :max_db_bytes, :infinity)
|
||||
),
|
||||
max_months_to_keep:
|
||||
infinity_or_int_env.(
|
||||
"PARRHESIA_RETENTION_MAX_MONTHS_TO_KEEP",
|
||||
Keyword.get(retention_defaults, :max_months_to_keep, :infinity)
|
||||
),
|
||||
max_partitions_to_drop_per_run:
|
||||
int_env.(
|
||||
"PARRHESIA_RETENTION_MAX_PARTITIONS_TO_DROP_PER_RUN",
|
||||
Keyword.get(retention_defaults, :max_partitions_to_drop_per_run, 1)
|
||||
)
|
||||
]
|
||||
|
||||
features = [
|
||||
verify_event_signatures:
|
||||
bool_env.(
|
||||
"PARRHESIA_FEATURES_VERIFY_EVENT_SIGNATURES",
|
||||
Keyword.get(features_defaults, :verify_event_signatures, true)
|
||||
),
|
||||
nip_45_count:
|
||||
bool_env.(
|
||||
"PARRHESIA_FEATURES_NIP_45_COUNT",
|
||||
Keyword.get(features_defaults, :nip_45_count, true)
|
||||
),
|
||||
nip_50_search:
|
||||
bool_env.(
|
||||
"PARRHESIA_FEATURES_NIP_50_SEARCH",
|
||||
Keyword.get(features_defaults, :nip_50_search, true)
|
||||
),
|
||||
nip_77_negentropy:
|
||||
bool_env.(
|
||||
"PARRHESIA_FEATURES_NIP_77_NEGENTROPY",
|
||||
Keyword.get(features_defaults, :nip_77_negentropy, true)
|
||||
),
|
||||
marmot_push_notifications:
|
||||
bool_env.(
|
||||
"PARRHESIA_FEATURES_MARMOT_PUSH_NOTIFICATIONS",
|
||||
Keyword.get(features_defaults, :marmot_push_notifications, false)
|
||||
)
|
||||
]
|
||||
|
||||
config :parrhesia, Parrhesia.Repo,
|
||||
url: database_url,
|
||||
@@ -35,6 +420,40 @@ if config_env() == :prod do
|
||||
queue_target: queue_target,
|
||||
queue_interval: queue_interval
|
||||
|
||||
config :parrhesia, Parrhesia.Web.Endpoint,
|
||||
port: String.to_integer(System.get_env("PORT") || "4000")
|
||||
config :parrhesia, Parrhesia.Web.Endpoint, port: int_env.("PORT", 4413)
|
||||
|
||||
config :parrhesia, Parrhesia.Web.MetricsEndpoint,
|
||||
enabled:
|
||||
bool_env.(
|
||||
"PARRHESIA_METRICS_ENDPOINT_ENABLED",
|
||||
Keyword.get(metrics_endpoint_defaults, :enabled, false)
|
||||
),
|
||||
ip:
|
||||
ipv4_env.(
|
||||
"PARRHESIA_METRICS_ENDPOINT_IP",
|
||||
Keyword.get(metrics_endpoint_defaults, :ip, {127, 0, 0, 1})
|
||||
),
|
||||
port:
|
||||
int_env.(
|
||||
"PARRHESIA_METRICS_ENDPOINT_PORT",
|
||||
Keyword.get(metrics_endpoint_defaults, :port, 9568)
|
||||
)
|
||||
|
||||
config :parrhesia,
|
||||
relay_url: string_env.("PARRHESIA_RELAY_URL", relay_url_default),
|
||||
moderation_cache_enabled:
|
||||
bool_env.("PARRHESIA_MODERATION_CACHE_ENABLED", moderation_cache_enabled_default),
|
||||
enable_expiration_worker:
|
||||
bool_env.("PARRHESIA_ENABLE_EXPIRATION_WORKER", enable_expiration_worker_default),
|
||||
limits: limits,
|
||||
policies: policies,
|
||||
metrics: metrics,
|
||||
retention: retention,
|
||||
features: features
|
||||
|
||||
case System.get_env("PARRHESIA_EXTRA_CONFIG") do
|
||||
nil -> :ok
|
||||
"" -> :ok
|
||||
path -> import_config path
|
||||
end
|
||||
end
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
vips,
|
||||
}: let
|
||||
pname = "parrhesia";
|
||||
version = "0.3.0";
|
||||
version = "0.4.0";
|
||||
|
||||
beamPackages = beam.packages.erlang_28.extend (
|
||||
final: _prev: {
|
||||
@@ -48,7 +48,7 @@
|
||||
beamPackages.fetchMixDeps {
|
||||
pname = "${pname}-mix-deps";
|
||||
inherit version src;
|
||||
hash = "sha256-0KOyYRbYM0jjmp3tPn64qkp0YkmZKlqkGrlu/wCr4m8=";
|
||||
hash = "sha256-I09Q2PG22lOrZjjXoq8Py3P3o5dgaz9LhKJSmP+/r6k=";
|
||||
}
|
||||
else null;
|
||||
|
||||
|
||||
13
devenv.nix
13
devenv.nix
@@ -106,12 +106,6 @@ in {
|
||||
strfry
|
||||
];
|
||||
|
||||
# https://devenv.sh/tests/
|
||||
# enterTest = ''
|
||||
# echo "Running tests"
|
||||
# git --version | grep "2.42.0"
|
||||
# '';
|
||||
|
||||
# https://devenv.sh/languages/
|
||||
languages = {
|
||||
elixir = {
|
||||
@@ -131,7 +125,7 @@ in {
|
||||
enable = true;
|
||||
package = pkgs.postgresql_18;
|
||||
|
||||
# Some tuning for the benchmark
|
||||
# Some tuning for the benchmark - doesn't seem to do much
|
||||
settings = {
|
||||
max_connections = 300;
|
||||
shared_buffers = "1GB";
|
||||
@@ -151,6 +145,7 @@ in {
|
||||
initialScript = ''
|
||||
CREATE ROLE dev WITH LOGIN PASSWORD 'dev' SUPERUSER;
|
||||
|
||||
-- Make sure we get the right collation
|
||||
ALTER database template1 is_template=false;
|
||||
|
||||
DROP database template1;
|
||||
@@ -167,12 +162,10 @@ in {
|
||||
'';
|
||||
};
|
||||
|
||||
# https://devenv.sh/pre-commit-hooks/
|
||||
# pre-commit.hooks.shellcheck.enable = true;
|
||||
|
||||
dotenv.enable = true;
|
||||
devenv.warnOnNewVersion = false;
|
||||
|
||||
# https://devenv.sh/pre-commit-hooks/
|
||||
git-hooks.hooks = {
|
||||
alejandra.enable = true;
|
||||
check-added-large-files = {
|
||||
|
||||
234
docs/CLUSTER.md
Normal file
234
docs/CLUSTER.md
Normal file
@@ -0,0 +1,234 @@
|
||||
# Parrhesia clustering and distributed fanout
|
||||
|
||||
This document describes:
|
||||
|
||||
1. the **current** distributed fanout behavior implemented today, and
|
||||
2. a practical evolution path to a more production-grade clustered relay.
|
||||
|
||||
---
|
||||
|
||||
## 1) Current state (implemented today)
|
||||
|
||||
### 1.1 What exists right now
|
||||
|
||||
Parrhesia currently includes a lightweight multi-node live fanout path (untested!):
|
||||
|
||||
- `Parrhesia.Fanout.MultiNode` (`lib/parrhesia/fanout/multi_node.ex`)
|
||||
- GenServer that joins a `:pg` process group.
|
||||
- Receives locally-published events and forwards them to other group members.
|
||||
- Receives remote events and performs local fanout lookup.
|
||||
- `Parrhesia.Web.Connection` (`lib/parrhesia/web/connection.ex`)
|
||||
- On successful ingest, after ACK scheduling, it does:
|
||||
1. local fanout (`fanout_event/1`), then
|
||||
2. cross-node publish (`maybe_publish_multi_node/1`).
|
||||
- `Parrhesia.Subscriptions.Supervisor` (`lib/parrhesia/subscriptions/supervisor.ex`)
|
||||
- Starts `Parrhesia.Fanout.MultiNode` unconditionally.
|
||||
|
||||
In other words: **if BEAM nodes are connected, live events are fanned out cross-node**.
|
||||
|
||||
### 1.2 What is not included yet
|
||||
|
||||
- No automatic cluster formation/discovery (no `libcluster`, DNS polling, gossip, etc.).
|
||||
- No durable inter-node event transport.
|
||||
- No replay/recovery of missed cross-node live events.
|
||||
- No explicit per-node delivery ACK between relay nodes.
|
||||
|
||||
---
|
||||
|
||||
## 2) Current runtime behavior in detail
|
||||
|
||||
### 2.1 Local ingest flow and publish ordering
|
||||
|
||||
For an accepted event in `Parrhesia.Web.Connection`:
|
||||
|
||||
1. validate/policy/persist path runs.
|
||||
2. Client receives `OK` reply.
|
||||
3. A post-ACK message triggers:
|
||||
- local fanout (`Index.candidate_subscription_keys/1` + send `{:fanout_event, ...}`),
|
||||
- multi-node publish (`MultiNode.publish/1`).
|
||||
|
||||
Important semantics:
|
||||
|
||||
- Regular persisted events: ACK implies DB persistence succeeded.
|
||||
- Ephemeral events: ACK implies accepted by policy, but no DB durability.
|
||||
- Cross-node fanout happens **after** ACK path is scheduled.
|
||||
|
||||
### 2.2 Multi-node transport mechanics
|
||||
|
||||
`Parrhesia.Fanout.MultiNode` uses `:pg` membership:
|
||||
|
||||
- On init:
|
||||
- ensures `:pg` is started,
|
||||
- joins group `Parrhesia.Fanout.MultiNode`.
|
||||
- On publish:
|
||||
- gets all group members,
|
||||
- excludes itself,
|
||||
- sends `{:remote_fanout_event, event}` to each member pid.
|
||||
- On remote receive:
|
||||
- runs local subscription candidate narrowing via `Parrhesia.Subscriptions.Index`,
|
||||
- forwards matching candidates to local connection owners as `{:fanout_event, sub_id, event}`.
|
||||
|
||||
No republish on remote receive, so this path does not create fanout loops.
|
||||
|
||||
### 2.3 Subscription index locality
|
||||
|
||||
The subscription index is local ETS state per node (`Parrhesia.Subscriptions.Index`).
|
||||
|
||||
- Each node only tracks subscriptions of its local websocket processes.
|
||||
- Each node independently decides which local subscribers match a remote event.
|
||||
- There is no global cross-node subscription registry.
|
||||
|
||||
### 2.4 Delivery model and guarantees (current)
|
||||
|
||||
Current model is **best-effort live propagation** among connected nodes.
|
||||
|
||||
- If nodes are connected and healthy, remote live subscribers should receive events quickly.
|
||||
- If there is a netsplit or temporary disconnection:
|
||||
- remote live subscribers may miss events,
|
||||
- persisted events can still be recovered by normal `REQ`/history query,
|
||||
- ephemeral events are not recoverable.
|
||||
|
||||
### 2.5 Cluster preconditions
|
||||
|
||||
For cross-node fanout to work, operators must provide distributed BEAM connectivity:
|
||||
|
||||
- consistent Erlang cookie,
|
||||
- named nodes (`--name`/`--sname`),
|
||||
- network reachability for Erlang distribution ports,
|
||||
- explicit node connections (or external discovery tooling).
|
||||
|
||||
Parrhesia currently does not automate these steps.
|
||||
|
||||
---
|
||||
|
||||
## 3) Operational characteristics of current design
|
||||
|
||||
### 3.1 Performance shape
|
||||
|
||||
For each accepted event on one node:
|
||||
|
||||
- one local fanout lookup + local sends,
|
||||
- one cluster publish that sends to `N - 1` remote bus members,
|
||||
- on each remote node: one local fanout lookup + local sends.
|
||||
|
||||
So inter-node traffic scales roughly linearly with node count per event (full-cluster broadcast).
|
||||
|
||||
This is simple and low-latency for small-to-medium clusters, but can become expensive as node count grows.
|
||||
|
||||
### 3.2 Failure behavior
|
||||
|
||||
- Remote node down: send attempts to that member stop once membership updates; no replay.
|
||||
- Netsplit: live propagation gap during split.
|
||||
- Recovery: local clients can catch up via DB-backed queries (except ephemeral kinds).
|
||||
|
||||
### 3.3 Consistency expectations
|
||||
|
||||
- No global total-ordering guarantee for live delivery across nodes.
|
||||
- Per-connection ordering is preserved by each connection process queue/drain behavior.
|
||||
- Duplicate suppression for ingestion uses storage semantics (`duplicate_event`), but transport itself is not exactly-once.
|
||||
|
||||
### 3.4 Observability today
|
||||
|
||||
Relevant metrics exist for fanout/queue pressure (see `Parrhesia.Telemetry`), e.g.:
|
||||
|
||||
- `parrhesia.fanout.duration.ms`
|
||||
- `parrhesia.connection.outbound_queue.depth`
|
||||
- `parrhesia.connection.outbound_queue.pressure`
|
||||
- `parrhesia.connection.outbound_queue.overflow.count`
|
||||
|
||||
These are useful but do not yet fully separate local-vs-remote fanout pipeline stages.
|
||||
|
||||
---
|
||||
|
||||
## 4) Practical extension path to a fully-fledged clustered system
|
||||
|
||||
A realistic path is incremental. Suggested phases:
|
||||
|
||||
### Phase A — hardened BEAM cluster control plane
|
||||
|
||||
1. Add cluster discovery/formation (e.g. `libcluster`) with environment-specific topology:
|
||||
- Kubernetes DNS,
|
||||
- static nodes,
|
||||
- cloud VM discovery.
|
||||
2. Add clear node liveness/partition telemetry and alerts.
|
||||
3. Provide operator docs for cookie, node naming, and network requirements.
|
||||
|
||||
Outcome: simpler and safer cluster operations, same data plane semantics.
|
||||
|
||||
### Phase B — resilient distributed fanout data plane
|
||||
|
||||
Introduce a durable fanout stream for persisted events.
|
||||
|
||||
Recommended pattern:
|
||||
|
||||
1. On successful DB commit of event, append to a monotonic fanout log (or use DB sequence-based stream view).
|
||||
2. Each relay node runs a consumer with a stored cursor.
|
||||
3. On restart/partition recovery, node resumes from cursor and replays missed events.
|
||||
4. Local fanout remains same (subscription index + per-connection queues).
|
||||
|
||||
Semantics target:
|
||||
|
||||
- **at-least-once** node-to-node propagation,
|
||||
- replay after downtime,
|
||||
- idempotent handling keyed by event id.
|
||||
|
||||
Notes:
|
||||
|
||||
- Ephemeral events can remain best-effort (or have a separate short-lived transport), since no storage source exists for replay.
|
||||
|
||||
### Phase C — scale and efficiency improvements
|
||||
|
||||
As cluster size grows, avoid naive full broadcast where possible:
|
||||
|
||||
1. Optional node-level subscription summaries (coarse bloom/bitset or keyed summaries) to reduce unnecessary remote sends.
|
||||
2. Shard fanout workers for CPU locality and mailbox control.
|
||||
3. Batch remote delivery payloads.
|
||||
4. Separate traffic classes (e.g. Marmot-heavy streams vs generic) with independent queues.
|
||||
|
||||
Outcome: higher throughput per node and lower inter-node amplification.
|
||||
|
||||
### Phase D — stronger observability and SLOs
|
||||
|
||||
Add explicit distributed pipeline metrics:
|
||||
|
||||
- publish enqueue/dequeue latency,
|
||||
- cross-node delivery lag (commit -> remote fanout enqueue),
|
||||
- replay backlog depth,
|
||||
- per-node dropped/expired transport messages,
|
||||
- partition detection counters.
|
||||
|
||||
Define cluster SLO examples:
|
||||
|
||||
- p95 commit->remote-live enqueue under nominal load,
|
||||
- max replay catch-up time after node restart,
|
||||
- bounded message loss for best-effort channels.
|
||||
|
||||
---
|
||||
|
||||
## 5) How a fully-fledged system would behave in practice
|
||||
|
||||
With Phases A-D implemented, expected behavior:
|
||||
|
||||
- **Normal operation:**
|
||||
- low-latency local fanout,
|
||||
- remote nodes receive events via stream consumers quickly,
|
||||
- consistent operational visibility of end-to-end lag.
|
||||
- **Node restart:**
|
||||
- node reconnects and replays from stored cursor,
|
||||
- local subscribers begin receiving new + missed persisted events.
|
||||
- **Transient partition:**
|
||||
- live best-effort path may degrade,
|
||||
- persisted events converge after partition heals via replay.
|
||||
- **High fanout bursts:**
|
||||
- batching + sharding keeps queue pressure bounded,
|
||||
- overflow policies remain connection-local and measurable.
|
||||
|
||||
This approach gives a good trade-off between Nostr relay latency and distributed robustness without requiring strict exactly-once semantics.
|
||||
|
||||
---
|
||||
|
||||
## 6) Current status summary
|
||||
|
||||
Today, Parrhesia already supports **lightweight distributed live fanout** when BEAM nodes are connected.
|
||||
|
||||
It is intentionally simple and fast for smaller clusters, and provides a solid base for a more durable, observable cluster architecture as relay scale and availability requirements grow.
|
||||
@@ -1,69 +0,0 @@
|
||||
# Marmot operations guide (relay operator tuning)
|
||||
|
||||
This document captures practical limits and operational defaults for Marmot-heavy traffic (`443`, `445`, `10051`, wrapped `1059`, optional media/push flows).
|
||||
|
||||
## 1) Recommended baseline limits
|
||||
|
||||
Use these as a starting point and tune from production telemetry.
|
||||
|
||||
```elixir
|
||||
config :parrhesia,
|
||||
limits: [
|
||||
max_filter_limit: 500,
|
||||
max_filters_per_req: 16,
|
||||
max_outbound_queue: 256,
|
||||
outbound_drain_batch_size: 64
|
||||
],
|
||||
policies: [
|
||||
# Marmot group routing/query guards
|
||||
marmot_require_h_for_group_queries: true,
|
||||
marmot_group_max_h_values_per_filter: 32,
|
||||
marmot_group_max_query_window_seconds: 2_592_000,
|
||||
|
||||
# Kind 445 retention
|
||||
mls_group_event_ttl_seconds: 300,
|
||||
|
||||
# MIP-04 metadata controls
|
||||
marmot_media_max_imeta_tags_per_event: 8,
|
||||
marmot_media_max_field_value_bytes: 1024,
|
||||
marmot_media_max_url_bytes: 2048,
|
||||
marmot_media_allowed_mime_prefixes: [],
|
||||
marmot_media_reject_mip04_v1: true,
|
||||
|
||||
# MIP-05 push controls (optional)
|
||||
marmot_push_server_pubkeys: [],
|
||||
marmot_push_max_relay_tags: 16,
|
||||
marmot_push_max_payload_bytes: 65_536,
|
||||
marmot_push_max_trigger_age_seconds: 120,
|
||||
marmot_push_require_expiration: true,
|
||||
marmot_push_max_expiration_window_seconds: 120,
|
||||
marmot_push_max_server_recipients: 1
|
||||
]
|
||||
```
|
||||
|
||||
## 2) Index expectations for Marmot workloads
|
||||
|
||||
The Postgres adapter relies on dedicated partial tag indexes for hot Marmot selectors:
|
||||
|
||||
- `event_tags_h_value_created_at_idx` for `#h` group routing
|
||||
- `event_tags_i_value_created_at_idx` for `#i` keypackage reference lookups
|
||||
|
||||
Query-plan regression tests assert these paths remain usable for heavy workloads.
|
||||
|
||||
## 3) Telemetry to watch
|
||||
|
||||
Key metrics for Marmot traffic and pressure:
|
||||
|
||||
- `parrhesia.ingest.duration.ms{traffic_class="marmot|generic"}`
|
||||
- `parrhesia.query.duration.ms{traffic_class="marmot|generic"}`
|
||||
- `parrhesia.fanout.duration.ms{traffic_class="marmot|generic"}`
|
||||
- `parrhesia.connection.outbound_queue.depth{traffic_class=...}`
|
||||
- `parrhesia.connection.outbound_queue.pressure{traffic_class=...}`
|
||||
- `parrhesia.connection.outbound_queue.pressure_events.count{traffic_class=...}`
|
||||
- `parrhesia.connection.outbound_queue.overflow.count{traffic_class=...}`
|
||||
|
||||
Operational target: keep queue pressure below sustained 0.75 and avoid overflow spikes during `445` bursts.
|
||||
|
||||
## 4) Fault and recovery expectations
|
||||
|
||||
During storage outages, Marmot group-flow writes must fail with explicit `OK false` errors. After recovery, reordered group events should still query deterministically by `created_at DESC, id ASC`.
|
||||
BIN
docs/logo.afdesign
Normal file
BIN
docs/logo.afdesign
Normal file
Binary file not shown.
1
docs/logo.svg
Normal file
1
docs/logo.svg
Normal file
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 37 KiB |
279
docs/slop/HARDEN.md
Normal file
279
docs/slop/HARDEN.md
Normal file
@@ -0,0 +1,279 @@
|
||||
# Hardening Review: Parrhesia Nostr Relay
|
||||
|
||||
You are a security engineer specialising in real-time WebSocket servers, Erlang/OTP systems, and protocol-level abuse. You are reviewing **Parrhesia**, a Nostr relay (NIP-01 compliant) written in Elixir, for hardening opportunities — with a primary focus on **denial-of-service resilience** and a secondary focus on the full attack surface.
|
||||
|
||||
Produce a prioritised list of **specific, actionable recommendations** with rationale. For each recommendation, state:
|
||||
1. The attack or failure mode it mitigates
|
||||
2. Suggested implementation (config change, code change, or architectural change)
|
||||
3. Severity estimate (critical / high / medium / low)
|
||||
|
||||
---
|
||||
|
||||
## 1. Architecture Overview
|
||||
|
||||
| Component | Technology | Notes |
|
||||
|---|---|---|
|
||||
| Runtime | Elixir/OTP 27, BEAM VM | Each WS connection is a separate process |
|
||||
| HTTP server | Bandit (pure Elixir) | HTTP/1.1 only, no HTTP/2 |
|
||||
| WebSocket | `websock_adapter` | Text frames only; binary rejected |
|
||||
| Database | PostgreSQL via Ecto | Range-partitioned `events` table by `created_at` |
|
||||
| Caching | ETS | Config snapshot + moderation ban/allow lists |
|
||||
| Multi-node | Erlang `:pg` groups | Fanout across BEAM cluster nodes |
|
||||
| Metrics | Prometheus (Telemetry) | `/metrics` endpoint |
|
||||
| TLS termination | **Out of scope** | Handled by reverse proxy (nginx/Caddy) |
|
||||
|
||||
### Supervision Tree
|
||||
|
||||
```
|
||||
Parrhesia.Supervisor
|
||||
├─ Telemetry (Prometheus exporter)
|
||||
├─ Config (ETS snapshot of runtime config)
|
||||
├─ Storage.Supervisor (Ecto repo + moderation cache)
|
||||
├─ Subscriptions.Supervisor (ETS subscription index for fanout)
|
||||
├─ Auth.Supervisor (NIP-42 challenge GenServer)
|
||||
├─ Policy.Supervisor (policy enforcement)
|
||||
├─ Web.Endpoint (Bandit listener)
|
||||
└─ Tasks.Supervisor (ExpirationWorker, 30s GC loop)
|
||||
```
|
||||
|
||||
### Data Flow
|
||||
|
||||
1. Client connects via WebSocket at `/relay`
|
||||
2. NIP-42 AUTH challenge issued immediately (16-byte random, base64url)
|
||||
3. Inbound text frames are: size-checked → JSON-decoded → rate-limited → protocol-dispatched
|
||||
4. EVENT messages: validated → policy-checked → stored in Postgres → ACK → async fanout to matching subscriptions
|
||||
5. REQ messages: filters validated → Postgres query → results streamed → EOSE → live subscription registered
|
||||
6. Fanout: post-ingest, subscription index (ETS) is traversed; matching connection processes receive events via `send/2`
|
||||
|
||||
---
|
||||
|
||||
## 2. Current Defences Inventory
|
||||
|
||||
### Connection Layer
|
||||
|
||||
| Defence | Value | Enforcement Point |
|
||||
|---|---|---|
|
||||
| Max WebSocket frame size | **1,048,576 bytes (1 MiB)** | Checked in `handle_in` *before* JSON decode, and at Bandit upgrade (`max_frame_size`) |
|
||||
| WebSocket upgrade timeout | **60,000 ms** | Passed to `WebSockAdapter.upgrade` |
|
||||
| Binary frame rejection | Returns NOTICE, connection stays open | `handle_in` opcode check |
|
||||
| Outbound queue limit | **256 events** per connection | Overflow strategy: **`:close`** (WS 1008) |
|
||||
| Outbound drain batch | **64 events** | Async drain via `send(self(), :drain_outbound_queue)` |
|
||||
| Outbound pressure telemetry | Threshold at **75%** of queue | Emits telemetry event only, no enforcement |
|
||||
| IP blocking | Via moderation cache (ETS) | Management API can add blocked IPs |
|
||||
|
||||
### Protocol Layer
|
||||
|
||||
| Defence | Value | Notes |
|
||||
|---|---|---|
|
||||
| Max event JSON size | **262,144 bytes (256 KiB)** | Re-serialises decoded event and checks byte size |
|
||||
| Max filters per REQ | **16** | Rejected at filter validation |
|
||||
| Max filter `limit` | **500** | `min(client_limit, 500)` applied at query time |
|
||||
| Max subscriptions per connection | **32** | Existing sub IDs updated without counting toward limit |
|
||||
| Subscription ID max length | **64 characters** | Must be non-empty |
|
||||
| Event kind range | **0–65,535** | Integer range check |
|
||||
| Max future event skew | **900 seconds (15 min)** | Events with `created_at > now + 900` rejected |
|
||||
| Unknown filter keys | **Rejected** | Allowed: `ids`, `authors`, `kinds`, `since`, `until`, `limit`, `search`, `#<letter>` |
|
||||
|
||||
### Event Validation Pipeline
|
||||
|
||||
Strict order:
|
||||
1. Required fields present (`id`, `pubkey`, `created_at`, `kind`, `tags`, `content`, `sig`)
|
||||
2. `id` — 64-char lowercase hex
|
||||
3. `pubkey` — 64-char lowercase hex
|
||||
4. `created_at` — non-negative integer, max 900s future skew
|
||||
5. `kind` — integer in [0, 65535]
|
||||
6. `tags` — list of non-empty string arrays (**no length limit on tags array or individual tag values**)
|
||||
7. `content` — any binary string
|
||||
8. `sig` — 128-char lowercase hex
|
||||
9. ID hash recomputation and comparison
|
||||
10. Schnorr signature verification via `lib_secp256k1` (gated by `verify_event_signatures` flag, default `true`)
|
||||
|
||||
### Rate Limiting
|
||||
|
||||
| Defence | Value | Notes |
|
||||
|---|---|---|
|
||||
| Event ingest rate | **120 events per window** | Per-connection sliding window |
|
||||
| Ingest window | **1 second** | Resets on first event after expiry |
|
||||
| No per-IP connection rate limiting | — | Must be handled at reverse proxy |
|
||||
| No global connection count ceiling | — | BEAM handles thousands but no configured limit |
|
||||
|
||||
### Authentication (NIP-42)
|
||||
|
||||
- Challenge issued to **all** connections on connect (optional escalation model)
|
||||
- AUTH event must: pass full NIP-01 validation, be kind `22242`, contain matching `challenge` tag, contain matching `relay` tag
|
||||
- `created_at` freshness: must be `>= now - 600s` (10 min)
|
||||
- On success: pubkey added to `authenticated_pubkeys` MapSet; challenge rotated
|
||||
- Supports multiple authenticated pubkeys per connection
|
||||
|
||||
### Authentication (NIP-98 HTTP)
|
||||
|
||||
- Management endpoint (`POST /management`) requires NIP-98 header
|
||||
- Auth event must be kind `27235`, `created_at` within **60 seconds** of now
|
||||
- Must include `method` and `u` tags matching request exactly
|
||||
|
||||
### Access Control
|
||||
|
||||
- `auth_required_for_writes`: default **false** (configurable)
|
||||
- `auth_required_for_reads`: default **false** (configurable)
|
||||
- Protected events (NIP-70, tagged `["-"]`): require auth + pubkey match
|
||||
- Giftwrap (kind 1059): unauthenticated REQ → CLOSED; authenticated REQ must include `#p` containing own pubkey
|
||||
|
||||
### Database
|
||||
|
||||
- All queries use Ecto parameterised bindings — no raw string interpolation
|
||||
- LIKE search patterns escaped (`%`, `_`, `\` characters)
|
||||
- Deletion enforces `pubkey == deleter_pubkey` in WHERE clause
|
||||
- Soft-delete via `deleted_at`; hard-delete only via vanish (NIP-62) or expiration purge
|
||||
- DB pool: **32 connections** (prod), queue target 1s, interval 5s
|
||||
|
||||
### Moderation
|
||||
|
||||
- Banned pubkeys, allowed pubkeys, banned events, blocked IPs stored in ETS cache
|
||||
- Management API (NIP-98 authed) for CRUD on moderation lists
|
||||
- Cache invalidated atomically on writes
|
||||
|
||||
---
|
||||
|
||||
## 3. Known Gaps and Areas of Concern
|
||||
|
||||
The following are areas where the current implementation may be vulnerable or where defences could be strengthened. **Please evaluate each and provide recommendations.**
|
||||
|
||||
### 3.1 Connection Exhaustion
|
||||
|
||||
- There is **no global limit on concurrent WebSocket connections**. Each connection is an Elixir process (~2–3 KiB base), but subscriptions, auth state, and outbound queues add per-connection memory.
|
||||
- There is **no per-IP connection rate limiting at the application layer**. IP blocking exists but is reactive (management API), not automatic.
|
||||
- There is **no idle timeout** after the WebSocket upgrade completes. A connection can remain open indefinitely without sending or receiving messages.
|
||||
|
||||
**Questions:**
|
||||
- What connection limits should be configured at the Bandit/BEAM level?
|
||||
- Should an idle timeout be implemented? If so, what value balances real-time subscription use against resource waste?
|
||||
- Should per-IP connection counting be implemented at the application layer, or is this strictly a reverse proxy concern?
|
||||
|
||||
### 3.2 Subscription Abuse
|
||||
|
||||
- A single connection can hold **32 subscriptions**, each with up to **16 filters**. That's 512 filter predicates per connection being evaluated on every fanout.
|
||||
- Filter arrays (`ids`, `authors`, `kinds`, tag values) have **no element count limits**. A filter could contain thousands of author pubkeys.
|
||||
- There is no cost accounting for "expensive" subscriptions (e.g., wide open filters matching all events).
|
||||
|
||||
**Questions:**
|
||||
- Should filter array element counts be bounded? If so, what limits per field?
|
||||
- Should there be a per-connection "filter complexity" budget?
|
||||
- How expensive is the current ETS subscription index traversal at scale (e.g., 10K concurrent connections × 32 subs each)?
|
||||
|
||||
### 3.3 Tag Array Size
|
||||
|
||||
- Event validation does **not limit the number of tags** or the length of individual tag values beyond the 256 KiB total event size cap.
|
||||
- A maximally-tagged event could contain thousands of short tags, causing amplification in `event_tags` table inserts (one row per tag).
|
||||
|
||||
**Questions:**
|
||||
- Should a max tag count be enforced? What is a reasonable limit?
|
||||
- What is the insert cost of storing e.g. 1,000 tags per event? Could this be used for write amplification?
|
||||
- Should individual tag value lengths be bounded?
|
||||
|
||||
### 3.4 AUTH Timing
|
||||
|
||||
- AUTH event `created_at` freshness only checks the **lower bound** (`>= now - 600`). An AUTH event with `created_at` far in the future passes validation.
|
||||
- Regular events have a future skew cap of 900s, but AUTH events do not.
|
||||
|
||||
**Questions:**
|
||||
- Should AUTH events also enforce a future `created_at` bound?
|
||||
- Is a 600-second AUTH window too wide? Could it be reduced?
|
||||
|
||||
### 3.5 Outbound Amplification
|
||||
|
||||
- A single inbound EVENT can fan out to an unbounded number of matching subscriptions across all connections.
|
||||
- The outbound queue (256 events, `:close` strategy) protects individual connections but does not limit total fanout work per event.
|
||||
- The fanout traverses the ETS subscription index synchronously in the ingesting connection's process.
|
||||
|
||||
**Questions:**
|
||||
- Should fanout be bounded per event (e.g., max N recipients before yielding)?
|
||||
- Should fanout happen in a separate process pool rather than inline?
|
||||
- Is the `:close` overflow strategy optimal, or would `:drop_oldest` be better for well-behaved clients with temporary backpressure?
|
||||
|
||||
### 3.6 Query Amplification
|
||||
|
||||
- A single REQ with 16 filters, each with `limit: 500`, could trigger 16 separate Postgres queries returning up to 8,000 events total.
|
||||
- COUNT requests also execute per-filter queries (now deduplicated via UNION ALL).
|
||||
- `search` filters use `ILIKE %pattern%` which cannot use B-tree indexes.
|
||||
|
||||
**Questions:**
|
||||
- Should there be a per-REQ total result cap (across all filters)?
|
||||
- Should `search` queries be rate-limited or require a minimum pattern length?
|
||||
- Should COUNT be disabled or rate-limited separately?
|
||||
- Are there missing indexes that would help common query patterns?
|
||||
|
||||
### 3.7 Multi-Node Trust
|
||||
|
||||
- Events received via `:remote_fanout_event` from peer BEAM nodes **skip all validation and policy checks** and go directly to the subscription index.
|
||||
- This assumes all cluster peers are trusted.
|
||||
|
||||
**Questions:**
|
||||
- If cluster membership is dynamic or spans trust boundaries, should remote events be re-validated?
|
||||
- Should there be a shared secret or HMAC on inter-node messages?
|
||||
|
||||
### 3.8 Metrics Endpoint
|
||||
|
||||
- `/metrics` (Prometheus) is **unauthenticated**.
|
||||
- Exposes internal telemetry: connection counts, event throughput, queue depths, database timing.
|
||||
|
||||
**Questions:**
|
||||
- Should `/metrics` require authentication or be restricted to internal networks?
|
||||
- Could metrics data be used to profile the relay's capacity and craft targeted attacks?
|
||||
|
||||
### 3.9 Negentropy Stub
|
||||
|
||||
- NEG-OPEN, NEG-MSG, NEG-CLOSE messages are accepted and acknowledged but the reconciliation logic is a stub (cursor counter only).
|
||||
- Are there resource implications of accepting negentropy sessions without real implementation?
|
||||
|
||||
### 3.10 Event Re-Serialisation Cost
|
||||
|
||||
- To enforce the 256 KiB event size limit, the relay calls `JSON.encode!(event)` on the already-decoded event map. This re-serialisation happens on every inbound EVENT.
|
||||
- Could this be replaced with a byte-length check on the raw frame payload (already available)?
|
||||
|
||||
---
|
||||
|
||||
## 4. Specific Review Requests
|
||||
|
||||
Beyond the gaps above, please also evaluate:
|
||||
|
||||
1. **Bandit configuration**: Are there Bandit-level options (max connections, header limits, request timeouts, keepalive settings) that should be tuned for a public-facing relay?
|
||||
|
||||
2. **BEAM VM flags**: Are there any Erlang VM flags (`+P`, `+Q`, `+S`, memory limits) that should be set for production hardening?
|
||||
|
||||
3. **Ecto pool exhaustion**: With 32 DB connections and potentially thousands of concurrent REQ queries, what happens under pool exhaustion? Is the 1s queue target + 5s interval appropriate?
|
||||
|
||||
4. **ETS table sizing**: The subscription index and moderation cache use ETS. Are there memory limits or table options (`read_concurrency`, `write_concurrency`, `compressed`) that should be tuned?
|
||||
|
||||
5. **Process mailbox overflow**: Connection processes receive events via `send/2` during fanout. If a process is slow to consume, its mailbox grows. The outbound queue mechanism is application-level — but is the BEAM-level mailbox also protected?
|
||||
|
||||
6. **Reverse proxy recommendations**: What nginx/Caddy configuration should complement the relay's defences? (Rate limiting, connection limits, WebSocket-specific settings, request body size.)
|
||||
|
||||
7. **Monitoring and alerting**: What telemetry signals should trigger alerts? (Connection count spikes, queue overflow rates, DB pool saturation, error rates.)
|
||||
|
||||
---
|
||||
|
||||
## 5. Out of Scope
|
||||
|
||||
The following are **not** in scope for this review:
|
||||
- TLS configuration (handled by reverse proxy)
|
||||
- DNS and network-level DDoS mitigation
|
||||
- Operating system hardening
|
||||
- Key management for the relay identity
|
||||
- Client-side security
|
||||
- Nostr protocol design flaws (we implement the spec as-is)
|
||||
|
||||
---
|
||||
|
||||
## 6. Response Format
|
||||
|
||||
For each recommendation, use this format:
|
||||
|
||||
### [Severity] Title
|
||||
|
||||
**Attack/failure mode:** What goes wrong without this mitigation.
|
||||
|
||||
**Current state:** What exists today (or doesn't).
|
||||
|
||||
**Recommendation:** Specific change — config value, code change, or architectural decision.
|
||||
|
||||
**Trade-offs:** Any impact on legitimate users or operational complexity.
|
||||
398
docs/slop/LOCAL_API.md
Normal file
398
docs/slop/LOCAL_API.md
Normal file
@@ -0,0 +1,398 @@
|
||||
# Parrhesia Shared API + Local API Design (Option 1)
|
||||
|
||||
## 1) Goal
|
||||
|
||||
Expose a stable in-process API for embedding apps **and** refactor server transports to consume the same API.
|
||||
|
||||
Desired end state:
|
||||
|
||||
- WebSocket server, HTTP management, and embedding app all call one shared core API.
|
||||
- Transport layers (WS/HTTP/local) only do framing, auth header extraction, and response encoding.
|
||||
- Policy/storage/fanout/business semantics live in one place.
|
||||
|
||||
This keeps everything in the same dependency (`:parrhesia`) and avoids a second package.
|
||||
|
||||
---
|
||||
|
||||
## 2) Key architectural decision
|
||||
|
||||
Previous direction: `Parrhesia.Local.*` as primary public API.
|
||||
|
||||
Updated direction (this doc):
|
||||
|
||||
- Introduce **shared core API modules** under `Parrhesia.API.*`.
|
||||
- Make server code (`Parrhesia.Web.Connection`, management handlers) delegate to `Parrhesia.API.*`.
|
||||
- Keep `Parrhesia.Local.*` as optional convenience wrappers over `Parrhesia.API.*`.
|
||||
|
||||
This ensures no divergence between local embedding behavior and websocket behavior.
|
||||
|
||||
---
|
||||
|
||||
## 3) Layered design
|
||||
|
||||
```text
|
||||
Transport layer
|
||||
- Parrhesia.Web.Connection (WS)
|
||||
- Parrhesia.Web.Management (HTTP)
|
||||
- Parrhesia.Local.* wrappers (in-process)
|
||||
|
||||
Shared API layer
|
||||
- Parrhesia.API.Auth
|
||||
- Parrhesia.API.Events
|
||||
- Parrhesia.API.Stream (optional)
|
||||
- Parrhesia.API.Admin (optional, for management methods)
|
||||
|
||||
Domain/runtime dependencies
|
||||
- Parrhesia.Policy.EventPolicy
|
||||
- Parrhesia.Storage.* adapters
|
||||
- Parrhesia.Groups.Flow
|
||||
- Parrhesia.Subscriptions.Index
|
||||
- Parrhesia.Fanout.MultiNode
|
||||
- Parrhesia.Telemetry
|
||||
```
|
||||
|
||||
Rule: all ingest/query/count decisions happen in `Parrhesia.API.Events`.
|
||||
|
||||
---
|
||||
|
||||
## 4) Public module plan
|
||||
|
||||
### 4.1 `Parrhesia.API.Auth`
|
||||
|
||||
Purpose:
|
||||
- event validation helpers
|
||||
- NIP-98 verification
|
||||
- optional embedding account resolution hook
|
||||
|
||||
Proposed functions:
|
||||
|
||||
```elixir
|
||||
@spec validate_event(map()) :: :ok | {:error, term()}
|
||||
@spec compute_event_id(map()) :: String.t()
|
||||
|
||||
@spec validate_nip98(String.t() | nil, String.t(), String.t()) ::
|
||||
{:ok, Parrhesia.API.Auth.Context.t()} | {:error, term()}
|
||||
|
||||
@spec validate_nip98(String.t() | nil, String.t(), String.t(), keyword()) ::
|
||||
{:ok, Parrhesia.API.Auth.Context.t()} | {:error, term()}
|
||||
```
|
||||
|
||||
`validate_nip98/4` options:
|
||||
|
||||
```elixir
|
||||
account_resolver: (pubkey_hex :: String.t(), auth_event :: map() ->
|
||||
{:ok, account :: term()} | {:error, term()})
|
||||
```
|
||||
|
||||
Context struct:
|
||||
|
||||
```elixir
|
||||
defmodule Parrhesia.API.Auth.Context do
|
||||
@enforce_keys [:pubkey, :auth_event]
|
||||
defstruct [:pubkey, :auth_event, :account, claims: %{}]
|
||||
end
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 4.2 `Parrhesia.API.Events`
|
||||
|
||||
Purpose:
|
||||
- canonical ingress/query/count API used by WS + local + HTTP integrations.
|
||||
|
||||
Proposed functions:
|
||||
|
||||
```elixir
|
||||
@spec publish(map(), keyword()) :: {:ok, Parrhesia.API.Events.PublishResult.t()} | {:error, term()}
|
||||
@spec query([map()], keyword()) :: {:ok, [map()]} | {:error, term()}
|
||||
@spec count([map()], keyword()) :: {:ok, non_neg_integer() | map()} | {:error, term()}
|
||||
```
|
||||
|
||||
Request context:
|
||||
|
||||
```elixir
|
||||
defmodule Parrhesia.API.RequestContext do
|
||||
defstruct authenticated_pubkeys: MapSet.new(),
|
||||
actor: nil,
|
||||
metadata: %{}
|
||||
end
|
||||
```
|
||||
|
||||
Publish result:
|
||||
|
||||
```elixir
|
||||
defmodule Parrhesia.API.Events.PublishResult do
|
||||
@enforce_keys [:event_id, :accepted, :message]
|
||||
defstruct [:event_id, :accepted, :message]
|
||||
end
|
||||
```
|
||||
|
||||
### Publish semantics (must match websocket EVENT)
|
||||
|
||||
Pipeline in `publish/2`:
|
||||
|
||||
1. frame/event size limits
|
||||
2. `Parrhesia.Protocol.validate_event/1`
|
||||
3. `Parrhesia.Policy.EventPolicy.authorize_write/2`
|
||||
4. group handling (`Parrhesia.Groups.Flow.handle_event/1`)
|
||||
5. persistence path (`put_event`, deletion, vanish, ephemeral rules)
|
||||
6. fanout (local + multi-node)
|
||||
7. telemetry emit
|
||||
|
||||
Return shape mirrors Nostr `OK` semantics:
|
||||
|
||||
```elixir
|
||||
{:ok, %PublishResult{event_id: id, accepted: true, message: "ok: event stored"}}
|
||||
{:ok, %PublishResult{event_id: id, accepted: false, message: "blocked: ..."}}
|
||||
```
|
||||
|
||||
### Query/count semantics (must match websocket REQ/COUNT)
|
||||
|
||||
`query/2` and `count/2`:
|
||||
|
||||
1. validate filters
|
||||
2. run read policy (`EventPolicy.authorize_read/2`)
|
||||
3. call storage with `requester_pubkeys` from context
|
||||
4. return ordered events/count payload
|
||||
|
||||
Giftwrap restrictions (`kind 1059`) must remain identical to websocket behavior.
|
||||
|
||||
---
|
||||
|
||||
### 4.3 `Parrhesia.API.Stream` (optional but recommended)
|
||||
|
||||
Purpose:
|
||||
- local in-process subscriptions using same subscription index/fanout model.
|
||||
|
||||
Proposed functions:
|
||||
|
||||
```elixir
|
||||
@spec subscribe(pid(), String.t(), [map()], keyword()) :: {:ok, reference()} | {:error, term()}
|
||||
@spec unsubscribe(reference()) :: :ok
|
||||
```
|
||||
|
||||
Subscriber contract:
|
||||
|
||||
```elixir
|
||||
{:parrhesia, :event, ref, subscription_id, event}
|
||||
{:parrhesia, :eose, ref, subscription_id}
|
||||
{:parrhesia, :closed, ref, subscription_id, reason}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 4.4 `Parrhesia.Local.*` wrappers
|
||||
|
||||
`Parrhesia.Local.*` remain as convenience API for embedding apps, implemented as thin wrappers:
|
||||
|
||||
- `Parrhesia.Local.Auth` -> delegates to `Parrhesia.API.Auth`
|
||||
- `Parrhesia.Local.Events` -> delegates to `Parrhesia.API.Events`
|
||||
- `Parrhesia.Local.Stream` -> delegates to `Parrhesia.API.Stream`
|
||||
- `Parrhesia.Local.Client` -> use-case helpers (posts + private messages)
|
||||
|
||||
No business logic in wrappers.
|
||||
|
||||
---
|
||||
|
||||
## 5) Server integration plan (critical)
|
||||
|
||||
### 5.1 WebSocket (`Parrhesia.Web.Connection`)
|
||||
|
||||
After decode:
|
||||
- `EVENT` -> `Parrhesia.API.Events.publish/2`
|
||||
- `REQ` -> `Parrhesia.API.Events.query/2`
|
||||
- `COUNT` -> `Parrhesia.API.Events.count/2`
|
||||
- `AUTH` keep transport-specific challenge/session flow, but can use `API.Auth.validate_event/1` internally
|
||||
|
||||
WebSocket keeps responsibility for:
|
||||
- websocket framing
|
||||
- subscription lifecycle per connection
|
||||
- AUTH challenge rotation protocol frames
|
||||
|
||||
### 5.2 HTTP management (`Parrhesia.Web.Management`)
|
||||
|
||||
- NIP-98 header validation via `Parrhesia.API.Auth.validate_nip98/3`
|
||||
- command execution via `Parrhesia.API.Admin` (or existing storage admin adapter via API facade)
|
||||
|
||||
---
|
||||
|
||||
## 6) High-level client helpers for embedding app use case
|
||||
|
||||
These helpers are optional and live in `Parrhesia.Local.Client`.
|
||||
|
||||
### 6.1 Public posts
|
||||
|
||||
```elixir
|
||||
@spec publish_post(Parrhesia.API.Auth.Context.t(), String.t(), keyword()) ::
|
||||
{:ok, Parrhesia.API.Events.PublishResult.t()} | {:error, term()}
|
||||
|
||||
@spec list_posts(keyword()) :: {:ok, [map()]} | {:error, term()}
|
||||
@spec stream_posts(pid(), keyword()) :: {:ok, reference()} | {:error, term()}
|
||||
```
|
||||
|
||||
`publish_post/3` options:
|
||||
- `:tags`
|
||||
- `:created_at`
|
||||
- `:signer` callback (required unless fully signed event provided)
|
||||
|
||||
Signer contract:
|
||||
|
||||
```elixir
|
||||
(unsigned_event_map -> {:ok, signed_event_map} | {:error, term()})
|
||||
```
|
||||
|
||||
Parrhesia does not store or manage private keys.
|
||||
|
||||
### 6.2 Private messages (giftwrap kind 1059)
|
||||
|
||||
```elixir
|
||||
@spec send_private_message(
|
||||
Parrhesia.API.Auth.Context.t(),
|
||||
recipient_pubkey :: String.t(),
|
||||
encrypted_payload :: String.t(),
|
||||
keyword()
|
||||
) :: {:ok, Parrhesia.API.Events.PublishResult.t()} | {:error, term()}
|
||||
|
||||
@spec inbox(Parrhesia.API.Auth.Context.t(), keyword()) :: {:ok, [map()]} | {:error, term()}
|
||||
@spec stream_inbox(pid(), Parrhesia.API.Auth.Context.t(), keyword()) :: {:ok, reference()} | {:error, term()}
|
||||
```
|
||||
|
||||
Behavior:
|
||||
- `send_private_message/4` builds event template with kind `1059` and `p` tag.
|
||||
- host signer signs template.
|
||||
- publish through `API.Events.publish/2`.
|
||||
- `inbox/2` queries `%{"kinds" => [1059], "#p" => [auth.pubkey]}` with authenticated context.
|
||||
|
||||
---
|
||||
|
||||
## 7) Error model
|
||||
|
||||
Shared API should normalize output regardless of transport.
|
||||
|
||||
Guideline:
|
||||
- protocol/policy rejection -> `{:ok, %{accepted: false, message: "..."}}`
|
||||
- runtime/system failure -> `{:error, term()}`
|
||||
|
||||
Common reason mapping:
|
||||
|
||||
| Reason | Message prefix |
|
||||
|---|---|
|
||||
| `:auth_required` | `auth-required:` |
|
||||
| `:restricted_giftwrap` | `restricted:` |
|
||||
| `:invalid_event` | `invalid:` |
|
||||
| `:duplicate_event` | `duplicate:` |
|
||||
| `:event_rate_limited` | `rate-limited:` |
|
||||
|
||||
---
|
||||
|
||||
## 8) Telemetry
|
||||
|
||||
Emit shared events in API layer (not transport-specific):
|
||||
|
||||
- `[:parrhesia, :api, :publish, :stop]`
|
||||
- `[:parrhesia, :api, :query, :stop]`
|
||||
- `[:parrhesia, :api, :count, :stop]`
|
||||
- `[:parrhesia, :api, :auth, :stop]`
|
||||
|
||||
Metadata:
|
||||
- `traffic_class`
|
||||
- `caller` (`:websocket | :http | :local`)
|
||||
- optional `account_present?`
|
||||
|
||||
Transport-level telemetry can remain separate where needed.
|
||||
|
||||
---
|
||||
|
||||
## 9) Refactor sequence
|
||||
|
||||
### Phase 1: Extract shared API
|
||||
1. Create `Parrhesia.API.Events` with publish/query/count from current `Web.Connection` paths.
|
||||
2. Create `Parrhesia.API.Auth` wrappers for NIP-98/event validation.
|
||||
3. Add API-level tests.
|
||||
|
||||
### Phase 2: Migrate transports
|
||||
1. Update `Parrhesia.Web.Connection` to delegate publish/query/count to `API.Events`.
|
||||
2. Update `Parrhesia.Web.Management` to use `API.Auth`.
|
||||
3. Keep behavior unchanged.
|
||||
|
||||
### Phase 3: Add local wrappers/helpers
|
||||
1. Implement `Parrhesia.Local.Auth/Events/Stream` as thin delegates.
|
||||
2. Add `Parrhesia.Local.Client` post/inbox/send helpers.
|
||||
3. Add embedding documentation.
|
||||
|
||||
### Phase 4: Lock parity
|
||||
1. Add parity tests: WS vs Local API for same inputs and policy outcomes.
|
||||
2. Add property tests for query/count equivalence where feasible.
|
||||
|
||||
---
|
||||
|
||||
## 10) Testing requirements
|
||||
|
||||
1. **Transport parity tests**
|
||||
- Same signed event via WS and API => same accepted/message semantics.
|
||||
2. **Policy parity tests**
|
||||
- Giftwrap visibility and auth-required behavior identical across WS/API/local.
|
||||
3. **Auth tests**
|
||||
- NIP-98 success/failure + account resolver success/failure.
|
||||
4. **Fanout tests**
|
||||
- publish via API reaches local stream subscribers and WS subscribers.
|
||||
5. **Failure tests**
|
||||
- storage failures surface deterministic errors in all transports.
|
||||
|
||||
---
|
||||
|
||||
## 11) Backwards compatibility
|
||||
|
||||
- No breaking change to websocket protocol.
|
||||
- No breaking change to management endpoint contract.
|
||||
- New API modules are additive.
|
||||
- Existing apps can ignore local API entirely.
|
||||
|
||||
---
|
||||
|
||||
## 12) Embedding example flow
|
||||
|
||||
### 12.1 Login/auth
|
||||
|
||||
```elixir
|
||||
with {:ok, auth} <- Parrhesia.API.Auth.validate_nip98(header, method, url,
|
||||
account_resolver: &MyApp.Accounts.resolve_nostr_pubkey/2
|
||||
) do
|
||||
# use auth.pubkey/auth.account in host session
|
||||
end
|
||||
```
|
||||
|
||||
### 12.2 Post publish
|
||||
|
||||
```elixir
|
||||
Parrhesia.Local.Client.publish_post(auth, "hello", signer: &MyApp.NostrSigner.sign/1)
|
||||
```
|
||||
|
||||
### 12.3 Private message
|
||||
|
||||
```elixir
|
||||
Parrhesia.Local.Client.send_private_message(
|
||||
auth,
|
||||
recipient_pubkey,
|
||||
encrypted_payload,
|
||||
signer: &MyApp.NostrSigner.sign/1
|
||||
)
|
||||
```
|
||||
|
||||
### 12.4 Inbox
|
||||
|
||||
```elixir
|
||||
Parrhesia.Local.Client.inbox(auth, limit: 100)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 13) Summary
|
||||
|
||||
Yes, this can and should be extracted into a shared API module. The server should consume it too.
|
||||
|
||||
That gives:
|
||||
- one canonical behavior path,
|
||||
- cleaner embedding,
|
||||
- easier testing,
|
||||
- lower long-term maintenance cost.
|
||||
27
flake.lock
generated
Normal file
27
flake.lock
generated
Normal file
@@ -0,0 +1,27 @@
|
||||
{
|
||||
"nodes": {
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1773389992,
|
||||
"narHash": "sha256-wvfdLLWJ2I9oEpDd9PfMA8osfIZicoQ5MT1jIwNs9Tk=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "c06b4ae3d6599a672a6210b7021d699c351eebda",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"nixpkgs": "nixpkgs"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
||||
68
flake.nix
Normal file
68
flake.nix
Normal file
@@ -0,0 +1,68 @@
|
||||
{
  description = "Parrhesia Nostr relay";

  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";

  outputs = {nixpkgs, ...}: let
    # All platforms the flake's outputs are generated for.
    systems = [
      "x86_64-linux"
      "aarch64-linux"
      "x86_64-darwin"
      "aarch64-darwin"
    ];

    # Builds an attrset keyed by system name from a per-system function.
    forAllSystems = nixpkgs.lib.genAttrs systems;
  in {
    # `nix fmt` uses alejandra for formatting Nix sources.
    formatter = forAllSystems (system: (import nixpkgs {inherit system;}).alejandra);

    packages = forAllSystems (
      system: let
        pkgs = import nixpkgs {inherit system;};
        lib = pkgs.lib;
        # The relay itself is packaged in ./default.nix.
        parrhesia = pkgs.callPackage ./default.nix {};
      in
        {
          default = parrhesia;
          inherit parrhesia;
        }
        # The Docker image is only offered on Linux hosts
        # (dockerTools builds Linux images).
        // lib.optionalAttrs pkgs.stdenv.hostPlatform.isLinux {
          dockerImage = pkgs.dockerTools.buildLayeredImage {
            name = "parrhesia";
            tag = "latest";

            contents = [
              parrhesia
              pkgs.bash
              pkgs.cacert
              pkgs.coreutils
              # Provides minimal /etc/passwd & /etc/group entries so the
              # non-root user below resolves inside the image.
              pkgs.fakeNss
            ];

            # Create a world-writable /tmp inside the image (sticky bit set),
            # since the base layers contain no directories.
            extraCommands = ''
              mkdir -p tmp
              chmod 1777 tmp
            '';

            config = {
              Entrypoint = ["${parrhesia}/bin/parrhesia"];
              # Release subcommand: run in the foreground (container-friendly).
              Cmd = ["foreground"];
              ExposedPorts = {
                "4413/tcp" = {};
              };
              WorkingDir = "/";
              # nobody:nogroup — run unprivileged; entries supplied by fakeNss.
              User = "65534:65534";
              Env = [
                # HOME must be writable; /tmp is (see extraCommands).
                "HOME=/tmp"
                "LANG=C.UTF-8"
                "LC_ALL=C.UTF-8"
                "MIX_ENV=prod"
                # PORT matches the exposed 4413/tcp above.
                "PORT=4413"
                # Disable Erlang distribution inside the container.
                "RELEASE_DISTRIBUTION=none"
                # CA bundle for outbound TLS (cacert is in contents).
                "SSL_CERT_FILE=${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt"
              ];
            };
          };
        }
    );
  };
}
|
||||
35
lib/parrhesia/release.ex
Normal file
35
lib/parrhesia/release.ex
Normal file
@@ -0,0 +1,35 @@
|
||||
defmodule Parrhesia.Release do
  @moduledoc """
  Helpers for running Ecto tasks from a production release.

  Intended to be invoked through the release binary, e.g.

      bin/parrhesia eval "Parrhesia.Release.migrate()"
  """

  @app :parrhesia

  @doc """
  Runs all pending migrations (`:up`) for every configured repo.
  """
  def migrate do
    load_app()

    Enum.map(repos(), fn repo ->
      # with_repo/2 starts the repo just long enough to run the migrations.
      {:ok, _migrated, _started_apps} =
        Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :up, all: true))
    end)
  end

  @doc """
  Rolls `repo` back down to the migration identified by `version`.
  """
  def rollback(repo, version) when is_atom(repo) and is_integer(version) do
    load_app()

    {:ok, _migrated, _started_apps} =
      Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :down, to: version))
  end

  # Load the application so its configuration (including :ecto_repos) is
  # readable without booting the full supervision tree.
  defp load_app do
    Application.load(@app)
  end

  # Repos declared under the :ecto_repos config key for this app.
  defp repos do
    Application.fetch_env!(@app, :ecto_repos)
  end
end
|
||||
@@ -1,51 +0,0 @@
|
||||
defmodule Parrhesia.Storage.Archiver do
  @moduledoc """
  Partition-aware archival helpers for Postgres event partitions.
  """

  import Ecto.Query

  alias Parrhesia.Repo

  # Only plain unquoted Postgres identifiers are accepted; anything else is
  # rejected before it can be interpolated into SQL.
  @identifier_pattern ~r/^[a-zA-Z_][a-zA-Z0-9_]*$/

  @doc """
  Lists all `events_*` partitions excluding the default partition.
  """
  @spec list_partitions() :: [String.t()]
  def list_partitions do
    "pg_tables"
    |> where([table], table.schemaname == "public")
    |> where([table], like(table.tablename, "events_%"))
    |> where([table], table.tablename != "events_default")
    |> order_by([table], asc: table.tablename)
    |> select([table], table.tablename)
    |> Repo.all()
  end

  @doc """
  Generates an archive SQL statement for the given partition.
  """
  @spec archive_sql(String.t(), String.t()) :: String.t()
  def archive_sql(partition_name, archive_table_name) do
    "INSERT INTO " <>
      quote_identifier!(archive_table_name) <>
      " SELECT * FROM " <> quote_identifier!(partition_name) <> ";"
  end

  # Validates and double-quotes an identifier; raises on anything outside the
  # safe pattern (including non-binary input, handled by the fallback clause).
  defp quote_identifier!(identifier) when is_binary(identifier) do
    if Regex.match?(@identifier_pattern, identifier) do
      ~s("#{identifier}")
    else
      raise ArgumentError, "invalid SQL identifier: #{inspect(identifier)}"
    end
  end

  defp quote_identifier!(identifier) do
    raise ArgumentError, "invalid SQL identifier: #{inspect(identifier)}"
  end
end
|
||||
310
lib/parrhesia/storage/partitions.ex
Normal file
310
lib/parrhesia/storage/partitions.ex
Normal file
@@ -0,0 +1,310 @@
|
||||
defmodule Parrhesia.Storage.Partitions do
  @moduledoc """
  Partition lifecycle helpers for Postgres `events` and `event_tags` monthly partitions.
  """

  import Ecto.Query

  alias Parrhesia.Repo

  # Only plain unquoted Postgres identifiers may be interpolated into DDL.
  @identifier_pattern ~r/^[a-zA-Z_][a-zA-Z0-9_]*$/
  # Monthly partitions follow the `events_YYYY_MM` naming scheme.
  @monthly_partition_pattern ~r/^events_(\d{4})_(\d{2})$/
  @events_partition_prefix "events"
  @event_tags_partition_prefix "event_tags"
  @default_months_ahead 2

  @type monthly_partition :: %{
          name: String.t(),
          year: pos_integer(),
          month: pos_integer(),
          month_start_unix: non_neg_integer(),
          month_end_unix: non_neg_integer()
        }

  @doc """
  Lists all `events_*` partitions excluding the default partition.
  """
  @spec list_partitions() :: [String.t()]
  def list_partitions do
    # Introspects pg_tables rather than pg_inherits: returns every table named
    # events_*, whether or not it is still attached to the parent.
    query =
      from(table in "pg_tables",
        where: table.schemaname == "public",
        where: like(table.tablename, "events_%"),
        where: table.tablename != "events_default",
        select: table.tablename,
        order_by: [asc: table.tablename]
      )

    Repo.all(query)
  end

  @doc """
  Lists monthly event partitions that match `events_YYYY_MM` naming.
  """
  @spec list_monthly_partitions() :: [monthly_partition()]
  def list_monthly_partitions do
    list_partitions()
    |> Enum.map(&parse_monthly_partition/1)
    |> Enum.reject(&is_nil/1)
    |> Enum.sort_by(&{&1.year, &1.month})
  end

  @doc """
  Ensures monthly partitions exist for the current month and `months_ahead` future months.
  """
  @spec ensure_monthly_partitions(keyword()) :: :ok | {:error, term()}
  def ensure_monthly_partitions(opts \\ []) when is_list(opts) do
    # Invalid option values silently fall back to the defaults.
    months_ahead =
      opts
      |> Keyword.get(:months_ahead, @default_months_ahead)
      |> normalize_non_negative_integer(@default_months_ahead)

    reference_date =
      opts
      |> Keyword.get(:reference_date, Date.utc_today())
      |> normalize_reference_date()

    reference_month = month_start(reference_date)

    # Offset 0 is the reference month itself; 0..0 would also yield [0] but
    # the zero case is kept explicit.
    offsets =
      if months_ahead == 0 do
        [0]
      else
        Enum.to_list(0..months_ahead)
      end

    # Stop at the first month whose partitions cannot be created.
    Enum.reduce_while(offsets, :ok, fn offset, :ok ->
      target_month = shift_month(reference_month, offset)

      case create_monthly_partitions(target_month) do
        :ok -> {:cont, :ok}
        {:error, reason} -> {:halt, {:error, reason}}
      end
    end)
  end

  @doc """
  Returns the current database size in bytes.
  """
  @spec database_size_bytes() :: {:ok, non_neg_integer()} | {:error, term()}
  def database_size_bytes do
    case Repo.query("SELECT pg_database_size(current_database())") do
      {:ok, %{rows: [[size]]}} when is_integer(size) and size >= 0 -> {:ok, size}
      {:ok, _result} -> {:error, :unexpected_result}
      {:error, reason} -> {:error, reason}
    end
  end

  @doc """
  Drops an event partition table by name.

  For monthly `events_YYYY_MM` partitions, the matching `event_tags_YYYY_MM`
  partition is dropped first to keep partition lifecycle aligned.
  """
  @spec drop_partition(String.t()) :: :ok | {:error, term()}
  def drop_partition(partition_name) when is_binary(partition_name) do
    # Parent tables and the default partitions are never droppable.
    if protected_partition?(partition_name) do
      {:error, :protected_partition}
    else
      drop_partition_tables(partition_name)
    end
  end

  @doc """
  Returns the monthly `events` partition name for a date.
  """
  @spec month_partition_name(Date.t()) :: String.t()
  def month_partition_name(%Date{} = date) do
    monthly_partition_name(@events_partition_prefix, date)
  end

  @doc """
  Returns the monthly `event_tags` partition name for a date.
  """
  @spec event_tags_month_partition_name(Date.t()) :: String.t()
  def event_tags_month_partition_name(%Date{} = date) do
    monthly_partition_name(@event_tags_partition_prefix, date)
  end

  # Builds `<prefix>_YYYY_MM` with a zero-padded month.
  defp monthly_partition_name(prefix, %Date{} = date) do
    month_suffix = date.month |> Integer.to_string() |> String.pad_leading(2, "0")
    "#{prefix}_#{date.year}_#{month_suffix}"
  end

  # Creates the events partition first, then the matching event_tags partition
  # covering the same unix-second range.
  defp create_monthly_partitions(%Date{} = month_date) do
    {start_unix, end_unix} = month_bounds_unix(month_date.year, month_date.month)

    case create_monthly_partition(
           month_partition_name(month_date),
           @events_partition_prefix,
           start_unix,
           end_unix
         ) do
      :ok ->
        create_monthly_partition(
          event_tags_month_partition_name(month_date),
          @event_tags_partition_prefix,
          start_unix,
          end_unix
        )

      {:error, reason} ->
        {:error, reason}
    end
  end

  # IF NOT EXISTS makes partition creation idempotent across worker runs.
  # Identifiers are validated before interpolation; the unix bounds are
  # integers computed locally, so string interpolation is safe here.
  defp create_monthly_partition(partition_name, parent_table_name, start_unix, end_unix) do
    quoted_partition_name = quote_identifier!(partition_name)
    quoted_parent_table_name = quote_identifier!(parent_table_name)

    sql =
      """
      CREATE TABLE IF NOT EXISTS #{quoted_partition_name}
      PARTITION OF #{quoted_parent_table_name}
      FOR VALUES FROM (#{start_unix}) TO (#{end_unix})
      """

    case Repo.query(sql) do
      {:ok, _result} -> :ok
      {:error, reason} -> {:error, reason}
    end
  end

  # Non-monthly names (e.g. manually created tables) are dropped directly;
  # monthly partitions also take their event_tags sibling down with them.
  defp drop_partition_tables(partition_name) do
    case parse_monthly_partition(partition_name) do
      nil -> drop_table(partition_name)
      monthly_partition -> drop_monthly_partition(partition_name, monthly_partition)
    end
  end

  # Detach the events partition before dropping the tags partition so the
  # event_tags -> events foreign key cannot block the drop mid-way; only then
  # drop the detached events table itself.
  defp drop_monthly_partition(partition_name, %{year: year, month: month}) do
    month_date = Date.new!(year, month, 1)
    tags_partition_name = monthly_partition_name(@event_tags_partition_prefix, month_date)

    with :ok <- maybe_detach_events_partition(partition_name),
         :ok <- drop_table(tags_partition_name) do
      drop_table(partition_name)
    end
  end

  # DETACH is skipped when the table is already standalone (e.g. a previous
  # run detached it but failed before dropping).
  defp maybe_detach_events_partition(partition_name) do
    if attached_partition?(partition_name, @events_partition_prefix) do
      quoted_parent_table_name = quote_identifier!(@events_partition_prefix)
      quoted_partition_name = quote_identifier!(partition_name)

      case Repo.query(
             "ALTER TABLE #{quoted_parent_table_name} DETACH PARTITION #{quoted_partition_name}"
           ) do
        {:ok, _result} -> :ok
        {:error, reason} -> {:error, reason}
      end
    else
      :ok
    end
  end

  # Checks pg_inherits for a parent/child link between the two named tables.
  # Any query error is treated as "not attached".
  defp attached_partition?(partition_name, parent_table_name) do
    query =
      """
      SELECT 1
      FROM pg_inherits AS inheritance
      JOIN pg_class AS child ON child.oid = inheritance.inhrelid
      JOIN pg_namespace AS child_ns ON child_ns.oid = child.relnamespace
      JOIN pg_class AS parent ON parent.oid = inheritance.inhparent
      JOIN pg_namespace AS parent_ns ON parent_ns.oid = parent.relnamespace
      WHERE child_ns.nspname = 'public'
        AND parent_ns.nspname = 'public'
        AND child.relname = $1
        AND parent.relname = $2
      LIMIT 1
      """

    case Repo.query(query, [partition_name, parent_table_name]) do
      {:ok, %{rows: [[1]]}} -> true
      {:ok, %{rows: []}} -> false
      {:ok, _result} -> false
      {:error, _reason} -> false
    end
  end

  defp drop_table(table_name) do
    quoted_table_name = quote_identifier!(table_name)

    case Repo.query("DROP TABLE IF EXISTS #{quoted_table_name}") do
      {:ok, _result} -> :ok
      {:error, reason} -> {:error, reason}
    end
  end

  # The parent tables and both DEFAULT partitions must always survive pruning.
  defp protected_partition?(partition_name) do
    partition_name in ["events", "events_default", "event_tags", "event_tags_default"]
  end

  # Parses `events_YYYY_MM` into a monthly_partition map, or nil for any name
  # that does not match the scheme (or has an out-of-range month).
  defp parse_monthly_partition(partition_name) do
    case Regex.run(@monthly_partition_pattern, partition_name, capture: :all_but_first) do
      [year_text, month_text] ->
        # The regex guarantees pure digits, so these matches cannot fail.
        {year, ""} = Integer.parse(year_text)
        {month, ""} = Integer.parse(month_text)

        if month in 1..12 do
          {month_start_unix, month_end_unix} = month_bounds_unix(year, month)

          %{
            name: partition_name,
            year: year,
            month: month,
            month_start_unix: month_start_unix,
            month_end_unix: month_end_unix
          }
        else
          nil
        end

      _other ->
        nil
    end
  end

  # Half-open unix-second range: [start of month, start of next month).
  defp month_bounds_unix(year, month) do
    month_date = Date.new!(year, month, 1)
    next_month_date = shift_month(month_date, 1)

    {date_to_unix(month_date), date_to_unix(next_month_date)}
  end

  defp date_to_unix(%Date{} = date) do
    date
    |> DateTime.new!(~T[00:00:00], "Etc/UTC")
    |> DateTime.to_unix()
  end

  defp month_start(%Date{} = date), do: Date.new!(date.year, date.month, 1)

  # Shifts by whole months using a flat month index, so year rollover is
  # handled arithmetically. Always lands on the first day of the month.
  defp shift_month(%Date{} = date, month_delta) when is_integer(month_delta) do
    month_index = date.year * 12 + date.month - 1 + month_delta
    shifted_year = div(month_index, 12)
    shifted_month = rem(month_index, 12) + 1

    Date.new!(shifted_year, shifted_month, 1)
  end

  defp normalize_reference_date(%Date{} = date), do: date
  defp normalize_reference_date(_other), do: Date.utc_today()

  defp normalize_non_negative_integer(value, _default) when is_integer(value) and value >= 0,
    do: value

  defp normalize_non_negative_integer(_value, default), do: default

  # Validates and double-quotes an identifier; raises on anything outside the
  # safe pattern (non-binary input falls through to the raising clause).
  defp quote_identifier!(identifier) when is_binary(identifier) do
    if Regex.match?(@identifier_pattern, identifier) do
      ~s("#{identifier}")
    else
      raise ArgumentError, "invalid SQL identifier: #{inspect(identifier)}"
    end
  end

  defp quote_identifier!(identifier) do
    raise ArgumentError, "invalid SQL identifier: #{inspect(identifier)}"
  end
end
|
||||
280
lib/parrhesia/tasks/partition_retention_worker.ex
Normal file
280
lib/parrhesia/tasks/partition_retention_worker.ex
Normal file
@@ -0,0 +1,280 @@
|
||||
defmodule Parrhesia.Tasks.PartitionRetentionWorker do
  @moduledoc """
  Periodic worker that ensures monthly event partitions and applies retention pruning.
  """

  use GenServer

  alias Parrhesia.Storage.Partitions
  alias Parrhesia.Telemetry

  @default_check_interval_hours 24
  @default_months_ahead 2
  @default_max_partitions_to_drop_per_run 1
  @bytes_per_gib 1_073_741_824

  @type monthly_partition :: Partitions.monthly_partition()

  @spec start_link(keyword()) :: GenServer.on_start()
  def start_link(opts \\ []) do
    name = Keyword.get(opts, :name, __MODULE__)
    GenServer.start_link(__MODULE__, opts, name: name)
  end

  @impl true
  def init(opts) do
    retention_config = Application.get_env(:parrhesia, :retention, [])

    # Options passed at start_link take precedence over :retention app config;
    # :partition_ops and :today_fun are injection points for tests.
    state = %{
      partition_ops: Keyword.get(opts, :partition_ops, Partitions),
      interval_ms: interval_ms(opts, retention_config),
      months_ahead: months_ahead(opts, retention_config),
      max_db_gib: max_db_gib(opts, retention_config),
      max_months_to_keep: max_months_to_keep(opts, retention_config),
      max_partitions_to_drop_per_run: max_partitions_to_drop_per_run(opts, retention_config),
      today_fun: today_fun(opts)
    }

    # First maintenance pass runs immediately after startup.
    schedule_tick(0)
    {:ok, state}
  end

  @impl true
  def handle_info(:tick, state) do
    started_at = System.monotonic_time()

    # Maintenance errors are absorbed here: the failure is reported via
    # telemetry and the next tick is still scheduled.
    {dropped_count, status} =
      case run_maintenance(state) do
        {:ok, count} -> {count, :ok}
        {:error, _reason} -> {0, :error}
      end

    Telemetry.emit(
      [:parrhesia, :maintenance, :partition_retention, :stop],
      %{
        duration: System.monotonic_time() - started_at,
        dropped_partitions: dropped_count
      },
      %{status: status}
    )

    schedule_tick(state.interval_ms)
    {:noreply, state}
  end

  # Ignore any other message so stray sends cannot crash the worker.
  def handle_info(_message, state), do: {:noreply, state}

  # Partition creation runs first; pruning only happens when creation succeeds.
  defp run_maintenance(state) do
    case state.partition_ops.ensure_monthly_partitions(months_ahead: state.months_ahead) do
      :ok -> maybe_drop_oldest_partitions(state)
      {:error, reason} -> {:error, reason}
    end
  end

  defp maybe_drop_oldest_partitions(%{max_partitions_to_drop_per_run: max_drops})
       when max_drops <= 0,
       do: {:ok, 0}

  # Drops at most max_partitions_to_drop_per_run partitions, re-evaluating the
  # limits after each drop so a single oversized run cannot over-prune.
  defp maybe_drop_oldest_partitions(state) do
    1..state.max_partitions_to_drop_per_run
    |> Enum.reduce_while({:ok, 0}, fn _attempt, {:ok, dropped_count} ->
      drop_oldest_partition_once(state, dropped_count)
    end)
  end

  defp drop_oldest_partition_once(state, dropped_count) do
    case next_partition_to_drop(state) do
      {:ok, partition} -> apply_partition_drop(state, partition, dropped_count)
      {:error, reason} -> {:halt, {:error, reason}}
    end
  end

  # nil candidate means no limit is currently exceeded: stop early.
  defp apply_partition_drop(_state, nil, dropped_count), do: {:halt, {:ok, dropped_count}}

  defp apply_partition_drop(state, partition, dropped_count) do
    case state.partition_ops.drop_partition(partition.name) do
      :ok -> {:cont, {:ok, dropped_count + 1}}
      {:error, reason} -> {:halt, {:error, reason}}
    end
  end

  # Picks the oldest partition that violates either the age limit or the
  # database-size limit; returns {:ok, nil} when neither limit is exceeded.
  defp next_partition_to_drop(state) do
    partitions = state.partition_ops.list_monthly_partitions()
    current_month_index = current_month_index(state.today_fun)

    month_limit_candidate =
      oldest_partition_exceeding_month_limit(
        partitions,
        state.max_months_to_keep,
        current_month_index
      )

    with {:ok, size_limit_candidate} <-
           oldest_partition_exceeding_size_limit(
             partitions,
             state.max_db_gib,
             current_month_index,
             state.partition_ops
           ) do
      {:ok, pick_oldest_partition(month_limit_candidate, size_limit_candidate)}
    end
  end

  defp oldest_partition_exceeding_month_limit(_partitions, :infinity, _current_month_index),
    do: nil

  # A partition is age-expired when it is older than the newest
  # max_months_to_keep months (the current month counts as month 1).
  # Only partitions strictly before the current month are ever candidates.
  defp oldest_partition_exceeding_month_limit(partitions, max_months_to_keep, current_month_index)
       when is_integer(max_months_to_keep) and max_months_to_keep > 0 do
    oldest_month_to_keep_index = current_month_index - (max_months_to_keep - 1)

    partitions
    |> Enum.filter(fn partition ->
      month_index(partition) < current_month_index and
        month_index(partition) < oldest_month_to_keep_index
    end)
    |> Enum.min_by(&month_index/1, fn -> nil end)
  end

  defp oldest_partition_exceeding_month_limit(
         _partitions,
         _max_months_to_keep,
         _current_month_index
       ),
       do: nil

  defp oldest_partition_exceeding_size_limit(
         _partitions,
         :infinity,
         _current_month_index,
         _archiver
       ),
       do: {:ok, nil}

  # When the database exceeds the size budget, the oldest completed (i.e.
  # before-current-month) partition becomes the drop candidate.
  defp oldest_partition_exceeding_size_limit(
         partitions,
         max_db_gib,
         current_month_index,
         archiver
       )
       when is_integer(max_db_gib) and max_db_gib > 0 do
    with {:ok, current_size_bytes} <- archiver.database_size_bytes() do
      max_size_bytes = max_db_gib * @bytes_per_gib

      if current_size_bytes > max_size_bytes do
        {:ok, oldest_completed_partition(partitions, current_month_index)}
      else
        {:ok, nil}
      end
    end
  end

  defp oldest_partition_exceeding_size_limit(
         _partitions,
         _max_db_gib,
         _current_month_index,
         _archiver
       ),
       do: {:ok, nil}

  defp oldest_completed_partition(partitions, current_month_index) do
    partitions
    |> Enum.filter(&(month_index(&1) < current_month_index))
    |> Enum.min_by(&month_index/1, fn -> nil end)
  end

  defp pick_oldest_partition(nil, nil), do: nil
  defp pick_oldest_partition(partition, nil), do: partition
  defp pick_oldest_partition(nil, partition), do: partition

  defp pick_oldest_partition(left, right) do
    if month_index(left) <= month_index(right) do
      left
    else
      right
    end
  end

  # Flat month index (year * 12 + month) so months compare as plain integers.
  defp month_index(%{year: year, month: month}) when is_integer(year) and is_integer(month) do
    year * 12 + month
  end

  defp current_month_index(today_fun) do
    today = today_fun.()
    today.year * 12 + today.month
  end

  # Explicit :interval_ms opt wins; otherwise :check_interval_hours from config
  # (defaulting to 24h) is converted to milliseconds.
  defp interval_ms(opts, retention_config) do
    case Keyword.get(opts, :interval_ms) do
      value when is_integer(value) and value > 0 ->
        value

      _other ->
        retention_config
        |> Keyword.get(:check_interval_hours, @default_check_interval_hours)
        |> normalize_positive_integer(@default_check_interval_hours)
        |> hours_to_ms()
    end
  end

  defp months_ahead(opts, retention_config) do
    opts
    |> Keyword.get(
      :months_ahead,
      Keyword.get(retention_config, :months_ahead, @default_months_ahead)
    )
    |> normalize_non_negative_integer(@default_months_ahead)
  end

  # NOTE(review): this reads the :max_db_bytes key but the value is later
  # multiplied by @bytes_per_gib (see oldest_partition_exceeding_size_limit),
  # i.e. it is interpreted as GiB — confirm the intended unit of the config key.
  defp max_db_gib(opts, retention_config) do
    opts
    |> Keyword.get(:max_db_bytes, Keyword.get(retention_config, :max_db_bytes, :infinity))
    |> normalize_limit()
  end

  defp max_months_to_keep(opts, retention_config) do
    opts
    |> Keyword.get(
      :max_months_to_keep,
      Keyword.get(retention_config, :max_months_to_keep, :infinity)
    )
    |> normalize_limit()
  end

  defp max_partitions_to_drop_per_run(opts, retention_config) do
    opts
    |> Keyword.get(
      :max_partitions_to_drop_per_run,
      Keyword.get(
        retention_config,
        :max_partitions_to_drop_per_run,
        @default_max_partitions_to_drop_per_run
      )
    )
    |> normalize_non_negative_integer(@default_max_partitions_to_drop_per_run)
  end

  # Anything that is not a zero-arity function falls back to Date.utc_today/0.
  defp today_fun(opts) do
    case Keyword.get(opts, :today_fun, &Date.utc_today/0) do
      function when is_function(function, 0) -> function
      _other -> &Date.utc_today/0
    end
  end

  # Limits accept either a positive integer or :infinity; anything else
  # (including 0 and negatives) disables the limit.
  defp normalize_limit(:infinity), do: :infinity
  defp normalize_limit(value) when is_integer(value) and value > 0, do: value
  defp normalize_limit(_value), do: :infinity

  defp normalize_positive_integer(value, _default) when is_integer(value) and value > 0, do: value
  defp normalize_positive_integer(_value, default), do: default

  defp normalize_non_negative_integer(value, _default) when is_integer(value) and value >= 0,
    do: value

  defp normalize_non_negative_integer(_value, default), do: default

  defp hours_to_ms(hours), do: hours * 60 * 60 * 1000

  defp schedule_tick(interval_ms) do
    Process.send_after(self(), :tick, interval_ms)
  end
end
|
||||
@@ -11,13 +11,22 @@ defmodule Parrhesia.Tasks.Supervisor do
|
||||
|
||||
@impl true
def init(_init_arg) do
  # Child list is assembled from the optional expiration worker plus the
  # always-on partition retention worker. (The scraped diff had `children`
  # bound twice and a stray duplicate Supervisor.init/2 call; this is the
  # reconstructed coherent version.)
  children = expiration_children() ++ partition_retention_children()

  Supervisor.init(children, strategy: :one_for_one)
end

# The expiration worker can be disabled via the :enable_expiration_worker
# app env flag (enabled by default).
defp expiration_children do
  if Application.get_env(:parrhesia, :enable_expiration_worker, true) do
    [{Parrhesia.Tasks.ExpirationWorker, name: Parrhesia.Tasks.ExpirationWorker}]
  else
    []
  end
end

# The partition retention worker is always supervised.
defp partition_retention_children do
  [
    {Parrhesia.Tasks.PartitionRetentionWorker, name: Parrhesia.Tasks.PartitionRetentionWorker}
  ]
end
|
||||
end
|
||||
|
||||
@@ -0,0 +1,52 @@
|
||||
defmodule Parrhesia.TestSupport.PartitionRetentionStubPartitions do
  @moduledoc false

  use Agent

  # Starts the stub with a fixed partition list and database size. Each call
  # to the stub API optionally notifies :test_pid so tests can assert on the
  # sequence of operations.
  @spec start_link(keyword()) :: Agent.on_start()
  def start_link(opts \\ []) do
    agent_name = Keyword.get(opts, :name, __MODULE__)

    Agent.start_link(
      fn ->
        %{
          partitions: Keyword.get(opts, :partitions, []),
          db_size_bytes: Keyword.get(opts, :db_size_bytes, 0),
          test_pid: Keyword.get(opts, :test_pid)
        }
      end,
      name: agent_name
    )
  end

  @spec ensure_monthly_partitions(keyword()) :: :ok
  def ensure_monthly_partitions(opts \\ []) do
    notify({:ensure_monthly_partitions, opts})
    :ok
  end

  @spec list_monthly_partitions() :: [map()]
  def list_monthly_partitions do
    Agent.get(__MODULE__, &Map.fetch!(&1, :partitions))
  end

  @spec database_size_bytes() :: {:ok, non_neg_integer()}
  def database_size_bytes do
    notify(:database_size_bytes)
    {:ok, Agent.get(__MODULE__, &Map.fetch!(&1, :db_size_bytes))}
  end

  # Removes the named partition from the stub state, mimicking a real drop.
  @spec drop_partition(String.t()) :: :ok
  def drop_partition(partition_name) when is_binary(partition_name) do
    Agent.update(__MODULE__, fn state ->
      remaining =
        Enum.reject(state.partitions, fn partition -> partition.name == partition_name end)

      %{state | partitions: remaining}
    end)

    notify({:drop_partition, partition_name})
    :ok
  end

  # Forwards the message to the configured test pid, if any.
  defp notify(message) do
    test_pid = Agent.get(__MODULE__, & &1.test_pid)

    if is_pid(test_pid), do: send(test_pid, message)

    :ok
  end
end
|
||||
4
mix.exs
4
mix.exs
@@ -4,8 +4,8 @@ defmodule Parrhesia.MixProject do
|
||||
def project do
|
||||
[
|
||||
app: :parrhesia,
|
||||
version: "0.3.0",
|
||||
elixir: "~> 1.19",
|
||||
version: "0.4.0",
|
||||
elixir: "~> 1.18",
|
||||
start_permanent: Mix.env() == :prod,
|
||||
deps: deps(),
|
||||
aliases: aliases()
|
||||
|
||||
2
mix.lock
2
mix.lock
@@ -27,7 +27,7 @@
|
||||
"plug_cowboy": {:hex, :plug_cowboy, "2.8.0", "07789e9c03539ee51bb14a07839cc95aa96999fd8846ebfd28c97f0b50c7b612", [:mix], [{:cowboy, "~> 2.7", [hex: :cowboy, repo: "hexpm", optional: false]}, {:cowboy_telemetry, "~> 0.3", [hex: :cowboy_telemetry, repo: "hexpm", optional: false]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "9cbfaaf17463334ca31aed38ea7e08a68ee37cabc077b1e9be6d2fb68e0171d0"},
|
||||
"plug_crypto": {:hex, :plug_crypto, "2.1.1", "19bda8184399cb24afa10be734f84a16ea0a2bc65054e23a62bb10f06bc89491", [:mix], [], "hexpm", "6470bce6ffe41c8bd497612ffde1a7e4af67f36a15eea5f921af71cf3e11247c"},
|
||||
"postgrex": {:hex, :postgrex, "0.22.0", "fb027b58b6eab1f6de5396a2abcdaaeb168f9ed4eccbb594e6ac393b02078cbd", [:mix], [{:db_connection, "~> 2.9", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "a68c4261e299597909e03e6f8ff5a13876f5caadaddd0d23af0d0a61afcc5d84"},
|
||||
"ranch": {:hex, :ranch, "1.8.1", "208169e65292ac5d333d6cdbad49388c1ae198136e4697ae2f474697140f201c", [:make, :rebar3], [], "hexpm", "aed58910f4e21deea992a67bf51632b6d60114895eb03bb392bb733064594dd0"},
|
||||
"ranch": {:hex, :ranch, "2.2.0", "25528f82bc8d7c6152c57666ca99ec716510fe0925cb188172f41ce93117b1b0", [:make, :rebar3], [], "hexpm", "fa0b99a1780c80218a4197a59ea8d3bdae32fbff7e88527d7d8a4787eff4f8e7"},
|
||||
"req": {:hex, :req, "0.5.17", "0096ddd5b0ed6f576a03dde4b158a0c727215b15d2795e59e0916c6971066ede", [:mix], [{:brotli, "~> 0.3.1", [hex: :brotli, repo: "hexpm", optional: true]}, {:ezstd, "~> 1.0", [hex: :ezstd, repo: "hexpm", optional: true]}, {:finch, "~> 0.17", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:mime, "~> 2.0.6 or ~> 2.1", [hex: :mime, repo: "hexpm", optional: false]}, {:nimble_csv, "~> 1.0", [hex: :nimble_csv, repo: "hexpm", optional: true]}, {:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "0b8bc6ffdfebbc07968e59d3ff96d52f2202d0536f10fef4dc11dc02a2a43e39"},
|
||||
"rewrite": {:hex, :rewrite, "1.3.0", "67448ba7975690b35ba7e7f35717efcce317dbd5963cb0577aa7325c1923121a", [:mix], [{:glob_ex, "~> 0.1", [hex: :glob_ex, repo: "hexpm", optional: false]}, {:sourceror, "~> 1.0", [hex: :sourceror, repo: "hexpm", optional: false]}, {:text_diff, "~> 0.1", [hex: :text_diff, repo: "hexpm", optional: false]}], "hexpm", "d111ac7ff3a58a802ef4f193bbd1831e00a9c57b33276e5068e8390a212714a5"},
|
||||
"sourceror": {:hex, :sourceror, "1.12.0", "da354c5f35aad3cc1132f5d5b0d8437d865e2661c263260480bab51b5eedb437", [:mix], [], "hexpm", "755703683bd014ebcd5de9acc24b68fb874a660a568d1d63f8f98cd8a6ef9cd0"},
|
||||
|
||||
@@ -30,7 +30,10 @@ defmodule Parrhesia.Repo.Migrations.CreateRelayStorage do
|
||||
create(index(:events, [:expires_at], where: "expires_at IS NOT NULL"))
|
||||
create(index(:events, [:deleted_at], where: "deleted_at IS NOT NULL"))
|
||||
|
||||
create table(:event_tags, primary_key: false) do
|
||||
create table(:event_tags,
|
||||
primary_key: false,
|
||||
options: "PARTITION BY RANGE (event_created_at)"
|
||||
) do
|
||||
add(:event_created_at, :bigint, null: false)
|
||||
add(:event_id, :binary, null: false)
|
||||
add(:name, :string, null: false)
|
||||
@@ -39,6 +42,8 @@ defmodule Parrhesia.Repo.Migrations.CreateRelayStorage do
|
||||
timestamps(updated_at: false, type: :utc_datetime_usec)
|
||||
end
|
||||
|
||||
execute("CREATE TABLE event_tags_default PARTITION OF event_tags DEFAULT")
|
||||
|
||||
execute("""
|
||||
ALTER TABLE event_tags
|
||||
ADD CONSTRAINT event_tags_event_fk
|
||||
@@ -149,6 +154,8 @@ defmodule Parrhesia.Repo.Migrations.CreateRelayStorage do
|
||||
drop(table(:banned_pubkeys))
|
||||
drop(table(:addressable_event_state))
|
||||
drop(table(:replaceable_event_state))
|
||||
|
||||
execute("DROP TABLE event_tags_default")
|
||||
drop(table(:event_tags))
|
||||
|
||||
execute("DROP TABLE events_default")
|
||||
|
||||
@@ -86,7 +86,24 @@ cleanup() {
|
||||
|
||||
trap cleanup EXIT INT TERM
|
||||
|
||||
if ss -ltn "( sport = :${TEST_HTTP_PORT} )" | tail -n +2 | grep -q .; then
|
||||
# Returns success when something is already listening on TCP $1.
# Prefers ss; falls back to lsof; aborts the script when neither exists.
port_in_use() {
  local port="$1"

  if command -v ss >/dev/null 2>&1; then
    # Any output row past the header means a listener on this port.
    ss -ltn "( sport = :${port} )" | tail -n +2 | grep -q .
  elif command -v lsof >/dev/null 2>&1; then
    lsof -nP -iTCP:"${port}" -sTCP:LISTEN >/dev/null 2>&1
  else
    echo "Neither ss nor lsof is available for checking whether port ${port} is already in use." >&2
    exit 1
  fi
}
|
||||
|
||||
if port_in_use "$TEST_HTTP_PORT"; then
|
||||
echo "Port ${TEST_HTTP_PORT} is already in use. Set ${PORT_ENV_VAR} to a free port." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@@ -10,7 +10,7 @@ defmodule Parrhesia.ConfigTest do
|
||||
assert Parrhesia.Config.get([:limits, :auth_max_age_seconds]) == 600
|
||||
assert Parrhesia.Config.get([:limits, :max_outbound_queue]) == 256
|
||||
assert Parrhesia.Config.get([:limits, :max_filter_limit]) == 500
|
||||
assert Parrhesia.Config.get([:relay_url]) == "ws://localhost:4000/relay"
|
||||
assert Parrhesia.Config.get([:relay_url]) == "ws://localhost:4413/relay"
|
||||
assert Parrhesia.Config.get([:policies, :auth_required_for_writes]) == false
|
||||
assert Parrhesia.Config.get([:policies, :marmot_media_max_imeta_tags_per_event]) == 8
|
||||
assert Parrhesia.Config.get([:policies, :marmot_media_reject_mip04_v1]) == true
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
defmodule Parrhesia.Protocol.EventValidatorSignatureTest do
|
||||
use ExUnit.Case, async: true
|
||||
use ExUnit.Case, async: false
|
||||
|
||||
alias Parrhesia.Protocol.EventValidator
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
defmodule Parrhesia.Storage.Adapters.Postgres.EventsTest do
|
||||
use ExUnit.Case, async: true
|
||||
use ExUnit.Case, async: false
|
||||
|
||||
alias Parrhesia.Protocol.EventValidator
|
||||
alias Parrhesia.Storage.Adapters.Postgres.Events
|
||||
|
||||
@@ -64,7 +64,8 @@ defmodule Parrhesia.Storage.Adapters.Postgres.QueryPlanRegressionTest do
|
||||
)
|
||||
|
||||
plan = Enum.map_join(explain.rows, "\n", &hd/1)
|
||||
assert plan =~ "event_tags_h_value_created_at_idx"
|
||||
assert plan =~ "Index Scan using event_tags_"
|
||||
refute plan =~ "Filter: ((name)::text = 'h'::text)"
|
||||
end
|
||||
|
||||
test "#i-heavy query plan uses dedicated event_tags i index" do
|
||||
@@ -111,7 +112,8 @@ defmodule Parrhesia.Storage.Adapters.Postgres.QueryPlanRegressionTest do
|
||||
)
|
||||
|
||||
plan = Enum.map_join(explain.rows, "\n", &hd/1)
|
||||
assert plan =~ "event_tags_i_value_created_at_idx"
|
||||
assert plan =~ "Index Scan using event_tags_"
|
||||
refute plan =~ "Filter: ((name)::text = 'i'::text)"
|
||||
end
|
||||
|
||||
defp persist_event(overrides) do
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
defmodule Parrhesia.Storage.ArchiverTest do
  @moduledoc false

  # async: false because the sandbox checkout touches the shared Repo.
  use ExUnit.Case, async: false

  alias Ecto.Adapters.SQL.Sandbox
  alias Parrhesia.Repo
  alias Parrhesia.Storage.Archiver

  setup do
    :ok = Sandbox.checkout(Repo)
    :ok
  end

  test "list_partitions returns partition tables" do
    assert is_list(Archiver.list_partitions())
  end

  test "archive_sql builds insert-select statement" do
    statement = Archiver.archive_sql("events_2026_03", "events_archive")

    assert statement == ~s(INSERT INTO "events_archive" SELECT * FROM "events_2026_03";)
  end

  test "archive_sql rejects invalid SQL identifiers" do
    malicious_name = "events_default; DROP TABLE events"

    assert_raise ArgumentError, fn ->
      Archiver.archive_sql(malicious_name, "events_archive")
    end
  end
end
|
||||
66
test/parrhesia/storage/partitions_test.exs
Normal file
66
test/parrhesia/storage/partitions_test.exs
Normal file
@@ -0,0 +1,66 @@
|
||||
defmodule Parrhesia.Storage.PartitionsTest do
|
||||
use ExUnit.Case, async: false
|
||||
|
||||
alias Ecto.Adapters.SQL.Sandbox
|
||||
alias Parrhesia.Repo
|
||||
alias Parrhesia.Storage.Partitions
|
||||
|
||||
setup do
|
||||
:ok = Sandbox.checkout(Repo)
|
||||
:ok
|
||||
end
|
||||
|
||||
test "list_partitions returns partition tables" do
|
||||
partitions = Partitions.list_partitions()
|
||||
assert is_list(partitions)
|
||||
end
|
||||
|
||||
test "ensure_monthly_partitions creates aligned monthly partitions for events and event_tags" do
|
||||
assert :ok =
|
||||
Partitions.ensure_monthly_partitions(reference_date: ~D[2026-06-14], months_ahead: 1)
|
||||
|
||||
monthly_partition_names =
|
||||
Partitions.list_monthly_partitions()
|
||||
|> Enum.map(& &1.name)
|
||||
|
||||
assert "events_2026_06" in monthly_partition_names
|
||||
assert "events_2026_07" in monthly_partition_names
|
||||
|
||||
assert table_exists?("event_tags_2026_06")
|
||||
assert table_exists?("event_tags_2026_07")
|
||||
end
|
||||
|
||||
test "drop_partition returns an error for protected partitions" do
|
||||
assert {:error, :protected_partition} = Partitions.drop_partition("events_default")
|
||||
assert {:error, :protected_partition} = Partitions.drop_partition("events")
|
||||
assert {:error, :protected_partition} = Partitions.drop_partition("event_tags_default")
|
||||
assert {:error, :protected_partition} = Partitions.drop_partition("event_tags")
|
||||
end
|
||||
|
||||
test "drop_partition removes aligned event_tags partition for monthly event partition" do
|
||||
assert :ok =
|
||||
Partitions.ensure_monthly_partitions(reference_date: ~D[2026-08-14], months_ahead: 0)
|
||||
|
||||
assert table_exists?("events_2026_08")
|
||||
assert table_exists?("event_tags_2026_08")
|
||||
|
||||
assert :ok = Partitions.drop_partition("events_2026_08")
|
||||
|
||||
refute table_exists?("events_2026_08")
|
||||
refute table_exists?("event_tags_2026_08")
|
||||
end
|
||||
|
||||
test "database_size_bytes returns the current database size" do
|
||||
assert {:ok, size} = Partitions.database_size_bytes()
|
||||
assert is_integer(size)
|
||||
assert size >= 0
|
||||
end
|
||||
|
||||
defp table_exists?(table_name) when is_binary(table_name) do
|
||||
case Repo.query("SELECT to_regclass($1)", ["public." <> table_name]) do
|
||||
{:ok, %{rows: [[nil]]}} -> false
|
||||
{:ok, %{rows: [[_relation_name]]}} -> true
|
||||
_other -> false
|
||||
end
|
||||
end
|
||||
end
|
||||
124
test/parrhesia/tasks/partition_retention_worker_test.exs
Normal file
124
test/parrhesia/tasks/partition_retention_worker_test.exs
Normal file
@@ -0,0 +1,124 @@
|
||||
defmodule Parrhesia.Tasks.PartitionRetentionWorkerTest do
|
||||
use ExUnit.Case, async: false
|
||||
|
||||
alias Parrhesia.Tasks.PartitionRetentionWorker
|
||||
alias Parrhesia.TestSupport.PartitionRetentionStubPartitions
|
||||
|
||||
@bytes_per_gib 1_073_741_824
|
||||
|
||||
test "drops oldest partition when max_months_to_keep is exceeded" do
|
||||
start_supervised!(
|
||||
{PartitionRetentionStubPartitions,
|
||||
partitions: [
|
||||
partition(2026, 1),
|
||||
partition(2026, 2),
|
||||
partition(2026, 3),
|
||||
partition(2026, 4),
|
||||
partition(2026, 5)
|
||||
],
|
||||
db_size_bytes: 2 * @bytes_per_gib,
|
||||
test_pid: self()}
|
||||
)
|
||||
|
||||
worker =
|
||||
start_supervised!(
|
||||
{PartitionRetentionWorker,
|
||||
name: nil,
|
||||
partition_ops: PartitionRetentionStubPartitions,
|
||||
interval_ms: :timer.hours(24),
|
||||
months_ahead: 0,
|
||||
max_db_bytes: :infinity,
|
||||
max_months_to_keep: 3,
|
||||
max_partitions_to_drop_per_run: 1,
|
||||
today_fun: fn -> ~D[2026-06-15] end}
|
||||
)
|
||||
|
||||
assert is_pid(worker)
|
||||
assert_receive {:ensure_monthly_partitions, [months_ahead: 0]}
|
||||
assert_receive {:drop_partition, "events_2026_01"}
|
||||
refute_receive {:drop_partition, _partition_name}, 20
|
||||
refute_receive :database_size_bytes, 20
|
||||
end
|
||||
|
||||
test "drops oldest completed partition when size exceeds max_db_bytes" do
|
||||
start_supervised!(
|
||||
{PartitionRetentionStubPartitions,
|
||||
partitions: [partition(2026, 3), partition(2026, 4), partition(2026, 5)],
|
||||
db_size_bytes: 12 * @bytes_per_gib,
|
||||
test_pid: self()}
|
||||
)
|
||||
|
||||
worker =
|
||||
start_supervised!(
|
||||
{PartitionRetentionWorker,
|
||||
name: nil,
|
||||
partition_ops: PartitionRetentionStubPartitions,
|
||||
interval_ms: :timer.hours(24),
|
||||
months_ahead: 0,
|
||||
max_db_bytes: 10,
|
||||
max_months_to_keep: :infinity,
|
||||
max_partitions_to_drop_per_run: 1,
|
||||
today_fun: fn -> ~D[2026-06-15] end}
|
||||
)
|
||||
|
||||
assert is_pid(worker)
|
||||
assert_receive {:ensure_monthly_partitions, [months_ahead: 0]}
|
||||
assert_receive :database_size_bytes
|
||||
assert_receive {:drop_partition, "events_2026_03"}
|
||||
end
|
||||
|
||||
test "does not drop partitions when both limits are infinity" do
|
||||
start_supervised!(
|
||||
{PartitionRetentionStubPartitions,
|
||||
partitions: [partition(2026, 1), partition(2026, 2), partition(2026, 3)],
|
||||
db_size_bytes: 50 * @bytes_per_gib,
|
||||
test_pid: self()}
|
||||
)
|
||||
|
||||
worker =
|
||||
start_supervised!(
|
||||
{PartitionRetentionWorker,
|
||||
name: nil,
|
||||
partition_ops: PartitionRetentionStubPartitions,
|
||||
interval_ms: :timer.hours(24),
|
||||
months_ahead: 0,
|
||||
max_db_bytes: :infinity,
|
||||
max_months_to_keep: :infinity,
|
||||
max_partitions_to_drop_per_run: 1,
|
||||
today_fun: fn -> ~D[2026-06-15] end}
|
||||
)
|
||||
|
||||
assert is_pid(worker)
|
||||
assert_receive {:ensure_monthly_partitions, [months_ahead: 0]}
|
||||
refute_receive :database_size_bytes, 20
|
||||
refute_receive {:drop_partition, _partition_name}, 20
|
||||
end
|
||||
|
||||
defp partition(year, month) when is_integer(year) and is_integer(month) do
|
||||
month_name = month |> Integer.to_string() |> String.pad_leading(2, "0")
|
||||
month_start = Date.new!(year, month, 1)
|
||||
next_month_start = shift_month(month_start, 1)
|
||||
|
||||
%{
|
||||
name: "events_#{year}_#{month_name}",
|
||||
year: year,
|
||||
month: month,
|
||||
month_start_unix: date_to_unix(month_start),
|
||||
month_end_unix: date_to_unix(next_month_start)
|
||||
}
|
||||
end
|
||||
|
||||
defp shift_month(%Date{} = date, month_delta) when is_integer(month_delta) do
|
||||
month_index = date.year * 12 + date.month - 1 + month_delta
|
||||
shifted_year = div(month_index, 12)
|
||||
shifted_month = rem(month_index, 12) + 1
|
||||
|
||||
Date.new!(shifted_year, shifted_month, 1)
|
||||
end
|
||||
|
||||
defp date_to_unix(%Date{} = date) do
|
||||
date
|
||||
|> DateTime.new!(~T[00:00:00], "Etc/UTC")
|
||||
|> DateTime.to_unix()
|
||||
end
|
||||
end
|
||||
@@ -74,7 +74,7 @@ defmodule Parrhesia.Web.ConnectionTest do
|
||||
end
|
||||
|
||||
test "AUTH rejects relay tag mismatch" do
|
||||
state = connection_state(relay_url: "ws://localhost:4000/relay")
|
||||
state = connection_state(relay_url: "ws://localhost:4413/relay")
|
||||
|
||||
auth_event = valid_auth_event(state.auth_challenge, relay_url: "ws://attacker.example/relay")
|
||||
payload = JSON.encode!(["AUTH", auth_event])
|
||||
|
||||
Reference in New Issue
Block a user