Add LiveKit, coturn, and JWT service to Matrix module
Some checks failed
Test action / kaas (push) Failing after 1s

- Integrate LiveKit SFU, coturn TURN server, and lk-jwt-service for
  Element Call support in the Matrix Synapse module
- Add firewall rules for new services and ports
- Add key generation systemd service for LiveKit JWT
- Extend Synapse config with TURN URIs and experimental features
- Update Caddy config for new endpoints and well-known support
- Improve OIDC config with additional scopes and user mapping
- Add Grafana secret_key to SOPS secrets and config
- Refactor and modularize secret checking in justfile scripts
This commit is contained in:
Chris Kruining 2026-03-03 14:59:58 +01:00
parent a2071e16a2
commit d3a394dfd9
No known key found for this signature in database
GPG key ID: EB894A3560CCCAD2
4 changed files with 234 additions and 27 deletions

View file

@ -7,5 +7,6 @@
[doc('Update the target machine')] [doc('Update the target machine')]
[no-exit-message] [no-exit-message]
@update machine: @update machine:
cd .. && just vars _check {{ machine }}
just assert '-d "../systems/x86_64-linux/{{ machine }}"' "Machine {{ machine }} does not exist, must be one of: $(ls ../systems/x86_64-linux/ | sed ':a;N;$!ba;s/\n/, /g')" just assert '-d "../systems/x86_64-linux/{{ machine }}"' "Machine {{ machine }} does not exist, must be one of: $(ls ../systems/x86_64-linux/ | sed ':a;N;$!ba;s/\n/, /g')"
nixos-rebuild switch -L --sudo --target-host {{ machine }} --build-host {{ machine }} --flake ..#{{ machine }} --log-format internal-json -v |& nom --json nixos-rebuild switch -L --sudo --target-host {{ machine }} --build-host {{ machine }} --flake ..#{{ machine }} --log-format internal-json -v |& nom --json

View file

@ -23,7 +23,7 @@ edit machine:
echo "Done" echo "Done"
[doc('Get var value by {key} of {machine}')] [doc('Get var by {key} from {machine}')]
get machine key: get machine key:
sops decrypt {{ base_path }}/{{ machine }}/secrets.yml | yq ".$(echo "{{ key }}" | sed -E 's/\//./g')" sops decrypt {{ base_path }}/{{ machine }}/secrets.yml | yq ".$(echo "{{ key }}" | sed -E 's/\//./g')"
@ -38,25 +38,52 @@ remove machine key:
[script] [script]
check: check:
cd ..
for machine in $(ls {{ base_path }}); do for machine in $(ls {{ base_path }}); do
[ -f "{{ base_path }}/$machine/secrets.yml" ] || continue just vars _check "$machine"
[ -f "{{ base_path }}/$machine/default.nix" ] || continue
echo "Processing $machine"
mapfile -t missing < <(jq -nr \
--rawfile defined <(nix eval --json --apply 'builtins.attrNames' ..#nixosConfigurations.$machine.config.sops.secrets 2>/dev/null) \
--rawfile configured <(sops decrypt {{ base_path }}/$machine/secrets.yml | yq '.') \
'
$defined | fromjson as $def
| $configured
| fromjson
| paths(scalars)
| join("/")
| select(. | IN($def[]) | not)
')
if (( ${#missing[@]} > 0 )); then
printf 'missing the following %d secret(s):\n%s\n\n' "${#missing[@]}" "$(printf -- '- %s\n' "${missing[@]}")"
fi
done done
[no-exit-message]
[script]
_check machine:
# Verify that every sops secret declared in the machine's nix configuration
# is actually present in its encrypted secrets.yml. Shows a spinner while
# the (slow) `nix eval` comparison runs in the background; exits 1 when
# at least one declared secret is missing from secrets.yml.
# If the default nix file is missing,
# we can skip this folder as we are
# missing the files used to compare
# the defined vs the configured secrets
if [ ! -f "{{ base_path }}/{{ machine }}/default.nix" ]; then
printf "\r• %-8sskipped\n" "{{ machine }}"
exit 0
fi
# Start the comparison asynchronously and attach its stdout to fd 3 so the
# spinner loop below can run in the foreground while it works:
#   $defined    — JSON of config.sops.secrets from `nix eval` (empty on eval error)
#   $configured — secrets.yml decrypted to JSON via sops+yq, or "{}" when the file is absent
# The jq program emits each defined secret's .key whose "/"-joined path does
# not occur among the scalar paths of secrets.yml — i.e. secrets that are
# declared in nix but not yet configured.
exec 3< <(jq -nr \
--rawfile defined <(nix eval --json ..#nixosConfigurations.{{ machine }}.config.sops.secrets 2>/dev/null) \
--rawfile configured <([ -f "{{ base_path }}/{{ machine }}/secrets.yml" ] && sops decrypt {{ base_path }}/{{ machine }}/secrets.yml | yq '.' || echo "{}") \
'
[ $configured | fromjson | paths(scalars) | join("/") ] as $conf
| $defined
| fromjson
| map(.key | select(. | IN($conf[]) | not))
| unique
| .[]
')
# NOTE(review): relies on bash setting $! to the PID of the most recent
# process substitution — bash-specific; confirm the [script] interpreter is bash.
pid=$! # Process Id of the previous running command
spin='⠇⠋⠙⠸⢰⣠⣄⡆'
i=0
# Animate a spinner next to the machine name until the background
# comparison process exits (kill -0 only probes for liveness).
while kill -0 $pid 2>/dev/null
do
i=$(( (i+1) %${#spin} ))
printf "\r${spin:$i:1} %s" "{{ machine }}"
sleep .1
done
# Read the missing-secret names (one per line) produced on fd 3.
mapfile -t missing <&3
if (( ${#missing[@]} > 0 )); then
printf '\r✗ %-8smissing %d secret(s):\n%s\n' "{{ machine }}" "${#missing[@]}" "$(printf -- '  %s\n' "${missing[@]}")"
exit 1
else
printf "\r✓ %-8sup to date\n" "{{ machine }}"
fi

View file

@ -15,6 +15,7 @@
port = 4001; port = 4001;
database = "synapse"; database = "synapse";
keyFile = "/var/lib/element-call/key";
in { in {
options.${namespace}.services.communication.matrix = { options.${namespace}.services.communication.matrix = {
enable = mkEnableOption "Matrix server (Synapse)"; enable = mkEnableOption "Matrix server (Synapse)";
@ -26,8 +27,6 @@ in {
# virtualisation.podman.enable = true; # virtualisation.podman.enable = true;
}; };
networking.firewall.allowedTCPPorts = [4001];
services = { services = {
matrix-synapse = { matrix-synapse = {
enable = true; enable = true;
@ -55,8 +54,27 @@ in {
password_config.enabled = true; password_config.enabled = true;
backchannel_logout_enabled = true; backchannel_logout_enabled = true;
# Element Call options
max_event_delay_duration = "24h";
rc_message = {
per_second = 0.5;
burst_count = 30;
};
rc_delayed_event_mgmt = {
per_second = 1;
burst_count = 20;
};
turn_uris = ["turn:turn.${domain}:4004?transport=udp" "turn:turn.${domain}:4004?transport=tcp"];
experimental_features = { experimental_features = {
# MSC2965: OAuth 2.0 Authorization Server Metadata discovery
msc2965_enabled = true; msc2965_enabled = true;
# MSC3266: Room summary API. Used for knocking over federation
msc3266_enabled = true;
# MSC4222 needed for syncv2 state_after. This allow clients to
# correctly track the state of the room.
msc4222_enabled = true;
}; };
sso = { sso = {
@ -181,33 +199,180 @@ in {
caddy = { caddy = {
enable = true; enable = true;
# globalConfig = ''
# layer4 {
# 127.0.0.1:4004
# route {
# proxy {
# upstream synapse:4004
# }
# }
# }
# 127.0.0.1:4005
# route {
# proxy {
# upstream synapse:4005
# }
# }
# }
# }
# '';
virtualHosts = let virtualHosts = let
server = { server = {
"m.server" = "${fqn}:443"; "m.server" = "${fqn}:443";
}; };
client = { client = {
"m.homeserver".base_url = "https://${fqn}"; "m.homeserver".base_url = "https://${fqn}";
"m.identity_server".base_url = "https://auth.kruining.eu"; "m.identity_server".base_url = "https://auth.${domain}";
"org.matrix.msc3575.proxy".url = "https://${domain}";
"org.matrix.msc4143.rtc_foci" = [
{
type = "livekit";
livekit_service_url = "https://${domain}/livekit/jwt";
}
];
}; };
in { in {
"${domain}".extraConfig = '' "${domain}, darkch.at".extraConfig = ''
# Route for lk-jwt-service
handle /livekit/jwt* {
uri strip_prefix /livekit/jwt
reverse_proxy http://[::1]:${toString config.services.lk-jwt-service.port} {
header_up Host {host}
header_up X-Forwarded-Server {host}
header_up X-Real-IP {remote_host}
header_up X-Forwarded-For {remote_host}
}
}
handle_path /livekit/sfu* {
reverse_proxy http://[::1]:${toString config.services.livekit.settings.port} {
header_up Host {host}
header_up X-Forwarded-Server {host}
header_up X-Real-IP {remote_host}
header_up X-Forwarded-For {remote_host}
}
}
header /.well-known/matrix/* Content-Type application/json header /.well-known/matrix/* Content-Type application/json
header /.well-known/matrix/* Access-Control-Allow-Origin * header /.well-known/matrix/* Access-Control-Allow-Origin *
respond /.well-known/matrix/server `${toJSON server}` respond /.well-known/matrix/server `${toJSON server}`
respond /.well-known/matrix/client `${toJSON client}` respond /.well-known/matrix/client `${toJSON client}`
''; '';
"${fqn}".extraConfig = '' "${fqn}".extraConfig = ''
reverse_proxy /_matrix/* http://::1:4001 reverse_proxy /_matrix/* http://::1:${toString port}
reverse_proxy /_synapse/client/* http://::1:4001 reverse_proxy /_synapse/client/* http://::1:${toString port}
''; '';
}; };
}; };
livekit = {
enable = true;
openFirewall = true;
inherit keyFile;
settings = {
port = 4002;
room.auto_create = false;
};
};
lk-jwt-service = {
enable = true;
port = 4003;
# can be on the same virtualHost as synapse
livekitUrl = "wss://${domain}/livekit/sfu";
inherit keyFile;
};
coturn = rec {
enable = true;
listening-port = 4004;
tls-listening-port = 40004;
no-cli = true;
no-tcp-relay = true;
min-port = 50000;
max-port = 50100;
use-auth-secret = true;
static-auth-secret-file = config.sops.secrets."coturn/secret".path;
realm = "turn.${domain}";
# cert = "${config.security.acme.certs.${realm}.directory}/full.pem";
# pkey = "${config.security.acme.certs.${realm}.directory}/key.pem";
extraConfig = ''
# for debugging
verbose
# ban private IP ranges
no-multicast-peers
denied-peer-ip=0.0.0.0-0.255.255.255
denied-peer-ip=10.0.0.0-10.255.255.255
denied-peer-ip=100.64.0.0-100.127.255.255
denied-peer-ip=127.0.0.0-127.255.255.255
denied-peer-ip=169.254.0.0-169.254.255.255
denied-peer-ip=172.16.0.0-172.31.255.255
denied-peer-ip=192.0.0.0-192.0.0.255
denied-peer-ip=192.0.2.0-192.0.2.255
denied-peer-ip=192.88.99.0-192.88.99.255
denied-peer-ip=192.168.0.0-192.168.255.255
denied-peer-ip=198.18.0.0-198.19.255.255
denied-peer-ip=198.51.100.0-198.51.100.255
denied-peer-ip=203.0.113.0-203.0.113.255
denied-peer-ip=240.0.0.0-255.255.255.255
denied-peer-ip=::1
denied-peer-ip=64:ff9b::-64:ff9b::ffff:ffff
denied-peer-ip=::ffff:0.0.0.0-::ffff:255.255.255.255
denied-peer-ip=100::-100::ffff:ffff:ffff:ffff
denied-peer-ip=2001::-2001:1ff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=2002::-2002:ffff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=fc00::-fdff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
denied-peer-ip=fe80::-febf:ffff:ffff:ffff:ffff:ffff:ffff:ffff
'';
};
};
networking.firewall = {
allowedTCPPortRanges = [];
allowedTCPPorts = [
# Synapse
port
# coTURN ports
config.services.coturn.listening-port
config.services.coturn.alt-listening-port
config.services.coturn.tls-listening-port
config.services.coturn.alt-tls-listening-port
];
allowedUDPPortRanges = with config.services.coturn;
lib.singleton {
from = min-port;
to = max-port;
};
allowedUDPPorts = [
# coTURN ports
config.services.coturn.listening-port
config.services.coturn.alt-listening-port
];
};
systemd = {
services.livekit-key = {
before = ["lk-jwt-service.service" "livekit.service"];
wantedBy = ["multi-user.target"];
path = with pkgs; [livekit coreutils gawk];
script = ''
echo "Key missing, generating key"
echo "lk-jwt-service: $(livekit-server generate-keys | tail -1 | awk '{print $3}')" > "${keyFile}"
'';
serviceConfig.Type = "oneshot";
unitConfig.ConditionPathExists = "!${keyFile}";
};
services.lk-jwt-service.environment.LIVEKIT_FULL_ACCESS_HOMESERVERS = "${domain}";
}; };
sops = { sops = {
secrets = { secrets = {
"synapse/oidc_id" = {}; "synapse/oidc_id" = {};
"synapse/oidc_secret" = {}; "synapse/oidc_secret" = {};
"coturn/secret" = {};
}; };
templates = { templates = {
@ -222,13 +387,19 @@ in {
scopes: scopes:
- openid - openid
- profile - profile
- email
- offline_access
client_id: '${config.sops.placeholder."synapse/oidc_id"}' client_id: '${config.sops.placeholder."synapse/oidc_id"}'
client_secret: '${config.sops.placeholder."synapse/oidc_secret"}' client_secret: '${config.sops.placeholder."synapse/oidc_secret"}'
backchannel_logout_enabled: true backchannel_logout_enabled: true
user_profile_method: userinfo_endpoint
allow_existing_users: true
enable_registration: true
user_mapping_provider: user_mapping_provider:
config: config:
localpart_template: "{{ user.preferred_username }}" localpart_template: "{{ user.preferred_username }}"
display_name_template: "{{ user.name }}" display_name_template: "{{ user.name }}"
email_template: "{{ user.email }}"
''; '';
restartUnits = ["matrix-synapse.service"]; restartUnits = ["matrix-synapse.service"];
}; };

View file

@ -30,6 +30,10 @@ in {
domain = "ulmo"; domain = "ulmo";
}; };
security = {
secret_key = "$__file{${config.sops.secrets."grafana/secret_key".path}}";
};
auth = { auth = {
disable_login_form = false; disable_login_form = false;
oauth_auto_login = true; oauth_auto_login = true;
@ -133,6 +137,10 @@ in {
sops = { sops = {
secrets = { secrets = {
"grafana/secret_key" = {
owner = "grafana";
group = "grafana";
};
"grafana/oidc_id" = { "grafana/oidc_id" = {
owner = "grafana"; owner = "grafana";
group = "grafana"; group = "grafana";