Compare commits

..

19 commits

Author SHA1 Message Date
Chris Kruining
100a218aed
Add poster image support to Matrix download listings
Some checks failed
Test action / kaas (push) Failing after 2s
- Fetch and display poster images for tracked items in Matrix
- Show monitored/unmonitored icons in listings
- Limit displayed items to 12, with count and overflow message
- Add tests for image fetching and formatting
- Enable Grafana datasources
- Fix Sonarr/Radarr URL config bug
2026-04-16 16:55:52 +02:00
Chris Kruining
e07257e137
checkpoint 2026-04-16 15:36:33 +02:00
Chris Kruining
be2843ca80
. 2026-04-16 11:00:38 +02:00
Chris Kruining
e26e25b566
Change observability service ports and add Arrtrix content management
- Update ports for Alloy, Grafana, Loki, Prometheus, Promtail, Tempo,
  and
  Uptime Kuma to new ranges
- Add Arrtrix content management commands and subscriptions
- Implement Radarr and Sonarr client logic for movie and series
  management
- Add matrix commands for download and subscription management
- Add subscription repository with database schema and logic
- Update Arrtrix config and example config for content section
- Update help text and command processor to include new commands
- Update vendor hash for Arrtrix package
2026-04-16 10:41:16 +02:00
Chris Kruining
9b93f017b6
Add observability stack: Alloy, Tempo, and OTEL support
- Add NixOS modules for Alloy and Tempo with default configs
- Update Grafana datasource config for Prometheus, Loki, Tempo
- Add Prometheus remote_write for Alloy
- Implement OTEL metrics/tracing/logging in arrtrix (Go)
- Enable Alloy and Tempo in ulmo system config
2026-04-16 10:29:04 +02:00
Chris Kruining
81f34676c4
Add OpenTelemetry observability to Arrtrix
- Add OTLP/gRPC observability config and resource attributes
- Instrument webhook and onboarding handlers with tracing and metrics
- Add OpenTelemetry dependencies to go.mod/go.sum
- Update NixOS modules to configure observability settings
2026-04-16 10:13:51 +02:00
Chris Kruining
bbfe6867c8
Refactor arrtrix webhook to use fixed path and remove legacy config
Some checks failed
Test action / kaas (push) Failing after 1s
- Switch arrtrix webhook to a fixed path: /_arrtrix/webhook
- Remove Radarr-specific and secret-based config from arrtrix
- Simplify connector and webhook handler logic
- Update NixOS module to drop legacy webhook config
- Add new tests for generic arrtrix webhook handler
2026-04-16 09:47:00 +02:00
Chris Kruining
fe627f3aab
Add Arrtrix runtime, config, onboarding, and webhook support
- Implement runtime package for bridge startup, config loading, and env
  overrides
- Add onboarding package for management room welcome messages
- Add matrixcmd package for command processing and help
- Add webhook package with Radarr webhook support and validation
- Extend connector config for webhooks and validation
- Update default config and example config for new options
- Add tests for new packages and config validation
- Change database type default to sqlite3-fk-wal
2026-04-16 09:06:57 +02:00
Chris Kruining
eeedb5268a
Remove Vaultwarden package definition
Some checks failed
Test action / kaas (push) Failing after 0s
2026-04-16 08:08:55 +02:00
Chris Kruining
ce44496a48
Add arrtrix Matrix bridge service and package scaffolding
Some checks failed
Test action / kaas (push) Failing after 1s
2026-04-16 07:46:45 +02:00
c4e9485ccb chore(secrets): set secret "synapse/shared_secret" for machine "ulmo" 2026-04-16 05:20:19 +00:00
6fe9387626 chore(secrets): set secret "synapse/shared_secret" for machine "ulmo" 2026-04-16 05:19:04 +00:00
Chris Kruining
d5b5166b95
checkpoint 2026-04-14 15:27:49 +02:00
Chris Kruining
66fc9e532a
add backup stuff
Some checks failed
Test action / kaas (push) Failing after 1s
2026-04-12 17:53:37 +02:00
Chris Kruining
03bd906aef
fix vaultwarden oidc 2026-04-12 17:53:06 +02:00
a1d4c244cf chore(secrets): set secret "zitadel/users" for machine "ulmo" 2026-04-12 15:00:09 +00:00
352569fd8b chore(secrets): set secret "backup/ssh-key" for machine "ulmo" 2026-04-12 12:03:43 +00:00
Chris Kruining
7b37c0e9c3
various fixes
Some checks failed
Test action / kaas (push) Failing after 1s
2026-04-05 16:05:01 +02:00
a10e74a596 chore: update dependencies 2026-04-05 12:36:49 +00:00
273 changed files with 8030 additions and 5876 deletions

6
.editorconfig Normal file
View file

@ -0,0 +1,6 @@
root = true
[*]
end_of_line = lf
insert_final_newline = true
charset = utf-8

5
.gitattributes vendored
View file

@ -1,4 +1 @@
* text=auto
core.autocrlf=false
core.eol=lf
core.filemode=false
* text=auto eol=lf

20
.just/machine.just Normal file
View file

@ -0,0 +1,20 @@
@_default: list
[doc('List machines')]
@list:
ls -1 ../systems/x86_64-linux/
[doc('Update target machine')]
[no-exit-message]
@update machine:
echo "Checking vars"
cd .. && just vars _check {{ machine }}
echo ""
just assert '-d "../systems/x86_64-linux/{{ machine }}"' "Machine {{ machine }} does not exist, must be one of: $(ls ../systems/x86_64-linux/ | sed ':a;N;$!ba;s/\n/, /g')"
nixos-rebuild switch -L --sudo --target-host {{ machine }} --flake ..#{{ machine }} --log-format internal-json -v |& nom --json
[doc('Check if target machine builds')]
[no-exit-message]
@check machine:
just assert '-d "../systems/x86_64-linux/{{ machine }}"' "Machine {{ machine }} does not exist, must be one of: $(ls ../systems/x86_64-linux/ | sed ':a;N;$!ba;s/\n/, /g')"
nix build ..#nixosConfigurations.{{ machine }}.config.system.build.toplevel

101
.just/users.just Normal file
View file

@ -0,0 +1,101 @@
set unstable := true
set quiet := true
_default:
just --list users
[doc('List available users')]
[script]
list:
cd .. && just vars get ulmo zitadel/users | jq -r -C '
import ".jq/table" as table;
import ".jq/format" as f;
fromjson
| to_entries
| sort_by(.key)
| map(
(.key|f::to_title) + ":\n"
+ table::create(
.value
| to_entries
| sort_by(.key)
| map({username:.key} + .value)
)
)
| join("\n\n┄┄┄\n\n")
';
[doc('Add a new user')]
[script]
add:
exec 5>&1
pad () { [ "$#" -gt 1 ] && [ -n "$2" ] && printf "%$2.${2#-}s" "$1"; }
input() {
local label=$1
local value=$2
local res=$(gum input --header "$label" --value "$value")
echo -e "\e[2m$(pad "$label" -11)\e[0m$res" >&5
echo $res
}
data=`cd .. && just vars get ulmo zitadel/users | jq 'fromjson'`
# Gather inputs
org=`
jq -r 'to_entries | map(.key)[]' <<< "$data" \
| gum choose --header 'Which organisation to save to?' --select-if-one
`
username=`input 'user name' ''`
email=`input 'email' ''`
first_name=`input 'first name' ''`
last_name=`input 'last name' ''`
user_exists=`jq --arg 'org' "$org" --arg 'username' "$username" '.[$org][$username]? | . != null' <<< "$data"`
if [ "$user_exists" == "true" ]; then
gum confirm 'User already exists, overwrite it?' --padding="1 1" || exit 0
fi
next=`
jq \
--arg 'org' "$org" \
--arg 'username' "$username" \
--arg 'email' "$email" \
--arg 'first_name' "$first_name" \
--arg 'last_name' "$last_name" \
--compact-output \
'.[$org] += { $username: { email: $email, firstName: $first_name, lastName: $last_name } }' \
<<< $data
`
gum spin --title "saving..." -- echo "$(cd .. && just vars set ulmo 'zitadel/users' "$next")"
[doc('Remove a user')]
[script]
remove:
data=`cd .. && just vars get ulmo zitadel/users | jq fromjson`
# Gather inputs
org=`
jq -r 'to_entries | map(.key)[]' <<< "$data" \
| gum choose --header 'Which organisation?' --select-if-one
`
user=`
jq -r --arg org "$org" '.[$org] | to_entries | map(.key)[]' <<< "$data" \
| gum choose --header 'Which user?' --select-if-one
`
next=`
jq \
--arg 'org' "$org" \
--arg 'user' "$user" \
--compact-output \
'del(.[$org][$user])' \
<<< $data
`
gum spin --title "saving..." -- echo "$(cd .. && just vars set ulmo 'zitadel/users' "$next")"

View file

@ -1,39 +1,38 @@
set unstable := true
set quiet := true
machine_base_path := justfile_directory() + "/machines"
secret_base_path := justfile_directory() + "/systems/x86_64-linux"
base_path := justfile_directory() + "/systems/x86_64-linux"
_default:
just --list vars
[doc('List all vars of {machine}')]
list machine:
sops decrypt {{ secret_base_path }}/{{ machine }}/secrets.yml
sops decrypt {{ base_path }}/{{ machine }}/secrets.yml
[doc('Edit all vars of {machine} in your editor')]
edit machine:
sops edit {{ secret_base_path }}/{{ machine }}/secrets.yml
sops edit {{ base_path }}/{{ machine }}/secrets.yml
[doc('Set var {value} by {key} for {machine}')]
@set machine key value:
sops set {{ secret_base_path }}/{{ machine }}/secrets.yml "$(printf '%s\n' '["{{ key }}"]' | sed -E 's#/#"]["#g; s/\["([0-9]+)"\]/[\1]/g')" "\"$(echo '{{ value }}' | sed 's/\"/\\\"/g')\""
sops set {{ base_path }}/{{ machine }}/secrets.yml "$(printf '%s\n' '["{{ key }}"]' | sed -E 's#/#"]["#g; s/\["([0-9]+)"\]/[\1]/g')" "\"$(echo '{{ value }}' | sed 's/\"/\\\"/g')\""
git add {{ secret_base_path }}/{{ machine }}/secrets.yml
git commit -m 'chore(secrets): set secret "{{ key }}" for machine "{{ machine }}"' -- {{ secret_base_path }}/{{ machine }}/secrets.yml > /dev/null
git add {{ base_path }}/{{ machine }}/secrets.yml
git commit -m 'chore(secrets): set secret "{{ key }}" for machine "{{ machine }}"' -- {{ base_path }}/{{ machine }}/secrets.yml > /dev/null
echo "Done"
[doc('Get var by {key} from {machine}')]
get machine key:
sops decrypt {{ secret_base_path }}/{{ machine }}/secrets.yml | yq ".$(echo "{{ key }}" | sed -E 's/\//./g') // \"\""
sops decrypt {{ base_path }}/{{ machine }}/secrets.yml | yq ".$(echo "{{ key }}" | sed -E 's/\//./g') // \"\""
[doc('Remove var by {key} for {machine}')]
remove machine key:
sops unset {{ secret_base_path }}/{{ machine }}/secrets.yml "$(printf '%s\n' '["{{ key }}"]' | sed -E 's#/#"]["#g; s/\["([0-9]+)"\]/[\1]/g')"
sops unset {{ base_path }}/{{ machine }}/secrets.yml "$(printf '%s\n' '["{{ key }}"]' | sed -E 's#/#"]["#g; s/\["([0-9]+)"\]/[\1]/g')"
git add {{ secret_base_path }}/{{ machine }}/secrets.yml
git commit -m 'chore(secrets): removed secret "{{ key }}" from machine "{{ machine }}"' -- {{ secret_base_path }}/{{ machine }}/secrets.yml > /dev/null
git add {{ base_path }}/{{ machine }}/secrets.yml
git commit -m 'chore(secrets): removed secret "{{ key }}" from machine "{{ machine }}"' -- {{ base_path }}/{{ machine }}/secrets.yml > /dev/null
echo "Done"
@ -44,14 +43,14 @@ generate machine:
# Skip if we already have a value
[ $(just vars get "{{ machine }}" "$key" | jq -r) ] && continue
just _rotate "{{ machine }}" "$key"
just vars _rotate "{{ machine }}" "$key"
done
[doc('Regenerate var values for {machine}')]
[script]
_rotate machine key:
# Exit if there's no script
[ -f "{{ justfile_directory() }}/script/{{ key }}" ] || exit
[ -f "{{ justfile_directory() }}/script/{{ key }}" ] || exit 0
echo "Executing script for {{ key }}"
just vars set "{{ machine }}" "{{ key }}" "$(cd -- "$(dirname "{{ justfile_directory() }}/script/{{ key }}")" && source "./$(basename "{{ key }}")")"
@ -60,7 +59,7 @@ _rotate machine key:
check:
cd ..
for machine in $(ls {{ machine_base_path }}); do
for machine in $(ls {{ base_path }}); do
just vars _check "$machine"
done
@ -71,14 +70,14 @@ _check machine:
# we can skip this folder as we are
# missing the files used to compare
# the defined vs the configured secrets
if [ ! -f "{{ machine_base_path }}/{{ machine }}/default.nix" ]; then
if [ ! -f "{{ base_path }}/{{ machine }}/default.nix" ]; then
printf "\r• %-8sskipped\n" "{{ machine }}"
exit 0
fi
exec 3< <(jq -nr \
--rawfile defined <(nix eval --json ..#nixosConfigurations.{{ machine }}.config.sops.secrets 2>/dev/null) \
--rawfile configured <([ -f "{{ secret_base_path }}/{{ machine }}/secrets.yml" ] && sops decrypt {{ secret_base_path }}/{{ machine }}/secrets.yml | yq '.' || echo "{}") \
--rawfile configured <([ -f "{{ base_path }}/{{ machine }}/secrets.yml" ] && sops decrypt {{ base_path }}/{{ machine }}/secrets.yml | yq '.' || echo "{}") \
'
[ $configured | fromjson | paths(scalars) | join("/") ] as $conf
| $defined

View file

@ -3,3 +3,34 @@
[doc('Manage vars')]
mod vars '.just/vars.just'
[doc('Manage users')]
mod users '.just/users.just'
[doc('Manage machines')]
mod machine '.just/machine.just'
[doc('Show information about project')]
@show:
echo "show"
[doc('update the flake dependencies')]
@update:
nix flake update
git commit -m 'chore: update dependencies' -- ./flake.lock > /dev/null
echo "Done"
[doc('Introspection on flake output')]
@select key:
nix eval --show-trace --json .#{{ key }} | jq .
#===============================================================================================
# Utils
#===============================================================================================
[no-exit-message]
[no-cd]
[private]
@assert condition message:
[ {{ condition }} ] || { echo -e 1>&2 "\n\x1b[1;41m Error \x1b[0m {{ message }}\n"; exit 1; }

View file

@ -1,43 +0,0 @@
{
lib,
inputs,
...
}: {
imports = [
./machines.nix
./tags.nix
./instances.nix
];
clan = {
meta = {
name = "arda";
domain = "arda";
description = "My personal machines at home";
};
directory = ../.;
specialArgs = {
ardaLib = {
types =
./types
|> (inputs.import-tree.withLib lib).leafs
|> lib.map (mod: {
name = mod |> lib.baseNameOf |> lib.splitString "." |> lib.head;
value = lib.types.submoduleWith {modules = [mod];};
})
|> lib.listToAttrs;
};
};
exportInterfaces =
./interfaces
|> (inputs.import-tree.withLib lib).leafs
|> lib.map (mod: {
name = mod |> lib.baseNameOf |> lib.splitString "." |> lib.head;
value = import mod;
})
|> lib.listToAttrs;
};
}

View file

@ -1,253 +0,0 @@
{
self,
inputs,
...
}: let
db =
self.clan.exports
|> inputs.clan-core.lib.getExport {
serviceName = "arda/persistence";
roleName = "default";
machineName = "ulmo";
instanceName = "persistence";
}
|> (v: v.persistence.driver.${v.persistence.main});
in {
clan.inventory.instances = {
users-chris = {
module = {
name = "users";
input = "clan-core";
};
roles.default.machines.mandos.settings = {};
roles.default.machines.manwe.settings = {};
roles.default.machines.orome.settings = {};
roles.default.machines.tulkas.settings = {};
roles.default.settings = {
user = "chris";
groups = ["wheel"];
prompt = true;
share = true;
};
};
clanDns = {
module = {
name = "dm-dns";
input = "clan-core";
};
roles.default.tags = ["all"];
};
gateway = {
module = {
name = "gateway";
input = "self";
};
roles.default = {
tags = ["operational:role:gateway"];
settings = {
driver = "caddy";
hosts = {
"auth.kruining.eu" = ''
reverse_proxy h2c://[::1]:9092
'';
};
};
};
};
persistence = {
module = {
name = "persistence";
input = "self";
};
roles.default.tags = ["operational:availability:always-on" "operational:storage:large"];
};
identity = {
module = {
name = "identity";
input = "self";
};
roles.default = {
tags = ["operational:availability:always-on"];
settings = {
database = db;
organization = {
nix = {
user = {
chris = {
email = "chris@kruining.eu";
firstName = "Chris";
lastName = "Kruining";
roles = ["ORG_OWNER"];
instanceRoles = ["IAM_OWNER"];
};
kaas = {
email = "chris+kaas@kruining.eu";
firstName = "Kaas";
lastName = "Kruining";
};
};
project = {
ulmo = {
projectRoleCheck = true;
projectRoleAssertion = true;
hasProjectCheck = true;
role = {
jellyfin = {
group = "jellyfin";
};
jellyfin_admin = {
group = "jellyfin";
};
};
assign = {
chris = ["jellyfin" "jellyfin_admin"];
kaas = ["jellyfin"];
};
application = {
jellyfin = {
redirectUris = ["https://jellyfin.kruining.eu/sso/OID/redirect/zitadel"];
grantTypes = ["authorizationCode"];
responseTypes = ["code"];
};
forgejo = {
redirectUris = ["https://git.amarth.cloud/user/oauth2/zitadel/callback"];
grantTypes = ["authorizationCode"];
responseTypes = ["code"];
};
vaultwarden = {
redirectUris = ["https://vault.kruining.eu/identity/connect/oidc-signin"];
grantTypes = ["authorizationCode"];
responseTypes = ["code"];
exportMap = {
client_id = "SSO_CLIENT_ID";
client_secret = "SSO_CLIENT_SECRET";
};
};
matrix = {
redirectUris = ["https://matrix.kruining.eu/_synapse/client/oidc/callback"];
grantTypes = ["authorizationCode"];
responseTypes = ["code"];
};
mydia = {
redirectUris = ["http://localhost:2010/auth/oidc/callback"];
grantTypes = ["authorizationCode"];
responseTypes = ["code"];
};
grafana = {
redirectUris = ["http://localhost:9001/login/generic_oauth"];
grantTypes = ["authorizationCode"];
responseTypes = ["code"];
};
};
};
convex = {
projectRoleCheck = true;
projectRoleAssertion = true;
hasProjectCheck = true;
application = {
scry = {
redirectUris = ["https://nautical-salamander-320.eu-west-1.convex.cloud/api/auth/callback/zitadel"];
grantTypes = ["authorizationCode"];
responseTypes = ["code"];
};
};
};
};
action = {
flattenRoles = {
script = ''
(ctx, api) => {
if (ctx.v1.user.grants == undefined || ctx.v1.user.grants.count == 0) {
return;
}
const roles = ctx.v1.user.grants.grants.flatMap(({ roles, projectId }) => roles.map(role => projectId + ':' + role));
api.v1.claims.setClaim('nix:zitadel:custom', JSON.stringify({ roles }));
};
'';
};
};
triggers = [
{
flowType = "customiseToken";
triggerType = "preUserinfoCreation";
actions = ["flattenRoles"];
}
{
flowType = "customiseToken";
triggerType = "preAccessTokenCreation";
actions = ["flattenRoles"];
}
];
};
};
};
};
};
servarr = {
module = {
name = "servarr";
input = "self";
};
roles.default = {
tags = ["operational:availability:always-on"];
settings = {
enable = true;
database = db;
services = {
sonarr = {
rootFolders = [
"/var/media/series"
];
};
radarr = {
rootFolders = [
"/var/media/movies"
];
};
lidarr = {
rootFolders = [
"/var/media/music"
];
};
prowlarr = {};
};
};
};
};
};
}

View file

@ -1,94 +0,0 @@
{lib, ...}: let
inherit (lib) mkOption types;
in {
options = {
services = mkOption {
type = types.attrsOf (types.submodule ({name, ...}: {
options = {
name = mkOption {
type = types.str;
default = name;
};
endpoint = mkOption {
type = types.submoduleWith {
modules = [../types/endpoint.nix];
};
default = {};
apply = attrs:
attrs
// {
__toString = self: let
protocol =
if self.protocol != null
then "${self.protocol}://"
else "";
port =
if self.port != null
then ":${toString self.port}"
else "";
path =
if self.path != null
then "/${self.path}"
else "";
query =
if self.query != null
then "?${toString self.query
|> lib.attrsToList
|> lib.map ({
name,
value,
}: "${name}=${value}")}"
else "";
hash =
if self.hash != null
then "#${toString self.hash
|> lib.attrsToList
|> lib.map ({
name,
value,
}: "${name}=${value}")}"
else "";
in "${protocol}${self.host}${port}${path}${query}${hash}";
};
};
# protocol = mkOption {
# type = types.str;
# default = "http";
# };
# host = mkOption {
# type = types.str;
# default = "[::1]";
# };
# port = mkOption {
# type = types.port;
# };
};
}));
default = {};
};
functions = mkOption {
type = types.attrsOf (types.submodule ({name, ...}: {
options = {
name = mkOption {
type = types.str;
default = name;
};
body = mkOption {
type = types.str;
};
};
}));
default = {};
};
};
}

View file

@ -1,24 +0,0 @@
{lib, ...}: let
inherit (lib) mkOption types;
in {
options = {
main = mkOption {
type = types.nullOr types.str;
default = null;
};
driver = mkOption {
type = types.attrsOf (types.submoduleWith {
modules = [
../types/endpoint.nix
];
});
default = {};
};
databases = mkOption {
type = types.listOf types.str;
default = [];
};
};
}

View file

@ -1,75 +0,0 @@
{...}: {
clan.inventory.machines = {
aule = {
name = "aule";
description = "Planned build server.";
machineClass = "nixos";
tags = [];
};
mandos = {
name = "mandos";
description = "Living room Steam box.";
machineClass = "nixos";
tags = [
"capability:mobility:stationary"
"operational:availability:wake-on-demand"
];
};
manwe = {
name = "manwe";
description = "Main desktop.";
machineClass = "nixos";
tags = [
"capability:mobility:stationary"
"operational:availability:manual"
];
};
melkor = {
name = "melkor";
description = "Planned machine with no defined role yet.";
machineClass = "nixos";
tags = [];
};
orome = {
name = "orome";
description = "Work laptop.";
machineClass = "nixos";
tags = [
"capability:mobility:portable"
"operational:availability:manual"
];
};
tulkas = {
name = "tulkas";
description = "Steam Deck.";
machineClass = "nixos";
tags = [
"capability:mobility:portable"
"operational:availability:manual"
];
};
ulmo = {
name = "ulmo";
description = "Primary self-hosted services machine.";
machineClass = "nixos";
tags = [
"capability:mobility:stationary"
"operational:availability:always-on"
"operational:storage:large"
"operational:role:gateway"
];
};
varda = {
name = "varda";
description = "Planned machine with no defined role yet.";
machineClass = "nixos";
tags = [];
};
yavanna = {
name = "yavanna";
description = "Planned machine with no defined role yet.";
machineClass = "nixos";
tags = [];
};
};
}

View file

@ -1,12 +0,0 @@
{...}: {
clan.inventory.tags = {
config,
machines,
...
}: {
# tag_name = [ "list" "of" "machines" ]
"capability:hardware:gpu" = [""];
"capability:hardware:audio" = [""];
"capability:hardware:bluetooth" = [""];
};
}

View file

@ -1,45 +0,0 @@
{lib, ...}: let
inherit (lib) mkOption types;
in {
options = {
protocol = mkOption {
type = types.str;
default = "http";
};
host = mkOption {
type = types.str;
default = "localhost";
};
port = mkOption {
type = types.nullOr types.port;
default = null;
};
user = mkOption {
type = types.nullOr types.str;
default = null;
};
password = mkOption {
type = types.nullOr types.str;
default = null;
};
path = mkOption {
type = types.nullOr types.str;
default = null;
};
query = mkOption {
type = types.nullOr (types.attrsOf types.str);
default = null;
};
hash = mkOption {
type = types.nullOr (types.attrsOf types.str);
default = null;
};
};
}

View file

@ -1,19 +0,0 @@
{lib, ...}: {
imports =
./.
|> builtins.readDir
|> lib.attrsToList
|> builtins.map ({
name,
value,
}: {
type = value;
path = ./. + "/${name}/flake-module.nix";
})
|> builtins.filter ({
type,
path,
}:
type == "directory" && (builtins.pathExists path))
|> builtins.map ({path, ...}: path);
}

View file

@ -1,92 +0,0 @@
{
lib,
clanLib,
exports,
...
}: let
inherit (builtins) toString;
in {
_class = "clan.service";
manifest = {
name = "arda/gateway";
description = ''
'';
readme = builtins.readFile ./README.md;
exports = {
inputs = [];
out = [];
};
};
roles.default = {
description = '''';
interface = {lib, ...}: let
inherit (lib) mkOption types;
in {
options = {
driver = mkOption {
type = types.enum ["caddy" "nginx"];
};
hosts = mkOption {
type = types.attrsOf types.str;
default = {};
};
};
};
perInstance = {
mkExports,
machine,
settings,
...
}: let
reverse_proxies =
exports
|> clanLib.selectExports (_scope: true)
|> lib.mapAttrsToList (_: value: (value.gateway.services or {}) |> lib.attrValues)
|> lib.concatLists
|> lib.map ({
name,
endpoint,
}: {
name = "${name}.${machine.name}.arda";
value = {
extraConfig = ''
reverse_proxy ${toString endpoint}
'';
};
})
|> lib.listToAttrs;
in {
# exports =
# mkExports {
# };
nixosModule = {
lib,
pkgs,
...
}: let
inherit (lib) mkMerge mkIf;
caddyPackage = pkgs.caddy.withPlugins {
plugins = ["github.com/corazawaf/coraza-caddy/v2@v2.1.0"];
hash = "sha256-pSXjLaZoRtKV3eFl2ySRSjl3yxi514G1Cb7pfrpxxtE=";
};
in {
config = mkMerge [
(lib.mkIf (settings.driver == "caddy") {
services.caddy = {
enable = true;
package = caddyPackage;
virtualHosts = reverse_proxies // {};
};
})
];
};
};
};
}

View file

@ -1,13 +0,0 @@
{...}: let
module = ./default.nix;
in {
clan.modules.gateway = module;
# perSystem = {...}: {
# clan.nixosTests.gateway = {
# imports = [];
# clan.modules."@arda/gateway" = module;
# };
# };
}

View file

@ -1,518 +0,0 @@
{
lib,
clanLib,
exports,
...
}: let
inherit (builtins) toString readFile;
inherit (lib) mkMerge mkIf;
in {
_class = "clan.service";
manifest = {
name = "arda/identity";
description = ''
'';
readme = readFile ./README.md;
exports = {
inputs = ["persistence"];
out = ["gateway" "persistence"];
};
};
roles.default = {
description = '''';
interface = {lib, ...}: let
inherit (lib) mkOption types toSentenceCase literalExpression;
in {
options = {
driver = mkOption {
type = types.enum ["zitadel"];
default = "zitadel";
};
database = mkOption {
type = types.anything;
};
port = mkOption {
type = types.port;
default = 9092;
};
organization = mkOption {
type = types.attrsOf (types.submodule ({ name, ... }: {
options =
let
org = name;
in
{
isDefault = mkOption {
type = types.bool;
default = false;
example = "true";
description = ''
True sets the '${org}' org as default org for the instance. Only one org can be default org.
Nothing happens if you set it to false until you set another org as default org.
'';
};
project = mkOption {
default = {};
type = types.attrsOf (types.submodule {
options = {
hasProjectCheck = mkOption {
type = types.bool;
default = false;
example = "true";
description = ''
ZITADEL checks if the org of the user has permission to this project.
'';
};
privateLabelingSetting = mkOption {
type = types.nullOr (types.enum [ "unspecified" "enforceProjectResourceOwnerPolicy" "allowLoginUserResourceOwnerPolicy" ]);
default = null;
example = "enforceProjectResourceOwnerPolicy";
description = ''
Defines from where the private labeling should be triggered,
supported values:
- unspecified
- enforceProjectResourceOwnerPolicy
- allowLoginUserResourceOwnerPolicy
'';
};
projectRoleAssertion = mkOption {
type = types.bool;
default = false;
example = "true";
description = ''
Describes if roles of user should be added in token.
'';
};
projectRoleCheck = mkOption {
type = types.bool;
default = false;
example = "true";
description = ''
ZITADEL checks if the user has at least one role on this project.
'';
};
role = mkOption {
default = {};
type = types.attrsOf (types.submodule ({ name, ... }: {
options =
let
roleName = name;
in
{
displayName = mkOption {
type = types.str;
default = toSentenceCase name;
example = "RoleName";
description = ''
Name used for project role.
'';
};
group = mkOption {
type = types.nullOr types.str;
default = null;
example = "some_group";
description = ''
Group used for project role.
'';
};
};
}));
};
assign = mkOption {
default = {};
type = types.attrsOf (types.listOf types.str);
};
application = mkOption {
default = {};
type = types.attrsOf (types.submodule {
options = {
redirectUris = mkOption {
type = types.nonEmptyListOf types.str;
example = ''
[ "https://example.com/redirect/url" ]
'';
description = ''
.
'';
};
grantTypes = mkOption {
type = types.nonEmptyListOf (types.enum [ "authorizationCode" "implicit" "refreshToken" "deviceCode" "tokenExchange" ]);
example = ''
[ "authorizationCode" ]
'';
description = ''
.
'';
};
responseTypes = mkOption {
type = types.nonEmptyListOf (types.enum [ "code" "idToken" "idTokenToken" ]);
example = ''
[ "code" ]
'';
description = ''
.
'';
};
exportMap =
let
strOpt = mkOption { type = types.nullOr types.str; default = null; };
in
mkOption {
type = types.submodule { options = { client_id = strOpt; client_secret = strOpt; }; };
default = {};
example = literalExpression ''
{
client_id = "SSO_CLIENT_ID";
client_secret = "SSO_CLIENT_SECRET";
}
'';
description = ''
Remap the outputted variables to another key.
'';
};
};
});
};
};
});
};
user = mkOption {
default = {};
type = types.attrsOf (types.submodule ({ name, ... }: {
options =
let
username = name;
in
{
email = mkOption {
type = types.str;
example = "someone@some.domain";
description = ''
Username.
'';
};
userName = mkOption {
type = types.nullOr types.str;
default = username;
example = "some_user_name";
description = ''
Username. Default value is the key of the config object you created, you can overwrite that by setting this option
'';
};
firstName = mkOption {
type = types.str;
example = "John";
description = ''
First name of the user.
'';
};
lastName = mkOption {
type = types.str;
example = "Doe";
description = ''
Last name of the user.
'';
};
roles = mkOption {
type = types.listOf types.str;
default = [];
example = "[ \"ORG_OWNER\" ]";
description = ''
List of roles granted to organisation.
'';
};
instanceRoles = mkOption {
type = types.listOf types.str;
default = [];
example = "[ \"IAM_OWNER\" ]";
description = ''
List of roles granted to instance.
'';
};
};
}));
};
action = mkOption {
default = {};
type = types.attrsOf (types.submodule ({ name, ... }: {
options = {
script = mkOption {
type = types.str;
example = ''
(ctx, api) => {
api.v1.claims.setClaim('some_claim', 'some_value');
};
'';
description = ''
The script to run. This must be a function that receives 2 parameters, and returns void. During the creation of the action's script this module simply does `const {{name}} = {{script}}`.
'';
};
timeout = mkOption {
type = (types.ints.between 0 20);
default = 10;
example = "10";
description = ''
After which time the action will be terminated if not finished.
'';
};
allowedToFail = mkOption {
type = types.bool;
default = true;
example = "true";
description = ''
Allowed to fail.
'';
};
};
}));
};
triggers = mkOption {
default = [];
type = types.listOf (types.submodule {
options = {
flowType = mkOption {
type = types.enum [ "authentication" "customiseToken" "internalAuthentication" "samlResponse" ];
example = "customiseToken";
description = ''
Type of the flow to which the action triggers belong.
'';
};
triggerType = mkOption {
type = types.enum [ "postAuthentication" "preCreation" "postCreation" "preUserinfoCreation" "preAccessTokenCreation" "preSamlResponse" ];
example = "postAuthentication";
description = ''
Trigger type on when the actions get triggered.
'';
};
actions = mkOption {
type = types.nonEmptyListOf types.str;
example = ''[ "action_name" ]'';
description = ''
Names of actions to trigger
'';
};
};
});
};
};
}));
};
};
};
perInstance = {
mkExports,
settings,
machine,
instanceName,
...
}: {
exports = mkExports (mkMerge [
{
gateway.services.identity = {endpoint.port = settings.port;};
}
(mkIf (settings.driver == "zitadel") {
gateway.functions.auth = {
body = ''
forward_auth h2c://[::1]:${toString settings.port} {
uri /api/authz/forward-auth
copy_headers Remote-User Remote-Groups Remote-Email Remote-Name
}
'';
};
persistence.databases = ["zitadel"];
})
]);
nixosModule = args@{
lib,
pkgs,
config,
...
}: let
vars = config.clan.core.vars.generators.zitadel.files;
users = config.clan.core.vars.generators.zitadel_users.files.users.path;
email_password = config.clan.core.vars.generators.zitadel_email_password.files.password.path;
ardaLib = import ../../lib/strings.nix args;
zLib = import ./lib.nix (args // {inherit settings ardaLib;});
in {
config = mkMerge [
(mkIf (settings.driver == "zitadel") ({
clan.core.vars.generators.zitadel = {
dependencies = ["persistence"];
files = {
masterKey = {
deploy = true;
owner = "zitadel";
group = "zitadel";
restartUnits = ["zitadel.service"];
};
settings = {
deploy = true;
owner = "zitadel";
group = "zitadel";
restartUnits = ["zitadel.service"];
};
infraPrivateKey = {
deploy = true;
owner = "zitadel";
group = "zitadel";
restartUnits = ["zitadel.service"];
};
infraPublicKey = {
deploy = true;
owner = "zitadel";
group = "zitadel";
restartUnits = ["zitadel.service"];
};
};
runtimeInputs = with pkgs; [pwgen openssl_3_5];
script = ''
pwgen -s 32 1 > $out/masterKey
openssl genrsa -traditional -out $out/infraPrivateKey 2048
openssl rsa -pubout -in $out/infraPrivateKey -out $out/infraPublicKey
cat << EOL > $out/settings
Database:
postgres:
User:
Password: $(cat $in/persistence/zitadel_password)
Admin:
Password: $(cat $in/persistence/zitadel_password)
EOL
'';
};
clan.core.vars.generators.zitadel_users = {
files = {
users = {
deploy = true;
owner = "zitadel";
group = "zitadel";
restartUnits = ["infra-zitadel.service"];
};
};
script = ''
echo "{}" > $out/users
'';
};
clan.core.vars.generators.zitadel_email_password = {
prompts = {
password = {
description = "password to email for zitadel's smtp connection";
type = "hidden";
persist = true;
};
};
files = {
password = {
deploy = true;
owner = "zitadel";
group = "zitadel";
restartUnits = ["infra-zitadel.service"];
};
};
script = ''
cat $prompts/password > $out/password
'';
};
environment.systemPackages = with pkgs; [
zitadel
];
services.zitadel = {
enable = true;
masterKeyFile = vars.masterKey.path;
tlsMode = "external";
extraSettingsPaths = [
vars.settings.path
];
settings = {
Port = settings.port;
ExternalDomain = "auth.kruining.eu";
ExternalPort = 443;
ExternalSecure = true;
Metrics.Type = "otel";
Tracing.Type = "otel";
Telemetry.Enabled = true;
SystemDefaults = {
PasswordHasher.Hasher.Algorithm = "argon2id";
SecretHasher.Hasher.Algorithm = "argon2id";
};
Database.postgres = {
Host = settings.database.host;
Port = settings.database.port;
Database = "zitadel";
User = {
Username = "zitadel";
};
Admin = {
Username = "zitadel";
};
};
SystemAPIUsers = {
infra = {
Path = vars.infraPublicKey.path;
Memberships = [
{ MemberType = "System"; Roles = [ "SYSTEM_OWNER" "IAM_OWNER" "ORG_OWNER" ]; }
];
};
};
};
};
} // (zLib.createInfra { inherit users email_password; key_file = vars.infraPrivateKey.path; })))
];
};
};
};
}

View file

@ -1,13 +0,0 @@
# Registers the identity service implementation as the clan module `identity`.
{...}: {
  clan.modules.identity = ./default.nix;

  # Test scaffolding, currently disabled:
  # perSystem = {...}: {
  #   clan.nixosTests.identity = {
  #     imports = [];
  #     clan.modules."@arda/identity" = ./default.nix;
  #   };
  # };
}

View file

@ -1,372 +0,0 @@
{
lib,
ardaLib,
self,
pkgs,
settings,
...
}: let
# Builds a terranix (terraform-in-nix) module that declares the whole Zitadel
# tenant: orgs, projects, OIDC apps, roles, users, grants, actions, SMTP
# config and per-app credential files. `users`/`email_password`/`key_file`
# are paths to generated secrets; `terra` is the terranix module argument.
createTerranixModule = {
users,
email_password,
key_file,
...
}: terra: let
inherit (lib) toUpper toSentenceCase nameValuePair mapAttrs mapAttrs' concatMapAttrs concatMapStringsSep filterAttrsRecursive listToAttrs imap0 head drop length literalExpression attrNames;
inherit (ardaLib) toSnakeCase;
inherit (terra.lib) tfRef;
# Maps a logical reference kind to the terraform resource type backing it.
_refTypeMap = {
org = {type = "org";};
project = {type = "project";};
user = {
type = "user";
tfType = "human_user";
};
};
# Produces e.g. { orgId = "${ resource.zitadel_org.my_org.id }"; }.
mapRef' = {
type,
tfType ? type,
}: name: {"${type}Id" = "\${ resource.zitadel_${tfType}.${toSnakeCase name}.id }";};
mapRef = type: name: mapRef' (_refTypeMap.${type}) name;
# "PREFIX" + "someValue" -> "PREFIX_SOME_VALUE".
mapEnum = prefix: value: "${prefix}_${value |> toSnakeCase |> toUpper}";
# Translates friendly option values into provider enum constants; keys not
# listed here pass through unchanged.
mapValue = type: value: ({
appType = mapEnum "OIDC_APP_TYPE" value;
grantTypes = map (t: mapEnum "OIDC_GRANT_TYPE" t) value;
responseTypes = map (t: mapEnum "OIDC_RESPONSE_TYPE" t) value;
authMethodType = mapEnum "OIDC_AUTH_METHOD_TYPE" value;
flowType = mapEnum "FLOW_TYPE" value;
triggerType = mapEnum "TRIGGER_TYPE" value;
accessTokenType = mapEnum "OIDC_TOKEN_TYPE" value;
}."${type}" or value);
# Snake-cases the resource name and every attribute key, mapping values
# through mapValue on the way.
toResource = name: value:
nameValuePair
(toSnakeCase name)
(lib.mapAttrs' (k: v: nameValuePair (toSnakeCase k) (mapValue k v)) value);
withRef = type: name: attrs: attrs // (mapRef type name);
# Walks `set` down the nested attribute path `keys` (e.g. ["project" "application"]),
# invoking `callback` with each ancestor name and finally the leaf name/value.
select = keys: callback: set:
if (length keys) == 0
then mapAttrs' callback set
else let
key = head keys;
in
concatMapAttrs (k: v: select (drop 1 keys) (callback k) (v.${key} or {})) set;
append = attrList: set: set // (listToAttrs attrList);
# Wraps `set` in a terraform for_each over `src`.
# NOTE(review): `_key` is derived from the `key` parameter but unused — the
# for_each expression below hardcodes org/name. Confirm intended behavior.
forEach = src: key: set: let
_key = concatMapStringsSep "_" (k: "\${item.${k}}") key;
in
{
forEach = tfRef '' {
for item in ${src} :
"''${item.org}_''${item.name}" => item
}'';
}
// set;
in {
terraform.required_providers.zitadel = {
source = "zitadel/zitadel";
version = "2.2.0";
};
provider.zitadel = {
domain = "auth.kruining.eu";
insecure = false;
# Authenticates through Zitadel's system API using the generated RSA key.
system_api = {
user = "infra";
inherit key_file;
};
};
locals = {
# Flattens the { org -> { user -> details } } JSON file into a list that
# the extra-users for_each below can iterate.
extra_users = tfRef "
flatten([ for org, users in jsondecode(file(\"${users}\")): [
for name, details in users: {
org = org
name = name
email = details.email
firstName = details.firstName
lastName = details.lastName
}
] ])
";
orgs = settings.organization |> mapAttrs (org: _: tfRef "resource.zitadel_org.${org}.id");
};
resource = {
# Organizations
zitadel_org =
settings.organization
|> select [] (
name: {isDefault, ...}:
{inherit name isDefault;}
|> toResource name
);
# Projects per organization
zitadel_project =
settings.organization
|> select ["project"] (
org: name: {
hasProjectCheck,
privateLabelingSetting,
projectRoleAssertion,
projectRoleCheck,
...
}:
{
inherit name hasProjectCheck privateLabelingSetting projectRoleAssertion projectRoleCheck;
}
|> withRef "org" org
|> toResource "${org}_${name}"
);
# Each OIDC app per project
zitadel_application_oidc =
settings.organization
|> select ["project" "application"] (
org: project: name: {
redirectUris,
grantTypes,
responseTypes,
...
}:
{
inherit name redirectUris grantTypes responseTypes;
accessTokenRoleAssertion = true;
idTokenRoleAssertion = true;
accessTokenType = "JWT";
}
|> withRef "org" org
|> withRef "project" "${org}_${project}"
|> toResource "${org}_${project}_${name}"
);
# Each project role
zitadel_project_role =
settings.organization
|> select ["project" "role"] (
org: project: name: value:
{
inherit (value) displayName group;
roleKey = name;
}
|> withRef "org" org
|> withRef "project" "${org}_${project}"
|> toResource "${org}_${project}_${name}"
);
# Each project role assignment
zitadel_user_grant =
settings.organization
|> select ["project" "assign"] (
org: project: user: roles:
{roleKeys = roles;}
|> withRef "org" org
|> withRef "project" "${org}_${project}"
|> withRef "user" "${org}_${user}"
|> toResource "${org}_${project}_${user}"
);
# Users (declared ones, plus the for_each over the extra-users JSON file)
zitadel_human_user =
settings.organization
|> select ["user"] (
org: name: {
email,
userName,
firstName,
lastName,
...
}:
{
inherit email userName firstName lastName;
isEmailVerified = true;
lifecycle = {
ignore_changes = ["first_name" "last_name" "user_name"];
};
}
|> withRef "org" org
|> toResource "${org}_${name}"
)
|> append [
(forEach "local.extra_users" ["org" "name"] {
orgId = tfRef "local.orgs[each.value.org]";
userName = tfRef "each.value.name";
email = tfRef "each.value.email";
firstName = tfRef "each.value.firstName";
lastName = tfRef "each.value.lastName";
isEmailVerified = true;
}
|> toResource "extraUsers")
];
# Global (instance-level) user roles
zitadel_instance_member =
settings.organization
|> filterAttrsRecursive (n: v: !(v ? "instanceRoles" && (length v.instanceRoles) == 0))
|> select ["user"] (
org: name: {instanceRoles, ...}:
{roles = instanceRoles;}
|> withRef "user" "${org}_${name}"
|> toResource "${org}_${name}"
);
# Organization-specific roles
zitadel_org_member =
settings.organization
|> filterAttrsRecursive (n: v: !(v ? "roles" && (length v.roles) == 0))
|> select ["user"] (
org: name: {roles, ...}:
{inherit roles;}
|> withRef "org" org
|> withRef "user" "${org}_${name}"
|> toResource "${org}_${name}"
);
# Organization's actions
zitadel_action =
settings.organization
|> select ["action"] (
org: name: {
timeout,
allowedToFail,
script,
...
}:
{
inherit allowedToFail name;
timeout = "${toString timeout}s";
script = "const ${name} = ${script}";
}
|> withRef "org" org
|> toResource "${org}_${name}"
);
# Organization's action assignments (triggers are index-named per org)
zitadel_trigger_actions =
settings.organization
|> concatMapAttrs (
org: {triggers, ...}:
triggers
|> imap0 (i: {
flowType,
triggerType,
actions,
...
}: (
let
name = "trigger_${toString i}";
in
{
inherit flowType triggerType;
actionIds =
actions
|> map (action: (tfRef "zitadel_action.${org}_${toSnakeCase action}.id"));
}
|> withRef "org" org
|> toResource "${org}_${name}"
))
|> listToAttrs
);
# SMTP config
zitadel_smtp_config.default = {
sender_address = "chris@kruining.eu";
sender_name = "no-reply (Zitadel)";
tls = true;
host = "black-mail.nl:587";
user = "chris@kruining.eu";
password = tfRef "file(\"${email_password}\")";
set_active = true;
};
# Client credentials per app, written as env-style files for consumers
local_sensitive_file =
settings.organization
|> select ["project" "application"] (
org: project: name: {exportMap, ...}:
nameValuePair "${org}_${project}_${name}" {
content = ''
${
if exportMap.client_id != null
then exportMap.client_id
else "CLIENT_ID"
}=${tfRef "resource.zitadel_application_oidc.${org}_${project}_${name}.client_id"}
${
if exportMap.client_secret != null
then exportMap.client_secret
else "CLIENT_SECRET"
}=${tfRef "resource.zitadel_application_oidc.${org}_${project}_${name}.client_secret"}
'';
filename = "/var/lib/zitadel/clients/${org}_${project}_${name}";
}
);
};
};
in {
# Wraps the terranix configuration in a oneshot systemd unit that applies
# the Zitadel tenant infrastructure with OpenTofu once zitadel.service is up.
createInfra = args @ {...}: let
tofu = "${lib.getExe pkgs.opentofu} -input=false";
terraformConfiguration = self.inputs.terranix.lib.terranixConfiguration {
system = pkgs.stdenv.hostPlatform.system;
modules = [
(createTerranixModule args)
];
};
in {
systemd.services."infra-zitadel" = {
description = "Infra for Zitadel";
wantedBy = ["multi-user.target"];
wants = ["zitadel.service"];
after = ["zitadel.service"];
# NOTE(review): `-g media` looks copied from the servarr variant; the unit
# runs with Group=zitadel — confirm intended group ownership.
preStart = ''
install -d -m 0770 -o zitadel -g media /var/lib/infra-zitadel
'';
script = ''
# Sleep for a bit to give the service a chance to start up
sleep 5s
if [ "$(systemctl is-active zitadel)" != "active" ]; then
echo "zitadel is not running"
exit 1
fi
# Print the path to the source for easier debugging
echo "config location: ${terraformConfiguration}"
# Copy infra code into workspace
cp -f ${terraformConfiguration} config.tf.json
# Initialize OpenTofu
${tofu} init
# Run the infrastructure code
${tofu} plan -out=tfplan
${tofu} apply -json -auto-approve tfplan
'';
serviceConfig = {
Type = "oneshot";
User = "zitadel";
Group = "zitadel";
# StateDirectory= takes paths relative to /var/lib; the previous
# absolute value ("/var/lib/infra-zitadel") is rejected by systemd.
StateDirectory = "infra-zitadel";
# The script writes config.tf.json and tofu state into its cwd, so pin
# the cwd to the state directory instead of defaulting to /.
WorkingDirectory = "/var/lib/infra-zitadel";
};
};
};
}

View file

@ -1,169 +0,0 @@
# Clan service that provisions the shared persistence layer (a PostgreSQL
# server) and creates one database + login role for every database requested
# by other services through their `persistence.databases` exports.
{
lib,
clanLib,
exports,
...
}: let
inherit (builtins) toString;
in {
_class = "clan.service";
manifest = {
name = "arda/persistence";
description = ''
Configuration of persistence resource(s)
(for now this means a database. and specifically it means postgres)
'';
readme = builtins.readFile ./README.md;
exports = {
inputs = ["persistence"];
out = ["persistence"];
};
};
roles.default = {
description = '''';
interface = {lib, ...}: let
inherit (lib) mkOption types;
in {
options = {
# TCP port the PostgreSQL server listens on.
port = mkOption {
type = types.port;
default = 5432;
};
};
};
perInstance = {
mkExports,
machine,
settings,
...
}: let
# Every database any other service asked for via its
# `persistence.databases` export, flattened into one list.
requested_databases =
exports
|> clanLib.selectExports (_scope: true)
|> lib.mapAttrsToList (_: value: value.persistence.databases or [])
|> lib.concatLists;
in {
exports = mkExports {
persistence = {
main = "postgresql";
driver.postgresql = {
host = "localhost";
port = settings.port;
};
};
};
nixosModule = {
lib,
pkgs,
config,
...
}: {
clan.core.vars.generators.postgresql = let
# One non-deployed secret "<db>_password" per requested database;
# consuming services read these via generator dependencies.
password_files =
requested_databases
|> lib.map (db: [
{
name = "${db}_password";
value = {
secret = true;
deploy = false;
};
}
])
|> lib.concatLists
|> lib.listToAttrs;
in {
files =
{
"server.crt" = {
secret = true;
deploy = true;
};
"server.key" = {
secret = true;
deploy = true;
};
".pgpass" = {
secret = true;
deploy = true;
owner = "postgres";
group = "postgres";
mode = "0600";
restartUnits = ["postgresql.service"];
};
}
// password_files;
runtimeInputs = with pkgs; [openssl_3_5 pwgen];
# Generates a self-signed TLS cert, a random password per database, and
# a .pgpass so the postgres user can authenticate to every database.
# NOTE(review): `lib.join` is not in stock nixpkgs lib — presumably
# provided by an extended lib in this repo; confirm.
script = ''
openssl req \
-new -x509 -days 365 -nodes -text \
-out $out/server.crt \
-keyout $out/server.key \
-subj "/CN=db.${config.networking.fqdn}"
${requested_databases
|> lib.map (db: "pwgen -s 128 1 > $out/${db}_password")
|> lib.join "\n"}
cat << EOL > $out/.pgpass
#host:port:database:user:password
${requested_databases
|> lib.map (db: "*:${toString settings.port}:${db}:${db}:$(cat $out/${db}_password)")
|> lib.join "\n"}
EOL
'';
};
systemd.services.postgresql.environment.PGPASSFILE = config.clan.core.vars.generators.postgresql.files.".pgpass".path;
services = {
postgresql = {
enable = true;
# enableTCPIP = true;
settings = {
port = settings.port;
ssl = true;
};
ensureDatabases = requested_databases;
ensureUsers =
requested_databases
|> lib.map (db: {
name = db;
ensureDBOwnership = true;
ensureClauses = {
login = true;
connection_limit = 5;
};
});
identMap = ''
#map sys user db user
superuser_map root postgres
superuser_map postgres postgres
superuser_map /^(.+)$ \1
'';
authentication = ''
# Generated file, do not edit!
# type database user auth-method optional_ident_map
local sameuser all peer map=superuser_map
# TYPE DATABASE USER ADDRESS METHOD
# local all all trust
host all all 127.0.0.1/32 scram-sha-256
host all all ::1/128 scram-sha-256
'';
};
};
};
};
};
}

View file

@ -1,13 +0,0 @@
{...}: let
module = ./default.nix;
in {
clan.modules.persistence = module;
# perSystem = {...}: {
# clan.nixosTests.persistence = {
# imports = [];
# clan.modules."@arda/persistence" = module;
# };
# };
}

View file

@ -1,150 +0,0 @@
{
exports,
clanLib,
lib,
...
}: let
inherit (lib) toString;
in {
_class = "clan.service";
manifest = {
name = "arda/servarr";
description = '''';
categories = ["Service" "Media"];
readme = builtins.readFile ./README.md;
exports = {
inputs = ["persistence"];
out = ["gateway" "persistence"];
};
};
roles.default = {
description = '''';
interface = {lib, ...}: let
inherit (lib) mkOption mkEnableOption types;
in {
options = {
enable = mkEnableOption "Enable configured *arr services";
database = mkOption {
type = types.anything; #ardaLib.types.endpoint;
};
services = mkOption {
type = types.attrsOf (types.submodule ({name, ...}: {
options = {
enable = mkEnableOption "Enable ${name}" // {default = true;};
debug = mkEnableOption "Use tofu plan instead of tofu apply for ${name} ";
rootFolders = mkOption {
type = types.listOf types.str;
default = [];
};
};
}));
default = {};
description = ''
Settings foreach *arr service
'';
};
};
};
perInstance = {
instanceName,
settings,
machine,
roles,
mkExports,
...
}: {
exports = mkExports {
# endpoints.hosts =
# settings.services
# |> lib.attrNames
# |> (s: lib.concat s ["sabnzbd" "qbittorrent" "flaresolverr"])
# |> lib.map (service: "${service}.${machine.name}.arda");
persistence.databases =
settings.services
|> lib.attrNames;
gateway.services =
settings.services
|> lib.attrNames
# |> (s: lib.concat s ["sabnzbd" "qbittorrent" "flaresolverr"])
|> lib.imap1 (i: name: {
inherit name;
value = {
endpoint.port = 2000 + i;
};
})
|> lib.listToAttrs;
};
nixosModule = args @ {
config,
lib,
pkgs,
...
}: let
services = settings.services |> lib.attrNames;
service_count = services |> lib.length;
servarr = import ./lib.nix (args // {inherit settings;});
in {
imports = [
(import ./sabnzbd.nix (args
// {
inherit settings;
port = 2000 + service_count + 1;
}))
(import ./qbittorrent.nix (args
// {
inherit settings;
port = 2000 + service_count + 2;
}))
(servarr.createModule settings.services)
];
config = {
clan.core.vars.generators.servarr = rec {
dependencies =
services ++ ["sabnzbd" "qbittorrent"];
files."config.tfvars" = {
owner = "media";
group = "media";
mode = "0440";
restartUnits = services |> lib.map (s: "${s}.service");
};
script = ''
cat << EOL > $out/config.tfvars
${
services
|> lib.map (s: "${s}_api_key = \"$(cat $in/${s}/api_key)\"")
|> lib.join "\n"
}
qbittorrent_api_key = "$(cat $in/qbittorrent/password)"
sabnzbd_api_key = "$(cat $in/sabnzbd/api_key)"
EOL
'';
};
services = {
flaresolverr = {
enable = true;
openFirewall = true;
port = 2000 + service_count + 3;
};
};
};
};
};
};
perMachine = {...}: {
};
}

View file

@ -1,13 +0,0 @@
{...}: let
module = ./default.nix;
in {
clan.modules.servarr = module;
# perSystem = {...}: {
# clan.nixosTests.servarr = {
# imports = [];
# clan.modules."@arda/servarr" = module;
# };
# };
}

View file

@ -1,329 +0,0 @@
{
self,
config,
lib,
pkgs,
settings,
...
}: let
inherit (lib) mkIf;
# Builds the clan vars generator for one *arr service: a random API key and
# an environment file carrying that key plus the service's PostgreSQL
# password (pulled from the persistence generator via `dependencies`).
createGenerator = {
service,
options,
...
}: {
dependencies = ["postgresql"];
files = {
api_key = {
secret = true;
deploy = true;
owner = service;
group = "media";
restartUnits = ["${service}.service"];
};
"config.env" = {
secret = true;
deploy = true;
owner = service;
group = "media";
restartUnits = ["${service}.service"];
};
};
runtimeInputs = with pkgs; [pwgen];
# The *arr services read both values from the environment, e.g.
# RADARR__AUTH__APIKEY / RADARR__POSTGRES_PASSWORD.
script = ''
pwgen -s 128 1 > $out/api_key
cat << EOL > $out/config.env
${lib.toUpper service}__AUTH__APIKEY="$(cat $out/api_key)"
${lib.toUpper service}__POSTGRES_PASSWORD="$(cat $in/postgresql/${service}_password)"
EOL
'';
};
# Builds the services.<service> NixOS config for one *arr service: bound to
# localhost only (the gateway proxies it), authentication delegated
# externally, state stored in PostgreSQL via the persistence service.
createService = {
service,
options,
...
}: let
inherit (builtins) toString;
in
{
enable = true;
# openFirewall = true;
environmentFiles = [
config.clan.core.vars.generators.${service}.files."config.env".path
];
settings = {
auth.authenticationMethod = "External";
server = {
bindaddress = "[::1]";
port = options.port;
};
# Password provided via environment file
postgres = {
host = settings.database.host;
port = toString settings.database.port;
user = service;
maindb = service;
logdb = service;
};
};
}
# These services run as their own user in the shared "media" group.
// (lib.optionalAttrs (lib.elem service ["radarr" "sonarr" "lidarr" "whisparr"]) {
user = service;
group = "media";
});
# Builds the oneshot "infra-<service>" systemd unit that applies this
# service's terranix/OpenTofu configuration once the service itself is up.
createSystemdService = args @ {
service,
options,
...
}: let
tofu = lib.getExe pkgs.opentofu;
terraformConfiguration = self.inputs.terranix.lib.terranixConfiguration {
system = pkgs.stdenv.hostPlatform.system;
modules = [
(createInfra args)
];
};
in {
description = "${service} apply infra";
wantedBy = ["multi-user.target"];
wants = ["${service}.service"];
preStart = ''
install -d -m 0770 -o ${service} -g media /var/lib/infra-${service}
${
options.rootFolders
|> lib.map (folder: "install -d -m 0770 -o media -g media ${folder}")
|> lib.join "\n"
}
'';
script = ''
# Sleep for a bit to give the service a chance to start up
sleep 5s
if [ "$(systemctl is-active ${lib.escapeShellArg service})" != "active" ]; then
echo "${service} is not running"
exit 1
fi
# Print the path to the source for easier debugging
echo "config location: ${terraformConfiguration}"
# Copy infra code into workspace
cp -f ${terraformConfiguration} config.tf.json
# Initialize OpenTofu
${tofu} init
# Run the infrastructure code
${tofu} \
${
if options.debug
then "plan"
else "apply -auto-approve"
} \
-var-file='${config.clan.core.vars.generators.servarr.files."config.tfvars".path}'
'';
serviceConfig = {
Type = "oneshot";
User = service;
Group = "media";
# Fixed: preStart provisions /var/lib/infra-<service>; the previous value
# ("/var/lib/<service>-apply-infra") was never created, so the unit would
# start in a nonexistent working directory and fail.
WorkingDirectory = "/var/lib/infra-${service}";
EnvironmentFile = [
config.clan.core.vars.generators.${service}.files."config.env".path
];
};
};
# Returns a module to be used in a modules list of terranix.
# Declares, per *arr service: input variables for API keys, the devopsarr
# provider (pinned version), root folders, download clients, and — for
# prowlarr only — application links and the Nyaa indexer.
createInfra = {
service,
options,
...
}: terra: let
inherit (terra.lib) tfRef;
in {
variable = {
"${service}_api_key" = {
type = "string";
description = "${service} API key";
};
qbittorrent_api_key = {
type = "string";
description = "qbittorrent api key";
};
sabnzbd_api_key = {
type = "string";
description = "sabnzbd api key";
};
};
terraform.required_providers.${service} = {
source = "devopsarr/${service}";
version =
{
radarr = "2.3.5";
sonarr = "3.4.2";
prowlarr = "3.2.1";
lidarr = "1.13.0";
readarr = "2.1.0";
whisparr = "1.2.0";
}.${
service
};
};
provider.${service} = {
url = "http://[::1]:${toString options.port}";
api_key = tfRef "var.${service}_api_key";
};
resource =
{
"${service}_root_folder" = mkIf (lib.elem service ["radarr" "sonarr" "whisparr" "readarr"]) (
options.rootFolders
# NOTE(review): lib.imap is a deprecated alias of lib.imap1 — confirm
# that 1-based indices are intended for the "localN" resource names.
|> lib.imap (i: f: lib.nameValuePair "local${toString i}" {path = f;})
|> lib.listToAttrs
);
"${service}_download_client_qbittorrent" = mkIf (lib.elem service ["radarr" "sonarr" "lidarr" "whisparr"]) {
"main" = {
name = "qBittorrent";
enable = true;
priority = 1;
host = "localhost";
username = "admin";
password = tfRef "var.qbittorrent_api_key";
url_base = "/";
port = config.services.qbittorrent.webuiPort;
};
};
"${service}_download_client_sabnzbd" = mkIf (lib.elem service ["radarr" "sonarr" "lidarr" "whisparr"]) {
"main" = {
name = "SABnzbd";
enable = true;
priority = 1;
host = "localhost";
api_key = tfRef "var.sabnzbd_api_key";
url_base = "/";
port = config.services.sabnzbd.settings.misc.port;
};
};
}
// (lib.optionalAttrs (service == "prowlarr") (
settings.services
|> lib.filterAttrs (s: _: lib.elem s ["radarr" "sonarr" "lidarr" "whisparr"])
# NOTE(review): destructures `port` from the raw service settings, but the
# submodule options in default.nix declare no `port` option — this appears
# to expect the port injected by createModule. Confirm it is present here.
|> lib.mapAttrsToList (s: {port, ...}: {
"prowlarr_application_${s}"."main" = let
p = config.services.prowlarr.settings.server.port or 9696;
in {
name = s;
sync_level = "addOnly";
base_url = "http://localhost:${toString port}";
prowlarr_url = "http://localhost:${toString p}";
api_key = tfRef "var.${s}_api_key";
};
})
|> lib.concat [
{
"prowlarr_indexer" = {
"nyaa" = {
enable = true;
app_profile_id = 1;
priority = 1;
name = "Nyaa";
implementation = "Cardigann";
config_contract = "CardigannSettings";
protocol = "torrent";
fields = [
{
name = "definitionFile";
text_value = "nyaasi";
}
{
name = "baseSettings.limitsUnit";
number_value = 0;
}
{
name = "torrentBaseSettings.preferMagnetUrl";
bool_value = false;
}
{
name = "prefer_magnet_links";
bool_value = true;
}
{
name = "sonarr_compatibility";
bool_value = false;
}
{
name = "strip_s01";
bool_value = false;
}
{
name = "radarr_compatibility";
bool_value = false;
}
{
name = "filter-id";
number_value = 0;
}
{
name = "cat-id";
number_value = 0;
}
{
name = "sort";
number_value = 0;
}
{
name = "type";
number_value = 1;
}
];
};
};
}
]
|> lib.mkMerge
));
};
in {
# Entry point: builds one NixOS config fragment (vars generator, service,
# infra unit) per configured *arr service and merges them all.
createModule = services: args: {
config =
services
|> lib.attrsToList
|> lib.imap1 (i: {
name,
value,
}: let
service = name;
# Ports are assigned deterministically as 2000 + 1-based index, which
# matches the gateway.services export in default.nix.
options = value // {port = 2000 + i;};
in {
clan.core.vars.generators.${service} = createGenerator (args // {inherit service options;});
services.${service} = createService (args // {inherit service options;});
systemd.services."infra-${service}" = lib.mkIf settings.enable (createSystemdService (args // {inherit service options;}));
})
|> lib.mkMerge;
};
}

View file

@ -1,96 +0,0 @@
# Configures qBittorrent with a generated admin password (PBKDF2-hashed in
# the encoding qBittorrent expects) and the VueTorrent alternative web UI.
{
config,
pkgs,
lib,
settings,
port,
...
}: {
clan.core.vars.generators.qbittorrent = let
# Helper script: turns argv[1] into qBittorrent's
# "@ByteArray(<b64 salt>:<b64 pbkdf2-sha512 hash>)" password encoding.
hash_password = pkgs.writers.writePython3 "hashPassword" {} ''
import base64
import hashlib
import sys
import uuid
password = sys.argv[1]
salt = uuid.uuid4()
salt_bytes = salt.bytes
password = str.encode(password)
hashed_password = hashlib.pbkdf2_hmac(
"sha512",
password,
salt_bytes,
100000,
dklen=64
)
b64_salt = base64.b64encode(salt_bytes).decode("utf-8")
b64_password = base64.b64encode(hashed_password).decode("utf-8")
password_string = "@ByteArray({salt}:{password})".format(
salt=b64_salt, password=b64_password
)
print(password_string)
'';
in {
files = {
"password" = {
secret = true;
deploy = true;
};
"password_hash" = {
secret = true;
deploy = true;
};
"qBittorrent.conf" = {
secret = true;
deploy = true;
owner = "qbittorrent";
group = "media";
mode = "0660";
restartUnits = ["qbittorrent.service"];
};
};
# Dropped the redundant hash_password entry: the script invokes it via its
# absolute store path below, and a writers script exposes no bin/ directory
# for PATH resolution anyway.
runtimeInputs = with pkgs; [pwgen];
script = ''
pwgen -s 128 1 > $out/password
${hash_password} $(cat $out/password) > $out/password_hash
cat << EOF > $out/qBittorrent.conf
[LegalNotice]
Accepted=true
[Preferences]
WebUI\AlternativeUIEnabled=true
WebUI\RootFolder=${pkgs.vuetorrent}/share/vuetorrent
WebUI\Username=admin
WebUI\Password_PBKDF2=$(cat $out/password_hash)
EOF
'';
};
# Copies the generated config into qBittorrent's profile after secrets are
# in place (ordering is against sops-nix's setupSecrets activation step).
system.activationScripts.qbittorrent-config = {
deps = lib.optional (!config.sops.useSystemdActivation) "setupSecrets";
# TODO: If sops-nix is switched to systemd activation, add a systemd unit
# for this install step that runs after sops-install-secrets.service,
# because this activation-script dependency only orders against setupSecrets.
text = ''
install -Dm0600 -o ${config.services.qbittorrent.user} -g ${config.services.qbittorrent.group} \
${config.clan.core.vars.generators.qbittorrent.files."qBittorrent.conf".path} \
${config.services.qbittorrent.profileDir}/qBittorrent/config/qBittorrent.conf
'';
};
services.qbittorrent = {
enable = true;
openFirewall = true;
webuiPort = port;
# Config is fully provided by the activation script above.
serverConfig = lib.mkForce {};
user = "qbittorrent";
group = "media";
};
}

View file

@ -1,95 +0,0 @@
# Configures SABnzbd with generated API/NZB keys and prompted usenet
# credentials; the secret half of the configuration is delivered as an
# extra config.ini merged in via services.sabnzbd.secretFiles.
{
config,
lib,
pkgs,
settings,
port,
...
}: {
clan.core.vars.generators.sabnzbd = {
files = {
"api_key" = {
secret = true;
deploy = true;
};
"nzb_key" = {
secret = true;
deploy = true;
};
"config.ini" = {
secret = true;
deploy = true;
owner = "sabnzbd";
group = "media";
mode = "0660";
};
};
prompts = {
username = {
description = "usenet username";
type = "hidden";
persist = true;
};
password = {
description = "usenet password";
type = "hidden";
persist = true;
};
};
runtimeInputs = with pkgs; [pwgen];
# Secret-bearing INI fragment: the keys plus credentials for the usenet
# server whose non-secret settings are declared under services.sabnzbd below.
script = ''
pwgen -s 128 1 > $out/api_key
pwgen -s 128 1 > $out/nzb_key
cat << EOF > $out/config.ini
[misc]
api_key = $(cat $out/api_key)
nzb_key = $(cat $out/nzb_key)
[servers]
[[news.sunnyusenet.com]]
username = $(cat $prompts/username)
password = $(cat $prompts/password)
EOF
'';
};
services.sabnzbd = {
enable = true;
openFirewall = true;
allowConfigWrite = false;
configFile = lib.mkForce null;
secretFiles = [
config.clan.core.vars.generators.sabnzbd.files."config.ini".path
];
settings = {
misc = {
host = "0.0.0.0";
port = port;
host_whitelist = "${config.networking.hostName}";
download_dir = "/var/media/downloads/incomplete";
complete_dir = "/var/media/downloads/done";
};
servers = {
"news.sunnyusenet.com" = {
name = "news.sunnyusenet.com";
displayname = "news.sunnyusenet.com";
host = "news.sunnyusenet.com";
port = 563;
timeout = 60;
};
};
};
user = "sabnzbd";
group = "media";
};
}

View file

@ -1,22 +0,0 @@
# Default development shell: day-to-day tooling for this repo plus the clan
# CLI from the clan-core flake input.
{
  inputs,
  ...
}: {
  perSystem = {
    pkgs,
    system,
    ...
  }: {
    devShells.default = pkgs.mkShell {
      packages = with pkgs; [
        # general tooling
        bash
        sops
        just
        yq
        pwgen
        # nix tooling
        alejandra
        nil
        nixd
        openssl
        inputs.clan-core.packages.${system}.clan-cli
        nix-output-monitor
      ];
    };
  };
}

View file

@ -1,125 +0,0 @@
# Mandos as a wake-on-demand build host
## Goal
Mandos is primarily an interactive living-room machine, but it is also a strong candidate for handling remote Nix builds when it is idle. The goal is to make that dual use practical without keeping the machine powered all the time.
## Current context
On `main`, Mandos is configured as an interactive gaming machine:
- `systems/x86_64-linux/mandos/default.nix`
- `sneeuwvlok.hardware.has.gpu.nvidia = true`
- `sneeuwvlok.hardware.has.audio = true`
- `sneeuwvlok.desktop.use = "gamescope"`
- `sneeuwvlok.application.steam.enable = true`
- `homes/x86_64-linux/chris@mandos/default.nix`
- user-facing application set for an interactive machine
This makes Mandos a poor fit for "always running random infrastructure", but a reasonable fit for "available for work when needed".
## Desired behavior
- Mandos remains an interactive machine first.
- Mandos can be used as a remote build worker when no one is actively using it.
- Mandos should not need to stay fully on all day just to be eligible for builds.
- Waking and idling down should be automatic enough that the machine can participate in builds without turning into a maintenance burden.
## Recommended model
### 1. Use wake-on-LAN as the activation mechanism
Mandos should support being awakened by another machine on the same LAN.
Requirements:
- BIOS or UEFI wake-on-LAN support enabled
- NixOS interface configuration enabling wake-on-LAN
- one low-power machine that is effectively always available to send wake requests
In this repo, `ulmo` is the obvious candidate to act as the coordinator, but the pattern should stay generic: one machine is always reachable, and one or more stronger machines can be woken on demand.
### 2. Prefer suspend-first over shutdown-first
There are two main power states worth considering:
- **Suspend on idle**
- faster resume
- generally better user experience
- often easier to make reliable for wake-on-LAN
- **Shutdown on idle**
- lowest power draw
- more fragile in practice because firmware support for wake from soft-off varies
- longer time to become available again
Recommended rollout order:
1. Prove the concept with suspend on idle.
2. Only consider full power-off later if the hardware and firmware behave reliably.
### 3. Add an explicit availability policy
The interesting lesson for tagging is not "Mandos should have a build tag". The interesting lesson is that some machines have a deliberate availability policy that affects how safely they can participate in automation.
A future host-level setting could encode this policy directly, for example:
- `always-on`
- `wake-on-demand`
- `manual`
That setting would be a better source for any computed operational tag than current workload or ad hoc tags.
### 4. Idle detection should be policy-driven
If Mandos becomes a build worker, idle shutdown or suspend should depend on signals such as:
- no local interactive session activity
- no active build job
- no long-running system task that should keep the machine awake
This should not be a blind timer that powers the machine down every X minutes regardless of context.
### 5. Build orchestration needs a coordinator
Wake-on-demand only works well if something else can wake the machine and wait for it to become reachable. In practice, this means:
- a coordinator sends the wake signal
- the build client retries until the machine is reachable
- the remote builder participates only after it is actually ready
The exact implementation can vary, but the architectural point is the same: a wakeable build worker is not self-sufficient.
## Risks and caveats
- Firmware wake support may be unreliable, especially from full shutdown.
- Build latency increases because wake and readiness checks take time.
- A machine that users expect to be immediately available should not surprise them with power-state transitions at awkward moments.
- Interactive workload detection matters; otherwise the machine will feel hostile as a living-room device.
## Recommendation
Treat the Mandos idea as a good pattern, but generalize it:
- some machines are **interactive**
- some machines are **wakeable on demand**
- some machines are suitable for **interruptible background work**
Those are more reusable concepts than "Mandos is the build server".
## Implications for the tag strategy
This investigation strengthens a small part of the `operational:*` space:
- `operational:availability:always-on`
- `operational:availability:wake-on-demand`
- `operational:workload:interruptible`
These should not be assigned by hand if they can instead be computed from explicit machine settings that describe availability policy.
## References
- Clan inventory tags and dynamic tags docs: `https://clan.lol/docs/25.11/reference/options/clan_inventory`
- NixOS Wake-on-LAN wiki: `https://wiki.nixos.org/wiki/Wake_on_LAN`
- Home-lab wake-on-demand discussion and patterns:
- `https://dgross.ca/blog/linux-home-server-auto-sleep`
- `https://danielpgross.github.io/friendly_neighbor/howto-sleep-wake-on-demand.html`

View file

@ -1,235 +0,0 @@
# Clan machine tagging strategy
## Goal
Replace machine-name targeting with stable tags that survive machine renames, hardware reshuffles, and service moves.
The strategy should fit how this repo is evolving:
- machine tags should describe the machine
- service roles should describe service topology
- computed tags should be derived from machine settings or other explicit metadata, not from other tags
## Source material
This plan is based on:
- current Clan inventory in `clan.nix`
- current machine configs under `machines/*/configuration.nix`
- workload and module usage on `main` under:
- `systems/x86_64-linux/*/default.nix`
- `homes/x86_64-linux/chris@*/default.nix`
- Clan inventory tag and dynamic-tag documentation
## Guiding principles
### 1. Prefer capabilities over roles
A machine rarely has one permanent role. In this repo especially, a machine may be interactive, portable, build-capable, and temporarily host some service at the same time.
Because of that, tags should describe durable traits and capabilities rather than trying to answer "what is this machine?"
### 2. Do not encode current workload as a machine tag
A machine currently running Grafana, Jellyfin, or PostgreSQL does not mean that those should become machine tags. Those are current placements, not stable identity.
If a service can move, its current presence is weak evidence for tagging.
### 3. Use service roles for topology
Some relationships belong in service definitions rather than host tags.
Examples:
- NFS producer and consumer
- persistence provider and client
- reverse proxy frontend and backend
These are not machine identity tags; they are service-topology relationships.
### 4. Derive tags from settings when possible
If a machine setting already captures a fact, derive the tag from that setting instead of duplicating it by hand.
Good examples in this repo:
- `desktop.use` can imply whether a machine is interactive
- `hardware.has.gpu.*` can imply GPU availability
- `hardware.has.audio` can imply audio capability
- `hardware.has.bluetooth` can imply Bluetooth capability
### 5. Avoid deriving tags from other tags
Clan supports dynamic tags, but tag-from-tag derivation can become fragile and can even recurse. If tags need computation, compute them from machine settings or an explicit metadata source instead.
## Proposed namespaces
Use full words:
- `capability:*`
- `operational:*`
The intention is:
- `capability:*` describes stable machine traits
- `operational:*` describes automation-relevant policy or availability behavior
## Tag catalog
This is the current list of tags discussed so far, grouped by status.
### Agreed capability tags
- `capability:runtime:interactive`
- `capability:runtime:headless`
- `capability:hardware:gpu`
- `capability:hardware:audio`
- `capability:hardware:bluetooth`
- `capability:mobility:portable`
- `capability:mobility:stationary`
### Agreed operational tags
- `operational:availability:always-on`
- `operational:availability:wake-on-demand`
- `operational:availability:manual`
- `operational:workload:interruptible`
### Explicitly rejected or deferred
- GPU vendor-specific tags such as AMD- or NVIDIA-specific variants
- service-presence tags such as Jellyfin, Grafana, Forgejo, or PostgreSQL
- service-topology tags such as NFS producer or consumer
- application-presence tags such as Discord or TeamSpeak
- desktop-environment tags such as Plasma or Gamescope
- location tags such as "living room" unless location later becomes a deliberate scheduling dimension
## Current static tags in `clan.nix`
These are the manually assigned tags currently present in the inventory. Settings-derived tags are intentionally not listed here because they are meant to be computed rather than maintained by hand.
- `mandos`
- `capability:mobility:stationary`
- `operational:availability:wake-on-demand`
- `manwe`
- `capability:mobility:stationary`
- `operational:availability:manual`
- `orome`
- `capability:mobility:portable`
- `operational:availability:manual`
- `tulkas`
- `capability:mobility:portable`
- `operational:availability:manual`
- `ulmo`
- `capability:mobility:stationary`
- `operational:availability:always-on`
## Capability tags
These are the strongest candidates for machine tags.
### Runtime
- `capability:runtime:interactive`
- `capability:runtime:headless`
These are directly useful for deciding where a service with a user-facing local experience does or does not belong.
### Hardware
- `capability:hardware:gpu`
- `capability:hardware:audio`
- `capability:hardware:bluetooth`
At the moment, the repo provides enough configuration structure to derive these from machine settings.
GPU vendor-specific tags are intentionally excluded for now. The current conclusion is that the presence of GPU hardware may matter, but the vendor usually does not unless there is a specific workload that depends on CUDA, ROCm, or a similar stack.
### Mobility
- `capability:mobility:portable`
- `capability:mobility:stationary`
These are useful concepts, but they are not currently obvious from one uniform machine setting in the repo. If they become desirable, they likely need either:
- an explicit machine setting, or
- a stronger convention around machine form factor
For now they are candidates, not automatic defaults.
## Operational tags
Operational tags are weaker than capability tags and should stay small in number.
They should only exist when they capture real automation constraints that are not already represented elsewhere.
### Availability
- `operational:availability:always-on`
- `operational:availability:wake-on-demand`
- `operational:availability:manual`
This dimension became clearer while thinking through the Mandos build-host idea. A machine may be technically capable of a workload, while its availability policy determines whether it is a sensible target.
These tags should not be guessed from existing workloads. They should come from an explicit machine setting that states the intended availability policy.
### Interruptibility
- `operational:workload:interruptible`
This is not about the machine by itself. It is a useful policy boundary for selecting machines that may host work that can be delayed, retried, paused, or moved.
If introduced, it should again come from explicit machine policy rather than being inferred from current services.
## What should not become machine tags
- current service assignments, such as Jellyfin, Grafana, Forgejo, or PostgreSQL
- service topology, such as NFS producer or consumer
- user application presence, such as Discord or TeamSpeak
- detailed desktop-environment choice, such as Plasma or Gamescope
- one-off descriptions like "living room" unless location becomes a deliberate scheduling dimension
## What is derivable today
The repo already contains enough structure to derive several useful capability tags.
Examples from the current configuration style:
- if a machine enables a desktop session, derive `capability:runtime:interactive`
- if a machine does not, derive `capability:runtime:headless`
- if a machine enables `hardware.has.audio`, derive `capability:hardware:audio`
- if a machine enables `hardware.has.bluetooth`, derive `capability:hardware:bluetooth`
- if a machine enables any `hardware.has.gpu.*`, derive `capability:hardware:gpu`
## What probably needs explicit policy
These should not be inferred from current services or tag combinations:
- `operational:availability:*`
- `operational:workload:interruptible`
- mobility-related tags if there is no explicit machine setting to derive them from
The clean way to support these is to introduce one or more explicit machine settings whose purpose is to describe machine policy rather than workload.
## Mandos update
The Mandos wake-on-demand build-host idea adds an important refinement:
- some machines should be eligible for background work only when they are available through a specific policy, such as wake-on-demand
This does **not** mean Mandos should get a hand-maintained "build server" tag.
It instead suggests a more generic pattern:
- a machine may be interactive
- a machine may be available on demand rather than always on
- that availability policy may influence whether certain classes of automation should target it
That strengthens the case for a very small `operational:*` namespace derived from explicit machine policy.
## Recommended next steps
1. Start with `capability:*` tags that are clearly derivable from machine settings.
2. Keep service topology in service roles instead of machine tags.
3. If availability policy becomes important, add an explicit machine setting for it and derive `operational:*` tags from that setting.
4. Avoid expanding the tag vocabulary until there is a clear service-selection use case for each added tag.

654
flake.lock generated

File diff suppressed because it is too large Load diff

124
flake.nix
View file

@ -3,22 +3,13 @@
nixConfig = {
warn-dirty = false;
extra-experimental-features = ["nix-command" "flakes" "pipe-operators"];
};
inputs = {
nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
flake-parts = {
url = "github:hercules-ci/flake-parts";
inputs.nixpkgs-lib.follows = "nixpkgs";
};
import-tree.url = "github:vic/import-tree";
systems.url = "github:nix-systems/default";
sops-nix.url = "github:Mic92/sops-nix";
disko = {
url = "github:nix-community/disko";
snowfall-lib = {
url = "github:snowfallorg/lib";
inputs.nixpkgs.follows = "nixpkgs";
};
@ -27,32 +18,25 @@
inputs.nixpkgs.follows = "nixpkgs";
};
terranix = {
url = "github:terranix/terranix";
inputs.nixpkgs.follows = "nixpkgs";
inputs.flake-parts.follows = "flake-parts";
};
clan-core = {
url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
inputs = {
flake-parts.follows = "flake-parts";
nixpkgs.follows = "nixpkgs";
sops-nix.follows = "sops-nix";
disko.follows = "disko";
systems.follows = "systems";
};
};
plasma-manager = {
url = "github:nix-community/plasma-manager";
inputs.nixpkgs.follows = "nixpkgs";
inputs.home-manager.follows = "home-manager";
};
nixos-generators = {
url = "github:nix-community/nixos-generators";
inputs.nixpkgs.follows = "nixpkgs";
};
# neovim
nvf.url = "github:notashelf/nvf";
# plymouth theme
nixos-boot.url = "github:Melkor333/nixos-boot";
firefox.url = "github:nix-community/flake-firefox-nightly";
stylix.url = "github:nix-community/stylix";
# Rust toolchain
@ -70,6 +54,8 @@
flux.url = "github:IogaMaster/flux";
sops-nix.url = "github:Mic92/sops-nix";
# Azure AD for linux
himmelblau = {
url = "github:himmelblau-idm/himmelblau";
@ -89,34 +75,58 @@
url = "github:vinceliuice/grub2-themes";
};
nixos-wsl = {
url = "github:nix-community/nixos-wsl";
inputs = {
nixpkgs.follows = "nixpkgs";
flake-compat.follows = "";
};
};
terranix = {
url = "github:terranix/terranix";
inputs.nixpkgs.follows = "nixpkgs";
};
clan-core = {
url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
inputs.nixpkgs.follows = "nixpkgs";
};
mydia = {
url = "github:chris-kruining/mydia";
# url = "github:getmydia/mydia";
};
};
outputs = inputs @ {
flake-parts,
nixpkgs,
systems,
...
}:
flake-parts.lib.mkFlake {inherit inputs;} {
systems = import systems;
outputs = inputs:
inputs.snowfall-lib.mkFlake {
inherit inputs;
src = ./.;
imports = with inputs; [
flake-parts.flakeModules.modules
clan-core.flakeModules.default
home-manager.flakeModules.default
./clan/flake-module.nix
./packages/flake-module.nix
./clanServices/flake-module.nix
snowfall = {
namespace = "sneeuwvlok";
meta = {
name = "sneeuwvlok";
title = "Sneeuwvlok";
};
};
channels-config = {
allowUnfree = true;
permittedInsecurePackages = [
# Due to *arr stack
"dotnet-sdk-6.0.428"
"aspnetcore-runtime-6.0.36"
# I think this is because of zen
"qtwebengine-5.15.19"
# For Nheko, the matrix client
"olm-3.2.16"
];
perSystem = {system, ...}: {
_module.args = {
pkgs = import nixpkgs {
inherit system;
};
overlays = with inputs; [
fenix.overlays.default
@ -124,19 +134,13 @@
flux.overlays.default
];
config = {
allowUnfree = true;
systems.modules = with inputs; [
clan-core.nixosModules.default
];
permittedInsecurePackages = [
# I think this is because of zen
"qtwebengine-5.15.19"
# For mautrix-signal, the matrix to signal bridge
"olm-3.2.16"
homes.modules = with inputs; [
stylix.homeModules.stylix
plasma-manager.homeModules.plasma-manager
];
};
};
};
};
};
}

View file

@ -0,0 +1,36 @@
# Home-manager user profile: minimal application set (bitwarden, teamspeak,
# steam, zen); no development tooling configured here.
{osConfig, ...}: {
# Keep home-manager's state version in lockstep with the host's NixOS one.
home.stateVersion = osConfig.system.stateVersion;
programs.git = {
settings.user = {
name = "Chris Kruining";
email = "chris@kruining.eu";
};
};
sneeuwvlok = {
# Default program choices consumed by other modules in this namespace.
defaults = {
shell = "zsh";
terminal = "ghostty";
browser = "zen";
editor = "zed";
};
shell = {
corePkgs.enable = true;
};
themes = {
enable = true;
theme = "everforest";
polarity = "dark";
};
application = {
bitwarden.enable = true;
teamspeak.enable = true;
steam.enable = true;
zen.enable = true;
};
};
}

View file

@ -0,0 +1,59 @@
# Home-manager user profile: full workstation setup with development
# toolchains (rust, javascript, dotnet) and a broad application set.
{osConfig, ...}: {
# Keep home-manager's state version in lockstep with the host's NixOS one.
home.stateVersion = osConfig.system.stateVersion;
programs.git = {
settings.user = {
name = "Chris Kruining";
email = "chris@kruining.eu";
};
};
sneeuwvlok = {
# Default program choices consumed by other modules in this namespace.
defaults = {
shell = "zsh";
terminal = "ghostty";
browser = "zen";
editor = "zed";
};
shell = {
corePkgs.enable = true;
};
themes = {
enable = true;
theme = "everforest";
polarity = "dark";
};
development = {
rust.enable = true;
javascript.enable = true;
dotnet.enable = true;
};
application = {
bitwarden.enable = true;
discord.enable = true;
ladybird.enable = true;
matrix.enable = true;
obs.enable = true;
onlyoffice.enable = true;
signal.enable = true;
steam.enable = true;
studio.enable = true;
teamspeak.enable = true;
thunderbird.enable = true;
zen.enable = true;
};
# Note: `shell` is also defined above; Nix merges literal attrset
# definitions, so this adds zsh alongside corePkgs.
shell.zsh.enable = true;
terminal.ghostty.enable = true;
editor = {
zed.enable = true;
nvim.enable = true;
nano.enable = true;
};
};
}

View file

@ -0,0 +1,49 @@
# Home-manager user profile: lighter development setup (javascript, dotnet)
# with a small application set.
{osConfig, ...}: {
# Keep home-manager's state version in lockstep with the host's NixOS one.
home.stateVersion = osConfig.system.stateVersion;
programs.git = {
settings.user = {
name = "Chris Kruining";
email = "chris@kruining.eu";
};
};
sneeuwvlok = {
# Default program choices consumed by other modules in this namespace.
defaults = {
shell = "zsh";
terminal = "ghostty";
browser = "zen";
editor = "zed";
};
shell = {
corePkgs.enable = true;
};
themes = {
enable = true;
theme = "everforest";
polarity = "dark";
};
development = {
javascript.enable = true;
dotnet.enable = true;
};
application = {
bitwarden.enable = true;
onlyoffice.enable = true;
signal.enable = true;
zen.enable = true;
};
# Note: `shell` is also defined above; Nix merges literal attrset
# definitions, so this adds zsh alongside corePkgs.
shell.zsh.enable = true;
terminal.ghostty.enable = true;
editor = {
zed.enable = true;
nano.enable = true;
};
};
}

View file

@ -0,0 +1,36 @@
# Home-manager user profile: minimal application set (bitwarden, teamspeak,
# steam, zen); content is identical to the other minimal profile in this diff.
{osConfig, ...}: {
# Keep home-manager's state version in lockstep with the host's NixOS one.
home.stateVersion = osConfig.system.stateVersion;
programs.git = {
settings.user = {
name = "Chris Kruining";
email = "chris@kruining.eu";
};
};
sneeuwvlok = {
# Default program choices consumed by other modules in this namespace.
defaults = {
shell = "zsh";
terminal = "ghostty";
browser = "zen";
editor = "zed";
};
shell = {
corePkgs.enable = true;
};
themes = {
enable = true;
theme = "everforest";
polarity = "dark";
};
application = {
bitwarden.enable = true;
teamspeak.enable = true;
steam.enable = true;
zen.enable = true;
};
};
}

View file

@ -1,37 +0,0 @@
# Shared option builders for service modules.
{lib, ...}: let
inherit (lib) mkOption types;
in {
# mkUrlOptions: the standard host/port/protocol option trio for a service
# URL. `defaults` lets callers override attributes of each option, e.g.
# { port = { default = 8080; }; }.
mkUrlOptions = defaults: {
host =
mkOption {
type = types.str;
example = "host.tld";
description = ''
Hostname
'';
}
// (defaults.host or {});
port =
mkOption {
type = types.port;
default = 1234;
# Example must be an integer to match types.port (was the string "1234").
example = 1234;
description = ''
Port
'';
}
// (defaults.port or {});
protocol =
mkOption {
type = types.str;
default = "https";
example = "https";
description = ''
Which protocol to use when creating a url string
'';
}
// (defaults.protocol or {});
};
}

38
lib/options/default.nix Normal file
View file

@ -0,0 +1,38 @@
# Shared option builders exposed under `options`.
{ lib, ...}:
let
  # Only mkOption and types are actually used; the previous revision also
  # inherited isString/typeOf/throwIfNot/concatStringsSep/splitStringBy/
  # toLower/map, none of which appear in this file.
  inherit (lib) mkOption types;
in
{
  options = {
    # mkUrlOptions: the standard host/port/protocol option trio for a
    # service URL. `defaults` lets callers override attributes of each
    # option, e.g. { port = { default = 8080; }; }.
    mkUrlOptions =
      defaults:
      {
        host = mkOption {
          type = types.str;
          example = "host.tld";
          description = ''
            Hostname
          '';
        } // (defaults.host or {});
        port = mkOption {
          type = types.port;
          default = 1234;
          # Example must be an integer to match types.port (was the string "1234").
          example = 1234;
          description = ''
            Port
          '';
        } // (defaults.port or {});
        protocol = mkOption {
          type = types.str;
          default = "https";
          example = "https";
          description = ''
            Which protocol to use when creating a url string
          '';
        } // (defaults.protocol or {});
      };
  };
}

View file

@ -1,53 +0,0 @@
# String helper functions shared across the flake.
{lib, ...}: let
inherit (builtins) isString typeOf match toString head;
inherit (lib) throwIfNot concatStringsSep splitStringBy toLower map concatMapAttrsStringSep;
in {
#========================================================================================
# Converts a string to snake case
#
# simply replaces any uppercase letter to its lowercase variant preceded by an underscore
#========================================================================================
toSnakeCase = str:
throwIfNot (isString str) "toSnakeCase only accepts string values, but got ${typeOf str}" (
str
|> splitStringBy (prev: curr: builtins.match "[a-z]" prev != null && builtins.match "[A-Z]" curr != null) true
|> map (p: toLower p)
|> concatStringsSep "_"
);
#========================================================================================
# Converts a set of url parts to a string
#
# All parts except `host` are optional; omitted parts contribute nothing to
# the result.
#========================================================================================
toUrl = {
protocol ? null,
host,
port ? null,
path ? null,
query ? null,
hash ? null,
}: let
# NOTE(review): builtins.match uses POSIX extended regexes, which have no
# lazy "+?" quantifier — confirm this actually strips trailing slashes.
trim_slashes = str: str |> match "^\/*(.+?)\/*$" |> head;
# Render an attrset as "k1=v1&k2=v2" (values are not URL-encoded).
encode_to_str = set: concatMapAttrsStringSep "&" (n: v: "${n}=${v}") set;
_protocol =
if protocol != null
then "${protocol}://"
else "";
_port =
if port != null
then ":${toString port}"
else "";
_path =
if path != null
then "/${path |> trim_slashes}"
else "";
_query =
if query != null
then "?${query |> encode_to_str}"
else "";
_hash =
if hash != null
then "#${hash |> encode_to_str}"
else "";
in "${_protocol}${host}${_port}${_path}${_query}${_hash}";
}

39
lib/strings/default.nix Normal file
View file

@ -0,0 +1,39 @@
# String helper functions exposed under `strings`.
{ lib, ...}:
let
inherit (builtins) isString typeOf match toString head;
inherit (lib) throwIfNot concatStringsSep splitStringBy toLower map concatMapAttrsStringSep;
in
{
strings = {
#========================================================================================
# Converts a string to snake case
#
# simply replaces any uppercase letter to its lowercase variant preceded by an underscore
#========================================================================================
toSnakeCase =
str:
throwIfNot (isString str) "toSnakeCase only accepts string values, but got ${typeOf str}" (
str
|> splitStringBy (prev: curr: builtins.match "[a-z]" prev != null && builtins.match "[A-Z]" curr != null) true
|> map (p: toLower p)
|> concatStringsSep "_"
);
#========================================================================================
# Converts a set of url parts to a string
#
# All parts except `host` are optional; omitted parts contribute nothing.
#========================================================================================
toUrl =
{ protocol ? null, host, port ? null, path ? null, query ? null, hash ? null }:
let
# NOTE(review): builtins.match uses POSIX extended regexes, which have
# no lazy "+?" quantifier — confirm this actually strips trailing slashes.
trim_slashes = str: str |> match "^\/*(.+?)\/*$" |> head;
# Render an attrset as "k1=v1&k2=v2" (values are not URL-encoded).
encode_to_str = set: concatMapAttrsStringSep "&" (n: v: "${n}=${v}") set;
_protocol = if protocol != null then "${protocol}://" else "";
_port = if port != null then ":${toString port}" else "";
_path = if path != null then "/${path |> trim_slashes}" else "";
_query = if query != null then "?${query |> encode_to_str}" else "";
_hash = if hash != null then "#${hash |> encode_to_str}" else "";
in
"${_protocol}${host}${_port}${_path}${_query}${_hash}";
};
}

View file

@ -0,0 +1,2 @@
{"level":"fatal","error":"homeserver.address not configured","time":"2026-04-15T09:10:06.949460064Z","message":"Configuration error"}
{"level":"info","time":"2026-04-15T09:10:06.949840013Z","message":"See https://docs.mau.fi/faq/field-unconfigured for more info"}

2
logs/bridge.log Normal file
View file

@ -0,0 +1,2 @@
{"level":"fatal","error":"appservice.as_token not configured. Did you forget to generate the registration? ","time":"2026-04-15T09:11:43.617908298Z","message":"Configuration error"}
{"level":"info","time":"2026-04-15T09:11:43.618232253Z","message":"See https://docs.mau.fi/faq/field-unconfigured for more info"}

View file

@ -1,3 +0,0 @@
# Minimal NixOS module: pins the host platform to x86_64-linux.
{ ... }: {
nixpkgs.hostPlatform = "x86_64-linux";
}

View file

@ -1,40 +0,0 @@
# NixOS machine configuration: NVIDIA GPU, gamescope desktop session, Steam.
{self, ...}: {
imports = [
./disks.nix
./hardware.nix
self.inputs.home-manager.nixosModules.home-manager
self.inputs.himmelblau.nixosModules.himmelblau
self.inputs.jovian.nixosModules.default
self.inputs.mydia.nixosModules.default
self.inputs.nix-minecraft.nixosModules.minecraft-servers
self.inputs.nvf.nixosModules.default
self.inputs.sops-nix.nixosModules.sops
(self.inputs.import-tree ../../modules/nixos)
];
nixpkgs.hostPlatform = "x86_64-linux";
sneeuwvlok = {
hardware.has = {
gpu.nvidia = true;
audio = true;
};
boot = {
quiet = true;
animated = true;
};
desktop.use = "gamescope";
application = {
steam.enable = true;
};
editor = {
nano.enable = true;
};
};
system.stateVersion = "23.11";
}

View file

@ -1,79 +0,0 @@
# NixOS machine configuration: AMD GPU, Plasma desktop with auto-login.
{
self,
lib,
pkgs,
...
}: {
# Force a locally pinned nixpkgs import so this machine controls its own
# overlays and insecure-package allowances.
_module.args = {
pkgs = lib.mkForce (import self.inputs.nixpkgs {
system = "x86_64-linux";
overlays = with self.inputs; [
fenix.overlays.default
nix-minecraft.overlay
flux.overlays.default
];
config = {
allowUnfree = true;
permittedInsecurePackages = [
# I think this is because of zen
"qtwebengine-5.15.19"
# For mautrix-signal, the matrix to signal bridge
"olm-3.2.16"
];
};
});
};
imports = [
./disks.nix
./hardware.nix
self.inputs.home-manager.nixosModules.home-manager
self.inputs.himmelblau.nixosModules.himmelblau
self.inputs.jovian.nixosModules.default
self.inputs.mydia.nixosModules.default
self.inputs.nix-minecraft.nixosModules.minecraft-servers
self.inputs.nvf.nixosModules.default
self.inputs.sops-nix.nixosModules.sops
(self.inputs.import-tree ../../modules/nixos)
];
# Workaround: delete a stray GTK2 rc file at activation time.
system.activationScripts.remove-gtkrc.text = "rm -f /home/chris/.gtkrc-2.0";
services.logrotate.checkConfig = false;
environment.systemPackages = with pkgs; [beyond-all-reason openrct2];
sneeuwvlok = {
hardware.has = {
gpu.amd = true;
bluetooth = true;
audio = true;
};
boot = {
quiet = true;
animated = true;
};
desktop.use = "plasma";
application = {
steam.enable = true;
};
editor = {
nano.enable = true;
};
};
services.displayManager.autoLogin = {
enable = true;
user = "chris";
};
system.stateVersion = "23.11";
}

View file

@ -1,18 +0,0 @@
# Hardware profile: AMD CPU (kvm-amd) with standard SATA/USB boot modules.
{
config,
lib,
...
}: let
inherit (lib.modules) mkDefault;
in {
boot = {
initrd.availableKernelModules = ["xhci_pci" "ahci" "usb_storage" "usbhid" "sd_mod"];
initrd.kernelModules = [];
kernelModules = ["kvm-amd"];
kernelParams = [];
extraModulePackages = [];
};
nixpkgs.hostPlatform = "x86_64-linux";
# Only update microcode when redistributable firmware is already allowed.
hardware.cpu.amd.updateMicrocode = mkDefault config.hardware.enableRedistributableFirmware;
}

View file

@ -1,3 +0,0 @@
# Minimal NixOS module: pins the host platform to x86_64-linux.
{ ... }: {
nixpkgs.hostPlatform = "x86_64-linux";
}

View file

@ -1,44 +0,0 @@
# NixOS machine configuration: headless-style desktop-less host with
# Azure AD (himmelblau) authentication and Azure/Copilot CLI tooling.
{
self,
pkgs,
...
}: {
imports = [
./disks.nix
./hardware.nix
self.inputs.home-manager.nixosModules.home-manager
self.inputs.himmelblau.nixosModules.himmelblau
self.inputs.jovian.nixosModules.default
self.inputs.mydia.nixosModules.default
self.inputs.nix-minecraft.nixosModules.minecraft-servers
self.inputs.nvf.nixosModules.default
self.inputs.sops-nix.nixosModules.sops
(self.inputs.import-tree ../../modules/nixos)
];
nixpkgs.hostPlatform = "x86_64-linux";
environment.systemPackages = with pkgs; [
azure-cli
github-copilot-cli
];
sneeuwvlok = {
hardware.has = {
bluetooth = true;
audio = true;
};
services.authentication.himmelblau.enable = true;
application = {
steam.enable = true;
};
editor = {
nano.enable = true;
};
};
system.stateVersion = "23.11";
}

View file

@ -1,41 +0,0 @@
# NixOS machine configuration: AMD GPU, gamescope desktop session, Steam.
{self, ...}: {
imports = [
./disks.nix
./hardware.nix
self.inputs.home-manager.nixosModules.home-manager
self.inputs.himmelblau.nixosModules.himmelblau
self.inputs.jovian.nixosModules.default
self.inputs.mydia.nixosModules.default
self.inputs.nix-minecraft.nixosModules.minecraft-servers
self.inputs.nvf.nixosModules.default
self.inputs.sops-nix.nixosModules.sops
(self.inputs.import-tree ../../modules/nixos)
];
nixpkgs.hostPlatform = "x86_64-linux";
sneeuwvlok = {
hardware.has = {
gpu.amd = true;
bluetooth = true;
audio = true;
};
boot = {
quiet = true;
animated = true;
};
desktop.use = "gamescope";
application = {
steam.enable = true;
};
editor = {
nano.enable = true;
};
};
system.stateVersion = "23.11";
}

View file

@ -1,286 +0,0 @@
# NixOS machine configuration: pinned nixpkgs, static IPv6 networking;
# almost all service configuration is currently commented out pending
# migration.
{
pkgs,
lib,
self,
...
}: {
# Force a locally pinned nixpkgs import so this machine controls its own
# overlays and insecure-package allowances.
_module.args = {
pkgs = lib.mkForce (import self.inputs.nixpkgs {
system = "x86_64-linux";
overlays = with self.inputs; [
fenix.overlays.default
nix-minecraft.overlay
flux.overlays.default
];
config = {
allowUnfree = true;
permittedInsecurePackages = [
# I think this is because of zen
"qtwebengine-5.15.19"
# For mautrix-signal, the matrix to signal bridge
"olm-3.2.16"
];
};
});
};
imports = [
./disks.nix
./hardware.nix
self.inputs.home-manager.nixosModules.home-manager
self.inputs.himmelblau.nixosModules.himmelblau
self.inputs.jovian.nixosModules.default
self.inputs.mydia.nixosModules.default
self.inputs.nix-minecraft.nixosModules.minecraft-servers
self.inputs.nvf.nixosModules.default
self.inputs.sops-nix.nixosModules.sops
(self.inputs.import-tree ../../modules/nixos)
];
system.stateVersion = "23.11";
# Static IPv6 address plus DHCP on the wired interface; explicit v4/v6
# default gateways.
networking = {
interfaces.enp2s0 = {
ipv6.addresses = [
{
address = "2a0d:6e00:1dc9:0::dead:beef";
prefixLength = 64;
}
];
useDHCP = true;
};
defaultGateway = {
address = "192.168.1.1";
interface = "enp2s0";
};
defaultGateway6 = {
address = "fe80::1";
interface = "enp2s0";
};
};
# Everything below is retained but disabled: zitadel auth, matrix, forgejo,
# caddy hosts, media stack, observability and vaultwarden configuration.
# sneeuwvlok = {
#   services = {
#     backup.borg.enable = true;
#     authentication.zitadel = {
#       enable = true;
#       organization = {
#         nix = {
#           user = {
#             chris = {
#               email = "chris@kruining.eu";
#               firstName = "Chris";
#               lastName = "Kruining";
#               roles = ["ORG_OWNER"];
#               instanceRoles = ["IAM_OWNER"];
#             };
#             kaas = {
#               email = "chris+kaas@kruining.eu";
#               firstName = "Kaas";
#               lastName = "Kruining";
#             };
#           };
#           project = {
#             ulmo = {
#               projectRoleCheck = true;
#               projectRoleAssertion = true;
#               hasProjectCheck = true;
#               role = {
#                 jellyfin = {
#                   group = "jellyfin";
#                 };
#                 jellyfin_admin = {
#                   group = "jellyfin";
#                 };
#               };
#               assign = {
#                 chris = ["jellyfin" "jellyfin_admin"];
#                 kaas = ["jellyfin"];
#               };
#               application = {
#                 jellyfin = {
#                   redirectUris = ["https://jellyfin.kruining.eu/sso/OID/redirect/zitadel"];
#                   grantTypes = ["authorizationCode"];
#                   responseTypes = ["code"];
#                 };
#                 forgejo = {
#                   redirectUris = ["https://git.amarth.cloud/user/oauth2/zitadel/callback"];
#                   grantTypes = ["authorizationCode"];
#                   responseTypes = ["code"];
#                 };
#                 vaultwarden = {
#                   redirectUris = ["https://vault.kruining.eu/identity/connect/oidc-signin"];
#                   grantTypes = ["authorizationCode"];
#                   responseTypes = ["code"];
#                   exportMap = {
#                     client_id = "SSO_CLIENT_ID";
#                     client_secret = "SSO_CLIENT_SECRET";
#                   };
#                 };
#                 matrix = {
#                   redirectUris = ["https://matrix.kruining.eu/_synapse/client/oidc/callback"];
#                   grantTypes = ["authorizationCode"];
#                   responseTypes = ["code"];
#                 };
#                 mydia = {
#                   redirectUris = ["http://localhost:2010/auth/oidc/callback"];
#                   grantTypes = ["authorizationCode"];
#                   responseTypes = ["code"];
#                 };
#                 grafana = {
#                   redirectUris = ["http://localhost:9001/login/generic_oauth"];
#                   grantTypes = ["authorizationCode"];
#                   responseTypes = ["code"];
#                 };
#               };
#             };
#             convex = {
#               projectRoleCheck = true;
#               projectRoleAssertion = true;
#               hasProjectCheck = true;
#               application = {
#                 scry = {
#                   redirectUris = ["https://nautical-salamander-320.eu-west-1.convex.cloud/api/auth/callback/zitadel"];
#                   grantTypes = ["authorizationCode"];
#                   responseTypes = ["code"];
#                 };
#               };
#             };
#           };
#           action = {
#             flattenRoles = {
#               script = ''
#                 (ctx, api) => {
#                   if (ctx.v1.user.grants == undefined || ctx.v1.user.grants.count == 0) {
#                     return;
#                   }
#                   const roles = ctx.v1.user.grants.grants.flatMap(({ roles, projectId }) => roles.map(role => projectId + ':' + role));
#                   api.v1.claims.setClaim('nix:zitadel:custom', JSON.stringify({ roles }));
#                 };
#               '';
#             };
#           };
#           triggers = [
#             {
#               flowType = "customiseToken";
#               triggerType = "preUserinfoCreation";
#               actions = ["flattenRoles"];
#             }
#             {
#               flowType = "customiseToken";
#               triggerType = "preAccessTokenCreation";
#               actions = ["flattenRoles"];
#             }
#           ];
#         };
#       };
#     };
#     communication.matrix.enable = true;
#     development.forgejo.enable = true;
#     networking.ssh.enable = true;
#     networking.caddy.hosts = {
#       # Expose amarth cloud stuff like this until I have a proper solution
#       "auth.amarth.cloud" = ''
#         reverse_proxy http://192.168.1.223:9092
#       '';
#       "amarth.cloud" = ''
#         reverse_proxy http://192.168.1.223:8080
#       '';
#     };
#     media.enable = true;
#     media.glance.enable = true;
#     media.mydia.enable = true;
#     media.nfs.enable = true;
#     media.jellyfin.enable = true;
#     # media.servarr = {
#     #   radarr = {
#     #     enable = true;
#     #     port = 2001;
#     #     rootFolders = [
#     #       "/var/media/movies"
#     #     ];
#     #   };
#     #   sonarr = {
#     #     enable = true;
#     #     # debug = true;
#     #     port = 2002;
#     #     rootFolders = [
#     #       "/var/media/series"
#     #     ];
#     #   };
#     #   lidarr = {
#     #     enable = true;
#     #     debug = true;
#     #     port = 2003;
#     #     rootFolders = [
#     #       "/var/media/music"
#     #     ];
#     #   };
#     #   prowlarr = {
#     #     enable = true;
#     #     # debug = true;
#     #     port = 2004;
#     #   };
#     # };
#     observability = {
#       grafana.enable = true;
#       prometheus.enable = true;
#       loki.enable = true;
#       promtail.enable = true;
#       # uptime-kuma.enable = true;
#     };
#     security.vaultwarden = {
#       enable = true;
#       database = {
#         # type = "sqlite";
#         # file = "/var/lib/vaultwarden/state.db";
#         type = "postgresql";
#         host = "localhost";
#         port = 5432;
#         sslMode = "disabled";
#       };
#     };
#   };
#   editor = {
#     nano.enable = true;
#   };
# };
}

View file

@ -1,20 +0,0 @@
# Hardware profile: Intel CPU (kvm-intel) with NVMe/SATA/USB boot modules.
{
config,
pkgs,
lib,
modulesPath,
...
}: let
inherit (lib.modules) mkDefault;
in {
boot = {
initrd.availableKernelModules = ["xhci_pci" "ahci" "nvme" "usbhid" "usb_storage" "sd_mod"];
initrd.kernelModules = [];
kernelModules = ["kvm-intel"];
kernelParams = [];
extraModulePackages = [];
};
nixpkgs.hostPlatform = "x86_64-linux";
# Only update microcode when redistributable firmware is already allowed.
hardware.cpu.intel.updateMicrocode = mkDefault config.hardware.enableRedistributableFirmware;
}

View file

@ -1,3 +0,0 @@
# Minimal NixOS module: pins the host platform to x86_64-linux.
{ ... }: {
nixpkgs.hostPlatform = "x86_64-linux";
}

View file

@ -1,3 +0,0 @@
{ ... }: {
nixpkgs.hostPlatform = "x86_64-linux";
}

View file

@ -1,19 +0,0 @@
# Home-manager module: opt-in installation of the Bitwarden desktop client.
{
inputs,
config,
lib,
pkgs,
...
}: let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.application.bitwarden;
in {
options.sneeuwvlok.application.bitwarden = {
enable = mkEnableOption "enable bitwarden";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [bitwarden-desktop];
};
}

View file

@ -0,0 +1,15 @@
# Home-manager module: opt-in installation of the Bitwarden desktop client.
# Uses the snowfall-provided `namespace` argument instead of a hard-coded one.
{ inputs, config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.${namespace}.application.bitwarden;
in
{
options.${namespace}.application.bitwarden = {
enable = mkEnableOption "enable bitwarden";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [ bitwarden-desktop ];
};
}

View file

@ -1,15 +1,11 @@
{
inputs,
config,
lib,
pkgs,
...
}: let
{ inputs, config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.application.chrome;
in {
options.sneeuwvlok.application.chrome = {
cfg = config.${namespace}.application.chrome;
in
{
options.${namespace}.application.chrome = {
enable = mkEnableOption "enable chrome";
};

View file

@ -1,19 +0,0 @@
# Home-manager module: opt-in installation of Discord via the Vesktop client.
{
inputs,
config,
lib,
pkgs,
...
}: let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.application.discord;
in {
options.sneeuwvlok.application.discord = {
enable = mkEnableOption "enable discord (vesktop)";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [vesktop];
};
}

View file

@ -0,0 +1,15 @@
# Home-manager module: opt-in installation of Discord via the Vesktop client.
# Uses the snowfall-provided `namespace` argument instead of a hard-coded one.
{ inputs, config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.${namespace}.application.discord;
in
{
options.${namespace}.application.discord = {
enable = mkEnableOption "enable discord (vesktop)";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [ vesktop ];
};
}

View file

@ -1,19 +0,0 @@
# Home-manager module: opt-in installation of the Ladybird browser.
{
inputs,
config,
lib,
pkgs,
...
}: let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.application.ladybird;
in {
options.sneeuwvlok.application.ladybird = {
enable = mkEnableOption "enable ladybird";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [ladybird];
};
}

View file

@ -0,0 +1,15 @@
{ inputs, config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.${namespace}.application.ladybird;
in
{
options.${namespace}.application.ladybird = {
enable = mkEnableOption "enable ladybird";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [ ladybird ];
};
}

View file

@ -1,23 +0,0 @@
{
config,
lib,
pkgs,
osConfig ? {},
...
}: let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.application.matrix;
in {
options.sneeuwvlok.application.matrix = {
enable = mkEnableOption "enable Matrix client (Fractal)";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [fractal element-desktop];
programs.element-desktop = {
enable = true;
};
};
}

View file

@ -0,0 +1,19 @@
{ config, lib, pkgs, namespace, osConfig ? {}, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.${namespace}.application.matrix;
in
{
options.${namespace}.application.matrix = {
enable = mkEnableOption "enable Matrix client (Fractal)";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [ fractal element-desktop ];
programs.element-desktop = {
enable = true;
};
};
}

View file

@ -1,15 +1,11 @@
{
config,
lib,
pkgs,
osConfig ? {},
...
}: let
{ config, lib, pkgs, namespace, osConfig ? {}, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.application.obs;
in {
options.sneeuwvlok.application.obs = {
cfg = config.${namespace}.application.obs;
in
{
options.${namespace}.application.obs = {
enable = mkEnableOption "enable obs";
};

View file

@ -3,13 +3,14 @@
config,
lib,
pkgs,
namespace,
...
}: let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.application.onlyoffice;
cfg = config.${namespace}.application.onlyoffice;
in {
options.sneeuwvlok.application.onlyoffice = {
options.${namespace}.application.onlyoffice = {
enable = mkEnableOption "enable onlyoffice";
};

View file

@ -1,19 +0,0 @@
{
inputs,
config,
lib,
pkgs,
...
}: let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.application.signal;
in {
options.sneeuwvlok.application.signal = {
enable = mkEnableOption "enable signal";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [signal-desktop];
};
}

View file

@ -0,0 +1,15 @@
{ inputs, config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.${namespace}.application.signal;
in
{
options.${namespace}.application.signal = {
enable = mkEnableOption "enable signal";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [ signal-desktop ];
};
}

View file

@ -1,20 +1,16 @@
{
inputs,
config,
lib,
pkgs,
...
}: let
{ inputs, config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.application.steam;
in {
options.sneeuwvlok.application.steam = {
cfg = config.${namespace}.application.steam;
in
{
options.${namespace}.application.steam = {
enable = mkEnableOption "enable steam";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [protonup-ng];
home.packages = with pkgs; [ protonup-ng ];
home.sessionVariables = {
STEAM_EXTRA_COMPAT_TOOLS_PATHS = "\${HOME}/.steam/root/compatibilitytools.d";

View file

@ -1,18 +0,0 @@
{
config,
lib,
self,
...
}: let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.application.studio;
in {
options.sneeuwvlok.application.studio = {
enable = mkEnableOption "enable Bricklink Studio";
};
config = mkIf cfg.enable {
home.packages = [self.packages.studio];
};
}

View file

@ -0,0 +1,15 @@
{ config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.${namespace}.application.studio;
in
{
options.${namespace}.application.studio = {
enable = mkEnableOption "enable Bricklink Studio";
};
config = mkIf cfg.enable {
home.packages = with pkgs.${namespace}; [ studio ];
};
}

View file

@ -1,15 +1,11 @@
{
inputs,
config,
lib,
pkgs,
...
}: let
{ inputs, config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.application.teamspeak;
in {
options.sneeuwvlok.application.teamspeak = {
cfg = config.${namespace}.application.teamspeak;
in
{
options.${namespace}.application.teamspeak = {
enable = mkEnableOption "enable teamspeak";
};

View file

@ -1,15 +1,11 @@
{
inputs,
config,
lib,
pkgs,
...
}: let
{ inputs, config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.application.thunderbird;
in {
options.sneeuwvlok.application.thunderbird = {
cfg = config.${namespace}.application.thunderbird;
in
{
options.${namespace}.application.thunderbird = {
enable = mkEnableOption "enable thunderbird";
};
@ -18,7 +14,7 @@ in {
enable = true;
package = pkgs.thunderbird-latest;
profiles.chris = {
profiles.${config.snowfallorg.user.name} = {
isDefault = true;
};
};
@ -34,7 +30,7 @@ in {
};
thunderbird = {
enable = true;
profiles = ["chris"];
profiles = [ config.snowfallorg.user.name ];
};
};

View file

@ -1,14 +1,15 @@
{
config,
lib,
pkgs,
...
}: let
{ inputs, config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.application.zen;
in {
options.sneeuwvlok.application.zen = {
cfg = config.${namespace}.application.zen;
in
{
imports = [
inputs.zen-browser.homeModules.default
];
options.${namespace}.application.zen = {
enable = mkEnableOption "enable zen";
};
@ -53,7 +54,8 @@ in {
install_url = "https://addons.mozilla.org/firefox/downloads/latest/${builtins.toString id}/latest.xpi";
installation_mode = "force_installed";
};
in {
in
{
ublock_origin = 4531307;
ghostry = 4562168;
bitwarden = 4562769;

View file

@ -1,38 +1,34 @@
{
pkgs,
config,
lib,
...
}: let
{ pkgs, config, lib, namespace, ... }:
let
inherit (lib) mkOption;
inherit (lib.types) enum;
cfg = config.sneeuwvlok.defaults;
cfg = config.${namespace}.defaults;
in {
options.sneeuwvlok.defaults = {
options.${namespace}.defaults = {
editor = mkOption {
type = enum ["nano" "nvim" "zed"];
type = enum [ "nano" "nvim" "zed" ];
default = "nano";
description = "Default editor for text manipulation";
example = "nvim";
};
shell = mkOption {
type = enum ["fish" "zsh" "bash"];
type = enum [ "fish" "zsh" "bash" ];
default = "zsh";
description = "Default shell";
example = "zsh";
};
terminal = mkOption {
type = enum ["ghostty" "alacritty"];
type = enum [ "ghostty" "alacritty" ];
default = "ghostty";
description = "Default terminal";
example = "ghostty";
};
browser = mkOption {
type = enum ["chrome" "ladybird" "zen"];
type = enum [ "chrome" "ladybird" "zen" ];
default = "zen";
description = "Default terminal";
example = "zen";

View file

@ -1,15 +1,13 @@
{
config,
lib,
osConfig ? {},
...
}: let
{ config, lib, namespace, osConfig ? {}, ... }:
let
inherit (lib) mkIf;
cfg = config.sneeuwvlok.desktop.plasma;
osCfg = osConfig.sneeuwvlok.desktop.plasma or {enable = false;};
in {
options.sneeuwvlok.desktop.plasma = {
cfg = config.${namespace}.desktop.plasma;
osCfg = osConfig.${namespace}.desktop.plasma or { enable = false; };
in
{
options.${namespace}.desktop.plasma = {
};
config = mkIf osCfg.enable {

View file

@ -1,18 +0,0 @@
{
config,
lib,
pkgs,
...
}: let
inherit (lib) mkEnableOption mkIf;
cfg = config.sneeuwvlok.development.dotnet;
in {
options.sneeuwvlok.development.dotnet = {
enable = mkEnableOption "Enable dotnet development tools";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [dotnet-sdk_8];
};
}

View file

@ -0,0 +1,15 @@
{ config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkEnableOption mkIf;
cfg = config.${namespace}.development.dotnet;
in
{
options.${namespace}.development.dotnet = {
enable = mkEnableOption "Enable dotnet development tools";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [ dotnet-sdk_8 ];
};
}

View file

@ -1,18 +0,0 @@
{
config,
lib,
pkgs,
...
}: let
inherit (lib) mkEnableOption mkIf;
cfg = config.sneeuwvlok.development.javascript;
in {
options.sneeuwvlok.development.javascript = {
enable = mkEnableOption "Enable javascript development tools";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [bun nodejs nodePackages_latest.typescript-language-server];
};
}

View file

@ -0,0 +1,15 @@
{ config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkEnableOption mkIf;
cfg = config.${namespace}.development.javascript;
in
{
options.${namespace}.development.javascript = {
enable = mkEnableOption "Enable javascript development tools";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [ bun nodejs nodePackages_latest.typescript-language-server ];
};
}

View file

@ -1,18 +0,0 @@
{
config,
lib,
pkgs,
...
}: let
inherit (lib) mkEnableOption mkIf;
cfg = config.sneeuwvlok.development.rust;
in {
options.sneeuwvlok.development.rust = {
enable = mkEnableOption "Enable rust development tools";
};
config =
mkIf cfg.enable {
};
}

View file

@ -0,0 +1,15 @@
{ config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkEnableOption mkIf;
cfg = config.${namespace}.development.rust;
in
{
options.${namespace}.development.rust = {
enable = mkEnableOption "Enable rust development tools";
};
config = mkIf cfg.enable {
};
}

View file

@ -1,20 +1,16 @@
{
config,
options,
lib,
pkgs,
...
}: let
{ config, options, lib, pkgs, namespace, ... }:
let
inherit (lib) mkEnableOption mkIf;
cfg = config.sneeuwvlok.editor.nano;
in {
options.sneeuwvlok.editor.nano = {
cfg = config.${namespace}.editor.nano;
in
{
options.${namespace}.editor.nano = {
enable = mkEnableOption "nano";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [nano];
home.packages = with pkgs; [ nano ];
# programs.nano = {
# enable = true;

View file

@ -1,19 +1,15 @@
{
inputs,
config,
lib,
pkgs,
...
}: let
{ inputs, config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.editor.nvim;
in {
cfg = config.${namespace}.editor.nvim;
in
{
# imports = [
# inputs.nvf.nixosModules.default
# ];
options.sneeuwvlok.editor.nvim = {
options.${namespace}.editor.nvim = {
enable = mkEnableOption "enable nvim via nvf on user level";
};

View file

@ -1,29 +1,21 @@
{
config,
lib,
pkgs,
...
}: let
{ config, lib, pkgs, namespace, ... }: let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.editor.zed;
cfg = config.${namespace}.editor.zed;
in {
options.sneeuwvlok.editor.zed = {
options.${namespace}.editor.zed = {
enable = mkEnableOption "zed";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [
zed-editor
nixd
nil
alejandra
zed-editor nixd nil alejandra
];
programs.zed-editor = {
enable = true;
extensions = ["nix" "toml" "html" "just-ls"];
extensions = [ "nix" "toml" "html" "just-ls" ];
userSettings = {
assistant.enabled = false;

View file

@ -0,0 +1,15 @@
{ inputs, config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.${namespace}.game.minecraft;
in
{
options.${namespace}.game.minecraft = {
enable = mkEnableOption "enable minecraft";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [ prismlauncher ];
};
}

View file

@ -1,19 +0,0 @@
{
inputs,
config,
lib,
pkgs,
...
}: let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.game.minecraft;
in {
options.sneeuwvlok.game.minecraft = {
enable = mkEnableOption "enable minecraft";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [prismlauncher];
};
}

View file

@ -1,20 +1,17 @@
{
config,
lib,
pkgs,
...
}: let
{ config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkMerge mkEnableOption mkDefault;
cfg = config.sneeuwvlok.shell;
in {
options.sneeuwvlok.shell = {
cfg = config.${namespace}.shell;
in
{
options.${namespace}.shell = {
corePkgs.enable = mkEnableOption "core shell packages";
};
config = mkMerge [
(mkIf (cfg.corePkgs.enable) {
sneeuwvlok.shell.toolset = mkDefault {
${namespace}.shell.toolset = mkDefault {
bat.enable = true;
btop.enable = true;
eza.enable = true;
@ -28,8 +25,8 @@ in {
};
})
{
home.packages = with pkgs; [any-nix-shell pwgen yt-dlp ripdrag fd (ripgrep.override {withPCRE2 = true;})];
({
home.packages = with pkgs; [ any-nix-shell pwgen yt-dlp ripdrag fd (ripgrep.override {withPCRE2 = true;}) ];
programs = {
direnv = {
@ -43,6 +40,6 @@ in {
config.whitelist.prefix = ["/home"];
};
};
}
})
];
}

View file

@ -1,19 +1,16 @@
{
config,
lib,
pkgs,
...
}: let
{ config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.shell.toolset.bat;
in {
options.sneeuwvlok.shell.toolset.bat = {
cfg = config.${namespace}.shell.toolset.bat;
in
{
options.${namespace}.shell.toolset.bat = {
enable = mkEnableOption "cat replacement";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [bat];
home.packages = with pkgs; [ bat ];
programs.bat = {
enable = true;

View file

@ -1,20 +1,17 @@
{
config,
lib,
pkgs,
...
}: let
{ config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
inherit (lib.strings) concatStringsSep;
cfg = config.sneeuwvlok.shell.toolset.btop;
in {
options.sneeuwvlok.shell.toolset.btop = {
cfg = config.${namespace}.shell.toolset.btop;
in
{
options.${namespace}.shell.toolset.btop = {
enable = mkEnableOption "system-monitor";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [btop];
home.packages = with pkgs; [ btop ];
programs.btop = {
enable = true;

View file

@ -1,19 +1,16 @@
{
config,
lib,
pkgs,
...
}: let
{ config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.shell.toolset.eza;
in {
options.sneeuwvlok.shell.toolset.eza = {
cfg = config.${namespace}.shell.toolset.eza;
in
{
options.${namespace}.shell.toolset.eza = {
enable = mkEnableOption "system-monitor";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [eza];
home.packages = with pkgs; [ eza ];
programs.eza = {
enable = true;

View file

@ -1,19 +1,16 @@
{
config,
lib,
pkgs,
...
}: let
{ config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.shell.toolset.fzf;
in {
options.sneeuwvlok.shell.toolset.fzf = {
cfg = config.${namespace}.shell.toolset.fzf;
in
{
options.${namespace}.shell.toolset.fzf = {
enable = mkEnableOption "TUI Fuzzy Finder.";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [fzf];
home.packages = with pkgs; [ fzf ];
programs.fzf = {
enable = true;

View file

@ -2,13 +2,14 @@
config,
lib,
pkgs,
namespace,
...
}: let
inherit (lib) mkEnableOption mkIf;
cfg = config.sneeuwvlok.shell.toolset.git;
cfg = config.${namespace}.shell.toolset.git;
in {
options.sneeuwvlok.shell.toolset.git = {
options.${namespace}.shell.toolset.git = {
enable = mkEnableOption "version-control system";
};

View file

@ -1,14 +1,11 @@
{
config,
lib,
pkgs,
...
}: let
{ config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.shell.toolset.gnupg;
in {
options.sneeuwvlok.shell.toolset.gnupg = {
cfg = config.${namespace}.shell.toolset.gnupg;
in
{
options.${namespace}.shell.toolset.gnupg = {
enable = mkEnableOption "cryptographic suite";
};

View file

@ -1,18 +0,0 @@
{
config,
lib,
pkgs,
...
}: let
inherit (lib) mkEnableOption mkIf;
cfg = config.sneeuwvlok.shell.toolset.just;
in {
options.sneeuwvlok.shell.toolset.just = {
enable = mkEnableOption "version-control system";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [just gum];
};
}

View file

@ -0,0 +1,15 @@
{ config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkEnableOption mkIf;
cfg = config.${namespace}.shell.toolset.just;
in
{
options.${namespace}.shell.toolset.just = {
enable = mkEnableOption "version-control system";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [ just gum ];
};
}

View file

@ -1,19 +1,16 @@
{
config,
lib,
pkgs,
...
}: let
{ config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.shell.toolset.starship;
in {
options.sneeuwvlok.shell.toolset.starship = {
cfg = config.${namespace}.shell.toolset.starship;
in
{
options.${namespace}.shell.toolset.starship = {
enable = mkEnableOption "fancy pansy shell prompt";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [starship];
home.packages = with pkgs; [ starship ];
programs.starship = {
enable = true;

View file

@ -1,19 +1,16 @@
{
config,
lib,
pkgs,
...
}: let
{ config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.shell.toolset.tmux;
in {
options.sneeuwvlok.shell.toolset.tmux = {
cfg = config.${namespace}.shell.toolset.tmux;
in
{
options.${namespace}.shell.toolset.tmux = {
enable = mkEnableOption "terminal multiplexer";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [tmux];
home.packages = with pkgs; [ tmux ];
programs.tmux = {
enable = true;

View file

@ -1,19 +1,16 @@
{
config,
lib,
pkgs,
...
}: let
{ config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.shell.toolset.yazi;
in {
options.sneeuwvlok.shell.toolset.yazi = {
cfg = config.${namespace}.shell.toolset.yazi;
in
{
options.${namespace}.shell.toolset.yazi = {
enable = mkEnableOption "cli file browser";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [yazi];
home.packages = with pkgs; [ yazi ];
programs.yazi = {
enable = true;

Some files were not shown because too many files have changed in this diff Show more