Compare commits

..

39 commits

Author SHA1 Message Date
Chris Kruining
64bc77a73e
apply review comments 2026-04-13 16:31:43 +02:00
Chris Kruining
6b3389c4b1
checkpoint 2026-04-13 15:58:41 +02:00
Chris Kruining
59e8ca812c
. 2026-04-12 13:02:57 +02:00
Chris Kruining
cf9dcf2568
kaas 2026-04-07 15:23:11 +02:00
Chris Kruining
5c1e6807b6
checkpoint 2026-04-05 20:18:07 +02:00
Chris Kruining
d60d4badf3
really loving clan! 2026-04-02 17:24:18 +02:00
Chris Kruining
a8a639db6e
check in time 2026-04-02 11:37:34 +02:00
4dfcd5cca8 vars: update via generator servarr (machine: ulmo) 2026-04-02 09:26:04 +00:00
545b2ad871 vars: update via generator sonarr (machine: ulmo) 2026-04-02 09:26:00 +00:00
4f2ecc60b4 vars: update via generator radarr (machine: ulmo) 2026-04-02 09:25:57 +00:00
8c6e72786c vars: update via generator prowlarr (machine: ulmo) 2026-04-02 09:25:54 +00:00
772db61b9e vars: update via generator lidarr (machine: ulmo) 2026-04-02 09:25:51 +00:00
e941c305b8 vars: update via generator postgresql (machine: ulmo) 2026-04-02 09:25:47 +00:00
11e74b4f29 vars: update via generator postgresql (machine: ulmo) 2026-04-02 08:04:45 +00:00
Chris Kruining
2ffece26f2
daily checkpoint 2026-04-01 16:09:51 +02:00
Chris Kruining
cc86b0a815
checkpoint 2026-03-31 15:43:34 +02:00
cb30a0ba8b vars: update via generator servarr (machine: ulmo) 2026-03-31 09:09:56 +00:00
ad25722249 vars: update via generator sonarr (machine: ulmo) 2026-03-31 09:08:29 +00:00
6b2ec0565a vars: update via generator radarr (machine: ulmo) 2026-03-31 09:08:27 +00:00
163f4a022e vars: update via generator prowlarr (machine: ulmo) 2026-03-31 09:08:24 +00:00
e21de6d1da vars: update via generator lidarr (machine: ulmo) 2026-03-31 09:08:21 +00:00
0229b4f816 vars: update via generator qbittorrent (machine: ulmo) 2026-03-31 06:02:36 +00:00
781d1f3c8a vars: update via generator qbittorrent (machine: ulmo) 2026-03-30 13:38:16 +00:00
db5e5974ab vars: update via generator sabnzbd (machine: ulmo) 2026-03-30 13:05:11 +00:00
b349025232 vars: update via generator sabnzbd (machine: ulmo) 2026-03-30 12:58:15 +00:00
bd540029a7 vars: update via generator sabnzbd (machine: ulmo) 2026-03-30 12:57:36 +00:00
955b1c6ba4 vars: update via generator sabnzbd (machine: ulmo) 2026-03-30 12:31:51 +00:00
a4500a2eb6 vars: update via generator sabnzbd (machine: ulmo) 2026-03-30 12:24:27 +00:00
611f474961 secrets: add machine ulmo 2026-03-30 12:23:02 +00:00
10bbf99210 secrets: add user chris 2026-03-30 12:22:11 +00:00
Chris Kruining
b37c5c0cbd
also refactor nixos modules 2026-03-30 10:29:26 +02:00
Chris Kruining
2471562583
. 2026-03-30 09:22:42 +02:00
Chris Kruining
f59d282c12
refactoring home manager modules 2026-03-30 09:09:01 +02:00
Chris Kruining
20de142350
add import tree 2026-03-26 15:05:37 +01:00
Chris Kruining
ba7c3392b9
wooooooot, we're compiling again 2026-03-26 14:03:12 +01:00
Chris Kruining
97b63074f0
Add home-manager flake module and update imports
Comment out grub2-theme and nextcloud home-manager config
2026-03-25 16:35:07 +01:00
Chris Kruining
a7a1763fe0
wiiiiips 2026-03-25 16:26:04 +01:00
ac3dac322d . 2026-03-25 06:45:43 +00:00
59a1fbaf0f initial migration 2026-03-24 14:09:46 +00:00
273 changed files with 5883 additions and 8037 deletions

View file

@ -1,6 +0,0 @@
root = true
[*]
end_of_line = lf
insert_final_newline = true
charset = utf-8

5
.gitattributes vendored
View file

@ -1 +1,4 @@
* text=auto eol=lf
* text=auto
core.autocrlf=false
core.eol=lf
core.filemode=false

View file

@ -1,20 +0,0 @@
@_default: list
[doc('List machines')]
@list:
ls -1 ../systems/x86_64-linux/
[doc('Update target machine')]
[no-exit-message]
@update machine:
echo "Checking vars"
cd .. && just vars _check {{ machine }}
echo ""
just assert '-d "../systems/x86_64-linux/{{ machine }}"' "Machine {{ machine }} does not exist, must be one of: $(ls ../systems/x86_64-linux/ | sed ':a;N;$!ba;s/\n/, /g')"
nixos-rebuild switch -L --sudo --target-host {{ machine }} --flake ..#{{ machine }} --log-format internal-json -v |& nom --json
[doc('Check if target machine builds')]
[no-exit-message]
@check machine:
just assert '-d "../systems/x86_64-linux/{{ machine }}"' "Machine {{ machine }} does not exist, must be one of: $(ls ../systems/x86_64-linux/ | sed ':a;N;$!ba;s/\n/, /g')"
nix build ..#nixosConfigurations.{{ machine }}.config.system.build.toplevel

View file

@ -1,101 +0,0 @@
set unstable := true
set quiet := true
_default:
just --list users
[doc('List available users')]
[script]
list:
cd .. && just vars get ulmo zitadel/users | jq -r -C '
import ".jq/table" as table;
import ".jq/format" as f;
fromjson
| to_entries
| sort_by(.key)
| map(
(.key|f::to_title) + ":\n"
+ table::create(
.value
| to_entries
| sort_by(.key)
| map({username:.key} + .value)
)
)
| join("\n\n┄┄┄\n\n")
';
[doc('Add a new user')]
[script]
add:
exec 5>&1
pad () { [ "$#" -gt 1 ] && [ -n "$2" ] && printf "%$2.${2#-}s" "$1"; }
input() {
local label=$1
local value=$2
local res=$(gum input --header "$label" --value "$value")
echo -e "\e[2m$(pad "$label" -11)\e[0m$res" >&5
echo $res
}
data=`cd .. && just vars get ulmo zitadel/users | jq 'fromjson'`
# Gather inputs
org=`
jq -r 'to_entries | map(.key)[]' <<< "$data" \
| gum choose --header 'Which organisation to save to?' --select-if-one
`
username=`input 'user name' ''`
email=`input 'email' ''`
first_name=`input 'first name' ''`
last_name=`input 'last name' ''`
user_exists=`jq --arg 'org' "$org" --arg 'username' "$username" '.[$org][$username]? | . != null' <<< "$data"`
if [ "$user_exists" == "true" ]; then
gum confirm 'User already exists, overwrite it?' --padding="1 1" || exit 0
fi
next=`
jq \
--arg 'org' "$org" \
--arg 'username' "$username" \
--arg 'email' "$email" \
--arg 'first_name' "$first_name" \
--arg 'last_name' "$last_name" \
--compact-output \
'.[$org] += { $username: { email: $email, firstName: $first_name, lastName: $last_name } }' \
<<< $data
`
gum spin --title "saving..." -- echo "$(cd .. && just vars set ulmo 'zitadel/users' "$next")"
[doc('Remove a new user')]
[script]
remove:
data=`cd .. && just vars get ulmo zitadel/users | jq fromjson`
# Gather inputs
org=`
jq -r 'to_entries | map(.key)[]' <<< "$data" \
| gum choose --header 'Which organisation?' --select-if-one
`
user=`
jq -r --arg org "$org" '.[$org] | to_entries | map(.key)[]' <<< "$data" \
| gum choose --header 'Which user?' --select-if-one
`
next=`
jq \
--arg 'org' "$org" \
--arg 'user' "$user" \
--compact-output \
'del(.[$org][$user])' \
<<< $data
`
gum spin --title "saving..." -- echo "$(cd .. && just vars set ulmo 'zitadel/users' "$next")"

View file

@ -1,38 +1,39 @@
set unstable := true
set quiet := true
base_path := justfile_directory() + "/systems/x86_64-linux"
machine_base_path := justfile_directory() + "/machines"
secret_base_path := justfile_directory() + "/systems/x86_64-linux"
_default:
just --list vars
[doc('List all vars of {machine}')]
list machine:
sops decrypt {{ base_path }}/{{ machine }}/secrets.yml
sops decrypt {{ secret_base_path }}/{{ machine }}/secrets.yml
[doc('Edit all vars of {machine} in your editor')]
edit machine:
sops edit {{ base_path }}/{{ machine }}/secrets.yml
sops edit {{ secret_base_path }}/{{ machine }}/secrets.yml
[doc('Set var {value} by {key} for {machine}')]
@set machine key value:
sops set {{ base_path }}/{{ machine }}/secrets.yml "$(printf '%s\n' '["{{ key }}"]' | sed -E 's#/#"]["#g; s/\["([0-9]+)"\]/[\1]/g')" "\"$(echo '{{ value }}' | sed 's/\"/\\\"/g')\""
sops set {{ secret_base_path }}/{{ machine }}/secrets.yml "$(printf '%s\n' '["{{ key }}"]' | sed -E 's#/#"]["#g; s/\["([0-9]+)"\]/[\1]/g')" "\"$(echo '{{ value }}' | sed 's/\"/\\\"/g')\""
git add {{ base_path }}/{{ machine }}/secrets.yml
git commit -m 'chore(secrets): set secret "{{ key }}" for machine "{{ machine }}"' -- {{ base_path }}/{{ machine }}/secrets.yml > /dev/null
git add {{ secret_base_path }}/{{ machine }}/secrets.yml
git commit -m 'chore(secrets): set secret "{{ key }}" for machine "{{ machine }}"' -- {{ secret_base_path }}/{{ machine }}/secrets.yml > /dev/null
echo "Done"
[doc('Get var by {key} from {machine}')]
get machine key:
sops decrypt {{ base_path }}/{{ machine }}/secrets.yml | yq ".$(echo "{{ key }}" | sed -E 's/\//./g') // \"\""
sops decrypt {{ secret_base_path }}/{{ machine }}/secrets.yml | yq ".$(echo "{{ key }}" | sed -E 's/\//./g') // \"\""
[doc('Remove var by {key} for {machine}')]
remove machine key:
sops unset {{ base_path }}/{{ machine }}/secrets.yml "$(printf '%s\n' '["{{ key }}"]' | sed -E 's#/#"]["#g; s/\["([0-9]+)"\]/[\1]/g')"
sops unset {{ secret_base_path }}/{{ machine }}/secrets.yml "$(printf '%s\n' '["{{ key }}"]' | sed -E 's#/#"]["#g; s/\["([0-9]+)"\]/[\1]/g')"
git add {{ base_path }}/{{ machine }}/secrets.yml
git commit -m 'chore(secrets): removed secret "{{ key }}" from machine "{{ machine }}"' -- {{ base_path }}/{{ machine }}/secrets.yml > /dev/null
git add {{ secret_base_path }}/{{ machine }}/secrets.yml
git commit -m 'chore(secrets): removed secret "{{ key }}" from machine "{{ machine }}"' -- {{ secret_base_path }}/{{ machine }}/secrets.yml > /dev/null
echo "Done"
@ -43,14 +44,14 @@ generate machine:
# Skip if we already have a value
[ $(just vars get "{{ machine }}" "$key" | jq -r) ] && continue
just vars _rotate "{{ machine }}" "$key"
just _rotate "{{ machine }}" "$key"
done
[doc('Regenerate var values for {machine}')]
[script]
_rotate machine key:
# Exit if there's no script
[ -f "{{ justfile_directory() }}/script/{{ key }}" ] || exit 0
[ -f "{{ justfile_directory() }}/script/{{ key }}" ] || exit
echo "Executing script for {{ key }}"
just vars set "{{ machine }}" "{{ key }}" "$(cd -- "$(dirname "{{ justfile_directory() }}/script/{{ key }}")" && source "./$(basename "{{ key }}")")"
@ -59,7 +60,7 @@ _rotate machine key:
check:
cd ..
for machine in $(ls {{ base_path }}); do
for machine in $(ls {{ machine_base_path }}); do
just vars _check "$machine"
done
@ -70,14 +71,14 @@ _check machine:
# we can skip this folder as we are
# missing the files used to compare
# the defined vs the configured secrets
if [ ! -f "{{ base_path }}/{{ machine }}/default.nix" ]; then
if [ ! -f "{{ machine_base_path }}/{{ machine }}/default.nix" ]; then
printf "\r• %-8sskipped\n" "{{ machine }}"
exit 0
fi
exec 3< <(jq -nr \
--rawfile defined <(nix eval --json ..#nixosConfigurations.{{ machine }}.config.sops.secrets 2>/dev/null) \
--rawfile configured <([ -f "{{ base_path }}/{{ machine }}/secrets.yml" ] && sops decrypt {{ base_path }}/{{ machine }}/secrets.yml | yq '.' || echo "{}") \
--rawfile configured <([ -f "{{ secret_base_path }}/{{ machine }}/secrets.yml" ] && sops decrypt {{ secret_base_path }}/{{ machine }}/secrets.yml | yq '.' || echo "{}") \
'
[ $configured | fromjson | paths(scalars) | join("/") ] as $conf
| $defined

View file

@ -3,34 +3,3 @@
[doc('Manage vars')]
mod vars '.just/vars.just'
[doc('Manage users')]
mod users '.just/users.just'
[doc('Manage machines')]
mod machine '.just/machine.just'
[doc('Show information about project')]
@show:
echo "show"
[doc('update the flake dependencies')]
@update:
nix flake update
git commit -m 'chore: update dependencies' -- ./flake.lock > /dev/null
echo "Done"
[doc('Introspection on flake output')]
@select key:
nix eval --show-trace --json .#{{ key }} | jq .
#===============================================================================================
# Utils
#===============================================================================================
[no-exit-message]
[no-cd]
[private]
@assert condition message:
[ {{ condition }} ] || { echo -e 1>&2 "\n\x1b[1;41m Error \x1b[0m {{ message }}\n"; exit 1; }

43
clan/flake-module.nix Normal file
View file

@ -0,0 +1,43 @@
{
  lib,
  inputs,
  ...
}: let
  # Load every leaf module under `dir` into an attrset keyed by the file's
  # base name (extension stripped), mapping each path through `f`.
  # Previously this pipeline was duplicated verbatim for `types` and
  # `exportInterfaces`; factored out so both stay in sync.
  loadDir = dir: f:
    dir
    |> (inputs.import-tree.withLib lib).leafs
    |> lib.map (mod: {
      name = mod |> lib.baseNameOf |> lib.splitString "." |> lib.head;
      value = f mod;
    })
    |> lib.listToAttrs;
in {
  imports = [
    ./machines.nix
    ./tags.nix
    ./instances.nix
  ];

  clan = {
    meta = {
      name = "arda";
      domain = "arda";
      description = "My personal machines at home";
    };

    directory = ../.;

    specialArgs = {
      ardaLib = {
        # Each file in ./types becomes a reusable submodule type.
        types = loadDir ./types (mod: lib.types.submoduleWith {modules = [mod];});
      };
    };

    # Each file in ./interfaces is imported as-is and exposed by name.
    exportInterfaces = loadDir ./interfaces import;
  };
}

253
clan/instances.nix Normal file
View file

@ -0,0 +1,253 @@
# Inventory of clan service instances: which services run where, and with
# what settings.
{
  self,
  inputs,
  ...
}: let
  # Resolve the primary persistence endpoint exported by the "persistence"
  # instance on machine "ulmo": fetch the export, then pick the driver
  # entry named by `persistence.main`.
  # NOTE(review): assumes `persistence.main` is non-null in ulmo's export
  # (the option defaults to null) — verify the persistence service sets it.
  db =
    self.clan.exports
    |> inputs.clan-core.lib.getExport {
      serviceName = "arda/persistence";
      roleName = "default";
      machineName = "ulmo";
      instanceName = "persistence";
    }
    |> (v: v.persistence.driver.${v.persistence.main});
in {
  clan.inventory.instances = {
    # Shared "chris" account, provisioned on the interactive machines.
    users-chris = {
      module = {
        name = "users";
        input = "clan-core";
      };
      roles.default.machines.mandos.settings = {};
      roles.default.machines.manwe.settings = {};
      roles.default.machines.orome.settings = {};
      roles.default.machines.tulkas.settings = {};
      roles.default.settings = {
        user = "chris";
        groups = ["wheel"];
        prompt = true;
        share = true;
      };
    };
    # clan-core DNS on every machine.
    clanDns = {
      module = {
        name = "dm-dns";
        input = "clan-core";
      };
      roles.default.tags = ["all"];
    };
    # Reverse proxy on the gateway machine(s); extra hosts are raw Caddy
    # config snippets (see services/gateway).
    gateway = {
      module = {
        name = "gateway";
        input = "self";
      };
      roles.default = {
        tags = ["operational:role:gateway"];
        settings = {
          driver = "caddy";
          hosts = {
            "auth.kruining.eu" = ''
              reverse_proxy h2c://[::1]:9092
            '';
          };
        };
      };
    };
    # Database/storage backend consumed by identity and servarr via `db`.
    persistence = {
      module = {
        name = "persistence";
        input = "self";
      };
      roles.default.tags = ["operational:availability:always-on" "operational:storage:large"];
    };
    # Zitadel-based SSO: one "nix" organisation with users, projects,
    # OIDC applications, and token-customisation actions.
    identity = {
      module = {
        name = "identity";
        input = "self";
      };
      roles.default = {
        tags = ["operational:availability:always-on"];
        settings = {
          database = db;
          organization = {
            nix = {
              user = {
                chris = {
                  email = "chris@kruining.eu";
                  firstName = "Chris";
                  lastName = "Kruining";
                  roles = ["ORG_OWNER"];
                  instanceRoles = ["IAM_OWNER"];
                };
                kaas = {
                  email = "chris+kaas@kruining.eu";
                  firstName = "Kaas";
                  lastName = "Kruining";
                };
              };
              project = {
                # Self-hosted services on ulmo and their OIDC clients.
                ulmo = {
                  projectRoleCheck = true;
                  projectRoleAssertion = true;
                  hasProjectCheck = true;
                  role = {
                    jellyfin = {
                      group = "jellyfin";
                    };
                    jellyfin_admin = {
                      group = "jellyfin";
                    };
                  };
                  # user -> list of project roles granted.
                  assign = {
                    chris = ["jellyfin" "jellyfin_admin"];
                    kaas = ["jellyfin"];
                  };
                  application = {
                    jellyfin = {
                      redirectUris = ["https://jellyfin.kruining.eu/sso/OID/redirect/zitadel"];
                      grantTypes = ["authorizationCode"];
                      responseTypes = ["code"];
                    };
                    forgejo = {
                      redirectUris = ["https://git.amarth.cloud/user/oauth2/zitadel/callback"];
                      grantTypes = ["authorizationCode"];
                      responseTypes = ["code"];
                    };
                    vaultwarden = {
                      redirectUris = ["https://vault.kruining.eu/identity/connect/oidc-signin"];
                      grantTypes = ["authorizationCode"];
                      responseTypes = ["code"];
                      # Vaultwarden expects these env var names for its SSO client.
                      exportMap = {
                        client_id = "SSO_CLIENT_ID";
                        client_secret = "SSO_CLIENT_SECRET";
                      };
                    };
                    matrix = {
                      redirectUris = ["https://matrix.kruining.eu/_synapse/client/oidc/callback"];
                      grantTypes = ["authorizationCode"];
                      responseTypes = ["code"];
                    };
                    mydia = {
                      redirectUris = ["http://localhost:2010/auth/oidc/callback"];
                      grantTypes = ["authorizationCode"];
                      responseTypes = ["code"];
                    };
                    grafana = {
                      redirectUris = ["http://localhost:9001/login/generic_oauth"];
                      grantTypes = ["authorizationCode"];
                      responseTypes = ["code"];
                    };
                  };
                };
                # Hosted Convex deployment's OIDC client.
                convex = {
                  projectRoleCheck = true;
                  projectRoleAssertion = true;
                  hasProjectCheck = true;
                  application = {
                    scry = {
                      redirectUris = ["https://nautical-salamander-320.eu-west-1.convex.cloud/api/auth/callback/zitadel"];
                      grantTypes = ["authorizationCode"];
                      responseTypes = ["code"];
                    };
                  };
                };
              };
              action = {
                # Zitadel action: flatten project-role grants into a single
                # custom claim ("projectId:role" strings) on issued tokens.
                flattenRoles = {
                  script = ''
                    (ctx, api) => {
                      if (ctx.v1.user.grants == undefined || ctx.v1.user.grants.count == 0) {
                        return;
                      }
                      const roles = ctx.v1.user.grants.grants.flatMap(({ roles, projectId }) => roles.map(role => projectId + ':' + role));
                      api.v1.claims.setClaim('nix:zitadel:custom', JSON.stringify({ roles }));
                    };
                  '';
                };
              };
              # Run flattenRoles for both userinfo and access-token creation.
              triggers = [
                {
                  flowType = "customiseToken";
                  triggerType = "preUserinfoCreation";
                  actions = ["flattenRoles"];
                }
                {
                  flowType = "customiseToken";
                  triggerType = "preAccessTokenCreation";
                  actions = ["flattenRoles"];
                }
              ];
            };
          };
        };
      };
    };
    # *arr media stack backed by the shared persistence database.
    servarr = {
      module = {
        name = "servarr";
        input = "self";
      };
      roles.default = {
        tags = ["operational:availability:always-on"];
        settings = {
          enable = true;
          database = db;
          services = {
            sonarr = {
              rootFolders = [
                "/var/media/series"
              ];
            };
            radarr = {
              rootFolders = [
                "/var/media/movies"
              ];
            };
            lidarr = {
              rootFolders = [
                "/var/media/music"
              ];
            };
            prowlarr = {};
          };
        };
      };
    };
  };
}

View file

@ -0,0 +1,94 @@
# Interface for gateway exports: services to reverse-proxy (rendered as
# URLs via __toString) and named config-snippet functions.
{lib, ...}: let
  inherit (lib) mkOption types;
in {
  options = {
    services = mkOption {
      type = types.attrsOf (types.submodule ({name, ...}: {
        options = {
          name = mkOption {
            type = types.str;
            default = name;
          };
          endpoint = mkOption {
            type = types.submoduleWith {
              modules = [../types/endpoint.nix];
            };
            default = {};
            # Attach a __toString so `toString endpoint` yields a URL like
            # "http://host:port/path?k=v#k=v".
            apply = attrs:
              attrs
              // {
                __toString = self: let
                  # Render an attrset of parameters as "k=v" pairs joined
                  # with "&".
                  # FIX: previously wrote `toString self.query |> …`, which
                  # (a) applied toString to the attrset (uncoercible),
                  # (b) interpolated a *list* into the string, and
                  # (c) never joined the pairs.
                  renderParams = params:
                    params
                    |> lib.attrsToList
                    |> lib.map ({
                      name,
                      value,
                    }: "${name}=${value}")
                    |> lib.concatStringsSep "&";
                  protocol =
                    if self.protocol != null
                    then "${self.protocol}://"
                    else "";
                  port =
                    if self.port != null
                    then ":${toString self.port}"
                    else "";
                  path =
                    if self.path != null
                    then "/${self.path}"
                    else "";
                  query =
                    if self.query != null
                    then "?${renderParams self.query}"
                    else "";
                  hash =
                    if self.hash != null
                    then "#${renderParams self.hash}"
                    else "";
                in "${protocol}${self.host}${port}${path}${query}${hash}";
              };
          };
          # protocol = mkOption {
          #   type = types.str;
          #   default = "http";
          # };
          # host = mkOption {
          #   type = types.str;
          #   default = "[::1]";
          # };
          # port = mkOption {
          #   type = types.port;
          # };
        };
      }));
      default = {};
    };
    functions = mkOption {
      type = types.attrsOf (types.submodule ({name, ...}: {
        options = {
          name = mkOption {
            type = types.str;
            default = name;
          };
          # Raw config snippet emitted by the gateway driver.
          body = mkOption {
            type = types.str;
          };
        };
      }));
      default = {};
    };
  };
}

View file

@ -0,0 +1,24 @@
# Interface for persistence exports: the available database drivers, which
# one is primary, and the databases consumers require.
{lib, ...}: let
  inherit (lib) mkOption types;

  # Submodule type describing a single driver endpoint (host, port, …).
  endpointType = types.submoduleWith {
    modules = [../types/endpoint.nix];
  };
in {
  options = {
    # Name of the driver entry that is the primary one; null if unset.
    main = mkOption {
      type = types.nullOr types.str;
      default = null;
    };

    # Available drivers, keyed by name, each described as an endpoint.
    driver = mkOption {
      type = types.attrsOf endpointType;
      default = {};
    };

    # Database names that consumers of this instance require to exist.
    databases = mkOption {
      type = types.listOf types.str;
      default = [];
    };
  };
}

75
clan/machines.nix Normal file
View file

@ -0,0 +1,75 @@
# Machine inventory: every host, its purpose, and its capability/role tags.
{...}: let
  # Every machine here is a NixOS machine; build an inventory entry from
  # its name, a human-readable description, and its tags.
  machine = name: description: tags: {
    inherit name description tags;
    machineClass = "nixos";
  };
in {
  clan.inventory.machines = {
    aule = machine "aule" "Planned build server." [];
    mandos = machine "mandos" "Living room Steam box." [
      "capability:mobility:stationary"
      "operational:availability:wake-on-demand"
    ];
    manwe = machine "manwe" "Main desktop." [
      "capability:mobility:stationary"
      "operational:availability:manual"
    ];
    melkor = machine "melkor" "Planned machine with no defined role yet." [];
    orome = machine "orome" "Work laptop." [
      "capability:mobility:portable"
      "operational:availability:manual"
    ];
    tulkas = machine "tulkas" "Steam Deck." [
      "capability:mobility:portable"
      "operational:availability:manual"
    ];
    ulmo = machine "ulmo" "Primary self-hosted services machine." [
      "capability:mobility:stationary"
      "operational:availability:always-on"
      "operational:storage:large"
      "operational:role:gateway"
    ];
    varda = machine "varda" "Planned machine with no defined role yet." [];
    yavanna = machine "yavanna" "Planned machine with no defined role yet." [];
  };
}

12
clan/tags.nix Normal file
View file

@ -0,0 +1,12 @@
# Extra tag -> machines assignments, on top of the per-machine tags
# declared in machines.nix.
{...}: {
  clan.inventory.tags = {
    config,
    machines,
    ...
  }: {
    # tag_name = [ "list" "of" "machines" ]
    # No machines are assigned to these hardware tags yet.
    # FIX: these were previously `[""]` — an empty-string machine name
    # that matches no machine in the inventory; an empty list expresses
    # "no machines" without the bogus entry.
    "capability:hardware:gpu" = [];
    "capability:hardware:audio" = [];
    "capability:hardware:bluetooth" = [];
  };
}

45
clan/types/endpoint.nix Normal file
View file

@ -0,0 +1,45 @@
# Reusable endpoint type: the pieces of a URL, most of them optional.
# Consumers (e.g. the gateway interface) render these fields into a URL.
{lib, ...}: let
  inherit (lib) mkOption types;

  # An optional option of the given type, absent (null) by default.
  optional = type:
    mkOption {
      type = types.nullOr type;
      default = null;
    };
in {
  options = {
    protocol = mkOption {
      type = types.str;
      default = "http";
    };
    host = mkOption {
      type = types.str;
      default = "localhost";
    };
    port = optional types.port;
    user = optional types.str;
    password = optional types.str;
    path = optional types.str;
    # Query-string parameters, key -> value.
    query = optional (types.attrsOf types.str);
    # Fragment parameters, key -> value.
    hash = optional (types.attrsOf types.str);
  };
}

View file

@ -0,0 +1,19 @@
# Auto-import the flake module of every sibling service directory.
{lib, ...}: let
  # Directory entries next to this file, name -> entry type.
  entries = builtins.readDir ./.;

  # Path where a service directory would keep its flake module.
  moduleOf = name: ./. + "/${name}/flake-module.nix";

  # Keep only directories that actually ship a flake-module.nix.
  hasModule = name: type:
    type == "directory" && builtins.pathExists (moduleOf name);
in {
  imports =
    entries
    |> lib.filterAttrs hasModule
    |> lib.mapAttrsToList (name: _: moduleOf name);
}

View file

View file

@ -0,0 +1,92 @@
# Gateway service: deploys a reverse proxy and wires up a virtual host for
# every `gateway.services.<name>` endpoint exported by other services.
{
  lib,
  clanLib,
  exports,
  ...
}: let
  inherit (builtins) toString;
in {
  _class = "clan.service";

  manifest = {
    name = "arda/gateway";
    description = ''
    '';
    readme = builtins.readFile ./README.md;
    exports = {
      inputs = [];
      out = [];
    };
  };

  roles.default = {
    description = '''';

    interface = {lib, ...}: let
      inherit (lib) mkOption types;
    in {
      options = {
        # Which reverse-proxy implementation to deploy.
        driver = mkOption {
          type = types.enum ["caddy" "nginx"];
        };
        # Explicit virtual hosts: FQDN -> raw driver config snippet.
        hosts = mkOption {
          type = types.attrsOf types.str;
          default = {};
        };
      };
    };

    perInstance = {
      mkExports,
      machine,
      settings,
      ...
    }: let
      # Collect every exported `gateway.services.<name>` endpoint from all
      # scopes and turn each into a "<service>.<machine>.arda" virtual host
      # that reverse-proxies to it.
      reverse_proxies =
        exports
        |> clanLib.selectExports (_scope: true)
        |> lib.mapAttrsToList (_: value: (value.gateway.services or {}) |> lib.attrValues)
        |> lib.concatLists
        |> lib.map ({
          name,
          endpoint,
        }: {
          name = "${name}.${machine.name}.arda";
          value = {
            extraConfig = ''
              reverse_proxy ${toString endpoint}
            '';
          };
        })
        |> lib.listToAttrs;

      # Hosts configured explicitly through `settings.hosts`.
      # FIX: these were previously dropped (`reverse_proxies // {}`), so the
      # `hosts` option — set to auth.kruining.eu in the inventory — had no
      # effect on the deployed proxy.
      extra_hosts =
        settings.hosts
        |> lib.mapAttrs (_name: snippet: {extraConfig = snippet;});
    in {
      # exports =
      #   mkExports {
      #   };
      nixosModule = {
        lib,
        pkgs,
        ...
      }: let
        inherit (lib) mkMerge mkIf;
        # Caddy with the Coraza WAF plugin compiled in.
        caddyPackage = pkgs.caddy.withPlugins {
          plugins = ["github.com/corazawaf/coraza-caddy/v2@v2.1.0"];
          hash = "sha256-pSXjLaZoRtKV3eFl2ySRSjl3yxi514G1Cb7pfrpxxtE=";
        };
      in {
        config = mkMerge [
          # `mkIf` was already inherited above; use it directly.
          (mkIf (settings.driver == "caddy") {
            services.caddy = {
              enable = true;
              package = caddyPackage;
              # Explicit hosts override derived service hosts on collision.
              virtualHosts = reverse_proxies // extra_hosts;
            };
          })
        ];
      };
    };
  };
}

View file

@ -0,0 +1,13 @@
# Registers the gateway service module with clan.
{...}: {
  clan.modules.gateway = ./default.nix;

  # Disabled test scaffolding, kept for when nixosTests are wired up:
  # perSystem = {...}: {
  #   clan.nixosTests.gateway = {
  #     imports = [];
  #     clan.modules."@arda/gateway" = ./default.nix;
  #   };
  # };
}

View file

View file

@ -0,0 +1,518 @@
{
lib,
clanLib,
exports,
...
}: let
inherit (builtins) toString readFile;
inherit (lib) mkMerge mkIf;
in {
_class = "clan.service";
manifest = {
name = "arda/identity";
description = ''
'';
readme = readFile ./README.md;
exports = {
inputs = ["persistence"];
out = ["gateway" "persistence"];
};
};
roles.default = {
description = '''';
interface = {lib, ...}: let
inherit (lib) mkOption types toSentenceCase literalExpression;
in {
options = {
driver = mkOption {
type = types.enum ["zitadel"];
default = "zitadel";
};
database = mkOption {
type = types.anything;
};
port = mkOption {
type = types.port;
default = 9092;
};
organization = mkOption {
type = types.attrsOf (types.submodule ({ name, ... }: {
options =
let
org = name;
in
{
isDefault = mkOption {
type = types.bool;
default = false;
example = "true";
description = ''
True sets the '${org}' org as default org for the instance. Only one org can be default org.
Nothing happens if you set it to false until you set another org as default org.
'';
};
project = mkOption {
default = {};
type = types.attrsOf (types.submodule {
options = {
hasProjectCheck = mkOption {
type = types.bool;
default = false;
example = "true";
description = ''
ZITADEL checks if the org of the user has permission to this project.
'';
};
privateLabelingSetting = mkOption {
type = types.nullOr (types.enum [ "unspecified" "enforceProjectResourceOwnerPolicy" "allowLoginUserResourceOwnerPolicy" ]);
default = null;
example = "enforceProjectResourceOwnerPolicy";
description = ''
Defines from where the private labeling should be triggered,
supported values:
- unspecified
- enforceProjectResourceOwnerPolicy
- allowLoginUserResourceOwnerPolicy
'';
};
projectRoleAssertion = mkOption {
type = types.bool;
default = false;
example = "true";
description = ''
Describes if roles of user should be added in token.
'';
};
projectRoleCheck = mkOption {
type = types.bool;
default = false;
example = "true";
description = ''
ZITADEL checks if the user has at least one on this project.
'';
};
role = mkOption {
default = {};
type = types.attrsOf (types.submodule ({ name, ... }: {
options =
let
roleName = name;
in
{
displayName = mkOption {
type = types.str;
default = toSentenceCase name;
example = "RoleName";
description = ''
Name used for project role.
'';
};
group = mkOption {
type = types.nullOr types.str;
default = null;
example = "some_group";
description = ''
Group used for project role.
'';
};
};
}));
};
assign = mkOption {
default = {};
type = types.attrsOf (types.listOf types.str);
};
application = mkOption {
default = {};
type = types.attrsOf (types.submodule {
options = {
redirectUris = mkOption {
type = types.nonEmptyListOf types.str;
example = ''
[ "https://example.com/redirect/url" ]
'';
description = ''
.
'';
};
grantTypes = mkOption {
type = types.nonEmptyListOf (types.enum [ "authorizationCode" "implicit" "refreshToken" "deviceCode" "tokenExchange" ]);
example = ''
[ "authorizationCode" ]
'';
description = ''
.
'';
};
responseTypes = mkOption {
type = types.nonEmptyListOf (types.enum [ "code" "idToken" "idTokenToken" ]);
example = ''
[ "code" ]
'';
description = ''
.
'';
};
exportMap =
let
strOpt = mkOption { type = types.nullOr types.str; default = null; };
in
mkOption {
type = types.submodule { options = { client_id = strOpt; client_secret = strOpt; }; };
default = {};
example = literalExpression ''
{
client_id = "SSO_CLIENT_ID";
client_secret = "SSO_CLIENT_SECRET";
}
'';
description = ''
Remap the outputted variables to another key.
'';
};
};
});
};
};
});
};
user = mkOption {
default = {};
type = types.attrsOf (types.submodule ({ name, ... }: {
options =
let
username = name;
in
{
email = mkOption {
type = types.str;
example = "someone@some.domain";
description = ''
Username.
'';
};
userName = mkOption {
type = types.nullOr types.str;
default = username;
example = "some_user_name";
description = ''
Username. Default value is the key of the config object you created, you can overwrite that by setting this option
'';
};
firstName = mkOption {
type = types.str;
example = "John";
description = ''
First name of the user.
'';
};
lastName = mkOption {
type = types.str;
example = "Doe";
description = ''
Last name of the user.
'';
};
roles = mkOption {
type = types.listOf types.str;
default = [];
example = "[ \"ORG_OWNER\" ]";
description = ''
List of roles granted to organisation.
'';
};
instanceRoles = mkOption {
type = types.listOf types.str;
default = [];
example = "[ \"IAM_OWNER\" ]";
description = ''
List of roles granted to instance.
'';
};
};
}));
};
action = mkOption {
default = {};
type = types.attrsOf (types.submodule ({ name, ... }: {
options = {
script = mkOption {
type = types.str;
example = ''
(ctx, api) => {
api.v1.claims.setClaim('some_claim', 'some_value');
};
'';
description = ''
The script to run. This must be a function that receives 2 parameters, and returns void. During the creation of the action's script this module simly does `const {{name}} = {{script}}`.
'';
};
timeout = mkOption {
type = (types.ints.between 0 20);
default = 10;
example = "10";
description = ''
After which time the action will be terminated if not finished.
'';
};
allowedToFail = mkOption {
type = types.bool;
default = true;
example = "true";
description = ''
Allowed to fail.
'';
};
};
}));
};
triggers = mkOption {
default = [];
type = types.listOf (types.submodule {
options = {
flowType = mkOption {
type = types.enum [ "authentication" "customiseToken" "internalAuthentication" "samlResponse" ];
example = "customiseToken";
description = ''
Type of the flow to which the action triggers belong.
'';
};
triggerType = mkOption {
type = types.enum [ "postAuthentication" "preCreation" "postCreation" "preUserinfoCreation" "preAccessTokenCreation" "preSamlResponse" ];
example = "postAuthentication";
description = ''
Trigger type on when the actions get triggered.
'';
};
actions = mkOption {
type = types.nonEmptyListOf types.str;
example = ''[ "action_name" ]'';
description = ''
Names of actions to trigger
'';
};
};
});
};
};
}));
};
};
};
perInstance = {
mkExports,
settings,
machine,
instanceName,
...
}: {
exports = mkExports (mkMerge [
{
gateway.services.identity = {endpoint.port = settings.port;};
}
(mkIf (settings.driver == "zitadel") {
gateway.functions.auth = {
body = ''
forward_auth h2c://[::1]:${toString settings.port} {
uri /api/authz/forward-auth
copy_headers Remote-User Remote-Groups Remote-Email Remote-Name
}
'';
};
persistence.databases = ["zitadel"];
})
]);
nixosModule = args@{
lib,
pkgs,
config,
...
}: let
vars = config.clan.core.vars.generators.zitadel.files;
users = config.clan.core.vars.generators.zitadel_users.files.users.path;
email_password = config.clan.core.vars.generators.zitadel_email_password.files.password.path;
ardaLib = import ../../lib/strings.nix args;
zLib = import ./lib.nix (args // {inherit settings ardaLib;});
in {
config = mkMerge [
(mkIf (settings.driver == "zitadel") ({
clan.core.vars.generators.zitadel = {
dependencies = ["persistence"];
files = {
masterKey = {
deploy = true;
owner = "zitadel";
group = "zitadel";
restartUnits = ["zitadel.service"];
};
settings = {
deploy = true;
owner = "zitadel";
group = "zitadel";
restartUnits = ["zitadel.service"];
};
infraPrivateKey = {
deploy = true;
owner = "zitadel";
group = "zitadel";
restartUnits = ["zitadel.service"];
};
infraPublicKey = {
deploy = true;
owner = "zitadel";
group = "zitadel";
restartUnits = ["zitadel.service"];
};
};
runtimeInputs = with pkgs; [pwgen openssl_3_5];
script = ''
pwgen -s 32 1 > $out/masterKey
openssl genrsa -traditional -out $out/infraPrivateKey 2048
openssl rsa -pubout -in $out/infraPrivateKey -out $out/infraPublicKey
cat << EOL > $out/settings
Database:
postgres:
User:
Password: $(cat $in/persistence/zitadel_password)
Admin:
Password: $(cat $in/persistence/zitadel_password)
EOL
'';
};
clan.core.vars.generators.zitadel_users = {
files = {
users = {
deploy = true;
owner = "zitadel";
group = "zitadel";
restartUnits = ["infra-zitadel.service"];
};
};
script = ''
echo "{}" > $out/users
'';
};
clan.core.vars.generators.zitadel_email_password = {
prompts = {
password = {
description = "password to email for zitadel's smpt connection";
type = "hidden";
persist = true;
};
};
files = {
password = {
deploy = true;
owner = "zitadel";
group = "zitadel";
restartUnits = ["infra-zitadel.service"];
};
};
script = ''
cat $prompts/password > $out/password
'';
};
environment.systemPackages = with pkgs; [
zitadel
];
services.zitadel = {
enable = true;
masterKeyFile = vars.masterKey.path;
tlsMode = "external";
extraSettingsPaths = [
vars.settings.path
];
settings = {
Port = settings.port;
ExternalDomain = "auth.kruining.eu";
ExternalPort = 443;
ExternalSecure = true;
Metrics.Type = "otel";
Tracing.Type = "otel";
Telemetry.Enabled = true;
SystemDefaults = {
PasswordHasher.Hasher.Algorithm = "argon2id";
SecretHasher.Hasher.Algorithm = "argon2id";
};
Database.postgres = {
Host = settings.database.host;
Port = settings.database.port;
Database = "zitadel";
User = {
Username = "zitadel";
};
Admin = {
Username = "zitadel";
};
};
SystemAPIUsers = {
infra = {
Path = vars.infraPublicKey.path;
Memberships = [
{ MemberType = "System"; Roles = [ "SYSTEM_OWNER" "IAM_OWNER" "ORG_OWNER" ]; }
];
};
};
};
};
} // (zLib.createInfra { inherit users email_password; key_file = vars.infraPrivateKey.path; })))
];
};
};
};
}

View file

@ -0,0 +1,13 @@
# Registers the identity service module with clan.
{...}: {
  clan.modules.identity = ./default.nix;

  # Disabled test scaffolding, kept for when nixosTests are wired up:
  # perSystem = {...}: {
  #   clan.nixosTests.identity = {
  #     imports = [];
  #     clan.modules."@arda/identity" = ./default.nix;
  #   };
  # };
}

View file

@ -0,0 +1,372 @@
{
lib,
ardaLib,
self,
pkgs,
settings,
...
}: let
# Assembles the terranix module describing the full Zitadel tenant:
# provider, organizations, projects, OIDC apps, roles, grants, users,
# actions, SMTP config and exported client credentials.
#
#   users          - path to a JSON file with extra (non-declarative) users
#   email_password - path to the SMTP password file
#   key_file       - path to the system-API private key
#   terra          - terranix module args (provides terra.lib.tfRef)
#
# NOTE(review): relies on the experimental `|>` pipe operator throughout;
# confirm the evaluating Nix has pipe-operators enabled.
createTerranixModule = {
  users,
  email_password,
  key_file,
  ...
}: terra: let
  inherit (lib) toUpper toSentenceCase nameValuePair mapAttrs mapAttrs' concatMapAttrs concatMapStringsSep filterAttrsRecursive listToAttrs imap0 head drop length literalExpression attrNames;
  inherit (ardaLib) toSnakeCase;
  inherit (terra.lib) tfRef;

  # Logical reference kinds -> terraform resource type they resolve to.
  # `user` maps onto the zitadel_human_user resource.
  _refTypeMap = {
    org = {type = "org";};
    project = {type = "project";};
    user = {
      type = "user";
      tfType = "human_user";
    };
  };
  # mapRef' {type, tfType} name ->
  #   { "<type>Id" = "${ resource.zitadel_<tfType>.<snake_name>.id }"; }
  mapRef' = {
    type,
    tfType ? type,
  }: name: {"${type}Id" = "\${ resource.zitadel_${tfType}.${toSnakeCase name}.id }";};
  mapRef = type: name: mapRef' (_refTypeMap.${type}) name;
  # mapEnum "OIDC_APP_TYPE" "web" -> "OIDC_APP_TYPE_WEB"
  mapEnum = prefix: value: "${prefix}_${value |> toSnakeCase |> toUpper}";
  # Rewrites known option keys into the provider's enum spelling; any
  # other key passes its value through unchanged.
  mapValue = type: value: ({
    appType = mapEnum "OIDC_APP_TYPE" value;
    grantTypes = map (t: mapEnum "OIDC_GRANT_TYPE" t) value;
    responseTypes = map (t: mapEnum "OIDC_RESPONSE_TYPE" t) value;
    authMethodType = mapEnum "OIDC_AUTH_METHOD_TYPE" value;
    flowType = mapEnum "FLOW_TYPE" value;
    triggerType = mapEnum "TRIGGER_TYPE" value;
    accessTokenType = mapEnum "OIDC_TOKEN_TYPE" value;
  }."${type}" or value);
  # Snake-cases the resource name and every attribute key, mapping values
  # through mapValue; yields a name/value pair for mapAttrs'/listToAttrs.
  toResource = name: value:
    nameValuePair
    (toSnakeCase name)
    (lib.mapAttrs' (k: v: nameValuePair (toSnakeCase k) (mapValue k v)) value);
  # Adds the orgId/projectId/userId reference attribute to a resource body.
  withRef = type: name: attrs: attrs // (mapRef type name);
  # Walks `keys` deep into the settings tree, calling `callback` with one
  # positional argument per traversed level plus the leaf name and value,
  # and flattens everything into a single attrset of resources.
  select = keys: callback: set:
    if (length keys) == 0
    then mapAttrs' callback set
    else let
      key = head keys;
    in
      concatMapAttrs (k: v: select (drop 1 keys) (callback k) (v.${key} or {})) set;
  # Merges a list of name/value pairs into an existing attrset.
  append = attrList: set: set // (listToAttrs attrList);
  # Wraps a resource body in a terraform for_each over `src`.
  # NOTE(review): `_key` is computed from `key` but never interpolated; the
  # for-expression hardcodes "org"/"name" -- confirm whether `_key` was
  # meant to be used in the map key.
  forEach = src: key: set: let
    _key = concatMapStringsSep "_" (k: "\${item.${k}}") key;
  in
    {
      forEach = tfRef '' {
        for item in ${src} :
        "''${item.org}_''${item.name}" => item
      }'';
    }
    // set;
in {
  terraform.required_providers.zitadel = {
    source = "zitadel/zitadel";
    version = "2.2.0";
  };
  provider.zitadel = {
    domain = "auth.kruining.eu";
    insecure = false;
    # Authenticate as the "infra" system API user with the generated key.
    system_api = {
      user = "infra";
      inherit key_file;
    };
  };
  locals = {
    # Non-declarative users, read at apply time from the deployed JSON file.
    extra_users = tfRef "
      flatten([ for org, users in jsondecode(file(\"${users}\")): [
        for name, details in users: {
          org = org
          name = name
          email = details.email
          firstName = details.firstName
          lastName = details.lastName
        }
      ] ])
    ";
    # Map of org name -> created org id, used by the for_each user block.
    orgs = settings.organization |> mapAttrs (org: _: tfRef "resource.zitadel_org.${org}.id");
  };
  resource = {
    # Organizations
    zitadel_org =
      settings.organization
      |> select [] (
        name: {isDefault, ...}:
          {inherit name isDefault;}
          |> toResource name
      );
    # Projects per organization
    zitadel_project =
      settings.organization
      |> select ["project"] (
        org: name: {
          hasProjectCheck,
          privateLabelingSetting,
          projectRoleAssertion,
          projectRoleCheck,
          ...
        }:
          {
            inherit name hasProjectCheck privateLabelingSetting projectRoleAssertion projectRoleCheck;
          }
          |> withRef "org" org
          |> toResource "${org}_${name}"
      );
    # Each OIDC app per project
    zitadel_application_oidc =
      settings.organization
      |> select ["project" "application"] (
        org: project: name: {
          redirectUris,
          grantTypes,
          responseTypes,
          ...
        }:
          {
            inherit name redirectUris grantTypes responseTypes;
            accessTokenRoleAssertion = true;
            idTokenRoleAssertion = true;
            accessTokenType = "JWT";
          }
          |> withRef "org" org
          |> withRef "project" "${org}_${project}"
          |> toResource "${org}_${project}_${name}"
      );
    # Each project role
    zitadel_project_role =
      settings.organization
      |> select ["project" "role"] (
        org: project: name: value:
          {
            inherit (value) displayName group;
            roleKey = name;
          }
          |> withRef "org" org
          |> withRef "project" "${org}_${project}"
          |> toResource "${org}_${project}_${name}"
      );
    # Each project role assignment
    zitadel_user_grant =
      settings.organization
      |> select ["project" "assign"] (
        org: project: user: roles:
          {roleKeys = roles;}
          |> withRef "org" org
          |> withRef "project" "${org}_${project}"
          |> withRef "user" "${org}_${user}"
          |> toResource "${org}_${project}_${user}"
      );
    # Users (declared ones, plus a for_each block over locals.extra_users)
    zitadel_human_user =
      settings.organization
      |> select ["user"] (
        org: name: {
          email,
          userName,
          firstName,
          lastName,
          ...
        }:
          {
            inherit email userName firstName lastName;
            isEmailVerified = true;
            # Users may edit their own profile; don't fight them on apply.
            lifecycle = {
              ignore_changes = ["first_name" "last_name" "user_name"];
            };
          }
          |> withRef "org" org
          |> toResource "${org}_${name}"
      )
      |> append [
        (forEach "local.extra_users" ["org" "name"] {
            orgId = tfRef "local.orgs[each.value.org]";
            userName = tfRef "each.value.name";
            email = tfRef "each.value.email";
            firstName = tfRef "each.value.firstName";
            lastName = tfRef "each.value.lastName";
            isEmailVerified = true;
          }
          |> toResource "extraUsers")
      ];
    # Global user roles
    # NOTE(review): the filter keeps users that lack `instanceRoles`
    # entirely; the destructure below then requires the option to have a
    # default -- confirm the interface provides one.
    zitadel_instance_member =
      settings.organization
      |> filterAttrsRecursive (n: v: !(v ? "instanceRoles" && (length v.instanceRoles) == 0))
      |> select ["user"] (
        org: name: {instanceRoles, ...}:
          {roles = instanceRoles;}
          |> withRef "user" "${org}_${name}"
          |> toResource "${org}_${name}"
      );
    # Organization specific roles
    zitadel_org_member =
      settings.organization
      |> filterAttrsRecursive (n: v: !(v ? "roles" && (length v.roles) == 0))
      |> select ["user"] (
        org: name: {roles, ...}:
          {inherit roles;}
          |> withRef "org" org
          |> withRef "user" "${org}_${name}"
          |> toResource "${org}_${name}"
      );
    # Organization's actions
    zitadel_action =
      settings.organization
      |> select ["action"] (
        org: name: {
          timeout,
          allowedToFail,
          script,
          ...
        }:
          {
            inherit allowedToFail name;
            timeout = "${toString timeout}s";
            # Zitadel expects the script to define a const named after the action.
            script = "const ${name} = ${script}";
          }
          |> withRef "org" org
          |> toResource "${org}_${name}"
      );
    # Organization's action assignments
    zitadel_trigger_actions =
      settings.organization
      |> concatMapAttrs (
        org: {triggers, ...}:
          triggers
          |> imap0 (i: {
            flowType,
            triggerType,
            actions,
            ...
          }: (
            let
              # Triggers have no natural name; index them per org.
              name = "trigger_${toString i}";
            in
              {
                inherit flowType triggerType;
                actionIds =
                  actions
                  |> map (action: (tfRef "zitadel_action.${org}_${toSnakeCase action}.id"));
              }
              |> withRef "org" org
              |> toResource "${org}_${name}"
          ))
          |> listToAttrs
      );
    # SMTP config
    zitadel_smtp_config.default = {
      sender_address = "chris@kruining.eu";
      sender_name = "no-reply (Zitadel)";
      tls = true;
      host = "black-mail.nl:587";
      user = "chris@kruining.eu";
      # Read at apply time so the secret never enters the nix store.
      password = tfRef "file(\"${email_password}\")";
      set_active = true;
    };
    # Client credentials per app, written to disk for consuming services.
    # NOTE(review): unlike the resources above this names the pair directly
    # (no toResource / snake_casing) -- confirm that is intentional.
    local_sensitive_file =
      settings.organization
      |> select ["project" "application"] (
        org: project: name: {exportMap, ...}:
          nameValuePair "${org}_${project}_${name}" {
            content = ''
              ${
                if exportMap.client_id != null
                then exportMap.client_id
                else "CLIENT_ID"
              }=${tfRef "resource.zitadel_application_oidc.${org}_${project}_${name}.client_id"}
              ${
                if exportMap.client_secret != null
                then exportMap.client_secret
                else "CLIENT_SECRET"
              }=${tfRef "resource.zitadel_application_oidc.${org}_${project}_${name}.client_secret"}
            '';
            filename = "/var/lib/zitadel/clients/${org}_${project}_${name}";
          }
      );
  };
};
in {
# Returns NixOS config with a oneshot unit that applies the Zitadel
# terranix/OpenTofu configuration once the zitadel service is up.
# `args` is forwarded verbatim to createTerranixModule
# (users, email_password, key_file).
createInfra = args @ {...}: let
  tofu = "${lib.getExe pkgs.opentofu} -input=false";
  # Rendered config.tf.json for the whole tenant.
  terraformConfiguration = self.inputs.terranix.lib.terranixConfiguration {
    system = pkgs.stdenv.hostPlatform.system;
    modules = [
      (createTerranixModule args)
    ];
  };
in {
  systemd.services."infra-zitadel" = {
    description = "Infra for Zitadel";
    wantedBy = ["multi-user.target"];
    wants = ["zitadel.service"];
    after = ["zitadel.service"];
    # BUG FIX: group was "media" (copy-paste from the servarr unit); this
    # unit runs as zitadel:zitadel. The directory itself is created by
    # StateDirectory= below, so this only tightens the mode.
    preStart = ''
      install -d -m 0770 -o zitadel -g zitadel /var/lib/infra-zitadel
    '';
    script = ''
      # Sleep for a bit to give the service a chance to start up
      sleep 5s

      if [ "$(systemctl is-active zitadel)" != "active" ]; then
        echo "zitadel is not running"
        exit 1
      fi

      # Print the path to the source for easier debugging
      echo "config location: ${terraformConfiguration}"

      # Copy infra code into workspace
      cp -f ${terraformConfiguration} config.tf.json

      # Initialize OpenTofu
      ${tofu} init

      # Run the infrastructure code
      ${tofu} plan -out=tfplan
      ${tofu} apply -json -auto-approve tfplan
    '';
    serviceConfig = {
      Type = "oneshot";
      User = "zitadel";
      Group = "zitadel";
      # BUG FIX: StateDirectory= takes a name relative to /var/lib, not an
      # absolute path. systemd now creates /var/lib/infra-zitadel (owned by
      # the unit's user/group) before preStart, which runs unprivileged.
      StateDirectory = "infra-zitadel";
      # BUG FIX: the script uses relative paths (config.tf.json, tofu
      # state), so it must run inside the state directory.
      WorkingDirectory = "/var/lib/infra-zitadel";
    };
  };
};
}

View file

View file

@ -0,0 +1,169 @@
# Clan service providing the shared persistence layer (a PostgreSQL
# instance) for the machine. Other services export
# `persistence.databases = [...]`; this module collects those requests,
# creates one database + role + password per entry, and exports the
# connection endpoint back.
{
  lib,
  clanLib,
  exports,
  ...
}: let
  inherit (builtins) toString;
in {
  _class = "clan.service";
  manifest = {
    name = "arda/persistence";
    # Typo fix: "resrouce(s)" -> "resource(s)".
    description = ''
      Configuration of persistence resource(s)
      (for now this means a database. and specifically it means postgres)
    '';
    readme = builtins.readFile ./README.md;
    exports = {
      inputs = ["persistence"];
      out = ["persistence"];
    };
  };
  roles.default = {
    description = '''';
    interface = {lib, ...}: let
      inherit (lib) mkOption types;
    in {
      options = {
        # TCP port postgres listens on.
        port = mkOption {
          type = types.port;
          default = 5432;
        };
      };
    };
    perInstance = {
      mkExports,
      machine,
      settings,
      ...
    }: let
      # Every database name any service in the clan requested via its
      # `persistence.databases` export, flattened into one list.
      requested_databases =
        exports
        |> clanLib.selectExports (_scope: true)
        |> lib.mapAttrsToList (_: value: value.persistence.databases or [])
        |> lib.concatLists;
    in {
      exports = mkExports {
        persistence = {
          main = "postgresql";
          driver.postgresql = {
            host = "localhost";
            port = settings.port;
          };
        };
      };
      nixosModule = {
        lib,
        pkgs,
        config,
        ...
      }: {
        clan.core.vars.generators.postgresql = let
          # One non-deployed password secret per requested database; the
          # consuming service's generator reads it via `$in/postgresql/`.
          password_files =
            requested_databases
            |> lib.map (db: [
              {
                name = "${db}_password";
                value = {
                  secret = true;
                  deploy = false;
                };
              }
            ])
            |> lib.concatLists
            |> lib.listToAttrs;
        in {
          files =
            {
              "server.crt" = {
                secret = true;
                deploy = true;
              };
              "server.key" = {
                secret = true;
                deploy = true;
              };
              ".pgpass" = {
                secret = true;
                deploy = true;
                owner = "postgres";
                group = "postgres";
                mode = "0600";
                restartUnits = ["postgresql.service"];
              };
            }
            // password_files;
          runtimeInputs = with pkgs; [openssl_3_5 pwgen];
          # NOTE(review): lib.map / lib.join are not part of stock nixpkgs
          # lib -- presumably provided by an extended lib; confirm.
          script = ''
            openssl req \
              -new -x509 -days 365 -nodes -text \
              -out $out/server.crt \
              -keyout $out/server.key \
              -subj "/CN=db.${config.networking.fqdn}"
            ${requested_databases
              |> lib.map (db: "pwgen -s 128 1 > $out/${db}_password")
              |> lib.join "\n"}
            cat << EOL > $out/.pgpass
            #host:port:database:user:password
            ${requested_databases
              |> lib.map (db: "*:${toString settings.port}:${db}:${db}:$(cat $out/${db}_password)")
              |> lib.join "\n"}
            EOL
          '';
        };
        systemd.services.postgresql.environment.PGPASSFILE = config.clan.core.vars.generators.postgresql.files.".pgpass".path;
        services = {
          postgresql = {
            enable = true;
            # enableTCPIP = true;
            settings = {
              port = settings.port;
              ssl = true;
            };
            ensureDatabases = requested_databases;
            # One login role per database, owning "its" database.
            # NOTE(review): confirm ensureClauses supports connection_limit
            # on the pinned nixpkgs release.
            ensureUsers =
              requested_databases
              |> lib.map (db: {
                name = db;
                ensureDBOwnership = true;
                ensureClauses = {
                  login = true;
                  connection_limit = 5;
                };
              });
            identMap = ''
              #map sys user db user
              superuser_map root postgres
              superuser_map postgres postgres
              superuser_map /^(.+)$ \1
            '';
            authentication = ''
              # Generated file, do not edit!
              # type database user auth-method optional_ident_map
              local sameuser all peer map=superuser_map
              # TYPE DATABASE USER ADDRESS METHOD
              # local all all trust
              host all all 127.0.0.1/32 scram-sha-256
              host all all ::1/128 scram-sha-256
            '';
          };
        };
      };
    };
  };
}

View file

@ -0,0 +1,13 @@
# Flake-parts module that registers the persistence service implementation
# with clan.
{...}: {
  clan.modules.persistence = ./default.nix;

  # Disabled test scaffold, kept for future activation:
  # perSystem = {...}: {
  #   clan.nixosTests.persistence = {
  #     imports = [];
  #     clan.modules."@arda/persistence" = ./default.nix;
  #   };
  # };
}

View file

View file

@ -0,0 +1,150 @@
# Clan service wiring up the *arr media stack (radarr/sonarr/...)
# plus its downloaders (sabnzbd, qbittorrent) and flaresolverr.
# Per-service details live in ./lib.nix; this file handles option
# declaration, port allocation and the shared tfvars generator.
{
  exports,
  clanLib,
  lib,
  ...
}: let
  # NOTE(review): appears unused in this file, and toString is a builtin
  # rather than a stock nixpkgs lib attribute -- confirm before relying on it.
  inherit (lib) toString;
in {
  _class = "clan.service";
  manifest = {
    name = "arda/servarr";
    description = '''';
    categories = ["Service" "Media"];
    readme = builtins.readFile ./README.md;
    exports = {
      inputs = ["persistence"];
      out = ["gateway" "persistence"];
    };
  };
  roles.default = {
    description = '''';
    interface = {lib, ...}: let
      inherit (lib) mkOption mkEnableOption types;
    in {
      options = {
        enable = mkEnableOption "Enable configured *arr services";
        # Endpoint (host/port) of the shared postgres instance.
        database = mkOption {
          type = types.anything; #ardaLib.types.endpoint;
        };
        services = mkOption {
          type = types.attrsOf (types.submodule ({name, ...}: {
            options = {
              enable = mkEnableOption "Enable ${name}" // {default = true;};
              debug = mkEnableOption "Use tofu plan instead of tofu apply for ${name} ";
              # Media library folders the service should register and own.
              rootFolders = mkOption {
                type = types.listOf types.str;
                default = [];
              };
            };
          }));
          default = {};
          description = ''
            Settings foreach *arr service
          '';
        };
      };
    };
    perInstance = {
      instanceName,
      settings,
      machine,
      roles,
      mkExports,
      ...
    }: {
      exports = mkExports {
        # endpoints.hosts =
        #   settings.services
        #   |> lib.attrNames
        #   |> (s: lib.concat s ["sabnzbd" "qbittorrent" "flaresolverr"])
        #   |> lib.map (service: "${service}.${machine.name}.arda");
        # Request one database per configured *arr service from the
        # persistence provider.
        persistence.databases =
          settings.services
          |> lib.attrNames;
        # Reverse-proxy entries; the 2000+i scheme (over sorted attrNames)
        # must stay in sync with createModule in ./lib.nix.
        gateway.services =
          settings.services
          |> lib.attrNames
          # |> (s: lib.concat s ["sabnzbd" "qbittorrent" "flaresolverr"])
          |> lib.imap1 (i: name: {
            inherit name;
            value = {
              endpoint.port = 2000 + i;
            };
          })
          |> lib.listToAttrs;
      };
      nixosModule = args @ {
        config,
        lib,
        pkgs,
        ...
      }: let
        services = settings.services |> lib.attrNames;
        service_count = services |> lib.length;
        servarr = import ./lib.nix (args // {inherit settings;});
      in {
        imports = [
          # Downloaders get the ports directly above the *arr range.
          (import ./sabnzbd.nix (args
            // {
              inherit settings;
              port = 2000 + service_count + 1;
            }))
          (import ./qbittorrent.nix (args
            // {
              inherit settings;
              port = 2000 + service_count + 2;
            }))
          (servarr.createModule settings.services)
        ];
        config = {
          # Collects every per-service API key into one tfvars file that the
          # infra-<service> oneshots pass to OpenTofu.
          clan.core.vars.generators.servarr = rec {
            dependencies =
              services ++ ["sabnzbd" "qbittorrent"];
            files."config.tfvars" = {
              owner = "media";
              group = "media";
              mode = "0440";
              restartUnits = services |> lib.map (s: "${s}.service");
            };
            # NOTE(review): qbittorrent has no API key; its webui password is
            # reused as the download-client credential -- see lib.nix.
            script = ''
              cat << EOL > $out/config.tfvars
              ${
                services
                |> lib.map (s: "${s}_api_key = \"$(cat $in/${s}/api_key)\"")
                |> lib.join "\n"
              }
              qbittorrent_api_key = "$(cat $in/qbittorrent/password)"
              sabnzbd_api_key = "$(cat $in/sabnzbd/api_key)"
              EOL
            '';
          };
          services = {
            flaresolverr = {
              enable = true;
              openFirewall = true;
              port = 2000 + service_count + 3;
            };
          };
        };
      };
    };
  };
  perMachine = {...}: {
  };
}

View file

@ -0,0 +1,13 @@
# Flake-parts module that registers the servarr service implementation
# with clan.
{...}: {
  clan.modules.servarr = ./default.nix;

  # Disabled test scaffold, kept for future activation:
  # perSystem = {...}: {
  #   clan.nixosTests.servarr = {
  #     imports = [];
  #     clan.modules."@arda/servarr" = ./default.nix;
  #   };
  # };
}

View file

@ -0,0 +1,329 @@
{
self,
config,
lib,
pkgs,
settings,
...
}: let
inherit (lib) mkIf;
# Builds the clan vars generator for one *arr service: a fresh API key and
# an environment file carrying the API key + postgres password into the
# service via `environmentFiles`.
createGenerator = {
  service,
  options,
  ...
}: {
  # The postgres generator must run first so `$in/postgresql/<service>_password` exists.
  dependencies = ["postgresql"];
  files = {
    api_key = {
      secret = true;
      deploy = true;
      owner = service;
      group = "media";
      restartUnits = ["${service}.service"];
    };
    "config.env" = {
      secret = true;
      deploy = true;
      owner = service;
      group = "media";
      restartUnits = ["${service}.service"];
    };
  };
  runtimeInputs = with pkgs; [pwgen];
  # NOTE(review): *arr usually reads SERVICE__POSTGRES__PASSWORD (double
  # underscore between section and key); confirm the POSTGRES_PASSWORD
  # spelling against the deployed *arr version.
  script = ''
    pwgen -s 128 1 > $out/api_key
    cat << EOL > $out/config.env
    ${lib.toUpper service}__AUTH__APIKEY="$(cat $out/api_key)"
    ${lib.toUpper service}__POSTGRES_PASSWORD="$(cat $in/postgresql/${service}_password)"
    EOL
  '';
};
# Builds the NixOS `services.<service>` definition for one *arr service.
# `options` carries the allocated port plus the submodule settings.
createService = {
  service,
  options,
  ...
}: let
  inherit (builtins) toString;
in
  {
    enable = true;
    # openFirewall = true;
    # Secrets (API key, postgres password) come in via the generated env file.
    environmentFiles = [
      config.clan.core.vars.generators.${service}.files."config.env".path
    ];
    settings = {
      auth.authenticationMethod = "External";
      server = {
        # Loopback only; exposure happens through the gateway.
        bindaddress = "[::1]";
        port = options.port;
      };
      # Password provided via environment file
      postgres = {
        host = settings.database.host;
        port = toString settings.database.port;
        user = service;
        maindb = service;
        logdb = service;
      };
    };
  }
  # The media managers need shared group access to the library folders.
  // (lib.optionalAttrs (lib.elem service ["radarr" "sonarr" "lidarr" "whisparr"]) {
    user = service;
    group = "media";
  });
# Builds the "infra-<service>" oneshot unit that applies this service's
# terranix/OpenTofu configuration against the locally running *arr
# instance. The full `args` set is forwarded to createInfra.
createSystemdService = args @ {
  service,
  options,
  ...
}: let
  tofu = lib.getExe pkgs.opentofu;
  # Rendered config.tf.json for this service only.
  terraformConfiguration = self.inputs.terranix.lib.terranixConfiguration {
    system = pkgs.stdenv.hostPlatform.system;
    modules = [
      (createInfra args)
    ];
  };
in {
  description = "${service} apply infra";
  wantedBy = ["multi-user.target"];
  wants = ["${service}.service"];
  # Order after the service so the sleep/is-active probe below has a chance.
  after = ["${service}.service"];
  preStart = ''
    install -d -m 0770 -o ${service} -g media /var/lib/infra-${service}
    ${
      options.rootFolders
      |> lib.map (folder: "install -d -m 0770 -o media -g media ${folder}")
      |> lib.join "\n"
    }
  '';
  script = ''
    # Sleep for a bit to give the service a chance to start up
    sleep 5s

    if [ "$(systemctl is-active ${lib.escapeShellArg service})" != "active" ]; then
      echo "${service} is not running"
      exit 1
    fi

    # Print the path to the source for easier debugging
    echo "config location: ${terraformConfiguration}"

    # Copy infra code into workspace
    cp -f ${terraformConfiguration} config.tf.json

    # Initialize OpenTofu
    ${tofu} init

    # Run the infrastructure code
    ${tofu} \
      ${
      if options.debug
      then "plan"
      else "apply -auto-approve"
    } \
      -var-file='${config.clan.core.vars.generators.servarr.files."config.tfvars".path}'
  '';
  serviceConfig = {
    Type = "oneshot";
    User = service;
    Group = "media";
    # StateDirectory= takes a name relative to /var/lib; systemd creates
    # the directory (owned by the unit's user) before preStart runs, which
    # is required because preStart itself runs unprivileged as ${service}
    # and cannot mkdir under /var/lib.
    StateDirectory = "infra-${service}";
    # BUG FIX: previously pointed at /var/lib/${service}-apply-infra, which
    # nothing ever created; the script's relative paths (config.tf.json,
    # tofu state) must live in the directory preStart prepares.
    WorkingDirectory = "/var/lib/infra-${service}";
    EnvironmentFile = [
      config.clan.core.vars.generators.${service}.files."config.env".path
    ];
  };
};
# Returns a module to be used in a modules list of terranix.
# Declares, per *arr service: provider pinning, API-key variables, root
# folders, the qbittorrent/sabnzbd download clients and -- for prowlarr
# only -- the application links and indexers.
createInfra = {
  service,
  options,
  ...
}: terra: let
  inherit (terra.lib) tfRef;
in {
  # API keys arrive via the shared config.tfvars (-var-file), never the store.
  variable = {
    "${service}_api_key" = {
      type = "string";
      description = "${service} API key";
    };
    qbittorrent_api_key = {
      type = "string";
      description = "qbittorrent api key";
    };
    sabnzbd_api_key = {
      type = "string";
      description = "sabnzbd api key";
    };
  };
  terraform.required_providers.${service} = {
    source = "devopsarr/${service}";
    # Pinned provider version per service.
    version =
      {
        radarr = "2.3.5";
        sonarr = "3.4.2";
        prowlarr = "3.2.1";
        lidarr = "1.13.0";
        readarr = "2.1.0";
        whisparr = "1.2.0";
      }.${
        service
      };
  };
  provider.${service} = {
    url = "http://[::1]:${toString options.port}";
    api_key = tfRef "var.${service}_api_key";
  };
  resource =
    {
      "${service}_root_folder" = mkIf (lib.elem service ["radarr" "sonarr" "whisparr" "readarr"]) (
        options.rootFolders
        # FIX: lib.imap is a deprecated alias; imap1 has the same 1-based
        # semantics (keeps the local1..localN names) and matches the imap1
        # usage elsewhere in this module.
        |> lib.imap1 (i: f: lib.nameValuePair "local${toString i}" {path = f;})
        |> lib.listToAttrs
      );
      "${service}_download_client_qbittorrent" = mkIf (lib.elem service ["radarr" "sonarr" "lidarr" "whisparr"]) {
        "main" = {
          name = "qBittorrent";
          enable = true;
          priority = 1;
          host = "localhost";
          username = "admin";
          # The qbittorrent webui password doubles as the credential here.
          password = tfRef "var.qbittorrent_api_key";
          url_base = "/";
          port = config.services.qbittorrent.webuiPort;
        };
      };
      "${service}_download_client_sabnzbd" = mkIf (lib.elem service ["radarr" "sonarr" "lidarr" "whisparr"]) {
        "main" = {
          name = "SABnzbd";
          enable = true;
          priority = 1;
          host = "localhost";
          api_key = tfRef "var.sabnzbd_api_key";
          url_base = "/";
          port = config.services.sabnzbd.settings.misc.port;
        };
      };
    }
    # Prowlarr additionally links every media manager and declares indexers.
    // (lib.optionalAttrs (service == "prowlarr") (
      settings.services
      |> lib.filterAttrs (s: _: lib.elem s ["radarr" "sonarr" "lidarr" "whisparr"])
      |> lib.mapAttrsToList (s: {port, ...}: {
        "prowlarr_application_${s}"."main" = let
          p = config.services.prowlarr.settings.server.port or 9696;
        in {
          name = s;
          sync_level = "addOnly";
          base_url = "http://localhost:${toString port}";
          prowlarr_url = "http://localhost:${toString p}";
          api_key = tfRef "var.${s}_api_key";
        };
      })
      |> lib.concat [
        {
          "prowlarr_indexer" = {
            "nyaa" = {
              enable = true;
              app_profile_id = 1;
              priority = 1;
              name = "Nyaa";
              implementation = "Cardigann";
              config_contract = "CardigannSettings";
              protocol = "torrent";
              fields = [
                {
                  name = "definitionFile";
                  text_value = "nyaasi";
                }
                {
                  name = "baseSettings.limitsUnit";
                  number_value = 0;
                }
                {
                  name = "torrentBaseSettings.preferMagnetUrl";
                  bool_value = false;
                }
                {
                  name = "prefer_magnet_links";
                  bool_value = true;
                }
                {
                  name = "sonarr_compatibility";
                  bool_value = false;
                }
                {
                  name = "strip_s01";
                  bool_value = false;
                }
                {
                  name = "radarr_compatibility";
                  bool_value = false;
                }
                {
                  name = "filter-id";
                  number_value = 0;
                }
                {
                  name = "cat-id";
                  number_value = 0;
                }
                {
                  name = "sort";
                  number_value = 0;
                }
                {
                  name = "type";
                  number_value = 1;
                }
              ];
            };
          };
        }
      ]
      |> lib.mkMerge
    ));
};
in {
# Expands the per-service settings map into one merged NixOS config:
# for each *arr service a vars generator, the service definition and an
# "infra-<service>" apply unit (the latter gated on settings.enable).
createModule = services: args: {
  config =
    services
    |> lib.attrsToList
    # Ports are assigned 2001.. in attrNames order; this must match the
    # gateway export in ./default.nix (also imap1 over sorted names).
    |> lib.imap1 (i: {
      name,
      value,
    }: let
      service = name;
      options = value // {port = 2000 + i;};
    in {
      clan.core.vars.generators.${service} = createGenerator (args // {inherit service options;});
      services.${service} = createService (args // {inherit service options;});
      systemd.services."infra-${service}" = lib.mkIf settings.enable (createSystemdService (args // {inherit service options;}));
    })
    |> lib.mkMerge;
};
}

View file

@ -0,0 +1,96 @@
# qBittorrent with a generated admin password: the password is hashed the
# way qBittorrent stores it and installed as a declarative config file at
# activation time. `port` is allocated by the servarr service module.
{
  config,
  pkgs,
  lib,
  settings,
  port,
  ...
}: {
  clan.core.vars.generators.qbittorrent = let
    # Hashes a password in qBittorrent's on-disk format:
    # PBKDF2-HMAC-SHA512, 100k iterations, random 16-byte salt, rendered
    # as "@ByteArray(<b64 salt>:<b64 hash>)".
    hash_password = pkgs.writers.writePython3 "hashPassword" {} ''
      import base64
      import hashlib
      import sys
      import uuid

      password = sys.argv[1]
      salt = uuid.uuid4()
      salt_bytes = salt.bytes
      password = str.encode(password)
      hashed_password = hashlib.pbkdf2_hmac(
          "sha512",
          password,
          salt_bytes,
          100000,
          dklen=64
      )
      b64_salt = base64.b64encode(salt_bytes).decode("utf-8")
      b64_password = base64.b64encode(hashed_password).decode("utf-8")
      password_string = "@ByteArray({salt}:{password})".format(
          salt=b64_salt, password=b64_password
      )
      print(password_string)
    '';
  in {
    files = {
      "password" = {
        secret = true;
        deploy = true;
      };
      "password_hash" = {
        secret = true;
        deploy = true;
      };
      "qBittorrent.conf" = {
        secret = true;
        deploy = true;
        owner = "qbittorrent";
        group = "media";
        mode = "0660";
        restartUnits = ["qbittorrent.service"];
      };
    };
    # FIX: hash_password removed from runtimeInputs -- it is a single
    # executable file (writePython3 output) with no bin/ directory, so it
    # contributed nothing to PATH; it is invoked by absolute store path.
    runtimeInputs = with pkgs; [pwgen];
    script = ''
      pwgen -s 128 1 > $out/password
      ${hash_password} "$(cat $out/password)" > $out/password_hash
      cat << EOF > $out/qBittorrent.conf
      [LegalNotice]
      Accepted=true
      [Preferences]
      WebUI\AlternativeUIEnabled=true
      WebUI\RootFolder=${pkgs.vuetorrent}/share/vuetorrent
      WebUI\Username=admin
      WebUI\Password_PBKDF2=$(cat $out/password_hash)
      EOF
    '';
  };
  system.activationScripts.qbittorrent-config = {
    deps = lib.optional (!config.sops.useSystemdActivation) "setupSecrets";
    # TODO: If sops-nix is switched to systemd activation, add a systemd unit
    # for this install step that runs after sops-install-secrets.service,
    # because this activation-script dependency only orders against setupSecrets.
    text = ''
      install -Dm0600 -o ${config.services.qbittorrent.user} -g ${config.services.qbittorrent.group} \
        ${config.clan.core.vars.generators.qbittorrent.files."qBittorrent.conf".path} \
        ${config.services.qbittorrent.profileDir}/qBittorrent/config/qBittorrent.conf
    '';
  };
  services.qbittorrent = {
    enable = true;
    openFirewall = true;
    webuiPort = port;
    # Declarative serverConfig is forced empty: the real config file is the
    # generated secret installed by the activation script above.
    serverConfig = lib.mkForce {};
    user = "qbittorrent";
    group = "media";
  };
}

View file

@ -0,0 +1,95 @@
# SABnzbd with generated API/NZB keys and prompted usenet credentials,
# merged into the declarative config via secretFiles. `port` is allocated
# by the servarr service module.
{
  config,
  lib,
  pkgs,
  settings,
  port,
  ...
}: {
  clan.core.vars.generators.sabnzbd = {
    files = {
      "api_key" = {
        secret = true;
        deploy = true;
      };
      "nzb_key" = {
        secret = true;
        deploy = true;
      };
      "config.ini" = {
        secret = true;
        deploy = true;
        owner = "sabnzbd";
        group = "media";
        mode = "0660";
        # FIX: redeploying the secret config must restart the service
        # (same pattern as qBittorrent.conf in ./qbittorrent.nix).
        restartUnits = ["sabnzbd.service"];
      };
    };
    prompts = {
      username = {
        description = "usenet username";
        type = "hidden";
        persist = true;
      };
      password = {
        description = "usenet password";
        type = "hidden";
        persist = true;
      };
    };
    runtimeInputs = with pkgs; [pwgen];
    script = ''
      pwgen -s 128 1 > $out/api_key
      pwgen -s 128 1 > $out/nzb_key
      cat << EOF > $out/config.ini
      [misc]
      api_key = $(cat $out/api_key)
      nzb_key = $(cat $out/nzb_key)
      [servers]
      [[news.sunnyusenet.com]]
      username = $(cat $prompts/username)
      password = $(cat $prompts/password)
      EOF
    '';
  };
  services.sabnzbd = {
    enable = true;
    openFirewall = true;
    allowConfigWrite = false;
    # Secrets are merged in via secretFiles instead of a writable config.
    configFile = lib.mkForce null;
    secretFiles = [
      config.clan.core.vars.generators.sabnzbd.files."config.ini".path
    ];
    settings = {
      misc = {
        host = "0.0.0.0";
        port = port;
        host_whitelist = "${config.networking.hostName}";
        download_dir = "/var/media/downloads/incomplete";
        complete_dir = "/var/media/downloads/done";
      };
      servers = {
        "news.sunnyusenet.com" = {
          name = "news.sunnyusenet.com";
          displayname = "news.sunnyusenet.com";
          host = "news.sunnyusenet.com";
          port = 563;
          timeout = 60;
        };
      };
    };
    user = "sabnzbd";
    group = "media";
  };
}

22
devShell.nix Normal file
View file

@ -0,0 +1,22 @@
# Flake-parts module providing the default development shell.
{inputs, ...}: {
  perSystem = {
    pkgs,
    system,
    ...
  }: let
    # Tooling available inside `nix develop`.
    shellPackages = with pkgs; [
      bash
      sops
      just
      yq
      pwgen
      alejandra
      nil
      nixd
      openssl
      inputs.clan-core.packages.${system}.clan-cli
      nix-output-monitor
    ];
  in {
    devShells.default = pkgs.mkShell {packages = shellPackages;};
  };
}

View file

@ -0,0 +1,125 @@
# Mandos as a wake-on-demand build host
## Goal
Mandos is primarily an interactive living-room machine, but it is also a strong candidate for handling remote Nix builds when it is idle. The goal is to make that dual use practical without keeping the machine powered all the time.
## Current context
On `main`, Mandos is configured as an interactive gaming machine:
- `systems/x86_64-linux/mandos/default.nix`
- `sneeuwvlok.hardware.has.gpu.nvidia = true`
- `sneeuwvlok.hardware.has.audio = true`
- `sneeuwvlok.desktop.use = "gamescope"`
- `sneeuwvlok.application.steam.enable = true`
- `homes/x86_64-linux/chris@mandos/default.nix`
- user-facing application set for an interactive machine
This makes Mandos a poor fit for "always running random infrastructure", but a reasonable fit for "available for work when needed".
## Desired behavior
- Mandos remains an interactive machine first.
- Mandos can be used as a remote build worker when no one is actively using it.
- Mandos should not need to stay fully on all day just to be eligible for builds.
- Waking and idling down should be automatic enough that the machine can participate in builds without turning into a maintenance burden.
## Recommended model
### 1. Use wake-on-LAN as the activation mechanism
Mandos should support being awakened by another machine on the same LAN.
Requirements:
- BIOS or UEFI wake-on-LAN support enabled
- NixOS interface configuration enabling wake-on-LAN
- one low-power machine that is effectively always available to send wake requests
In this repo, `ulmo` is the obvious candidate to act as the coordinator, but the pattern should stay generic: one machine is always reachable, and one or more stronger machines can be woken on demand.
### 2. Prefer suspend-first over shutdown-first
There are two main power states worth considering:
- **Suspend on idle**
- faster resume
- generally better user experience
- often easier to make reliable for wake-on-LAN
- **Shutdown on idle**
- lowest power draw
- more fragile in practice because firmware support for wake from soft-off varies
- longer time to become available again
Recommended rollout order:
1. Prove the concept with suspend on idle.
2. Only consider full power-off later if the hardware and firmware behave reliably.
### 3. Add an explicit availability policy
The interesting lesson for tagging is not "Mandos should have a build tag". The interesting lesson is that some machines have a deliberate availability policy that affects how safely they can participate in automation.
A future host-level setting could encode this policy directly, for example:
- `always-on`
- `wake-on-demand`
- `manual`
That setting would be a better source for any computed operational tag than current workload or ad hoc tags.
### 4. Idle detection should be policy-driven
If Mandos becomes a build worker, idle shutdown or suspend should depend on signals such as:
- no local interactive session activity
- no active build job
- no long-running system task that should keep the machine awake
This should not be a blind timer that powers the machine down every X minutes regardless of context.
### 5. Build orchestration needs a coordinator
Wake-on-demand only works well if something else can wake the machine and wait for it to become reachable. In practice, this means:
- a coordinator sends the wake signal
- the build client retries until the machine is reachable
- the remote builder participates only after it is actually ready
The exact implementation can vary, but the architectural point is the same: a wakeable build worker is not self-sufficient.
## Risks and caveats
- Firmware wake support may be unreliable, especially from full shutdown.
- Build latency increases because wake and readiness checks take time.
- A machine that users expect to be immediately available should not surprise them with power-state transitions at awkward moments.
- Interactive workload detection matters; otherwise the machine will feel hostile as a living-room device.
## Recommendation
Treat the Mandos idea as a good pattern, but generalize it:
- some machines are **interactive**
- some machines are **wakeable on demand**
- some machines are suitable for **interruptible background work**
Those are more reusable concepts than "Mandos is the build server".
## Implications for the tag strategy
This investigation strengthens a small part of the `operational:*` space:
- `operational:availability:always-on`
- `operational:availability:wake-on-demand`
- `operational:workload:interruptible`
These should not be assigned by hand if they can instead be computed from explicit machine settings that describe availability policy.
## References
- Clan inventory tags and dynamic tags docs: `https://clan.lol/docs/25.11/reference/options/clan_inventory`
- NixOS Wake-on-LAN wiki: `https://wiki.nixos.org/wiki/Wake_on_LAN`
- Home-lab wake-on-demand discussion and patterns:
- `https://dgross.ca/blog/linux-home-server-auto-sleep`
- `https://danielpgross.github.io/friendly_neighbor/howto-sleep-wake-on-demand.html`

View file

@ -0,0 +1,235 @@
# Clan machine tagging strategy
## Goal
Replace machine-name targeting with stable tags that survive machine renames, hardware reshuffles, and service moves.
The strategy should fit how this repo is evolving:
- machine tags should describe the machine
- service roles should describe service topology
- computed tags should be derived from machine settings or other explicit metadata, not from other tags
## Source material
This plan is based on:
- current Clan inventory in `clan.nix`
- current machine configs under `machines/*/configuration.nix`
- workload and module usage on `main` under:
- `systems/x86_64-linux/*/default.nix`
- `homes/x86_64-linux/chris@*/default.nix`
- Clan inventory tag and dynamic-tag documentation
## Guiding principles
### 1. Prefer capabilities over roles
A machine rarely has one permanent role. In this repo especially, a machine may be interactive, portable, build-capable, and temporarily host some service at the same time.
Because of that, tags should describe durable traits and capabilities rather than trying to answer "what is this machine?"
### 2. Do not encode current workload as a machine tag
A machine currently running Grafana, Jellyfin, or PostgreSQL does not mean that those should become machine tags. Those are current placements, not stable identity.
If a service can move, its current presence is weak evidence for tagging.
### 3. Use service roles for topology
Some relationships belong in service definitions rather than host tags.
Examples:
- NFS producer and consumer
- persistence provider and client
- reverse proxy frontend and backend
These are not machine identity tags; they are service-topology relationships.
### 4. Derive tags from settings when possible
If a machine setting already captures a fact, derive the tag from that setting instead of duplicating it by hand.
Good examples in this repo:
- `desktop.use` can imply whether a machine is interactive
- `hardware.has.gpu.*` can imply GPU availability
- `hardware.has.audio` can imply audio capability
- `hardware.has.bluetooth` can imply Bluetooth capability
### 5. Avoid deriving tags from other tags
Clan supports dynamic tags, but tag-from-tag derivation can become fragile and can even recurse. If tags need computation, compute them from machine settings or an explicit metadata source instead.
## Proposed namespaces
Use full words:
- `capability:*`
- `operational:*`
The intention is:
- `capability:*` describes stable machine traits
- `operational:*` describes automation-relevant policy or availability behavior
## Tag catalog
This is the current list of tags discussed so far, grouped by status.
### Agreed capability tags
- `capability:runtime:interactive`
- `capability:runtime:headless`
- `capability:hardware:gpu`
- `capability:hardware:audio`
- `capability:hardware:bluetooth`
- `capability:mobility:portable`
- `capability:mobility:stationary`
### Agreed operational tags
- `operational:availability:always-on`
- `operational:availability:wake-on-demand`
- `operational:availability:manual`
- `operational:workload:interruptible`
### Explicitly rejected or deferred
- GPU vendor-specific tags such as AMD- or NVIDIA-specific variants
- service-presence tags such as Jellyfin, Grafana, Forgejo, or PostgreSQL
- service-topology tags such as NFS producer or consumer
- application-presence tags such as Discord or TeamSpeak
- desktop-environment tags such as Plasma or Gamescope
- location tags such as "living room" unless location later becomes a deliberate scheduling dimension
## Current static tags in `clan.nix`
These are the manually assigned tags currently present in the inventory. Settings-derived tags are intentionally not listed here because they are meant to be computed rather than maintained by hand.
- `mandos`
- `capability:mobility:stationary`
- `operational:availability:wake-on-demand`
- `manwe`
- `capability:mobility:stationary`
- `operational:availability:manual`
- `orome`
- `capability:mobility:portable`
- `operational:availability:manual`
- `tulkas`
- `capability:mobility:portable`
- `operational:availability:manual`
- `ulmo`
- `capability:mobility:stationary`
- `operational:availability:always-on`
## Capability tags
These are the strongest candidates for machine tags.
### Runtime
- `capability:runtime:interactive`
- `capability:runtime:headless`
These are directly useful for deciding where a service with a user-facing local experience does or does not belong.
### Hardware
- `capability:hardware:gpu`
- `capability:hardware:audio`
- `capability:hardware:bluetooth`
At the moment, the repo provides enough configuration structure to derive these from machine settings.
GPU vendor-specific tags are intentionally excluded for now. The current conclusion is that the presence of GPU hardware may matter, but the vendor usually does not unless there is a specific workload that depends on CUDA, ROCm, or a similar stack.
### Mobility
- `capability:mobility:portable`
- `capability:mobility:stationary`
These are useful concepts, but they are not currently derivable from any single, uniform machine setting in the repo. If they become desirable, they likely need either:
- an explicit machine setting, or
- a stronger convention around machine form factor
For now they are candidates, not automatic defaults.
## Operational tags
Operational tags are weaker than capability tags and should stay small in number.
They should only exist when they capture real automation constraints that are not already represented elsewhere.
### Availability
- `operational:availability:always-on`
- `operational:availability:wake-on-demand`
- `operational:availability:manual`
This dimension became clearer while thinking through the Mandos build-host idea. A machine may be technically capable of a workload, while its availability policy determines whether it is a sensible target.
These tags should not be guessed from existing workloads. They should come from an explicit machine setting that states the intended availability policy.
### Interruptibility
- `operational:workload:interruptible`
This is not about the machine by itself. It is a useful policy boundary for selecting machines that may host work that can be delayed, retried, paused, or moved.
If introduced, it should again come from explicit machine policy rather than being inferred from current services.
## What should not become machine tags
- current service assignments, such as Jellyfin, Grafana, Forgejo, or PostgreSQL
- service topology, such as NFS producer or consumer
- user application presence, such as Discord or TeamSpeak
- detailed desktop-environment choice, such as Plasma or Gamescope
- one-off descriptions like "living room" unless location becomes a deliberate scheduling dimension
## What is derivable today
The repo already contains enough structure to derive several useful capability tags.
Examples from the current configuration style:
- if a machine enables a desktop session, derive `capability:runtime:interactive`
- if a machine does not, derive `capability:runtime:headless`
- if a machine enables `hardware.has.audio`, derive `capability:hardware:audio`
- if a machine enables `hardware.has.bluetooth`, derive `capability:hardware:bluetooth`
- if a machine enables any `hardware.has.gpu.*`, derive `capability:hardware:gpu`
## What probably needs explicit policy
These should not be inferred from current services or tag combinations:
- `operational:availability:*`
- `operational:workload:interruptible`
- mobility-related tags if there is no explicit machine setting to derive them from
The clean way to support these is to introduce one or more explicit machine settings whose purpose is to describe machine policy rather than workload.
## Mandos update
The Mandos wake-on-demand build-host idea adds an important refinement:
- some machines should be eligible for background work only when they are available through a specific policy, such as wake-on-demand
This does **not** mean Mandos should get a hand-maintained "build server" tag.
It instead suggests a more generic pattern:
- a machine may be interactive
- a machine may be available on demand rather than always on
- that availability policy may influence whether certain classes of automation should target it
That strengthens the case for a very small `operational:*` namespace derived from explicit machine policy.
## Recommended next steps
1. Start with `capability:*` tags that are clearly derivable from machine settings.
2. Keep service topology in service roles instead of machine tags.
3. If availability policy becomes important, add an explicit machine setting for it and derive `operational:*` tags from that setting.
4. Avoid expanding the tag vocabulary until there is a clear service-selection use case for each added tag.

668
flake.lock generated

File diff suppressed because it is too large Load diff

124
flake.nix
View file

@ -3,13 +3,22 @@
nixConfig = {
warn-dirty = false;
extra-experimental-features = ["nix-command" "flakes" "pipe-operators"];
};
inputs = {
nixpkgs.url = "github:nixos/nixpkgs/nixos-unstable";
snowfall-lib = {
url = "github:snowfallorg/lib";
flake-parts = {
url = "github:hercules-ci/flake-parts";
inputs.nixpkgs-lib.follows = "nixpkgs";
};
import-tree.url = "github:vic/import-tree";
systems.url = "github:nix-systems/default";
sops-nix.url = "github:Mic92/sops-nix";
disko = {
url = "github:nix-community/disko";
inputs.nixpkgs.follows = "nixpkgs";
};
@ -18,25 +27,32 @@
inputs.nixpkgs.follows = "nixpkgs";
};
terranix = {
url = "github:terranix/terranix";
inputs.nixpkgs.follows = "nixpkgs";
inputs.flake-parts.follows = "flake-parts";
};
clan-core = {
url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
inputs = {
flake-parts.follows = "flake-parts";
nixpkgs.follows = "nixpkgs";
sops-nix.follows = "sops-nix";
disko.follows = "disko";
systems.follows = "systems";
};
};
plasma-manager = {
url = "github:nix-community/plasma-manager";
inputs.nixpkgs.follows = "nixpkgs";
inputs.home-manager.follows = "home-manager";
};
nixos-generators = {
url = "github:nix-community/nixos-generators";
inputs.nixpkgs.follows = "nixpkgs";
};
# neovim
nvf.url = "github:notashelf/nvf";
# plymouth theme
nixos-boot.url = "github:Melkor333/nixos-boot";
firefox.url = "github:nix-community/flake-firefox-nightly";
stylix.url = "github:nix-community/stylix";
# Rust toolchain
@ -54,8 +70,6 @@
flux.url = "github:IogaMaster/flux";
sops-nix.url = "github:Mic92/sops-nix";
# Azure AD for linux
himmelblau = {
url = "github:himmelblau-idm/himmelblau";
@ -75,58 +89,34 @@
url = "github:vinceliuice/grub2-themes";
};
nixos-wsl = {
url = "github:nix-community/nixos-wsl";
inputs = {
nixpkgs.follows = "nixpkgs";
flake-compat.follows = "";
};
};
terranix = {
url = "github:terranix/terranix";
inputs.nixpkgs.follows = "nixpkgs";
};
clan-core = {
url = "https://git.clan.lol/clan/clan-core/archive/main.tar.gz";
inputs.nixpkgs.follows = "nixpkgs";
};
mydia = {
url = "github:chris-kruining/mydia";
# url = "github:getmydia/mydia";
};
};
outputs = inputs:
inputs.snowfall-lib.mkFlake {
inherit inputs;
src = ./.;
outputs = inputs @ {
flake-parts,
nixpkgs,
systems,
...
}:
flake-parts.lib.mkFlake {inherit inputs;} {
systems = import systems;
snowfall = {
namespace = "sneeuwvlok";
meta = {
name = "sneeuwvlok";
title = "Sneeuwvlok";
};
};
channels-config = {
allowUnfree = true;
permittedInsecurePackages = [
# Due to *arr stack
"dotnet-sdk-6.0.428"
"aspnetcore-runtime-6.0.36"
# I think this is because of zen
"qtwebengine-5.15.19"
# For Nheko, the matrix client
"olm-3.2.16"
imports = with inputs; [
flake-parts.flakeModules.modules
clan-core.flakeModules.default
home-manager.flakeModules.default
./clan/flake-module.nix
./packages/flake-module.nix
./clanServices/flake-module.nix
];
};
perSystem = {system, ...}: {
_module.args = {
pkgs = import nixpkgs {
inherit system;
overlays = with inputs; [
fenix.overlays.default
@ -134,13 +124,19 @@
flux.overlays.default
];
systems.modules = with inputs; [
clan-core.nixosModules.default
];
config = {
allowUnfree = true;
homes.modules = with inputs; [
stylix.homeModules.stylix
plasma-manager.homeModules.plasma-manager
permittedInsecurePackages = [
# I think this is because of zen
"qtwebengine-5.15.19"
# For mautrix-signal, the matrix to signal bridge
"olm-3.2.16"
];
};
};
};
};
};
}

View file

@ -1,36 +0,0 @@
# Home-manager profile: baseline interactive setup (shell, theme, a few apps).
{osConfig, ...}: {
  # Keep home-manager's state version in lock-step with the host system's.
  home.stateVersion = osConfig.system.stateVersion;
  programs.git = {
    settings.user = {
      name = "Chris Kruining";
      email = "chris@kruining.eu";
    };
  };
  sneeuwvlok = {
    # Default program choices; the applications themselves are enabled below.
    defaults = {
      shell = "zsh";
      terminal = "ghostty";
      browser = "zen";
      editor = "zed";
    };
    shell = {
      corePkgs.enable = true;
    };
    themes = {
      enable = true;
      theme = "everforest";
      polarity = "dark";
    };
    application = {
      bitwarden.enable = true;
      teamspeak.enable = true;
      steam.enable = true;
      zen.enable = true;
    };
  };
}

View file

@ -1,59 +0,0 @@
# Home-manager profile: full development workstation (toolchains + desktop apps).
{osConfig, ...}: {
  # Keep home-manager's state version in lock-step with the host system's.
  home.stateVersion = osConfig.system.stateVersion;
  programs.git = {
    settings.user = {
      name = "Chris Kruining";
      email = "chris@kruining.eu";
    };
  };
  sneeuwvlok = {
    # Default program choices; the chosen programs are enabled further below.
    defaults = {
      shell = "zsh";
      terminal = "ghostty";
      browser = "zen";
      editor = "zed";
    };
    shell = {
      corePkgs.enable = true;
    };
    themes = {
      enable = true;
      theme = "everforest";
      polarity = "dark";
    };
    # Language toolchains available on this machine.
    development = {
      rust.enable = true;
      javascript.enable = true;
      dotnet.enable = true;
    };
    application = {
      bitwarden.enable = true;
      discord.enable = true;
      ladybird.enable = true;
      matrix.enable = true;
      obs.enable = true;
      onlyoffice.enable = true;
      signal.enable = true;
      steam.enable = true;
      studio.enable = true;
      teamspeak.enable = true;
      thunderbird.enable = true;
      zen.enable = true;
    };
    shell.zsh.enable = true;
    terminal.ghostty.enable = true;
    editor = {
      zed.enable = true;
      nvim.enable = true;
      nano.enable = true;
    };
  };
}

View file

@ -1,49 +0,0 @@
# Home-manager profile: lighter development setup (JS/.NET only, fewer apps).
{osConfig, ...}: {
  # Keep home-manager's state version in lock-step with the host system's.
  home.stateVersion = osConfig.system.stateVersion;
  programs.git = {
    settings.user = {
      name = "Chris Kruining";
      email = "chris@kruining.eu";
    };
  };
  sneeuwvlok = {
    # Default program choices; the chosen programs are enabled further below.
    defaults = {
      shell = "zsh";
      terminal = "ghostty";
      browser = "zen";
      editor = "zed";
    };
    shell = {
      corePkgs.enable = true;
    };
    themes = {
      enable = true;
      theme = "everforest";
      polarity = "dark";
    };
    # Language toolchains available on this machine.
    development = {
      javascript.enable = true;
      dotnet.enable = true;
    };
    application = {
      bitwarden.enable = true;
      onlyoffice.enable = true;
      signal.enable = true;
      zen.enable = true;
    };
    shell.zsh.enable = true;
    terminal.ghostty.enable = true;
    editor = {
      zed.enable = true;
      nano.enable = true;
    };
  };
}

View file

@ -1,36 +0,0 @@
# Home-manager profile: baseline interactive setup (shell, theme, a few apps).
{osConfig, ...}: {
  # Keep home-manager's state version in lock-step with the host system's.
  home.stateVersion = osConfig.system.stateVersion;
  programs.git = {
    settings.user = {
      name = "Chris Kruining";
      email = "chris@kruining.eu";
    };
  };
  sneeuwvlok = {
    # Default program choices; the applications themselves are enabled below.
    defaults = {
      shell = "zsh";
      terminal = "ghostty";
      browser = "zen";
      editor = "zed";
    };
    shell = {
      corePkgs.enable = true;
    };
    themes = {
      enable = true;
      theme = "everforest";
      polarity = "dark";
    };
    application = {
      bitwarden.enable = true;
      teamspeak.enable = true;
      steam.enable = true;
      zen.enable = true;
    };
  };
}

37
lib/options.nix Normal file
View file

@ -0,0 +1,37 @@
{lib, ...}: let
  inherit (lib) mkOption types;
in {
  # Builds the standard host/port/protocol option trio used to describe a URL.
  #
  # `defaults` may contain `host`, `port` and/or `protocol` attrsets; each one is
  # merged over the generated option, letting callers override any option
  # attribute (e.g. `default`, `description`, `example`).
  mkUrlOptions = defaults: {
    host =
      mkOption {
        type = types.str;
        example = "host.tld";
        description = ''
          Hostname
        '';
      }
      // (defaults.host or {});
    port =
      mkOption {
        type = types.port;
        default = 1234;
        # The example must be an integer to agree with `types.port`;
        # a string here would contradict the declared option type.
        example = 1234;
        description = ''
          Port
        '';
      }
      // (defaults.port or {});
    protocol =
      mkOption {
        type = types.str;
        default = "https";
        example = "https";
        description = ''
          Which protocol to use when creating a url string
        '';
      }
      // (defaults.protocol or {});
  };
}

View file

@ -1,38 +0,0 @@
{ lib, ...}:
let
  # Only mkOption and types are actually used; the previously inherited string
  # helpers (isString, splitStringBy, etc.) belonged to lib/strings.nix.
  inherit (lib) mkOption types;
in
{
  options = {
    # Builds the standard host/port/protocol option trio used to describe a URL.
    # `defaults` may override any attribute of each generated option.
    mkUrlOptions =
      defaults:
      {
        host = mkOption {
          type = types.str;
          example = "host.tld";
          description = ''
            Hostname
          '';
        } // (defaults.host or {});
        port = mkOption {
          type = types.port;
          default = 1234;
          # Integer example to agree with `types.port`.
          example = 1234;
          description = ''
            Port
          '';
        } // (defaults.port or {});
        protocol = mkOption {
          type = types.str;
          default = "https";
          example = "https";
          description = ''
            Which protocol to use when creating a url string
          '';
        } // (defaults.protocol or {});
      };
  };
}

53
lib/strings.nix Normal file
View file

@ -0,0 +1,53 @@
{lib, ...}: let
  inherit (builtins) isString typeOf match toString head;
  inherit (lib) throwIfNot concatStringsSep splitStringBy toLower map concatMapAttrsStringSep;
in {
  #========================================================================================
  # Converts a string to snake case
  #
  # simply replaces any uppercase letter with its lowercase variant preceded by an
  # underscore, e.g. "fooBar" -> "foo_bar"; throws for non-string arguments
  #========================================================================================
  toSnakeCase = str:
    throwIfNot (isString str) "toSnakeCase only accepts string values, but got ${typeOf str}" (
      str
      |> splitStringBy (prev: curr: builtins.match "[a-z]" prev != null && builtins.match "[A-Z]" curr != null) true
      |> map (p: toLower p)
      |> concatStringsSep "_"
    );
  #========================================================================================
  # Converts a set of url parts to a string
  #
  # Only `host` is required; every other part is omitted from the result when null.
  # `query` and `hash` are attrsets rendered as "name=value" pairs joined with "&".
  # NOTE(review): values are not URI-escaped — callers must pass pre-encoded values.
  #========================================================================================
  toUrl = {
    protocol ? null,
    host,
    port ? null,
    path ? null,
    query ? null,
    hash ? null,
  }: let
    # Strip leading/trailing slashes so the path can be re-anchored with one "/".
    # NOTE(review): builtins.match uses POSIX extended regexes, which have no lazy
    # quantifier — confirm "+?" actually behaves as intended here.
    trim_slashes = str: str |> match "^\/*(.+?)\/*$" |> head;
    encode_to_str = set: concatMapAttrsStringSep "&" (n: v: "${n}=${v}") set;
    _protocol =
      if protocol != null
      then "${protocol}://"
      else "";
    _port =
      if port != null
      then ":${toString port}"
      else "";
    _path =
      if path != null
      then "/${path |> trim_slashes}"
      else "";
    _query =
      if query != null
      then "?${query |> encode_to_str}"
      else "";
    _hash =
      if hash != null
      then "#${hash |> encode_to_str}"
      else "";
  in "${_protocol}${host}${_port}${_path}${_query}${_hash}";
}

View file

@ -1,39 +0,0 @@
{ lib, ...}:
let
  inherit (builtins) isString typeOf match toString head;
  inherit (lib) throwIfNot concatStringsSep splitStringBy toLower map concatMapAttrsStringSep;
in
{
  strings = {
    #========================================================================================
    # Converts a string to snake case
    #
    # simply replaces any uppercase letter with its lowercase variant preceded by an
    # underscore, e.g. "fooBar" -> "foo_bar"; throws for non-string arguments
    #========================================================================================
    toSnakeCase =
      str:
      throwIfNot (isString str) "toSnakeCase only accepts string values, but got ${typeOf str}" (
        str
        |> splitStringBy (prev: curr: builtins.match "[a-z]" prev != null && builtins.match "[A-Z]" curr != null) true
        |> map (p: toLower p)
        |> concatStringsSep "_"
      );
    #========================================================================================
    # Converts a set of url parts to a string
    #
    # Only `host` is required; null parts are omitted. `query` and `hash` are attrsets
    # rendered as "name=value" pairs joined with "&" (values are not URI-escaped).
    #========================================================================================
    toUrl =
      { protocol ? null, host, port ? null, path ? null, query ? null, hash ? null }:
      let
        # Strip leading/trailing slashes so the path can be re-anchored with one "/".
        # NOTE(review): builtins.match uses POSIX extended regexes, which have no lazy
        # quantifier — confirm "+?" actually behaves as intended here.
        trim_slashes = str: str |> match "^\/*(.+?)\/*$" |> head;
        encode_to_str = set: concatMapAttrsStringSep "&" (n: v: "${n}=${v}") set;
        _protocol = if protocol != null then "${protocol}://" else "";
        _port = if port != null then ":${toString port}" else "";
        _path = if path != null then "/${path |> trim_slashes}" else "";
        _query = if query != null then "?${query |> encode_to_str}" else "";
        _hash = if hash != null then "#${hash |> encode_to_str}" else "";
      in
      "${_protocol}${host}${_port}${_path}${_query}${_hash}";
  };
}

View file

@ -1,2 +0,0 @@
{"level":"fatal","error":"homeserver.address not configured","time":"2026-04-15T09:10:06.949460064Z","message":"Configuration error"}
{"level":"info","time":"2026-04-15T09:10:06.949840013Z","message":"See https://docs.mau.fi/faq/field-unconfigured for more info"}

View file

@ -1,2 +0,0 @@
{"level":"fatal","error":"appservice.as_token not configured. Did you forget to generate the registration? ","time":"2026-04-15T09:11:43.617908298Z","message":"Configuration error"}
{"level":"info","time":"2026-04-15T09:11:43.618232253Z","message":"See https://docs.mau.fi/faq/field-unconfigured for more info"}

View file

@ -0,0 +1,3 @@
{ ... }: {
  # Target platform for this machine's package set.
  nixpkgs.hostPlatform = "x86_64-linux";
}

View file

@ -0,0 +1,40 @@
# Machine configuration: NVIDIA GPU box running a gamescope session.
{self, ...}: {
  imports = [
    ./disks.nix
    ./hardware.nix
    # Flake-input NixOS modules shared by machines in this repo.
    self.inputs.home-manager.nixosModules.home-manager
    self.inputs.himmelblau.nixosModules.himmelblau
    self.inputs.jovian.nixosModules.default
    self.inputs.mydia.nixosModules.default
    self.inputs.nix-minecraft.nixosModules.minecraft-servers
    self.inputs.nvf.nixosModules.default
    self.inputs.sops-nix.nixosModules.sops
    # Auto-import every module file under modules/nixos.
    (self.inputs.import-tree ../../modules/nixos)
  ];
  nixpkgs.hostPlatform = "x86_64-linux";
  sneeuwvlok = {
    # Machine capability facts; other modules derive behavior from these.
    hardware.has = {
      gpu.nvidia = true;
      audio = true;
    };
    boot = {
      quiet = true;
      animated = true;
    };
    # Desktop session flavour selected for this machine.
    desktop.use = "gamescope";
    application = {
      steam.enable = true;
    };
    editor = {
      nano.enable = true;
    };
  };
  # Tracks the first installed release; do not bump on upgrades.
  system.stateVersion = "23.11";
}

View file

@ -1,4 +1,4 @@
{ config, lib, pkgs, modulesPath, system, ... }:
{ config, lib, pkgs, modulesPath, ... }:
let
inherit (lib.modules) mkDefault;
in
@ -13,6 +13,6 @@ in
extraModulePackages = [ ];
};
nixpkgs.hostPlatform = mkDefault system;
nixpkgs.hostPlatform = mkDefault pkgs.stdenv.hostPlatform.system;
hardware.cpu.intel.updateMicrocode = mkDefault config.hardware.enableRedistributableFirmware;
}

View file

@ -0,0 +1,79 @@
{
  self,
  lib,
  pkgs,
  ...
}: {
  _module.args = {
    # Force-replace the module-system-provided pkgs with a locally imported
    # nixpkgs instance so this machine controls its own overlays and the
    # permissive config (unfree + insecure allowances) it needs.
    pkgs = lib.mkForce (import self.inputs.nixpkgs {
      system = "x86_64-linux";
      overlays = with self.inputs; [
        fenix.overlays.default
        nix-minecraft.overlay
        flux.overlays.default
      ];
      config = {
        allowUnfree = true;
        permittedInsecurePackages = [
          # I think this is because of zen
          "qtwebengine-5.15.19"
          # For mautrix-signal, the matrix to signal bridge
          "olm-3.2.16"
        ];
      };
    });
  };
  imports = [
    ./disks.nix
    ./hardware.nix
    # Flake-input NixOS modules shared by machines in this repo.
    self.inputs.home-manager.nixosModules.home-manager
    self.inputs.himmelblau.nixosModules.himmelblau
    self.inputs.jovian.nixosModules.default
    self.inputs.mydia.nixosModules.default
    self.inputs.nix-minecraft.nixosModules.minecraft-servers
    self.inputs.nvf.nixosModules.default
    self.inputs.sops-nix.nixosModules.sops
    # Auto-import every module file under modules/nixos.
    (self.inputs.import-tree ../../modules/nixos)
  ];
  # Remove the stray gtkrc on every activation — presumably it conflicts with
  # managed theming; TODO confirm why it keeps reappearing.
  system.activationScripts.remove-gtkrc.text = "rm -f /home/chris/.gtkrc-2.0";
  # NOTE(review): build-time logrotate config validation is disabled — confirm
  # which rotated unit made the check fail.
  services.logrotate.checkConfig = false;
  environment.systemPackages = with pkgs; [beyond-all-reason openrct2];
  sneeuwvlok = {
    # Machine capability facts; other modules derive behavior from these.
    hardware.has = {
      gpu.amd = true;
      bluetooth = true;
      audio = true;
    };
    boot = {
      quiet = true;
      animated = true;
    };
    # Desktop session flavour selected for this machine.
    desktop.use = "plasma";
    application = {
      steam.enable = true;
    };
    editor = {
      nano.enable = true;
    };
  };
  # Log straight into the desktop session as chris.
  services.displayManager.autoLogin = {
    enable = true;
    user = "chris";
  };
  # Tracks the first installed release; do not bump on upgrades.
  system.stateVersion = "23.11";
}

View file

@ -0,0 +1,18 @@
{
  config,
  lib,
  ...
}: let
  inherit (lib.modules) mkDefault;
in {
  # Hardware facts for this AMD machine: boot-time kernel modules and platform.
  boot = {
    initrd.availableKernelModules = ["xhci_pci" "ahci" "usb_storage" "usbhid" "sd_mod"];
    initrd.kernelModules = [];
    kernelModules = ["kvm-amd"];
    kernelParams = [];
    extraModulePackages = [];
  };
  nixpkgs.hostPlatform = "x86_64-linux";
  # Only apply microcode updates when redistributable firmware is allowed;
  # mkDefault keeps this overridable by the machine config.
  hardware.cpu.amd.updateMicrocode = mkDefault config.hardware.enableRedistributableFirmware;
}

View file

@ -0,0 +1,3 @@
{ ... }: {
  # Target platform for this machine's package set.
  nixpkgs.hostPlatform = "x86_64-linux";
}

View file

@ -0,0 +1,44 @@
# Machine configuration: headed work machine with Azure AD (himmelblau) login.
{
  self,
  pkgs,
  ...
}: {
  imports = [
    ./disks.nix
    ./hardware.nix
    # Flake-input NixOS modules shared by machines in this repo.
    self.inputs.home-manager.nixosModules.home-manager
    self.inputs.himmelblau.nixosModules.himmelblau
    self.inputs.jovian.nixosModules.default
    self.inputs.mydia.nixosModules.default
    self.inputs.nix-minecraft.nixosModules.minecraft-servers
    self.inputs.nvf.nixosModules.default
    self.inputs.sops-nix.nixosModules.sops
    # Auto-import every module file under modules/nixos.
    (self.inputs.import-tree ../../modules/nixos)
  ];
  nixpkgs.hostPlatform = "x86_64-linux";
  # Work tooling installed system-wide on this machine only.
  environment.systemPackages = with pkgs; [
    azure-cli
    github-copilot-cli
  ];
  sneeuwvlok = {
    # Machine capability facts; other modules derive behavior from these.
    hardware.has = {
      bluetooth = true;
      audio = true;
    };
    # Azure AD authentication for Linux.
    services.authentication.himmelblau.enable = true;
    application = {
      steam.enable = true;
    };
    editor = {
      nano.enable = true;
    };
  };
  # Tracks the first installed release; do not bump on upgrades.
  system.stateVersion = "23.11";
}

View file

@ -1,4 +1,4 @@
{ config, lib, pkgs, modulesPath, system, ... }:
{ config, lib, pkgs, modulesPath, ... }:
let
inherit (lib.modules) mkDefault;
in
@ -13,6 +13,6 @@ in
extraModulePackages = [ ];
};
nixpkgs.hostPlatform = mkDefault system;
nixpkgs.hostPlatform = mkDefault pkgs.stdenv.hostPlatform.system;
hardware.cpu.intel.updateMicrocode = mkDefault config.hardware.enableRedistributableFirmware;
}

View file

@ -0,0 +1,41 @@
# Machine configuration: AMD GPU box running a gamescope session.
{self, ...}: {
  imports = [
    ./disks.nix
    ./hardware.nix
    # Flake-input NixOS modules shared by machines in this repo.
    self.inputs.home-manager.nixosModules.home-manager
    self.inputs.himmelblau.nixosModules.himmelblau
    self.inputs.jovian.nixosModules.default
    self.inputs.mydia.nixosModules.default
    self.inputs.nix-minecraft.nixosModules.minecraft-servers
    self.inputs.nvf.nixosModules.default
    self.inputs.sops-nix.nixosModules.sops
    # Auto-import every module file under modules/nixos.
    (self.inputs.import-tree ../../modules/nixos)
  ];
  nixpkgs.hostPlatform = "x86_64-linux";
  sneeuwvlok = {
    # Machine capability facts; other modules derive behavior from these.
    hardware.has = {
      gpu.amd = true;
      bluetooth = true;
      audio = true;
    };
    boot = {
      quiet = true;
      animated = true;
    };
    # Desktop session flavour selected for this machine.
    desktop.use = "gamescope";
    application = {
      steam.enable = true;
    };
    editor = {
      nano.enable = true;
    };
  };
  # Tracks the first installed release; do not bump on upgrades.
  system.stateVersion = "23.11";
}

View file

@ -1,4 +1,4 @@
{ config, lib, pkgs, modulesPath, system, ... }:
{ config, lib, pkgs, modulesPath, ... }:
let
inherit (lib.modules) mkDefault;
in
@ -13,6 +13,6 @@ in
extraModulePackages = [ ];
};
nixpkgs.hostPlatform = mkDefault system;
nixpkgs.hostPlatform = mkDefault pkgs.stdenv.hostPlatform.system;
hardware.cpu.intel.updateMicrocode = mkDefault config.hardware.enableRedistributableFirmware;
}

View file

@ -0,0 +1,286 @@
{
pkgs,
lib,
self,
...
}: {
_module.args = {
pkgs = lib.mkForce (import self.inputs.nixpkgs {
system = "x86_64-linux";
overlays = with self.inputs; [
fenix.overlays.default
nix-minecraft.overlay
flux.overlays.default
];
config = {
allowUnfree = true;
permittedInsecurePackages = [
# I think this is because of zen
"qtwebengine-5.15.19"
# For mautrix-signal, the matrix to signal bridge
"olm-3.2.16"
];
};
});
};
imports = [
./disks.nix
./hardware.nix
self.inputs.home-manager.nixosModules.home-manager
self.inputs.himmelblau.nixosModules.himmelblau
self.inputs.jovian.nixosModules.default
self.inputs.mydia.nixosModules.default
self.inputs.nix-minecraft.nixosModules.minecraft-servers
self.inputs.nvf.nixosModules.default
self.inputs.sops-nix.nixosModules.sops
(self.inputs.import-tree ../../modules/nixos)
];
system.stateVersion = "23.11";
networking = {
interfaces.enp2s0 = {
ipv6.addresses = [
{
address = "2a0d:6e00:1dc9:0::dead:beef";
prefixLength = 64;
}
];
useDHCP = true;
};
defaultGateway = {
address = "192.168.1.1";
interface = "enp2s0";
};
defaultGateway6 = {
address = "fe80::1";
interface = "enp2s0";
};
};
# sneeuwvlok = {
# services = {
# backup.borg.enable = true;
# authentication.zitadel = {
# enable = true;
# organization = {
# nix = {
# user = {
# chris = {
# email = "chris@kruining.eu";
# firstName = "Chris";
# lastName = "Kruining";
# roles = ["ORG_OWNER"];
# instanceRoles = ["IAM_OWNER"];
# };
# kaas = {
# email = "chris+kaas@kruining.eu";
# firstName = "Kaas";
# lastName = "Kruining";
# };
# };
# project = {
# ulmo = {
# projectRoleCheck = true;
# projectRoleAssertion = true;
# hasProjectCheck = true;
# role = {
# jellyfin = {
# group = "jellyfin";
# };
# jellyfin_admin = {
# group = "jellyfin";
# };
# };
# assign = {
# chris = ["jellyfin" "jellyfin_admin"];
# kaas = ["jellyfin"];
# };
# application = {
# jellyfin = {
# redirectUris = ["https://jellyfin.kruining.eu/sso/OID/redirect/zitadel"];
# grantTypes = ["authorizationCode"];
# responseTypes = ["code"];
# };
# forgejo = {
# redirectUris = ["https://git.amarth.cloud/user/oauth2/zitadel/callback"];
# grantTypes = ["authorizationCode"];
# responseTypes = ["code"];
# };
# vaultwarden = {
# redirectUris = ["https://vault.kruining.eu/identity/connect/oidc-signin"];
# grantTypes = ["authorizationCode"];
# responseTypes = ["code"];
# exportMap = {
# client_id = "SSO_CLIENT_ID";
# client_secret = "SSO_CLIENT_SECRET";
# };
# };
# matrix = {
# redirectUris = ["https://matrix.kruining.eu/_synapse/client/oidc/callback"];
# grantTypes = ["authorizationCode"];
# responseTypes = ["code"];
# };
# mydia = {
# redirectUris = ["http://localhost:2010/auth/oidc/callback"];
# grantTypes = ["authorizationCode"];
# responseTypes = ["code"];
# };
# grafana = {
# redirectUris = ["http://localhost:9001/login/generic_oauth"];
# grantTypes = ["authorizationCode"];
# responseTypes = ["code"];
# };
# };
# };
# convex = {
# projectRoleCheck = true;
# projectRoleAssertion = true;
# hasProjectCheck = true;
# application = {
# scry = {
# redirectUris = ["https://nautical-salamander-320.eu-west-1.convex.cloud/api/auth/callback/zitadel"];
# grantTypes = ["authorizationCode"];
# responseTypes = ["code"];
# };
# };
# };
# };
# action = {
# flattenRoles = {
# script = ''
# (ctx, api) => {
# if (ctx.v1.user.grants == undefined || ctx.v1.user.grants.count == 0) {
# return;
# }
# const roles = ctx.v1.user.grants.grants.flatMap(({ roles, projectId }) => roles.map(role => projectId + ':' + role));
# api.v1.claims.setClaim('nix:zitadel:custom', JSON.stringify({ roles }));
# };
# '';
# };
# };
# triggers = [
# {
# flowType = "customiseToken";
# triggerType = "preUserinfoCreation";
# actions = ["flattenRoles"];
# }
# {
# flowType = "customiseToken";
# triggerType = "preAccessTokenCreation";
# actions = ["flattenRoles"];
# }
# ];
# };
# };
# };
# communication.matrix.enable = true;
# development.forgejo.enable = true;
# networking.ssh.enable = true;
# networking.caddy.hosts = {
# # Expose amarth cloud stuff like this until I have a proper solution
# "auth.amarth.cloud" = ''
# reverse_proxy http://192.168.1.223:9092
# '';
# "amarth.cloud" = ''
# reverse_proxy http://192.168.1.223:8080
# '';
# };
# media.enable = true;
# media.glance.enable = true;
# media.mydia.enable = true;
# media.nfs.enable = true;
# media.jellyfin.enable = true;
# # media.servarr = {
# # radarr = {
# # enable = true;
# # port = 2001;
# # rootFolders = [
# # "/var/media/movies"
# # ];
# # };
# # sonarr = {
# # enable = true;
# # # debug = true;
# # port = 2002;
# # rootFolders = [
# # "/var/media/series"
# # ];
# # };
# # lidarr = {
# # enable = true;
# # debug = true;
# # port = 2003;
# # rootFolders = [
# # "/var/media/music"
# # ];
# # };
# # prowlarr = {
# # enable = true;
# # # debug = true;
# # port = 2004;
# # };
# # };
# observability = {
# grafana.enable = true;
# prometheus.enable = true;
# loki.enable = true;
# promtail.enable = true;
# # uptime-kuma.enable = true;
# };
# security.vaultwarden = {
# enable = true;
# database = {
# # type = "sqlite";
# # file = "/var/lib/vaultwarden/state.db";
# type = "postgresql";
# host = "localhost";
# port = 5432;
# sslMode = "disabled";
# };
# };
# };
# editor = {
# nano.enable = true;
# };
# };
}

View file

@ -0,0 +1,20 @@
{
  config,
  lib,
  ...
}: let
  inherit (lib.modules) mkDefault;
in {
  # Hardware facts for this Intel machine: boot-time kernel modules and platform.
  # (pkgs and modulesPath were previously accepted but never used — dropped.)
  boot = {
    initrd.availableKernelModules = ["xhci_pci" "ahci" "nvme" "usbhid" "usb_storage" "sd_mod"];
    initrd.kernelModules = [];
    kernelModules = ["kvm-intel"];
    kernelParams = [];
    extraModulePackages = [];
  };
  nixpkgs.hostPlatform = "x86_64-linux";
  # Only apply microcode updates when redistributable firmware is allowed;
  # mkDefault keeps this overridable by the machine config.
  hardware.cpu.intel.updateMicrocode = mkDefault config.hardware.enableRedistributableFirmware;
}

View file

@ -0,0 +1,3 @@
{ ... }: {
  # Target platform for this machine's package set.
  nixpkgs.hostPlatform = "x86_64-linux";
}

View file

@ -0,0 +1,3 @@
{ ... }: {
  # Target platform for this machine's package set.
  nixpkgs.hostPlatform = "x86_64-linux";
}

View file

@ -0,0 +1,19 @@
# Home-manager module: opt-in installation of the Bitwarden desktop client.
{
  config,
  lib,
  pkgs,
  ...
}: let
  inherit (lib) mkIf mkEnableOption;
  cfg = config.sneeuwvlok.application.bitwarden;
in {
  options.sneeuwvlok.application.bitwarden = {
    enable = mkEnableOption "enable bitwarden";
  };
  # Only pull the package in when the machine opts in.
  config = mkIf cfg.enable {
    home.packages = with pkgs; [bitwarden-desktop];
  };
}

View file

@ -1,15 +0,0 @@
# Home-manager module: opt-in installation of the Bitwarden desktop client.
# The option lives under the snowfall `namespace` argument.
{ inputs, config, lib, pkgs, namespace, ... }:
let
  inherit (lib) mkIf mkEnableOption;
  cfg = config.${namespace}.application.bitwarden;
in
{
  options.${namespace}.application.bitwarden = {
    enable = mkEnableOption "enable bitwarden";
  };
  config = mkIf cfg.enable {
    home.packages = with pkgs; [ bitwarden-desktop ];
  };
}

View file

@ -1,11 +1,15 @@
{ inputs, config, lib, pkgs, namespace, ... }:
let
{
inputs,
config,
lib,
pkgs,
...
}: let
inherit (lib) mkIf mkEnableOption;
cfg = config.${namespace}.application.chrome;
in
{
options.${namespace}.application.chrome = {
cfg = config.sneeuwvlok.application.chrome;
in {
options.sneeuwvlok.application.chrome = {
enable = mkEnableOption "enable chrome";
};

View file

@ -0,0 +1,19 @@
{
inputs,
config,
lib,
pkgs,
...
}: let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.application.discord;
in {
options.sneeuwvlok.application.discord = {
enable = mkEnableOption "enable discord (vesktop)";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [vesktop];
};
}

View file

@ -1,15 +0,0 @@
{ inputs, config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.${namespace}.application.discord;
in
{
options.${namespace}.application.discord = {
enable = mkEnableOption "enable discord (vesktop)";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [ vesktop ];
};
}

View file

@ -0,0 +1,19 @@
{
inputs,
config,
lib,
pkgs,
...
}: let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.application.ladybird;
in {
options.sneeuwvlok.application.ladybird = {
enable = mkEnableOption "enable ladybird";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [ladybird];
};
}

View file

@ -1,15 +0,0 @@
{ inputs, config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.${namespace}.application.ladybird;
in
{
options.${namespace}.application.ladybird = {
enable = mkEnableOption "enable ladybird";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [ ladybird ];
};
}

View file

@ -0,0 +1,23 @@
{
config,
lib,
pkgs,
osConfig ? {},
...
}: let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.application.matrix;
in {
options.sneeuwvlok.application.matrix = {
enable = mkEnableOption "enable Matrix client (Fractal)";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [fractal element-desktop];
programs.element-desktop = {
enable = true;
};
};
}

View file

@ -1,19 +0,0 @@
{ config, lib, pkgs, namespace, osConfig ? {}, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.${namespace}.application.matrix;
in
{
options.${namespace}.application.matrix = {
enable = mkEnableOption "enable Matrix client (Fractal)";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [ fractal element-desktop ];
programs.element-desktop = {
enable = true;
};
};
}

View file

@ -1,11 +1,15 @@
{ config, lib, pkgs, namespace, osConfig ? {}, ... }:
let
{
config,
lib,
pkgs,
osConfig ? {},
...
}: let
inherit (lib) mkIf mkEnableOption;
cfg = config.${namespace}.application.obs;
in
{
options.${namespace}.application.obs = {
cfg = config.sneeuwvlok.application.obs;
in {
options.sneeuwvlok.application.obs = {
enable = mkEnableOption "enable obs";
};

View file

@ -3,14 +3,13 @@
config,
lib,
pkgs,
namespace,
...
}: let
inherit (lib) mkIf mkEnableOption;
cfg = config.${namespace}.application.onlyoffice;
cfg = config.sneeuwvlok.application.onlyoffice;
in {
options.${namespace}.application.onlyoffice = {
options.sneeuwvlok.application.onlyoffice = {
enable = mkEnableOption "enable onlyoffice";
};

View file

@ -0,0 +1,19 @@
{
inputs,
config,
lib,
pkgs,
...
}: let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.application.signal;
in {
options.sneeuwvlok.application.signal = {
enable = mkEnableOption "enable signal";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [signal-desktop];
};
}

View file

@ -1,15 +0,0 @@
{ inputs, config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.${namespace}.application.signal;
in
{
options.${namespace}.application.signal = {
enable = mkEnableOption "enable signal";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [ signal-desktop ];
};
}

View file

@ -1,11 +1,15 @@
{ inputs, config, lib, pkgs, namespace, ... }:
let
{
inputs,
config,
lib,
pkgs,
...
}: let
inherit (lib) mkIf mkEnableOption;
cfg = config.${namespace}.application.steam;
in
{
options.${namespace}.application.steam = {
cfg = config.sneeuwvlok.application.steam;
in {
options.sneeuwvlok.application.steam = {
enable = mkEnableOption "enable steam";
};

View file

@ -0,0 +1,18 @@
{
config,
lib,
self,
...
}: let
inherit (lib) mkIf mkEnableOption;
cfg = config.sneeuwvlok.application.studio;
in {
options.sneeuwvlok.application.studio = {
enable = mkEnableOption "enable Bricklink Studio";
};
config = mkIf cfg.enable {
home.packages = [self.packages.studio];
};
}

View file

@ -1,15 +0,0 @@
{ config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkIf mkEnableOption;
cfg = config.${namespace}.application.studio;
in
{
options.${namespace}.application.studio = {
enable = mkEnableOption "enable Bricklink Studio";
};
config = mkIf cfg.enable {
home.packages = with pkgs.${namespace}; [ studio ];
};
}

View file

@ -1,11 +1,15 @@
{ inputs, config, lib, pkgs, namespace, ... }:
let
{
inputs,
config,
lib,
pkgs,
...
}: let
inherit (lib) mkIf mkEnableOption;
cfg = config.${namespace}.application.teamspeak;
in
{
options.${namespace}.application.teamspeak = {
cfg = config.sneeuwvlok.application.teamspeak;
in {
options.sneeuwvlok.application.teamspeak = {
enable = mkEnableOption "enable teamspeak";
};

View file

@ -1,11 +1,15 @@
{ inputs, config, lib, pkgs, namespace, ... }:
let
{
inputs,
config,
lib,
pkgs,
...
}: let
inherit (lib) mkIf mkEnableOption;
cfg = config.${namespace}.application.thunderbird;
in
{
options.${namespace}.application.thunderbird = {
cfg = config.sneeuwvlok.application.thunderbird;
in {
options.sneeuwvlok.application.thunderbird = {
enable = mkEnableOption "enable thunderbird";
};
@ -14,7 +18,7 @@ in
enable = true;
package = pkgs.thunderbird-latest;
profiles.${config.snowfallorg.user.name} = {
profiles.chris = {
isDefault = true;
};
};
@ -30,7 +34,7 @@ in
};
thunderbird = {
enable = true;
profiles = [ config.snowfallorg.user.name ];
profiles = ["chris"];
};
};

View file

@ -1,15 +1,14 @@
{ inputs, config, lib, pkgs, namespace, ... }:
let
{
config,
lib,
pkgs,
...
}: let
inherit (lib) mkIf mkEnableOption;
cfg = config.${namespace}.application.zen;
in
{
imports = [
inputs.zen-browser.homeModules.default
];
options.${namespace}.application.zen = {
cfg = config.sneeuwvlok.application.zen;
in {
options.sneeuwvlok.application.zen = {
enable = mkEnableOption "enable zen";
};
@ -54,8 +53,7 @@ in
install_url = "https://addons.mozilla.org/firefox/downloads/latest/${builtins.toString id}/latest.xpi";
installation_mode = "force_installed";
};
in
{
in {
ublock_origin = 4531307;
ghostry = 4562168;
bitwarden = 4562769;

View file

@ -1,11 +1,15 @@
{ pkgs, config, lib, namespace, ... }:
let
{
pkgs,
config,
lib,
...
}: let
inherit (lib) mkOption;
inherit (lib.types) enum;
cfg = config.${namespace}.defaults;
cfg = config.sneeuwvlok.defaults;
in {
options.${namespace}.defaults = {
options.sneeuwvlok.defaults = {
editor = mkOption {
type = enum ["nano" "nvim" "zed"];
default = "nano";

View file

@ -1,13 +1,15 @@
{ config, lib, namespace, osConfig ? {}, ... }:
let
{
config,
lib,
osConfig ? {},
...
}: let
inherit (lib) mkIf;
cfg = config.${namespace}.desktop.plasma;
osCfg = osConfig.${namespace}.desktop.plasma or { enable = false; };
in
{
options.${namespace}.desktop.plasma = {
cfg = config.sneeuwvlok.desktop.plasma;
osCfg = osConfig.sneeuwvlok.desktop.plasma or {enable = false;};
in {
options.sneeuwvlok.desktop.plasma = {
};
config = mkIf osCfg.enable {

View file

@ -0,0 +1,18 @@
{
config,
lib,
pkgs,
...
}: let
inherit (lib) mkEnableOption mkIf;
cfg = config.sneeuwvlok.development.dotnet;
in {
options.sneeuwvlok.development.dotnet = {
enable = mkEnableOption "Enable dotnet development tools";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [dotnet-sdk_8];
};
}

View file

@ -1,15 +0,0 @@
{ config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkEnableOption mkIf;
cfg = config.${namespace}.development.dotnet;
in
{
options.${namespace}.development.dotnet = {
enable = mkEnableOption "Enable dotnet development tools";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [ dotnet-sdk_8 ];
};
}

View file

@ -0,0 +1,18 @@
{
config,
lib,
pkgs,
...
}: let
inherit (lib) mkEnableOption mkIf;
cfg = config.sneeuwvlok.development.javascript;
in {
options.sneeuwvlok.development.javascript = {
enable = mkEnableOption "Enable javascript development tools";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [bun nodejs nodePackages_latest.typescript-language-server];
};
}

View file

@ -1,15 +0,0 @@
{ config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkEnableOption mkIf;
cfg = config.${namespace}.development.javascript;
in
{
options.${namespace}.development.javascript = {
enable = mkEnableOption "Enable javascript development tools";
};
config = mkIf cfg.enable {
home.packages = with pkgs; [ bun nodejs nodePackages_latest.typescript-language-server ];
};
}

View file

@ -0,0 +1,18 @@
{
config,
lib,
pkgs,
...
}: let
inherit (lib) mkEnableOption mkIf;
cfg = config.sneeuwvlok.development.rust;
in {
options.sneeuwvlok.development.rust = {
enable = mkEnableOption "Enable rust development tools";
};
config =
mkIf cfg.enable {
};
}

View file

@ -1,15 +0,0 @@
{ config, lib, pkgs, namespace, ... }:
let
inherit (lib) mkEnableOption mkIf;
cfg = config.${namespace}.development.rust;
in
{
options.${namespace}.development.rust = {
enable = mkEnableOption "Enable rust development tools";
};
config = mkIf cfg.enable {
};
}

View file

@ -1,11 +1,15 @@
{ config, options, lib, pkgs, namespace, ... }:
let
{
config,
options,
lib,
pkgs,
...
}: let
inherit (lib) mkEnableOption mkIf;
cfg = config.${namespace}.editor.nano;
in
{
options.${namespace}.editor.nano = {
cfg = config.sneeuwvlok.editor.nano;
in {
options.sneeuwvlok.editor.nano = {
enable = mkEnableOption "nano";
};

Some files were not shown because too many files have changed in this diff Show more