{
  pkgs,
  lib,
  config,
  inputs,
  ...
}: let
  sopsPath = key: config.sops.secrets.${key}.path;
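  # Turn an attrset of Prometheus exporter configs into scrape jobs that target each exporter's port on localhost.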
  scrapedExporters = lib.attrsets.mapAttrsToList (expName: conf: {
    job_name = expName;
    static_configs = lib.singleton {
      targets = lib.singleton "localhost:${builtins.toString conf.port}";
    };
  });
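  # Build an nginx virtualHost with the shared TLS-over-VPN defaults (certificate from the repo,
  # key from sops, listening only on the VPN addresses), merged with per-host settings via recursiveUpdate.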
  mkVirtHost = certificateName:
    lib.attrsets.recursiveUpdate {
      addSSL = true;
      listenAddresses = [vpnIPv4 "[${vpnIPv6}]"];
      sslTrustedCertificate = pkgs.writeText "ca.crt" (builtins.readFile ../secrets/ca.crt);
      sslCertificateKey = sopsPath "certificate-key-${certificateName}";
      sslCertificate = pkgs.writeText "${certificateName}.crt" (builtins.readFile ../secrets/pub/${certificateName}.crt);
    };

  vpnInterface = config.services.tailscale.interfaceName;
  vpnIPv4 = "100.108.135.4";
  vpnIPv6 = "fd7a:115c:a1e0:ab12:4843:cd96:626c:8704";
in {
  imports = [
    inputs.nixos-hardware.nixosModules.common-cpu-intel #-cpu-only
    ../modules/nginx-reverse-proxy.nix
    ../hardware/asrock-z370-i3-black-box.nix
  ];
  config = {
    networking.hostName = "faunus-ater";
    networking.hostId = "a4d7bec4";
    networking.interfaces.eno1.useDHCP = true;

    # === Make sure ZFS works ===
    # TODO: Update and think of some automatic way of keeping this up to date.
    boot.kernelPackages = pkgs.linuxPackages_5_15;

    # === Can't handle this ===
    systemd.enableEmergencyMode = false;

    # === Settings ===
    settings.ssh.openOutsideVPN = true;
    settings.printing.enable = true;

    # === ZFS services ===
    services.zfs.trim.enable = true;
    services.zfs.autoScrub.enable = true;
    services.zfs.autoScrub.pools = ["rpool"];

    # === Additional services ===
    services.fwupd.enable = true;
    powerManagement = {
      enable = true;
      powertop.enable = true;
      cpuFreqGovernor = "powersave";
    };

    # === Git.home, because everything else sucks ===
    services.gogsHome = {
      enable = true;
      passwordFile = sopsPath "gogs-database-password";
      addr = {
        v4 = vpnIPv4;
        v6 = vpnIPv6;
      };
      stateDir = "/data/dirty/gogs";
    };

    # === Extend printing settings because sharing is caring ===
    services.printing = {
      listenAddresses = ["*:631"];
      allowFrom = ["all" "@IF(${vpnInterface})"];
      defaultShared = true;
      browsing = true;
      logLevel = "debug";
    };
    networking.firewall.interfaces.${vpnInterface} = {
      allowedUDPPorts = [631];
      allowedTCPPorts = [631 config.services.hydra.port];
    };
    hardware.printers = {
      ensureDefaultPrinter = "Local";
      ensurePrinters = lib.singleton {
        description = "The fastest Boi in town!";
        deviceUri = "usb://Samsung/ML-1640%20Series?serial=144QBAHS600499T.";
        location = "@Home";
        model = "samsung/ML-1640.ppd";
        name = "Local";
        ppdOptions = {
          PageSize = "A4";
          Resolution = "600dpi";
        };
      };
    };

    virtualisation.oci-containers.backend = "podman";
    virtualisation.podman = {
      enable = true;
      dockerCompat = true;
      extraPackages = with pkgs; [zfs];
    };
    # Override storage driver
    virtualisation.containers.storage.settings = {
      storage = {
        driver = "zfs";
        graphroot = "/var/lib/containers/storage";
        runroot = "/run/containers/storage";
      };
    };

    virtualisation.oci-containers.containers."timetagger" = {
      image = "ghcr.io/almarklein/timetagger:v23.2.1";
      ports = ["5873:5873"];
      environment = {
        TIMETAGGER_BIND = "0.0.0.0:5873";
        TIMETAGGER_DATADIR = "/root/_timetagger";
        TIMETAGGER_LOG_LEVEL = "info";
        TIMETAGGER_CREDENTIALS = "malte:$2a$08$P.e3SD0cnPK0P4mFYShELuoa37.1e1dEqE8MWa6LJ/kSJfje1BdBi,marie:$2a$08$ubOZWO510y5bgwIl0O4Ne.dKZdWoHqEMzvs56L6esqvLfBJ/6OgYm";
      };
      volumes = [
        "/data/dirty/timetagger:/root/_timetagger"
      ];
    };
    services.nginx.virtualHosts."time.home" = mkVirtHost "time-home" {
      locations."/" = {
        proxyPass = "http://127.0.0.1:5873";
        proxyWebsockets = true;
      };
    };

    services.nginx.virtualHosts."todo.home" = mkVirtHost "todo-home" {
      locations."/" = {
        proxyPass = "http://127.0.0.1:7372";
        proxyWebsockets = true;
      };
    };

    services.nginx.virtualHosts."support.home" = mkVirtHost "support-home" {
      locations."/" = {
        proxyPass = "http://127.0.0.1:9999";
        proxyWebsockets = true;
      };
    };

    services.nginx.virtualHosts."config.home" = mkVirtHost "config-home" {
      locations."/" = {
        proxyPass = "http://127.0.0.1:8123";
        proxyWebsockets = true;
      };
    };
    virtualisation.oci-containers.containers.home-assistant = {
      volumes = ["/data/dirty/home-assistant:/config"];
      environment.TZ = "Europe/Berlin";
      image = "ghcr.io/home-assistant/home-assistant:2023.9";
      ports = [
        "8123:8123"
        "1400:1400/tcp"
      ];
      extraOptions = [
        # TODO: Fix the path of the zigbee controller using udev
        "--device=/dev/serial/by-id/usb-Silicon_Labs_Sonoff_Zigbee_3.0_USB_Dongle_Plus_0001-if00-port0"
        "--device=/dev/ttyUSB0"
        "--cap-add=CAP_NET_RAW,CAP_NET_BIND_SERVICE"
      ];
    };
    # For SONOS
    networking.firewall.allowedTCPPorts = [1400];

    # === Dim ===
    # virtualisation.oci-containers.containers."dim" = {
    #   environment = {};
    #   image = "ghcr.io/dusk-labs/dim:dev";
    #   ports = lib.singleton "7999:8000";
    #   volumes = [
    #     # TODO: https://github.com/Dusk-Labs/dim/blob/master/docker-compose-template.yml
    #     "/srv/media.deletemesoon:/media:ro"
    #   ];
    #   #user = "${config.users.users.dim.name}:${config.users.groups.dim.name}";
    # };

    # === SheetAble ===
    # virtualisation.oci-containers.containers."sheetable" = {
    #   environment = {
    #     CONFIG_PATH = "/app/config/";
    #   };
    #   image = "vallezw/sheetable";
    #   ports = lib.singleton "7998:8080";
    #   volumes = [
    #     # TODO: https://sheetable.net/docs/Installation/installation-docker
    #   ];
    # };

    # === Seafile ===
    # services.seafile = {
    #   enable = true;
    #   adminEmail = "malte.tammena@pm.me";
    #   initialAdminPassword = "test";
    #   seafileSettings = {
    #     fileserver.host = "::1";
    #   };
    #   ccnetSettings.General.SERVICE_URL = "http://file.home";
    # };

    # === HYDRA & Friends. ===
    services.hydra = {
      enable = true;
      package = pkgs.hydra;
      notificationSender = "hydra@home";
      hydraURL = "http://faunus-ater:${builtins.toString config.services.hydra.port}";
      minimumDiskFree = 10;
      useSubstitutes = true;
    };
    services.nix-serve = {
      enable = true;
      secretKeyFile = sopsPath "nix-store-signing-key";
      # FIXME: Remove once fixed upstream
      package = pkgs.nix-serve.override {
        nix = pkgs.nixVersions.nix_2_12;
      };
    };
    # Build on other machines as well if possible
    nix.buildMachines = [
      {
        hostName = "localhost";
        maxJobs = 4;
        speedFactor = 1;
        sshKey = sopsPath "hydra-overseer-key";
        sshUser = "hydra-minion";
        systems = ["x86_64-linux" "i686-linux"];
      }
      {
        hostName = "helix-texta";
        maxJobs = 4;
        speedFactor = 2;
        sshKey = sopsPath "hydra-overseer-key";
        sshUser = "hydra-minion";
        supportedFeatures = ["kvm" "big-parallel"];
        systems = ["x86_64-linux" "i686-linux"];
      }
      {
        hostName = "murex-pecten";
        maxJobs = 4;
        speedFactor = 4;
        sshKey = sopsPath "hydra-overseer-key";
        sshUser = "hydra-minion";
        supportedFeatures = ["kvm" "big-parallel"];
        systems = ["x86_64-linux" "i686-linux"];
      }
    ];
    # TODO: This doesn't seem to work
    programs.ssh.extraConfig = ''
      Host *
        StrictHostKeyChecking accept-new
    '';
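    # allowed-uris whitelists URI prefixes for restricted evaluation (presumably so Hydra can fetch sources over http/https).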
    nix.extraOptions = ''
      allowed-uris = http:// https://
    '';
    systemd.services."hydra-initial-setup" = {
      description = "Setup hydra admin password once";
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
        LoadCredential = "USER_PW:${sopsPath "hydra-admin-password"}";
      };
      wantedBy = lib.singleton "multi-user.target";
      requires = lib.singleton "hydra-init.service";
      after = lib.singleton "hydra-init.service";
      environment = {
        inherit (config.systemd.services.hydra-init.environment) HYDRA_DBI;
      };
      script = let
        hydra-create-user = "${pkgs.hydra}/bin/hydra-create-user";
      in ''
        if [ ! -e ~hydra/.setup-is-complete ]; then
          # create admin user
          ${hydra-create-user} admin --full-name 'Admin Mc. Admining' --email-address 'admin@faunus-ater' --password "$USER_PW" --role admin || exit 1
          # done
          touch ~hydra/.setup-is-complete
        fi
      '';
    };
    services.nginx.virtualHosts = {
      "hydra.home" = mkVirtHost "hydra-home" {
        locations."/" = {
          proxyPass = "http://localhost:${builtins.toString config.services.hydra.port}";
        };
      };
      "cache.home" = mkVirtHost "cache-home" {
        locations."/" = {
          proxyPass = "http://localhost:${builtins.toString config.services.nix-serve.port}";
        };
      };
    };

    # === PAPERLESS service, save me! ===
    services.paperless = {
      enable = true;
      address = "[::1]";
      passwordFile = sopsPath "paperless-admin-password";
      dataDir = "/data/dirty/paperless";
      extraConfig = {
        PAPERLESS_OCR_LANGUAGE = "deu";
        PAPERLESS_CONSUMER_RECURSIVE = true;
        PAPERLESS_CONSUMER_SUBDIRS_AS_TAGS = true;
        PAPERLESS_URL = "https://doc.home";
      };
    };
    services.nginx.virtualHosts."doc.home" = mkVirtHost "doc-home" {
      locations."/" = {
        proxyPass = "http://[::1]:${builtins.toString config.services.paperless.port}";
        proxyWebsockets = true;
      };
    };

    # === Komga, for my reading needs ===
    services.komga = {
      enable = true;
      stateDir = "/data/dirty/komga";
    };
    services.nginx.virtualHosts."read.home" = mkVirtHost "read-home" {
      locations."/" = {
        proxyPass = "http://[::1]:${builtins.toString config.services.komga.port}";
        proxyWebsockets = true;
      };
    };

    # === Trilium ===
    services.trilium-server = {
      enable = true;
      port = 10302;
      dataDir = "/data/dirty/trilium";
    };
    services.nginx.virtualHosts."note.home" = mkVirtHost "note-home" {
      locations."/" = {
        proxyPass = "http://${config.services.trilium-server.host}:${builtins.toString config.services.trilium-server.port}";
        proxyWebsockets = true;
      };
    };

    # === Photoprism ===
    services.photoprism = {
      enable = true;
      port = 2342;
      storagePath = "/data/dirty/photoprism/storage";
      originalsPath = "/data/dirty/photoprism/originals";
      importPath = "/data/dirty/photoprism/import";
      passwordFile = sopsPath "photoprism-admin-password";
      settings = {
        PHOTOPRISM_SESSION_MAXAGE = "31536000";
        PHOTOPRISM_SESSION_TIMEOUT = "31536000";
        PHOTOPRISM_UPLOAD_NSFW = "true";
        PHOTOPRISM_DETECT_NSFW = "true";
        PHOTOPRISM_SITE_URL = "https://foto.home";
        PHOTOPRISM_SITE_TITLE = "PhotoPrism";
        PHOTOPRISM_SITE_CAPTION = "All the pictures!";
        PHOTOPRISM_SITE_DESCRIPTION = "";
        PHOTOPRISM_SITE_AUTHOR = "";
      };
    };
    # TODO: Why does it not work without these? :/
    systemd.services.photoprism.serviceConfig.User = lib.mkForce null;
    systemd.services.photoprism.serviceConfig.Group = lib.mkForce null;
    systemd.services.photoprism.serviceConfig.DynamicUser = lib.mkForce false;
    systemd.services.photoprism.serviceConfig.SystemCallFilter = lib.mkForce [];
    services.nginx.virtualHosts."foto.home" = mkVirtHost "foto-home" {
      locations."/" = {
        proxyPass = "http://localhost:${builtins.toString config.services.photoprism.port}";
        proxyWebsockets = true;
      };
      extraConfig = ''
        client_max_body_size 500M;
      '';
    };

    # === Restic User Backup ===
    services.resticConfigured = {
      enable = true;
      rootDir = "/data/dirty/restic";
      openFirewall = true;
    };

    # === Grafana ===
    services.grafanaHome = {
      enable = true;
      nginx.listenAddresses = [vpnIPv4 "[${vpnIPv6}]"];
      nginx.sslCertificate = sopsPath "nginx-cert-crt";
      nginx.sslCertificateKey = sopsPath "nginx-cert-key";
      grafana.adminPasswordFile = sopsPath "grafana-admin-password";
    };

    # === Prometheus ===
    services.prometheus = {
      enable = true;
      enableReload = true;
      exporters = {
        fritzbox = {
          enable = true;
          gatewayAddress = "spof";
        };
        node = {
          enable = true;
          enabledCollectors = ["systemd"];
          disabledCollectors = ["diskstats"];
        };
      };
      scrapeConfigs = scrapedExporters {inherit (config.services.prometheus.exporters) fritzbox node;};
    };
    systemd.services."prometheus-fritzbox-exporter".serviceConfig.EnvironmentFile = sopsPath "fritzbox-exporter-env";
    # TODO: Yikes
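    # Override the unit's ExecStart so the exporter runs with the cable-modem Lua metrics file shipped in the package.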
systemd.services."prometheus-fritzbox-exporter".serviceConfig.ExecStart = let
|
||
cfg = config.services.prometheus.exporters.fritzbox;
|
||
in
|
||
lib.mkForce ''
|
||
${pkgs.prometheus-fritzbox-exporter}/bin/fritzbox_exporter \
|
||
-listen-address ${cfg.listenAddress}:${toString cfg.port} \
|
||
-gateway-url http://${cfg.gatewayAddress}:${toString cfg.gatewayPort} \
|
||
-gateway-luaurl http://${cfg.gatewayAddress} \
|
||
-metrics-file ${pkgs.prometheus-fritzbox-exporter}/share/metrics.json \
|
||
-lua-metrics-file ${pkgs.prometheus-fritzbox-exporter}/share/metrics-lua_cable.json
|
||
'';
|
||
|
||
# services.nginx.virtualHosts."media.home" = {
|
||
# locations."/" = {
|
||
# proxyPass = "http://127.0.0.1:7999";
|
||
# proxyWebsockets = true;
|
||
# };
|
||
# };
|
||
|
||
# services.nginx.virtualHosts."file.home" = {
|
||
# locations."/" = {
|
||
# proxyPass = "http://[::1]:${builtins.toString config.services.seafile.seafileSettings.fileserver.port}";
|
||
# proxyWebsockets = true;
|
||
# };
|
||
# };
|
||
# networking.firewall.allowedTCPPorts = [config.services.seafile.seafileSettings.fileserver.port];
|
||
|
||
# === Print Service ===
|
||
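    # Drop-folder printing: the path unit watches /srv/to-be-printed and triggers
    # print-all-files.service, which prints every file it finds there and then deletes it.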
systemd.paths."print-all-files" = {
|
||
requires = ["printer.target"];
|
||
after = ["printer.target"];
|
||
wantedBy = ["default.target"];
|
||
pathConfig = {
|
||
DirectoryNotEmpty = "/srv/to-be-printed";
|
||
MakeDirectory = true;
|
||
DirectoryMode = "777";
|
||
Unit = "print-all-files.service";
|
||
};
|
||
};
|
||
systemd.services."print-all-files" = let
|
||
printAndDeleteFile = pkgs.writeShellApplication {
|
||
name = "print-and-delete-file";
|
||
runtimeInputs = [
|
||
pkgs.coreutils
|
||
pkgs.cups
|
||
];
|
||
text = ''
|
||
echo Printing "$1"
|
||
lp -- "$1"
|
||
rm "$1"
|
||
'';
|
||
};
|
||
script = pkgs.writeShellApplication {
|
||
name = "print-all-files-script";
|
||
runtimeInputs = [
|
||
pkgs.coreutils
|
||
printAndDeleteFile
|
||
];
|
||
text = ''
|
||
find . -type f -exec print-and-delete-file "{}" \;
|
||
'';
|
||
};
|
||
in {
|
||
requires = ["printer.target"];
|
||
after = ["printer.target"];
|
||
serviceConfig = {
|
||
WorkingDirectory = "/srv/to-be-printed";
|
||
ExecStart = "${script}/bin/print-all-files-script";
|
||
# Wait 15 seconds before restart to let the file load, if not present yet
|
||
RestartSec = "15";
|
||
};
|
||
};
|
||
users.users.sftp = {
|
||
description = "User used for all sftp stuff";
|
||
isNormalUser = true;
|
||
group = "sftp";
|
||
openssh.authorizedKeys.keyFiles = [
|
||
../secrets/users/malte/sftp-key.pub
|
||
../secrets/users/marie/sftp-key.pub
|
||
];
|
||
};
|
||
users.groups.sftp = {};
|
||
|
||
hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
|
||
|
||
# === BACKUPS ===
|
||
services.restic.backups = {
|
||
      # Make sure my 'active IO' disk gets saved once a day
      zdirty = {
        initialize = true;
        repository = "/data/archive/dirty.bak";
        timerConfig.OnCalendar = "daily";
        paths = lib.singleton "/data/dirty";
        pruneOpts = [
          "--keep-daily 1"
          "--keep-weekly 1"
          "--keep-monthly 1"
          "--keep-yearly 5"
        ];
        passwordFile = sopsPath "internal-restic-password";
      };
    };

    # === RUNTIME SECRETS ===
    sops.defaultSopsFile = ../secrets/hosts/faunus-ater/secrets.yaml;
    sops.age.sshKeyPaths = ["/etc/ssh/ssh_host_ed25519_key"];
    sops.secrets = let
      nginxSecret = {
        owner = config.users.users.nginx.name;
        mode = "0400";
      };
    in {
      "certificate-key-config-home" = nginxSecret;
      "certificate-key-todo-home" = nginxSecret;
      "certificate-key-time-home" = nginxSecret;
      "certificate-key-support-home" = nginxSecret;
      "certificate-key-hydra-home" = nginxSecret;
      "certificate-key-cache-home" = nginxSecret;
      "certificate-key-doc-home" = nginxSecret;
      "certificate-key-read-home" = nginxSecret;
      "certificate-key-note-home" = nginxSecret;
      "certificate-key-foto-home" = nginxSecret;
      "certificate-key-listen-home" = nginxSecret;
      "certificate-key-git-home" = nginxSecret;
      "paperless-admin-password" = {};
      "photoprism-admin-password" = {};
      "grafana-admin-password" = {
        owner = config.users.users.grafana.name;
        mode = "0400";
      };
      "nginx-cert-key" = nginxSecret;
      "nginx-cert-crt" = nginxSecret;
      "fritzbox-exporter-env" = {};
      "internal-restic-password" = {};
      "nix-store-signing-key" = {};
      "hydra-admin-password" = {
        owner = config.users.users.hydra.name;
        mode = "0400";
      };
      "hydra-overseer-key" = {
        owner = config.users.users.hydra.name;
        mode = "0440";
      };
      "gogs-database-password" = {
        owner = config.users.users.gogs.name;
        mode = "0400";
      };
    };

    # This value determines the NixOS release from which the default
    # settings for stateful data, like file locations and database versions
    # on your system were taken. It's perfectly fine and recommended to leave
    # this value at the release version of the first install of this system.
    # Before changing this value read the documentation for this option
    # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
    system.stateVersion = "22.05"; # Did you read the comment?
  };
}