nixos: create 'modules/nixos' folder
Let's consolidate all modules under one path, so that NixOS, home-manager, and nix-darwin (if I ever end up using it down the line) would go under the same folder.
parent b52e56ed08
commit c856933803
74 changed files with 1 additions and 1 deletions
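For context, this is roughly how such a consolidated folder gets wired into a host configuration. A minimal sketch, assuming a conventional `hosts/<name>/default.nix` layout; the host path and the top-level `modules/nixos/default.nix` aggregator are assumptions, not part of this commit:

# Hypothetical hosts/<name>/default.nix (sketch, not part of this diff)
{ ... }:
{
  imports = [
    ../../modules/nixos # assumed to pick up modules/nixos/default.nix
  ];

  # Options declared by the modules below then become available, e.g.:
  my.services.jellyfin.enable = true;
}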
72 modules/nixos/services/adblock/default.nix (new file)
@@ -0,0 +1,72 @@
{ config, lib, pkgs, ... }:
let
  wgCfg = config.my.services.wireguard;
  cfg = config.my.services.adblock;
in
{
  options.my.services.adblock = with lib; {
    enable = mkEnableOption "Hosts-based adblock using unbound";

    forwardAddresses = mkOption {
      type = with types; listOf str;
      default = [
        "1.0.0.1@853#cloudflare-dns.com"
        "1.1.1.1@853#cloudflare-dns.com"
      ];
      example = [
        "8.8.4.4"
        "8.8.8.8"
      ];
      description = "Which DNS servers to forward queries to";
    };

    interfaces = mkOption {
      type = with types; listOf str;
      default = [
        "0.0.0.0"
        "::"
      ];
      example = literalExample ''
        [
          "127.0.0.1"
        ]
      '';
      description = "Which addresses to listen on";
    };
  };

  config = lib.mkIf cfg.enable {
    # Allow wireguard clients to connect to it
    networking.firewall.interfaces."${wgCfg.iface}" = {
      allowedUDPPorts = [ 53 ];
      allowedTCPPorts = [ 53 ];
    };

    services.unbound = {
      enable = true;

      settings = {
        server = {
          access-control = [
            "127.0.0.0/24 allow"
            "${wgCfg.net.v4.subnet}.0/${toString wgCfg.net.v4.mask} allow"
            "${wgCfg.net.v6.subnet}::0/${toString wgCfg.net.v6.mask} allow"
          ];

          interface = cfg.interfaces;

          so-reuseport = true;
          tls-cert-bundle = "/etc/ssl/certs/ca-certificates.crt";
          tls-upstream = true;

          include = "${pkgs.ambroisie.unbound-zones-adblock}/hosts";
        };

        forward-zone = [{
          name = ".";
          forward-addr = cfg.forwardAddresses;
        }];
      };
    };
  };
}
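A minimal sketch of enabling this module from a host configuration; the Quad9 address below is only an illustration of overriding the Cloudflare defaults:

# Hypothetical host snippet (sketch)
{
  my.services.adblock = {
    enable = true;
    # Override the default Cloudflare DNS-over-TLS upstreams
    forwardAddresses = [ "9.9.9.9@853#dns.quad9.net" ];
  };
}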
106 modules/nixos/services/backup/default.nix (new file)
@@ -0,0 +1,106 @@
# Backups using Backblaze B2 and `restic`
{ config, pkgs, lib, ... }:
let
  cfg = config.my.services.backup;

  excludeArg = with builtins; with pkgs; "--exclude-file=" +
    (writeText "excludes.txt" (concatStringsSep "\n" cfg.exclude));
in
{
  options.my.services.backup = with lib; {
    enable = mkEnableOption "backups for this host";

    repository = mkOption {
      type = types.str;
      example = "/mnt/backup-hdd";
      description = "The repository to back up to";
    };

    passwordFile = mkOption {
      type = types.str;
      example = "/var/lib/restic/password.txt";
      description = "Read the repository's password from this path";
    };

    credentialsFile = mkOption {
      type = types.str;
      example = "/var/lib/restic/creds.env";
      description = ''
        Credential file as an 'EnvironmentFile' (see `systemd.exec(5)`)
      '';
    };

    paths = mkOption {
      type = with types; listOf str;
      default = [ ];
      example = [
        "/var/lib"
        "/home"
      ];
      description = "Paths to back up";
    };

    exclude = mkOption {
      type = with types; listOf str;
      default = [ ];
      example = [
        # very large paths
        "/var/lib/docker"
        "/var/lib/systemd"
        "/var/lib/libvirt"

        # temporary files created by `cargo` and `go build`
        "**/target"
        "/home/*/go/bin"
        "/home/*/go/pkg"
      ];
      description = "Paths to exclude from backup";
    };

    pruneOpts = mkOption {
      type = with types; listOf str;
      default = [
        "--keep-last 10"
        "--keep-hourly 24"
        "--keep-daily 7"
        "--keep-weekly 5"
        "--keep-monthly 12"
        "--keep-yearly 100"
      ];
      example = [ "--keep-last 5" "--keep-weekly 2" ];
      description = ''
        List of options to give to the `forget` subcommand after a backup.
      '';
    };

    timerConfig = mkOption {
      # NOTE: I do not know how to cleanly set the type
      default = {
        OnCalendar = "daily";
      };
      example = {
        OnCalendar = "00:05";
        RandomizedDelaySec = "5h";
      };
      description = ''
        When to run the backup. See man systemd.timer for details.
      '';
    };
  };

  config = lib.mkIf cfg.enable {
    services.restic.backups.backblaze = {
      # Take care of included and excluded files
      paths = cfg.paths;
      extraBackupArgs = [ "--verbose=2" ]
        ++ lib.optional (builtins.length cfg.exclude != 0) excludeArg
      ;
      # Take care of creating the repository if it doesn't exist
      initialize = true;
      # give B2 API key securely
      environmentFile = cfg.credentialsFile;

      inherit (cfg) passwordFile pruneOpts timerConfig repository;
    };
  };
}
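For reference, a sketch of what consuming this module could look like; the `b2:` repository spec and bucket name are assumptions based on the Backblaze B2 comment above:

# Hypothetical host snippet (sketch)
{
  my.services.backup = {
    enable = true;
    repository = "b2:my-bucket:/machine-name"; # assumed restic B2 repository spec
    passwordFile = "/var/lib/restic/password.txt";
    # Expected to define B2_ACCOUNT_ID / B2_ACCOUNT_KEY for restic
    credentialsFile = "/var/lib/restic/creds.env";
    paths = [ "/var/lib" ];
  };
}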
46 modules/nixos/services/blog/default.nix (new file)
@@ -0,0 +1,46 @@
# My blog setup
{ config, lib, ... }:
let
  cfg = config.my.services.blog;
  domain = config.networking.domain;

  makeHostInfo = subdomain: {
    inherit subdomain;
    root = "/var/www/${subdomain}";
  };

  hostsInfo = map makeHostInfo [ "cv" "dev" "key" ];
in
{
  options.my.services.blog = {
    enable = lib.mkEnableOption "Blog hosting";
  };

  config = lib.mkIf cfg.enable {
    services.nginx.virtualHosts = {
      # This is not a subdomain, cannot use my nginx wrapper module
      ${domain} = {
        forceSSL = true;
        useACMEHost = domain;
        root = "/var/www/blog";

        # http://www.gnuterrypratchett.com/
        extraConfig = ''
          add_header X-Clacks-Overhead "GNU Terry Pratchett";
        '';
      };

      # Dummy vhost to redirect all unknown (sub-)domains to my blog
      "_" = {
        forceSSL = true;
        useACMEHost = domain;
        default = true;

        locations."/".return = "302 https://belanyi.fr$request_uri";
      };
    };

    # Those are all subdomains, no problem
    my.services.nginx.virtualHosts = hostsInfo;
  };
}
73 modules/nixos/services/calibre-web/default.nix (new file)
@@ -0,0 +1,73 @@
{ config, lib, ... }:
let
  cfg = config.my.services.calibre-web;
in
{
  options.my.services.calibre-web = with lib; {
    enable = mkEnableOption "Calibre-web server";

    port = mkOption {
      type = types.port;
      default = 8083;
      example = 8080;
      description = "Internal port for webui";
    };

    libraryPath = mkOption {
      type = with types; either path str;
      example = /data/media/library;
      description = "Path to the Calibre library to use";
    };
  };

  config = lib.mkIf cfg.enable {
    services.calibre-web = {
      enable = true;

      listen = {
        ip = "127.0.0.1";
        port = cfg.port;
      };

      group = "media";

      options = {
        calibreLibrary = cfg.libraryPath;
        enableBookConversion = true;
      };
    };

    # Set-up media group
    users.groups.media = { };

    my.services.nginx.virtualHosts = [
      {
        subdomain = "library";
        inherit (cfg) port;
      }
    ];

    my.services.backup = {
      paths = [
        "/var/lib/${config.services.calibre-web.dataDir}" # For `app.db` and `gdrive.db`
        cfg.libraryPath
      ];
    };

    services.fail2ban.jails = {
      calibre-web = ''
        enabled = true
        filter = calibre-web
        port = http,https
      '';
    };

    environment.etc = {
      "fail2ban/filter.d/calibre-web.conf".text = ''
        [Definition]
        failregex = ^.*Login failed for user ".*" IP-address: <HOST>$
        journalmatch = _SYSTEMD_UNIT=calibre-web.service
      '';
    };
  };
}
40 modules/nixos/services/default.nix (new file)
@@ -0,0 +1,40 @@
{ ... }:

{
  imports = [
    ./adblock
    ./backup
    ./blog
    ./calibre-web
    ./drone
    ./fail2ban
    ./flood
    ./gitea
    ./grocy
    ./indexers
    ./jellyfin
    ./lohr
    ./matrix
    ./miniflux
    ./monitoring
    ./navidrome
    ./nextcloud
    ./nginx
    ./nix-cache
    ./paperless
    ./pirate
    ./podgrab
    ./postgresql
    ./postgresql-backup
    ./quassel
    ./rss-bridge
    ./sabnzbd
    ./ssh-server
    ./tandoor-recipes
    ./tlp
    ./transmission
    ./vikunja
    ./wireguard
    ./woodpecker
  ];
}
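Only `services/default.nix` appears in this diff; presumably a top-level `modules/nixos/default.nix` aggregates categories the same way. A sketch of what that might look like; the `./system` sibling is an assumption, hinted at by the `my.system.docker.enable` reference in the Drone docker runner below:

# Hypothetical modules/nixos/default.nix (sketch, not part of this diff)
{ ... }:
{
  imports = [
    ./services
    ./system # assumed sibling category
  ];
}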
44 modules/nixos/services/drone/default.nix (new file)
@@ -0,0 +1,44 @@
# A docker-based CI/CD system
#
# Inspired by [1]
# [1]: https://github.com/Mic92/dotfiles/blob/master/nixos/eve/modules/drone.nix
{ lib, ... }:
{
  imports = [
    ./runner-docker
    ./runner-exec
    ./server
  ];

  options.my.services.drone = with lib; {
    enable = mkEnableOption "Drone CI";
    runners = mkOption {
      type = with types; listOf (enum [ "exec" "docker" ]);
      default = [ ];
      example = [ "exec" "docker" ];
      description = "Types of runners to enable";
    };
    admin = mkOption {
      type = types.str;
      default = "ambroisie";
      example = "admin";
      description = "Name of the admin user";
    };
    port = mkOption {
      type = types.port;
      default = 3030;
      example = 8080;
      description = "Internal port of the Drone UI";
    };
    secretFile = mkOption {
      type = types.str;
      example = "/run/secrets/drone-gitea.env";
      description = "Secrets to inject into Drone server";
    };
    sharedSecretFile = mkOption {
      type = types.str;
      example = "/run/secrets/drone-rpc.env";
      description = "Shared RPC secret to inject into server and runners";
    };
  };
}
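A sketch of enabling the whole Drone stack, exercising the options declared above; the secret paths are illustrative:

# Hypothetical host snippet (sketch)
{
  my.services.drone = {
    enable = true;
    runners = [ "docker" "exec" ];
    secretFile = "/run/secrets/drone-gitea.env";
    sharedSecretFile = "/run/secrets/drone-rpc.env";
  };
}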
43 modules/nixos/services/drone/runner-docker/default.nix (new file)
@@ -0,0 +1,43 @@
{ config, lib, pkgs, ... }:
let
  cfg = config.my.services.drone;
  hasRunner = (name: builtins.elem name cfg.runners);
in
{
  config = lib.mkIf (cfg.enable && hasRunner "docker") {
    systemd.services.drone-runner-docker = {
      wantedBy = [ "multi-user.target" ];
      after = [ "docker.socket" ]; # Needs the socket to be available
      # might break deployment
      restartIfChanged = false;
      confinement.enable = true;
      serviceConfig = {
        Environment = [
          "DRONE_SERVER_HOST=drone.${config.networking.domain}"
          "DRONE_SERVER_PROTO=https"
          "DRONE_RUNNER_CAPACITY=10"
          "CLIENT_DRONE_RPC_HOST=127.0.0.1:${toString cfg.port}"
        ];
        BindPaths = [
          "/var/run/docker.sock"
        ];
        EnvironmentFile = [
          cfg.sharedSecretFile
        ];
        ExecStart = lib.getExe pkgs.drone-runner-docker;
        User = "drone-runner-docker";
        Group = "drone-runner-docker";
      };
    };

    # Make sure it is activated in that case
    my.system.docker.enable = true;

    users.users.drone-runner-docker = {
      isSystemUser = true;
      group = "drone-runner-docker";
      extraGroups = [ "docker" ]; # Give access to the daemon
    };
    users.groups.drone-runner-docker = { };
  };
}
67 modules/nixos/services/drone/runner-exec/default.nix (new file)
@@ -0,0 +1,67 @@
{ config, lib, pkgs, ... }:
let
  cfg = config.my.services.drone;
  hasRunner = (name: builtins.elem name cfg.runners);
in
{
  config = lib.mkIf (cfg.enable && hasRunner "exec") {
    systemd.services.drone-runner-exec = {
      wantedBy = [ "multi-user.target" ];
      # might break deployment
      restartIfChanged = false;
      confinement.enable = true;
      confinement.packages = with pkgs; [
        git
        gnutar
        bash
        nix
        gzip
      ];
      path = with pkgs; [
        git
        gnutar
        bash
        nix
        gzip
      ];
      serviceConfig = {
        Environment = [
          "DRONE_SERVER_HOST=drone.${config.networking.domain}"
          "DRONE_SERVER_PROTO=https"
          "DRONE_RUNNER_CAPACITY=10"
          "CLIENT_DRONE_RPC_HOST=127.0.0.1:${toString cfg.port}"
          "NIX_REMOTE=daemon"
          "PAGER=cat"
        ];
        BindPaths = [
          "/nix/var/nix/daemon-socket/socket"
          "/run/nscd/socket"
        ];
        BindReadOnlyPaths = [
          "/etc/resolv.conf:/etc/resolv.conf"
          "/etc/resolvconf.conf:/etc/resolvconf.conf"
          "/etc/passwd:/etc/passwd"
          "/etc/group:/etc/group"
          "/nix/var/nix/profiles/system/etc/nix:/etc/nix"
          "${config.environment.etc."ssl/certs/ca-certificates.crt".source}:/etc/ssl/certs/ca-certificates.crt"
          "${config.environment.etc."ssh/ssh_known_hosts".source}:/etc/ssh/ssh_known_hosts"
          "/etc/machine-id"
          # channels are dynamic paths in the nix store, therefore we need to bind mount the whole thing
          "/nix/"
        ];
        EnvironmentFile = [
          cfg.sharedSecretFile
        ];
        ExecStart = lib.getExe pkgs.drone-runner-exec;
        User = "drone-runner-exec";
        Group = "drone-runner-exec";
      };
    };

    users.users.drone-runner-exec = {
      isSystemUser = true;
      group = "drone-runner-exec";
    };
    users.groups.drone-runner-exec = { };
  };
}
57 modules/nixos/services/drone/server/default.nix (new file)
@@ -0,0 +1,57 @@
{ config, lib, pkgs, ... }:
let
  cfg = config.my.services.drone;
in
{
  config = lib.mkIf cfg.enable {
    systemd.services.drone-server = {
      wantedBy = [ "multi-user.target" ];
      after = [ "postgresql.service" ];
      requires = [ "postgresql.service" ];
      serviceConfig = {
        EnvironmentFile = [
          cfg.secretFile
          cfg.sharedSecretFile
        ];
        Environment = [
          "DRONE_DATABASE_DATASOURCE=postgres:///drone?host=/run/postgresql"
          "DRONE_SERVER_HOST=drone.${config.networking.domain}"
          "DRONE_SERVER_PROTO=https"
          "DRONE_DATABASE_DRIVER=postgres"
          "DRONE_SERVER_PORT=:${toString cfg.port}"
          "DRONE_USER_CREATE=username:${cfg.admin},admin:true"
          "DRONE_JSONNET_ENABLED=true"
          "DRONE_STARLARK_ENABLED=true"
        ];
        ExecStart = "${pkgs.drone}/bin/drone-server";
        User = "drone";
        Group = "drone";
      };
    };

    users.users.drone = {
      isSystemUser = true;
      createHome = true;
      group = "drone";
    };
    users.groups.drone = { };

    services.postgresql = {
      enable = true;
      ensureDatabases = [ "drone" ];
      ensureUsers = [{
        name = "drone";
        ensurePermissions = {
          "DATABASE drone" = "ALL PRIVILEGES";
        };
      }];
    };

    my.services.nginx.virtualHosts = [
      {
        subdomain = "drone";
        inherit (cfg) port;
      }
    ];
  };
}
37 modules/nixos/services/fail2ban/default.nix (new file)
@@ -0,0 +1,37 @@
# Filter and ban unauthorized access
{ config, lib, ... }:
let
  cfg = config.my.services.fail2ban;
  wgNetCfg = config.my.services.wireguard.net;
in
{
  options.my.services.fail2ban = with lib; {
    enable = mkEnableOption "fail2ban daemon";
  };

  config = lib.mkIf cfg.enable {
    services.fail2ban = {
      enable = true;

      ignoreIP = [
        # Wireguard IPs
        "${wgNetCfg.v4.subnet}.0/${toString wgNetCfg.v4.mask}"
        "${wgNetCfg.v6.subnet}::/${toString wgNetCfg.v6.mask}"
        # Loopback addresses
        "127.0.0.0/8"
      ];

      maxretry = 5;

      bantime-increment = {
        enable = true;
        rndtime = "5m"; # Use 5 minute jitter to avoid unban evasion
      };

      jails.DEFAULT.settings = {
        findtime = "4h";
        bantime = "10m";
      };
    };
  };
}
50 modules/nixos/services/flood/default.nix (new file)
@@ -0,0 +1,50 @@
# A nice UI for various torrent clients
{ config, lib, pkgs, ... }:
let
  cfg = config.my.services.flood;
in
{
  options.my.services.flood = with lib; {
    enable = mkEnableOption "Flood UI";

    port = mkOption {
      type = types.port;
      default = 9092;
      example = 3000;
      description = "Internal port for Flood UI";
    };

    stateDir = mkOption {
      type = types.str;
      default = "flood";
      example = "floodUI";
      description = "Directory under `/var/lib` for storing Flood's files";
    };
  };

  config = lib.mkIf cfg.enable {
    systemd.services.flood = {
      description = "Flood torrent UI";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];

      serviceConfig = {
        ExecStart = lib.concatStringsSep " " [
          (lib.getExe pkgs.flood)
          "--port ${builtins.toString cfg.port}"
          "--rundir /var/lib/${cfg.stateDir}"
        ];
        DynamicUser = true;
        StateDirectory = cfg.stateDir;
        ReadWritePaths = "";
      };
    };

    my.services.nginx.virtualHosts = [
      {
        subdomain = "flood";
        inherit (cfg) port;
      }
    ];
  };
}
154 modules/nixos/services/gitea/default.nix (new file)
@@ -0,0 +1,154 @@
# A low-resource, full-featured git forge.
{ config, lib, ... }:
let
  cfg = config.my.services.gitea;
in
{
  options.my.services.gitea = with lib; {
    enable = mkEnableOption "Gitea";
    port = mkOption {
      type = types.port;
      default = 3042;
      example = 8080;
      description = "Internal port";
    };
    mail = {
      enable = mkEnableOption "mailer configuration";
      host = mkOption {
        type = types.str;
        example = "smtp.example.com:465";
        description = "Host for the mail account";
      };
      user = mkOption {
        type = types.str;
        example = "gitea@example.com";
        description = "User for the mail account";
      };
      passwordFile = mkOption {
        type = types.str;
        example = "/run/secrets/gitea-mail-password.txt";
        description = "Password for the mail account";
      };
      type = mkOption {
        type = types.str;
        default = "smtp";
        example = "smtp";
        description = "Type of mailer to use";
      };
      tls = mkOption {
        type = types.bool;
        default = true;
        example = false;
        description = "Use TLS for connection";
      };
    };
  };

  config = lib.mkIf cfg.enable {
    services.gitea =
      let
        inherit (config.networking) domain;
        giteaDomain = "git.${domain}";
      in
      {
        enable = true;

        appName = "Ambroisie's forge";

        user = "git";
        lfs.enable = true;

        useWizard = false;

        database = {
          type = "postgres"; # Automatic setup
          user = "git"; # User needs to be the same as gitea user
        };

        # NixOS module uses `gitea dump` to backup repositories and the database,
        # but it produces a single .zip file that's not very backup friendly.
        # I configure my backup system manually below.
        dump.enable = false;

        mailerPasswordFile = lib.mkIf cfg.mail.enable cfg.mail.passwordFile;

        settings = {
          server = {
            HTTP_PORT = cfg.port;
            DOMAIN = giteaDomain;
            ROOT_URL = "https://${giteaDomain}";
          };

          mailer = lib.mkIf cfg.mail.enable {
            ENABLED = true;
            HOST = cfg.mail.host;
            FROM = cfg.mail.user;
            USER = cfg.mail.user;
            MAILER_TYPE = cfg.mail.type;
            IS_TLS_ENABLED = cfg.mail.tls;
          };

          service = {
            DISABLE_REGISTRATION = true;
          };

          session = {
            # only send cookies via HTTPS
            COOKIE_SECURE = true;
          };
        };
      };

    users.users.git = {
      description = "Gitea Service";
      home = config.services.gitea.stateDir;
      useDefaultShell = true;
      group = "git";

      # The service for gitea seems to hardcode the group as
      # gitea, so, uh, just in case?
      extraGroups = [ "gitea" ];

      isSystemUser = true;
    };
    users.groups.git = { };

    my.services.nginx.virtualHosts = [
      # Proxy to Gitea
      {
        subdomain = "git";
        inherit (cfg) port;
      }
      # Redirect `gitea.` to actual forge subdomain
      {
        subdomain = "gitea";
        redirect = config.services.gitea.settings.server.ROOT_URL;
      }
    ];

    my.services.backup = {
      paths = [
        config.services.gitea.lfs.contentDir
        config.services.gitea.repositoryRoot
      ];
    };

    services.fail2ban.jails = {
      gitea = ''
        enabled = true
        filter = gitea
        action = iptables-allports
      '';
    };

    environment.etc = {
      "fail2ban/filter.d/gitea.conf".text = ''
        [Definition]
        failregex = ^.*(Failed authentication attempt|invalid credentials|Attempted access of unknown user).* from <HOST>$
        journalmatch = _SYSTEMD_UNIT=gitea.service
      '';
    };
  };
}
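A sketch of enabling Gitea with the optional mailer, using the option set declared above; the host and file paths are illustrative:

# Hypothetical host snippet (sketch)
{
  my.services.gitea = {
    enable = true;
    mail = {
      enable = true;
      host = "smtp.example.com:465";
      user = "gitea@example.com";
      passwordFile = "/run/secrets/gitea-mail-password.txt";
    };
  };
}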
40 modules/nixos/services/grocy/default.nix (new file)
@@ -0,0 +1,40 @@
# Groceries and household management
{ config, lib, ... }:
let
  cfg = config.my.services.grocy;
  grocyDomain = "grocy.${config.networking.domain}";
in
{
  options.my.services.grocy = with lib; {
    enable = mkEnableOption "Grocy household ERP";
  };

  config = lib.mkIf cfg.enable {
    services.grocy = {
      enable = true;

      # The service sets up the reverse proxy automatically
      hostName = grocyDomain;

      # Configure SSL by hand
      nginx = {
        enableSSL = false;
      };

      settings = {
        currency = "EUR";
        culture = "en";
        calendar = {
          # Start on Monday
          firstDayOfWeek = 1;
          showWeekNumber = true;
        };
      };
    };

    services.nginx.virtualHosts."${grocyDomain}" = {
      forceSSL = true;
      useACMEHost = config.networking.domain;
    };
  };
}
81 modules/nixos/services/indexers/default.nix (new file)
@@ -0,0 +1,81 @@
# Torrent and usenet meta-indexers
{ config, lib, ... }:
let
  cfg = config.my.services.indexers;

  jackettPort = 9117;
  nzbhydraPort = 5076;
  prowlarrPort = 9696;
in
{
  options.my.services.indexers = with lib; {
    jackett.enable = mkEnableOption "Jackett torrent meta-indexer";
    nzbhydra.enable = mkEnableOption "NZBHydra2 usenet meta-indexer";
    prowlarr.enable = mkEnableOption "Prowlarr torrent & usenet meta-indexer";
  };

  config = lib.mkMerge [
    (lib.mkIf cfg.jackett.enable {
      services.jackett = {
        enable = true;
      };

      # Jackett wants to eat *all* my RAM if left to its own devices
      systemd.services.jackett = {
        serviceConfig = {
          MemoryHigh = "15%";
          MemoryMax = "25%";
        };
      };

      my.services.nginx.virtualHosts = [
        {
          subdomain = "jackett";
          port = jackettPort;
        }
      ];
    })

    (lib.mkIf cfg.nzbhydra.enable {
      services.nzbhydra2 = {
        enable = true;
      };

      my.services.nginx.virtualHosts = [
        {
          subdomain = "nzbhydra";
          port = nzbhydraPort;
        }
      ];
    })

    (lib.mkIf cfg.prowlarr.enable {
      services.prowlarr = {
        enable = true;
      };

      my.services.nginx.virtualHosts = [
        {
          subdomain = "prowlarr";
          port = prowlarrPort;
        }
      ];

      services.fail2ban.jails = {
        prowlarr = ''
          enabled = true
          filter = prowlarr
          action = iptables-allports
        '';
      };

      environment.etc = {
        "fail2ban/filter.d/prowlarr.conf".text = ''
          [Definition]
          failregex = ^.*\|Warn\|Auth\|Auth-Failure ip <HOST> username .*$
          journalmatch = _SYSTEMD_UNIT=prowlarr.service
        '';
      };
    })
  ];
}
39 modules/nixos/services/jellyfin/default.nix (new file)
@@ -0,0 +1,39 @@
# A FLOSS media server
{ config, lib, ... }:
let
  cfg = config.my.services.jellyfin;
in
{
  options.my.services.jellyfin = {
    enable = lib.mkEnableOption "Jellyfin Media Server";
  };

  config = lib.mkIf cfg.enable {
    services.jellyfin = {
      enable = true;
      group = "media";
    };

    # Set-up media group
    users.groups.media = { };

    my.services.nginx.virtualHosts = [
      {
        subdomain = "jellyfin";
        port = 8096;
        extraConfig = {
          locations."/" = {
            extraConfig = ''
              proxy_buffering off;
            '';
          };
          # Too bad for the repetition...
          locations."/socket" = {
            proxyPass = "http://127.0.0.1:8096/";
            proxyWebsockets = true;
          };
        };
      }
    ];
  };
}
108 modules/nixos/services/lohr/default.nix (new file)
@@ -0,0 +1,108 @@
# A simple Gitea webhook to mirror all my repositories
{ config, lib, pkgs, ... }:
let
  cfg = config.my.services.lohr;
  settingsFormat = pkgs.formats.yaml { };

  lohrStateDirectory = "lohr";
  lohrHome = "/var/lib/lohr/";
in
{
  options.my.services.lohr = with lib; {
    enable = mkEnableOption "Automatic Gitea repository mirroring";

    port = mkOption {
      type = types.port;
      default = 9192;
      example = 8080;
      description = "Internal port of the Lohr service";
    };

    setting = mkOption rec {
      type = settingsFormat.type;
      apply = recursiveUpdate default;
      default = {
        default_remotes = [
          "git@github.com:ambroisie"
          "git@git.sr.ht:~ambroisie"
        ];
      };
      description = "Settings used to generate the global configuration file";
    };

    sharedSecretFile = mkOption {
      type = types.str;
      example = "/run/secrets/lohr.env";
      description = "Shared secret between lohr and Gitea hook";
    };

    sshKeyFile = mkOption {
      type = with types; nullOr str;
      default = null;
      example = "/run/secrets/lohr/ssh-key";
      description = ''
        The ssh key that should be used by lohr to mirror repositories
      '';
    };
  };

  config = lib.mkIf cfg.enable {
    systemd.services.lohr = {
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        EnvironmentFile = [
          cfg.sharedSecretFile
        ];
        Environment = [
          "ROCKET_PORT=${toString cfg.port}"
          "ROCKET_LOG_LEVEL=normal"
          "LOHR_HOME=${lohrHome}"
          "LOHR_CONFIG="
        ];
        ExecStartPre = lib.mkIf (cfg.sshKeyFile != null) ''+${
          pkgs.writeScript "copy-ssh-key" ''
            #!${pkgs.bash}/bin/bash
            # Ensure the key is not there
            mkdir -p '${lohrHome}/.ssh'
            rm -f '${lohrHome}/.ssh/id_ed25519'

            # Move the key into place
            cp ${cfg.sshKeyFile} '${lohrHome}/.ssh/id_ed25519'

            # Fix permissions
            chown -R lohr:lohr '${lohrHome}/.ssh'
            chmod -R 0700 '${lohrHome}/.ssh'
          ''
        }'';
        ExecStart =
          let
            configFile = settingsFormat.generate "lohr-config.yaml" cfg.setting;
          in
          "${lib.getExe pkgs.ambroisie.lohr} --config ${configFile}";
        StateDirectory = lohrStateDirectory;
        WorkingDirectory = lohrHome;
        User = "lohr";
        Group = "lohr";
      };
      path = with pkgs; [
        git
        openssh
      ];
    };

    users.users.lohr = {
      isSystemUser = true;
      home = lohrHome;
      createHome = true;
      group = "lohr";
    };
    users.groups.lohr = { };

    my.services.nginx.virtualHosts = [
      {
        subdomain = "lohr";
        inherit (cfg) port;
      }
    ];
  };
}
261 modules/nixos/services/matrix/default.nix (new file)
@@ -0,0 +1,261 @@
# Matrix homeserver setup, using different endpoints for federation and client
# traffic. The main trick for this is defining two nginx server endpoints for
# matrix.domain.com, each listening on different ports.
#
# Configuration shamelessly stolen from [1]
#
# [1]: https://github.com/alarsyo/nixos-config/blob/main/services/matrix.nix
{ config, lib, pkgs, ... }:

let
  cfg = config.my.services.matrix;

  federationPort = { public = 8448; private = 11338; };
  clientPort = { public = 443; private = 11339; };
  domain = config.networking.domain;
  matrixDomain = "matrix.${domain}";
in
{
  options.my.services.matrix = with lib; {
    enable = mkEnableOption "Matrix Synapse";

    secretFile = mkOption {
      type = with types; nullOr str;
      default = null;
      example = "/var/lib/matrix/shared-secret-config.yaml";
      description = "Shared secret to register users";
    };

    slidingSync = {
      port = mkOption {
        type = types.port;
        default = 8009;
        example = 8084;
        description = "Port used by sliding sync server";
      };

      secretFile = mkOption {
        type = types.str;
        example = "/var/lib/matrix/sliding-sync-secret-file.env";
        description = "Secret file which contains SYNCV3_SECRET definition";
      };
    };

    mailConfigFile = mkOption {
      type = types.str;
      example = "/var/lib/matrix/email-config.yaml";
      description = ''
        Configuration file for mail setup.
      '';
    };
  };

  config = lib.mkIf cfg.enable {
    services.postgresql = {
      enable = true;
      initialScript = pkgs.writeText "synapse-init.sql" ''
        CREATE ROLE "matrix-synapse" WITH LOGIN PASSWORD 'synapse';
        CREATE DATABASE "matrix-synapse" WITH OWNER "matrix-synapse"
          TEMPLATE template0
          LC_COLLATE = "C"
          LC_CTYPE = "C";
      '';
    };

    services.matrix-synapse = {
      enable = true;
      dataDir = "/var/lib/matrix-synapse";

      settings = {
        server_name = domain;
        public_baseurl = "https://${matrixDomain}";

        enable_registration = false;

        listeners = [
          # Federation
          {
            bind_addresses = [ "::1" ];
            port = federationPort.private;
            tls = false; # Terminated by nginx.
            x_forwarded = true;
            resources = [{ names = [ "federation" ]; compress = false; }];
          }

          # Client
          {
            bind_addresses = [ "::1" ];
            port = clientPort.private;
            tls = false; # Terminated by nginx.
            x_forwarded = true;
            resources = [{ names = [ "client" ]; compress = false; }];
          }
        ];

        account_threepid_delegates = {
          msisdn = "https://vector.im";
        };

        experimental_features = {
          spaces_enabled = true;
        };
      };

      extraConfigFiles = [
        cfg.mailConfigFile
      ] ++ lib.optional (cfg.secretFile != null) cfg.secretFile;

      sliding-sync = {
        enable = true;

        settings = {
          SYNCV3_SERVER = "https://${matrixDomain}";
          SYNCV3_BINDADDR = "127.0.0.1:${toString cfg.slidingSync.port}";
        };

        environmentFile = cfg.slidingSync.secretFile;
      };
    };

    my.services.nginx.virtualHosts = [
      # Element Web app deployment
      {
        subdomain = "chat";
        root = pkgs.element-web.override {
          conf = {
            default_server_config = {
              "m.homeserver" = {
                "base_url" = "https://${matrixDomain}";
                "server_name" = domain;
              };
              "m.identity_server" = {
                "base_url" = "https://vector.im";
              };
              "org.matrix.msc3575.proxy" = {
                "url" = "https://matrix-sync.${domain}";
              };
            };
            showLabsSettings = true;
            defaultCountryCode = "FR"; # cocorico
            roomDirectory = {
              "servers" = [
                "matrix.org"
                "mozilla.org"
              ];
            };
          };
        };
      }
      # Dummy VHosts for port collision detection
      {
        subdomain = "matrix-federation";
        port = federationPort.private;
      }
      {
        subdomain = "matrix-client";
        port = clientPort.private;
      }
      # Sliding sync
      {
        subdomain = "matrix-sync";
        inherit (cfg.slidingSync) port;
      }
    ];

    # Those are too complicated to use my wrapper...
    services.nginx.virtualHosts = {
      ${matrixDomain} = {
        onlySSL = true;
        useACMEHost = domain;

        locations =
          let
            proxyToClientPort = {
              proxyPass = "http://[::1]:${toString clientPort.private}";
            };
          in
          {
            # Or do a redirect instead of the 404, or whatever is appropriate
            # for you. But do not put a Matrix Web client here! See the
            # Element web section below.
            "/".return = "404";

            "/_matrix" = proxyToClientPort;
            "/_synapse/client" = proxyToClientPort;

            # Sliding sync
            "~ ^/(client/|_matrix/client/unstable/org.matrix.msc3575/sync)" = {
              proxyPass = "http://${config.services.matrix-synapse.sliding-sync.settings.SYNCV3_BINDADDR}";
            };
          };

        listen = [
          { addr = "0.0.0.0"; port = clientPort.public; ssl = true; }
          { addr = "[::]"; port = clientPort.public; ssl = true; }
        ];
      };

      # same as above, but listening on the federation port
      "${matrixDomain}_federation" = {
        onlySSL = true;
        serverName = matrixDomain;
        useACMEHost = domain;

        locations."/".return = "404";

        locations."/_matrix" = {
          proxyPass = "http://[::1]:${toString federationPort.private}";
        };

        listen = [
          { addr = "0.0.0.0"; port = federationPort.public; ssl = true; }
          { addr = "[::]"; port = federationPort.public; ssl = true; }
        ];
      };

      "${domain}" = {
        forceSSL = true;
        useACMEHost = domain;

        locations."= /.well-known/matrix/server".extraConfig =
          let
            server = { "m.server" = "${matrixDomain}:${toString federationPort.public}"; };
          in
          ''
            add_header Content-Type application/json;
            return 200 '${builtins.toJSON server}';
          '';

        locations."= /.well-known/matrix/client".extraConfig =
          let
            client = {
              "m.homeserver" = { "base_url" = "https://${matrixDomain}"; };
              "m.identity_server" = { "base_url" = "https://vector.im"; };
              "org.matrix.msc3575.proxy" = { "url" = "https://matrix-sync.${domain}"; };
            };
            # ACAO required to allow element-web on any URL to request this json file
          in
          ''
            add_header Content-Type application/json;
            add_header Access-Control-Allow-Origin *;
            return 200 '${builtins.toJSON client}';
          '';
      };
    };

    # For administration tools.
    environment.systemPackages = [ pkgs.matrix-synapse ];

    networking.firewall.allowedTCPPorts = [
      clientPort.public
      federationPort.public
    ];

    my.services.backup = {
      paths = [
        config.services.matrix-synapse.dataDir
      ];
    };
  };
}
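A sketch of enabling the Matrix stack; both file paths are illustrative. The `.well-known` delegation records above are then served automatically from the apex domain:

# Hypothetical host snippet (sketch)
{
  my.services.matrix = {
    enable = true;
    mailConfigFile = "/var/lib/matrix/email-config.yaml";
    slidingSync.secretFile = "/var/lib/matrix/sliding-sync-secret.env";
  };
}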
53 modules/nixos/services/miniflux/default.nix (new file)
@@ -0,0 +1,53 @@
# A minimalist, opinionated feed reader
{ config, lib, ... }:
let
  cfg = config.my.services.miniflux;
in
{
  options.my.services.miniflux = with lib; {
    enable = mkEnableOption "Miniflux feed reader";

    credentialsFiles = mkOption {
      type = types.str;
      example = "/var/lib/miniflux/creds.env";
      description = ''
        Credential file as an 'EnvironmentFile' (see `systemd.exec(5)`)
      '';
    };

    port = mkOption {
      type = types.port;
      default = 9876;
      example = 8080;
      description = "Internal port for webui";
    };
  };

  config = lib.mkIf cfg.enable {
    # The service automatically sets up the DB
    services.miniflux = {
      enable = true;

      adminCredentialsFile = cfg.credentialsFiles;

      config = {
        # Virtual hosts settings
        BASE_URL = "https://reader.${config.networking.domain}";
        LISTEN_ADDR = "localhost:${toString cfg.port}";
        # I want fast updates
        POLLING_FREQUENCY = "30";
        BATCH_SIZE = "50";
        # I am a hoarder
        CLEANUP_ARCHIVE_UNREAD_DAYS = "-1";
        CLEANUP_ARCHIVE_READ_DAYS = "-1";
      };
    };

    my.services.nginx.virtualHosts = [
      {
        subdomain = "reader";
        inherit (cfg) port;
      }
    ];
  };
}
135 modules/nixos/services/monitoring/default.nix (new file)
@@ -0,0 +1,135 @@
# Grafana dashboards for all the things!
{ config, lib, pkgs, ... }:
let
  cfg = config.my.services.monitoring;
in
{
  options.my.services.monitoring = with lib; {
    enable = mkEnableOption "monitoring";

    grafana = {
      port = mkOption {
        type = types.port;
        default = 9500;
        example = 3001;
        description = "Internal port";
      };

      username = mkOption {
        type = types.str;
        default = "ambroisie";
        example = "admin";
        description = "Admin username";
      };

      passwordFile = mkOption {
        type = types.str;
        example = "/var/lib/grafana/password.txt";
        description = "Admin password stored in a file";
      };

      secretKeyFile = mkOption {
        type = types.str;
        example = "/var/lib/grafana/secret_key.txt";
        description = "Secret key stored in a file";
      };
    };

    prometheus = {
      port = mkOption {
        type = types.port;
        default = 9501;
        example = 3002;
        description = "Internal port";
      };

      scrapeInterval = mkOption {
        type = types.str;
        default = "15s";
        example = "1m";
        description = "Scrape interval";
      };
    };
  };

  config = lib.mkIf cfg.enable {
    services.grafana = {
      enable = true;

      settings = {
        server = {
          domain = "monitoring.${config.networking.domain}";
          root_url = "https://monitoring.${config.networking.domain}/";
          http_port = cfg.grafana.port;
          http_addr = "127.0.0.1"; # Proxied through Nginx
        };

        security = {
          admin_user = cfg.grafana.username;
          admin_password = "$__file{${cfg.grafana.passwordFile}}";
          secret_key = "$__file{${cfg.grafana.secretKeyFile}}";
        };
      };

      provision = {
        enable = true;

        datasources.settings.datasources = [
          {
            name = "Prometheus";
            type = "prometheus";
            url = "http://localhost:${toString cfg.prometheus.port}";
            jsonData = {
              timeInterval = cfg.prometheus.scrapeInterval;
            };
          }
        ];

        dashboards.settings.providers = [
          {
            name = "Node Exporter";
            options.path = pkgs.nur.repos.alarsyo.grafanaDashboards.node-exporter;
            disableDeletion = true;
          }
        ];
      };
    };

    services.prometheus = {
      enable = true;
      port = cfg.prometheus.port;
      listenAddress = "127.0.0.1";

      retentionTime = "2y";

      exporters = {
        node = {
          enable = true;
          enabledCollectors = [ "systemd" ];
          port = 9100;
          listenAddress = "127.0.0.1";
        };
      };

      globalConfig = {
        scrape_interval = cfg.prometheus.scrapeInterval;
      };

      scrapeConfigs = [
        {
          job_name = config.networking.hostName;
          static_configs = [{
            targets = [ "127.0.0.1:${toString config.services.prometheus.exporters.node.port}" ];
          }];
        }
      ];
    };

    my.services.nginx.virtualHosts = [
      {
        subdomain = "monitoring";
        inherit (cfg.grafana) port;
      }
    ];
  };
}
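A sketch of enabling monitoring; Grafana reads both secrets through its `$__file{...}` provider, so the files only need to be readable by the grafana user (paths are illustrative):

# Hypothetical host snippet (sketch)
{
  my.services.monitoring = {
    enable = true;
    grafana = {
      passwordFile = "/var/lib/grafana/password.txt";
      secretKeyFile = "/var/lib/grafana/secret_key.txt";
    };
  };
}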
57 modules/nixos/services/navidrome/default.nix (new file)
@@ -0,0 +1,57 @@
# A FLOSS self-hosted, subsonic compatible music server
{ config, lib, pkgs, ... }:
let
  cfg = config.my.services.navidrome;
in
{
  options.my.services.navidrome = with lib; {
    enable = mkEnableOption "Navidrome Music Server";

    settings = mkOption {
      type = (pkgs.formats.json { }).type;
      default = { };
      example = {
        "LastFM.ApiKey" = "MYKEY";
        "LastFM.Secret" = "MYSECRET";
        "Spotify.ID" = "MYKEY";
        "Spotify.Secret" = "MYSECRET";
      };
      description = ''
        Additional settings.
      '';
    };

    port = mkOption {
      type = types.port;
      default = 4533;
      example = 8080;
      description = "Internal port for webui";
    };

    musicFolder = mkOption {
      type = types.str;
      example = "/mnt/music/";
      description = "Music folder";
    };
  };

  config = lib.mkIf cfg.enable {
    services.navidrome = {
      enable = true;

      settings = cfg.settings // {
        Port = cfg.port;
        Address = "127.0.0.1"; # Behind reverse proxy, so only loopback
        MusicFolder = cfg.musicFolder;
        LogLevel = "info";
      };
    };

    my.services.nginx.virtualHosts = [
      {
        subdomain = "music";
        inherit (cfg) port;
      }
    ];
  };
}
86 modules/nixos/services/nextcloud/default.nix (new file)
@@ -0,0 +1,86 @@
# A self-hosted cloud.
{ config, lib, pkgs, ... }:
let
  cfg = config.my.services.nextcloud;
in
{
  options.my.services.nextcloud = with lib; {
    enable = mkEnableOption "Nextcloud";
    maxSize = mkOption {
      type = types.str;
      default = "512M";
      example = "1G";
      description = "Maximum file upload size";
    };
    admin = mkOption {
      type = types.str;
      default = "Ambroisie";
      example = "admin";
      description = "Name of the admin user";
    };
    passwordFile = mkOption {
      type = types.str;
      example = "/var/lib/nextcloud/password.txt";
      description = ''
        Path to a file containing the admin's password, must be readable by
        'nextcloud' user.
      '';
    };
  };

  config = lib.mkIf cfg.enable {
    services.nextcloud = {
      enable = true;
      package = pkgs.nextcloud27;
      hostName = "nextcloud.${config.networking.domain}";
      home = "/var/lib/nextcloud";
      maxUploadSize = cfg.maxSize;
      configureRedis = true;
      config = {
        adminuser = cfg.admin;
        adminpassFile = cfg.passwordFile;
        dbtype = "pgsql";
        dbhost = "/run/postgresql";
        overwriteProtocol = "https"; # Nginx only allows SSL
      };

      notify_push = {
        enable = true;
        # Allow using the push service without hard-coding my IP in the configuration
        bendDomainToLocalhost = true;
      };
    };

    services.postgresql = {
      enable = true;
      ensureDatabases = [ "nextcloud" ];
      ensureUsers = [
        {
          name = "nextcloud";
          ensurePermissions."DATABASE nextcloud" = "ALL PRIVILEGES";
        }
      ];
    };

    systemd.services."nextcloud-setup" = {
      requires = [ "postgresql.service" ];
      after = [ "postgresql.service" ];
    };

    # The service above configures the domain, no need for my wrapper
    services.nginx.virtualHosts."nextcloud.${config.networking.domain}" = {
      forceSSL = true;
      useACMEHost = config.networking.domain;
    };

    my.services.backup = {
      paths = [
        config.services.nextcloud.home
      ];
      exclude = [
        # image previews can take up a lot of space
        "${config.services.nextcloud.home}/data/appdata_*/preview"
      ];
    };
  };
}
471
modules/nixos/services/nginx/default.nix
Normal file
471
modules/nixos/services/nginx/default.nix
Normal file
|
|
@ -0,0 +1,471 @@
|
|||
# A simple abstraction layer for almost all of my services' needs
|
||||
{ config, lib, pkgs, ... }:
|
||||
let
|
||||
cfg = config.my.services.nginx;
|
||||
|
||||
domain = config.networking.domain;
|
||||
|
||||
virtualHostOption = with lib; types.submodule {
|
||||
options = {
|
||||
subdomain = mkOption {
|
||||
type = types.str;
|
||||
example = "dev";
|
||||
description = ''
|
||||
Which subdomain, under config.networking.domain, to use
|
||||
for this virtual host.
|
||||
'';
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = with types; nullOr port;
|
||||
default = null;
|
||||
example = 8080;
|
||||
description = ''
|
||||
Which port to proxy to, through 127.0.0.1, for this virtual host.
|
||||
'';
|
||||
};
|
||||
|
||||
redirect = mkOption {
|
||||
type = with types; nullOr str;
|
||||
default = null;
|
||||
example = "https://example.com";
|
||||
description = ''
|
||||
Which domain to redirect to (301 response), for this virtual host.
|
||||
'';
|
||||
};
|
||||
|
||||
root = mkOption {
|
||||
type = with types; nullOr path;
|
||||
default = null;
|
||||
example = "/var/www/blog";
|
||||
description = ''
|
||||
The root folder for this virtual host.
|
||||
'';
|
||||
};
|
||||
|
||||
socket = mkOption {
|
||||
type = with types; nullOr path;
|
||||
default = null;
|
||||
example = "FIXME";
|
||||
description = ''
|
||||
The UNIX socket for this virtual host.
|
||||
'';
|
||||
};
|
||||
|
||||
sso = {
|
||||
enable = mkEnableOption "SSO authentication";
|
||||
};
|
||||
|
||||
extraConfig = mkOption {
|
||||
type = types.attrs; # FIXME: forward type of virtualHosts
|
||||
example = litteralExample ''
|
||||
{
|
||||
locations."/socket" = {
|
||||
proxyPass = "http://127.0.0.1:8096/";
|
||||
proxyWebsockets = true;
|
||||
};
|
||||
}
|
||||
'';
|
||||
default = { };
|
||||
description = ''
|
||||
Any extra configuration that should be applied to this virtual host.
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
./sso
|
||||
];
|
||||
|
||||
options.my.services.nginx = with lib; {
|
||||
enable = mkEnableOption "Nginx";
|
||||
|
||||
acme = {
|
||||
credentialsFile = mkOption {
|
||||
type = types.str;
|
||||
example = "/var/lib/acme/creds.env";
|
||||
description = ''
|
||||
Gandi API key file as an 'EnvironmentFile' (see `systemd.exec(5)`)
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
monitoring = {
|
||||
enable = my.mkDisableOption "monitoring through grafana and prometheus";
|
||||
};
|
||||
|
||||
virtualHosts = mkOption {
|
||||
type = types.listOf virtualHostOption;
|
||||
default = [ ];
|
||||
example = litteralExample ''
|
||||
[
|
||||
{
|
||||
subdomain = "gitea";
|
||||
port = 8080;
|
||||
}
|
||||
{
|
||||
subdomain = "dev";
|
||||
root = "/var/www/dev";
|
||||
}
|
||||
{
|
||||
subdomain = "jellyfin";
|
||||
port = 8096;
|
||||
extraConfig = {
|
||||
locations."/socket" = {
|
||||
proxyPass = "http://127.0.0.1:8096/";
|
||||
proxyWebsockets = true;
|
||||
};
|
||||
};
|
||||
}
|
||||
]
|
||||
'';
|
||||
description = ''
|
||||
List of virtual hosts to set-up using default settings.
|
||||
'';
|
||||
};
|
||||
|
||||
sso = {
|
||||
authKeyFile = mkOption {
|
||||
type = types.str;
|
||||
example = "/var/lib/nginx-sso/auth-key.txt";
|
||||
description = ''
|
||||
Path to the auth key.
|
||||
'';
|
||||
};
|
||||
|
||||
subdomain = mkOption {
|
||||
type = types.str;
|
||||
default = "login";
|
||||
example = "auth";
|
||||
description = "Which subdomain, to use for SSO.";
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
default = 8082;
|
||||
example = 8080;
|
||||
description = "Port to use for internal webui.";
|
||||
};
|
||||
|
||||
users = mkOption {
|
||||
type = types.attrsOf (types.submodule {
|
||||
options = {
|
||||
passwordHashFile = mkOption {
|
||||
type = types.str;
|
||||
example = "/var/lib/nginx-sso/alice/password-hash.txt";
|
||||
description = "Path to file containing the user's password hash.";
|
||||
};
|
||||
totpSecretFile = mkOption {
|
||||
type = types.str;
|
||||
example = "/var/lib/nginx-sso/alice/totp-secret.txt";
|
||||
description = "Path to file containing the user's TOTP secret.";
|
||||
};
|
||||
};
|
||||
});
|
||||
example = litteralExample ''
|
||||
{
|
||||
alice = {
|
||||
passwordHashFile = "/var/lib/nginx-sso/alice/password-hash.txt";
|
||||
totpSecretFile = "/var/lib/nginx-sso/alice/totp-secret.txt";
|
||||
};
|
||||
}
|
||||
'';
|
||||
description = "Definition of users";
|
||||
};
|
||||
|
||||
groups = mkOption {
|
||||
type = with types; attrsOf (listOf str);
|
||||
example = litteralExample ''
|
||||
{
|
||||
root = [ "alice" ];
|
||||
users = [ "alice" "bob" ];
|
||||
}
|
||||
'';
|
||||
description = "Groups of users";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
assertions = [ ]
|
||||
++ (lib.flip builtins.map cfg.virtualHosts ({ subdomain, ... } @ args:
|
||||
let
|
||||
conflicts = [ "port" "root" "socket" "redirect" ];
|
||||
optionsNotNull = builtins.map (v: args.${v} != null) conflicts;
|
||||
optionsSet = lib.filter lib.id optionsNotNull;
|
||||
in
|
||||
{
|
||||
assertion = builtins.length optionsSet == 1;
|
||||
message = ''
|
||||
Subdomain '${subdomain}' must have exactly one of ${
|
||||
lib.concatStringsSep ", " (builtins.map (v: "'${v}'") conflicts)
|
||||
} configured.
|
||||
'';
|
||||
}))
|
||||
++ (
|
||||
let
|
||||
ports = lib.my.mapFilter
|
||||
(v: v != null)
|
||||
({ port, ... }: port)
|
||||
cfg.virtualHosts;
|
||||
portCounts = lib.my.countValues ports;
|
||||
nonUniquesCounts = lib.filterAttrs (_: v: v != 1) portCounts;
|
||||
nonUniques = builtins.attrNames nonUniquesCounts;
|
||||
mkAssertion = port: {
|
||||
assertion = false;
|
||||
message = "Port ${port} cannot appear in multiple virtual hosts.";
|
||||
};
|
||||
in
|
||||
map mkAssertion nonUniques
|
||||
) ++ (
|
||||
let
|
||||
subs = map ({ subdomain, ... }: subdomain) cfg.virtualHosts;
|
||||
subsCounts = lib.my.countValues subs;
|
||||
nonUniquesCounts = lib.filterAttrs (_: v: v != 1) subsCounts;
|
||||
nonUniques = builtins.attrNames nonUniquesCounts;
|
||||
mkAssertion = v: {
          assertion = false;
          message = ''
            Subdomain '${v}' cannot appear in multiple virtual hosts.
          '';
        };
      in
      map mkAssertion nonUniques
    )
    ;

    services.nginx = {
      enable = true;
      statusPage = true; # For monitoring scraping.

      recommendedBrotliSettings = true;
      recommendedGzipSettings = true;
      recommendedOptimisation = true;
      recommendedProxySettings = true;
      recommendedTlsSettings = true;
      recommendedZstdSettings = true;

      virtualHosts =
        let
          domain = config.networking.domain;
          mkVHost = ({ subdomain, ... } @ args: lib.nameValuePair
            "${subdomain}.${domain}"
            (lib.my.recursiveMerge [
              # Base configuration
              {
                forceSSL = true;
                useACMEHost = domain;
              }
              # Proxy to port
              (lib.optionalAttrs (args.port != null) {
                locations."/".proxyPass =
                  "http://127.0.0.1:${toString args.port}";
              })
              # Serve filesystem content
              (lib.optionalAttrs (args.root != null) {
                inherit (args) root;
              })
              # Serve to UNIX socket
              (lib.optionalAttrs (args.socket != null) {
                locations."/".proxyPass =
                  "http://unix:${args.socket}";
              })
              # Redirect to a different domain
              (lib.optionalAttrs (args.redirect != null) {
                locations."/".return = "301 ${args.redirect}$request_uri";
              })
              # VHost specific configuration
              args.extraConfig
              # SSO configuration
              (lib.optionalAttrs args.sso.enable {
                extraConfig = (args.extraConfig.extraConfig or "") + ''
                  error_page 401 = @error401;
                '';

                locations."@error401".return = ''
                  302 https://${cfg.sso.subdomain}.${config.networking.domain}/login?go=$scheme://$http_host$request_uri
                '';

                locations."/" = {
                  extraConfig =
                    (args.extraConfig.locations."/".extraConfig or "") + ''
                      # Use SSO
                      auth_request /sso-auth;

                      # Set username through header
                      auth_request_set $username $upstream_http_x_username;
                      proxy_set_header X-User $username;

                      # Renew SSO cookie on request
                      auth_request_set $cookie $upstream_http_set_cookie;
                      add_header Set-Cookie $cookie;
                    '';
                };

                locations."/sso-auth" = {
                  proxyPass = "http://localhost:${toString cfg.sso.port}/auth";
                  extraConfig = ''
                    # Do not allow requests from outside
                    internal;

                    # Do not forward the request body
                    proxy_pass_request_body off;
                    proxy_set_header Content-Length "";

                    # Set X-Application according to subdomain for matching
                    proxy_set_header X-Application "${subdomain}";

                    # Set origin URI for matching
                    proxy_set_header X-Origin-URI $request_uri;
                  '';
                };
              })
            ])
          );
        in
        lib.my.genAttrs' cfg.virtualHosts mkVHost;

      sso = {
        enable = true;

        configuration = {
          listen = {
            addr = "127.0.0.1";
            inherit (cfg.sso) port;
          };

          audit_log = {
            target = [
              "fd://stdout"
            ];
            events = [
              "access_denied"
              "login_success"
              "login_failure"
              "logout"
              "validate"
            ];
            headers = [
              "x-origin-uri"
              "x-application"
            ];
          };

          cookie = {
            domain = ".${config.networking.domain}";
            secure = true;
            authentication_key = {
              _secret = cfg.sso.authKeyFile;
            };
          };

          login = {
            title = "Ambroisie's SSO";
            default_method = "simple";
            hide_mfa_field = false;
            names = {
              simple = "Username / Password";
            };
          };

          providers = {
            simple =
              let
                applyUsers = lib.flip lib.mapAttrs cfg.sso.users;
              in
              {
                users = applyUsers (_: v: { _secret = v.passwordHashFile; });

                mfa = applyUsers (_: v: [{
                  provider = "totp";
                  attributes = {
                    secret = {
                      _secret = v.totpSecretFile;
                    };
                  };
                }]);

                inherit (cfg.sso) groups;
              };
          };

          acl = {
            rule_sets = [
              {
                rules = [{ field = "x-application"; present = true; }];
                allow = [ "@root" ];
              }
            ];
          };
        };
      };
    };

    my.services.nginx.virtualHosts = [
      {
        subdomain = "login";
        inherit (cfg.sso) port;
      }
    ];

    networking.firewall.allowedTCPPorts = [ 80 443 ];

    # Nginx needs to be able to read the certificates
    users.users.nginx.extraGroups = [ "acme" ];

    security.acme = {
      defaults.email = lib.my.mkMailAddress "bruno.acme" "belanyi.fr";

      acceptTerms = true;
      # Use DNS wildcard certificate
      certs =
        {
          "${domain}" = {
            extraDomainNames = [ "*.${domain}" ];
            dnsProvider = "gandiv5";
            inherit (cfg.acme) credentialsFile;
          };
        };
    };

    systemd.services."acme-${domain}" = {
      serviceConfig = {
        Environment = [
          # Since I do a "weird" setup with a wildcard CNAME
          "LEGO_DISABLE_CNAME_SUPPORT=true"
        ];
      };
    };

    services.grafana.provision.dashboards.settings.providers = lib.mkIf cfg.monitoring.enable [
      {
        name = "NGINX";
        options.path = pkgs.nur.repos.alarsyo.grafanaDashboards.nginx;
        disableDeletion = true;
      }
    ];

    services.prometheus = lib.mkIf cfg.monitoring.enable {
      exporters.nginx = {
        enable = true;
        listenAddress = "127.0.0.1";
      };

      scrapeConfigs = [
        {
          job_name = "nginx";
          static_configs = [
            {
              targets = [ "127.0.0.1:${toString config.services.prometheus.exporters.nginx.port}" ];
              labels = {
                instance = config.networking.hostName;
              };
            }
          ];
        }
      ];
    };
  };
}
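For reference, the entries consumed by `mkVHost` above are declared by other modules through `my.services.nginx.virtualHosts`. A minimal sketch combining the proxy and SSO branches — the subdomain and port here are hypothetical:

my.services.nginx.virtualHosts = [
  {
    subdomain = "example"; # served as example.${config.networking.domain}
    port = 8080; # proxied to http://127.0.0.1:8080
    sso.enable = true; # guard the vhost behind nginx-sso via auth_request
  }
];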
89
modules/nixos/services/nginx/sso/default.nix
Normal file
@ -0,0 +1,89 @@
# I must override the module to allow having runtime secrets
{ config, lib, pkgs, utils, ... }:
let
  cfg = config.services.nginx.sso;
  pkg = lib.getBin cfg.package;
  confPath = "/var/lib/nginx-sso/config.json";
in
{
  disabledModules = [ "services/security/nginx-sso.nix" ];

  options.services.nginx.sso = with lib; {
    enable = mkEnableOption "nginx-sso service";

    package = mkOption {
      type = types.package;
      default = pkgs.nginx-sso;
      defaultText = "pkgs.nginx-sso";
      description = ''
        The nginx-sso package that should be used.
      '';
    };

    configuration = mkOption {
      type = types.attrsOf types.unspecified;
      default = { };
      example = literalExample ''
        {
          listen = { addr = "127.0.0.1"; port = 8080; };

          providers.token.tokens = {
            myuser = "MyToken";
          };

          acl = {
            rule_sets = [
              {
                rules = [ { field = "x-application"; equals = "MyApp"; } ];
                allow = [ "myuser" ];
              }
            ];
          };
        }
      '';
      description = ''
        nginx-sso configuration
        (<link xlink:href="https://github.com/Luzifer/nginx-sso/wiki/Main-Configuration">documentation</link>)
        as a Nix attribute set.
      '';
    };
  };

  config = lib.mkIf cfg.enable {
    systemd.services.nginx-sso = {
      description = "Nginx SSO Backend";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];
      serviceConfig = {
        StateDirectory = "nginx-sso";
        WorkingDirectory = "/var/lib/nginx-sso";
        # The files to be merged might not have the correct permissions
        ExecStartPre = ''+${pkgs.writeShellScript "merge-nginx-sso-config" ''
          rm -f '${confPath}'
          ${utils.genJqSecretsReplacementSnippet cfg.configuration confPath}

          # Fix permissions
          chown nginx-sso:nginx-sso ${confPath}
          chmod 0600 ${confPath}
        ''
        }'';
        ExecStart = lib.mkForce ''
          ${lib.getExe pkg} \
            --config ${confPath} \
            --frontend-dir ${pkg}/share/frontend
        '';
        Restart = "always";
        User = "nginx-sso";
        Group = "nginx-sso";
      };
    };

    users.users.nginx-sso = {
      isSystemUser = true;
      group = "nginx-sso";
    };

    users.groups.nginx-sso = { };
  };
}
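The point of this override is the `_secret` convention: at service start, `utils.genJqSecretsReplacementSnippet` rewrites every `{ _secret = <path>; }` leaf of `configuration` into the contents of that file, so secrets never land in the world-readable Nix store. A minimal sketch reusing the token provider from the example above (the secret path is hypothetical):

services.nginx.sso.configuration = {
  providers.token.tokens = {
    # Substituted with the file's contents when the unit starts
    myuser._secret = "/run/secrets/nginx-sso-token";
  };
};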
53
modules/nixos/services/nix-cache/default.nix
Normal file
@ -0,0 +1,53 @@
# Binary cache
{ config, lib, ... }:
let
  cfg = config.my.services.nix-cache;
in
{
  options.my.services.nix-cache = with lib; {
    enable = mkEnableOption "nix binary cache";

    port = mkOption {
      type = types.port;
      default = 5000;
      example = 8080;
      description = "Internal port for serving cache";
    };

    secretKeyFile = mkOption {
      type = types.str;
      example = "/run/secrets/nix-cache";
      description = "Secret signing key for the cache";
    };

    priority = mkOption {
      type = types.int;
      default = 50;
      example = 30;
      description = ''
        Which priority to assign to this cache. Lower number is higher priority.
        The official nixpkgs hydra cache is priority 40.
      '';
    };
  };

  config = lib.mkIf cfg.enable {
    services.harmonia = {
      enable = true;

      settings = {
        bind = "127.0.0.1:${toString cfg.port}";
        inherit (cfg) priority;
      };

      signKeyPath = cfg.secretKeyFile;
    };

    my.services.nginx.virtualHosts = [
      {
        subdomain = "cache";
        inherit (cfg) port;
      }
    ];
  };
}
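For context, a sketch of how another machine might consume this cache, given the `cache` subdomain registered above; the domain and signing public key are placeholders:

nix.settings = {
  substituters = [ "https://cache.example.com" ];
  # Public half of the key pair whose private half is `secretKeyFile`
  trusted-public-keys = [ "cache.example.com:<base64-public-key>" ];
};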
168
modules/nixos/services/paperless/default.nix
Normal file
@ -0,0 +1,168 @@
{ config, lib, ... }:
let
  cfg = config.my.services.paperless;
in
{
  options.my.services.paperless = with lib; {
    enable = mkEnableOption "Paperless service";

    port = mkOption {
      type = types.port;
      default = 4535;
      example = 8080;
      description = "Internal port for webui";
    };

    secretKeyFile = mkOption {
      type = types.str;
      example = "/var/lib/paperless/secret-key.env";
      description = ''
        Secret key as an 'EnvironmentFile' (see `systemd.exec(5)`)
      '';
    };

    documentPath = mkOption {
      type = with types; nullOr str;
      default = null;
      example = "/mnt/paperless";
      description = ''
        Path to the directory where documents are stored. Uses the module's
        default if null.
      '';
    };

    username = mkOption {
      type = types.str;
      default = "ambroisie";
      example = "username";
      description = "Name of the administrator";
    };

    passwordFile = mkOption {
      type = types.str;
      example = "/var/lib/paperless/password.txt";
      description = "Read the administrator's password from this path";
    };
  };

  config = lib.mkIf cfg.enable {
    services.paperless = {
      enable = true;

      port = cfg.port;

      mediaDir = lib.mkIf (cfg.documentPath != null) cfg.documentPath;

      extraConfig =
        let
          paperlessDomain = "paperless.${config.networking.domain}";
        in
        {
          # Use SSO
          PAPERLESS_ENABLE_HTTP_REMOTE_USER = true;
          PAPERLESS_HTTP_REMOTE_USER_HEADER_NAME = "HTTP_X_USER";

          # Use PostgreSQL
          PAPERLESS_DBHOST = "/run/postgresql";
          PAPERLESS_DBUSER = "paperless";
          PAPERLESS_DBNAME = "paperless";

          # Security settings
          PAPERLESS_ALLOWED_HOSTS = paperlessDomain;
          PAPERLESS_CORS_ALLOWED_HOSTS = "https://${paperlessDomain}";

          # OCR settings
          PAPERLESS_OCR_LANGUAGE = "fra+eng";

          # Workers
          PAPERLESS_TASK_WORKERS = 3;
          PAPERLESS_THREADS_PER_WORKER = 4;

          # Misc
          PAPERLESS_TIME_ZONE = config.time.timeZone;
          PAPERLESS_ADMIN_USER = cfg.username;
        };

      # Admin password
      passwordFile = cfg.passwordFile;
    };

    systemd.services = {
      paperless-scheduler = {
        requires = [ "postgresql.service" ];
        after = [ "postgresql.service" ];

        serviceConfig = {
          EnvironmentFile = cfg.secretKeyFile;
        };
      };

      paperless-consumer = {
        requires = [ "postgresql.service" ];
        after = [ "postgresql.service" ];

        serviceConfig = {
          EnvironmentFile = cfg.secretKeyFile;
        };
      };

      paperless-web = {
        requires = [ "postgresql.service" ];
        after = [ "postgresql.service" ];

        serviceConfig = {
          EnvironmentFile = cfg.secretKeyFile;
        };
      };

      paperless-task-queue = {
        requires = [ "postgresql.service" ];
        after = [ "postgresql.service" ];

        serviceConfig = {
          EnvironmentFile = cfg.secretKeyFile;
        };
      };
    };

    # Set-up database
    services.postgresql = {
      enable = true;
      ensureDatabases = [ "paperless" ];
      ensureUsers = [
        {
          name = "paperless";
          ensurePermissions."DATABASE paperless" = "ALL PRIVILEGES";
        }
      ];
    };

    # Set-up media group
    users.groups.media = { };

    users.users.${config.services.paperless.user} = {
      extraGroups = [ "media" ];
    };

    my.services.nginx.virtualHosts = [
      {
        subdomain = "paperless";
        inherit (cfg) port;
        sso = {
          enable = true;
        };

        # Enable websockets on root
        extraConfig = {
          locations."/".proxyWebsockets = true;
        };
      }
    ];

    my.services.backup = {
      paths = [
        config.services.paperless.dataDir
        config.services.paperless.mediaDir
      ];
    };
  };
}
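A minimal host-side sketch of enabling this module; the secret paths are hypothetical (e.g. provisioned through age-encrypted secrets, as elsewhere in this configuration):

my.services.paperless = {
  enable = true;
  documentPath = "/mnt/paperless";
  # Hypothetical paths to runtime secrets
  secretKeyFile = "/run/secrets/paperless-secret-key.env";
  passwordFile = "/run/secrets/paperless-password.txt";
};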
93
modules/nixos/services/pirate/default.nix
Normal file
@ -0,0 +1,93 @@
# The total autonomous media delivery system.
# Relevant link [1].
#
# [1]: https://youtu.be/I26Ql-uX6AM
{ config, lib, ... }:
let
  cfg = config.my.services.pirate;

  ports = {
    bazarr = 6767;
    lidarr = 8686;
    radarr = 7878;
    sonarr = 8989;
  };

  mkService = service: {
    services.${service} = {
      enable = true;
      group = "media";
    };
  };

  mkRedirection = service: {
    my.services.nginx.virtualHosts = [
      {
        subdomain = service;
        port = ports.${service};
      }
    ];
  };

  mkFail2Ban = service: lib.mkIf cfg.${service}.enable {
    services.fail2ban.jails = {
      ${service} = ''
        enabled = true
        filter = ${service}
        action = iptables-allports
      '';
    };

    environment.etc = {
      "fail2ban/filter.d/${service}.conf".text = ''
        [Definition]
        failregex = ^.*\|Warn\|Auth\|Auth-Failure ip <HOST> username .*$
        journalmatch = _SYSTEMD_UNIT=${service}.service
      '';
    };
  };

  mkFullConfig = service: lib.mkIf cfg.${service}.enable (lib.mkMerge [
    (mkService service)
    (mkRedirection service)
  ]);
in
{
  options.my.services.pirate = {
    enable = lib.mkEnableOption "Media automation";

    bazarr = {
      enable = lib.my.mkDisableOption "Bazarr";
    };

    lidarr = {
      enable = lib.my.mkDisableOption "Lidarr";
    };

    radarr = {
      enable = lib.my.mkDisableOption "Radarr";
    };

    sonarr = {
      enable = lib.my.mkDisableOption "Sonarr";
    };
  };

  config = lib.mkIf cfg.enable (lib.mkMerge [
    {
      # Set-up media group
      users.groups.media = { };
    }
    # Bazarr does not log authentication failures...
    (mkFullConfig "bazarr")
    # Lidarr for music
    (mkFullConfig "lidarr")
    (mkFail2Ban "lidarr")
    # Radarr for movies
    (mkFullConfig "radarr")
    (mkFail2Ban "radarr")
    # Sonarr for shows
    (mkFullConfig "sonarr")
    (mkFail2Ban "sonarr")
  ]);
}
41
modules/nixos/services/podgrab/default.nix
Normal file
@ -0,0 +1,41 @@
# A simple podcast fetcher
{ config, lib, ... }:
let
  cfg = config.my.services.podgrab;
in
{
  options.my.services.podgrab = with lib; {
    enable = mkEnableOption "Podgrab, a self-hosted podcast manager";

    passwordFile = mkOption {
      type = with types; nullOr str;
      default = null;
      example = "/run/secrets/password.env";
      description = ''
        The path to a file containing the PASSWORD environment variable
        definition for Podgrab's authentication.
      '';
    };

    port = mkOption {
      type = types.port;
      default = 8080;
      example = 4242;
      description = "The port on which Podgrab will listen for incoming HTTP traffic.";
    };
  };

  config = lib.mkIf cfg.enable {
    services.podgrab = {
      enable = true;
      inherit (cfg) passwordFile port;
    };

    my.services.nginx.virtualHosts = [
      {
        subdomain = "podgrab";
        inherit (cfg) port;
      }
    ];
  };
}
28
modules/nixos/services/postgresql-backup/default.nix
Normal file
@ -0,0 +1,28 @@
# Backup your data, kids!
{ config, lib, ... }:
let
  cfg = config.my.services.postgresql-backup;
in
{
  options.my.services.postgresql-backup = {
    enable = lib.mkEnableOption "Backup SQL databases";
  };

  config = lib.mkIf cfg.enable {
    services.postgresqlBackup = {
      enable = true;
      backupAll = true;
      location = "/var/backup/postgresql";
    };

    my.services.backup = {
      paths = [
        config.services.postgresqlBackup.location
      ];
      # No need to store previous backups thanks to `restic`
      exclude = [
        (config.services.postgresqlBackup.location + "/*.prev.sql.gz")
      ];
    };
  };
}
61
modules/nixos/services/postgresql/default.nix
Normal file
@ -0,0 +1,61 @@
{ config, lib, pkgs, ... }:
let
  cfg = config.my.services.postgresql;
in
{
  options.my.services.postgresql = with lib; {
    enable = my.mkDisableOption "postgres configuration";

    # Transient option to be enabled for migrations
    upgradeScript = mkEnableOption "postgres upgrade script";
  };

  config = lib.mkMerge [
    # Let other services enable postgres when they need it
    (lib.mkIf cfg.enable {
      services.postgresql = {
        package = pkgs.postgresql_13;
      };
    })

    # Taken from the manual
    (lib.mkIf cfg.upgradeScript {
      containers.temp-pg.config.services.postgresql = {
        enable = true;
        package = pkgs.postgresql_13;
      };

      environment.systemPackages =
        let
          newpg = config.containers.temp-pg.config.services.postgresql;
        in
        [
          (pkgs.writeScriptBin "upgrade-pg-cluster" ''
            #!/usr/bin/env bash

            set -x
            export OLDDATA="${config.services.postgresql.dataDir}"
            export NEWDATA="${newpg.dataDir}"
            export OLDBIN="${config.services.postgresql.package}/bin"
            export NEWBIN="${newpg.package}/bin"

            if [ "$OLDDATA" -ef "$NEWDATA" ]; then
              echo "Cannot migrate to same data directory" >&2
              exit 1
            fi

            install -d -m 0700 -o postgres -g postgres "$NEWDATA"
            cd "$NEWDATA"
            sudo -u postgres $NEWBIN/initdb -D "$NEWDATA"

            systemctl stop postgresql # old one

            sudo -u postgres $NEWBIN/pg_upgrade \
              --old-datadir "$OLDDATA" --new-datadir "$NEWDATA" \
              --old-bindir $OLDBIN --new-bindir $NEWBIN \
              "$@"
          '')
        ];
    })
  ];
}
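As a rough outline of how this transient option might be used during a migration (the exact steps depend on the target PostgreSQL version):

{
  # Temporarily enabled while migrating; after a rebuild, run the
  # generated `upgrade-pg-cluster` script as root, follow pg_upgrade's
  # post-upgrade suggestions (e.g. vacuum/analyze scripts), then flip
  # this back off.
  my.services.postgresql.upgradeScript = true;
}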
50
modules/nixos/services/quassel/default.nix
Normal file
@ -0,0 +1,50 @@
# An IRC client daemon
{ config, lib, ... }:
let
  cfg = config.my.services.quassel;
  domain = config.networking.domain;
in
{
  options.my.services.quassel = with lib; {
    enable = mkEnableOption "Quassel IRC client daemon";
    port = mkOption {
      type = types.port;
      default = 4242;
      example = 8080;
      description = "The port number for Quassel";
    };
  };

  config = lib.mkIf cfg.enable {
    services.quassel = {
      enable = true;
      portNumber = cfg.port;
      # Let's be secure
      requireSSL = true;
      certificateFile = config.security.acme.certs."${domain}".directory + "/full.pem";
      # The whole point *is* to connect from other clients
      interfaces = [ "0.0.0.0" ];
    };

    # Allow Quassel to read the certificates.
    users.groups.acme.members = [ "quassel" ];

    # Open port for Quassel
    networking.firewall.allowedTCPPorts = [ cfg.port ];

    # Create storage DB
    services.postgresql = {
      enable = true;
      ensureDatabases = [ "quassel" ];
      ensureUsers = [
        {
          name = "quassel";
          ensurePermissions."DATABASE quassel" = "ALL PRIVILEGES";
        }
      ];
      # Insecure, I don't care.
      # Because Quassel does not use the socket, I simply trust its connection
      authentication = "host quassel quassel localhost trust";
    };
  };
}
24
modules/nixos/services/rss-bridge/default.nix
Normal file
@ -0,0 +1,24 @@
# Get RSS feeds from websites that don't natively have one
{ config, lib, ... }:
let
  cfg = config.my.services.rss-bridge;
in
{
  options.my.services.rss-bridge = {
    enable = lib.mkEnableOption "RSS-Bridge service";
  };

  config = lib.mkIf cfg.enable {
    services.rss-bridge = {
      enable = true;
      whitelist = [ "*" ]; # Whitelist all
      virtualHost = "rss-bridge.${config.networking.domain}";
    };

    # The service above configures the domain, no need for my wrapper
    services.nginx.virtualHosts."rss-bridge.${config.networking.domain}" = {
      forceSSL = true;
      useACMEHost = config.networking.domain;
    };
  };
}
57
modules/nixos/services/sabnzbd/default.nix
Normal file
@ -0,0 +1,57 @@
# Usenet binary client.
{ config, lib, ... }:
let
  cfg = config.my.services.sabnzbd;
  port = 9090; # NOTE: not declaratively set...
in
{
  options.my.services.sabnzbd = with lib; {
    enable = mkEnableOption "SABnzbd binary news reader";
  };

  config = lib.mkIf cfg.enable {
    services.sabnzbd = {
      enable = true;
      group = "media";
    };

    # Set-up media group
    users.groups.media = { };

    my.services.nginx.virtualHosts = [
      {
        subdomain = "sabnzbd";
        inherit port;
      }
    ];

    services.fail2ban.jails = {
      sabnzbd = ''
        enabled = true
        filter = sabnzbd
        port = http,https
        # Unfortunately, sabnzbd does not log to systemd journal
        backend = auto
        logpath = /var/lib/sabnzbd/logs/sabnzbd.log
      '';
    };

    environment.etc = {
      # FIXME: path to log file
      "fail2ban/filter.d/sabnzbd.conf".text = ''
        [Definition]
        failregex = ^.*WARNING.*API Key incorrect, Use the api key from Config->General in your 3rd party program: .* \(X-Forwarded-For: <HOST>\) .*$
                    ^.*WARNING.*API Key incorrect, Use the api key from Config->General in your 3rd party program: <HOST> .*$
                    ^.*WARNING.*API Key missing, please enter the api key from Config->General into your 3rd party program: .* \(X-Forwarded-For: <HOST>\) .*$
                    ^.*WARNING.*API Key missing, please enter the api key from Config->General into your 3rd party program: <HOST> .*$
                    ^.*WARNING.*Refused connection from: .* \(X-Forwarded-For: <HOST>\) .*$
                    ^.*WARNING.*Refused connection from: <HOST> .*$
                    ^.*WARNING.*Refused connection with hostname ".*" from: .* \(X-Forwarded-For: <HOST>\) .*$
                    ^.*WARNING.*Refused connection with hostname ".*" from: <HOST> .*$
                    ^.*WARNING.*Unsuccessful login attempt from .* \(X-Forwarded-For: <HOST>\) .*$
                    ^.*WARNING.*Unsuccessful login attempt from <HOST> .*$
        journalmatch = _SYSTEMD_UNIT=sabnzbd.service
      '';
    };
  };
}
26
modules/nixos/services/ssh-server/default.nix
Normal file
@ -0,0 +1,26 @@
# An SSH server, using 'mosh'
{ config, lib, ... }:
let
  cfg = config.my.services.ssh-server;
in
{
  options.my.services.ssh-server = {
    enable = lib.mkEnableOption "SSH Server using 'mosh'";
  };

  config = lib.mkIf cfg.enable {
    services.openssh = {
      # Enable the OpenSSH daemon.
      enable = true;

      settings = {
        # Be more secure
        PermitRootLogin = "no";
        PasswordAuthentication = false;
      };
    };

    # Opens the relevant UDP ports.
    programs.mosh.enable = true;
  };
}
80
modules/nixos/services/tandoor-recipes/default.nix
Normal file
@ -0,0 +1,80 @@
{ config, lib, ... }:
let
  cfg = config.my.services.tandoor-recipes;
in
{
  options.my.services.tandoor-recipes = with lib; {
    enable = mkEnableOption "Tandoor Recipes service";

    port = mkOption {
      type = types.port;
      default = 4536;
      example = 8080;
      description = "Internal port for webui";
    };

    secretKeyFile = mkOption {
      type = types.str;
      example = "/var/lib/tandoor-recipes/secret-key.env";
      description = ''
        Secret key as an 'EnvironmentFile' (see `systemd.exec(5)`)
      '';
    };
  };

  config = lib.mkIf cfg.enable {
    services.tandoor-recipes = {
      enable = true;

      port = cfg.port;
      extraConfig =
        let
          tandoorRecipesDomain = "recipes.${config.networking.domain}";
        in
        {
          # Use PostgreSQL
          DB_ENGINE = "django.db.backends.postgresql";
          POSTGRES_HOST = "/run/postgresql";
          POSTGRES_USER = "tandoor_recipes";
          POSTGRES_DB = "tandoor_recipes";

          # Security settings
          ALLOWED_HOSTS = tandoorRecipesDomain;
          CSRF_TRUSTED_ORIGINS = "https://${tandoorRecipesDomain}";

          # Misc
          TIMEZONE = config.time.timeZone;
        };
    };

    systemd.services = {
      tandoor-recipes = {
        after = [ "postgresql.service" ];
        requires = [ "postgresql.service" ];

        serviceConfig = {
          EnvironmentFile = cfg.secretKeyFile;
        };
      };
    };

    # Set-up database
    services.postgresql = {
      enable = true;
      ensureDatabases = [ "tandoor_recipes" ];
      ensureUsers = [
        {
          name = "tandoor_recipes";
          ensurePermissions."DATABASE tandoor_recipes" = "ALL PRIVILEGES";
        }
      ];
    };

    my.services.nginx.virtualHosts = [
      {
        subdomain = "recipes";
        inherit (cfg) port;
      }
    ];
  };
}
26
modules/nixos/services/tlp/default.nix
Normal file
@ -0,0 +1,26 @@
# TLP power management
{ config, lib, ... }:
let
  cfg = config.my.services.tlp;
in
{
  options.my.services.tlp = {
    enable = lib.mkEnableOption "TLP power management configuration";
  };

  config = lib.mkIf cfg.enable {
    services.tlp = {
      enable = true;

      settings = {
        # Set CPU scaling aggressively when power is not an issue
        CPU_SCALING_GOVERNOR_ON_AC = "performance";
        CPU_SCALING_GOVERNOR_ON_BAT = "powersave";

        # Keep charge between 60% and 80% to preserve battery life
        START_CHARGE_THRESH_BAT0 = 60;
        STOP_CHARGE_THRESH_BAT0 = 80;
      };
    };
  };
}
95
modules/nixos/services/transmission/default.nix
Normal file
@ -0,0 +1,95 @@
# Small seedbox setup.
#
# Inspired by [1]
#
# [1]: https://github.com/delroth/infra.delroth.net/blob/master/roles/seedbox.nix
{ config, lib, pkgs, ... }:
let
  cfg = config.my.services.transmission;
in
{
  options.my.services.transmission = with lib; {
    enable = mkEnableOption "Transmission torrent client";

    credentialsFile = mkOption {
      type = types.str;
      example = "/var/lib/transmission/creds.json";
      description = ''
        Credential file, as a JSON configuration file to be merged with
        the main one.
      '';
    };

    downloadBase = mkOption {
      type = types.str;
      default = "/data/downloads";
      example = "/var/lib/transmission/download";
      description = "Download base directory";
    };

    port = mkOption {
      type = types.port;
      default = 9091;
      example = 8080;
      description = "Internal port for webui";
    };

    peerPort = mkOption {
      type = types.port;
      default = 30251;
      example = 32323;
      description = "Peering port";
    };
  };

  config = lib.mkIf cfg.enable {
    services.transmission = {
      enable = true;
      package = pkgs.transmission_4;
      group = "media";

      downloadDirPermissions = "775";

      inherit (cfg) credentialsFile;

      settings = {
        download-dir = "${cfg.downloadBase}/complete";
        incomplete-dir = "${cfg.downloadBase}/incomplete";

        peer-port = cfg.peerPort;

        rpc-enabled = true;
        rpc-port = cfg.port;
        rpc-authentication-required = true;

        # Proxied behind Nginx.
        rpc-whitelist-enabled = true;
        rpc-whitelist = "127.0.0.1";
      };
    };

    # Transmission wants to eat *all* my RAM if left to its own devices
    systemd.services.transmission = {
      serviceConfig = {
        MemoryMax = "33%";
      };
    };

    # Set-up media group
    users.groups.media = { };

    # Default transmission webui, I prefer combustion but its development
    # seems to have stalled
    my.services.nginx.virtualHosts = [
      {
        subdomain = "transmission";
        inherit (cfg) port;
      }
    ];

    networking.firewall = {
      allowedTCPPorts = [ cfg.peerPort ];
      allowedUDPPorts = [ cfg.peerPort ];
    };
  };
}
123
modules/nixos/services/vikunja/default.nix
Normal file
@ -0,0 +1,123 @@
# Todo and kanban app
{ config, lib, ... }:
let
  cfg = config.my.services.vikunja;
  subdomain = "todo";
  vikunjaDomain = "${subdomain}.${config.networking.domain}";
  socketPath = "/run/vikunja/vikunja.socket";
in
{
  options.my.services.vikunja = with lib; {
    enable = mkEnableOption "Vikunja todo app";

    mail = {
      enable = mkEnableOption "mailer configuration";

      configFile = mkOption {
        type = types.str;
        example = "/run/secrets/vikunja-mail-config.env";
        description = "Configuration for the mailer connection, using environment variables.";
      };
    };
  };

  config = lib.mkIf cfg.enable {
    services.vikunja = {
      enable = true;

      frontendScheme = "https";
      frontendHostname = vikunjaDomain;

      setupNginx = false;

      database = {
        type = "postgres";
        user = "vikunja";
        database = "vikunja";
        host = "/run/postgresql";
      };

      settings = {
        service = {
          # Only allow registration of users through the CLI
          enableregistration = false;
          # Use the host's timezone
          timezone = config.time.timeZone;
          # Use UNIX socket for serving the API
          unixsocket = socketPath;
          unixsocketmode = "0o660";
        };

        mailer = {
          enabled = cfg.mail.enable;
        };
      };

      environmentFiles = lib.optional cfg.mail.enable cfg.mail.configFile;
    };

    # This is a weird setup
    my.services.nginx.virtualHosts = [
      {
        inherit subdomain;
        # Serve the root for the web-ui
        root = config.services.vikunja.package-frontend;

        extraConfig = {
          locations = {
            "/" = {
              tryFiles = "$uri $uri/ /";
            };

            # Serve the API through a UNIX socket
            "~* ^/(api|dav|\\.well-known)/" = {
              proxyPass = "http://unix:${socketPath}";
              extraConfig = ''
                client_max_body_size 20M;
              '';
            };
          };
        };
      }
    ];

    systemd.services.vikunja-api = {
      serviceConfig = {
        # Use a system user to simplify using the CLI
        DynamicUser = lib.mkForce false;
        # Set the user for postgres authentication
        User = "vikunja";
        # Create /run/vikunja/ to serve the UNIX socket
        RuntimeDirectory = "vikunja";
      };
    };

    users.users.vikunja = {
      description = "Vikunja Service";
      group = "vikunja";
      isSystemUser = true;
    };
    users.groups.vikunja = { };

    # Allow nginx to access the UNIX socket
    users.users.nginx.extraGroups = [ "vikunja" ];

    services.postgresql = {
      ensureDatabases = [ "vikunja" ];
      ensureUsers = [
        {
          name = "vikunja";
          ensurePermissions = { "DATABASE vikunja" = "ALL PRIVILEGES"; };
        }
      ];
    };

    my.services.backup = {
      paths = [
        config.services.vikunja.settings.files.basepath
      ];
    };
  };
}
265
modules/nixos/services/wireguard/default.nix
Normal file
@ -0,0 +1,265 @@
# A simple, in-kernel VPN service
#
# Strongly inspired by [1].
# [1]: https://github.com/delroth/infra.delroth.net/blob/master/roles/wireguard-peer.nix
{ config, lib, pkgs, ... }:
let
  cfg = config.my.services.wireguard;
  secrets = config.age.secrets;
  hostName = config.networking.hostName;

  peers = {
    # "Server"
    porthos = {
      clientNum = 1;
      publicKey = "PLdgsizztddri0LYtjuNHr5r2E8D+yI+gM8cm5WDfHQ=";
      externalIp = "91.121.177.163";
    };

    # "Clients"
    aramis = {
      clientNum = 2;
      publicKey = "QJSWIBS1mXTpxYybLlKu/Y5wy0GFbUfn4yPzpF1DZDc=";
    };

    richelieu = {
      clientNum = 3;
      publicKey = "w4IADAj2Tt7Qe95a0RxDv9ovg/Dr/f3q1LrVOPF48Rk=";
    };

    # Sarah's iPhone
    milady = {
      clientNum = 4;
      publicKey = "3MKEu4F6o8kww54xeAao5Uet86fv8z/QsZ2L2mOzqDQ=";
    };
  };
  thisPeer = peers."${hostName}";
  thisPeerIsServer = thisPeer ? externalIp;
  # Only connect to clients from server, and only connect to server from clients
  otherPeers =
    let
      allOthers = lib.filterAttrs (name: _: name != hostName) peers;
      shouldConnectToPeer = _: peer: thisPeerIsServer != (peer ? externalIp);
    in
    lib.filterAttrs shouldConnectToPeer allOthers;

  extIface = config.my.hardware.networking.externalInterface;

  mkInterface = clientAllowedIPs: {
    listenPort = cfg.port;
    address = with cfg.net; with lib; [
      "${v4.subnet}.${toString thisPeer.clientNum}/${toString v4.mask}"
      "${v6.subnet}::${toString thisPeer.clientNum}/${toHexString v6.mask}"
    ];
    privateKeyFile = secrets."wireguard/private-key".path;

    peers =
      let
        mkPeer = _: peer: lib.mkMerge [
          {
            inherit (peer) publicKey;
          }

          (lib.optionalAttrs thisPeerIsServer {
            # Only forward from server to clients
            allowedIPs = with cfg.net; [
              "${v4.subnet}.${toString peer.clientNum}/32"
              "${v6.subnet}::${toString peer.clientNum}/128"
            ];
          })

          (lib.optionalAttrs (!thisPeerIsServer) {
            # Forward all traffic through wireguard to server
            allowedIPs = clientAllowedIPs;
            # Roaming clients need to keep NAT-ing active
            persistentKeepalive = 10;
            # We know that `peer` is a server, set up the endpoint
            endpoint = "${peer.externalIp}:${toString cfg.port}";
          })
        ];
      in
      lib.mapAttrsToList mkPeer otherPeers;

    # Set up clients to use configured DNS servers
    dns =
      let
        toInternalIps = peer: [
          "${cfg.net.v4.subnet}.${toString peer.clientNum}"
          "${cfg.net.v6.subnet}::${toString peer.clientNum}"
        ];
        # We know that `otherPeers` is an attribute set of servers
        internalIps = lib.flatten
          (lib.mapAttrsToList (_: peer: toInternalIps peer) otherPeers);
        internalServers = lib.optionals cfg.dns.useInternal internalIps;
      in
      lib.mkIf (!thisPeerIsServer)
        (internalServers ++ cfg.dns.additionalServers);
  };
in
{
  options.my.services.wireguard = with lib; {
    enable = mkEnableOption "Wireguard VPN service";

    startAtBoot = mkEnableOption ''
      Whether the VPN service should be started at boot. Must be true for the
      server to work reliably.
    '';

    iface = mkOption {
      type = types.str;
      default = "wg";
      example = "wg0";
      description = "Name of the interface to configure";
    };

    port = mkOption {
      type = types.port;
      default = 51820;
      example = 55555;
      description = "Port to configure for Wireguard";
    };

    dns = {
      useInternal = my.mkDisableOption ''
        Use internal DNS servers from wireguard 'server'
      '';

      additionalServers = mkOption {
        type = with types; listOf str;
        default = [
          "1.0.0.1"
          "1.1.1.1"
        ];
        example = [
          "8.8.4.4"
          "8.8.8.8"
        ];
        description = "Which DNS servers to use in addition to adblock ones";
      };
    };

    net = {
      # FIXME: use new ip library to handle this more cleanly
      v4 = {
        subnet = mkOption {
          type = types.str;
          default = "10.0.0";
          example = "10.100.0";
          description = "Which prefix to use for internal IPs";
        };
        mask = mkOption {
          type = types.int;
          default = 24;
          example = 28;
          description = "The CIDR mask to use on internal IPs";
        };
      };
      # FIXME: extend library for IPv6
      v6 = {
        subnet = mkOption {
          type = types.str;
          default = "fd42:42:42";
          example = "fdc9:281f:04d7:9ee9";
          description = "Which prefix to use for internal IPs";
        };
        mask = mkOption {
          type = types.int;
          default = 64;
          example = 68;
          description = "The CIDR mask to use on internal IPs";
        };
      };
    };

    internal = {
      enable = mkEnableOption ''
        Additional interface which does not route WAN traffic, but gives access
        to wireguard peers.
        Useful for accessing DNS and other internal services, without having
        to route all traffic through wireguard.
        Automatically disabled on the server, and enabled otherwise.
      '' // {
        default = !thisPeerIsServer;
      };

      name = mkOption {
        type = types.str;
        default = "lan";
        example = "internal";
        description = "Which name to use for this interface";
      };

      startAtBoot = my.mkDisableOption ''
        Whether the internal VPN service should be started at boot.
      '';
    };
  };

  config = lib.mkIf cfg.enable (lib.mkMerge [
    # Normal interface should route all traffic from client through server
    {
      networking.wg-quick.interfaces."${cfg.iface}" = mkInterface [
        "0.0.0.0/0"
        "::/0"
      ];
    }

    # Additional interface is only used to get access to "LAN" from wireguard
    (lib.mkIf cfg.internal.enable {
      networking.wg-quick.interfaces."${cfg.internal.name}" = mkInterface [
        "${cfg.net.v4.subnet}.0/${toString cfg.net.v4.mask}"
        "${cfg.net.v6.subnet}::/${toString cfg.net.v6.mask}"
      ];
    })

    # Expose port
    {
      networking.firewall.allowedUDPPorts = [ cfg.port ];
    }

    # Allow NATing wireguard traffic on server
    (lib.mkIf thisPeerIsServer {
      networking.nat = {
        enable = true;
        externalInterface = extIface;
        internalInterfaces = [ cfg.iface ];
      };
    })

    # Set up forwarding to WAN
    (lib.mkIf thisPeerIsServer {
      networking.wg-quick.interfaces."${cfg.iface}" = {
        postUp = with cfg.net; ''
          ${pkgs.iptables}/bin/iptables -A FORWARD -i ${cfg.iface} -j ACCEPT
          ${pkgs.iptables}/bin/iptables -t nat -A POSTROUTING \
            -s ${v4.subnet}.${toString thisPeer.clientNum}/${toString v4.mask} \
            -o ${extIface} -j MASQUERADE
          ${pkgs.iptables}/bin/ip6tables -A FORWARD -i ${cfg.iface} -j ACCEPT
          ${pkgs.iptables}/bin/ip6tables -t nat -A POSTROUTING \
            -s ${v6.subnet}::${toString thisPeer.clientNum}/${toString v6.mask} \
            -o ${extIface} -j MASQUERADE
        '';
        preDown = with cfg.net; ''
          ${pkgs.iptables}/bin/iptables -D FORWARD -i ${cfg.iface} -j ACCEPT
          ${pkgs.iptables}/bin/iptables -t nat -D POSTROUTING \
            -s ${v4.subnet}.${toString thisPeer.clientNum}/${toString v4.mask} \
            -o ${extIface} -j MASQUERADE
          ${pkgs.iptables}/bin/ip6tables -D FORWARD -i ${cfg.iface} -j ACCEPT
          ${pkgs.iptables}/bin/ip6tables -t nat -D POSTROUTING \
            -s ${v6.subnet}::${toString thisPeer.clientNum}/${toString v6.mask} \
            -o ${extIface} -j MASQUERADE
        '';
      };
    })

    # When not needed at boot, ensure that there are no reverse dependencies
    (lib.mkIf (!cfg.startAtBoot) {
      systemd.services."wg-quick-${cfg.iface}".wantedBy = lib.mkForce [ ];
    })

    # Same idea, for internal-only interface
    (lib.mkIf (cfg.internal.enable && !cfg.internal.startAtBoot) {
      systemd.services."wg-quick-${cfg.internal.name}".wantedBy = lib.mkForce [ ];
    })
  ]);
}
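To illustrate how the pieces above fit together, a client host (any peer without `externalIp`, per `thisPeerIsServer`) might use the module like this; nothing here is new, it only exercises the options defined above:

my.services.wireguard = {
  enable = true;
  # Laptop-style host: bring the full tunnel up manually...
  startAtBoot = false;
  # ...but keep the internal-only interface for DNS/adblock access
  # (this is already the default on non-server peers)
  internal.enable = true;
};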
@ -0,0 +1,9 @@
age-encryption.org/v1
-> ssh-ed25519 cKojmg gWB20jfimPCJHYjqxBSHYkL9Z/kGZ23dRu4PHp7oJj8
z3dBymvgrGNtIXe3yQAzpm36uExPmD7DKjU6mMNw99U
-> ssh-ed25519 jPowng aeWv6an+PmWRuk2eHOQhF7jvmld1I5p2LbSmehjUBBw
Rn+ApMvZlO0ji6TCakCUc+1jK762UxOqVanmCsjB+80
-> jDh})['\-grease |Y6J(8{ +v.7nKx
WID+ZDtsOlPI0AW8ROvXH1s
--- ZlSk2uv95UoKi5D94+tiQdZyxCVv6dlj6ajwYeDzmp0
[binary age payload]
@ -0,0 +1,10 @@
age-encryption.org/v1
-> ssh-ed25519 cKojmg rYhrpoTaFjLBGtbCXxEK7jZa+KnriEV/kWViIEjmuQs
jHMSjxKIIqjUnpAcEo3JgsieI1iiA5/gKEx8+QFhDgY
-> ssh-ed25519 jPowng 6sQQFvSbWdjgDYSKmJ/CBG+BTzxFghX4SaJ4GyACKWc
OABJuh+Ta8q+G0onF/9bz3xxv4zTlHYlF4AjC5P6Y6I
-> xwW|#D`-grease $xYH C m8lBk9
OBqgvLNIurE0qNaSB7dO2/6dQkVXeLgf/3l9gGlRJ6ynhqwmbXOUa0vyj+OBz27O
uI97+0y1TFAs3HN0Y8nj8LrwsafbDENu99JuVow2OuLKeSqc7sxOQQ
--- 9filSHStPTJJGDLY7AWzIXu/6tK4X0okT522sc4OJTc
[binary age payload]
15
modules/nixos/services/wireguard/keys/secrets.nix
Normal file
@ -0,0 +1,15 @@
# Extra wireguard keys that are not NixOS hosts
let
  keys = import ../../../../keys;

  all = [
    keys.users.ambroisie
  ];
in
{
  # Sarah's iPhone
  "milady/private-key.age".publicKeys = all;

  # My Android phone
  "richelieu/private-key.age".publicKeys = all;
}
42
modules/nixos/services/woodpecker/agent-docker/default.nix
Normal file
@ -0,0 +1,42 @@
{ config, lib, ... }:
let
  cfg = config.my.services.woodpecker;

  hasRunner = (name: builtins.elem name cfg.runners);
in
{
  config = lib.mkIf (cfg.enable && hasRunner "docker") {
    services.woodpecker-agents = {
      agents.docker = {
        enable = true;

        environment = {
          WOODPECKER_SERVER = "localhost:${toString cfg.rpcPort}";
          WOODPECKER_MAX_WORKFLOWS = "10";
          WOODPECKER_BACKEND = "docker";
          WOODPECKER_FILTER_LABELS = "type=docker";
          WOODPECKER_HEALTHCHECK = "false";
        };

        environmentFile = [ cfg.sharedSecretFile ];

        extraGroups = [ "docker" ];
      };
    };

    # Make sure it is activated in that case
    my.system.docker.enable = true;

    # Adjust runner service for docker usage
    systemd.services.woodpecker-agent-docker = {
      after = [ "docker.socket" ]; # Needs the socket to be available
      # Might break deployment
      restartIfChanged = false;
      serviceConfig = {
        BindPaths = [
          "/var/run/docker.sock"
        ];
      };
    };
  };
}
64
modules/nixos/services/woodpecker/agent-exec/default.nix
Normal file
@ -0,0 +1,64 @@
{ config, lib, pkgs, ... }:
let
  cfg = config.my.services.woodpecker;

  hasRunner = (name: builtins.elem name cfg.runners);
in
{
  config = lib.mkIf (cfg.enable && hasRunner "exec") {
    services.woodpecker-agents = {
      agents.exec = {
        enable = true;

        environment = {
          WOODPECKER_SERVER = "localhost:${toString cfg.rpcPort}";
          WOODPECKER_MAX_WORKFLOWS = "10";
          WOODPECKER_BACKEND = "local";
          WOODPECKER_FILTER_LABELS = "type=exec";
          WOODPECKER_HEALTHCHECK = "false";

          NIX_REMOTE = "daemon";
          PAGER = "cat";
        };

        path = with pkgs; [
          woodpecker-plugin-git
          bash
          coreutils
          git
          git-lfs
          gnutar
          gzip
          nix
        ];

        environmentFile = [ cfg.sharedSecretFile ];
      };
    };

    # Adjust runner service for nix usage
    systemd.services.woodpecker-agent-exec = {
      # Might break deployment
      restartIfChanged = false;

      serviceConfig = {
        # Same option as upstream, without @setuid
        SystemCallFilter = lib.mkForce "~@clock @privileged @cpu-emulation @debug @keyring @module @mount @obsolete @raw-io @reboot @swap";

        BindPaths = [
          "/nix/var/nix/daemon-socket/socket"
          "/run/nscd/socket"
        ];
        BindReadOnlyPaths = [
          "/etc/passwd:/etc/passwd"
          "/etc/group:/etc/group"
          "/etc/nix:/etc/nix"
          "${config.environment.etc."ssh/ssh_known_hosts".source}:/etc/ssh/ssh_known_hosts"
          "/etc/machine-id"
          # Channels are dynamic paths in the nix store, therefore we need to bind mount the whole thing
          "/nix/"
        ];
      };
    };
  };
}
46
modules/nixos/services/woodpecker/default.nix
Normal file
@ -0,0 +1,46 @@
|
||||
{
|
||||
imports = [
|
||||
./agent-docker
|
||||
./agent-exec
|
||||
./server
|
||||
];
|
||||
|
||||
options.my.services.woodpecker = with lib; {
|
||||
enable = mkEnableOption "Woodpecker CI";
|
||||
runners = mkOption {
|
||||
type = with types; listOf (enum [ "exec" "docker" ]);
|
||||
default = [ ];
|
||||
example = [ "exec" "docker" ];
|
||||
description = "Types of runners to enable";
|
||||
};
|
||||
admin = mkOption {
|
||||
type = types.str;
|
||||
default = "ambroisie";
|
||||
example = "admin";
|
||||
description = "Name of the admin user";
|
||||
};
|
||||
port = mkOption {
|
||||
type = types.port;
|
||||
default = 3030;
|
||||
example = 8080;
|
||||
description = "Internal port of the Woodpecker UI";
|
||||
};
|
||||
rpcPort = mkOption {
|
||||
type = types.port;
|
||||
default = 3031;
|
||||
example = 8080;
|
||||
description = "Internal port of the Woodpecker UI";
|
||||
};
|
||||
secretFile = mkOption {
|
||||
type = types.str;
|
||||
example = "/run/secrets/woodpecker.env";
|
||||
description = "Secrets to inject into Woodpecker server";
|
||||
};
|
||||
sharedSecretFile = mkOption {
|
||||
type = types.str;
|
||||
example = "/run/secrets/woodpecker.env";
|
||||
description = "Shared RPC secret to inject into server and runners";
|
||||
};
|
||||
};
|
||||
}
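A minimal sketch of enabling the whole stack from a host configuration, with hypothetical secret paths; the sub-modules below pick up `runners` to decide which agents to start:

my.services.woodpecker = {
  enable = true;
  runners = [ "exec" "docker" ];
  # Hypothetical paths to runtime secrets
  secretFile = "/run/secrets/woodpecker.env";
  sharedSecretFile = "/run/secrets/woodpecker-shared.env";
};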
69
modules/nixos/services/woodpecker/server/default.nix
Normal file
@ -0,0 +1,69 @@
{ config, lib, ... }:
let
  cfg = config.my.services.woodpecker;
in
{
  config = lib.mkIf cfg.enable {
    services.woodpecker-server = {
      enable = true;

      environment = {
        WOODPECKER_OPEN = "true";
        WOODPECKER_HOST = "https://woodpecker.${config.networking.domain}";
        WOODPECKER_DATABASE_DRIVER = "postgres";
        WOODPECKER_DATABASE_DATASOURCE = "postgres:///woodpecker?host=/run/postgresql";
        WOODPECKER_ADMIN = cfg.admin;
        WOODPECKER_SERVER_ADDR = ":${toString cfg.port}";
        WOODPECKER_GRPC_ADDR = ":${toString cfg.rpcPort}";

        WOODPECKER_GITEA = "true";
        WOODPECKER_GITEA_URL = config.services.gitea.settings.server.ROOT_URL;

        WOODPECKER_LOG_LEVEL = "debug";
      };
    };

    systemd.services.woodpecker-server = {
      after = [ "postgresql.service" ];
      requires = [ "postgresql.service" ];

      serviceConfig = {
        # Set username for DB access
        User = "woodpecker";

        BindPaths = [
          # Allow access to DB path
          "/run/postgresql"
        ];

        EnvironmentFile = [
          cfg.secretFile
          cfg.sharedSecretFile
        ];
      };
    };

    services.postgresql = {
      enable = true;
      ensureDatabases = [ "woodpecker" ];
      ensureUsers = [{
        name = "woodpecker";
        ensurePermissions = {
          "DATABASE woodpecker" = "ALL PRIVILEGES";
        };
      }];
    };

    my.services.nginx.virtualHosts = [
      {
        subdomain = "woodpecker";
        inherit (cfg) port;
      }
      # I might want to be able to RPC from other hosts in the future
      {
        subdomain = "woodpecker-rpc";
        port = cfg.rpcPort;
      }
    ];
  };
}