Rename modules directory `system` to `nixos`
parent 653a6f310b
commit 1c1b9221fc
48 changed files with 1 addition and 1 deletion
152 modules/nixos/services/alloy.nix Normal file
@@ -0,0 +1,152 @@
{ config, lib, ... }:
let
  cfg = config.custom.services.alloy;
in
{
  options.custom.services.alloy = {
    enable = lib.mkEnableOption "";
    domain = lib.mkOption {
      type = lib.types.nonEmptyStr;
      default = "";
    };
    port = lib.mkOption {
      type = lib.types.port;
      default = 12345;
    };
    metricsEndpoint = lib.mkOption {
      type = lib.types.nonEmptyStr;
      default = "https://metrics.${config.custom.networking.overlay.domain}/prometheus/api/v1/write";
    };
    logsEndpoint = lib.mkOption {
      type = lib.types.nonEmptyStr;
      default = "https://logs.${config.custom.networking.overlay.domain}/insert/loki/api/v1/push";
    };
    collect = {
      metrics = {
        system = lib.mkEnableOption "" // {
          default = true;
        };
        victorialogs = lib.mkEnableOption "" // {
          default = config.services.victorialogs.enable;
        };
        caddy = lib.mkEnableOption "" // {
          default = config.services.caddy.enable;
        };
      };
      logs.openssh = lib.mkEnableOption "" // {
        default = config.services.openssh.enable;
      };
    };
  };

  config = lib.mkIf cfg.enable {
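    # Every collector except `system` corresponds to a NixOS service of the
    # same name; assert that the service is enabled before trying to scrape it.
    # (The `|>` pipelines rely on Nix's experimental pipe-operators feature.)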
    assertions =
      let
        metricsAssertions =
          cfg.collect.metrics
          |> lib.attrNames
          |> lib.filter (name: name != "system")
          |> lib.map (name: {
            assertion = cfg.collect.metrics.${name} -> config.services.${name}.enable;
            message = "Alloy cannot collect `${name}` metrics without the `${name}` service";
          });
        logsAssertions =
          cfg.collect.logs
          |> lib.attrNames
          |> lib.map (name: {
            assertion = cfg.collect.logs.${name} -> config.services.${name}.enable;
            message = "Alloy cannot collect `${name}` logs without the `${name}` service";
          });
      in
      metricsAssertions ++ logsAssertions;

    services.alloy = {
      enable = true;
      extraFlags = [
        "--server.http.listen-addr=localhost:${toString cfg.port}"
        "--disable-reporting"
      ];
    };

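    # The NixOS Alloy module points Alloy at /etc/alloy, where it picks up
    # every *.alloy file; each collector ships as its own config file and is
    # toggled via `enable`.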
    environment.etc =
      let
        isTrue = x: x;
        anyIsTrue = attrs: attrs |> lib.attrValues |> lib.any isTrue;
      in
      {
        "alloy/metrics-endpoint.alloy" = {
          enable = cfg.collect.metrics |> anyIsTrue;
          text = ''
            prometheus.remote_write "default" {
              endpoint {
                url = "${cfg.metricsEndpoint}"
              }
            }
          '';
        };
        "alloy/logs-endpoint.alloy" = {
          enable = cfg.collect.logs |> anyIsTrue;
          text = ''
            loki.write "default" {
              endpoint {
                url = "${cfg.logsEndpoint}"
              }
            }
          '';
        };
        "alloy/system-metrics.alloy" = {
          enable = cfg.collect.metrics.system;
          text = ''
            prometheus.exporter.unix "default" {
              enable_collectors = ["systemd"]
            }

            prometheus.scrape "node_exporter" {
              targets = prometheus.exporter.unix.default.targets
              forward_to = [prometheus.remote_write.default.receiver]
              scrape_interval = "15s"
            }
          '';
        };
        "alloy/victorialogs-metrics.alloy" = {
          enable = cfg.collect.metrics.victorialogs;
          text = ''
            prometheus.scrape "victorialogs" {
              targets = [{
                __address__ = "localhost:${toString config.custom.web-services.victorialogs.port}",
                job = "victorialogs",
                instance = constants.hostname,
              }]
              forward_to = [prometheus.remote_write.default.receiver]
              scrape_interval = "15s"
            }
          '';
        };
        "alloy/caddy-metrics.alloy" = {
          enable = cfg.collect.metrics.caddy;
          text = ''
            prometheus.scrape "caddy" {
              targets = [{
                __address__ = "localhost:${toString config.custom.services.caddy.metricsPort}",
                job = "caddy",
                instance = constants.hostname,
              }]
              forward_to = [prometheus.remote_write.default.receiver]
              scrape_interval = "15s"
            }
          '';
        };
        "alloy/sshd-logs.alloy" = {
          enable = cfg.collect.logs.openssh;
          text = ''
            loki.source.journal "sshd" {
              matches = "_SYSTEMD_UNIT=sshd.service"
              forward_to = [loki.write.default.receiver]
            }
          '';
        };
      };

    custom.services.caddy.virtualHosts.${cfg.domain}.port = cfg.port;
  };
}
52 modules/nixos/services/atuin.nix Normal file
@@ -0,0 +1,52 @@
{ config, lib, ... }:
let
  cfg = config.custom.services.atuin;
  dataDir = "/var/lib/atuin";
in
{
  options.custom.services.atuin = {
    enable = lib.mkEnableOption "";
    domain = lib.mkOption {
      type = lib.types.nonEmptyStr;
      default = "";
    };
    port = lib.mkOption {
      type = lib.types.port;
      default = 8849;
    };
  };

  config = lib.mkIf cfg.enable {
    services.atuin = {
      enable = true;
      inherit (cfg) port;
      openRegistration = true;
      database = {
        createLocally = false;
        uri = "sqlite://${dataDir}/atuin.db";
      };
    };

    users = {
      users.atuin = {
        isSystemUser = true;
        group = config.users.groups.atuin.name;
      };
      groups.atuin = { };
    };

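    # The upstream module runs atuin with DynamicUser, which would allocate a
    # fresh UID on every start; pin a static user so the persisted SQLite
    # database keeps a stable owner across restarts.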
    systemd.services.atuin.serviceConfig = {
      DynamicUser = lib.mkForce false;
      User = config.users.users.atuin.name;
      Group = config.users.groups.atuin.name;
      StateDirectory = "atuin";
      StateDirectoryMode = "0700";
    };

    custom = {
      services.caddy.virtualHosts.${cfg.domain}.port = cfg.port;

      persistence.directories = [ dataDir ];
    };
  };
}
27 modules/nixos/services/auto-gc.nix Normal file
@@ -0,0 +1,27 @@
{ config, lib, ... }:
let
  cfg = config.custom.services.auto-gc;
in
{
  options.custom.services.auto-gc = {
    enable = lib.mkEnableOption "";
    onlyCleanRoots = lib.mkEnableOption "";
  };

  config = lib.mkIf cfg.enable {
    programs.nh = {
      enable = true;
      clean = {
        enable = true;
        dates = "weekly";
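        # nh's clean.extraArgs takes a single string, so the argument list is
        # joined with spaces. With onlyCleanRoots set, `--no-gc` removes old
        # generations (GC roots) without also running a store garbage collection.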
        extraArgs =
          [
            "--keep 10"
            "--keep-since 7d"
          ]
          ++ lib.optional cfg.onlyCleanRoots "--no-gc"
          |> lib.concatStringsSep " ";
      };
    };
  };
}
16 modules/nixos/services/bluetooth.nix Normal file
@@ -0,0 +1,16 @@
{ config, lib, ... }:
{
  options.custom.services.bluetooth.enable = lib.mkEnableOption "";

  config = lib.mkIf config.custom.services.bluetooth.enable {
    hardware = {
      bluetooth = {
        enable = true;
        powerOnBoot = true;
      };
      logitech.wireless.enable = true;
    };

    services.blueman.enable = true;
  };
}
170 modules/nixos/services/caddy.nix Normal file
@@ -0,0 +1,170 @@
{
  config,
  self,
  lib,
  ...
}:
let
  cfg = config.custom.services.caddy;
  netCfg = config.custom.networking;

  virtualHosts = cfg.virtualHosts |> lib.attrValues |> lib.filter (vHost: vHost.enable);

  publicHostsExist = virtualHosts |> lib.any (vHost: (!self.lib.isPrivateDomain vHost.domain));
  privateHostsExist = virtualHosts |> lib.any (vHost: self.lib.isPrivateDomain vHost.domain);

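  # Turn a vhost submodule into a `services.caddy.virtualHosts` entry. Private
  # (overlay-only) domains get a manually managed ACME certificate and bind to
  # the overlay address, so they are never exposed on a public interface.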
  mkVirtualHost =
    {
      domain,
      port,
      files,
      extraConfig,
      ...
    }:
    lib.nameValuePair domain {
      logFormat = "output file ${config.services.caddy.logDir}/${domain}.log { mode 640 }";
      extraConfig =
        let
          certDir = config.security.acme.certs.${domain}.directory;
        in
        [
          (lib.optionals (self.lib.isPrivateDomain domain) [
            "tls ${certDir}/fullchain.pem ${certDir}/key.pem"
            "bind ${config.custom.networking.overlay.address}"
          ])
          (lib.optional (port != null) "reverse_proxy localhost:${toString port}")
          (lib.optionals (files != null) [
            "root * ${files}"
            "encode"
            "file_server"
          ])
          (lib.optional (extraConfig != null) extraConfig)
        ]
        |> lib.concatLists
        |> lib.concatLines;
    };
in
{
  options.custom.services.caddy = {
    metricsPort = lib.mkOption {
      type = lib.types.port;
      default = 49514;
    };
    virtualHosts = lib.mkOption {
      type = lib.types.attrsOf (
        lib.types.submodule (
          { name, ... }:
          {
            options = {
              enable = lib.mkEnableOption "" // {
                default = true;
              };
              domain = lib.mkOption {
                type = lib.types.nonEmptyStr;
                default = name;
              };
              port = lib.mkOption {
                type = lib.types.nullOr lib.types.port;
                default = null;
              };
              files = lib.mkOption {
                type = lib.types.nullOr lib.types.path;
                default = null;
              };
              extraConfig = lib.mkOption {
                type = lib.types.nullOr lib.types.lines;
                default = null;
              };
            };
          }
        )
      );
      default = { };
    };
  };

  config = lib.mkIf (virtualHosts != [ ]) (
    lib.mkMerge [
      {
        assertions =
          virtualHosts
          |> lib.concatMap (vHost: [
            {
              assertion = (vHost.port == null) || (vHost.files == null);
              message = "Caddy virtual host `${vHost.domain}` cannot set both `port` and `files`";
            }
            {
              assertion = (vHost.port != null) || (vHost.files != null) || (vHost.extraConfig != null);
              message = "Caddy virtual host `${vHost.domain}` must set at least one of `port`, `files`, or `extraConfig`";
            }
          ]);

        networking.firewall.allowedTCPPorts = lib.mkIf publicHostsExist [
          80
          443
        ];

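        # Expose Caddy's Prometheus metrics on a dedicated port; the Alloy
        # module scrapes this endpoint when `collect.metrics.caddy` is on.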
        services.caddy = {
          enable = true;
          enableReload = false;
          globalConfig = ''
            admin off
            metrics { per_host }
          '';
          extraConfig = ":${toString cfg.metricsPort} { metrics /metrics }";
          virtualHosts = virtualHosts |> lib.map mkVirtualHost |> lib.listToAttrs;
        };

        custom.persistence.directories = [ "/var/lib/caddy" ];
      }

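      # Private domains only resolve inside the overlay, so the HTTP-01
      # challenge cannot work; certificates are obtained via DNS-01 against
      # Porkbun instead, with API credentials supplied through sops.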
      (lib.mkIf privateHostsExist {
        sops.secrets = {
          "porkbun/api-key".owner = config.users.users.acme.name;
          "porkbun/secret-api-key".owner = config.users.users.acme.name;
        };

        security.acme = {
          acceptTerms = true;
          defaults = {
            email = "acme@sstork.dev";
            dnsProvider = "porkbun";
            dnsResolver = "1.1.1.1:53";
            group = config.users.users.caddy.name;
            credentialFiles = {
              PORKBUN_API_KEY_FILE = config.sops.secrets."porkbun/api-key".path;
              PORKBUN_SECRET_API_KEY_FILE = config.sops.secrets."porkbun/secret-api-key".path;
            };
            reloadServices = [ "caddy.service" ];
          };

          certs =
            virtualHosts
            |> lib.filter (host: self.lib.isPrivateDomain host.domain)
            |> lib.map (host: lib.nameValuePair host.domain { })
            |> lib.listToAttrs;
        };

        services.nebula.networks.mesh.firewall.inbound = [
          {
            port = "80";
            proto = "tcp";
            host = "any";
          }
          {
            port = "443";
            proto = "tcp";
            host = "any";
          }
        ];

        systemd.services.caddy = {
          requires = [ netCfg.overlay.systemdUnit ];
          after = [ netCfg.overlay.systemdUnit ];
        };

        custom.persistence.directories = [ "/var/lib/acme" ];
      })
    ]
  );
}
22 modules/nixos/services/comin.nix Normal file
@@ -0,0 +1,22 @@
{
  config,
  inputs,
  lib,
  ...
}:
{
  imports = [ inputs.comin.nixosModules.comin ];

  options.custom.services.comin.enable = lib.mkEnableOption "";

  config = lib.mkIf config.custom.services.comin.enable {
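    # comin is pull-based GitOps: the host polls the repository and deploys
    # whatever lands on the `deploy` branch, rather than being pushed to.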
    services.comin = {
      enable = true;
      remotes = lib.singleton {
        name = "origin";
        url = "https://github.com/SebastianStork/nixos-config.git";
        branches.main.name = "deploy";
      };
    };
  };
}
56 modules/nixos/services/dns.nix Normal file
@@ -0,0 +1,56 @@
{
  config,
  self,
  lib,
  allHosts,
  ...
}:
let
  cfg = config.custom.services.dns;
  netCfg = config.custom.networking;
in
{
  options.custom.services.dns.enable = lib.mkEnableOption "";

  config = lib.mkIf cfg.enable {
    services = {
      unbound = {
        enable = true;

        settings.server = {
          interface = [ netCfg.overlay.interface ];
          access-control = [ "${toString netCfg.overlay.networkCidr} allow" ];

          local-zone = "\"${netCfg.overlay.domain}.\" static";
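          # A records are generated from the flake itself: one per overlay
          # node, plus one per private Caddy virtual host on any host,
          # pointing at that host's overlay address.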
          local-data =
            let
              nodeRecords = netCfg.nodes |> lib.map (node: "\"${node.overlay.fqdn}. A ${node.overlay.address}\"");
              serviceRecords =
                allHosts
                |> lib.attrValues
                |> lib.concatMap (
                  host:
                  host.config.custom.services.caddy.virtualHosts
                  |> lib.attrValues
                  |> lib.map (vHost: vHost.domain)
                  |> lib.filter (domain: self.lib.isPrivateDomain domain)
                  |> lib.map (domain: "\"${domain}. A ${host.config.custom.networking.overlay.address}\"")
                );
            in
            nodeRecords ++ serviceRecords;
        };
      };

      nebula.networks.mesh.firewall.inbound = lib.singleton {
        port = 53;
        proto = "any";
        host = "any";
      };
    };

    systemd.services.unbound = {
      requires = [ netCfg.overlay.systemdUnit ];
      after = [ netCfg.overlay.systemdUnit ];
    };
  };
}
5 modules/nixos/services/nebula/ca.crt Normal file
@@ -0,0 +1,5 @@
-----BEGIN NEBULA CERTIFICATE V2-----
MHugFYAEbWFpboQB/4UEaUdKdYYEayh99YIg5FsAhFthpvA/ELlR7NVFGvuIB5Zv
66n1h1qg0vumHY+DQHGky+1qxbGswdyDZBYfqctktyfJUMKk0TZIn6cqYLbydSZJ
J9HxMj2JWu/d/2nsh11uhRwquBH733AmXZ2DDgE=
-----END NEBULA CERTIFICATE V2-----
139 modules/nixos/services/nebula/default.nix Normal file
@@ -0,0 +1,139 @@
{
  config,
  self,
  lib,
  ...
}:
let
  cfg = config.custom.services.nebula;
  netCfg = config.custom.networking;

  publicPort = 47141;

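  # Lighthouse addresses are derived from the peer list; below, every
  # lighthouse also acts as a relay for nodes that cannot connect directly.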
  lighthouses =
    netCfg.peers
    |> lib.filter (peer: peer.overlay.isLighthouse)
    |> lib.map (lighthouse: lighthouse.overlay.address);
in
{
  options.custom.services.nebula = {
    enable = lib.mkEnableOption "" // {
      default = netCfg.overlay.implementation == "nebula";
    };
    groups = lib.mkOption {
      type = lib.types.nonEmptyListOf lib.types.nonEmptyStr;
      default =
        lib.singleton netCfg.overlay.role
        ++ lib.optional config.custom.services.syncthing.enable "syncthing";
    };

    caCertificateFile = lib.mkOption {
      type = self.lib.types.existingPath;
      default = ./ca.crt;
    };
    publicKeyFile = lib.mkOption {
      type = self.lib.types.existingPath;
      default = "${self}/hosts/${netCfg.hostName}/keys/nebula.pub";
    };
    certificateFile = lib.mkOption {
      type = self.lib.types.existingPath;
      default = "${self}/hosts/${netCfg.hostName}/keys/nebula.crt";
    };
    privateKeyFile = lib.mkOption {
      type = lib.types.nullOr lib.types.path;
      default = null;
    };
  };

  config = lib.mkIf cfg.enable {
    assertions = lib.singleton {
      assertion = netCfg.overlay.isLighthouse -> netCfg.underlay.isPublic;
      message = "`${netCfg.hostName}` is a Nebula lighthouse, but `underlay.isPublic` is not set. Lighthouses must be publicly reachable.";
    };

    sops.secrets."nebula/host-key" = lib.mkIf (cfg.privateKeyFile == null) {
      owner = config.users.users.nebula-mesh.name;
      restartUnits = [ "nebula@mesh.service" ];
    };

    environment.etc = {
      "nebula/ca.crt" = {
        source = cfg.caCertificateFile;
        mode = "0440";
        user = config.systemd.services."nebula@mesh".serviceConfig.User;
      };
      "nebula/host.crt" = {
        source = cfg.certificateFile;
        mode = "0440";
        user = config.systemd.services."nebula@mesh".serviceConfig.User;
      };
    };

    services.nebula.networks.mesh = {
      enable = true;

      ca = "/etc/nebula/ca.crt";
      cert = "/etc/nebula/host.crt";
      key =
        if (cfg.privateKeyFile != null) then
          cfg.privateKeyFile
        else
          config.sops.secrets."nebula/host-key".path;

      tun.device = netCfg.overlay.interface;
      listen = {
        host = lib.mkIf (netCfg.underlay.address != null) netCfg.underlay.address;
        port = lib.mkIf netCfg.underlay.isPublic publicPort;
      };

      inherit (netCfg.overlay) isLighthouse;
      lighthouses = lib.mkIf (!netCfg.overlay.isLighthouse) lighthouses;

      isRelay = netCfg.overlay.isLighthouse;
      relays = lib.mkIf (!netCfg.overlay.isLighthouse) lighthouses;

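      # Map each publicly reachable peer's overlay address to its underlay
      # endpoint so nodes can bootstrap connections without discovery.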
      staticHostMap =
        netCfg.peers
        |> lib.filter (peer: peer.underlay.isPublic)
        |> lib.map (publicPeer: {
          name = publicPeer.overlay.address;
          value = lib.singleton "${publicPeer.underlay.address}:${toString publicPort}";
        })
        |> lib.listToAttrs;

      firewall = {
        outbound = lib.singleton {
          port = "any";
          proto = "any";
          host = "any";
        };
        inbound = lib.singleton {
          port = "any";
          proto = "icmp";
          host = "any";
        };
      };

      settings = {
        pki.disconnect_invalid = true;
        cipher = "aes";
      };
    };

    networking.firewall.trustedInterfaces = [ netCfg.overlay.interface ];

    systemd = {
      services."nebula@mesh" = {
        wants = [ "network-online.target" ];
        after = [ "network-online.target" ];
      };

      network.networks."40-nebula" = {
        matchConfig.Name = netCfg.overlay.interface;
        address = [ netCfg.overlay.cidr ];
        dns = netCfg.overlay.dnsServers;
        domains = [ netCfg.overlay.domain ];
      };
    };
  };
}
15 modules/nixos/services/printing.nix Normal file
@@ -0,0 +1,15 @@
{ config, lib, ... }:
{
  options.custom.services.printing.enable = lib.mkEnableOption "";

  config = lib.mkIf config.custom.services.printing.enable {
    services = {
      printing.enable = true;
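      # Avahi provides mDNS/DNS-SD so network printers can be discovered
      # automatically (e.g. driverless IPP Everywhere printers).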
      avahi = {
        enable = true;
        nssmdns4 = true;
        openFirewall = true;
      };
    };
  };
}
112 modules/nixos/services/restic/backups.nix Normal file
@@ -0,0 +1,112 @@
{
  config,
  pkgs,
  lib,
  ...
}:
let
  backups =
    config.custom.services.restic.backups |> lib.attrValues |> lib.filter (backup: backup.enable);
in
{
  options.custom.services.restic.backups = lib.mkOption {
    type = lib.types.attrsOf (
      lib.types.submodule (
        { name, ... }:
        {
          options = {
            enable = lib.mkEnableOption "" // {
              default = true;
            };
            name = lib.mkOption {
              type = lib.types.nonEmptyStr;
              default = name;
            };
            conflictingService = lib.mkOption {
              type = lib.types.nullOr lib.types.nonEmptyStr;
              default = null;
            };
            paths = lib.mkOption {
              type = lib.types.listOf lib.types.path;
              default = [ ];
            };
            extraConfig = lib.mkOption {
              type = lib.types.attrsOf lib.types.anything;
              default = { };
            };
          };
        }
      )
    );
    default = { };
  };

  config = lib.mkIf (backups != [ ]) {
    sops = {
      secrets = {
        "backblaze/key-id" = { };
        "backblaze/application-key" = { };
        "restic/password" = { };
      };

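      # sops renders the Backblaze S3 credentials into an environment file at
      # activation time, so the plaintext secrets never enter the Nix store.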
templates."restic/environment".content = ''
|
||||
AWS_ACCESS_KEY_ID=${config.sops.placeholder."backblaze/key-id"}
|
||||
AWS_SECRET_ACCESS_KEY=${config.sops.placeholder."backblaze/application-key"}
|
||||
'';
|
||||
};
|
||||
|
||||
systemd.tmpfiles.rules =
|
||||
backups |> lib.map (backup: "d /var/cache/restic-backups-${backup.name} 700 - - -");
|
||||
|
||||
services.restic.backups =
|
||||
backups
|
||||
|> lib.map (backup: {
|
||||
inherit (backup) name;
|
||||
value = lib.mkMerge [
|
||||
{
|
||||
inherit (backup) paths;
|
||||
initialize = true;
|
||||
repository = "s3:https://s3.eu-central-003.backblazeb2.com/stork-atlas/${backup.name}";
|
||||
environmentFile = config.sops.templates."restic/environment".path;
|
||||
passwordFile = config.sops.secrets."restic/password".path;
|
||||
pruneOpts = [
|
||||
"--keep-daily 7"
|
||||
"--keep-weekly 4"
|
||||
"--keep-monthly 6"
|
||||
];
|
||||
timerConfig = {
|
||||
OnCalendar = "03:00";
|
||||
RandomizedDelaySec = "1h";
|
||||
};
|
||||
}
|
||||
backup.extraConfig
|
||||
];
|
||||
})
|
||||
|> lib.listToAttrs;
|
||||
|
||||
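    # If a backup declares a conflicting service, systemd's Conflicts= stops
    # that service when the backup unit starts, and onSuccess/onFailure start
    # it again once the backup finishes either way.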
    systemd.services =
      backups
      |> lib.filter (backup: backup.conflictingService != null)
      |> lib.map (backup: {
        name = "restic-backups-${backup.name}";
        value = {
          unitConfig.Conflicts = [ backup.conflictingService ];
          after = [ backup.conflictingService ];
          onSuccess = [ backup.conflictingService ];
          onFailure = [ backup.conflictingService ];
        };
      })
      |> lib.listToAttrs;

    environment.systemPackages =
      let
        backupAllScript = pkgs.writeShellApplication {
          name = "restic-backup-all";
          text = "systemctl start restic-backups-{${
            backups |> lib.map (backup: backup.name) |> lib.concatStringsSep ","
          }}";
        };
      in
      [ backupAllScript ];
  };
}
58 modules/nixos/services/restic/healthchecks.nix Normal file
@@ -0,0 +1,58 @@
{
  config,
  pkgs,
  lib,
  ...
}:
let
  backupsWithHealthchecks =
    config.custom.services.restic.backups
    |> lib.attrValues
    |> lib.filter (backup: backup.enable && backup.doHealthchecks);
in
{
  options.custom.services.restic.backups = lib.mkOption {
    type = lib.types.attrsOf (
      lib.types.submodule {
        options.doHealthchecks = lib.mkEnableOption "" // {
          default = true;
        };
      }
    );
  };

  config = lib.mkIf (backupsWithHealthchecks != [ ]) {
    sops.secrets."healthchecks/ping-key" = { };

    systemd.services = {
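      # Templated ping unit: the instance name (%i) is the check slug with `_`
      # standing in for `/`, so `foo-backup_start` pings the /start endpoint of
      # the `foo-backup` check; `?create=1` auto-creates missing checks.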
"healthcheck-ping@" = {
|
||||
description = "Pings healthcheck (%i)";
|
||||
serviceConfig.Type = "oneshot";
|
||||
scriptArgs = "%i";
|
||||
script = ''
|
||||
ping_key="$(cat ${config.sops.secrets."healthchecks/ping-key".path})"
|
||||
slug="$(echo "$1" | tr _ /)"
|
||||
|
||||
${lib.getExe pkgs.curl} \
|
||||
--fail \
|
||||
--silent \
|
||||
--show-error \
|
||||
--max-time 10 \
|
||||
--retry 5 "https://hc-ping.com/$ping_key/$slug?create=1"
|
||||
'';
|
||||
};
|
||||
}
|
||||
// (
|
||||
backupsWithHealthchecks
|
||||
|> lib.map (backup: {
|
||||
name = "restic-backups-${backup.name}";
|
||||
value = {
|
||||
wants = [ "healthcheck-ping@${backup.name}-backup_start.service" ];
|
||||
onSuccess = [ "healthcheck-ping@${backup.name}-backup.service" ];
|
||||
onFailure = [ "healthcheck-ping@${backup.name}-backup_fail.service" ];
|
||||
};
|
||||
})
|
||||
|> lib.listToAttrs
|
||||
);
|
||||
};
|
||||
}
|
||||
66 modules/nixos/services/restic/restore.nix Normal file
@@ -0,0 +1,66 @@
{
  config,
  pkgs,
  lib,
  ...
}:
let
  backupsWithRestoreCommand =
    config.custom.services.restic.backups
    |> lib.attrValues
    |> lib.filter (backup: backup.enable && backup.restoreCommand.enable);
in
{
  options.custom.services.restic.backups = lib.mkOption {
    type = lib.types.attrsOf (
      lib.types.submodule {
        options.restoreCommand = {
          enable = lib.mkEnableOption "" // {
            default = true;
          };
          preRestore = lib.mkOption {
            type = lib.types.str;
            default = "";
          };
          postRestore = lib.mkOption {
            type = lib.types.str;
            default = "";
          };
        };
      }
    );
  };

  config = {
    environment.systemPackages =
      let
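        # Each wrapper stops the conflicting service (if any), runs the
        # pre/post hooks, and restores the latest snapshot via the
        # `restic-<name>` wrapper that the NixOS restic module puts in PATH.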
        restoreScripts =
          backupsWithRestoreCommand
          |> lib.map (
            backup:
            let
              inherit (backup) name conflictingService;
              inherit (backup.restoreCommand) preRestore postRestore;
              hasConflictingService = conflictingService != null;
            in
            pkgs.writeShellApplication {
              name = "restic-restore-${name}";
              text = ''
                ${lib.optionalString hasConflictingService "systemctl stop ${conflictingService}"}
                ${preRestore}
                restic-${name} restore latest --target /
                ${postRestore}
                ${lib.optionalString hasConflictingService "systemctl start ${conflictingService}"}
              '';
            }
          );

        restoreAllScript = pkgs.writeShellApplication {
          name = "restic-restore-all";
          text =
            backupsWithRestoreCommand |> lib.map (backup: "restic-restore-${backup.name}") |> lib.concatLines;
        };
      in
      restoreScripts ++ [ restoreAllScript ];
  };
}
16 modules/nixos/services/sound.nix Normal file
@@ -0,0 +1,16 @@
{ config, lib, ... }:
{
  options.custom.services.sound.enable = lib.mkEnableOption "";

  config = lib.mkIf config.custom.services.sound.enable {
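    # rtkit lets PipeWire request realtime scheduling for low-latency audio.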
    security.rtkit.enable = true;
    services.pipewire = {
      enable = true;
      pulse.enable = true;
      alsa = {
        enable = true;
        support32Bit = true;
      };
    };
  };
}
52 modules/nixos/services/sshd.nix Normal file
@@ -0,0 +1,52 @@
{
  config,
  lib,
  allHosts,
  ...
}:
let
  cfg = config.custom.services.sshd;
  netCfg = config.custom.networking;
in
{
  options.custom.services.sshd.enable = lib.mkEnableOption "";

  config = lib.mkIf cfg.enable {
    services = {
      openssh = {
        enable = true;
        openFirewall = false;
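        # Clear the default listener and bind only to the overlay address, so
        # sshd is unreachable from the underlay network.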
        ports = lib.mkForce [ ];
        listenAddresses = lib.singleton {
          addr = netCfg.overlay.address;
          port = 22;
        };
        settings = {
          PasswordAuthentication = false;
          KbdInteractiveAuthentication = false;
          PermitRootLogin = "no";
        };
      };

      nebula.networks.mesh.firewall.inbound = lib.singleton {
        port = 22;
        proto = "tcp";
        group = "client";
      };
    };

    systemd.services.sshd = {
      requires = [ netCfg.overlay.systemdUnit ];
      after = [ netCfg.overlay.systemdUnit ];
    };

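    # Authorize the user's SSH public key from every other host in the flake
    # that has an enabled home-manager ssh config, so any of those machines
    # can log in here.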
    users.users.seb.openssh.authorizedKeys.keyFiles =
      allHosts
      |> lib.attrValues
      |> lib.filter (host: host.config.networking.hostName != netCfg.hostName)
      |> lib.filter (host: host.config |> lib.hasAttr "home-manager")
      |> lib.map (host: host.config.home-manager.users.seb.custom.programs.ssh)
      |> lib.filter (ssh: ssh.enable)
      |> lib.map (ssh: ssh.publicKeyFile);
  };
}
148 modules/nixos/services/syncthing.nix Normal file
@@ -0,0 +1,148 @@
{
  config,
  self,
  lib,
  allHosts,
  ...
}:
let
  cfg = config.custom.services.syncthing;
  netCfg = config.custom.networking;

  inherit (config.services.syncthing) dataDir;

  useSopsSecrets = config.custom.sops.secrets |> lib.hasAttr "syncthing";
in
{
  options.custom.services.syncthing = {
    enable = lib.mkEnableOption "";
    isServer = lib.mkEnableOption "";
    doBackups = lib.mkEnableOption "";
    deviceId = lib.mkOption {
      type = lib.types.nonEmptyStr;
      default = "${self}/hosts/${netCfg.hostName}/keys/syncthing.id" |> lib.readFile |> lib.trim;
    };
    syncPort = lib.mkOption {
      type = lib.types.port;
      default = 22000;
    };
    gui = {
      domain = lib.mkOption {
        type = lib.types.nullOr lib.types.nonEmptyStr;
        default = null;
      };
      port = lib.mkOption {
        type = lib.types.port;
        default = 8384;
      };
    };
    folders = lib.mkOption {
      type = lib.types.nonEmptyListOf lib.types.nonEmptyStr;
      default = [
        "Documents"
        "Downloads"
        "Music"
        "Pictures"
        "Projects"
        "Videos"
      ];
    };
  };

  config = lib.mkIf cfg.enable {
    assertions = [
      {
        assertion = cfg.isServer -> (cfg.gui.domain != null);
        message = "Syncthing requires `gui.domain` to be set when `isServer` is enabled";
      }
      {
        assertion = (cfg.gui.domain != null) -> (self.lib.isPrivateDomain cfg.gui.domain);
        message = self.lib.mkUnprotectedMessage "Syncthing-GUI";
      }
    ];

    sops.secrets = lib.mkIf useSopsSecrets {
      "syncthing/cert" = {
        owner = config.services.syncthing.user;
        restartUnits = [ "syncthing.service" ];
      };
      "syncthing/key" = {
        owner = config.services.syncthing.user;
        restartUnits = [ "syncthing.service" ];
      };
    };

    services = {
      syncthing = {
        enable = true;

        user = lib.mkIf (!cfg.isServer) "seb";
        group = lib.mkIf (!cfg.isServer) "users";
        dataDir = lib.mkIf (!cfg.isServer) "/home/seb";

        guiAddress = "localhost:${toString cfg.gui.port}";

        cert = lib.mkIf useSopsSecrets config.sops.secrets."syncthing/cert".path;
        key = lib.mkIf useSopsSecrets config.sops.secrets."syncthing/key".path;

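        # Devices and folders are derived from the flake: every other host
        # with syncthing enabled becomes a device, addressed over the overlay,
        # and each folder is shared with exactly the hosts that list it.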
        settings =
          let
            hosts =
              allHosts
              |> lib.filterAttrs (_: host: host.config.networking.hostName != config.networking.hostName)
              |> lib.filterAttrs (_: host: host.config.custom.services.syncthing.enable);
          in
          {
            devices =
              hosts
              |> lib.mapAttrs (
                _: host: {
                  id = host.config.custom.services.syncthing.deviceId;
                  addresses = lib.singleton "tcp://${host.config.custom.networking.overlay.address}:${toString host.config.custom.services.syncthing.syncPort}";
                }
              );

            folders =
              cfg.folders
              |> self.lib.genAttrs (folder: {
                path = "${dataDir}/${folder}";
                devices =
                  hosts
                  |> lib.filterAttrs (_: host: host.config.custom.services.syncthing.folders |> lib.elem folder)
                  |> lib.attrNames;
              });

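            # All peers are reachable directly over the overlay, so global and
            # local discovery, relaying, and NAT traversal are all disabled.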
            options = {
              listenAddress = "tcp://${netCfg.overlay.address}:${toString cfg.syncPort}";
              globalAnnounceEnabled = false;
              localAnnounceEnabled = false;
              relaysEnabled = false;
              natEnabled = false;
              urAccepted = -1;
              autoUpgradeIntervalH = 0;
            };
          };
      };

      nebula.networks.mesh.firewall.inbound = lib.singleton {
        port = cfg.syncPort;
        proto = "tcp";
        group = "syncthing";
      };
    };

    custom = {
      services = {
        caddy.virtualHosts.${cfg.gui.domain}.port = lib.mkIf (cfg.gui.domain != null) cfg.gui.port;

        restic.backups.syncthing = lib.mkIf cfg.doBackups {
          conflictingService = "syncthing.service";
          paths = [ dataDir ];
          extraConfig.exclude = [ "${dataDir}/Downloads" ];
        };
      };

      persistence.directories = [ dataDir ];
    };
  };
}