{ config, pkgs, name, lib, pkgs-no-overlay, secrets, ... }:
{
# Age public key used to encrypt this host's secrets; derived from the
# host's SSH key with:
#   ssh-keyscan dilion | nix-shell -p ssh-to-age --run ssh-to-age
secrets.ageKeys = [ "age1x49n6qa0arkdpq8530s7umgm0gqkq90exv4jep97q30rfnzknpaqate06a" ];
boot = {
  loader = {
    grub = {
      # Install GRUB on every drive so the machine still boots if one of
      # them fails (presumably all four are pool members — TODO confirm
      # against the actual partition layout).
      devices = [ "/dev/sda" "/dev/sdb" "/dev/sdc" "/dev/sdd" ];
    };
    # Headless server: keep the boot menu delay minimal.
    timeout = 1;
  };
  # NOTE(review): presumably blacklisted to avoid console/framebuffer
  # conflicts — confirm why this was needed on this host.
  blacklistedKernelModules = [ "nvidiafb" ];
  # Root is on ZFS (see fileSystems below), so stage-1 must support it.
  supportedFilesystems = [ "zfs" ];
  # Newest kernel still supported by the ZFS module in use.
  # NOTE(review): latestCompatibleLinuxPackages is deprecated upstream
  # (removed after NixOS 23.11) — consider pinning a kernel instead.
  kernelPackages = config.boot.zfs.package.latestCompatibleLinuxPackages;
  # Intel host: KVM acceleration for the libvirt guests defined below.
  kernelModules = [ "kvm-intel" ];
  initrd.availableKernelModules = [ "ahci" "sd_mod" ];
  # Copy /boot/pass.key into the initrd — presumably the ZFS encryption
  # passphrase (see the commented-out requestEncryptionCredentials
  # below); TODO confirm.
  initrd.secrets = {
    "/boot/pass.key" = "/boot/pass.key";
  };
  kernel.sysctl."vm.nr_hugepages" = 256; # for xmr-stak
  # available in nixos-20.09
  #zfs.requestEncryptionCredentials = [ "zpool/root" ];
};
# Prefer the power-saving CPU frequency governor.
powerManagement.cpuFreqGovernor = "powersave";
# Non-free but redistributable firmware blobs (NIC, microcode, etc.).
hardware.enableRedistributableFirmware = true;
# Swap partition located by its filesystem label.
swapDevices = [ { label = "swap"; } ];
# Filesystem layout: system datasets live on ZFS (zpool/root), while
# /boot, /data and /nix are plain ext4.
fileSystems =
  let
    # Mount a dataset from the ZFS pool.
    zfsOn = device: { fsType = "zfs"; inherit device; };
    # Mount an ext4 filesystem located by partition label.
    ext4Label = label: { fsType = "ext4"; inherit label; };
  in {
    "/" = zfsOn "zpool/root";
    "/boot" = { fsType = "ext4"; device = "/dev/disk/by-uuid/fd1c511e-2bc0-49d5-b8bb-95e7e8c8c816"; };
    "/etc" = zfsOn "zpool/root/etc";
    "/home" = zfsOn "zpool/root/home";
    "/tmp" = zfsOn "zpool/root/tmp";
    "/var" = zfsOn "zpool/root/var";
    "/data" = ext4Label "data";
    "/nix" = ext4Label "nix";
  };
# Pin the NIC with this MAC address to the stable name "eth0", which the
# static addressing in `networking` below relies on.
services.udev.extraRules = ''
  ACTION=="add", SUBSYSTEM=="net", ATTR{address}=="10:bf:48:7f:e6:3b", NAME="eth0"
'';
nixpkgs.config.permittedInsecurePackages = [
  "python-2.7.18.6" # EOL python2, still required by nagios-cli
];
networking = {
  # Unique machine identifier required by ZFS.
  # Generated with: head -c4 /dev/urandom | od -A none -t x4
  hostId = "27c3048d";
  firewall.enable = false;
  # IPv4: the first "main" address with the subnet's real prefix (/27),
  # then every address of the non-"main" groups as /32 host routes.
  # (Consistency fix: use the `lib` module argument throughout instead
  # of mixing `lib` and `pkgs.lib` — they are the same library.)
  interfaces."eth0".ipv4.addresses =
    [ { address = lib.head config.hostEnv.ips.main.ip4; prefixLength = 27; } ]
    ++ lib.flatten (lib.attrsets.mapAttrsToList
      (n: ips: map (ip: { address = ip; prefixLength = 32; }) (ips.ip4 or []))
      (lib.attrsets.filterAttrs (n: v: n != "main") config.hostEnv.ips));
  # IPv6: the subnet address itself as /64, then every configured IPv6
  # address; only the first "main" address keeps the /64 prefix, the
  # rest are /128 host addresses.
  interfaces."eth0".ipv6.addresses =
    [ { address = "2a01:4f8:141:53e7::"; prefixLength = 64; } ]
    ++ lib.flatten (lib.attrsets.mapAttrsToList
      (n: ips: map (ip: { address = ip; prefixLength = (if n == "main" && ip == lib.head ips.ip6 then 64 else 128); }) (ips.ip6 or []))
      config.hostEnv.ips);
  defaultGateway = { address = "176.9.10.225"; interface = "eth0"; };
  defaultGateway6 = { address = "fe80::1"; interface = "eth0"; };
  # Presumably the hosting provider's recursive resolvers — confirm.
  nameservers = [
    "213.133.98.98"
    "213.133.99.99"
    "213.133.100.100"
    "2a01:4f8:0:a0a1::add:1010"
    "2a01:4f8:0:a102::add:9999"
    "2a01:4f8:0:a111::add:9898"
  ];
};
imports = [
  # Per-host user accounts provided by the private secrets module.
  secrets.nixosModules.users-config-dilion
  ./monitoring.nix
  ./vms.nix
];
# Libvirt guest VMs managed by the vms.nix module.
myServices.vms.libvirt-guests = {
  # Build worker; its volume is scratch and destroyed when it exits.
  buildbot = {
    pool = "zfspool";
    network = "immae";
    cpus = 1;
    memory = 3; # unit not visible here — presumably GiB, confirm in vms.nix
    diskSize = 10; # presumably GiB — confirm in vms.nix
    destroyVolumeOnExit = true;
  };
};
# NixOS configurations used to build the guests' base images.
myServices.vms.libvirt-images = {
  nixos = ./vms/base_configuration.nix;
  buildbot = ./vms/buildbot_configuration.nix;
};
# Private bridge network for the guests (semantics defined in vms.nix).
myServices.vms.libvirt-networks.immae = {
  bridgeNumber = 1;
  ipRange = "192.168.100";
};
# Libvirt storage pools.
myServices.vms.libvirt-pools = {
  # Directory pool for the read-only base images (presumably populated
  # from libvirt-images above — confirm in vms.nix).
  niximages = {
    type = "dir";
    target = "/etc/libvirtd/base-images";
  };
  buildbot-disks = rec {
    type = "dir";
    target = "/var/lib/libvirt/images/buildbot-disks";
    # `rec` lets preStart reuse `target` to create the directory before
    # libvirt starts the pool.
    preStart = ''
      mkdir -p ${target}
    '';
  };
  zfspool = {
    # pool-define-as --name zfspool --source-name zpool/libvirt --type zfs
    type = "zfs";
    # Raw pool XML handed to libvirt: the backing ZFS dataset.
    xml = ''
      <source>
      <name>zpool/libvirt</name>
      </source>
    '';
  };
};
# NSS plugin shipped with libvirt so guest names resolve as hostnames.
system.nssModules = [ pkgs.libvirt ];
# Force the hosts lookup order so libvirt guests resolve before DNS.
system.nssDatabases.hosts = lib.mkForce [ "files" "libvirt_guest" "mymachines" "dns" "myhostname" ];
programs.zsh.enable = true;
# SSH-only account for remote libvirt access; "!" disables password
# login, and membership in libvirtd grants guest management rights.
users.users.libvirt = {
  hashedPassword = "!";
  shell = pkgs.bashInteractive;
  isSystemUser = true;
  group = "libvirtd";
  # presumably netcat is needed for qemu+ssh:// remote connections —
  # TODO confirm.
  packages = [ pkgs.libressl.nc ];
  openssh.authorizedKeys.keys = [
    config.myEnv.buildbot.ssh_key.public
    config.myEnv.sshd.rootKeys.ismael_flony
  ];
};
users.groups.backup = {};
# Unprivileged account used by the remote zrepl peer (see services.zrepl
# below) over SSH; "!" disables password login.
users.users.backup = {
  hashedPassword = "!";
  isSystemUser = true;
  extraGroups = [ "keys" ]; # presumably grants read access to deployed secrets — confirm
  group = "backup";
  shell = pkgs.bashInteractive;
  # Forced command: this key may only run zrepl's stdinserver transport
  # for the "eldiron" client; "restrict" disables forwarding/pty/etc.
  openssh.authorizedKeys.keys = let
    zreplConfig = "/etc/zrepl/zrepl.yml";
  in
  ["command=\"${pkgs.zrepl}/bin/zrepl stdinserver --config ${zreplConfig} eldiron\",restrict ${config.myEnv.zrepl_backup.ssh_key.public}"];
};
# Docker on ZFS-backed storage (zpool/docker is also replicated by the
# zrepl source job below).
virtualisation.docker.enable = true;
virtualisation.docker.storageDriver = "zfs";
virtualisation.libvirtd.enable = true;
# qemu taken from the un-overlaid package set — presumably to avoid
# rebuilding qemu whenever the local overlay changes; confirm intent.
virtualisation.libvirtd.qemu.package = pkgs-no-overlay.qemu;
# libvirtd needs the zfs tools on PATH for the "zfspool" storage pool.
systemd.services.libvirtd.path = lib.mkAfter [ config.boot.zfs.package ];
# Ensure the image directory exists and is group-writable for libvirtd.
systemd.services.libvirtd.postStart = ''
  install -m 0770 -g libvirtd -d /var/lib/libvirt/images
'';
time.timeZone = "Europe/Paris";
nix = {
  settings = {
    # Relaxed sandbox — presumably needed together with
    # allow-unsafe-native-code-during-evaluation below; confirm.
    sandbox = "relaxed";
    max-jobs = 8;
    # NOTE(review): hydra.iohk.io has been decommissioned upstream in
    # favour of cache.iog.io — confirm this substituter is still wanted.
    substituters = [ "https://hydra.iohk.io" "https://cache.nixos.org" ];
    trusted-public-keys = [ "hydra.iohk.io:f/Ea+s+dFdN+3Y/G+FDgSq+a5NEWhJGzdjvKNGv0/EQ=" ];
    trusted-users = [ "root" "@wheel" ];
  };
  # Raw nix.conf lines appended verbatim.
  extraOptions = ''
    keep-outputs = true
    keep-derivations = true
    allow-unsafe-native-code-during-evaluation = true
    experimental-features = nix-command flakes
    #Assumed in NUR
    allow-import-from-derivation = true
  '';
};
# Trust the eriomem CA in addition to the default bundle. Fetched over
# plain http, but integrity is pinned by the sha256 hash below.
security.pki.certificateFiles = [
  (pkgs.fetchurl {
    url = "http://downloads.e.eriomem.net/eriomemca.pem";
    sha256 = "1ixx4c6j3m26j8dp9a3dkvxc80v1nr5aqgmawwgs06bskasqkvvh";
  })
];
myServices.monitoring.enable = true;
# Certificate for dilion.immae.eu, readable by the "immae" group —
# presumably consumed by a service configured outside this file; confirm.
security.acme.certs."${name}-immae" = {
  group = "immae";
  domain = "dilion.immae.eu";
};
# Host certificate used by nginx below; also covers dilion.immae.dev.
security.acme.certs."${name}" = {
  group = config.services.nginx.group;
  extraDomainNames = [
    "dilion.immae.dev"
  ];
};
# Loosen systemd's ProtectHome so nginx can read the docroot in /home.
systemd.services.nginx.serviceConfig.ProtectHome = "read-only";
services.nginx = {
  enable = true;
  recommendedOptimisation = true;
  recommendedGzipSettings = true;
  recommendedProxySettings = true;
  virtualHosts = {
    "dilion.immae.dev" = {
      # Serve ACME http-01 challenges from the shared webroot and use
      # the host certificate declared above.
      acmeRoot = config.security.acme.defaults.webroot;
      useACMEHost = name;
      forceSSL = true;
      locations."/".root = "/home/immae/www";
    };
  };
};
# Deploy the zrepl TLS material: this host's private key plus one
# certificate file per peer listed in myEnv.zrepl_backup.certs.
secrets.keys =
  let
    # Deployment attributes for one peer certificate, readable only by
    # the backup (zrepl) user.
    certKey = peer: {
      permissions = "0400";
      text = config.myEnv.zrepl_backup.certs."${peer}".certificate;
      user = "backup";
      group = "root";
    };
  in
  {
    # This host's TLS private key.
    "zrepl/${name}.key" = {
      permissions = "0400";
      text = config.myEnv.zrepl_backup.certs."${name}".key;
      user = "backup";
      group = "root";
    };
  } // lib.attrsets.mapAttrs'
    (peer: _: lib.attrsets.nameValuePair "zrepl/certificates/${peer}.crt" (certKey peer))
    config.myEnv.zrepl_backup.certs;
# Make mdadm send its alert mails to the monitoring address.
environment.etc."mdadm.conf" = {
  enable = true;
  mode = "0644";
  user = "root";
  text = "MAILADDR ${config.myEnv.monitoring.email}";
};
# Run zrepl unprivileged; the needed ZFS rights are delegated
# out-of-band on each peer:
# on eldiron:
#   zfs allow backup create,mount,receive,destroy,rename,snapshot,hold,bookmark,release zpool/backup
# on flony:
#   zfs allow backup hold,release,bookmark,snapshot,send zpool
systemd.services.zrepl.serviceConfig.User = "backup";
# ssh on PATH — presumably for the stdinserver transport; confirm.
systemd.services.zrepl.path = [ pkgs.openssh ];
services.zrepl = {
  enable = true;
  settings = {
    global.control.sockpath = "/run/zrepl/control";
    global.serve.stdinserver.sockdir = "/run/zrepl/stdinserver";
    jobs =
      let
        # All three jobs present the same TLS identity (dilion's
        # cert/key) and differ only in listen address and accepted peer.
        tlsServe = listen: peer: {
          type = "tls";
          inherit listen;
          ca = config.secrets.fullPaths."zrepl/certificates/${peer}.crt";
          cert = config.secrets.fullPaths."zrepl/certificates/dilion.crt";
          key = config.secrets.fullPaths."zrepl/dilion.key";
          client_cns = [ peer ];
        };
      in [
        # Receive eldiron's backups under zpool/backup.
        {
          type = "sink";
          name = "backup-from-eldiron"; # must not change
          root_fs = "zpool/backup";
          serve = tlsServe ":19000" "eldiron";
        }
        # Let flony pull the system datasets (sent unencrypted!).
        {
          type = "source";
          name = "backup-to-wd-zpool"; # must not change
          serve = tlsServe ":19001" "flony";
          filesystems = {
            "zpool/libvirt<" = true;
            "zpool/root<" = true;
          };
          snapshotting.type = "manual";
        }
        # Let flony pull the docker datasets (sent unencrypted!).
        {
          type = "source";
          name = "backup-to-wd-zpool-docker"; # must not change
          serve = tlsServe ":19002" "flony";
          filesystems."zpool/docker<" = true;
          snapshotting.type = "manual";
        }
      ];
  };
};
# This value determines the NixOS release with which your system is
# to be compatible, in order to avoid breaking some software such as
# database servers. You should change this only after NixOS release
# notes say you should. (It does not select which nixpkgs version is
# actually deployed.)
# https://nixos.org/nixos/manual/release-notes.html
system.stateVersion = "23.05"; # Did you read the comment?
}