-{ config, pkgs, name, lib, ... }:
+{ config, pkgs, name, lib, pkgs-no-overlay, secrets, ... }:
{
- deployment = {
- targetUser = "root";
- targetHost = config.hostEnv.ips.main.ip4;
- substituteOnDestination = true;
- };
# ssh-keyscan dilion | nix-shell -p ssh-to-age --run ssh-to-age
secrets.ageKeys = [ "age1x49n6qa0arkdpq8530s7umgm0gqkq90exv4jep97q30rfnzknpaqate06a" ];
- nixpkgs.system = lib.mkOverride 900 "x86_64-linux";
boot = {
loader = {
grub = {
- version = 2;
devices = [ "/dev/sda" "/dev/sdb" "/dev/sdc" "/dev/sdd" ];
};
timeout = 1;
};
blacklistedKernelModules = [ "nvidiafb" ];
supportedFilesystems = [ "zfs" ];
- kernelPackages = pkgs.linuxPackages_latest;
+ kernelPackages = config.boot.zfs.package.latestCompatibleLinuxPackages;
kernelModules = [ "kvm-intel" ];
initrd.availableKernelModules = [ "ahci" "sd_mod" ];
initrd.secrets = {
# available in nixos-20.09
#zfs.requestEncryptionCredentials = [ "zpool/root" ];
};
- nix.maxJobs = 8;
powerManagement.cpuFreqGovernor = "powersave";
hardware.enableRedistributableFirmware = true;
- myEnv = import ../../../nixops/secrets/environment.nix;
-
swapDevices = [ { label = "swap"; } ];
fileSystems = {
"/" = { fsType = "zfs"; device = "zpool/root"; };
ACTION=="add", SUBSYSTEM=="net", ATTR{address}=="10:bf:48:7f:e6:3b", NAME="eth0"
'';
+ nixpkgs.config.permittedInsecurePackages = [
+ "python-2.7.18.6" # for nagios-cli
+ ];
+
networking = {
hostId = "27c3048d"; # generated with head -c4 /dev/urandom | od -A none -t x4
firewall.enable = false;
interfaces."eth0".ipv4.addresses =
- [ { address = config.hostEnv.ips.main.ip4; prefixLength = 27; } ]
- ++ pkgs.lib.attrsets.mapAttrsToList
- (n: ips: { address = ips.ip4; prefixLength = 32; })
- (pkgs.lib.attrsets.filterAttrs (n: v: n != "main") config.hostEnv.ips);
+ [ { address = lib.head config.hostEnv.ips.main.ip4; prefixLength = 27; } ]
+ ++ pkgs.lib.flatten (pkgs.lib.attrsets.mapAttrsToList
+ (n: ips: map (ip: { address = ip; prefixLength = 32; }) (ips.ip4 or []))
+ (pkgs.lib.attrsets.filterAttrs (n: v: n != "main") config.hostEnv.ips));
interfaces."eth0".ipv6.addresses =
[ { address = "2a01:4f8:141:53e7::"; prefixLength = 64; } ]
++ pkgs.lib.flatten (pkgs.lib.attrsets.mapAttrsToList
];
};
- myServices.ssh.modules = [ config.myServices.ssh.predefinedModules.regular ];
- imports = builtins.attrValues (import ../..) ++ [ ./dilion/vms.nix ];
+ myServices.ssh.modules.regular.snippet = builtins.readFile ./ssh_ldap_regular.sh;
+
+ imports = [
+ secrets.nixosModules.users-config-dilion
+ ./monitoring.nix
+ ./vms.nix
+ ];
+
+ myServices.vms.libvirt-guests = {
+ buildbot = {
+ pool = "zfspool";
+ network = "immae";
+ cpus = 1;
+ memory = 3;
+ diskSize = 10;
+ destroyVolumeOnExit = true;
+ };
+ };
+ myServices.vms.libvirt-images = {
+ nixos = ./vms/base_configuration.nix;
+ buildbot = ./vms/buildbot_configuration.nix;
+ };
+ myServices.vms.libvirt-networks.immae = {
+ bridgeNumber = 1;
+ ipRange = "192.168.100";
+ };
+ myServices.vms.libvirt-pools = {
+ niximages = {
+ type = "dir";
+ target = "/etc/libvirtd/base-images";
+ };
+ buildbot-disks = rec {
+ type = "dir";
+ target = "/var/lib/libvirt/images/buildbot-disks";
+ preStart = ''
+ mkdir -p ${target}
+ '';
+ };
+ zfspool = {
+ # virsh pool-define-as --name zfspool --source-name zpool/libvirt --type zfs
+ type = "zfs";
+ xml = ''
+ <source>
+ <name>zpool/libvirt</name>
+ </source>
+ '';
+ };
+ };
system.nssModules = [ pkgs.libvirt ];
system.nssDatabases.hosts = lib.mkForce [ "files" "libvirt_guest" "mymachines" "dns" "myhostname" ];
shell = pkgs.bashInteractive;
isSystemUser = true;
group = "libvirtd";
- packages = [ pkgs.netcat-openbsd ];
+ packages = [ pkgs.libressl.nc ];
openssh.authorizedKeys.keys = [
config.myEnv.buildbot.ssh_key.public
config.myEnv.sshd.rootKeys.ismael_flony
];
};
+ users.groups.backup = {};
users.users.backup = {
hashedPassword = "!";
isSystemUser = true;
extraGroups = [ "keys" ];
+ group = "backup";
shell = pkgs.bashInteractive;
openssh.authorizedKeys.keys = let
- zreplConfig = config.secrets.fullPaths."zrepl/zrepl.yml";
+ zreplConfig = "/etc/zrepl/zrepl.yml";
in
["command=\"${pkgs.zrepl}/bin/zrepl stdinserver --config ${zreplConfig} eldiron\",restrict ${config.myEnv.zrepl_backup.ssh_key.public}"];
};
}
];
- system.activationScripts.libvirtd_exports = ''
- install -m 0755 -o root -g root -d /var/lib/caldance
- '';
virtualisation.docker.enable = true;
virtualisation.docker.storageDriver = "zfs";
virtualisation.libvirtd.enable = true;
- users.extraUsers.immae.extraGroups = [ "libvirtd" "docker" ];
+ virtualisation.libvirtd.qemu.package = pkgs-no-overlay.qemu;
+ systemd.services.libvirtd.path = lib.mkAfter [ config.boot.zfs.package ];
+ users.groups.immae = {};
+ users.extraUsers.immae.extraGroups = [ "immae" "libvirtd" "docker" ];
systemd.services.libvirtd.postStart = ''
install -m 0770 -g libvirtd -d /var/lib/libvirt/images
'';
- systemd.services.socat-caldance = {
- description = "Forward ssh port to caldance";
- wantedBy = [ "multi-user.target" ];
- after = [ "network.target" ];
-
- serviceConfig = {
- ExecStart = "${pkgs.socat}/bin/socat TCP-LISTEN:8022,fork TCP:caldance:22";
- };
- };
time.timeZone = "Europe/Paris";
nix = {
- useSandbox = "relaxed";
+ settings = {
+ sandbox = "relaxed";
+ max-jobs = 8;
+ substituters = [ "https://hydra.iohk.io" "https://cache.nixos.org" ];
+ trusted-public-keys = [ "hydra.iohk.io:f/Ea+s+dFdN+3Y/G+FDgSq+a5NEWhJGzdjvKNGv0/EQ=" ];
+ trusted-users = [ "root" "@wheel" ];
+ };
extraOptions = ''
keep-outputs = true
keep-derivations = true
})
];
- # This is equivalent to setting environment.sessionVariables.NIX_PATH
- nix.nixPath = [
- "home-manager=${pkgs.sources.home-manager.url}"
- "nixpkgs=${pkgs.sources.nixpkgs-home-manager.url}"
- ];
- nix.binaryCaches = [ "https://hydra.iohk.io" "https://cache.nixos.org" ];
- nix.binaryCachePublicKeys = [ "hydra.iohk.io:f/Ea+s+dFdN+3Y/G+FDgSq+a5NEWhJGzdjvKNGv0/EQ=" ];
-
myServices.monitoring.enable = true;
- myServices.certificates.enable = true;
- security.acme.certs."${name}-immae" = config.myServices.certificates.certConfig // {
- user = "immae";
+ security.acme.certs."${name}-immae" = {
+ group = "immae";
domain = "dilion.immae.eu";
};
security.acme.certs."${name}" = {
- user = config.services.nginx.user;
group = config.services.nginx.group;
- extraDomains = {
- "dilion.immae.dev" = null;
- "caldance.cs.immae.dev" = null;
- };
+ extraDomainNames = [
+ "dilion.immae.dev"
+ ];
};
+ systemd.services.nginx.serviceConfig.ProtectHome = "read-only";
services.nginx = {
enable = true;
recommendedOptimisation = true;
recommendedGzipSettings = true;
recommendedProxySettings = true;
- upstreams = {
- caldance.servers."caldance:3031" = {};
- };
virtualHosts = {
"dilion.immae.dev" = {
- acmeRoot = config.myServices.certificates.webroot;
+ acmeRoot = config.security.acme.defaults.webroot;
useACMEHost = name;
forceSSL = true;
- root = "/home/immae/www";
- };
- "caldance.cs.immae.dev" = {
- acmeRoot = config.myServices.certificates.webroot;
- useACMEHost = name;
- forceSSL = true;
- locations."/".extraConfig = ''
- uwsgi_pass caldance;
- '';
- locations."/static/".alias = "/var/lib/caldance/caldance/app/www/static/";
- locations."/media/".alias = "/var/lib/caldance/caldance/media/";
- extraConfig = ''
- auth_basic "Authentification requise";
- auth_basic_user_file ${pkgs.writeText "htpasswd" config.myEnv.websites.caldance.integration.password};
- '';
+ locations."/".root = "/home/immae/www";
};
};
};
- systemd.services.zrepl.serviceConfig.RuntimeDirectory = lib.mkForce "zrepl zrepl/stdinserver";
+ secrets.keys = {
+ "zrepl/${name}.key" = {
+ permissions = "0400";
+ text = config.myEnv.zrepl_backup.certs."${name}".key;
+ user = "backup";
+ group = "root";
+ };
+ } // builtins.listToAttrs (map (x: lib.attrsets.nameValuePair "zrepl/certificates/${x}.crt" {
+ permissions = "0400";
+ text = config.myEnv.zrepl_backup.certs."${x}".certificate;
+ user = "backup";
+ group = "root";
+ }) (builtins.attrNames config.myEnv.zrepl_backup.certs));
+
+ environment.etc."mdadm.conf" = {
+ enable = true;
+ mode = "0644";
+ user = "root";
+ text = "MAILADDR ${config.myEnv.monitoring.email}";
+ };
+
+
systemd.services.zrepl.serviceConfig.User = "backup";
+ systemd.services.zrepl.path = [ pkgs.openssh ];
+ # for eldiron:
# zfs allow backup create,mount,receive,destroy,rename,snapshot,hold,bookmark,release zpool/backup
+ # for flony:
+ # zfs allow backup hold,release,bookmark,snapshot,send zpool
services.zrepl = {
enable = true;
- config = ''
- global:
- control:
- sockpath: /run/zrepl/control
- serve:
- stdinserver:
- sockdir: /run/zrepl/stdinserver
- jobs:
- - type: sink
+ settings = {
+ global.control.sockpath = "/run/zrepl/control";
+ global.serve.stdinserver.sockdir = "/run/zrepl/stdinserver";
+ jobs = [
+ {
+ type = "sink";
# must not change
- name: "backup-from-eldiron"
- root_fs: "zpool/backup"
- serve:
- type: stdinserver
- client_identities:
- - eldiron
- '';
+ name = "backup-from-eldiron";
+ root_fs = "zpool/backup";
+ serve.type = "tls";
+ serve.listen = ":19000";
+ serve.ca = config.secrets.fullPaths."zrepl/certificates/eldiron.crt";
+ serve.cert = config.secrets.fullPaths."zrepl/certificates/dilion.crt";
+ serve.key = config.secrets.fullPaths."zrepl/dilion.key";
+ serve.client_cns = [ "eldiron" ];
+ }
+ {
+ type = "source";
+ # must not change
+ name = "backup-to-wd-zpool";
+ # not encrypted!
+ serve.type = "tls";
+ serve.listen = ":19001";
+ serve.ca = config.secrets.fullPaths."zrepl/certificates/flony.crt";
+ serve.cert = config.secrets.fullPaths."zrepl/certificates/dilion.crt";
+ serve.key = config.secrets.fullPaths."zrepl/dilion.key";
+ serve.client_cns = [ "flony" ];
+ filesystems."zpool/libvirt<" = true;
+ filesystems."zpool/root<" = true;
+ snapshotting.type = "manual";
+ }
+ {
+ type = "source";
+ # must not change
+ name = "backup-to-wd-zpool-docker";
+ # not encrypted!
+ serve.type = "tls";
+ serve.listen = ":19002";
+ serve.ca = config.secrets.fullPaths."zrepl/certificates/flony.crt";
+ serve.cert = config.secrets.fullPaths."zrepl/certificates/dilion.crt";
+ serve.key = config.secrets.fullPaths."zrepl/dilion.key";
+ serve.client_cns = [ "flony" ];
+ filesystems."zpool/docker<" = true;
+ snapshotting.type = "manual";
+ }
+ ];
+ };
};
# This value determines the NixOS release with which your system is
# to be compatible, in order to avoid breaking some software such as
# database servers. You should change this only after NixOS release
# notes say you should.
# https://nixos.org/nixos/manual/release-notes.html
- system.stateVersion = "20.03"; # Did you read the comment?
+ system.stateVersion = "23.05"; # Did you read the comment?
}