+++ /dev/null
-{ ... }:
-{
- config = {
- services.backup.enable = true;
- };
-}
{ lib, pkgs, config, ... }:
{
- options.services.myCertificates = {
+ options.myServices.certificates = {
+ enable = lib.mkEnableOption "certificates";
certConfig = lib.mkOption {
default = {
webroot = "${config.security.acme.directory}/acme-challenge";
};
};
- config = {
+ config = lib.mkIf config.myServices.certificates.enable {
services.backup.profiles.system.excludeFile = ''
+ ${config.security.acme.directory}
'';
- services.websites.certs = config.services.myCertificates.certConfig;
- myServices.databasesCerts = config.services.myCertificates.certConfig;
- myServices.ircCerts = config.services.myCertificates.certConfig;
+ services.websites.certs = config.myServices.certificates.certConfig;
+ myServices.databasesCerts = config.myServices.certificates.certConfig;
+ myServices.ircCerts = config.myServices.certificates.certConfig;
security.acme.preliminarySelfsigned = true;
security.acme.certs = {
- "eldiron" = config.services.myCertificates.certConfig // {
+ "eldiron" = config.myServices.certificates.certConfig // {
domain = "eldiron.immae.eu";
};
};
options.myServices.databases = {
mariadb = {
enable = lib.mkOption {
- default = cfg.enable;
+ default = false;
example = true;
description = "Whether to enable mariadb database";
type = lib.types.bool;
options.myServices.databases = {
openldap = {
enable = lib.mkOption {
- default = cfg.enable;
+ default = false;
example = true;
description = "Whether to enable ldap";
type = lib.types.bool;
options.myServices.databases = {
postgresql = {
enable = lib.mkOption {
- default = cfg.enable;
+ default = false;
example = true;
description = "Whether to enable postgresql database";
type = lib.types.bool;
in {
options.myServices.databases.redis = {
enable = lib.mkOption {
- default = cfg.enable;
+ default = false;
example = true;
description = "Whether to enable redis database";
type = lib.types.bool;
mailTool = ./websites/tools/mail;
mail = ./mail;
- mailMilters = ./mail/milters.nix;
- mailPostfix = ./mail/postfix.nix;
- mailDovecot = ./mail/dovecot.nix;
- mailRspamd = ./mail/rspamd.nix;
buildbot = ./buildbot;
certificates = ./certificates.nix;
ftp = ./ftp.nix;
mpd = ./mpd.nix;
ssh = ./ssh;
- backup = ./backup.nix;
monitoring = ./monitoring;
system = ./system.nix;
{ lib, pkgs, config, myconfig, ... }:
{
+ options.myServices.dns.enable = lib.mkEnableOption "DNS resolver";
config = let
cfg = config.services.bind;
configFile = pkgs.writeText "named.conf" ''
'')
cfg.zones }
'';
- in
- {
+ in lib.mkIf config.myServices.dns.enable {
networking.firewall.allowedUDPPorts = [ 53 ];
networking.firewall.allowedTCPPorts = [ 53 ];
services.bind = {
services.backup.profiles.ftp = {
rootDir = "/var/lib/ftp";
};
- security.acme.certs."ftp" = config.services.myCertificates.certConfig // {
+ security.acme.certs."ftp" = config.myServices.certificates.certConfig // {
domain = "eldiron.immae.eu";
postRun = ''
systemctl restart pure-ftpd.service
{ lib, pkgs, config, myconfig, ... }:
{
- config.security.acme.certs."mail" = config.services.myCertificates.certConfig // {
- domain = "eldiron.immae.eu";
- extraDomains = let
- zonesWithMx = builtins.filter (zone:
- lib.attrsets.hasAttr "withEmail" zone && lib.lists.length zone.withEmail > 0
- ) myconfig.env.dns.masterZones;
- mxs = map (zone: "mx-1.${zone.name}") zonesWithMx;
- in builtins.listToAttrs (map (mx: lib.attrsets.nameValuePair mx null) mxs);
- };
- config.services.backup.profiles = {
- mail = {
- rootDir = "/var/lib";
- excludeFile = lib.mkAfter ''
- + /var/lib/vhost
- - /var/lib
- '';
+ imports = [
+ ./milters.nix
+ ./postfix.nix
+ ./dovecot.nix
+ ./rspamd.nix
+ ];
+ options.myServices.mail.enable = lib.mkEnableOption "Mail services";
+
+ config = lib.mkIf config.myServices.mail.enable {
+ security.acme.certs."mail" = config.myServices.certificates.certConfig // {
+ domain = "eldiron.immae.eu";
+ extraDomains = let
+ zonesWithMx = builtins.filter (zone:
+ lib.attrsets.hasAttr "withEmail" zone && lib.lists.length zone.withEmail > 0
+ ) myconfig.env.dns.masterZones;
+ mxs = map (zone: "mx-1.${zone.name}") zonesWithMx;
+ in builtins.listToAttrs (map (mx: lib.attrsets.nameValuePair mx null) mxs);
+ };
+ services.backup.profiles = {
+ mail = {
+ rootDir = "/var/lib";
+ excludeFile = lib.mkAfter ''
+ + /var/lib/vhost
+ - /var/lib
+ '';
+ };
};
};
}
'';
in
{
- config.services.backup.profiles.mail.excludeFile = ''
- + /var/lib/dhparams
- + /var/lib/dovecot
- '';
- config.secrets.keys = [
- {
- dest = "dovecot/ldap";
- user = config.services.dovecot2.user;
- group = config.services.dovecot2.group;
- permissions = "0400";
- text = ''
- hosts = ${myconfig.env.mail.dovecot.ldap.host}
- tls = yes
-
- dn = ${myconfig.env.mail.dovecot.ldap.dn}
- dnpass = ${myconfig.env.mail.dovecot.ldap.password}
+ config = lib.mkIf config.myServices.mail.enable {
+ services.backup.profiles.mail.excludeFile = ''
+ + /var/lib/dhparams
+ + /var/lib/dovecot
+ '';
+ secrets.keys = [
+ {
+ dest = "dovecot/ldap";
+ user = config.services.dovecot2.user;
+ group = config.services.dovecot2.group;
+ permissions = "0400";
+ text = ''
+ hosts = ${myconfig.env.mail.dovecot.ldap.host}
+ tls = yes
- auth_bind = yes
+ dn = ${myconfig.env.mail.dovecot.ldap.dn}
+ dnpass = ${myconfig.env.mail.dovecot.ldap.password}
- ldap_version = 3
+ auth_bind = yes
- base = ${myconfig.env.mail.dovecot.ldap.base}
- scope = subtree
+ ldap_version = 3
- user_filter = ${myconfig.env.mail.dovecot.ldap.filter}
- pass_filter = ${myconfig.env.mail.dovecot.ldap.filter}
+ base = ${myconfig.env.mail.dovecot.ldap.base}
+ scope = subtree
- user_attrs = ${myconfig.env.mail.dovecot.ldap.user_attrs}
- pass_attrs = ${myconfig.env.mail.dovecot.ldap.pass_attrs}
- '';
- }
- ];
+ user_filter = ${myconfig.env.mail.dovecot.ldap.filter}
+ pass_filter = ${myconfig.env.mail.dovecot.ldap.filter}
- config.users.users.vhost = {
- group = "vhost";
- uid = config.ids.uids.vhost;
- };
- config.users.groups.vhost.gid = config.ids.gids.vhost;
-
- # https://blog.zeninc.net/index.php?post/2018/04/01/Un-annuaire-pour-les-gouverner-tous.......
- config.services.dovecot2 = {
- enable = true;
- enablePAM = false;
- enablePop3 = true;
- enableImap = true;
- enableLmtp = true;
- protocols = [ "sieve" ];
- modules = [
- pkgs.dovecot_pigeonhole
- pkgs.dovecot_fts-xapian
- ];
- mailUser = "vhost";
- mailGroup = "vhost";
- createMailUser = false;
- mailboxes = [
- { name = "Trash"; auto = "subscribe"; specialUse = "Trash"; }
- { name = "Junk"; auto = "subscribe"; specialUse = "Junk"; }
- { name = "Sent"; auto = "subscribe"; specialUse = "Sent"; }
- { name = "Drafts"; auto = "subscribe"; specialUse = "Drafts"; }
+ user_attrs = ${myconfig.env.mail.dovecot.ldap.user_attrs}
+ pass_attrs = ${myconfig.env.mail.dovecot.ldap.pass_attrs}
+ '';
+ }
];
- mailLocation = "mbox:~/Mail:INBOX=~/Mail/Inbox:INDEX=~/.imap";
- sslServerCert = "/var/lib/acme/mail/fullchain.pem";
- sslServerKey = "/var/lib/acme/mail/key.pem";
- sslCACert = "/var/lib/acme/mail/fullchain.pem";
- extraConfig = builtins.concatStringsSep "\n" [
- ''
- postmaster_address = postmaster@immae.eu
- mail_attribute_dict = file:%h/dovecot-attributes
- imap_idle_notify_interval = 20 mins
- namespace inbox {
- type = private
- separator = /
- inbox = yes
- list = yes
- }
- ''
-
- # Full text search
- ''
- # needs to be bigger than any mailbox size
- default_vsz_limit = 2GB
- mail_plugins = $mail_plugins fts fts_xapian
+
+ users.users.vhost = {
+ group = "vhost";
+ uid = config.ids.uids.vhost;
+ };
+ users.groups.vhost.gid = config.ids.gids.vhost;
+
+ # https://blog.zeninc.net/index.php?post/2018/04/01/Un-annuaire-pour-les-gouverner-tous.......
+ services.dovecot2 = {
+ enable = true;
+ enablePAM = false;
+ enablePop3 = true;
+ enableImap = true;
+ enableLmtp = true;
+ protocols = [ "sieve" ];
+ modules = [
+ pkgs.dovecot_pigeonhole
+ pkgs.dovecot_fts-xapian
+ ];
+ mailUser = "vhost";
+ mailGroup = "vhost";
+ createMailUser = false;
+ mailboxes = [
+ { name = "Trash"; auto = "subscribe"; specialUse = "Trash"; }
+ { name = "Junk"; auto = "subscribe"; specialUse = "Junk"; }
+ { name = "Sent"; auto = "subscribe"; specialUse = "Sent"; }
+ { name = "Drafts"; auto = "subscribe"; specialUse = "Drafts"; }
+ ];
+ mailLocation = "mbox:~/Mail:INBOX=~/Mail/Inbox:INDEX=~/.imap";
+ sslServerCert = "/var/lib/acme/mail/fullchain.pem";
+ sslServerKey = "/var/lib/acme/mail/key.pem";
+ sslCACert = "/var/lib/acme/mail/fullchain.pem";
+ extraConfig = builtins.concatStringsSep "\n" [
+ ''
+ postmaster_address = postmaster@immae.eu
+ mail_attribute_dict = file:%h/dovecot-attributes
+ imap_idle_notify_interval = 20 mins
+ namespace inbox {
+ type = private
+ separator = /
+ inbox = yes
+ list = yes
+ }
+ ''
+
+ # Full text search
+ ''
+ # needs to be bigger than any mailbox size
+ default_vsz_limit = 2GB
+ mail_plugins = $mail_plugins fts fts_xapian
+ plugin {
+ plugin = fts fts_xapian
+ fts = xapian
+ fts_xapian = partial=2 full=20
+ fts_autoindex = yes
+ fts_autoindex_exclude = \Junk
+ fts_autoindex_exclude2 = \Trash
+ fts_autoindex_exclude3 = Virtual/*
+ }
+ ''
+
+ # Antispam
+ # https://docs.iredmail.org/dovecot.imapsieve.html
+ ''
+ # imap_sieve plugin added below
+
plugin {
- plugin = fts fts_xapian
- fts = xapian
- fts_xapian = partial=2 full=20
- fts_autoindex = yes
- fts_autoindex_exclude = \Junk
- fts_autoindex_exclude2 = \Trash
- fts_autoindex_exclude3 = Virtual/*
- }
- ''
-
- # Antispam
- # https://docs.iredmail.org/dovecot.imapsieve.html
- ''
- # imap_sieve plugin added below
-
- plugin {
- sieve_plugins = sieve_imapsieve sieve_extprograms
- imapsieve_url = sieve://127.0.0.1:4190
-
- # From elsewhere to Junk folder
- imapsieve_mailbox1_name = Junk
- imapsieve_mailbox1_causes = COPY APPEND
- imapsieve_mailbox1_before = file:${./sieve_scripts}/report_spam.sieve;bindir=/var/lib/vhost/.imapsieve_bin
-
- # From Junk folder to elsewhere
- imapsieve_mailbox2_name = *
- imapsieve_mailbox2_from = Junk
- imapsieve_mailbox2_causes = COPY
- imapsieve_mailbox2_before = file:${./sieve_scripts}/report_ham.sieve;bindir=/var/lib/vhost/.imapsieve_bin
-
- sieve_pipe_bin_dir = ${sieve_bin}
-
- sieve_global_extensions = +vnd.dovecot.pipe +vnd.dovecot.environment
- }
- ''
- # Services to listen
- ''
- service imap-login {
- inet_listener imap {
+ sieve_plugins = sieve_imapsieve sieve_extprograms
+ imapsieve_url = sieve://127.0.0.1:4190
+
+ # From elsewhere to Junk folder
+ imapsieve_mailbox1_name = Junk
+ imapsieve_mailbox1_causes = COPY APPEND
+ imapsieve_mailbox1_before = file:${./sieve_scripts}/report_spam.sieve;bindir=/var/lib/vhost/.imapsieve_bin
+
+ # From Junk folder to elsewhere
+ imapsieve_mailbox2_name = *
+ imapsieve_mailbox2_from = Junk
+ imapsieve_mailbox2_causes = COPY
+ imapsieve_mailbox2_before = file:${./sieve_scripts}/report_ham.sieve;bindir=/var/lib/vhost/.imapsieve_bin
+
+ sieve_pipe_bin_dir = ${sieve_bin}
+
+ sieve_global_extensions = +vnd.dovecot.pipe +vnd.dovecot.environment
}
- inet_listener imaps {
+ ''
+ # Services to listen
+ ''
+ service imap-login {
+ inet_listener imap {
+ }
+ inet_listener imaps {
+ }
}
- }
- service pop3-login {
- inet_listener pop3 {
+ service pop3-login {
+ inet_listener pop3 {
+ }
+ inet_listener pop3s {
+ }
}
- inet_listener pop3s {
+ service imap {
}
- }
- service imap {
- }
- service pop3 {
- }
- service auth {
- unix_listener auth-userdb {
+ service pop3 {
}
- unix_listener ${config.services.postfix.config.queue_directory}/private/auth {
- mode = 0666
+ service auth {
+ unix_listener auth-userdb {
+ }
+ unix_listener ${config.services.postfix.config.queue_directory}/private/auth {
+ mode = 0666
+ }
}
- }
- service auth-worker {
- }
- service dict {
- unix_listener dict {
+ service auth-worker {
}
- }
- service stats {
- unix_listener stats-reader {
- user = vhost
- group = vhost
- mode = 0660
+ service dict {
+ unix_listener dict {
+ }
}
- unix_listener stats-writer {
- user = vhost
- group = vhost
- mode = 0660
+ service stats {
+ unix_listener stats-reader {
+ user = vhost
+ group = vhost
+ mode = 0660
+ }
+ unix_listener stats-writer {
+ user = vhost
+ group = vhost
+ mode = 0660
+ }
}
- }
- ''
-
- # Authentification
- ''
- first_valid_uid = ${toString config.ids.uids.vhost}
- disable_plaintext_auth = yes
- passdb {
- driver = ldap
- args = ${config.secrets.fullPaths."dovecot/ldap"}
- }
- userdb {
- driver = static
- args = user=%u uid=vhost gid=vhost home=/var/lib/vhost/%d/%n/ mail=mbox:~/Mail:INBOX=~/Mail/Inbox:INDEX=~/.imap
- }
- ''
-
- # Zlib
- ''
- mail_plugins = $mail_plugins zlib
- plugin {
- zlib_save_level = 6
- zlib_save = gz
- }
- ''
+ ''
+
+ # Authentification
+ ''
+ first_valid_uid = ${toString config.ids.uids.vhost}
+ disable_plaintext_auth = yes
+ passdb {
+ driver = ldap
+ args = ${config.secrets.fullPaths."dovecot/ldap"}
+ }
+ userdb {
+ driver = static
+ args = user=%u uid=vhost gid=vhost home=/var/lib/vhost/%d/%n/ mail=mbox:~/Mail:INBOX=~/Mail/Inbox:INDEX=~/.imap
+ }
+ ''
- # Sieve
- ''
- plugin {
- sieve = file:~/sieve;bindir=~/.sieve-bin;active=~/.dovecot.sieve
- }
- service managesieve-login {
- }
- service managesieve {
- }
- ''
-
- # Virtual mailboxes
- ''
- mail_plugins = $mail_plugins virtual
- namespace Virtual {
- prefix = Virtual/
- location = virtual:~/Virtual
- }
- ''
+ # Zlib
+ ''
+ mail_plugins = $mail_plugins zlib
+ plugin {
+ zlib_save_level = 6
+ zlib_save = gz
+ }
+ ''
- # Protocol specific configuration
- # Needs to come last if there are mail_plugins entries
- ''
- protocol imap {
- mail_plugins = $mail_plugins imap_sieve
- }
- protocol lda {
- mail_plugins = $mail_plugins sieve
- }
- ''
- ];
- };
- config.networking.firewall.allowedTCPPorts = [ 110 143 993 995 4190 ];
- config.system.activationScripts.dovecot = {
- deps = [ "users" ];
- text =''
- install -m 0755 -o vhost -g vhost -d /var/lib/vhost
- '';
- };
+ # Sieve
+ ''
+ plugin {
+ sieve = file:~/sieve;bindir=~/.sieve-bin;active=~/.dovecot.sieve
+ }
+ service managesieve-login {
+ }
+ service managesieve {
+ }
+ ''
+
+ # Virtual mailboxes
+ ''
+ mail_plugins = $mail_plugins virtual
+ namespace Virtual {
+ prefix = Virtual/
+ location = virtual:~/Virtual
+ }
+ ''
- config.security.acme.certs."mail" = {
- postRun = ''
- systemctl restart dovecot2.service
- '';
- extraDomains = {
- "imap.immae.eu" = null;
- "pop3.immae.eu" = null;
+ # Protocol specific configuration
+ # Needs to come last if there are mail_plugins entries
+ ''
+ protocol imap {
+ mail_plugins = $mail_plugins imap_sieve
+ }
+ protocol lda {
+ mail_plugins = $mail_plugins sieve
+ }
+ ''
+ ];
+ };
+ networking.firewall.allowedTCPPorts = [ 110 143 993 995 4190 ];
+ system.activationScripts.dovecot = {
+ deps = [ "users" ];
+ text = ''
+ install -m 0755 -o vhost -g vhost -d /var/lib/vhost
+ '';
+ };
+
+ security.acme.certs."mail" = {
+ postRun = ''
+ systemctl restart dovecot2.service
+ '';
+ extraDomains = {
+ "imap.immae.eu" = null;
+ "pop3.immae.eu" = null;
+ };
};
};
}
milters sockets
'';
};
- config.secrets.keys = [
- {
- dest = "opendkim/eldiron.private";
- user = config.services.opendkim.user;
- group = config.services.opendkim.group;
- permissions = "0400";
- text = myconfig.env.mail.dkim.eldiron.private;
- }
- {
- dest = "opendkim/eldiron.txt";
- user = config.services.opendkim.user;
- group = config.services.opendkim.group;
- permissions = "0444";
- text = ''
- eldiron._domainkey IN TXT ${myconfig.env.mail.dkim.eldiron.public}'';
- }
- {
- dest = "opendmarc/ignore.hosts";
- user = config.services.opendmarc.user;
- group = config.services.opendmarc.group;
- permissions = "0400";
- text = myconfig.env.mail.dmarc.ignore_hosts;
- }
- ];
- config.users.users."${config.services.opendkim.user}".extraGroups = [ "keys" ];
- config.services.opendkim = {
- enable = true;
- socket = "local:${config.myServices.mail.milters.sockets.opendkim}";
- domains = builtins.concatStringsSep "," (lib.flatten (map
- (zone: map
- (e: "${e.domain}${lib.optionalString (e.domain != "") "."}${zone.name}")
- (zone.withEmail or [])
- )
- myconfig.env.dns.masterZones
- ));
- keyPath = "${config.secrets.location}/opendkim";
- selector = "eldiron";
- configFile = pkgs.writeText "opendkim.conf" ''
- SubDomains yes
- UMask 002
- '';
- group = config.services.postfix.group;
- };
- config.systemd.services.opendkim.preStart = lib.mkBefore ''
- # Skip the prestart script as keys are handled in secrets
- exit 0
- '';
- config.services.filesWatcher.opendkim = {
- restart = true;
- paths = [
- config.secrets.fullPaths."opendkim/eldiron.private"
+ config = lib.mkIf config.myServices.mail.enable {
+ secrets.keys = [
+ {
+ dest = "opendkim/eldiron.private";
+ user = config.services.opendkim.user;
+ group = config.services.opendkim.group;
+ permissions = "0400";
+ text = myconfig.env.mail.dkim.eldiron.private;
+ }
+ {
+ dest = "opendkim/eldiron.txt";
+ user = config.services.opendkim.user;
+ group = config.services.opendkim.group;
+ permissions = "0444";
+ text = ''
+ eldiron._domainkey IN TXT ${myconfig.env.mail.dkim.eldiron.public}'';
+ }
+ {
+ dest = "opendmarc/ignore.hosts";
+ user = config.services.opendmarc.user;
+ group = config.services.opendmarc.group;
+ permissions = "0400";
+ text = myconfig.env.mail.dmarc.ignore_hosts;
+ }
];
- };
-
- config.users.users."${config.services.opendmarc.user}".extraGroups = [ "keys" ];
- config.services.opendmarc = {
- enable = true;
- socket = "local:${config.myServices.mail.milters.sockets.opendmarc}";
- configFile = pkgs.writeText "opendmarc.conf" ''
- AuthservID HOSTNAME
- FailureReports false
- FailureReportsBcc postmaster@localhost.immae.eu
- FailureReportsOnNone true
- FailureReportsSentBy postmaster@immae.eu
- IgnoreAuthenticatedClients true
- IgnoreHosts ${config.secrets.fullPaths."opendmarc/ignore.hosts"}
- SoftwareHeader true
- SPFSelfValidate true
- TrustedAuthservIDs HOSTNAME, immae.eu, nef2.ens.fr
- UMask 002
+ users.users."${config.services.opendkim.user}".extraGroups = [ "keys" ];
+ services.opendkim = {
+ enable = true;
+ socket = "local:${config.myServices.mail.milters.sockets.opendkim}";
+ domains = builtins.concatStringsSep "," (lib.flatten (map
+ (zone: map
+ (e: "${e.domain}${lib.optionalString (e.domain != "") "."}${zone.name}")
+ (zone.withEmail or [])
+ )
+ myconfig.env.dns.masterZones
+ ));
+ keyPath = "${config.secrets.location}/opendkim";
+ selector = "eldiron";
+ configFile = pkgs.writeText "opendkim.conf" ''
+ SubDomains yes
+ UMask 002
+ '';
+ group = config.services.postfix.group;
+ };
+ systemd.services.opendkim.preStart = lib.mkBefore ''
+ # Skip the prestart script as keys are handled in secrets
+ exit 0
'';
- group = config.services.postfix.group;
- };
- config.services.filesWatcher.opendmarc = {
- restart = true;
- paths = [
- config.secrets.fullPaths."opendmarc/ignore.hosts"
- ];
- };
+ services.filesWatcher.opendkim = {
+ restart = true;
+ paths = [
+ config.secrets.fullPaths."opendkim/eldiron.private"
+ ];
+ };
+
+ users.users."${config.services.opendmarc.user}".extraGroups = [ "keys" ];
+ services.opendmarc = {
+ enable = true;
+ socket = "local:${config.myServices.mail.milters.sockets.opendmarc}";
+ configFile = pkgs.writeText "opendmarc.conf" ''
+ AuthservID HOSTNAME
+ FailureReports false
+ FailureReportsBcc postmaster@localhost.immae.eu
+ FailureReportsOnNone true
+ FailureReportsSentBy postmaster@immae.eu
+ IgnoreAuthenticatedClients true
+ IgnoreHosts ${config.secrets.fullPaths."opendmarc/ignore.hosts"}
+ SoftwareHeader true
+ SPFSelfValidate true
+ TrustedAuthservIDs HOSTNAME, immae.eu, nef2.ens.fr
+ UMask 002
+ '';
+ group = config.services.postfix.group;
+ };
+ services.filesWatcher.opendmarc = {
+ restart = true;
+ paths = [
+ config.secrets.fullPaths."opendmarc/ignore.hosts"
+ ];
+ };
- config.services.openarc = {
- enable = true;
- user = "opendkim";
- socket = "local:${config.myServices.mail.milters.sockets.openarc}";
- group = config.services.postfix.group;
- configFile = pkgs.writeText "openarc.conf" ''
- AuthservID mail.immae.eu
- Domain mail.immae.eu
- KeyFile ${config.secrets.fullPaths."opendkim/eldiron.private"}
- Mode sv
- Selector eldiron
- SoftwareHeader yes
- Syslog Yes
+ services.openarc = {
+ enable = true;
+ user = "opendkim";
+ socket = "local:${config.myServices.mail.milters.sockets.openarc}";
+ group = config.services.postfix.group;
+ configFile = pkgs.writeText "openarc.conf" ''
+ AuthservID mail.immae.eu
+ Domain mail.immae.eu
+ KeyFile ${config.secrets.fullPaths."opendkim/eldiron.private"}
+ Mode sv
+ Selector eldiron
+ SoftwareHeader yes
+ Syslog Yes
+ '';
+ };
+ systemd.services.openarc.postStart = lib.optionalString
+ (lib.strings.hasPrefix "local:" config.services.openarc.socket) ''
+ while [ ! -S ${lib.strings.removePrefix "local:" config.services.openarc.socket} ]; do
+ sleep 0.5
+ done
+ chmod g+w ${lib.strings.removePrefix "local:" config.services.openarc.socket}
'';
- };
- config.systemd.services.openarc.postStart = lib.optionalString
- (lib.strings.hasPrefix "local:" config.services.openarc.socket) ''
- while [ ! -S ${lib.strings.removePrefix "local:" config.services.openarc.socket} ]; do
- sleep 0.5
- done
- chmod g+w ${lib.strings.removePrefix "local:" config.services.openarc.socket}
- '';
- config.services.filesWatcher.openarc = {
- restart = true;
- paths = [
- config.secrets.fullPaths."opendkim/eldiron.private"
- ];
+ services.filesWatcher.openarc = {
+ restart = true;
+ paths = [
+ config.secrets.fullPaths."opendkim/eldiron.private"
+ ];
+ };
};
}
{ lib, pkgs, config, myconfig, ... }:
{
- config.services.backup.profiles.mail.excludeFile = ''
- + /var/lib/postfix
- '';
- config.secrets.keys = [
- {
- dest = "postfix/mysql_alias_maps";
- user = config.services.postfix.user;
- group = config.services.postfix.group;
- permissions = "0440";
- text = ''
- # We need to specify that option to trigger ssl connection
- tls_ciphers = TLSv1.2
- user = ${myconfig.env.mail.postfix.mysql.user}
- password = ${myconfig.env.mail.postfix.mysql.password}
- hosts = unix:${myconfig.env.mail.postfix.mysql.socket}
- dbname = ${myconfig.env.mail.postfix.mysql.database}
- query = SELECT DISTINCT destination
- FROM forwardings_merge
- WHERE
- ((regex = 1 AND '%s' REGEXP CONCAT('^',source,'$') ) OR (regex = 0 AND source = '%s'))
- AND active = 1
- AND '%s' NOT IN
- (
- SELECT source
+ config = lib.mkIf config.myServices.mail.enable {
+ services.backup.profiles.mail.excludeFile = ''
+ + /var/lib/postfix
+ '';
+ secrets.keys = [
+ {
+ dest = "postfix/mysql_alias_maps";
+ user = config.services.postfix.user;
+ group = config.services.postfix.group;
+ permissions = "0440";
+ text = ''
+ # We need to specify that option to trigger ssl connection
+ tls_ciphers = TLSv1.2
+ user = ${myconfig.env.mail.postfix.mysql.user}
+ password = ${myconfig.env.mail.postfix.mysql.password}
+ hosts = unix:${myconfig.env.mail.postfix.mysql.socket}
+ dbname = ${myconfig.env.mail.postfix.mysql.database}
+ query = SELECT DISTINCT destination
+ FROM forwardings_merge
+ WHERE
+ ((regex = 1 AND '%s' REGEXP CONCAT('^',source,'$') ) OR (regex = 0 AND source = '%s'))
+ AND active = 1
+ AND '%s' NOT IN
+ (
+ SELECT source
+ FROM forwardings_blacklisted
+ WHERE source = '%s'
+ ) UNION
+ SELECT 'devnull@immae.eu'
FROM forwardings_blacklisted
WHERE source = '%s'
- ) UNION
- SELECT 'devnull@immae.eu'
- FROM forwardings_blacklisted
- WHERE source = '%s'
- '';
- }
- {
- dest = "postfix/mysql_mailbox_maps";
- user = config.services.postfix.user;
- group = config.services.postfix.group;
- permissions = "0440";
- text = ''
- # We need to specify that option to trigger ssl connection
- tls_ciphers = TLSv1.2
- user = ${myconfig.env.mail.postfix.mysql.user}
- password = ${myconfig.env.mail.postfix.mysql.password}
- hosts = unix:${myconfig.env.mail.postfix.mysql.socket}
- dbname = ${myconfig.env.mail.postfix.mysql.database}
- result_format = /%d/%u
- query = SELECT DISTINCT '%s'
- FROM mailboxes
- WHERE active = 1
- AND (
- (domain = '%d' AND user = '%u' AND regex = 0)
- OR (
- regex = 1
- AND '%d' REGEXP CONCAT('^',domain,'$')
- AND '%u' REGEXP CONCAT('^',user,'$')
+ '';
+ }
+ {
+ dest = "postfix/mysql_mailbox_maps";
+ user = config.services.postfix.user;
+ group = config.services.postfix.group;
+ permissions = "0440";
+ text = ''
+ # We need to specify that option to trigger ssl connection
+ tls_ciphers = TLSv1.2
+ user = ${myconfig.env.mail.postfix.mysql.user}
+ password = ${myconfig.env.mail.postfix.mysql.password}
+ hosts = unix:${myconfig.env.mail.postfix.mysql.socket}
+ dbname = ${myconfig.env.mail.postfix.mysql.database}
+ result_format = /%d/%u
+ query = SELECT DISTINCT '%s'
+ FROM mailboxes
+ WHERE active = 1
+ AND (
+ (domain = '%d' AND user = '%u' AND regex = 0)
+ OR (
+ regex = 1
+ AND '%d' REGEXP CONCAT('^',domain,'$')
+ AND '%u' REGEXP CONCAT('^',user,'$')
+ )
)
- )
- LIMIT 1
- '';
- }
- {
- dest = "postfix/mysql_sender_login_maps";
- user = config.services.postfix.user;
- group = config.services.postfix.group;
- permissions = "0440";
- text = ''
- # We need to specify that option to trigger ssl connection
- tls_ciphers = TLSv1.2
- user = ${myconfig.env.mail.postfix.mysql.user}
- password = ${myconfig.env.mail.postfix.mysql.password}
- hosts = unix:${myconfig.env.mail.postfix.mysql.socket}
- dbname = ${myconfig.env.mail.postfix.mysql.database}
- query = SELECT DISTINCT destination
- FROM forwardings_merge
- WHERE
- ((regex = 1 AND '%s' REGEXP CONCAT('^',source,'$') ) OR (regex = 0 AND source = '%s'))
- AND active = 1
- UNION SELECT '%s' AS destination
+ LIMIT 1
'';
- }
- ];
+ }
+ {
+ dest = "postfix/mysql_sender_login_maps";
+ user = config.services.postfix.user;
+ group = config.services.postfix.group;
+ permissions = "0440";
+ text = ''
+ # We need to specify that option to trigger ssl connection
+ tls_ciphers = TLSv1.2
+ user = ${myconfig.env.mail.postfix.mysql.user}
+ password = ${myconfig.env.mail.postfix.mysql.password}
+ hosts = unix:${myconfig.env.mail.postfix.mysql.socket}
+ dbname = ${myconfig.env.mail.postfix.mysql.database}
+ query = SELECT DISTINCT destination
+ FROM forwardings_merge
+ WHERE
+ ((regex = 1 AND '%s' REGEXP CONCAT('^',source,'$') ) OR (regex = 0 AND source = '%s'))
+ AND active = 1
+ UNION SELECT '%s' AS destination
+ '';
+ }
+ ];
- config.networking.firewall.allowedTCPPorts = [ 25 465 587 ];
+ networking.firewall.allowedTCPPorts = [ 25 465 587 ];
- config.nixpkgs.overlays = [ (self: super: {
- postfix = super.postfix.override { withMySQL = true; };
- }) ];
- config.users.users."${config.services.postfix.user}".extraGroups = [ "keys" ];
- config.services.filesWatcher.postfix = {
- restart = true;
- paths = [
- config.secrets.fullPaths."postfix/mysql_alias_maps"
- config.secrets.fullPaths."postfix/mysql_mailbox_maps"
- config.secrets.fullPaths."postfix/mysql_sender_login_maps"
- ];
- };
- config.services.postfix = {
- mapFiles = let
- recipient_maps = let
- name = n: i: "relay_${n}_${toString i}";
- pair = n: i: m: lib.attrsets.nameValuePair (name n i) (
- if m.type == "hash"
- then pkgs.writeText (name n i) m.content
- else null
- );
- pairs = n: v: lib.imap1 (i: m: pair n i m) v.recipient_maps;
- in lib.attrsets.filterAttrs (k: v: v != null) (
- lib.attrsets.listToAttrs (lib.flatten (
- lib.attrsets.mapAttrsToList pairs myconfig.env.mail.postfix.backup_domains
- ))
- );
- relay_restrictions = lib.attrsets.filterAttrs (k: v: v != null) (
- lib.attrsets.mapAttrs' (n: v:
- lib.attrsets.nameValuePair "recipient_access_${n}" (
- if lib.attrsets.hasAttr "relay_restrictions" v
- then pkgs.writeText "recipient_access_${n}" v.relay_restrictions
+ nixpkgs.overlays = [ (self: super: {
+ postfix = super.postfix.override { withMySQL = true; };
+ }) ];
+ users.users."${config.services.postfix.user}".extraGroups = [ "keys" ];
+ services.filesWatcher.postfix = {
+ restart = true;
+ paths = [
+ config.secrets.fullPaths."postfix/mysql_alias_maps"
+ config.secrets.fullPaths."postfix/mysql_mailbox_maps"
+ config.secrets.fullPaths."postfix/mysql_sender_login_maps"
+ ];
+ };
+ services.postfix = {
+ mapFiles = let
+ recipient_maps = let
+ name = n: i: "relay_${n}_${toString i}";
+ pair = n: i: m: lib.attrsets.nameValuePair (name n i) (
+ if m.type == "hash"
+ then pkgs.writeText (name n i) m.content
else null
- )
- ) myconfig.env.mail.postfix.backup_domains
- );
- in
- recipient_maps // relay_restrictions;
- config = {
- ### postfix module overrides
- readme_directory = "${pkgs.postfix}/share/postfix/doc";
- smtp_tls_CAfile = lib.mkForce "";
- smtp_tls_cert_file = lib.mkForce "";
- smtp_tls_key_file = lib.mkForce "";
+ );
+ pairs = n: v: lib.imap1 (i: m: pair n i m) v.recipient_maps;
+ in lib.attrsets.filterAttrs (k: v: v != null) (
+ lib.attrsets.listToAttrs (lib.flatten (
+ lib.attrsets.mapAttrsToList pairs myconfig.env.mail.postfix.backup_domains
+ ))
+ );
+ relay_restrictions = lib.attrsets.filterAttrs (k: v: v != null) (
+ lib.attrsets.mapAttrs' (n: v:
+ lib.attrsets.nameValuePair "recipient_access_${n}" (
+ if lib.attrsets.hasAttr "relay_restrictions" v
+ then pkgs.writeText "recipient_access_${n}" v.relay_restrictions
+ else null
+ )
+ ) myconfig.env.mail.postfix.backup_domains
+ );
+ in
+ recipient_maps // relay_restrictions;
+ config = {
+ ### postfix module overrides
+ readme_directory = "${pkgs.postfix}/share/postfix/doc";
+ smtp_tls_CAfile = lib.mkForce "";
+ smtp_tls_cert_file = lib.mkForce "";
+ smtp_tls_key_file = lib.mkForce "";
- message_size_limit = "1073741824"; # Don't put 0 here, it's not equivalent to "unlimited"
- alias_database = "\$alias_maps";
+ message_size_limit = "1073741824"; # Don't put 0 here, it's not equivalent to "unlimited"
+ alias_database = "\$alias_maps";
- ### Virtual mailboxes config
- virtual_alias_maps = "mysql:${config.secrets.fullPaths."postfix/mysql_alias_maps"}";
- virtual_mailbox_domains = myconfig.env.mail.postfix.additional_mailbox_domains
- ++ lib.remove "localhost.immae.eu" (lib.remove null (lib.flatten (map
- (zone: map
- (e: if e.receive
- then "${e.domain}${lib.optionalString (e.domain != "") "."}${zone.name}"
- else null
+ ### Virtual mailboxes config
+ virtual_alias_maps = "mysql:${config.secrets.fullPaths."postfix/mysql_alias_maps"}";
+ virtual_mailbox_domains = myconfig.env.mail.postfix.additional_mailbox_domains
+ ++ lib.remove "localhost.immae.eu" (lib.remove null (lib.flatten (map
+ (zone: map
+ (e: if e.receive
+ then "${e.domain}${lib.optionalString (e.domain != "") "."}${zone.name}"
+ else null
+ )
+ (zone.withEmail or [])
)
- (zone.withEmail or [])
- )
- myconfig.env.dns.masterZones
- )));
- virtual_mailbox_maps = "mysql:${config.secrets.fullPaths."postfix/mysql_mailbox_maps"}";
- dovecot_destination_recipient_limit = "1";
- virtual_transport = "dovecot";
+ myconfig.env.dns.masterZones
+ )));
+ virtual_mailbox_maps = "mysql:${config.secrets.fullPaths."postfix/mysql_mailbox_maps"}";
+ dovecot_destination_recipient_limit = "1";
+ virtual_transport = "dovecot";
- ### Relay domains
- relay_domains = lib.flatten (lib.attrsets.mapAttrsToList (n: v: v.domains or []) myconfig.env.mail.postfix.backup_domains);
- relay_recipient_maps = lib.flatten (lib.attrsets.mapAttrsToList (n: v:
- lib.imap1 (i: m: "${m.type}:/etc/postfix/relay_${n}_${toString i}") v.recipient_maps
- ) myconfig.env.mail.postfix.backup_domains);
- smtpd_relay_restrictions = [
- "permit_mynetworks"
- "permit_sasl_authenticated"
- "defer_unauth_destination"
- ] ++ lib.flatten (lib.attrsets.mapAttrsToList (n: v:
- if lib.attrsets.hasAttr "relay_restrictions" v
- then [ "check_recipient_access hash:/etc/postfix/recipient_access_${n}" ]
- else []
- ) myconfig.env.mail.postfix.backup_domains);
+ ### Relay domains
+ relay_domains = lib.flatten (lib.attrsets.mapAttrsToList (n: v: v.domains or []) myconfig.env.mail.postfix.backup_domains);
+ relay_recipient_maps = lib.flatten (lib.attrsets.mapAttrsToList (n: v:
+ lib.imap1 (i: m: "${m.type}:/etc/postfix/relay_${n}_${toString i}") v.recipient_maps
+ ) myconfig.env.mail.postfix.backup_domains);
+ smtpd_relay_restrictions = [
+ "permit_mynetworks"
+ "permit_sasl_authenticated"
+ "defer_unauth_destination"
+ ] ++ lib.flatten (lib.attrsets.mapAttrsToList (n: v:
+ if lib.attrsets.hasAttr "relay_restrictions" v
+ then [ "check_recipient_access hash:/etc/postfix/recipient_access_${n}" ]
+ else []
+ ) myconfig.env.mail.postfix.backup_domains);
- ### Additional smtpd configuration
- smtpd_tls_received_header = "yes";
- smtpd_tls_loglevel = "1";
+ ### Additional smtpd configuration
+ smtpd_tls_received_header = "yes";
+ smtpd_tls_loglevel = "1";
- ### Email sending configuration
- smtp_tls_security_level = "may";
- smtp_tls_loglevel = "1";
+ ### Email sending configuration
+ smtp_tls_security_level = "may";
+ smtp_tls_loglevel = "1";
- ### Force ip bind for smtp
- smtp_bind_address = myconfig.env.servers.eldiron.ips.main.ip4;
- smtp_bind_address6 = builtins.head myconfig.env.servers.eldiron.ips.main.ip6;
+ ### Force ip bind for smtp
+ smtp_bind_address = myconfig.env.servers.eldiron.ips.main.ip4;
+ smtp_bind_address6 = builtins.head myconfig.env.servers.eldiron.ips.main.ip6;
- # #Unneeded if postfix can only send e-mail from "self" domains
- # #smtp_sasl_auth_enable = "yes";
- # #smtp_sasl_password_maps = "hash:/etc/postfix/relay_creds";
- # #smtp_sasl_security_options = "noanonymous";
- # #smtp_sender_dependent_authentication = "yes";
- # #sender_dependent_relayhost_maps = "hash:/etc/postfix/sender_relay";
+ # #Unneeded if postfix can only send e-mail from "self" domains
+ # #smtp_sasl_auth_enable = "yes";
+ # #smtp_sasl_password_maps = "hash:/etc/postfix/relay_creds";
+ # #smtp_sasl_security_options = "noanonymous";
+ # #smtp_sender_dependent_authentication = "yes";
+ # #sender_dependent_relayhost_maps = "hash:/etc/postfix/sender_relay";
- ### opendkim, opendmarc, openarc milters
- non_smtpd_milters = [
- "unix:${config.myServices.mail.milters.sockets.opendkim}"
- "unix:${config.myServices.mail.milters.sockets.opendmarc}"
- "unix:${config.myServices.mail.milters.sockets.openarc}"
- ];
- smtpd_milters = [
- "unix:${config.myServices.mail.milters.sockets.opendkim}"
- "unix:${config.myServices.mail.milters.sockets.opendmarc}"
- "unix:${config.myServices.mail.milters.sockets.openarc}"
- ];
- };
- enable = true;
- enableSmtp = true;
- enableSubmission = true;
- submissionOptions = {
- smtpd_tls_security_level = "encrypt";
- smtpd_sasl_auth_enable = "yes";
- smtpd_tls_auth_only = "yes";
- smtpd_sasl_tls_security_options = "noanonymous";
- smtpd_sasl_type = "dovecot";
- smtpd_sasl_path = "private/auth";
- smtpd_reject_unlisted_recipient = "no";
- smtpd_client_restrictions = "permit_sasl_authenticated,reject";
- # Refuse to send e-mails with a From that is not handled
- smtpd_sender_restrictions =
- "reject_sender_login_mismatch,reject_unlisted_sender,permit_sasl_authenticated,reject";
- smtpd_sender_login_maps = "mysql:${config.secrets.fullPaths."postfix/mysql_sender_login_maps"}";
- smtpd_recipient_restrictions = "permit_sasl_authenticated,reject";
- milter_macro_daemon_name = "ORIGINATING";
- smtpd_milters = "unix:${config.myServices.mail.milters.sockets.opendkim}";
- };
- # FIXME: Mail adressed to localhost.immae.eu will still have mx-1 as
- # prioritized MX, which provokes "mail for localhost.immae.eu loops
- # back to myself" errors. This transport entry forces to push
- # e-mails to its right destination.
- transport = ''
- localhost.immae.eu smtp:[immae.eu]:25
- '';
- destination = ["localhost"];
- # This needs to reverse DNS
- hostname = "eldiron.immae.eu";
- setSendmail = true;
- sslCert = "/var/lib/acme/mail/fullchain.pem";
- sslKey = "/var/lib/acme/mail/key.pem";
- recipientDelimiter = "+";
- masterConfig = {
- submissions = {
- type = "inet";
- private = false;
- command = "smtpd";
- args = ["-o" "smtpd_tls_wrappermode=yes" ] ++ (let
- mkKeyVal = opt: val: [ "-o" (opt + "=" + val) ];
- in lib.concatLists (lib.mapAttrsToList mkKeyVal config.services.postfix.submissionOptions)
- );
- };
- dovecot = {
- type = "unix";
- privileged = true;
- chroot = false;
- command = "pipe";
- args = let
- # rspamd could be used as a milter, but then it cannot apply
- # its checks "per user" (milter is not yet dispatched to
- # users), so we wrap dovecot-lda inside rspamc per recipient
- # here.
- dovecot_exe = "${pkgs.dovecot}/libexec/dovecot/dovecot-lda -f \${sender} -a \${original_recipient} -d \${user}@\${nexthop}";
- in [
- "flags=DRhu" "user=vhost:vhost"
- "argv=${pkgs.rspamd}/bin/rspamc -h ${config.myServices.mail.rspamd.sockets.worker-controller} -c bayes -d \${user}@\${nexthop} --mime --exec {${dovecot_exe}}"
+ ### opendkim, opendmarc, openarc milters
+ non_smtpd_milters = [
+ "unix:${config.myServices.mail.milters.sockets.opendkim}"
+ "unix:${config.myServices.mail.milters.sockets.opendmarc}"
+ "unix:${config.myServices.mail.milters.sockets.openarc}"
+ ];
+ smtpd_milters = [
+ "unix:${config.myServices.mail.milters.sockets.opendkim}"
+ "unix:${config.myServices.mail.milters.sockets.opendmarc}"
+ "unix:${config.myServices.mail.milters.sockets.openarc}"
];
};
+ enable = true;
+ enableSmtp = true;
+ enableSubmission = true;
+ submissionOptions = {
+ smtpd_tls_security_level = "encrypt";
+ smtpd_sasl_auth_enable = "yes";
+ smtpd_tls_auth_only = "yes";
+ smtpd_sasl_tls_security_options = "noanonymous";
+ smtpd_sasl_type = "dovecot";
+ smtpd_sasl_path = "private/auth";
+ smtpd_reject_unlisted_recipient = "no";
+ smtpd_client_restrictions = "permit_sasl_authenticated,reject";
+ # Refuse to send e-mails with a From that is not handled
+ smtpd_sender_restrictions =
+ "reject_sender_login_mismatch,reject_unlisted_sender,permit_sasl_authenticated,reject";
+ smtpd_sender_login_maps = "mysql:${config.secrets.fullPaths."postfix/mysql_sender_login_maps"}";
+ smtpd_recipient_restrictions = "permit_sasl_authenticated,reject";
+ milter_macro_daemon_name = "ORIGINATING";
+ smtpd_milters = "unix:${config.myServices.mail.milters.sockets.opendkim}";
+ };
+ # FIXME: Mail adressed to localhost.immae.eu will still have mx-1 as
+ # prioritized MX, which provokes "mail for localhost.immae.eu loops
+ # back to myself" errors. This transport entry forces to push
+ # e-mails to its right destination.
+ transport = ''
+ localhost.immae.eu smtp:[immae.eu]:25
+ '';
+ destination = ["localhost"];
+ # This needs to reverse DNS
+ hostname = "eldiron.immae.eu";
+ setSendmail = true;
+ sslCert = "/var/lib/acme/mail/fullchain.pem";
+ sslKey = "/var/lib/acme/mail/key.pem";
+ recipientDelimiter = "+";
+ masterConfig = {
+ submissions = {
+ type = "inet";
+ private = false;
+ command = "smtpd";
+ args = ["-o" "smtpd_tls_wrappermode=yes" ] ++ (let
+ mkKeyVal = opt: val: [ "-o" (opt + "=" + val) ];
+ in lib.concatLists (lib.mapAttrsToList mkKeyVal config.services.postfix.submissionOptions)
+ );
+ };
+ dovecot = {
+ type = "unix";
+ privileged = true;
+ chroot = false;
+ command = "pipe";
+ args = let
+ # rspamd could be used as a milter, but then it cannot apply
+ # its checks "per user" (milter is not yet dispatched to
+ # users), so we wrap dovecot-lda inside rspamc per recipient
+ # here.
+ dovecot_exe = "${pkgs.dovecot}/libexec/dovecot/dovecot-lda -f \${sender} -a \${original_recipient} -d \${user}@\${nexthop}";
+ in [
+ "flags=DRhu" "user=vhost:vhost"
+ "argv=${pkgs.rspamd}/bin/rspamc -h ${config.myServices.mail.rspamd.sockets.worker-controller} -c bayes -d \${user}@\${nexthop} --mime --exec {${dovecot_exe}}"
+ ];
+ };
+ };
};
- };
- config.security.acme.certs."mail" = {
- postRun = ''
- systemctl restart postfix.service
- '';
- extraDomains = {
- "smtp.immae.eu" = null;
+ security.acme.certs."mail" = {
+ postRun = ''
+ systemctl restart postfix.service
+ '';
+ extraDomains = {
+ "smtp.immae.eu" = null;
+ };
};
};
}
rspamd sockets
'';
};
- config.services.backup.profiles.mail.excludeFile = ''
- + /var/lib/rspamd
- '';
- config.services.cron.systemCronJobs = let
- cron_script = pkgs.runCommand "cron_script" {
- buildInputs = [ pkgs.makeWrapper ];
- } ''
- mkdir -p $out
- cp ${./scan_reported_mails} $out/scan_reported_mails
- patchShebangs $out
- for i in $out/*; do
- wrapProgram "$i" --prefix PATH : ${lib.makeBinPath [ pkgs.coreutils pkgs.rspamd pkgs.flock ]}
- done
+ config = lib.mkIf config.myServices.mail.enable {
+ services.backup.profiles.mail.excludeFile = ''
+ + /var/lib/rspamd
'';
- in
- [ "*/20 * * * * vhost ${cron_script}/scan_reported_mails" ];
-
- config.services.rspamd = {
- enable = true;
- debug = true;
- overrides = {
- "actions.conf".text = ''
- reject = null;
- add_header = 6;
- greylist = null;
+ services.cron.systemCronJobs = let
+ cron_script = pkgs.runCommand "cron_script" {
+ buildInputs = [ pkgs.makeWrapper ];
+ } ''
+ mkdir -p $out
+ cp ${./scan_reported_mails} $out/scan_reported_mails
+ patchShebangs $out
+ for i in $out/*; do
+ wrapProgram "$i" --prefix PATH : ${lib.makeBinPath [ pkgs.coreutils pkgs.rspamd pkgs.flock ]}
+ done
'';
- "milter_headers.conf".text = ''
- extended_spam_headers = true;
- '';
- };
- locals = {
- "redis.conf".text = ''
- servers = "${myconfig.env.mail.rspamd.redis.socket}";
- db = "${myconfig.env.mail.rspamd.redis.db}";
+ in
+ [ "*/20 * * * * vhost ${cron_script}/scan_reported_mails" ];
+
+ services.rspamd = {
+ enable = true;
+ debug = true;
+ overrides = {
+ "actions.conf".text = ''
+ reject = null;
+ add_header = 6;
+ greylist = null;
+ '';
+ "milter_headers.conf".text = ''
+ extended_spam_headers = true;
'';
- "classifier-bayes.conf".text = ''
- users_enabled = true;
- backend = "redis";
- servers = "${myconfig.env.mail.rspamd.redis.socket}";
- database = "${myconfig.env.mail.rspamd.redis.db}";
- autolearn = true;
- cache {
+ };
+ locals = {
+ "redis.conf".text = ''
+ servers = "${myconfig.env.mail.rspamd.redis.socket}";
+ db = "${myconfig.env.mail.rspamd.redis.db}";
+ '';
+ "classifier-bayes.conf".text = ''
+ users_enabled = true;
backend = "redis";
- }
- new_schema = true;
- statfile {
- BAYES_HAM {
- spam = false;
+ servers = "${myconfig.env.mail.rspamd.redis.socket}";
+ database = "${myconfig.env.mail.rspamd.redis.db}";
+ autolearn = true;
+ cache {
+ backend = "redis";
}
- BAYES_SPAM {
- spam = true;
+ new_schema = true;
+ statfile {
+ BAYES_HAM {
+ spam = false;
+ }
+ BAYES_SPAM {
+ spam = true;
+ }
}
- }
- '';
- };
- workers = {
- controller = {
- extraConfig = ''
- enable_password = "${myconfig.env.mail.rspamd.write_password_hashed}";
- password = "${myconfig.env.mail.rspamd.read_password_hashed}";
- '';
- bindSockets = [ {
- socket = config.myServices.mail.rspamd.sockets.worker-controller;
- mode = "0660";
- owner = config.services.rspamd.user;
- group = "vhost";
- } ];
+ '';
+ };
+ workers = {
+ controller = {
+ extraConfig = ''
+ enable_password = "${myconfig.env.mail.rspamd.write_password_hashed}";
+ password = "${myconfig.env.mail.rspamd.read_password_hashed}";
+ '';
+ bindSockets = [ {
+ socket = config.myServices.mail.rspamd.sockets.worker-controller;
+ mode = "0660";
+ owner = config.services.rspamd.user;
+ group = "vhost";
+ } ];
+ };
+ };
+ postfix = {
+ enable = true;
+ config = {};
};
- };
- postfix = {
- enable = true;
- config = {};
};
};
}
{ lib, pkgs, config, myconfig, ... }:
{
- config = {
+ options.myServices.mpd.enable = lib.mkEnableOption "enable MPD";
+ config = lib.mkIf config.myServices.mpd.enable {
services.backup.profiles.mpd = {
rootDir = "/var/lib/mpd";
};
--- /dev/null
+{ privateFiles }:
+{ config, pkgs, myconfig, ... }:
+{
+ boot.kernelPackages = pkgs.linuxPackages_latest;
+ _module.args.privateFiles = privateFiles;
+ imports = builtins.attrValues (import ../..);
+
+ deployment = {
+ targetEnv = "hetznerCloud";
+ hetznerCloud = {
+ authToken = myconfig.env.hetznerCloud.authToken;
+ datacenter = "hel1-dc2";
+ location ="hel1";
+ serverType = "cx11";
+ };
+ };
+
+ # This value determines the NixOS release with which your system is
+ # to be compatible, in order to avoid breaking some software such as
+ # database servers. You should change this only after NixOS release
+ # notes say you should.
+ # https://nixos.org/nixos/manual/release-notes.html
+ system.stateVersion = "19.03"; # Did you read the comment?
+}
myServices.irc.enable = true;
myServices.pub.enable = true;
myServices.tasks.enable = true;
+ myServices.mpd.enable = true;
+ myServices.dns.enable = true;
+ myServices.certificates.enable = true;
+ myServices.websites.enable = true;
+ myServices.mail.enable = true;
services.pure-ftpd.enable = true;
+ services.backup.enable = true;
deployment = {
targetEnv = "hetzner";
myServices.websites.webappDirs._task = ./www;
- security.acme.certs."task" = config.services.myCertificates.certConfig // {
+ security.acme.certs."task" = config.myServices.certificates.certConfig // {
inherit user group;
plugins = [ "fullchain.pem" "key.pem" "cert.pem" "account_key.json" ];
domain = fqdn;
makeExtraConfig = (builtins.filter (x: x != null) (lib.attrsets.mapAttrsToList (n: v: v.extraConfig or null) apacheConfig));
in
{
- options.myServices.websites.webappDirs = lib.mkOption {
- type = lib.types.attrsOf lib.types.path;
- description = ''
- Webapp paths to create in /run/current-system/webapps
- '';
- default = {};
+ options.myServices.websites = {
+ enable = lib.mkEnableOption "enable websites";
+
+ webappDirs = lib.mkOption {
+ type = lib.types.attrsOf lib.types.path;
+ description = ''
+ Webapp paths to create in /run/current-system/webapps
+ '';
+ default = {};
+ };
};
- config = {
+ config = lib.mkIf config.myServices.websites.enable {
services.backup.profiles.php = {
rootDir = "/var/lib/php";
};
"cp ${file d} $out/${d.domain}.txt"
) domains)}
'';
+ cfg = config.myServices.websites.tools.email;
in
{
- config.myServices.websites.webappDirs = {
- _mta-sts = root;
- };
+ config = lib.mkIf cfg.enable {
+ myServices.websites.webappDirs = {
+ _mta-sts = root;
+ };
- config.services.websites.env.tools.vhostConfs.mta_sts = {
- certName = "mail";
- addToCerts = true;
- hosts = ["mta-sts.mail.immae.eu"] ++ map (v: "mta-sts.${v.domain}") domains;
- root = "/run/current-system/webapps/_mta-sts";
- extraConfig = [
- ''
- RewriteEngine on
- RewriteCond %{HTTP_HOST} ^mta-sts.(.*)$
- RewriteRule ^/.well-known/mta-sts.txt$ %{DOCUMENT_ROOT}/%1.txt [L]
- <Directory /run/current-system/webapps/_mta-sts>
- Require all granted
- Options -Indexes
- </Directory>
- ''
- ];
+ services.websites.env.tools.vhostConfs.mta_sts = {
+ certName = "mail";
+ addToCerts = true;
+ hosts = ["mta-sts.mail.immae.eu"] ++ map (v: "mta-sts.${v.domain}") domains;
+ root = "/run/current-system/webapps/_mta-sts";
+ extraConfig = [
+ ''
+ RewriteEngine on
+ RewriteCond %{HTTP_HOST} ^mta-sts.(.*)$
+ RewriteRule ^/.well-known/mta-sts.txt$ %{DOCUMENT_ROOT}/%1.txt [L]
+ <Directory /run/current-system/webapps/_mta-sts>
+ Require all granted
+ Options -Indexes
+ </Directory>
+ ''
+ ];
+ };
};
-
}
enableRollback = true;
};
+ resources.sshKeyPairs.ssh-key = {};
eldiron = import ../modules/private/system/eldiron.nix { inherit privateFiles; };
+ backup-2 = import ../modules/private/system/backup-2.nix { inherit privateFiles; };
}
newsboat irssi
# nix
- mylibs.yarn2nixPackage.yarn2nix
+ mylibs.yarn2nixPackage.yarn2nix nix
nixops nix-prefetch-scripts nix-generate-from-cpan
nix-zsh-completions bundix nodePackages.bower2nix
nodePackages.node2nix
self: super: {
nixops = super.nixops.overrideAttrs (old: {
+ patches = [ ./hetzner_cloud.patch ];
preConfigure = (old.preConfigure or "") + ''
sed -i -e "/'keyFile'/s/'path'/'string'/" nixops/backends/__init__.py
'';
--- /dev/null
+From 272e50d0b0262e49cdcaad42cdab57aad183d1c2 Mon Sep 17 00:00:00 2001
+From: goodraven
+ <employee-pseudonym-7f597def-7eeb-47f8-b10a-0724f2ba59a9@google.com>
+Date: Thu, 3 May 2018 22:24:58 -0700
+Subject: [PATCH] Initial commit adding support for hetzner cloud
+
+This is based on the digital ocean backend. It also uses nixos-infect. I extended nixos-infect to be generic
+for both backends.
+
+Fixes #855
+---
+ examples/trivial-hetzner-cloud.nix | 12 ++
+ nix/eval-machine-info.nix | 1 +
+ nix/hetzner-cloud.nix | 56 +++++++
+ nix/options.nix | 1 +
+ nixops/backends/hetzner_cloud.py | 230 +++++++++++++++++++++++++++++
+ nixops/data/nixos-infect | 77 +++++++---
+ 6 files changed, 354 insertions(+), 23 deletions(-)
+ create mode 100644 examples/trivial-hetzner-cloud.nix
+ create mode 100644 nix/hetzner-cloud.nix
+ create mode 100644 nixops/backends/hetzner_cloud.py
+
+diff --git a/examples/trivial-hetzner-cloud.nix b/examples/trivial-hetzner-cloud.nix
+new file mode 100644
+index 000000000..c61add6bb
+--- /dev/null
++++ b/examples/trivial-hetzner-cloud.nix
+@@ -0,0 +1,12 @@
++{
++ resources.sshKeyPairs.ssh-key = {};
++
++ machine = { config, pkgs, ... }: {
++ services.openssh.enable = true;
++
++ deployment.targetEnv = "hetznerCloud";
++ deployment.hetznerCloud.serverType = "cx11";
++
++ networking.firewall.allowedTCPPorts = [ 22 ];
++ };
++}
+diff --git a/nix/eval-machine-info.nix b/nix/eval-machine-info.nix
+index 2884b4b47..6a7205786 100644
+--- a/nix/eval-machine-info.nix
++++ b/nix/eval-machine-info.nix
+@@ -309,6 +309,7 @@ rec {
+ digitalOcean = optionalAttrs (v.config.deployment.targetEnv == "digitalOcean") v.config.deployment.digitalOcean;
+ gce = optionalAttrs (v.config.deployment.targetEnv == "gce") v.config.deployment.gce;
+ hetzner = optionalAttrs (v.config.deployment.targetEnv == "hetzner") v.config.deployment.hetzner;
++ hetznerCloud = optionalAttrs (v.config.deployment.targetEnv == "hetznerCloud") v.config.deployment.hetznerCloud;
+ container = optionalAttrs (v.config.deployment.targetEnv == "container") v.config.deployment.container;
+ route53 = v.config.deployment.route53;
+ virtualbox =
+diff --git a/nix/hetzner-cloud.nix b/nix/hetzner-cloud.nix
+new file mode 100644
+index 000000000..21d148c1a
+--- /dev/null
++++ b/nix/hetzner-cloud.nix
+@@ -0,0 +1,56 @@
++{ config, pkgs, lib, utils, ... }:
++
++with utils;
++with lib;
++with import ./lib.nix lib;
++
++let
++ cfg = config.deployment.hetznerCloud;
++in
++{
++ ###### interface
++ options = {
++
++ deployment.hetznerCloud.authToken = mkOption {
++ default = "";
++ example = "8b2f4e96af3997853bfd4cd8998958eab871d9614e35d63fab45a5ddf981c4da";
++ type = types.str;
++ description = ''
++ The API auth token. We're checking the environment for
++ <envar>HETZNER_CLOUD_AUTH_TOKEN</envar> first and if that is
++ not set we try this auth token.
++ '';
++ };
++
++ deployment.hetznerCloud.datacenter = mkOption {
++ example = "fsn1-dc8";
++ default = null;
++ type = types.nullOr types.str;
++ description = ''
++ The datacenter.
++ '';
++ };
++
++ deployment.hetznerCloud.location = mkOption {
++ example = "fsn1";
++ default = null;
++ type = types.nullOr types.str;
++ description = ''
++ The location.
++ '';
++ };
++
++ deployment.hetznerCloud.serverType = mkOption {
++ example = "cx11";
++ type = types.str;
++ description = ''
++ Name or id of server types.
++ '';
++ };
++ };
++
++ config = mkIf (config.deployment.targetEnv == "hetznerCloud") {
++ nixpkgs.system = mkOverride 900 "x86_64-linux";
++ services.openssh.enable = true;
++ };
++}
+diff --git a/nix/options.nix b/nix/options.nix
+index 0866c3ab8..db021f74d 100644
+--- a/nix/options.nix
++++ b/nix/options.nix
+@@ -22,6 +22,7 @@ in
+ ./keys.nix
+ ./gce.nix
+ ./hetzner.nix
++ ./hetzner-cloud.nix
+ ./container.nix
+ ./libvirtd.nix
+ ];
+diff --git a/nixops/backends/hetzner_cloud.py b/nixops/backends/hetzner_cloud.py
+new file mode 100644
+index 000000000..a2cb176b9
+--- /dev/null
++++ b/nixops/backends/hetzner_cloud.py
+@@ -0,0 +1,230 @@
++# -*- coding: utf-8 -*-
++"""
++A backend for hetzner cloud.
++
++This backend uses nixos-infect (which uses nixos LUSTRATE) to infect a
++hetzner cloud instance. The setup requires two reboots, one for
++the infect itself, another after we pushed the nixos image.
++"""
++import os
++import os.path
++import time
++import socket
++
++import requests
++
++import nixops.resources
++from nixops.backends import MachineDefinition, MachineState
++from nixops.nix_expr import Function, RawValue
++import nixops.util
++import nixops.known_hosts
++
++infect_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data', 'nixos-infect'))
++
++API_HOST = 'api.hetzner.cloud'
++
++class ApiError(Exception):
++ pass
++
++class ApiNotFoundError(ApiError):
++ pass
++
++class HetznerCloudDefinition(MachineDefinition):
++ @classmethod
++ def get_type(cls):
++ return "hetznerCloud"
++
++ def __init__(self, xml, config):
++ MachineDefinition.__init__(self, xml, config)
++ self.auth_token = config["hetznerCloud"]["authToken"]
++ self.location = config["hetznerCloud"]["location"]
++ self.datacenter = config["hetznerCloud"]["datacenter"]
++ self.server_type = config["hetznerCloud"]["serverType"]
++
++ def show_type(self):
++ return "{0} [{1}]".format(self.get_type(), self.location or self.datacenter or 'any location')
++
++
++class HetznerCloudState(MachineState):
++ @classmethod
++ def get_type(cls):
++ return "hetznerCloud"
++
++ state = nixops.util.attr_property("state", MachineState.MISSING, int) # override
++ public_ipv4 = nixops.util.attr_property("publicIpv4", None)
++ public_ipv6 = nixops.util.attr_property("publicIpv6", None)
++ location = nixops.util.attr_property("hetznerCloud.location", None)
++ datacenter = nixops.util.attr_property("hetznerCloud.datacenter", None)
++ server_type = nixops.util.attr_property("hetznerCloud.serverType", None)
++ auth_token = nixops.util.attr_property("hetznerCloud.authToken", None)
++ server_id = nixops.util.attr_property("hetznerCloud.serverId", None, int)
++
++ def __init__(self, depl, name, id):
++ MachineState.__init__(self, depl, name, id)
++ self.name = name
++
++ def get_ssh_name(self):
++ return self.public_ipv4
++
++ def get_ssh_flags(self, *args, **kwargs):
++ super_flags = super(HetznerCloudState, self).get_ssh_flags(*args, **kwargs)
++ return super_flags + [
++ '-o', 'UserKnownHostsFile=/dev/null',
++ '-o', 'StrictHostKeyChecking=no',
++ '-i', self.get_ssh_private_key_file(),
++ ]
++
++ def get_physical_spec(self):
++ return Function("{ ... }", {
++ 'imports': [ RawValue('<nixpkgs/nixos/modules/profiles/qemu-guest.nix>') ],
++ ('boot', 'loader', 'grub', 'device'): 'nodev',
++ ('fileSystems', '/'): { 'device': '/dev/sda1', 'fsType': 'ext4'},
++ ('users', 'extraUsers', 'root', 'openssh', 'authorizedKeys', 'keys'): [self.depl.active_resources.get('ssh-key').public_key],
++ })
++
++ def get_ssh_private_key_file(self):
++ return self.write_ssh_private_key(self.depl.active_resources.get('ssh-key').private_key)
++
++ def create_after(self, resources, defn):
++ # make sure the ssh key exists before we do anything else
++ return {
++ r for r in resources if
++ isinstance(r, nixops.resources.ssh_keypair.SSHKeyPairState)
++ }
++
++ def get_auth_token(self):
++ return os.environ.get('HETZNER_CLOUD_AUTH_TOKEN', self.auth_token)
++
++ def _api(self, path, method=None, data=None, json=True):
++ """Basic wrapper around requests that handles auth and serialization."""
++ assert path[0] == '/'
++ url = 'https://%s%s' % (API_HOST, path)
++ token = self.get_auth_token()
++ if not token:
++ raise Exception('No hetzner cloud auth token set')
++ headers = {
++ 'Authorization': 'Bearer '+self.get_auth_token(),
++ }
++ res = requests.request(
++ method=method,
++ url=url,
++ json=data,
++ headers=headers)
++
++ if res.status_code == 404:
++ raise ApiNotFoundError('Not Found: %r' % path)
++ elif not res.ok:
++ raise ApiError('Response for %s %s has status code %d: %s' % (method, path, res.status_code, res.content))
++ if not json:
++ return
++ try:
++ res_data = res.json()
++ except ValueError as e:
++ raise ApiError('Response for %s %s has invalid JSON (%s): %r' % (method, path, e, res.content))
++ return res_data
++
++
++ def destroy(self, wipe=False):
++ if not self.server_id:
++ self.log('server {} was never made'.format(self.name))
++ return
++ self.log('destroying server {} with id {}'.format(self.name, self.server_id))
++ try:
++ res = self._api('/v1/servers/%s' % (self.server_id), method='DELETE')
++ except ApiNotFoundError:
++ self.log("server not found - assuming it's been destroyed already")
++
++ self.public_ipv4 = None
++ self.server_id = None
++
++ return True
++
++ def _create_ssh_key(self, public_key):
++ """Create or get an ssh key and return an id."""
++ public_key = public_key.strip()
++ res = self._api('/v1/ssh_keys', method='GET')
++ name = 'nixops-%s-%s' % (self.depl.uuid, self.name)
++ deletes = []
++ for key in res['ssh_keys']:
++ if key['public_key'].strip() == public_key:
++ return key['id']
++ if key['name'] == name:
++ deletes.append(key['id'])
++ for d in deletes:
++ # This reply is empty, so don't decode json.
++ self._api('/v1/ssh_keys/%d' % d, method='DELETE', json=False)
++ res = self._api('/v1/ssh_keys', method='POST', data={
++ 'name': name,
++ 'public_key': public_key,
++ })
++ return res['ssh_key']['id']
++
++ def create(self, defn, check, allow_reboot, allow_recreate):
++ ssh_key = self.depl.active_resources.get('ssh-key')
++ if ssh_key is None:
++ raise Exception('Please specify a ssh-key resource (resources.sshKeyPairs.ssh-key = {}).')
++
++ self.set_common_state(defn)
++
++ if self.server_id is not None:
++ return
++
++ ssh_key_id = self._create_ssh_key(ssh_key.public_key)
++
++ req = {
++ 'name': self.name,
++ 'server_type': defn.server_type,
++ 'start_after_create': True,
++ 'image': 'debian-9',
++ 'ssh_keys': [
++ ssh_key_id,
++ ],
++ }
++
++ if defn.datacenter:
++ req['datacenter'] = defn.datacenter
++ elif defn.location:
++ req['location'] = defn.location
++
++ self.log_start("creating server ...")
++ create_res = self._api('/v1/servers', method='POST', data=req)
++ self.server_id = create_res['server']['id']
++ self.public_ipv4 = create_res['server']['public_net']['ipv4']['ip']
++ self.public_ipv6 = create_res['server']['public_net']['ipv6']['ip']
++ self.datacenter = create_res['server']['datacenter']['name']
++ self.location = create_res['server']['datacenter']['location']['name']
++
++ action = create_res['action']
++ action_path = '/v1/servers/%d/actions/%d' % (self.server_id, action['id'])
++
++ while action['status'] == 'running':
++ time.sleep(1)
++ res = self._api(action_path, method='GET')
++ action = res['action']
++
++ if action['status'] != 'success':
++ raise Exception('unexpected status: %s' % action['status'])
++
++ self.log_end("{}".format(self.public_ipv4))
++
++ self.wait_for_ssh()
++ self.log_start("running nixos-infect")
++ self.run_command('bash </dev/stdin 2>&1', stdin=open(infect_path))
++ self.reboot_sync()
++
++ def reboot(self, hard=False):
++ if hard:
++ self.log("sending hard reset to server...")
++ res = self._api('/v1/servers/%d/actions/reset' % self.server_id, method='POST')
++ action = res['action']
++ action_path = '/v1/servers/%d/actions/%d' % (self.server_id, action['id'])
++ while action['status'] == 'running':
++ time.sleep(1)
++ res = self._api(action_path, method='GET')
++ action = res['action']
++ if action['status'] != 'success':
++ raise Exception('unexpected status: %s' % action['status'])
++ self.wait_for_ssh()
++ self.state = self.STARTING
++ else:
++ MachineState.reboot(self, hard=hard)
+diff --git a/nixops/data/nixos-infect b/nixops/data/nixos-infect
+index 66634357b..437a2ec61 100644
+--- a/nixops/data/nixos-infect
++++ b/nixops/data/nixos-infect
+@@ -68,26 +68,49 @@ makeConf() {
+ }
+ EOF
+ # (nixos-generate-config will add qemu-user and bind-mounts, so avoid)
++ local disk
++ if [ -e /dev/sda ]; then
++ disk=/dev/sda
++ else
++ disk=/dev/vda
++ fi
+ cat > /etc/nixos/hardware-configuration.nix << EOF
+ { ... }:
+ {
+ imports = [ <nixpkgs/nixos/modules/profiles/qemu-guest.nix> ];
+- boot.loader.grub.device = "/dev/vda";
+- fileSystems."/" = { device = "/dev/vda1"; fsType = "ext4"; };
++ boot.loader.grub.device = "${disk}";
++ fileSystems."/" = { device = "${disk}1"; fsType = "ext4"; };
+ }
+ EOF
+
+ local IFS=$'\n'
+- ens3_ip4s=($(ip address show dev eth0 | grep 'inet ' | sed -r 's|.*inet ([0-9.]+)/([0-9]+).*|{ address="\1"; prefixLength=\2; }|'))
+- ens3_ip6s=($(ip address show dev eth0 | grep 'inet6 .*global' | sed -r 's|.*inet6 ([0-9a-f:]+)/([0-9]+).*|{ address="\1"; prefixLength=\2; }|'))
+- ens4_ip4s=($(ip address show dev eth1 | grep 'inet ' | sed -r 's|.*inet ([0-9.]+)/([0-9]+).*|{ address="\1"; prefixLength=\2; }|'))
+- ens4_ip6s=($(ip address show dev eth1 | grep 'inet6 .*global' | sed -r 's|.*inet6 ([0-9a-f:]+)/([0-9]+).*|{ address="\1"; prefixLength=\2; }|'))
+- gateway=($(ip route show dev eth0 | grep default | sed -r 's|default via ([0-9.]+).*|\1|'))
+- gateway6=($(ip -6 route show dev eth0 | grep default | sed -r 's|default via ([0-9a-f:]+).*|\1|'))
+- ether0=($(ip address show dev eth0 | grep link/ether | sed -r 's|.*link/ether ([0-9a-f:]+) .*|\1|'))
+- ether1=($(ip address show dev eth1 | grep link/ether | sed -r 's|.*link/ether ([0-9a-f:]+) .*|\1|'))
++ gateway=($(ip route show | grep default | sed -r 's|default via ([0-9.]+).*|\1|'))
++ gateway6=($(ip -6 route show | grep default | sed -r 's|default via ([0-9a-f:]+).*|\1|'))
++ interfaces=($(ip link | awk -F ': ' '/^[0-9]*: / {if ($2 != "lo") {print $2}}'))
+ nameservers=($(grep ^nameserver /etc/resolv.conf | cut -f2 -d' '))
+
++ # Predict the predictable name for each interface since that is enabled in
++ # the nixos system.
++ declare -A predictable_names
++ for interface in ${interfaces[@]}; do
++ # udevadm prints out the candidate names which will be selected if
++ # available in this order.
++ local name=$(udevadm info /sys/class/net/$interface | awk -F = '
++ /^E: ID_NET_NAME_FROM_DATABASE=/ {arr[1]=$2}
++ /^E: ID_NET_NAME_ONBOARD=/ {arr[2]=$2}
++ /^E: ID_NET_NAME_SLOT=/ {arr[3]=$2}
++ /^E: ID_NET_NAME_PATH=/ {arr[4]=$2}
++ /^E: ID_NET_NAME_MAC=/ {arr[5]=$2}
++ END {for (i=1;i<6;i++) {if (length(arr[i]) > 0) { print arr[i]; break}}}')
++ if [ -z "$name" ]; then
++ echo Could not determine predictable name for interface $interface
++ fi
++ predictable_names[$interface]=$name
++ done
++
++ # Take a gamble on the first interface being able to reach the gateway.
++ local default_interface=${predictable_names[${interfaces[0]}]}
++
+ cat > /etc/nixos/networking.nix << EOF
+ { ... }: {
+ # This file was populated at runtime with the networking
+@@ -96,25 +119,27 @@ EOF
+ nameservers = [$(for a in ${nameservers[@]}; do echo -n "
+ \"$a\""; done)
+ ];
+- defaultGateway = "${gateway}";
+- defaultGateway6 = "${gateway6}";
++ defaultGateway = {address = "${gateway}"; interface = "${default_interface}";};
++ defaultGateway6 = {address = "${gateway6}"; interface = "${default_interface}";};
+ interfaces = {
+- ens3 = {
+- ip4 = [$(for a in ${ens3_ip4s[@]}; do echo -n "
+- $a"; done)
+- ];
+- ip6 = [$(for a in ${ens3_ip6s[@]}; do echo -n "
+- $a"; done)
+- ];
+- };
+- ens4 = {
+- ip4 = [$(for a in ${ens4_ip4s[@]}; do echo -n "
++EOF
++
++ for interface in ${interfaces[@]}; do
++ ip4s=($(ip address show dev $interface | grep 'inet ' | sed -r 's|.*inet ([0-9.]+)/([0-9]+).*|{ address="\1"; prefixLength=\2; }|'))
++ ip6s=($(ip address show dev $interface | grep 'inet6 .*global' | sed -r 's|.*inet6 ([0-9a-f:]+)/([0-9]+).*|{ address="\1"; prefixLength=\2; }|'))
++ cat >> /etc/nixos/networking.nix << EOF
++ ${predictable_names[$interface]} = {
++ ip4 = [$(for a in ${ip4s[@]}; do echo -n "
+ $a"; done)
+ ];
+- ip6 = [$(for a in ${ens4_ip6s[@]}; do echo -n "
++ ip6 = [$(for a in ${ip6s[@]}; do echo -n "
+ $a"; done)
+ ];
+ };
++EOF
++ done
++
++ cat >> /etc/nixos/networking.nix << EOF
+ };
+ };
+ }
+@@ -154,6 +179,12 @@ export HOME="/root"
+ groupadd -r nixbld -g 30000
+ seq 1 10 | xargs -I{} useradd -c "Nix build user {}" -d /var/empty -g nixbld -G nixbld -M -N -r -s `which nologin` nixbld{}
+
++if ! which curl >/dev/null 2>/dev/null; then
++ if which apt-get >/dev/null 2>/dev/null; then
++ apt-get update && apt-get install -y curl
++ fi
++fi
++
+ curl https://nixos.org/nix/install | sh
+
+ source ~/.nix-profile/etc/profile.d/nix.sh