let
cfg = config.myEnv.backup;
varDir = "/var/lib/duply";
- duplyProfile = profile: remote: prefix: ''
- GPG_PW="${cfg.password}"
- TARGET="${cfg.remotes.${remote}.remote profile.bucket}${prefix}"
- export AWS_ACCESS_KEY_ID="${cfg.remotes.${remote}.accessKeyId}"
- export AWS_SECRET_ACCESS_KEY="${cfg.remotes.${remote}.secretAccessKey}"
+ default_action = "pre_bkp_purge_purgeFull_purgeIncr";
+ duply_backup_full_with_ignored = pkgs.writeScriptBin "duply_full_with_ignored" ''
+ #!${pkgs.stdenv.shell}
+
+ export DUPLY_FULL_BACKUP_WITH_IGNORED=yes
+ if [ -z "$1" -o "$1" = "-h" -o "$1" = "--help" ]; then
+ echo "duply_full_with_ignored /path/to/profile"
+ echo "Does a full backup including directories with .duplicity-ignore"
+ exit 1
+ fi
+ ${pkgs.duply}/bin/duply "$1" pre_full --force
+ '';
+ duply_backup = pkgs.writeScriptBin "duply_backup" ''
+ #!${pkgs.stdenv.shell}
+
+ declare -a profiles
+ profiles=()
+ ${builtins.concatStringsSep "\n" (lib.flatten (lib.mapAttrsToList (k: v: map (remote: [
+ ''profiles+=("${remote}_${k}")''
+ ]) v.remotes) config.services.duplyBackup.profiles))}
+
+ if [ -f "${varDir}/last_backup_profile" ]; then
+ last_backup=$(cat ${varDir}/last_backup_profile)
+ for i in "''${!profiles[@]}"; do
+ if [[ "''${profiles[$i]}" = "$last_backup" ]]; then
+ break
+ fi
+ done
+ ((i+=1))
+ profiles=("''${profiles[@]:$i}" "''${profiles[@]:0:$i}")
+ fi
+
+ # timeout in minutes
+ timeout="''${1:-180}"
+ timeout_timestamp=$(date +%s -d "$timeout minutes")
+ for profile in "''${profiles[@]}"; do
+ if [ $(date +%s -d "now") -ge "$timeout_timestamp" ]; then
+ break
+ fi
+
+ touch "${varDir}/$profile.log"
+ ${pkgs.duply}/bin/duply ${config.secrets.location}/backup/$profile/ ${default_action} --force >> ${varDir}/$profile.log
+ [[ $? = 0 ]] || echo -e "Error when doing backup for $profile, see above or logs in ${varDir}/$profile.log\n---------------------------------------" >&2
+ echo "$profile" > ${varDir}/last_backup_profile
+ done
+ '';
+
+ check_backups = pkgs.writeScriptBin "duply_list_not_backuped" ''
+ #!${pkgs.stdenv.shell}
+
+ do_check() {
+ local dir="$1" path ignored_path
+ find "$dir" -mindepth 1 -maxdepth 1 | while IFS= read -r path; do
+ if ${pkgs.gnugrep}/bin/grep -qFx "$path" ${config.secrets.fullPaths."backup/backuped_list"}; then
+ continue
+ elif ${pkgs.gnugrep}/bin/grep -q "^$path/" ${config.secrets.fullPaths."backup/backuped_list"}; then
+ do_check "$path"
+ else
+ while IFS= read -r ignored_path; do
+ if [[ "$path" =~ ^$ignored_path$ ]]; then
+ continue 2
+ fi
+ done < ${config.secrets.fullPaths."backup/ignored_list"}
+ printf '%s\n' "$path"
+ fi
+ done
+ }
+
+ do_check /var/lib
+ '';
+ duplyProfile = profile: remote: bucket: let
+ remote' = cfg.remotes.${remote};
+ in ''
+ if [ -z "$DUPLY_FULL_BACKUP_WITH_IGNORED" ]; then
+ GPG_PW="${cfg.password}"
+ fi
+ TARGET="${remote'.remote bucket}"
+ ${lib.optionalString (remote'.remote_type == "s3") ''
+ export AWS_ACCESS_KEY_ID="${remote'.s3AccessKeyId}"
+ export AWS_SECRET_ACCESS_KEY="${remote'.s3SecretAccessKey}"
+ ''}
+ ${lib.optionalString (remote'.remote_type == "rsync") ''
+ DUPL_PARAMS="$DUPL_PARAMS --ssh-options=-oIdentityFile='${config.secrets.fullPaths."backup/identity"}' "
+ ''}
SOURCE="${profile.rootDir}"
- FILENAME=".duplicity-ignore"
- DUPL_PARAMS="$DUPL_PARAMS --exclude-if-present '$FILENAME'"
+ if [ -z "$DUPLY_FULL_BACKUP_WITH_IGNORED" ]; then
+ FILENAME=".duplicity-ignore"
+ DUPL_PARAMS="$DUPL_PARAMS --exclude-if-present '$FILENAME'"
+ fi
VERBOSITY=4
ARCH_DIR="${varDir}/caches"
+ DUPL_PYTHON_BIN=""
- # Do a full backup after 1 month
- MAX_FULLBKP_AGE=1M
- DUPL_PARAMS="$DUPL_PARAMS --allow-source-mismatch --exclude-other-filesystems --full-if-older-than $MAX_FULLBKP_AGE "
- # Backups older than 2months are deleted
- MAX_AGE=2M
- # Keep 2 full backups
- MAX_FULL_BACKUPS=2
- MAX_FULLS_WITH_INCRS=2
+ # Do a full backup after 6 months
+ MAX_FULLBKP_AGE=6M
+ DUPL_PARAMS="$DUPL_PARAMS --allow-source-mismatch --full-if-older-than $MAX_FULLBKP_AGE "
+ # Backups older than 1 month are deleted
+ MAX_AGE=1M
+ # Keep 1 full backup
+ MAX_FULL_BACKUPS=1
+ MAX_FULLS_WITH_INCRS=1
'';
- action = "bkp_purge_purgeFull_purgeIncr";
- varName = k: remoteName:
- if remoteName == "eriomem" then k else remoteName + "_" + k;
in
{
options = {
services.duplyBackup.profiles = lib.mkOption {
type = lib.types.attrsOf (lib.types.submodule {
options = {
+ hash = lib.mkOption {
+ type = lib.types.bool;
+ default = true;
+ description = ''
+ Hash bucket and directory names
+ '';
+ };
+ excludeRootDir = lib.mkOption {
+ type = lib.types.bool;
+ default = true;
+ description = ''
+ Exclude root dir in exclusion file
+ '';
+ };
rootDir = lib.mkOption {
type = lib.types.path;
+ default = "/var/lib";
description = ''
Path to backup
'';
};
bucket = lib.mkOption {
type = lib.types.str;
- default = "immae-${name}";
description = ''
Bucket to use
'';
};
remotes = lib.mkOption {
type = lib.types.listOf lib.types.str;
- default = ["eriomem"];
description = ''
Remotes to use for backup
'';
};
+ includedPaths = lib.mkOption {
+ type = lib.types.listOf lib.types.str;
+ default = [];
+ description = ''
+ Included paths (subdirs of rootDir)
+ '';
+ };
excludeFile = lib.mkOption {
type = lib.types.lines;
default = "";
description = ''
Content to put in exclude file
'';
};
+ ignoredPaths = lib.mkOption {
+ type = lib.types.listOf lib.types.str;
+ default = [];
+ description = ''
+ List of paths to ignore when checking non-backed-up directories
+ Can use (POSIX extended) regex
+ '';
+ };
};
});
};
install -m 0700 -o root -g root -d ${varDir} ${varDir}/caches
'';
secrets.keys = lib.listToAttrs (lib.flatten (lib.mapAttrsToList (k: v:
- map (remote: [
- (lib.nameValuePair "backup/${varName k remote}/conf" {
+ let
+ bucket = if v.hash or true then builtins.hashString "sha256" v.bucket else v.bucket;
+ in map (remote: [
+ (lib.nameValuePair "backup/${remote}_${k}/conf" {
permissions = "0400";
- text = duplyProfile v remote "${k}/";
+ text = duplyProfile v remote bucket;
})
- (lib.nameValuePair "backup/${varName k remote}/exclude" {
+ (lib.nameValuePair "backup/${remote}_${k}/exclude" {
permissions = "0400";
- text = v.excludeFile;
+ text = v.excludeFile + (builtins.concatStringsSep "\n" (map (p: "+ ${v.rootDir}/${p}") v.includedPaths)) + (lib.optionalString v.excludeRootDir ''
+
+ - **
+ '');
})
- (lib.nameValuePair "backup/${varName k remote}" {
+ (lib.nameValuePair "backup/${remote}_${k}/pre" {
+ keyDependencies = [
+ pkgs.bash
+ pkgs.rsync
+ ];
permissions = "0500";
+ text = let
+ remote' = cfg.remotes.${remote};
+ in ''
+ #!${pkgs.stdenv.shell}
+
+ ${lib.optionalString (remote'.remote_type == "rsync") ''
+ # Recreate directory structure before synchronizing
+ mkdir -p ${varDir}/rsync_remotes/${remote}/${bucket}
+ ${pkgs.rsync}/bin/rsync -av -e \
+ "ssh -p ${remote'.sshRsyncPort} -oIdentityFile=${config.secrets.fullPaths."backup/identity"}" \
+ "${varDir}/rsync_remotes/${remote}/" \
+ ${remote'.sshRsyncHost}:
+ ''}
+ '';
+ })
+ (lib.nameValuePair "backup/${remote}_${k}" {
+ permissions = "0700";
isDir = true;
})
- ]) v.remotes) config.services.duplyBackup.profiles));
+ ]) v.remotes) config.services.duplyBackup.profiles)) // {
+ "backup/identity" = {
+ permissions = "0400";
+ text = "{{ .ssl_keys.duply_backup }}";
+ };
+ "backup/ignored_list" = {
+ permissions = "0400";
+ text = let
+ ignored = map
+ (v: map (p: "${v.rootDir}/${p}") v.ignoredPaths)
+ (builtins.attrValues config.services.duplyBackup.profiles);
+ in builtins.concatStringsSep "\n" (lib.flatten ignored);
+ };
+ "backup/backuped_list" = {
+ permissions = "0400";
+ text = let
+ included = map
+ (v: map (p: "${v.rootDir}/${p}") v.includedPaths)
+ (builtins.attrValues config.services.duplyBackup.profiles);
+ in builtins.concatStringsSep "\n" (lib.flatten included);
+ };
+ };
+ programs.ssh.knownHostsFiles = [
+ (pkgs.writeText
+ "duply_backup_known_hosts"
+ (builtins.concatStringsSep
+ "\n"
+ (builtins.filter
+ (v: v != null)
+ (builtins.map
+ (v: v.sshKnownHosts)
+ (builtins.attrValues cfg.remotes)
+ )
+ )
+ )
+ )
+ ];
+ environment.systemPackages = [ pkgs.duply check_backups duply_backup_full_with_ignored duply_backup ];
services.cron = {
enable = true;
- systemCronJobs = let
- backups = pkgs.writeScript "backups" ''
- #!${pkgs.stdenv.shell}
-
- ${builtins.concatStringsSep "\n" (lib.flatten (lib.mapAttrsToList (k: v:
- map (remote: [
- ''
- touch ${varDir}/${varName k remote}.log
- ${pkgs.duply}/bin/duply ${config.secrets.fullPaths."backup/${varName k remote}"}/ ${action} --force >> ${varDir}/${varName k remote}.log
- [[ $? = 0 ]] || echo -e "Error when doing backup for ${varName k remote}, see above\n---------------------------------------" >&2
- ''
- ]) v.remotes
- ) config.services.duplyBackup.profiles))}
- '';
- in
- [
- "0 2 * * * root ${backups}"
- ];
+ systemCronJobs = [
+ "0 0 * * * root ${duply_backup}/bin/duply_backup 90"
+ ];
};
- security.pki.certificateFiles = [
- (pkgs.fetchurl {
- url = "http://downloads.e.eriomem.net/eriomemca.pem";
- sha256 = "1ixx4c6j3m26j8dp9a3dkvxc80v1nr5aqgmawwgs06bskasqkvvh";
- })
- ];
-
- myServices.monitoring.fromMasterActivatedPlugins = [ "eriomem" ];
- myServices.monitoring.fromMasterObjects.service = [
- {
- service_description = "eriomem backup is up and not full";
- host_name = config.hostEnv.fqdn;
- use = "external-service";
- check_command = "check_backup_eriomem";
-
- check_interval = 120;
- notification_interval = "1440";
-
- servicegroups = "webstatus-backup";
- }
-
- {
- service_description = "ovh backup is up and not full";
- host_name = config.hostEnv.fqdn;
- use = "external-service";
- check_command = "check_ok";
-
- check_interval = 120;
- notification_interval = "1440";
-
- servicegroups = "webstatus-backup";
- }
- ];
};
}