--- /dev/null
+#!/bin/bash
+
+# Print the command-line help for this provisioning script.
+usage() {
+cat <<EOF
+ $0 [options]
+ --help,-h This help
+ --instance-id id Id of the instance
+ --reinstall-first Start with reinstalling the vps
+ --host-user user Use another user (default: arch)
+ --no-reboot Don't reboot
+ --no-reboot-start Don't reboot to rescue at the beginning
+ --no-reboot-end Don't reboot to normal at the end
+ --git-branch Use another puppet branch (default: master)
+ --environment Environment to use for the install (default: production)
+EOF
+}
+
+set -e
+
+host_user=arch
+git_branch=master
+environment=production
+
+while [ -n "$1" ]; do
+ case "$1" in
+ --instance-id)
+ instance_id="$2"
+ shift
+ ;;
+ --reinstall-first)
+ reinstall_first=1
+ ;;
+ --host-user)
+ host_user="$2"
+ shift
+ ;;
+ --no-reboot)
+ no_reboot=1
+ ;;
+ --no-reboot-start)
+ no_reboot_start=1
+ ;;
+ --no-reboot-end)
+ no_reboot_end=1
+ ;;
+ --git-branch)
+ git_branch="$2"
+ shift
+ ;;
+ --environment)
+ environment="$2"
+ shift
+ ;;
+ --help|-h)
+ usage
+ exit 0
+ ;;
+ esac
+
+ shift
+done
+
+DIRECTORY=$(cd `dirname $0` && pwd)
+PYTHON_DIRECTORY="$DIRECTORY/../python"
+
+# Ask interactively for the instance id when it was not given on the
+# command line.
+if [ -z "$instance_id" ]; then
+  read -p "Id de l'instance : " instance_id
+fi
+
+if [ -n "$reinstall_first" ]; then
+  echo "Réinstallation du système"
+  python $PYTHON_DIRECTORY/reinstall_cloud_instance.py --use-current "$instance_id"
+
+  read -p "Appuyer sur une touche quand le serveur est prêt" ready
+fi
+
+# Reboot into rescue mode unless disabled.
+if [ -z "$no_reboot" -a -z "$no_reboot_start" ]; then
+  echo "Patienter le temps du reboot"
+  python $PYTHON_DIRECTORY/reboot_cloud_instance.py --rescue "$instance_id"
+
+  read -p "Appuyer sur une touche quand l'instance a redémarré" ready
+fi
+
+# Work directory holding the generated scripts; removed on exit.
+ARCH_DIR=`mktemp -d`
+ARCH_HOST_SCRIPT="$ARCH_DIR/arch_host_script.sh"
+ARCH_INSTALL_SCRIPT="$ARCH_DIR/arch_install_script.sh"
+ARCH_HOST_PUPPET_CONFIGURATION_SCRIPT="$ARCH_DIR/arch_host_puppet_configuration_script.sh"
+ARCH_PUPPET_CONFIGURATION_SCRIPT="$ARCH_DIR/arch_puppet_configuration_script.sh"
+ARCH_PUPPET_INITIAL_CONFIGURATION="$ARCH_DIR/puppet_variables.json"
+
+trap "rm -rf $ARCH_DIR" EXIT
+
+#### Base installation stage
+python $PYTHON_DIRECTORY/get_initial_configuration_cloud_instance.py $instance_id > $ARCH_PUPPET_INITIAL_CONFIGURATION
+# Extract the IPv4 address from the JSON saved just above instead of
+# querying the API a second time (saves a network round-trip and avoids
+# any inconsistency between two separate responses).
+host_address=$(jq -r '.ips.v4.ipAddress' "$ARCH_PUPPET_INITIAL_CONFIGURATION")
+
+cat > $ARCH_HOST_SCRIPT <<EOF
+#!/bin/bash
+
+sudo haveged &
+sudo pacman -Sy --noconfirm arch-install-scripts
+
+DEVICE=/dev/vdb1
+MOUNTPOINT=/mnt
+
+UUID=\$(lsblk -rno UUID "\$DEVICE")
+PART="/dev/disk/by-uuid/\$UUID"
+
+# mkfs.ext4 -F -U "\$UUID" "\$DEVICE"
+sudo mount "\$DEVICE" /mnt
+
+##### FIXME: mkfs.ext4 would be better ####
+for i in /mnt/*; do
+ if [ "\$i" = "/mnt/boot" ]; then
+ # keep /boot/grub
+ sudo rm -f \$i/*
+ else
+ sudo rm -rf \$i
+ fi
+done
+##### /FIXME ####
+
+sudo pacstrap /mnt base git puppet
+
+echo "\$PART / auto defaults 0 1" | sudo tee /mnt/etc/fstab
+
+sudo cp /tmp/arch_install_script.sh "\$MOUNTPOINT/root/"
+sudo cp /tmp/puppet_variables.json "\$MOUNTPOINT/root/"
+
+sudo arch-chroot "\$MOUNTPOINT" /root/arch_install_script.sh
+EOF
+
+cat > $ARCH_INSTALL_SCRIPT <<EOF
+CODE_PATH="/etc/puppetlabs/code"
+rm -rf \$CODE_PATH
+git clone -b $git_branch --recursive https://git.immae.eu/perso/Immae/Projets/Puppet.git \$CODE_PATH
+puppet apply --environment $environment --tags base_installation --test \$CODE_PATH/manifests/site.pp
+# The password seed requires puppet to be run twice
+puppet apply --environment $environment --tags base_installation --test \$CODE_PATH/manifests/site.pp
+EOF
+
+chmod a+x $ARCH_HOST_SCRIPT $ARCH_INSTALL_SCRIPT
+
+expect -f - <<EOF
+set timeout -1
+spawn scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o CheckHostIP=no $ARCH_PUPPET_INITIAL_CONFIGURATION $ARCH_HOST_SCRIPT $ARCH_INSTALL_SCRIPT $host_user@$host_address:/tmp
+expect eof
+spawn ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o CheckHostIP=no $host_user@$host_address /tmp/arch_host_script.sh
+expect eof
+EOF
+
+### Role specific stage
+read -p "Press key when LDAP is configured" i
+
+cat > $ARCH_PUPPET_CONFIGURATION_SCRIPT <<EOF
+CODE_PATH="/etc/puppetlabs/code"
+puppet apply --environment $environment --tags base_installation --test \$CODE_PATH/manifests/site.pp
+EOF
+
+cat > $ARCH_HOST_PUPPET_CONFIGURATION_SCRIPT <<EOF
+MOUNTPOINT=/mnt
+
+sudo cp /tmp/arch_puppet_configuration_script.sh "\$MOUNTPOINT/root/"
+
+sudo arch-chroot "\$MOUNTPOINT" /root/arch_puppet_configuration_script.sh
+EOF
+
+chmod a+x $ARCH_PUPPET_CONFIGURATION_SCRIPT $ARCH_HOST_PUPPET_CONFIGURATION_SCRIPT
+
+expect -f - <<EOF
+set timeout -1
+spawn scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o CheckHostIP=no $ARCH_PUPPET_CONFIGURATION_SCRIPT $ARCH_HOST_PUPPET_CONFIGURATION_SCRIPT $host_user@$host_address:/tmp
+expect eof
+spawn ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o CheckHostIP=no $host_user@$host_address /tmp/arch_host_puppet_configuration_script.sh
+expect eof
+EOF
+
+### Installation finished
+if [ -z "$no_reboot" -a -z "$no_reboot_end" ]; then
+ echo "Rebooting"
+ python $PYTHON_DIRECTORY/reboot_cloud_instance.py --local "$instance_id"
+fi
--- /dev/null
+# -*- encoding: utf-8 -*-
+# Print, as JSON on stdout, the network configuration (IPv4/IPv6) of the
+# OVH cloud instance whose id is the last command-line argument.
+import json
+try:
+    from ovh import ovh
+except ImportError:
+    # In case it's installed globally
+    import ovh
+import sys
+
+infos = {}
+
+# Credentials are stored in ~/.ovh.conf
+# See ovh/README.rst
+client = ovh.Client()
+
+# Only the first cloud project is used.
+projects_list = client.get('/cloud/project/')
+if len(projects_list) > 1:
+    print("More than one project is not supported, taking the first one")
+project = projects_list[0]
+instances_list = client.get('/cloud/project/{}/instance'.format(project))
+instances = dict(map(lambda x: (x["id"], x), instances_list))
+if sys.argv[-1] in instances:
+    instance = instances[sys.argv[-1]]
+else:
+    # Unknown id: list the available instances and fail.
+    print("Instance not in list:")
+    for instance in instances_list:
+        print("{}: {}".format(instance["name"], instance["id"]))
+    sys.exit(1)
+
+infos["ips"] = {}
+for ip_infos in instance["ipAddresses"]:
+    # Rename the OVH API fields (ip -> ipAddress, gatewayIp -> gateway)
+    # to the names expected by the consumer of this JSON.
+    ip_infos["ipAddress"] = ip_infos.pop("ip")
+    ip_infos["gateway"] = ip_infos.pop("gatewayIp")
+
+    if ip_infos["version"] == 4:
+        infos["ips"]["v4"] = ip_infos
+    else:
+        infos["ips"]["v6"] = ip_infos
+        # NOTE(review): assumes the v6 address is a single /128 — confirm.
+        infos["ips"]["v6"]["mask"] = 128
+
+print(json.dumps(infos))
--- /dev/null
+# -*- encoding: utf-8 -*-
+# Switch the netboot mode of an OVH cloud instance to rescue (--rescue) or
+# back to the normal disk (--local). The instance id is the last
+# command-line argument.
+import json
+try:
+    from ovh import ovh
+except ImportError:
+    # In case it's installed globally
+    import ovh
+import sys
+
+# Credentials are stored in ~/.ovh.conf
+# See ovh/README.rst
+client = ovh.Client()
+
+# Only the first cloud project is used.
+projects_list = client.get('/cloud/project/')
+if len(projects_list) > 1:
+    print("More than one project is not supported, taking the first one")
+project = projects_list[0]
+instances_list = client.get('/cloud/project/{}/instance'.format(project))
+instances = dict(map(lambda x: (x["id"], x), instances_list))
+if sys.argv[-1] in instances:
+    instance = instances[sys.argv[-1]]
+else:
+    # Unknown id: list the available instances and fail.
+    print("Instance not in list:")
+    for instance in instances_list:
+        print("{}: {}".format(instance["name"], instance["id"]))
+    sys.exit(1)
+
+# Pick the requested mode; do nothing when neither flag is given.
+if "--rescue" in sys.argv:
+    netboot_mode="rescue"
+elif "--local" in sys.argv:
+    netboot_mode="local"
+else:
+    netboot_mode=None
+
+if netboot_mode is not None:
+    # NOTE(review): the rescueMode endpoint is expected to reboot the
+    # instance into (or out of) rescue mode — confirm against the OVH API.
+    result = client.post("/cloud/project/{}/instance/{}/rescueMode".format(project,
+        instance["id"]), imageId=instance["imageId"], rescue=(netboot_mode == "rescue"))
+    print(result)
+
--- /dev/null
+# -*- encoding: utf-8 -*-
+import json
+try:
+ from ovh import ovh
+except ImportError:
+ # In case it's installed globally
+ import ovh
+import sys
+
+# Credentials are stored in ~/.ovh.conf
+# See ovh/README.rst
+client = ovh.Client()
+
+projects_list = client.get('/cloud/project/')
+if len(projects_list) > 1:
+ print("More than one project is not supported, taking the first one")
+project = projects_list[0]
+instances_list = client.get('/cloud/project/{}/instance'.format(project))
+instances = dict(map(lambda x: (x["id"], x), instances_list))
+if sys.argv[-1] in instances:
+ instance = instances[sys.argv[-1]]
+else:
+ print("Instance not in list:")
+ for instance in instances_list:
+ print("{}: {}".format(instance["name"], instance["id"]))
+ sys.exit(1)
+
+current_image = instance["imageId"]
+available_images = client.get('/cloud/project/{}/image'.format(project),
+ osType="linux",
+ region=instance["region"])
+available_images_ids = list(map(lambda x: x["id"], available_images))
+
+def print_images(available_images):
+ for image in available_images:
+ print("{}: {}".format(image["name"], image["id"]))
+
+def reinstall(image_id):
+ return client.post('/cloud/project/{}/instance/{}/reinstall'.format(project, instance["id"]),
+ imageId=image_id)
+
+if "--get-state" in sys.argv:
+ print(instance["status"])
+elif "--use-current" in sys.argv:
+ if current_image in available_images_ids:
+ print("Current image still available, using it")
+ print(reinstall(current_image))
+ else:
+ print("Current image no more available. Chose among:")
+ print_images(available_images)
+elif sys.argv[-1] in available_templates:
+ print("Chosen image available, using it")
+ print(reinstall(current_image))
+else:
+ print("Chosen image not available. Chose among:")
+ print_images(available_images)
+