Add 'create-lxd' role
Change-Id: I143f0db515e4ec4683b7ccc3a5ea4390ec260037
diff --git a/roles/create-lxd/library/host_dns_check.py b/roles/create-lxd/library/host_dns_check.py
new file mode 100755
index 0000000..cbd39ae
--- /dev/null
+++ b/roles/create-lxd/library/host_dns_check.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+
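+# Ansible module: check that every host in a given list resolves in DNS
+# (via dig), optionally running a fallback command when resolution fails.
+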
+import sys
+import json
+import shlex
+import subprocess
+
+# Assume nothing has changed and that all hosts will resolve
+result = {
+    "changed": False,
+    "failed": False,
+    "everyone": "OK"
+}
+
+# read the argument string from the arguments file Ansible hands us
+args_file = sys.argv[1]
+with open(args_file) as f:
+    args_data = f.read()
+
+# Variables for the task options
+host_list = []
+command_on_fail = None
+
+# parse the task options
+arguments = shlex.split(args_data)
+
+for arg in arguments:
+    # ignore any argument that isn't a key=value pair
+    if "=" in arg:
+        key, value = arg.split("=", 1)
+
+        if key == "hosts":
+            # The list of hosts arrives as a string that looks roughly like
+            # a Python list, so rewrite the quoting until it parses as JSON
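+            # e.g. a raw value of [u'juju-1', u'ceilometer-1'] becomes
+            # ["juju-1", "ceilometer-1"], which json.loads accepts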
+            try:
+                value = value.replace("u'", '"').replace("'", '"')
+                value = json.loads(value)
+                host_list = value
+            except Exception as e:
+                result["everyone"] = "Not OK"
+                result["failed"] = True
+                result["msg"] = "Unable to parse 'hosts' argument to module: '%s'" % e
+                print(json.dumps(result))
+                sys.stdout.flush()
+                sys.exit(1)
+ if key == "command_on_fail":
+ command_on_fail = value
+
+for host in host_list:
+    # Attempt to resolve the hostname; dig +short prints nothing when a
+    # name has no record, and any unresolvable host fails the task
+    try:
+        if not subprocess.check_output(["dig", "+short", "+search", host]).strip():
+ result["everyone"] = "Not OK"
+ result["failed"] = True
+ result["msg"] = "Unable to resolve host '%s'" % (host)
+ except Exception as e:
+ result["everyone"] = "Not OK"
+ result["failed"] = True
+ result["msg"] = "Error encountered while resolving '%s' : '%s'" % (host, e)
+ print json.dumps(result)
+ sys.stdout.flush()
+ sys.exit(1)
+
+# If not all hosts were resolved and a failure command was specified, then
+# call that command and capture the results
+if result["failed"] and command_on_fail is not None:
+ result["command_on_fail"] = {}
+ result["command_on_fail"]["command"] = command_on_fail
+ try:
+ cmd_out = subprocess.check_output(shlex.split(command_on_fail), stderr=subprocess.STDOUT)
+ result["command_on_fail"]["retcode"] = 0
+ result["command_on_fail"]["out"] = cmd_out
+ except subprocess.CalledProcessError as e:
+ result["command_on_fail"]["retcode"] = e.returncode
+ result["command_on_fail"]["out"] = e.output
+
+# Output the results
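+# e.g. on success: {"changed": false, "failed": false, "everyone": "OK"}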
+print(json.dumps(result))
+
+if result["failed"]:
+ sys.exit(1)
diff --git a/roles/create-lxd/tasks/main.yml b/roles/create-lxd/tasks/main.yml
new file mode 100644
index 0000000..ebeeb8c
--- /dev/null
+++ b/roles/create-lxd/tasks/main.yml
@@ -0,0 +1,139 @@
+---
+# file: create-lxd/tasks/main.yml
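+
+# dig (from dnsutils) is used by the host_dns_check module shipped in
+# this role's library/ directory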
+- name: Ensure DIG
+ become: yes
+ apt:
+ name: dnsutils=1:9*
+ state: present
+
+- name: Enable trusty-backports
+ become: yes
+ apt_repository:
+ repo: "{{ item }}"
+ state: present
+ with_items:
+ - "deb http://us.archive.ubuntu.com/ubuntu/ trusty-backports main restricted universe"
+ - "deb-src http://us.archive.ubuntu.com/ubuntu/ trusty-backports main restricted universe"
+
+- name: Ensure LXD
+ become: yes
+ apt:
+ name: lxd
+ state: present
+ update_cache: yes
+ default_release: trusty-backports
+
+# Fetch the head node's public key back to the control machine so the
+# lookup('file', ...) below can read it when building the profile
+- name: Fetch remote key
+  fetch:
+    src: .ssh/id_rsa.pub
+    dest: /tmp/id_rsa.pub
+    flat: yes
+
+- name: Create openstack LXD profile
+ become: yes
+ lxd_profile:
+ name: openstack
+ state: present
+ config:
+ user.user-data: |
+ #cloud-config
+ ssh_authorized_keys:
+ - "{{ lookup('file', '/tmp/id_rsa.pub') }}"
+ description: 'OpenStack services on CORD'
+ devices:
+ eth0:
+ nictype: bridged
+ parent: mgmtbr
+ type: nic
+
+- name: Create containers for the OpenStack services
+ become: yes
+ lxd_container:
+ name: "{{ item.name }}"
+ architecture: x86_64
+ state: started
+ source:
+ type: image
+ mode: pull
+ server: https://cloud-images.ubuntu.com/releases
+ protocol: simplestreams
+ alias: "{{ ansible_distribution_release }}"
+ profiles: ["openstack"]
+ wait_for_ipv4_addresses: true
+ timeout: 600
+ with_items: "{{ head_lxd_list }}"
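+# head_lxd_list (used above) comes from the playbook vars; each entry is
+# expected to carry at least a 'name' key, e.g. (illustrative):
+#   head_lxd_list:
+#     - { name: "juju-1" }
+#     - { name: "ceilometer-1" }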
+
+- name: Fetch IP of DHCP harvester
+  when: on_maas
+  command: docker-ip harvester
+  register: harvester_ip
+  changed_when: False
+
+- name: Force a harvest to get container name resolution
+ when: on_maas
+ uri:
+ url: http://{{ harvester_ip.stdout }}:8954/harvest
+ method: POST
+
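+# host_dns_check is the custom module from this role's library/ directory;
+# it reports everyone == "OK" once every container name resolves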
+- name: Wait for container name resolution
+ when: on_maas
+ host_dns_check:
+ hosts: "{{ head_lxd_list | map(attribute='name') | list | to_json }}"
+ command_on_fail: "curl -sS --connect-timeout 3 -XPOST http://{{ harvester_ip.stdout }}:8954/harvest"
+ register: all_resolved
+ until: all_resolved.everyone == "OK"
+ retries: 5
+ delay: 10
+ failed_when: all_resolved.everyone != "OK"
+
+- name: Wait for containers to come up
+  wait_for:
+    host: "{{ item.name }}"
+    port: 22
+  with_items: "{{ head_lxd_list }}"
+
+- name: Create /etc/ansible/hosts file
+  become: yes
+  template:
+    src: ansible_hosts.j2
+    dest: /etc/ansible/hosts
+
+- name: Verify that we can log into every container
+ command: ansible containers -m ping -u ubuntu
+ tags:
+ - skip_ansible_lint # connectivity check
+
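+# The ansible CLI below pushes an apt proxy config into every container;
+# the resulting 02apt-cacher-ng line looks like (illustrative host/port):
+#   Acquire::http { Proxy "http://apt-cache:3142"; };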
+- name: Have containers use the apt-cache
+ command: ansible containers -b -u ubuntu -m lineinfile -a "dest=/etc/apt/apt.conf.d/02apt-cacher-ng create=yes mode=0644 owner=root group=root regexp='^Acquire' line='Acquire::http { Proxy \"http://{{ apt_cacher_name }}:{{ apt_cacher_port | default('3142') }}\"; };'"
+ tags:
+ - skip_ansible_lint # running a sub job
+
+- name: Update apt cache
+ command: ansible containers -m apt -b -u ubuntu -a "update_cache=yes cache_valid_time=3600"
+ tags:
+ - skip_ansible_lint # running a sub job
+
+- name: Update software in all the containers
+ when: run_dist_upgrade
+ command: ansible containers -m apt -b -u ubuntu -a "upgrade=dist"
+ tags:
+ - skip_ansible_lint # running a sub job
+
+- name: Create containers' eth0 config file (DNS setup via resolvconf)
+  when: not on_maas
+  template:
+    src: eth0.cfg.j2
+    dest: "{{ ansible_user_dir }}/eth0.cfg"
+
+- name: Copy eth0 interface config file to all containers
+ when: not on_maas
+ command: ansible containers -b -u ubuntu -m copy -a "src={{ ansible_user_dir }}/eth0.cfg dest=/etc/network/interfaces.d/eth0.cfg owner=root group=root mode=0644"
+
+- name: Restart eth0 interface on all containers
+ when: not on_maas
+ command: ansible containers -b -u ubuntu -m shell -a "ifdown eth0 ; ifup eth0"
+
+- name: Verify that we can log into every container after restarting network interfaces
+ when: not on_maas
+ command: ansible containers -m ping -u ubuntu
diff --git a/roles/create-lxd/templates/ansible_hosts.j2 b/roles/create-lxd/templates/ansible_hosts.j2
new file mode 100644
index 0000000..e2e58de
--- /dev/null
+++ b/roles/create-lxd/templates/ansible_hosts.j2
@@ -0,0 +1,22 @@
+[localhost]
+127.0.0.1 hostname={{ ansible_fqdn }}
+
+# VMs will go away shortly in favor of containers
+[vms]
+{% for vm in head_vm_list -%}
+{{ vm.name }}
+{% endfor -%}
+
+[containers]
+{% for lxd in head_lxd_list -%}
+{{ lxd.name }}
+{% endfor -%}
+
+[services:children]
+vms
+containers
+
+[docker]
+{% for vm in head_vm_list | selectattr('docker_path', 'defined') -%}
+{{ vm.name }}
+{% endfor -%}
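+{#
+Illustrative rendered result (host names depend on head_vm_list and
+head_lxd_list):
+
+[localhost]
+127.0.0.1 hostname=head1.example.org
+
+[containers]
+juju-1
+ceilometer-1
+#}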
diff --git a/roles/create-lxd/templates/eth0.cfg.j2 b/roles/create-lxd/templates/eth0.cfg.j2
new file mode 100644
index 0000000..0235b8a
--- /dev/null
+++ b/roles/create-lxd/templates/eth0.cfg.j2
@@ -0,0 +1,12 @@
+# The primary network interface
+auto eth0
+iface eth0 inet dhcp
+{% if unbound_listen_on_default | default(false) %}
+ dns-nameservers{% for host in groups['head'] %} {{ hostvars[host].ansible_default_ipv4.address }}{% endfor %}
+{% endif %}
+{% if dns_servers is defined %}
+ dns-nameservers{% for ns in dns_servers %} {{ ns }}{% endfor %}
+{% endif %}
+{% if dns_search is defined %}
+ dns-search{% for searchdom in dns_search %} {{ searchdom }}{% endfor %}
+{% endif %}
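+{#
+Illustrative rendered result (addresses and domains are examples only):
+
+# The primary network interface
+auto eth0
+iface eth0 inet dhcp
+    dns-nameservers 10.6.0.1
+    dns-search cord.lab
+#}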