---
# file: create-vms/tasks/main.yml
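# These tasks assume a head_vm_list variable (normally set in vars/group_vars).
# Illustrative shape only -- the names and sizes below are hypothetical:
#
#   head_vm_list:
#     - name: example-vm-1
#       cpu: 2
#       memMB: 2048
#       diskGB: 20
#       root_ssh_login: true
#     - name: example-vm-2
#       cpu: 1
#       memMB: 1024
#       diskGB: 10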
- name: create Virtual Machines with uvt-kvm
  command: >
    uvt-kvm create {{ item.name }} release={{ ansible_distribution_release }}
    --cpu={{ item.cpu }} --memory={{ item.memMB }} --disk={{ item.diskGB }} --bridge="mgmtbr"
  args:
    creates: "/var/lib/uvtool/libvirt/images/{{ item.name }}.qcow"
  with_items: "{{ head_vm_list }}"
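# The creates: guard keeps this idempotent -- if the uvtool image for a VM
# already exists, uvt-kvm is not run again for it. For a hypothetical entry
# named example-vm-1 with cpu=2, memMB=2048, diskGB=20, the rendered command
# would be roughly:
#   uvt-kvm create example-vm-1 release=xenial --cpu=2 --memory=2048 --disk=20 --bridge="mgmtbr"
# (the release value actually comes from ansible_distribution_release on the head node)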
- name: Have VMs autostart on reboot
  become: yes
  virt:
    name: "{{ item.name }}"
    command: autostart
  with_items: "{{ head_vm_list }}"
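# The virt module talks to libvirt directly, so the python libvirt bindings
# must be available on the head node where these tasks run.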
- name: fetch IP of DHCP harvester
  when: on_maas
  command: docker-ip harvester
  register: harvester_ip
  changed_when: False
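# On MAAS-based installs, docker-ip prints the IP address of the named
# container; its output is used below to reach the DHCP harvester's HTTP
# endpoint. changed_when: False marks this lookup as read-only so it never
# reports a change.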
- name: force a harvest to get VM name resolution
  when: on_maas
  uri:
    url: http://{{ harvester_ip.stdout }}:8954/harvest
    method: POST
- name: wait for VM name resolution
  when: on_maas
  host_dns_check:
    hosts: "{{ head_vm_list | map(attribute='name') | list | to_json }}"
    command_on_fail: "curl -sS --connect-timeout 3 -XPOST http://{{ harvester_ip.stdout }}:8954/harvest"
  register: all_resolved
  until: all_resolved.everyone == "OK"
  retries: 5
  delay: 10
  failed_when: all_resolved.everyone != "OK"
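# host_dns_check is a project-local module rather than stock Ansible: it
# checks that each VM name resolves and reports everyone == "OK" once all of
# them do. The until/retries loop re-runs the check up to 5 times, 10 seconds
# apart, and command_on_fail re-triggers a harvest between attempts.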
- name: wait for VMs to come up
  wait_for:
    host={{ item.name }}
    port=22
  with_items: "{{ head_vm_list }}"
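# Waiting on port 22 ensures each VM is reachable over SSH before the nested
# "ansible services ..." commands below are run against them.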
- name: Create /etc/ansible/hosts file
  become: yes
  template:
    src=ansible_hosts.j2
    dest=/etc/ansible/hosts
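# ansible_hosts.j2 is not shown here; a minimal sketch of the generated
# /etc/ansible/hosts, assuming a "services" group listing the head VMs
# (the group the commands below target):
#
#   [services]
#   example-vm-1
#   example-vm-2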
- name: Verify that we can log into every VM
  command: ansible services -m ping -u ubuntu
  tags:
    - skip_ansible_lint # connectivity check
- name: Have VMs use the apt-cache
  command: ansible services -b -u ubuntu -m lineinfile -a "dest=/etc/apt/apt.conf.d/02apt-cacher-ng create=yes mode=0644 owner=root group=root regexp='^Acquire' line='Acquire::http { Proxy \"http://{{ apt_cacher_name }}:{{ apt_cacher_port | default('3142') }}\"; };'"
  tags:
    - skip_ansible_lint # running a sub job
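# The lineinfile call above writes (roughly) the following to
# /etc/apt/apt.conf.d/02apt-cacher-ng inside each VM, pointing apt at the cache:
#
#   Acquire::http { Proxy "http://<apt_cacher_name>:3142"; };
#
# (3142 is only the default; apt_cacher_port overrides it)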
- name: Update apt cache
  command: ansible services -m apt -b -u ubuntu -a "update_cache=yes cache_valid_time=3600"
  tags:
    - skip_ansible_lint # running a sub job
- name: Update software in all the VMs
  when: run_dist_upgrade
  command: ansible services -m apt -b -u ubuntu -a "upgrade=dist"
  tags:
    - skip_ansible_lint # running a sub job
- name: Create eth0 interface config file for VMs, with DNS config via the resolvconf program
  when: not on_maas
  template:
    src=eth0.cfg.j2
    dest={{ ansible_user_dir }}/eth0.cfg
- name: Copy eth0 interface config file to all VMs
  when: not on_maas
  command: ansible services -b -u ubuntu -m copy -a "src={{ ansible_user_dir }}/eth0.cfg dest=/etc/network/interfaces.d/eth0.cfg owner=root group=root mode=0644"
- name: Restart eth0 interface on all VMs
  when: not on_maas
  command: ansible services -b -u ubuntu -m shell -a "ifdown eth0 ; ifup eth0"
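# ifdown/ifup re-reads /etc/network/interfaces.d/eth0.cfg, so the DNS settings
# from the template (handled via resolvconf, per the task name above) take
# effect without rebooting the VMs.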
- name: Verify that we can log into every VM after restarting network interfaces
  when: not on_maas
  command: ansible services -m ping -u ubuntu
# sshkey is registered in the head-prep task
- name: Enable root ssh login on VMs that require it
  command: ansible {{ item.name }} -b -u ubuntu -m authorized_key -a "user='root' key='{{ sshkey.stdout }}'"
  with_items: "{{ head_vm_list | selectattr('root_ssh_login', 'defined') | list }}"
  tags:
    - skip_ansible_lint # FIXME, ssh key mangling
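# Only VMs whose head_vm_list entry defines root_ssh_login get the key; the
# authorized_key module appends the registered public key to root's
# authorized_keys on those VMs.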
- name: Copy over docker installation playbook and docker apt-key
  copy:
    src="{{ item }}"
    dest="{{ ansible_user_dir }}/{{ item }}"
  with_items:
    - "docker-install-playbook.yml"
    - "docker_apt_key.gpg"
- name: Install docker in VMs that require it
  command: ansible-playbook "{{ ansible_user_dir }}/docker-install-playbook.yml"
  tags:
    - skip_ansible_lint # running a sub job