Create LXD containers instead of VMs

Move the OpenStack support services out of KVM virtual machines and
into LXD containers on the head node:

* The ceilometer, glance, keystone, percona-cluster, nagios,
  neutron-api, nova-cloud-controller, openstack-dashboard and
  rabbitmq-server definitions move from head_vm_list to a new
  head_lxd_list, and the new create-lxd role runs alongside
  create-vms in cord-head-playbook.yml.
* mongodb gets its own container and joins the renamed
  lxd_service_list, replacing the special-cased "deploy mongodb to
  ceilometer VM" task in juju-setup.
* The ad-hoc ansible commands in create-vms target a "vms" group
  instead of "services", and the /etc/ansible/hosts template is
  removed from that role.
* Juju bootstraps on the head node's management bridge address as
  the deploying user, rather than on a dedicated juju VM (which is
  removed from head_vm_list).

Change-Id: I66580af317419285278d59e59d290e90f3d0dd29
diff --git a/cord-head-playbook.yml b/cord-head-playbook.yml
index b60b1fd..84c30c3 100644
--- a/cord-head-playbook.yml
+++ b/cord-head-playbook.yml
@@ -40,6 +40,7 @@
   roles:
     - { role: head-prep, become: yes }
     - { role: config-virt, become: yes }
+    - create-lxd
     - create-vms
 
 - name: Create docker images
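
The create-lxd role itself is not included in this change. As a rough
sketch only -- the `lxc launch` invocation, the image selection, and
the guess that this role takes over the /etc/ansible/hosts template
are all assumptions; only the role name and head_lxd_list come from
this diff -- its tasks/main.yml could look like:

    # Hypothetical sketch; the actual create-lxd role is not part of this diff.
    - name: Create a container for each LXD-hosted service
      command: "lxc launch ubuntu:{{ ansible_distribution_release }} {{ item.name }}"
      args:
        creates: "/var/lib/lxd/containers/{{ item.name }}"
      with_items: "{{ head_lxd_list }}"

    - name: Wait for containers to accept ssh connections
      wait_for:
        host={{ item.name }}
        port=22
      with_items: "{{ head_lxd_list }}"

    - name: Create /etc/ansible/hosts file covering VMs and containers
      become: yes
      template:
        src=ansible_hosts.j2
        dest=/etc/ansible/hosts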
diff --git a/roles/create-vms/tasks/main.yml b/roles/create-vms/tasks/main.yml
index 323c4a0..e950772 100644
--- a/roles/create-vms/tasks/main.yml
+++ b/roles/create-vms/tasks/main.yml
@@ -49,30 +49,24 @@
     port=22
   with_items: "{{ head_vm_list }}"
 
-- name: Create /etc/ansible/hosts file
-  become: yes
-  template:
-    src=ansible_hosts.j2
-    dest=/etc/ansible/hosts
-
 - name: Verify that we can log into every VM
-  command: ansible services -m ping -u ubuntu
+  command: ansible vms -m ping -u ubuntu
   tags:
     - skip_ansible_lint # connectivity check
 
 - name: Have VM's use the apt-cache
-  command: ansible services -b -u ubuntu -m lineinfile -a "dest=/etc/apt/apt.conf.d/02apt-cacher-ng create=yes mode=0644 owner=root group=root regexp='^Acquire' line='Acquire::http { Proxy \"http://{{ apt_cacher_name }}:{{ apt_cacher_port | default('3142') }}\"; };'"
+  command: ansible vms -b -u ubuntu -m lineinfile -a "dest=/etc/apt/apt.conf.d/02apt-cacher-ng create=yes mode=0644 owner=root group=root regexp='^Acquire' line='Acquire::http { Proxy \"http://{{ apt_cacher_name }}:{{ apt_cacher_port | default('3142') }}\"; };'"
   tags:
     - skip_ansible_lint # running a sub job
 
 - name: Update apt cache
-  command: ansible services -m apt -b -u ubuntu -a "update_cache=yes cache_valid_time=3600"
+  command: ansible vms -m apt -b -u ubuntu -a "update_cache=yes cache_valid_time=3600"
   tags:
     - skip_ansible_lint # running a sub job
 
 - name: Update software in all the VMs
   when: run_dist_upgrade
-  command: ansible services -m apt -b -u ubuntu -a "upgrade=dist"
+  command: ansible vms -m apt -b -u ubuntu -a "upgrade=dist"
   tags:
     - skip_ansible_lint # running a sub job
 
@@ -84,15 +78,15 @@
 
 - name: Copy eth0 interface config file to all VMs
   when: not on_maas
-  command: ansible services -b -u ubuntu -m copy -a "src={{ ansible_user_dir }}/eth0.cfg dest=/etc/network/interfaces.d/eth0.cfg owner=root group=root mode=0644"
+  command: ansible vms -b -u ubuntu -m copy -a "src={{ ansible_user_dir }}/eth0.cfg dest=/etc/network/interfaces.d/eth0.cfg owner=root group=root mode=0644"
 
 - name: Restart eth0 interface on all VMs
   when: not on_maas
-  command: ansible services -b -u ubuntu -m shell -a "ifdown eth0 ; ifup eth0"
+  command: ansible vms -b -u ubuntu -m shell -a "ifdown eth0 ; ifup eth0"
 
 - name: Verify that we can log into every VM after restarting network interfaces
   when: not on_maas
-  command: ansible services -m ping -u ubuntu
+  command: ansible vms -m ping -u ubuntu
 
 # sshkey is registered in head-prep task
 - name: Enable root ssh login on VM's that require it
@@ -113,5 +107,3 @@
   command: ansible-playbook "{{ ansible_user_dir }}/docker-install-playbook.yml"
   tags:
     - skip_ansible_lint # running a sub job
-
-
diff --git a/roles/create-vms/templates/ansible_hosts.j2 b/roles/create-vms/templates/ansible_hosts.j2
deleted file mode 100644
index fdf6eae..0000000
--- a/roles/create-vms/templates/ansible_hosts.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-[localhost]
-127.0.0.1 hostname={{ ansible_fqdn }}
-
-[services]
-{% for vm in head_vm_list -%}
-{{ vm.name }}
-{% endfor -%}
-
-[docker]
-{% for vm in head_vm_list | selectattr('docker_path', 'defined') -%}
-{{ vm.name }}
-{% endfor -%}
-
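
Since the ad-hoc plays in create-vms now target a `vms` group, removing
this template implies an equivalent inventory is generated elsewhere,
most plausibly by the new create-lxd role, which sees both host lists.
A hypothetical replacement template, mirroring the deleted one (the
`lxd` group name is invented here for illustration):

    [localhost]
    127.0.0.1 hostname={{ ansible_fqdn }}

    [vms]
    {% for vm in head_vm_list -%}
    {{ vm.name }}
    {% endfor -%}

    [lxd]
    {% for lxd in head_lxd_list -%}
    {{ lxd.name }}
    {% endfor -%}

    [docker]
    {% for vm in head_vm_list | selectattr('docker_path', 'defined') -%}
    {{ vm.name }}
    {% endfor -%}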
diff --git a/roles/juju-setup/tasks/main.yml b/roles/juju-setup/tasks/main.yml
index e0ea5a0..b29cbdf 100644
--- a/roles/juju-setup/tasks/main.yml
+++ b/roles/juju-setup/tasks/main.yml
@@ -34,22 +34,18 @@
 # list of active juju_services names: juju_services.keys()
 
 - name: Add machines to Juju
-  when: "{{ head_vm_list | map(attribute='service') | list | reject('undefined') | map('format_string', '%s.'~site_suffix ) | difference( juju_machines.keys() ) | length }}"
+  when: "{{ head_lxd_list | map(attribute='service') | list | reject('undefined') | map('format_string', '%s.'~site_suffix ) | difference( juju_machines.keys() ) | length }}"
   command: "juju add-machine ssh:{{ item }}"
-  with_items: "{{ head_vm_list | map(attribute='service') | list | reject('undefined') | map('format_string', '%s.'~site_suffix ) | difference( juju_machines.keys() ) }}"
+  with_items: "{{ head_lxd_list | map(attribute='service') | list | reject('undefined') | map('format_string', '%s.'~site_suffix ) | difference( juju_machines.keys() ) }}"
 
 # run this again, so machines will be in the juju_machines list
 - name: Obtain Juju Facts after machine creation
   juju_facts:
 
 - name: Deploy services that are hosted in their own VM
-  when: "{{ vm_service_list | difference( juju_services.keys() ) | length }}"
+  when: "{{ lxd_service_list | difference( juju_services.keys() ) | length }}"
   command: "juju deploy {{ charm_versions[item] | default(item) }} --to {{ juju_machines[item~'.'~site_suffix]['machine_id'] }} --config={{ juju_config_path }}"
-  with_items: "{{ vm_service_list | difference( juju_services.keys() ) }}"
-
-- name: Deploy mongodb to ceilometer VM
-  when: juju_services['mongodb'] is undefined
-  command: "juju deploy {{ charm_versions['mongodb'] | default('mongodb') }} --to {{ juju_machines['ceilometer.'~site_suffix]['machine_id'] }} --config={{ juju_config_path }}"
+  with_items: "{{ lxd_service_list | difference( juju_services.keys() ) }}"
 
 - name: Deploy services that don't have their own VM
   when: "{{ standalone_service_list | difference( juju_services.keys() ) | length }}"
@@ -72,12 +68,12 @@
 
 # 1800s = 30m. Usually takes 10-12m on cloudlab for relations to come up
 # Only checks for first port in list
-- name: Wait for juju services on VM's to have open ports
+- name: Wait for juju services to have open ports
   wait_for:
     host={{ item.name }}
     port={{ item.forwarded_ports[0].int }}
     timeout=1800
-  with_items: "{{ head_vm_list | selectattr('forwarded_ports', 'defined') | list }}"
+  with_items: "{{ head_lxd_list | selectattr('forwarded_ports', 'defined') | list }}"
 
 # secondary wait, as waiting on ports isn't enough. Probably only need one of these...
 # 160*15s = 2400s = 40m max wait
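
The when/with_items expression in "Add machines to Juju" computes the
set of service FQDNs that Juju does not yet manage: it extracts each
entry's service attribute, drops entries that define none, appends the
site DNS suffix (format_string is a local filter plugin, not a Jinja2
builtin), and subtracts the machines already reported by juju_facts.
Stepping through it with an assumed site_suffix of cord.lab and
ceilometer already registered with Juju:

    head_lxd_list | map(attribute='service') | list | reject('undefined')
      => ['ceilometer', 'glance', 'keystone', ...]
    ... | map('format_string', '%s.' ~ site_suffix)
      => ['ceilometer.cord.lab', 'glance.cord.lab', 'keystone.cord.lab', ...]
    ... | difference( juju_machines.keys() )
      => ['glance.cord.lab', 'keystone.cord.lab', ...]

The task is skipped when that final list is empty, and otherwise runs
"juju add-machine ssh:<fqdn>" once per remaining host.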
diff --git a/roles/juju-setup/templates/environments.yaml.j2 b/roles/juju-setup/templates/environments.yaml.j2
index 519adaf..b356d4e 100644
--- a/roles/juju-setup/templates/environments.yaml.j2
+++ b/roles/juju-setup/templates/environments.yaml.j2
@@ -2,8 +2,8 @@
 environments:
     manual:
         type: manual
-        bootstrap-host: juju.{{ site_suffix }}
-        bootstrap-user: ubuntu
+        bootstrap-host: {{ ansible_mgmtbr.ipv4.address }}
+        bootstrap-user: {{ ansible_user_id }}
         default-series: {{ ansible_distribution_release }}
         enable-os-refresh-update: false
         enable-os-upgrade: false
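
With this change Juju bootstraps on the head node itself instead of on
the dedicated juju VM, which is dropped from head_vm_list below:
ansible_mgmtbr.ipv4.address is the Ansible fact for the head node's
mgmtbr bridge, and ansible_user_id is the user running the playbook.
Rendered with illustrative (assumed) values of 10.6.0.1, user "cord",
and a trusty host, the template produces:

    environments:
        manual:
            type: manual
            bootstrap-host: 10.6.0.1
            bootstrap-user: cord
            default-series: trusty
            enable-os-refresh-update: false
            enable-os-upgrade: false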
diff --git a/vars/cord_defaults.yml b/vars/cord_defaults.yml
index 353a9b2..2978c38 100644
--- a/vars/cord_defaults.yml
+++ b/vars/cord_defaults.yml
@@ -46,112 +46,6 @@
   rabbitmq-server: "cs:trusty/rabbitmq-server-42"
 
 head_vm_list:
-  - name: "juju-1"
-    service: "juju"
-    aliases:
-      - "juju"
-    ipv4_last_octet: 10
-    cpu: 1
-    memMB: 2048
-    diskGB: 20
-
-  - name: "ceilometer-1"
-    service: "ceilometer"
-    aliases:
-      - "ceilometer"
-    ipv4_last_octet: 20
-    cpu: 1
-    memMB: 2048
-    diskGB: 20
-    forwarded_ports:
-      - { ext: 8777, int: 8777 }
-
-  - name: "glance-1"
-    service: "glance"
-    aliases:
-      - "glance"
-    ipv4_last_octet: 30
-    cpu: 2
-    memMB: 4096
-    diskGB: 160
-    forwarded_ports:
-      - { ext: 9292, int: 9292 }
-
-  - name: "keystone-1"
-    service: "keystone"
-    aliases:
-      - "keystone"
-    ipv4_last_octet: 40
-    cpu: 2
-    memMB: 4096
-    diskGB: 40
-    forwarded_ports:
-      - { ext: 35357, int: 35357 }
-      - { ext: 4990, int: 4990 }
-      - { ext: 5000, int: 5000 }
-
-  - name: "percona-cluster-1"
-    service: "percona-cluster"
-    aliases:
-      - "percona-cluster"
-    ipv4_last_octet: 50
-    cpu: 2
-    memMB: 4096
-    diskGB: 40
-
-  - name: "nagios-1"
-    service: "nagios"
-    aliases:
-      - "nagios"
-    ipv4_last_octet: 60
-    cpu: 1
-    memMB: 2048
-    diskGB: 20
-    forwarded_ports:
-      - { ext: 3128, int: 80 }
-
-  - name: "neutron-api-1"
-    service: "neutron-api"
-    aliases:
-      - "neutron-api"
-    ipv4_last_octet: 70
-    cpu: 2
-    memMB: 4096
-    diskGB: 40
-    forwarded_ports:
-      - { ext: 9696, int: 9696 }
-
-  - name: "nova-cloud-controller-1"
-    service: "nova-cloud-controller"
-    aliases:
-      - "nova-cloud-controller"
-    ipv4_last_octet: 80
-    cpu: 2
-    memMB: 4096
-    diskGB: 40
-    forwarded_ports:
-      - { ext: 8774, int: 8774 }
-
-  - name: "openstack-dashboard-1"
-    service: "openstack-dashboard"
-    aliases:
-      - "openstack-dashboard"
-    ipv4_last_octet: 90
-    cpu: 1
-    memMB: 2048
-    diskGB: 20
-    forwarded_ports:
-      - { ext: 8080, int: 80 }
-
-  - name: "rabbitmq-server-1"
-    service: "rabbitmq-server"
-    aliases:
-      - "rabbitmq-server"
-    ipv4_last_octet: 100
-    cpu: 2
-    memMB: 4096
-    diskGB: 40
-
   - name: "onos-cord-1"
     aliases:
       - "onos-cord"
@@ -170,17 +64,84 @@
     diskGB: 40
     docker_path: "cord"
 
-#  - name: "xos-1"
-#    aliases:
-#      - "xos"
-#    ipv4_last_octet: 130
-#    cpu: 2
-#    memMB: 4096
-#    diskGB: 40
-#    docker_path: 'service-profile/cord-pod'
+head_lxd_list:
+  - name: "ceilometer-1"
+    service: "ceilometer"
+    aliases:
+      - "ceilometer"
+    ipv4_last_octet: 20
+    forwarded_ports:
+      - { ext: 8777, int: 8777 }
 
+  - name: "glance-1"
+    service: "glance"
+    aliases:
+      - "glance"
+    ipv4_last_octet: 30
+    forwarded_ports:
+      - { ext: 9292, int: 9292 }
 
-vm_service_list:
+  - name: "keystone-1"
+    service: "keystone"
+    aliases:
+      - "keystone"
+    ipv4_last_octet: 40
+    forwarded_ports:
+      - { ext: 35357, int: 35357 }
+      - { ext: 4990, int: 4990 }
+      - { ext: 5000, int: 5000 }
+
+  - name: "percona-cluster-1"
+    service: "percona-cluster"
+    aliases:
+      - "percona-cluster"
+    ipv4_last_octet: 50
+
+  - name: "nagios-1"
+    service: "nagios"
+    aliases:
+      - "nagios"
+    ipv4_last_octet: 60
+    forwarded_ports:
+      - { ext: 3128, int: 80 }
+
+  - name: "neutron-api-1"
+    service: "neutron-api"
+    aliases:
+      - "neutron-api"
+    ipv4_last_octet: 70
+    forwarded_ports:
+      - { ext: 9696, int: 9696 }
+
+  - name: "nova-cloud-controller-1"
+    service: "nova-cloud-controller"
+    aliases:
+      - "nova-cloud-controller"
+    ipv4_last_octet: 80
+    forwarded_ports:
+      - { ext: 8774, int: 8774 }
+
+  - name: "openstack-dashboard-1"
+    service: "openstack-dashboard"
+    aliases:
+      - "openstack-dashboard"
+    ipv4_last_octet: 90
+    forwarded_ports:
+      - { ext: 8080, int: 80 }
+
+  - name: "rabbitmq-server-1"
+    service: "rabbitmq-server"
+    aliases:
+      - "rabbitmq-server"
+    ipv4_last_octet: 100
+
+  - name: "mongodb-1"
+    service: "mongodb"
+    aliases:
+      - "mongodb"
+    ipv4_last_octet: 110
+
+lxd_service_list:
   - ceilometer
   - glance
   - keystone
@@ -190,7 +151,7 @@
   - nova-cloud-controller
   - openstack-dashboard
   - rabbitmq-server
-
+  - mongodb
 
 standalone_service_list:
   - ntp
@@ -244,4 +205,3 @@
 
   - name: ntp
     relations: [ "nova-compute", ]
-
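
Tracing one new entry end to end: mongodb-1 is now an ordinary member
of head_lxd_list and lxd_service_list, so the generic add-machine and
deploy tasks in juju-setup replace the deleted mongodb special case.
With site_suffix assumed to be cord.lab, no mongodb pin in
charm_versions, and Juju assigning machine id 9, the rendered commands
would be roughly:

    juju add-machine ssh:mongodb.cord.lab
    juju deploy mongodb --to 9 --config={{ juju_config_path }}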