Make it run; fix SSH key issues
Fix user/root differences; lint
Lint; make 'apt-get dist-upgrade' an option; formatting
Add more missing things
Remove extraneous keystone password
Remove database relation to neutron-gateway, which is no longer required
Fix roles so async resumption only happens when those VMs exist
Work around old Jinja version
Add missing compute relations and variables
Whitespace fixes
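
The 'apt-get dist-upgrade' behavior is gated by the new `run_dist_upgrade`
variable (true in opencloud_defaults, false in the cord defaults); for
example, to force it on for a run:

    ansible-playbook opencloud-multi-playbook.yml --extra-vars="run_dist_upgrade=true"
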
Change-Id: I323806ea2594bb70fb95a6d11a489625f59ac0da
diff --git a/opencloud-multi-playbook.yml b/opencloud-multi-playbook.yml
index 0468a62..178ea11 100644
--- a/opencloud-multi-playbook.yml
+++ b/opencloud-multi-playbook.yml
@@ -1,39 +1,51 @@
---
# Install an OpenCloud site, with multi-node Juju-configured OpenStack
-- name: Include Configuration
+- name: Include vars
hosts: all
tasks:
- - include_vars: vars/opencloud_defaults.yml
- - include_vars: vars/aztest.yml
- - include_vars: vars/aztest_keystone.yml
+ - name: Include variables
+ include_vars: "{{ item }}"
+ with_items:
+ - vars/opencloud_defaults.yml
+ - vars/aztest.yml
+ - vars/aztest_keystone.yml
-- name: Prep systems, and enable virtualization
+- name: Turn on virtualization
hosts: all
become: yes
- roles:
- - common-prep
- dell-virt
-- name: DNS Server Setup
+- name: Check prerequisites
+ hosts: all
+ roles:
+ - prereqs-common
+
+- name: DNS Server and apt-cacher-ng Setup
hosts: head
become: yes
roles:
- dns-nsd
- dns-unbound
+ - apt-cacher-ng
-- name: Configure all hosts to use DNS server
+- name: Use the new DNS server, prep systems
hosts: all
become: yes
roles:
- dns-configure
+ - common-prep
-- name: Configure head node, create VM's, and start Juju setup
+- name: Configure head node, configure virtualization
hosts: head
roles:
- { role: head-prep, become: yes }
- { role: config-virt, become: yes }
- - juju-user-prep
+
+- name: Create VM's, configure Juju, install XOS
+ hosts: head
+ roles:
+ - create-vms
- juju-setup
- name: Configure compute nodes
@@ -42,8 +54,8 @@
roles:
- compute-prep
-- name: Configure Openstack using Juju
+- name: Deploy compute nodes
hosts: head
roles:
- - juju-openstack-config
+ - juju-compute-setup
diff --git a/roles/common-prep/tasks/main.yml b/roles/common-prep/tasks/main.yml
index 5026631..e2368e9 100644
--- a/roles/common-prep/tasks/main.yml
+++ b/roles/common-prep/tasks/main.yml
@@ -1,12 +1,20 @@
---
# file: roles/common-prep/tasks/main.yml
-- stat:
- path=/var/run/reboot-required
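+# Gated by `run_dist_upgrade` (see vars/*_defaults.yml); this is the
+# equivalent of running `apt-get dist-upgrade` on the node.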
+- name: Upgrade system to current using apt
+ when: run_dist_upgrade
+ apt:
+ upgrade: dist
+ update_cache: yes
+ cache_valid_time: 3600
+
+- name: Determine if a reboot is required
+ when: run_dist_upgrade
+ stat: path=/var/run/reboot-required
register: reboot_required
-- name: reboot if required
- when: reboot-required.exists is defined
+- name: Perform a reboot if required
+ when: run_dist_upgrade and reboot_required.stat.exists
debug: msg="System will reboot"
notify:
- restart host
@@ -24,10 +32,10 @@
- name: Remove annoying default editors
apt:
- pkg={{ item }}
- state=absent
- update_cache=yes
- cache_valid_time=3600
+ pkg: "{{ item }}"
+ state: absent
+ update_cache: yes
+ cache_valid_time: 3600
with_items:
- nano
- jove
@@ -41,3 +49,4 @@
copy:
src=tmux.conf
dest="{{ ansible_user_dir }}/.tmux.conf"
+
diff --git a/roles/compute-prep/tasks/main.yml b/roles/compute-prep/tasks/main.yml
index 1ddee39..975f2ec 100644
--- a/roles/compute-prep/tasks/main.yml
+++ b/roles/compute-prep/tasks/main.yml
@@ -3,36 +3,36 @@
- name: Install packages
apt:
- name={{ item }}
- state=latest
- update_cache=yes
- cache_valid_time=3600
+ name: "{{ item }}"
+ state: present
+ update_cache: yes
+ cache_valid_time: 3600
with_items:
- python-yaml
- name: Add ubuntu user
user:
- name=ubuntu
- groups=admin
+ name: ubuntu
+ groups: adm
- name: Add head node ubuntu user key
authorized_key:
- user=ubuntu
- key="{{ hostvars[groups['head'][0]]['sshkey']['stdout'] }}"
+ user: ubuntu
+ key: "{{ hostvars[groups['head'][0]]['sshkey']['stdout'] }}"
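+# (hostvars lets this play read the `sshkey` fact registered on the head
+# node by the head-prep role)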
- name: Add head node root user key
authorized_key:
- user=root
- key="{{ hostvars[groups['head'][0]]['sshkey']['stdout'] }}"
+ user: root
+ key: "{{ hostvars[groups['head'][0]]['sshkey']['stdout'] }}"
- name: Add route via /etc/rc.local
+ when: not on_maas
template:
src=rc.local.j2
dest=/etc/rc.local
mode=0755
notify:
- run rc.local
- when: not on_maas
- name: Create /var/lib/nova dir
file:
diff --git a/roles/config-virt/tasks/main.yml b/roles/config-virt/tasks/main.yml
index 0f0ed05..66bf5d7 100644
--- a/roles/config-virt/tasks/main.yml
+++ b/roles/config-virt/tasks/main.yml
@@ -23,17 +23,17 @@
# note: this isn't idempotent, so it may need manual fixing if the config changes
- name: define libvirt networks IP/DHCP/DNS settings
+ when: not on_maas
virt_net:
name=xos-{{ item.name }}
command=define
xml='{{ lookup("template", "virt_net.xml.j2") }}'
with_items: '{{ virt_nets }}'
- when: not on_maas
- name: collect libvirt network facts after defining new network
+ when: not on_maas
virt_net:
command=facts
- when: not on_maas
- name: start libvirt networks
when: not on_maas and ansible_libvirt_networks["xos-{{ item.name }}"].state != "active"
diff --git a/roles/create-vms/tasks/main.yml b/roles/create-vms/tasks/main.yml
index 038a5ce..266bad8 100644
--- a/roles/create-vms/tasks/main.yml
+++ b/roles/create-vms/tasks/main.yml
@@ -10,23 +10,24 @@
- name: Have VMs autostart on reboot
become: yes
virt:
- name={{ item.name }}
- command=autostart
+ name: "{{ item.name }}"
+ command: autostart
with_items: "{{ head_vm_list }}"
- name: fetch IP of DHCP harvester
+ when: on_maas
command: docker-ip harvester
register: harvester_ip
changed_when: False
- when: on_maas
- name: force a harvest to get VM name resolution
+ when: on_maas
uri:
url: http://{{ harvester_ip.stdout }}:8954/harvest
method: POST
- when: on_maas
- name: wait for VM name resolution
+ when: on_maas
host_dns_check:
hosts: "{{ head_vm_list | map(attribute='name') | list }}"
command_on_fail: "curl -sS --connect-timeout 3 -XPOST http://{{ harvester_ip.stdout }}:8954/harvest"
@@ -35,7 +36,6 @@
retries: 5
delay: 10
failed_when: all_resolved.everyone != "OK"
- when: on_maas
- name: wait for VM's to come up
wait_for:
@@ -56,25 +56,29 @@
command: ansible services -b -u ubuntu -m lineinfile -a "dest=/etc/apt/apt.conf.d/02apt-cacher-ng create=yes mode=0644 owner=root group=root regexp='^Acquire' line='Acquire::http { Proxy \"http://{{ apt_cacher_name }}:{{ apt_cacher_port | default('3142') }}\"; };'"
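+# On each VM, the rendered 02apt-cacher-ng file looks like, e.g. (with the
+# default cache name and port):
+#   Acquire::http { Proxy "http://apt-cache:3142"; };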
- name: Update apt cache
- command: ansible services -m apt -b -u ubuntu -a "update_cache=yes"
+ command: ansible services -m apt -b -u ubuntu -a "update_cache=yes cache_valid_time=3600"
+
+- name: Update software in all the VMs
+ when: run_dist_upgrade
+ command: ansible services -m apt -b -u ubuntu -a "upgrade=dist"
- name: Create VM's eth0 interface config file for DNS config via resolvconf program
+ when: not on_maas
template:
src=eth0.cfg.j2
dest={{ ansible_user_dir }}/eth0.cfg
- when: not on_maas
- name: Copy eth0 interface config file to all VMs
- command: ansible services -b -u ubuntu -m copy -a "src={{ ansible_user_dir }}/eth0.cfg dest=/etc/network/interfaces.d/eth0.cfg owner=root group=root mode=0644"
when: not on_maas
+ command: ansible services -b -u ubuntu -m copy -a "src={{ ansible_user_dir }}/eth0.cfg dest=/etc/network/interfaces.d/eth0.cfg owner=root group=root mode=0644"
- name: Restart eth0 interface on all VMs
- command: ansible services -b -u ubuntu -m shell -a "ifdown eth0 ; ifup eth0"
when: not on_maas
+ command: ansible services -b -u ubuntu -m shell -a "ifdown eth0 ; ifup eth0"
- name: Verify that we can log into every VM after restarting network interfaces
- command: ansible services -m ping -u ubuntu
when: not on_maas
+ command: ansible services -m ping -u ubuntu
# sshkey is registered in head-prep task
- name: Enable root ssh login on VM's that require it
@@ -91,3 +95,4 @@
- name: Install docker in VM's that require it
command: ansible-playbook "{{ ansible_user_dir }}/docker-install-playbook.yml"
+
diff --git a/roles/dns-nsd/tasks/main.yml b/roles/dns-nsd/tasks/main.yml
index 0df2533..bd87f99 100644
--- a/roles/dns-nsd/tasks/main.yml
+++ b/roles/dns-nsd/tasks/main.yml
@@ -1,8 +1,8 @@
---
#file: roles/dns-nsd/tasks/main.yml
-# OS specific vars
-- include_vars: "{{ ansible_os_family }}.yml"
+- name: Include OS specific vars
+ include_vars: "{{ ansible_os_family }}.yml"
# Debian specific installation
- include: nsd-Debian.yml
diff --git a/roles/dns-unbound/tasks/main.yml b/roles/dns-unbound/tasks/main.yml
index 2666538..03a80d3 100644
--- a/roles/dns-unbound/tasks/main.yml
+++ b/roles/dns-unbound/tasks/main.yml
@@ -1,8 +1,8 @@
---
#file: roles/dns-unbound/tasks/main.yml
-# OS specific vars
-- include_vars: "{{ ansible_os_family }}.yml"
+- name: Include OS specific vars
+ include_vars: "{{ ansible_os_family }}.yml"
# Debian specific installation
- include: unbound-Debian.yml
diff --git a/roles/docker-compose/tasks/main.yml b/roles/docker-compose/tasks/main.yml
index 84d0edd..a623cf1 100644
--- a/roles/docker-compose/tasks/main.yml
+++ b/roles/docker-compose/tasks/main.yml
@@ -1,7 +1,15 @@
---
# docker-compose/tasks/main.yml
+# The following two tests are equivalent, and both evaluate to 0 when the
+# `onos-cord-1` VM isn't set to be created, but the `equalto` test only
+# exists in Jinja v2.7.4 and later, which is newer than what ships with
+# Ubuntu 14.04 (v2.7.2).
+#
+# {{ head_vm_list | selectattr('name', 'equalto', 'onos-cord-1') | list | length }}
+# {{ head_vm_list | map(attribute='name') | list | intersect(['onos-cord-1']) | list | length }}
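#
# With `onos-cord-1` present in head_vm_list, either expression evaluates to
# 1 (truthy) and the guarded tasks run; otherwise it evaluates to 0 and they
# are skipped.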
+
- name: Wait for onos_setup_playbook to complete
+ when: "{{ head_vm_list | map(attribute='name') | list | intersect(['onos-cord-1']) | list | length }}"
async_status: jid={{ onos_setup_playbook.ansible_job_id }}
register: onos_setup_playbook_result
until: onos_setup_playbook_result.finished
@@ -9,18 +17,22 @@
retries: 120
- name: Copy SSL Certs to ONOS so docker-compose can find it
+ when: "{{ head_vm_list | map(attribute='name') | list | intersect(['onos-cord-1']) | list | length }}"
command: ansible onos-cord-1 -u ubuntu -m copy \
-a "src=/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt dest=~/cord/xos-certs.crt"
- name: Build ONOS image with docker-compose
+ when: "{{ head_vm_list | map(attribute='name') | list | intersect(['onos-cord-1']) | list | length }}"
command: ansible onos-cord-1 -u ubuntu -m command \
-a "docker-compose build chdir=cord"
- name: Start ONOS
+ when: "{{ head_vm_list | map(attribute='name') | list | intersect(['onos-cord-1']) | list | length }}"
command: ansible onos-cord-1:onos-fabric-1 -u ubuntu -m command \
-a "docker-compose up -d chdir=cord"
- name: Wait for xos_setup_playbook to complete
+ when: "{{ head_vm_list | map(attribute='name') | list | intersect(['xos-1']) | list | length }}"
async_status: jid={{ xos_setup_playbook.ansible_job_id }}
register: xos_setup_playbook_result
until: xos_setup_playbook_result.finished
@@ -28,6 +40,7 @@
retries: 120
- name: Copy admin-openrc.sh into XOS container
+ when: "{{ head_vm_list | map(attribute='name') | list | intersect(['xos-1']) | list | length }}"
command: ansible xos-1 -u ubuntu -m copy \
-a "src=~/admin-openrc.sh dest={{ service_profile_repo_dest }}/{{ xos_configuration }}"
diff --git a/roles/head-prep/tasks/main.yml b/roles/head-prep/tasks/main.yml
index a858ce3..b2b300e 100644
--- a/roles/head-prep/tasks/main.yml
+++ b/roles/head-prep/tasks/main.yml
@@ -42,7 +42,6 @@
generate_ssh_key=yes
groups="libvirtd" append=yes
-# FIXME: this should be changed per http://docs.ansible.com/ansible/playbooks_lookups.html#intro-to-lookups-getting-file-contents
- name: Register public key in variable
shell: cat {{ ansible_user_dir }}/.ssh/id_rsa.pub
register: sshkey
@@ -66,19 +65,20 @@
owner={{ ansible_user_id }} mode=0644
- name: Copy node key (MaaS)
+ when: on_maas
copy:
src={{ maas_node_key }}
dest={{ ansible_user_dir }}/node_key
owner={{ ansible_user }}
mode=0600
remote_src=True
- when: on_maas
- name: Copy node key (without MaaS)
+ when: not on_maas
copy:
src={{ ansible_user_dir }}/.ssh/id_rsa
dest={{ ansible_user_dir }}/node_key
owner={{ ansible_user }}
mode=0600
remote_src=True
- when: not on_maas
+
diff --git a/roles/juju-compute-setup/defaults/main.yml b/roles/juju-compute-setup/defaults/main.yml
new file mode 100644
index 0000000..2c0208b
--- /dev/null
+++ b/roles/juju-compute-setup/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+# juju-compute-setup/defaults/main.yml
+
+# note: juju_config_path and charm_versions are also set in
+# `juju-setup/defaults/main.yml`. Keep these in sync.
+
+juju_config_path: /usr/local/src/juju_config.yml
+charm_versions: {}
+
diff --git a/roles/juju-compute-setup/tasks/main.yml b/roles/juju-compute-setup/tasks/main.yml
index f12ce43..0222775 100644
--- a/roles/juju-compute-setup/tasks/main.yml
+++ b/roles/juju-compute-setup/tasks/main.yml
@@ -9,6 +9,12 @@
# list of active juju_machines names: juju_machines.keys()
# list of active juju_services names: juju_services.keys()
+# FIXME: Need to add firewall rules to the head node, or compute machines
+# won't be able to talk to the head node VM's. The iptables commands look
+# like this:
+#
+# iptables -A FORWARD -i eth0 -o mgmtbr -s <extnet> -d <vmnet> -j ACCEPT
+# iptables -A FORWARD -i mgmtbr -o eth0 -s <vmnet> -d <extnet> -j ACCEPT
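#
# If these were ansible-ified, an (untested) sketch might look like the
# following, with `ext_net` and `vm_net` as hypothetical variables:
#
# - name: Allow forwarding from external net to VM net
#   become: yes
#   command: "iptables -A FORWARD -i eth0 -o mgmtbr -s {{ ext_net }} -d {{ vm_net }} -j ACCEPT"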
+
- name: Add machines to Juju
command: "juju add-machine ssh:{{ item }}"
with_items: "{{ groups['compute'] | difference( juju_machines.keys() ) }}"
diff --git a/roles/juju-openstack-config/defaults/main.yml b/roles/juju-openstack-config/defaults/main.yml
deleted file mode 100644
index 4a0158f..0000000
--- a/roles/juju-openstack-config/defaults/main.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-# roles/juju-setup/defaults/main.yml
-
-openstack_version: kilo
-
-openstack_cfg_path: /usr/local/src/openstack.cfg
-
-compute_relations:
- - name: nova-compute
- relations: [ "glance", "nova-cloud-controller", "neutron-openvswitch", "nagios", "nrpe", ]
-
- - name: "nova-compute:shared-db"
- relations: [ "mysql:shared-db", ]
-
- - name: "nova-compute:amqp"
- relations: [ "rabbitmq-server:amqp", ]
-
- - name: ntp
- relations: [ "nova-compute", ]
-
diff --git a/roles/juju-openstack-config/files/network-setup.sh b/roles/juju-openstack-config/files/network-setup.sh
deleted file mode 100755
index 05e4c12..0000000
--- a/roles/juju-openstack-config/files/network-setup.sh
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-source ~/admin-openrc.sh
-
-function create-flat-net {
- NAME=$1
- neutron net-show $NAME-net 2>&1 > /dev/null
- if [ "$?" -ne 0 ]
- then
- neutron net-create --provider:physical_network=$NAME --provider:network_type=flat --shared $NAME-net
- fi
-}
-
-function create-subnet {
- NAME=$1
- CIDR=$2
- GW=$3
-
- neutron subnet-show $NAME-net 2>&1 > /dev/null
- if [ "$?" -ne 0 ]
- then
- neutron subnet-create $NAME-net --name $NAME-net $CIDR --gateway=$GW --disable-dhcp
- fi
-}
-
-function create-subnet-no-gateway {
- NAME=$1
- CIDR=$2
-
- neutron subnet-show $NAME-net 2>&1 > /dev/null
- if [ "$?" -ne 0 ]
- then
- neutron subnet-create $NAME-net --name $NAME-net $CIDR --no-gateway --disable-dhcp
- fi
-}
-
-create-flat-net nat
-create-subnet nat 172.16.0.0/16 172.16.0.1
-
-create-flat-net ext
diff --git a/roles/juju-openstack-config/tasks/main.yml b/roles/juju-openstack-config/tasks/main.yml
deleted file mode 100644
index 8075013..0000000
--- a/roles/juju-openstack-config/tasks/main.yml
+++ /dev/null
@@ -1,51 +0,0 @@
----
-# roles/juju-openstack-config/main/tasks.yml
-
-- name: add compute nodes
- command: juju add-machine ssh:ubuntu@{{ item }}
- with_items: "{{ groups['compute'] | difference( juju_machines.keys() ) }}"
- register: added_compute_nodes
-
-# run this again, so add-machine items will be in the juju_compute_nodes list
-- name: Obtain Juju Facts after adding compute nodes
- when: added_compute_nodes
- juju_facts:
-
-# the crazy [ ] in the with-items is so that jinja compares arrays of strings,
-# rather than strings of characters
-- name: add-unit nova-compute to first compute node
- command: "juju deploy nova-compute --to {{ juju_machines[item]['machine_id'] }} --config={{ openstack_cfg_path }}"
- with_items: "{{ [ groups['compute'][0] ] | difference( juju_compute_nodes.keys() ) }}"
- register: added_first_nova_compute
-
-# run this again, so first nova compute will be in the juju_compute_nodes list
-- name: Obtain Juju Facts nova-compute deploy
- juju_facts:
- when: added_first_nova_compute
-
-- name: add-unit nova-compute to other compute nodes
- command: "juju add-unit nova-compute --to {{ juju_machines[item]['machine_id'] }}"
- with_items: "{{ groups['compute'] | difference( juju_compute_nodes.keys() ) }}"
-
-- name: Create relations to compute
- command: "juju add-relation '{{ item.0.name }}' '{{ item.1 }}'"
- register: compute_relation
- failed_when: "compute_relation|failed and 'relation already exists' not in compute_relation.stderr"
- with_subelements:
- - "{{ compute_relations }}"
- - relations
-
-# need to ansible-ify these
-- name: Copy credentials file to nova-cloud-controller
- command: "scp {{ ansible_user_dir }}/admin-openrc.sh ubuntu@nova-cloud-controller:"
-
-- name: Copy network setup script
- become: yes
- copy:
- src=network-setup.sh
- dest=/usr/local/src/network-setup.sh
- mode=0644 owner=root
-
-- name: Run network setup script
- command: ansible nova-cloud-controller-1 -m script -u ubuntu -a "/usr/local/src/network-setup.sh"
-
diff --git a/roles/juju-setup/defaults/main.yml b/roles/juju-setup/defaults/main.yml
index bbbb92e..c6ad753 100644
--- a/roles/juju-setup/defaults/main.yml
+++ b/roles/juju-setup/defaults/main.yml
@@ -1,6 +1,10 @@
---
+# juju-setup/defaults/main.yml
juju_config_name: opencloud
-juju_config_path: /usr/local/src/juju_config.yml
+# note: juju_config_path and charm_versions are also set in
+# `juju-compute-setup/defaults/main.yml`. Keep these in sync.
+
+juju_config_path: /usr/local/src/juju_config.yml
charm_versions: {}
diff --git a/roles/juju-setup/tasks/main.yml b/roles/juju-setup/tasks/main.yml
index 0bb3d18..6bd790d 100644
--- a/roles/juju-setup/tasks/main.yml
+++ b/roles/juju-setup/tasks/main.yml
@@ -94,7 +94,7 @@
register: result
until: result | success
retries: 40
- delay: 15
+ delay: 15
- name: Copy cert to system location
become: yes
diff --git a/roles/juju-setup/templates/opencloud_juju_config.yml.j2 b/roles/juju-setup/templates/opencloud_juju_config.yml.j2
index 7911828..4345b19 100644
--- a/roles/juju-setup/templates/opencloud_juju_config.yml.j2
+++ b/roles/juju-setup/templates/opencloud_juju_config.yml.j2
@@ -45,7 +45,6 @@
nova-compute:
virt-type: kvm
config-flags: "firewall_driver=nova.virt.firewall.NoopFirewallDriver"
- disable-neutron-security-groups: "True"
openstack-origin: "cloud:trusty-kilo"
nrpe: {}
diff --git a/roles/xos-vm-install/files/xos-setup-devel-playbook.yml b/roles/xos-vm-install/files/xos-setup-devel-playbook.yml
new file mode 100644
index 0000000..517f77d
--- /dev/null
+++ b/roles/xos-vm-install/files/xos-setup-devel-playbook.yml
@@ -0,0 +1,78 @@
+---
+- hosts: xos-1
+ remote_user: ubuntu
+
+ tasks:
+ - name: Include configuration vars
+ include_vars: xos-setup-vars.yml
+
+ - name: Install prerequisites
+ apt:
+ name={{ item }}
+ update_cache=yes
+ cache_valid_time=3600
+ become: yes
+ with_items:
+ - git
+ - make
+ - curl
+ - python-novaclient
+ - python-neutronclient
+ - python-keystoneclient
+ - python-glanceclient
+
+ - name: Clone XOS repo
+ git:
+ repo={{ xos_repo_url }}
+ dest={{ xos_repo_dest }}
+ version={{ xos_repo_branch }}
+ force=yes
+
+ - name: Clone service-profile repo
+ git:
+ repo={{ service_profile_repo_url }}
+ dest={{ service_profile_repo_dest }}
+ version={{ service_profile_repo_branch }}
+ force=yes
+
+ - name: Copy over SSH keys
+ copy:
+ src=~/.ssh/{{ item }}
+ dest={{ service_profile_repo_dest }}/{{ xos_configuration }}/
+ owner={{ ansible_user_id }} mode=0600
+ with_items:
+ - id_rsa
+ - id_rsa.pub
+
+ - name: copy over node_key
+ copy:
+ src={{ node_private_key }}
+ dest={{ service_profile_repo_dest }}/{{ xos_configuration }}/node_key
+ owner={{ ansible_user_id }} mode=0600
+
+ - name: Download Glance VM images
+ get_url:
+ url={{ item.url }}
+ checksum={{ item.checksum }}
+ dest={{ service_profile_repo_dest }}/{{ xos_configuration }}/images/{{ item.name }}.img
+ with_items: "{{ xos_images }}"
+
+ - name: Pull database image
+ become: yes
+ command: docker pull {{ item }}
+ with_items:
+ - xosproject/xos-postgres
+
+ - name: Pull docker images for XOS
+ when: not xos_container_rebuild
+ become: yes
+ command: docker pull {{ item }}
+ with_items:
+ - xosproject/xos-base
+
+ - name: Rebuild XOS containers
+ when: xos_container_rebuild
+ command: make {{ item }}
+ chdir="{{ xos_repo_dest }}/containers/xos/"
+ with_items:
+ - base
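+
+# This playbook is run asynchronously against the xos-1 VM (see
+# roles/docker-compose, which waits on the registered `xos_setup_playbook`
+# job before copying admin-openrc.sh into the XOS container).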
diff --git a/vars/aztest.yml b/vars/aztest.yml
index c56f092..13702f4 100644
--- a/vars/aztest.yml
+++ b/vars/aztest.yml
@@ -25,6 +25,12 @@
ns:
- { name: ns1 }
nodelist: head_vm_list
+ aliases:
+ - { name: "ns1" , dest: "head" }
+ - { name: "ns" , dest: "head" }
+ - { name: "apt-cache" , dest: "head" }
+
+name_on_public_interface: head
# If true, unbound listens on the head node's `ansible_default_ipv4` interface
unbound_listen_on_default: True
diff --git a/vars/cord.yml b/vars/cord.yml
index 07136e3..c04f7a0 100644
--- a/vars/cord.yml
+++ b/vars/cord.yml
@@ -33,7 +33,6 @@
# If true, unbound listens on the head node's `ansible_default_ipv4` interface
unbound_listen_on_default: True
-
xos_images:
- name: "trusty-server-multi-nic"
url: "http://www.vicci.org/opencloud/trusty-server-cloudimg-amd64-disk1.img"
@@ -47,3 +46,5 @@
# turn this on, or override when running playbook with --extra-vars="on_cloudlab=True"
on_cloudlab: False
+
+
diff --git a/vars/cord_defaults.yml b/vars/cord_defaults.yml
index 12ce207..b91f082 100644
--- a/vars/cord_defaults.yml
+++ b/vars/cord_defaults.yml
@@ -2,7 +2,9 @@
# vars/cord_defaults.yml
# turn this off, or override when running playbook with --extra-vars="on_maas=False"
-on_maas: True
+on_maas: true
+
+run_dist_upgrade: false
maas_node_key: /etc/maas/ansible/id_rsa
diff --git a/vars/cord_single_defaults.yml b/vars/cord_single_defaults.yml
index d16be78..05caf85 100644
--- a/vars/cord_single_defaults.yml
+++ b/vars/cord_single_defaults.yml
@@ -3,7 +3,9 @@
# For a single-node case, we don't expect the node to already have been
# provisioned by CORD MaaS. It's just Ubuntu 14.04.
-on_maas: False
+on_maas: false
+
+run_dist_upgrade: false
openstack_version: kilo
diff --git a/vars/opencloud_defaults.yml b/vars/opencloud_defaults.yml
index 32ca9a8..3f9bf90 100644
--- a/vars/opencloud_defaults.yml
+++ b/vars/opencloud_defaults.yml
@@ -1,10 +1,21 @@
---
# vars/opencloud_defaults.yml
+on_maas: false
+
+run_dist_upgrade: true
+
openstack_version: kilo
juju_config_name: opencloud
+apt_cacher_name: apt-cache
+
+xos_images:
+ - name: "trusty-server-multi-nic"
+ url: "http://www.vicci.org/opencloud/trusty-server-cloudimg-amd64-disk1.img"
+ checksum: "sha256:c2d0ffc937aeb96016164881052a496658efeb98959dc68e73d9895c5d9920f7"
+
charm_versions: {}
head_vm_list:
@@ -152,7 +163,7 @@
relations: [ "percona-cluster", "keystone", "nrpe", ]
- name: neutron-gateway
- relations: [ "neutron-api", "nova-cloud-controller", "percona-cluster", "nrpe", ]
+ relations: [ "neutron-api", "nova-cloud-controller", "nrpe", ]
- name: "neutron-gateway:amqp"
relations: [ "rabbitmq-server:amqp", ]
@@ -184,4 +195,16 @@
- name: "ceilometer:ceilometer-service"
relations: [ "ceilometer-agent:ceilometer-service", ]
+compute_relations:
+ - name: nova-compute
+ relations: [ "ceilometer-agent", "glance", "nova-cloud-controller", "nagios", "nrpe", ]
+
+ - name: "nova-compute:shared-db"
+ relations: [ "percona-cluster:shared-db", ]
+
+ - name: "nova-compute:amqp"
+ relations: [ "rabbitmq-server:amqp", ]
+
+ - name: ntp
+ relations: [ "nova-compute", ]
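+
+# Each entry above is expanded (with_subelements-style) into one
+# `juju add-relation '<name>' '<relation>'` command per listed relation;
+# e.g. the first entry would yield:
+#   juju add-relation 'nova-compute' 'ceilometer-agent'
+#   juju add-relation 'nova-compute' 'glance'
+#   ...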