Merge remote-tracking branch 'origin/master' into roles
diff --git a/README.md b/README.md
index aaaa650..63c3914 100644
--- a/README.md
+++ b/README.md
@@ -26,12 +26,17 @@
for the install. As mentioned above, install Ansible on the head node and check out this repository.
The playbooks assume that a bridge called *mgmtbr* on the head node is connected to the management
-network. Also there must be a DHCP server on the management network to hand out IP addresses
-to VMs; if you need to set up `dnsmasq` to do this, take a look at [this example](files/etc/dnsmasq.d/cord).
+network. Note that there must also be a DHCP server on the management network that:
+ 1. hands out IP addresses to VMs connected to *mgmtbr*
+ 2. resolves VM names to IP addresses
+ 3. is configured as a resolver on the head and compute nodes
+
+If you need to set up `dnsmasq` to do this,
+take a look at [this example](files/etc/dnsmasq.d/cord).
Then follow these steps:
* Edit *cord-hosts* with the DNS names of your compute nodes, and update the *ansible_ssh_user* variable appropriately.
- Before proceeding, this needs to work on the head node: `ansible -i cord-hosts compute -m ping`
+ Before proceeding, this needs to work on the head node: `ansible -i cord-hosts all -m ping`
* Run: `ansible-playbook -i cord-hosts cord-setup.yml`
* After the playbook finishes, wait for the OpenStack services to come up. You can check on their progress
using `juju status --format=tabular`
diff --git a/bootstrap.sh b/bootstrap.sh
new file mode 100755
index 0000000..3bfebe3
--- /dev/null
+++ b/bootstrap.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+sudo apt-get update
+sudo apt-get install -y software-properties-common git mosh
+sudo add-apt-repository -y ppa:ansible/ansible
+sudo apt-get update
+sudo apt-get install -y ansible
+ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa
+cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
diff --git a/cord-hosts b/cord-hosts
index a97e12e..c52b70b 100644
--- a/cord-hosts
+++ b/cord-hosts
@@ -1,5 +1,19 @@
head ansible_ssh_host=localhost connection=local
+[openstack]
+mysql
+rabbitmq-server
+keystone
+glance
+nova-cloud-controller
+openstack-dashboard
+ceilometer
+nagios
+neutron-api
+
+[openstack:vars]
+ansible_ssh_user=ubuntu
+
[compute]
node1
node2
diff --git a/cord-setup.yml b/cord-setup.yml
index c5c5ec1..6182ce1 100644
--- a/cord-setup.yml
+++ b/cord-setup.yml
@@ -85,6 +85,14 @@
- hosts: head
sudo: yes
tasks:
+
  # NOTE: editing /etc/resolv.conf directly is fragile (resolvconf may rewrite it); done only for the test setup
+ - name: Add local resolver to /etc/resolv.conf
+ lineinfile: dest=/etc/resolv.conf
+ insertafter=".*DO NOT EDIT THIS FILE.*"
+ line="nameserver 192.168.122.1"
+ when: test_setup is defined
+
- name: Create VMs to host OpenCloud services on mgmtbr
sudo: no
script: scripts/create-vms-cord.sh
@@ -95,14 +103,6 @@
script: scripts/create-vms-cord.sh --testing
when: test_setup is defined
- - include: tasks/vm-ips-cord.yml
-
- - name: Add VMs to /etc/hosts
- template: src=templates/etc/cord-hosts.j2
- dest=/etc/hosts
- notify:
- - Reload dnsmasq
-
- name: Set up /etc/ansible/hosts
template: src=templates/etc/ansible/cord-hosts.j2
dest=/etc/ansible/hosts
@@ -125,10 +125,6 @@
sudo: no
shell: ansible services -m ping -u ubuntu
- handlers:
- - name: Reload dnsmasq
- shell: killall -HUP dnsmasq
-
# Play: prepare compute nodes for installation
- hosts: compute
sudo: yes
@@ -168,9 +164,6 @@
shell: juju generate-config
creates={{ ansible_env['PWD'] }}/.juju/environments.yaml
- - shell: uvt-kvm ip juju
- register: juju_ip
-
- name: Juju config file
sudo: no
template: src=templates/environments.yaml.j2
@@ -181,17 +174,21 @@
shell: juju bootstrap
creates={{ ansible_env['PWD'] }}/.juju/environments/manual.jenv
+ - name: Check that 'juju status' works
+ sudo: no
+ shell: juju status
+
+ - name: Pause for 15 seconds (problem with mysql VM not being added to Juju)
+ pause: seconds=15
+
- name: Add virtual machines to Juju's control
- script: scripts/juju-cord-setup.py
+ shell: juju add-machine ssh:{{ item }}
+ with_items: "{{ groups['openstack'] }}"
- name: Add compute nodes to Juju's control
shell: juju add-machine ssh:{{ item }}
with_items: "{{ groups['compute'] }}"
- - name: Get onos-cord IP
- shell: uvt-kvm ip onos-cord
- register: onos_cord_ip
-
- name: Copy cord.yaml bundle
template: src=templates/cord.yaml dest={{ ansible_env['PWD'] }}/cord.yaml
@@ -202,10 +199,6 @@
sudo: no
tasks:
- - name: Get Keystone IP
- shell: uvt-kvm ip keystone
- register: keystone_ip
-
- name: Create credentials
template: src=templates/admin-openrc-cord.sh.j2
dest={{ ansible_env['PWD'] }}/admin-openrc.sh
diff --git a/cord-test-hosts b/cord-test-hosts
index c41c275..8f01e1f 100644
--- a/cord-test-hosts
+++ b/cord-test-hosts
@@ -1,7 +1,21 @@
head ansible_ssh_host=localhost connection=local
+[openstack]
+mysql
+rabbitmq-server
+keystone
+glance
+nova-cloud-controller
+openstack-dashboard
+ceilometer
+nagios
+neutron-api
+
+[openstack:vars]
+ansible_ssh_user=ubuntu
+
[compute]
nova-compute ansible_ssh_user=ubuntu
[all:vars]
-test_setup=true
+test_setup=true
diff --git a/files/openstack.cfg b/files/openstack.cfg
index bbbbad7..e9a1866 100644
--- a/files/openstack.cfg
+++ b/files/openstack.cfg
@@ -11,10 +11,11 @@
openstack-origin: "cloud:trusty-kilo"
nova-compute:
# config-flags: "firewall_driver=nova.virt.firewall.NoopFirewallDriver"
- config-flags: "firewall_driver=nova.virt.firewall.NoopFirewallDriver,xos_api_url=http://130.127.133.51:9999"
+ config-flags: "firewall_driver=nova.virt.firewall.NoopFirewallDriver,xos_api_url=http://portal.opencloud.us"
disable-neutron-security-groups: "True"
openstack-origin: "cloud:trusty-kilo"
ntp:
+ source: "0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org 3.ubuntu.pool.ntp.org"
openstack-dashboard:
openstack-origin: "cloud:trusty-kilo"
neutron-gateway:
diff --git a/files/usr/lib/python2.7/dist-packages/uvtool/libvirt/__init__.py b/files/usr/lib/python2.7/dist-packages/uvtool/libvirt/__init__.py
new file mode 100644
index 0000000..3c72a8d
--- /dev/null
+++ b/files/usr/lib/python2.7/dist-packages/uvtool/libvirt/__init__.py
@@ -0,0 +1,246 @@
+# Copyright (C) 2013 Canonical Ltd.
+# Author: Robie Basak <robie.basak@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import codecs
+import contextlib
+import itertools
+import os
+import shutil
+import subprocess
+import tempfile
+
+import libvirt
+from lxml import etree
+from lxml.builder import E
+
+LIBVIRT_DNSMASQ_LEASE_FILE = '/var/lib/libvirt/dnsmasq/default.leases'
+
+
+def get_libvirt_pool_object(libvirt_conn, pool_name):
+ try:
+ pool = libvirt_conn.storagePoolLookupByName(pool_name)
+ except libvirt.libvirtError:
+ raise RuntimeError("Cannot find pool %s." % repr(pool_name))
+ return pool
+
+
+def create_volume_from_fobj(new_volume_name, fobj, image_type='raw',
+ pool_name='default'):
+ """Create a new libvirt volume and populate it from a file-like object."""
+
+ compressed_fobj = tempfile.NamedTemporaryFile()
+ decompressed_fobj = tempfile.NamedTemporaryFile()
+ with contextlib.closing(compressed_fobj):
+ with contextlib.closing(decompressed_fobj):
+ shutil.copyfileobj(fobj, compressed_fobj)
+ compressed_fobj.flush()
+ compressed_fobj.seek(0) # is this necessary?
+ subprocess.check_call(
+ [
+ 'qemu-img', 'convert', '-f', image_type, '-O', image_type,
+ compressed_fobj.name, decompressed_fobj.name
+ ],
+ shell=False, close_fds=False)
+ decompressed_fobj.seek(0) # is this necessary?
+ return _create_volume_from_fobj_with_size(
+ new_volume_name=new_volume_name,
+ fobj=decompressed_fobj,
+ fobj_size=os.fstat(decompressed_fobj.fileno()).st_size,
+ image_type=image_type,
+ pool_name=pool_name
+ )
+
+
+def _create_volume_from_fobj_with_size(new_volume_name, fobj, fobj_size,
+ image_type, pool_name):
+ conn = libvirt.open('qemu:///system')
+ pool = get_libvirt_pool_object(conn, pool_name)
+
+ if image_type == 'raw':
+ extra = [E.allocation(str(fobj_size)), E.capacity(str(fobj_size))]
+ elif image_type == 'qcow2':
+ extra = [E.capacity('0')]
+ else:
+ raise NotImplementedError("Unknown image type %r." % image_type)
+
+ new_vol = E.volume(
+ E.name(new_volume_name),
+ E.target(E.format(type=image_type)),
+ *extra
+ )
+ vol = pool.createXML(etree.tostring(new_vol), 0)
+
+ try:
+ stream = conn.newStream(0)
+ vol.upload(stream, 0, fobj_size, 0)
+
+ def handler(stream_ignored, size, opaque_ignored):
+ return fobj.read(size)
+
+ try:
+ stream.sendAll(handler, None)
+ except Exception as e:
+ try:
+ # This unexpectedly raises an exception even on a normal call,
+ # so ignore it.
+ stream.abort()
+ except:
+ pass
+ raise e
+ stream.finish()
+ except:
+ vol.delete(flags=0)
+ raise
+
+ return vol
+
+
+def volume_names_in_pool(pool_name='default'):
+ conn = libvirt.open('qemu:///system')
+ pool = get_libvirt_pool_object(conn, pool_name)
+ return pool.listVolumes()
+
+
+def delete_volume_by_name(volume_name, pool_name='default'):
+ conn = libvirt.open('qemu:///system')
+ pool = get_libvirt_pool_object(conn, pool_name)
+ volume = pool.storageVolLookupByName(volume_name)
+ volume.delete(flags=0)
+
+
+def have_volume_by_name(volume_name, pool_name='default'):
+ conn = libvirt.open('qemu:///system')
+ pool = get_libvirt_pool_object(conn, pool_name)
+ try:
+ volume = pool.storageVolLookupByName(volume_name)
+ except libvirt.libvirtError:
+ return False
+ else:
+ return True
+
+
+def _get_all_domains(conn=None):
+ if conn is None:
+ conn = libvirt.open('qemu:///system')
+
+ # libvirt in Precise doesn't seem to have a binding for
+ # virConnectListAllDomains, and it seems that we must enumerate
+ # defined-by-not-running and running instances separately and in different
+ # ways.
+
+ for domain_id in conn.listDomainsID():
+ yield conn.lookupByID(domain_id)
+
+ for domain_name in conn.listDefinedDomains():
+ yield conn.lookupByName(domain_name)
+
+
+def _domain_element_to_volume_paths(element):
+ assert element.tag == 'domain'
+ return (
+ source.get('file')
+ for source in element.xpath(
+ "/domain/devices/disk[@type='file']/source[@file]"
+ )
+ )
+
+
+def _domain_volume_paths(domain):
+ volume_paths = set()
+
+ for flags in [0, libvirt.VIR_DOMAIN_XML_INACTIVE]:
+ element = etree.fromstring(domain.XMLDesc(flags))
+ volume_paths.update(_domain_element_to_volume_paths(element))
+
+ return frozenset(volume_paths)
+
+
+def _volume_element_to_volume_paths(element):
+ assert element.tag == 'volume'
+ return itertools.chain(
+ (path.text for path in element.xpath('/volume/target/path')),
+ (path.text for path in element.xpath('/volume/backingStore/path')),
+ )
+
+
+def _volume_volume_paths(volume):
+ # Volumes can depend on other volumes ("backing stores"), so return all
+ # paths a volume needs to function, including the top level one.
+ volume_paths = set()
+
+ element = etree.fromstring(volume.XMLDesc(0))
+ volume_paths.update(_volume_element_to_volume_paths(element))
+
+ return frozenset(volume_paths)
+
+
+def _get_all_domain_volume_paths(conn=None):
+ if conn is None:
+ conn = libvirt.open('qemu:///system')
+
+ all_volume_paths = set()
+ for domain in _get_all_domains(conn):
+ for path in _domain_volume_paths(domain):
+ try:
+ volume = conn.storageVolLookupByKey(path)
+ except libvirt.libvirtError:
+ # ignore a lookup failure, since if a volume doesn't exist,
+ # it isn't reasonable to consider what backing volumes it may
+ # have
+ continue
+ all_volume_paths.update(_volume_volume_paths(volume))
+
+ return frozenset(all_volume_paths)
+
+
+def get_all_domain_volume_names(conn=None, filter_by_dir=None):
+ # Limitation: filter_by_dir must currently end in a '/' and be the
+ # canonical path as libvirt returns it. Ideally I'd filter by pool instead,
+ # but the libvirt API appears to not provide any method to find what pool a
+ # volume is in when looked up by key.
+ if conn is None:
+ conn = libvirt.open('qemu:///system')
+
+ for path in _get_all_domain_volume_paths(conn=conn):
+ volume = conn.storageVolLookupByKey(path)
+ if filter_by_dir and not volume.path().startswith(filter_by_dir):
+ continue
+ yield volume.name()
+
+
+def get_domain_macs(domain_name, conn=None):
+ if conn is None:
+ conn = libvirt.open('qemu:///system')
+
+ domain = conn.lookupByName(domain_name)
+ xml = etree.fromstring(domain.XMLDesc(0))
+ for mac in xml.xpath(
+ "/domain/devices/interface[@type='network' or @type='bridge']/mac[@address]"):
+ yield mac.get('address')
+
+
+def mac_to_ip(mac):
+ canonical_mac = mac.lower()
+ with codecs.open(LIBVIRT_DNSMASQ_LEASE_FILE, 'r') as f:
+ for line in f:
+ fields = line.split()
+ if len(fields) > 1 and fields[1].lower() == canonical_mac:
+ return fields[2]
+ return None
diff --git a/scripts/create-vms-cord.sh b/scripts/create-vms-cord.sh
index c00eacb..d456525 100755
--- a/scripts/create-vms-cord.sh
+++ b/scripts/create-vms-cord.sh
@@ -31,10 +31,15 @@
else
uvt-kvm create $NAME --cpu=$CPU --memory=$MEM_MB --disk=$DISK_GB --bridge mgmtbr
fi
- uvt-kvm wait --insecure $NAME
+ # uvt-kvm wait --insecure $NAME
fi
}
+function wait-for-vm {
+ NAME=$1
+ uvt-kvm wait --insecure $NAME
+}
+
create-vm juju 1 2048 20
create-vm mysql 2 4096 40
create-vm rabbitmq-server 2 4096 40
@@ -52,3 +57,22 @@
then
create-vm nova-compute 2 4096 100
fi
+
+# Wait for everything to get set up
+wait-for-vm juju
+wait-for-vm mysql
+wait-for-vm rabbitmq-server
+wait-for-vm keystone
+wait-for-vm glance
+wait-for-vm nova-cloud-controller
+wait-for-vm neutron-api
+wait-for-vm openstack-dashboard
+wait-for-vm ceilometer
+wait-for-vm nagios
+
+wait-for-vm xos
+wait-for-vm onos-cord
+if $TESTING
+then
+ wait-for-vm nova-compute
+fi
diff --git a/scripts/juju-cord-setup.py b/scripts/juju-cord-setup.py
deleted file mode 100755
index 418aab2..0000000
--- a/scripts/juju-cord-setup.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/python
-
-import subprocess
-import json
-import socket
-
-# Assumption: VMs have same hostname as service that runs inside
-machines = ["mysql", "rabbitmq-server", "keystone", "glance", "nova-cloud-controller",
- "openstack-dashboard", "ceilometer", "nagios", "neutron-api"]
-
-
-# Figure out Juju ID of machine we should install on
-def get_machine(status, service):
- if service == "mongodb":
- service = "ceilometer"
- for key, value in status['machines'].iteritems():
- (hostname, aliaslist, ipaddrlist) = socket.gethostbyaddr(value['dns-name'])
- if hostname == service:
- return key
- return None
-
-def get_juju_status():
- output = subprocess.check_output("juju status --format=json", shell=True)
- status = json.loads(output)
- return status
-
-def addmachines():
- status = get_juju_status()
-
- for machine in machines:
- if get_machine(status, machine) == None:
- ipaddr = socket.gethostbyname(machine)
- subprocess.check_call("juju add-machine ssh:%s" % ipaddr, shell=True)
-
-def main():
- addmachines()
-
-if __name__ =='__main__':
- main()
diff --git a/singapore-compute.yml b/singapore-compute.yml
index 70bc008..c342483 100644
--- a/singapore-compute.yml
+++ b/singapore-compute.yml
@@ -1,22 +1,36 @@
---
-- hosts: singapore-head
+- hosts: head
sudo: no
+ user: ubuntu
tasks:
- - pause: "Install nova-compute on all unused machines managed by Juju"
+ - name: Remind user what is going to happen
+ pause: prompt="Install nova-compute on all unused machines managed by Juju"
- name: Deploy nova-compute
script: scripts/juju-compute-setup.py
- - pause: "Wait until services have started"
+ - name: Try to avoid race condition
+ pause: seconds=5
+
+ - name: Wait until nova-compute is deployed
+ script: scripts/wait-for-services.sh
+
+ - name: Make sure we're using KVM
+ shell: juju set nova-compute virt-type=kvm
- name: Add nova-compute relations
script: scripts/juju-compute-relations.py
- - pause: "Wait until relations are added"
+ - name: Try to avoid race condition
+ pause: seconds=5
+
+ - name: Wait until relations are added
+ script: scripts/wait-for-services.sh
# Play: set up ansible-pull for OpenCloud-specific files on nova-compute nodes
-- hosts: singapore-compute
+- hosts: compute
sudo: yes
+ user: ubuntu
vars:
# schedule is fed directly to cron
@@ -31,11 +45,14 @@
# Directory to where repository will be cloned
workdir: /var/lib/ansible/local
- # Repository to check out
+ # Repository to check out
# repo must contain a local.yml file at top level
#repo_url: git://github.com/sfromm/ansible-playbooks.git
repo_url: git://github.com/andybavier/opencloud-nova-compute-ansible.git
+ # Branch or tag to checkout
+ repo_version: kilo
+
tasks:
- name: Install ansible
@@ -53,4 +70,3 @@
- name: Create logrotate entry for ansible-pull.log
template: src=templates/etc/logrotate.d/ansible-pull.j2 dest=/etc/logrotate.d/ansible-pull owner=root group=root mode=0644
-
diff --git a/singapore-hosts b/singapore-hosts
new file mode 100644
index 0000000..8c1b12b
--- /dev/null
+++ b/singapore-hosts
@@ -0,0 +1,9 @@
+head ansible_ssh_host=opencloud0.sing.internet2.edu
+
+[compute]
+opencloud1.sing.internet2.edu
+opencloud2.sing.internet2.edu
+opencloud3.sing.internet2.edu
+
+[all:vars]
+cloudlab=false
diff --git a/singapore-setup.yml b/singapore-setup.yml
index bc8e325..725d679 100644
--- a/singapore-setup.yml
+++ b/singapore-setup.yml
@@ -1,11 +1,18 @@
---
# Play: set up head node
# Assumes basic /etc/ansible/hosts file
-- hosts: singapore-head
+- hosts: head
+ user: ubuntu
sudo: yes
+ vars:
+ # Each OpenCloud cluster needs a unique mgmt_net_prefix
+ mgmt_net_prefix: 192.168.103
tasks:
- - apt: name=python-pycurl
+ - apt: name={{ item }} update_cache=yes
+ with_items:
+ - python-pycurl
+ - software-properties-common
- name: Add Juju repository
apt_repository: repo="ppa:juju/stable"
@@ -13,8 +20,8 @@
- name: Add Ansible repository
apt_repository: repo="ppa:ansible/ansible"
- - name: Install older version of Juju due to bug in 1.22
- apt: name=juju-core=1.20.11-0ubuntu0.14.04.1 update_cache=yes
+  - name: Update apt cache
+ apt: update_cache=yes
- name: Install packages
apt: name={{ item }} state=latest
@@ -22,6 +29,12 @@
- ansible
- uvtool
- git
+ - bzr
+ - juju-core
+ - python-novaclient
+ - python-neutronclient
+ - python-keystoneclient
+ - python-glanceclient
- name: Get juju-ansible git repo
git: repo=https://github.com/cmars/juju-ansible.git
@@ -38,17 +51,40 @@
state=link
- name: Generate key to use in VMs
- user: name=ubuntu generate_ssh_key=yes
+ user: name={{ ansible_env['SUDO_USER'] }} generate_ssh_key=yes
+
+ - name: (CloudLab) Set up extra disk space
+ shell: /usr/testbed/bin/mkextrafs /var/lib/uvtool/libvirt/images
+ creates=/var/lib/uvtool/libvirt/images/lost+found
+ when: cloudlab
+
+ - name: Add myself to libvirtd group
+ user: name={{ ansible_env['SUDO_USER'] }}
+ groups=libvirtd
+ append=yes
- name: Get trusty image for uvtool
- shell: uvt-simplestreams-libvirt sync release=trusty arch=amd64
+ shell: uvt-simplestreams-libvirt sync --source http://cloud-images.ubuntu.com/daily release=trusty arch=amd64
+ - name: Change the virbr0 subnet to {{ mgmt_net_prefix }}.0/24
+ template: src=templates/etc/libvirt/qemu/networks/default.xml.j2
+ dest=/etc/libvirt/qemu/networks/default.xml
+ notify:
+ - recreate default net
+
+ handlers:
+ - name: recreate default net
+ script: scripts/recreate-virbr0.sh
+
+# Play: create VMs to host OpenStack services
+- hosts: head
+ user: ubuntu
+ sudo: yes
+ tasks:
- name: Create VMs to host OpenCloud services
sudo: no
script: scripts/create-vms.sh
- - pause: prompt="Hit return when all VMs have IP addresses"
-
- include: tasks/vm-ips.yml
- name: Add VMs to /etc/hosts
@@ -84,7 +120,10 @@
shell: killall -HUP dnsmasq
# Play: Install services using Juju
-- hosts: singapore-head
+- hosts: head
+ user: ubuntu
+ vars:
+ charm_src: /usr/local/src/charms/trusty
tasks:
- name: Initialize Juju
sudo: no
@@ -104,44 +143,36 @@
shell: juju bootstrap
creates={{ ansible_env['PWD'] }}/.juju/environments/manual.jenv
- # - pause: Break here and try rebooting Juju VM
-
- - name: Copy openstack.cfg for Juju
+ - name: Copy openstack.cfg for Juju
sudo: yes
- copy: src=files/openstack.cfg
+ copy: src=files/openstack.cfg
dest=/usr/local/src/openstack.cfg
+ - name: Check for /usr/local/src/charms/trusty
+ sudo: yes
+ file: path={{ charm_src }}
+ state=directory
+ mode=0755
+
- name: Deploy OpenStack services with Juju
script: scripts/juju-setup.py
- - pause: prompt="Hit return when all services have started successfully"
-
- - name: Set MTU for GRE tunneling
- shell: "juju set quantum-gateway instance-mtu=1400"
-
- - name: Use HTTPS for keystone authentication
- shell: 'juju set keystone use-https=yes'
-
- - name: Use HTTPS for all service endpoints
- shell: 'juju set keystone https-service-endpoints=True'
-
- - name: Use SSL for rabbitmq
- shell: 'juju set rabbitmq-server ssl=on'
-
- name: Add all Juju relations between services
script: scripts/juju-relations.py
- - pause: prompt="Wait for relations to be fully added"
+ - name: Wait for relations to be fully added
+ script: scripts/wait-for-services.sh
# Play: Use libvirt hooks to set up iptables
-- hosts: singapore-head
+- hosts: head
+ user: ubuntu
sudo: yes
tasks:
- name: Enable port forwarding for services
copy: src=files/{{ item }}
dest={{ item }}
mode=0755
- notify:
+ notify:
- reload libvirt config
- run qemu hook
with_items:
@@ -155,8 +186,9 @@
- name: run qemu hook
shell: /etc/libvirt/hooks/qemu start start
-# Play: Create credentials, set up some basic OpenStack
-- hosts: singapore-head
+# Play: Create credentials, set up some basic OpenStack
+- hosts: head
+ user: ubuntu
sudo: no
tasks:
@@ -171,6 +203,16 @@
template: src=templates/admin-openrc.sh.j2
dest={{ ansible_env['PWD'] }}/admin-openrc.sh
+ - name: (CloudLab) Make sure that /root/setup exists
+ file: path=/root/setup state=directory
+ sudo: yes
+ when: cloudlab
+
+ - name: (CloudLab) Copy credentials to /root/setup
+ shell: scp admin-openrc.sh /root/setup
+ sudo: yes
+ when: cloudlab
+
- name: Copy credentials to nova-cloud-controller
shell: "scp admin-openrc.sh ubuntu@nova-cloud-controller:"
@@ -187,27 +229,52 @@
shell: cat {{ ansible_env['PWD'] }}/.ssh/id_rsa.pub
register: sshkey
-- hosts: singapore-compute
+ - name: Copy CA certificate
+ shell: sudo juju scp nova-cloud-controller/0:/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt /usr/local/share/ca-certificates
+ creates=/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt
+
+  - name: Update CA certificates
+ shell: update-ca-certificates
+ sudo: yes
+
+- hosts: compute
+ user: ubuntu
sudo: yes
- vars:
- control_net: 192.168.122.0/24
- gateway: "{{ hostvars['opencloud3.sing.internet2.edu']['ansible_eth0']['ipv4']['address'] }}"
+ vars:
+ control_net: "{{ hostvars['head']['ansible_virbr0']['ipv4']['network'] }}/24"
+ gateway: "{{ hostvars['head']['ansible_default_ipv4']['address'] }}"
tasks:
- name: Install package needed by Juju
apt: name=python-yaml state=present
+ - name: Add key
+ authorized_key: user="{{ ansible_env['SUDO_USER'] }}"
+ key="{{ hostvars['head']['sshkey']['stdout'] }}"
+
+ - name: Add route via /etc/rc.local
+ template: src=templates/etc/rc.local.cloudlab
+ dest=/etc/rc.local
+ mode=0755
+ when: cloudlab
+ notify:
+ - run /etc/rc.local
+
- name: Add route via /etc/rc.local
template: src=templates/etc/rc.local
dest=/etc/rc.local
mode=0755
+ when: not cloudlab
notify:
- run /etc/rc.local
- - name: Add key
- authorized_key: user="{{ ansible_env['SUDO_USER'] }}"
- key="{{ hostvars['opencloud3.sing.internet2.edu']['sshkey']['stdout'] }}"
+  - name: Create /var/lib/nova directory
+ file: path=/var/lib/nova state=directory
+
+ - name: (CloudLab) Set up extra disk space
+ shell: /usr/testbed/bin/mkextrafs /var/lib/nova
+ creates=/var/lib/nova/lost+found
+ when: cloudlab
handlers:
- name: run /etc/rc.local
shell: /etc/rc.local
-
diff --git a/tasks/vm-ips-cord.yml b/tasks/vm-ips-cord.yml
deleted file mode 100644
index 3fb6d5d..0000000
--- a/tasks/vm-ips-cord.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- shell: uvt-kvm ip juju
- register: juju_ip
-
-- shell: uvt-kvm ip mysql
- register: mysql_ip
-
-- shell: uvt-kvm ip rabbitmq-server
- register: rabbitmq_ip
-
-- shell: uvt-kvm ip keystone
- register: keystone_ip
-
-- shell: uvt-kvm ip glance
- register: glance_ip
-
-- shell: uvt-kvm ip nova-cloud-controller
- register: novacc_ip
-
-- shell: uvt-kvm ip openstack-dashboard
- register: horizon_ip
-
-- shell: uvt-kvm ip nagios
- register: nagios_ip
-
-- shell: uvt-kvm ip ceilometer
- register: ceilometer_ip
-
-- shell: uvt-kvm ip neutron-api
- register: neutron_api_ip
-
-- shell: uvt-kvm ip xos
- register: xos_ip
-
-- shell: uvt-kvm ip onos-cord
- register: onos_cord_ip
-
-- shell: uvt-kvm ip nova-compute
- register: nova_compute_ip
- when: test_setup is defined
diff --git a/templates/admin-openrc-cord.sh.j2 b/templates/admin-openrc-cord.sh.j2
index 913e86a..eb1c3df 100644
--- a/templates/admin-openrc-cord.sh.j2
+++ b/templates/admin-openrc-cord.sh.j2
@@ -1,5 +1,5 @@
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_TENANT_NAME=admin
-export OS_AUTH_URL=http://{{ keystone_ip.stdout }}:5000/v2.0/
+export OS_AUTH_URL=http://keystone:5000/v2.0/
export OS_REGION_NAME=RegionOne
diff --git a/templates/cord.yaml b/templates/cord.yaml
index 1efb0b0..0e1cb70 100644
--- a/templates/cord.yaml
+++ b/templates/cord.yaml
@@ -153,7 +153,7 @@
num_units: 1
options:
neutron-plugin: onosvtn
- onos-vtn-ip: {{ onos_cord_ip.stdout }}
+ onos-vtn-ip: onos-cord
neutron-security-groups: true
openstack-origin: cloud:trusty-kilo
overlay-network-type: vxlan
diff --git a/templates/environments.yaml.j2 b/templates/environments.yaml.j2
index 710afa1..4daeba1 100644
--- a/templates/environments.yaml.j2
+++ b/templates/environments.yaml.j2
@@ -2,6 +2,6 @@
environments:
manual:
type: manual
- bootstrap-host: {{ juju_ip.stdout }}
+ bootstrap-host: juju
bootstrap-user: ubuntu
default-series: trusty