Initial commit
diff --git a/files/ansible.cfg b/files/ansible.cfg
new file mode 100644
index 0000000..dd43d2b
--- /dev/null
+++ b/files/ansible.cfg
@@ -0,0 +1,2 @@
+[defaults]
+host_key_checking = false
diff --git a/files/etc/libvirt/hooks/daemon b/files/etc/libvirt/hooks/daemon
new file mode 100644
index 0000000..cff3fb7
--- /dev/null
+++ b/files/etc/libvirt/hooks/daemon
@@ -0,0 +1,42 @@
+#!/bin/sh
+
+SHELL="/bin/bash"
+
+NIC=$( route|grep default|awk '{print$8}' )
+
+NAME="${1}"
+OP="${2}"
+SUBOP="${3}"
+ARGS="${4}"
+
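+# Idempotently add a DNAT rule forwarding TCP port $1 arriving on the host's
+# default NIC to port $3 on the VM named $2.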
+add_port_fwd_rule() {
+ DPORT=$1
+ VM=$2
+ TOPORT=$3
+
+ VMIP=$( getent ahosts $VM|head -1|awk '{print $1}' )
+ iptables -t nat -C PREROUTING -p tcp -i $NIC --dport $DPORT -j DNAT --to-destination $VMIP:$TOPORT
+ if [ "$?" -ne 0 ]
+ then
+ iptables -t nat -A PREROUTING -p tcp -i $NIC --dport $DPORT -j DNAT --to-destination $VMIP:$TOPORT
+ fi
+}
+
+if [ "$OP" = "start" ] || [ "$OP" = "reload" ]
+then
+ iptables -t nat -F
+ add_port_fwd_rule 35357 keystone 35357
+ add_port_fwd_rule 4990 keystone 4990
+ add_port_fwd_rule 5000 keystone 5000
+ add_port_fwd_rule 8774 nova-cloud-controller 8774
+ add_port_fwd_rule 9696 nova-cloud-controller 9696
+ add_port_fwd_rule 9292 glance 9292
+ add_port_fwd_rule 8080 openstack-dashboard 80
+ add_port_fwd_rule 3128 nagios 80
+ add_port_fwd_rule 8777 ceilometer 8777
+
+  # Also flush the filter table before the rules are re-added
+ iptables -F
+fi
diff --git a/files/etc/libvirt/hooks/qemu b/files/etc/libvirt/hooks/qemu
new file mode 100644
index 0000000..903fced
--- /dev/null
+++ b/files/etc/libvirt/hooks/qemu
@@ -0,0 +1,41 @@
+#!/bin/sh
+
+SHELL="/bin/bash"
+
+NIC=$( route|grep default|awk '{print$8}' )
+PORTAL=$( dig +short portal.opencloud.us )
+
+NAME="${1}"
+OP="${2}"
+SUBOP="${3}"
+ARGS="${4}"
+
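+# Insert a rule at the top of the FORWARD chain unless an identical rule already exists.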
+add_rule() {
+ ARGS=$1
+ iptables -C FORWARD $ARGS
+ if [ "$?" -ne 0 ]
+ then
+ iptables -I FORWARD 1 $ARGS
+ fi
+}
+
+add_local_access_rules() {
+ SUBNET=$( ip addr show $NIC|grep "inet "|awk '{print $2}' )
+ add_rule "-s $SUBNET -j ACCEPT"
+}
+
+add_portal_access_rules() {
+ add_rule "-s $PORTAL -j ACCEPT"
+}
+
+add_web_access_rules() {
+ add_rule "-p tcp --dport 80 -j ACCEPT"
+}
+
+if [ "$OP" = "start" ]
+then
+ add_local_access_rules
+ add_portal_access_rules
+ add_web_access_rules
+fi
diff --git a/files/openstack.cfg b/files/openstack.cfg
new file mode 100644
index 0000000..9503b4d
--- /dev/null
+++ b/files/openstack.cfg
@@ -0,0 +1,17 @@
+glance:
+ openstack-origin: "cloud:trusty-icehouse"
+keystone:
+ admin-password: ""
+ openstack-origin: "cloud:trusty-icehouse"
+nova-cloud-controller:
+ network-manager: "Neutron"
+ openstack-origin: "cloud:trusty-icehouse"
+nova-compute:
+ config-flags: "firewall_driver=nova.virt.firewall.NoopFirewallDriver"
+ openstack-origin: "cloud:trusty-icehouse"
+ntp:
+ source: "ntp.cs.princeton.edu"
+openstack-dashboard:
+ openstack-origin: "cloud:trusty-icehouse"
+quantum-gateway:
+ openstack-origin: "cloud:trusty-icehouse"
diff --git a/scripts/create-vms.sh b/scripts/create-vms.sh
new file mode 100755
index 0000000..eebd038
--- /dev/null
+++ b/scripts/create-vms.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
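+# Create a VM with uvt-kvm unless one with the same name already exists.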
+function create-vm {
+ NAME=$1
+ CPU=$2
+ MEM_MB=$3
+ DISK_GB=$4
+  uvt-kvm list | grep -q "$NAME"
+ if [ "$?" -ne "0" ]
+ then
+ uvt-kvm create $NAME --cpu=$CPU --memory=$MEM_MB --disk=$DISK_GB
+ fi
+}
+
+create-vm juju 1 2048 20
+create-vm mysql 2 4096 40
+create-vm rabbitmq-server 2 4096 40
+create-vm keystone 2 4096 40
+create-vm glance 2 4096 160
+create-vm nova-cloud-controller 2 4096 40
+create-vm quantum-gateway 2 4096 40
+create-vm openstack-dashboard 1 2048 20
+create-vm ceilometer 1 2048 20
+create-vm nagios 1 2048 20
diff --git a/scripts/juju-compute-relations.py b/scripts/juju-compute-relations.py
new file mode 100755
index 0000000..49d0cdc
--- /dev/null
+++ b/scripts/juju-compute-relations.py
@@ -0,0 +1,56 @@
+#!/usr/bin/python
+
+import subprocess
+import time
+import argparse
+
+sleep_interval = 1
+
+relations = [
+ "nova-compute:shared-db mysql:shared-db",
+ "nova-compute:amqp rabbitmq-server:amqp",
+ "nova-compute glance",
+ "nova-compute nova-cloud-controller",
+ "ntp nova-compute",
+ "nova-compute nagios",
+ "nova-compute nrpe",
+ "nova-compute:nova-ceilometer ceilometer-agent:nova-ceilometer",
+ ]
+
+def addrelation(relation):
+ subprocess.check_call("juju add-relation %s" % relation, shell=True)
+
+def destroyrelation(relation):
+ subprocess.check_call("juju destroy-relation %s" % relation, shell=True)
+
+def addrelations():
+ for relation in relations:
+ print "Adding relation %s" % relation
+ try:
+ addrelation(relation)
+ time.sleep(sleep_interval)
+        except:  # ignore failures, e.g. if the relation already exists
+ pass
+
+def destroyrelations():
+ for relation in relations:
+ print "Destroying relation %s" % relation
+ try:
+ destroyrelation(relation)
+ time.sleep(sleep_interval)
+ except:
+ pass
+
+def main():
+    parser = argparse.ArgumentParser(description='Add or destroy Juju relations for nova-compute')
+ parser.add_argument('--destroy', action='store_true',
+ help='Destroy the relations instead of adding them')
+
+ args = parser.parse_args()
+ if args.destroy:
+ destroyrelations()
+ else:
+ addrelations()
+
+if __name__ == '__main__':
+ main()
diff --git a/scripts/juju-compute-setup.py b/scripts/juju-compute-setup.py
new file mode 100755
index 0000000..cc62b50
--- /dev/null
+++ b/scripts/juju-compute-setup.py
@@ -0,0 +1,67 @@
+#!/usr/bin/python
+
+import subprocess
+import json
+import time
+
+jujuconfig="/home/ubuntu/openstack.cfg"
+
+services = {
+ "nova-compute" : "--config=%s cs:~andybavier/trusty/nova-compute" % jujuconfig,
+}
+
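+# Return the machines in the Juju status that have no unit assigned to them
+# (machine 0, the Juju bootstrap node, is skipped).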
+def get_free_machines(status):
+ for (service, svcinfo) in status['services'].iteritems():
+ if 'units' in svcinfo:
+ for (unit, unitinfo) in svcinfo['units'].iteritems():
+ if 'machine' in unitinfo:
+ machine = unitinfo['machine']
+ status['machines'][machine]['unit'] = unit
+
+ free = {}
+ for (machine, mchinfo) in status['machines'].iteritems():
+ if machine == "0":
+ continue
+
+ if 'unit' not in mchinfo:
+ # print "%s: %s" % (machine, mchinfo['dns-name'])
+ free[machine] = mchinfo
+
+ return free
+
+
+def deploy(status, service, cmd):
+ # Deploy nova-compute to all free machines
+ machines = get_free_machines(status)
+
+ for (machine, mchinfo) in machines.iteritems():
+ if service in status['services']:
+ print "Adding unit %s on %s" % (service, mchinfo['dns-name'])
+ subprocess.check_call("juju add-unit --to=%s %s" % (machine, service), shell=True)
+ else:
+ print "Deploying service %s on %s" % (service, mchinfo['dns-name'])
+ subprocess.check_call("juju deploy --to=%s %s" % (machine, cmd), shell=True)
+ status['services'][service] = "installed"
+ time.sleep(10)
+
+def get_juju_status():
+ output = subprocess.check_output("juju status --format=json", shell=True)
+ status = json.loads(output)
+ return status
+
+def addservices():
+ status = get_juju_status()
+
+ for service, cmd in services.iteritems():
+ try:
+ deploy(status, service, cmd)
+ except:
+ pass
+
+def main():
+ addservices()
+
+if __name__ == '__main__':
+ main()
diff --git a/scripts/juju-relations.py b/scripts/juju-relations.py
new file mode 100755
index 0000000..1be59ab
--- /dev/null
+++ b/scripts/juju-relations.py
@@ -0,0 +1,81 @@
+#!/usr/bin/python
+
+import subprocess
+import time
+import argparse
+
+sleep_interval = 1
+
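+# Relation pairs passed verbatim to "juju add-relation" / "juju destroy-relation".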
+relations = ["keystone mysql",
+ "nova-cloud-controller mysql",
+ "nova-cloud-controller rabbitmq-server",
+ "nova-cloud-controller glance",
+ "nova-cloud-controller keystone",
+ "glance mysql",
+ "glance keystone",
+ "quantum-gateway mysql",
+ "quantum-gateway rabbitmq-server",
+ "quantum-gateway nova-cloud-controller",
+ "openstack-dashboard keystone",
+ "ntp nova-cloud-controller",
+ "mysql nagios",
+ "rabbitmq-server nagios",
+ "keystone nagios",
+ "glance nagios",
+ "nova-cloud-controller nagios",
+ "quantum-gateway nagios",
+ "openstack-dashboard nagios",
+ "nagios nrpe",
+ "mysql nrpe",
+ "rabbitmq-server nrpe",
+ "keystone nrpe",
+ "glance nrpe",
+ "nova-cloud-controller nrpe",
+ "quantum-gateway nrpe",
+ "openstack-dashboard nrpe",
+ "ceilometer mongodb",
+ "ceilometer rabbitmq-server",
+ "ceilometer:identity-service keystone:identity-service",
+ "ceilometer:ceilometer-service ceilometer-agent:ceilometer-service",
+ "ceilometer nagios",
+ "ceilometer nrpe",
+ ]
+
+def addrelation(relation):
+ subprocess.check_call("juju add-relation %s" % relation, shell=True)
+
+def destroyrelation(relation):
+ subprocess.check_call("juju destroy-relation %s" % relation, shell=True)
+
+def addrelations():
+ for relation in relations:
+ print "Adding relation %s" % relation
+ try:
+ addrelation(relation)
+ time.sleep(sleep_interval)
+        except:  # ignore failures, e.g. if the relation already exists
+ pass
+
+def destroyrelations():
+ for relation in relations:
+ print "Destroying relation %s" % relation
+ try:
+ destroyrelation(relation)
+ time.sleep(sleep_interval)
+ except:
+ pass
+
+def main():
+    parser = argparse.ArgumentParser(description='Add or destroy Juju relations between the OpenStack services')
+ parser.add_argument('--destroy', action='store_true',
+ help='Destroy the relations instead of adding them')
+
+ args = parser.parse_args()
+ if args.destroy:
+ destroyrelations()
+ else:
+ addrelations()
+
+if __name__ == '__main__':
+ main()
diff --git a/scripts/juju-setup.py b/scripts/juju-setup.py
new file mode 100755
index 0000000..f9bb0ac
--- /dev/null
+++ b/scripts/juju-setup.py
@@ -0,0 +1,78 @@
+#!/usr/bin/python
+
+import subprocess
+import json
+import socket
+
+jujuconfig="openstack.cfg"
+
+# Assumption: VMs have same hostname as service that runs inside
+machines = ["mysql", "rabbitmq-server", "keystone", "glance", "nova-cloud-controller",
+ "quantum-gateway", "openstack-dashboard", "ceilometer", "nagios"]
+
+services = {
+ "mysql" : "mysql",
+ "rabbitmq-server" : "rabbitmq-server",
+ "keystone" : "--config=%s keystone" % jujuconfig,
+ "glance" : "--config=%s glance" % jujuconfig,
+ "nova-cloud-controller" : "--config=%s cs:~andybavier/trusty/nova-cloud-controller" % jujuconfig,
+ "quantum-gateway" : "--config=%s cs:~andybavier/trusty/quantum-gateway" % jujuconfig,
+ "openstack-dashboard" : "--config=%s openstack-dashboard" % jujuconfig,
+ "nagios" : "nagios",
+ "mongodb" : "mongodb", # deploy to ceilometer machine
+ "ceilometer" : "ceilometer",
+ "nrpe" : "nrpe",
+ "ntp" : "ntp",
+ "ceilometer-agent" : "ceilometer-agent"
+}
+
+# Figure out Juju ID of machine we should install on
+def get_machine(status, service):
+ if service == "mongodb":
+ service = "ceilometer"
+ for key, value in status['machines'].iteritems():
+ (hostname, aliaslist, ipaddrlist) = socket.gethostbyaddr(value['dns-name'])
+ if hostname == service:
+ return key
+ return None
+
+def deploy(status, service, cmd):
+ if service in status['services']:
+ return
+
+ print "Installing %s" % service
+ machine = get_machine(status, service)
+ if machine:
+ subprocess.check_call("juju deploy --to=%s %s" % (machine, cmd), shell=True)
+ else:
+ subprocess.check_call("juju deploy %s" % cmd, shell=True)
+
+def get_juju_status():
+ output = subprocess.check_output("juju status --format=json", shell=True)
+ status = json.loads(output)
+ return status
+
+def addservices():
+ status = get_juju_status()
+
+ for service, cmd in services.iteritems():
+ try:
+ deploy(status, service, cmd)
+ except:
+ pass
+
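+# Register each VM with Juju via manual provisioning if it is not already a machine.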
+def addmachines():
+ status = get_juju_status()
+
+ for machine in machines:
+        if get_machine(status, machine) is None:
+ ipaddr = socket.gethostbyname(machine)
+ subprocess.check_call("juju add-machine ssh:%s" % ipaddr, shell=True)
+
+def main():
+ addmachines()
+ addservices()
+
+if __name__ == '__main__':
+ main()
diff --git a/scripts/network-setup.sh b/scripts/network-setup.sh
new file mode 100755
index 0000000..efa35fc
--- /dev/null
+++ b/scripts/network-setup.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+source ~/admin-openrc.sh
+
+# Create nat-net network
+neutron net-show nat-net > /dev/null 2>&1
+if [ "$?" -ne 0 ]
+then
+ neutron net-create --provider:physical_network=nat --provider:network_type=flat --shared nat-net
+fi
+
+# Create nat-net subnet
+neutron subnet-show nat-net > /dev/null 2>&1
+if [ "$?" -ne 0 ]
+then
+ neutron subnet-create nat-net --name nat-net 172.16.0.0/16 --gateway=172.16.0.1 --enable-dhcp=false
+fi
+
+# Create ext-net network
+neutron net-show ext-net > /dev/null 2>&1
+if [ "$?" -ne 0 ]
+then
+ neutron net-create --provider:physical_network=ext --provider:network_type=flat --shared ext-net
+fi
+
+
diff --git a/singapore-compute.yml b/singapore-compute.yml
new file mode 100644
index 0000000..65d170b
--- /dev/null
+++ b/singapore-compute.yml
@@ -0,0 +1,59 @@
+---
+- hosts: singapore-head
+ sudo: no
+ tasks:
+ - pause: "Install nova-compute on all unused machines managed by Juju"
+
+ - name: Deploy nova-compute
+ script: scripts/juju-compute-setup.py
+
+ - pause: "Wait until services have started"
+
+ - name: Add nova-compute relations
+ script: scripts/juju-compute-relations.py
+
+ - pause: "Wait until relations are added"
+
+ - name: Copy network setup script
+ copy: src=scripts/network-setup.sh
+ dest=/home/ubuntu/network-setup.sh
+
+ - name: Run network setup script
+ shell: ansible nova-cloud-controller -m script -a "/home/ubuntu/network-setup.sh"
+
+# Play: set up ansible-pull for OpenCloud-specific files on nova-compute nodes
+- hosts: singapore-compute
+ sudo: yes
+ vars:
+
+ # schedule is fed directly to cron
+ schedule: '*/15 * * * *'
+
+ # User to run ansible-pull as from cron
+ cron_user: root
+
+ # File that ansible will use for logs
+ logfile: /var/log/ansible-pull.log
+
+ # Directory to where repository will be cloned
+ workdir: /var/lib/ansible/local
+
+ # Repository to check out
+ # repo must contain a local.yml file at top level
+ #repo_url: git://github.com/sfromm/ansible-playbooks.git
+ repo_url: git://github.com/andybavier/opencloud-nova-compute-ansible.git
+
+ tasks:
+
+ - name: Install ansible
+ apt: name=ansible state=installed
+
+ - name: Create local directory to work from
+ file: path={{workdir}} state=directory owner=root group=root mode=0751
+
+ - name: Create crontab entry to clone/pull git repository
+ template: src=templates/etc/cron.d/ansible-pull.j2 dest=/etc/cron.d/ansible-pull owner=root group=root mode=0644
+
+ - name: Create logrotate entry for ansible-pull.log
+ template: src=templates/etc/logrotate.d/ansible-pull.j2 dest=/etc/logrotate.d/ansible-pull owner=root group=root mode=0644
+
diff --git a/singapore-setup.yml b/singapore-setup.yml
new file mode 100644
index 0000000..3d2067d
--- /dev/null
+++ b/singapore-setup.yml
@@ -0,0 +1,211 @@
+---
+# Play: set up head node
+# Assumes basic /etc/ansible/hosts file
+- hosts: singapore-head
+ sudo: yes
+ tasks:
+
+ - apt: name=python-pycurl
+
+ - name: Add Juju repository
+ apt_repository: repo="ppa:juju/stable"
+
+ - name: Add Ansible repository
+ apt_repository: repo="ppa:ansible/ansible"
+
+ - name: Install older version of Juju due to bug in 1.22
+ apt: name=juju-core=1.20.11-0ubuntu0.14.04.1 update_cache=yes
+
+ - name: Install packages
+ apt: name={{ item.name }} state=latest
+ with_items:
+ - name: ansible
+ - name: uvtool
+
+ - name: Get juju-ansible git repo
+ git: repo=https://github.com/cmars/juju-ansible.git
+ dest=/home/ubuntu/juju-ansible
+
+ - name: Set up juju-ansible symlink
+ file: dest=/usr/local/bin/juju-ansible
+ src=/home/ubuntu/juju-ansible
+ state=link
+
+ - name: Set up juju-ansible-playbook symlink
+ file: dest=/usr/local/bin/juju-ansible-playbook
+ src=/home/ubuntu/juju-ansible
+ state=link
+
+ - name: Generate key to use in VMs
+ user: name=ubuntu generate_ssh_key=yes
+
+ - name: Get new key
+ sudo: no
+ shell: cat /home/ubuntu/.ssh/id_rsa.pub
+ register: sshkey
+
+ - name: Add to authorized_keys
+ authorized_key: user=ubuntu
+ key="{{ sshkey.stdout }}"
+
+ - name: Get trusty image for uvtool
+ shell: uvt-simplestreams-libvirt sync release=trusty arch=amd64
+
+ - name: Create VMs to host OpenCloud services
+ sudo: no
+ script: scripts/create-vms.sh
+
+ - pause: prompt="Hit return when all VMs have IP addresses"
+
+ - include: tasks/vm-ips.yml
+
+ - name: Add VMs to /etc/hosts
+ template: src=templates/etc/hosts.j2
+ dest=/etc/hosts
+
+ - name: Set up /etc/ansible/hosts
+ template: src=templates/etc/ansible/hosts.j2
+ dest=/etc/ansible/hosts
+
+ - name: Copy ansible.cfg to disable host key checking
+ sudo: no
+ copy: src=files/ansible.cfg
+ dest=/home/ubuntu/.ansible.cfg
+
+ - name: Touch ~/.ssh/config
+ sudo: no
+ file: path=/home/ubuntu/.ssh/config state=touch
+
+ - name: Disable host key checking in SSH
+ sudo: no
+ lineinfile: dest=/home/ubuntu/.ssh/config
+ line="StrictHostKeyChecking no"
+
+ - name: Test that we can log into every VM
+ sudo: no
+ shell: ansible services -m ping
+
+# Play: Install services using Juju
+- hosts: singapore-head
+ tasks:
+ - name: Initialize Juju
+ sudo: no
+ shell: juju generate-config
+ creates=/home/ubuntu/.juju/environments.yaml
+
+ - shell: uvt-kvm ip juju
+ register: juju_ip
+
+ - name: Juju config file
+ sudo: no
+ template: src=templates/environments.yaml.j2
+ dest=/home/ubuntu/.juju/environments.yaml
+
+ - name: Bootstrap Juju
+ sudo: no
+ shell: juju bootstrap
+ creates=/home/ubuntu/.juju/environments/manual.jenv
+
+ # - pause: Break here and try rebooting Juju VM
+
+ - name: Copy openstack.cfg for Juju
+ sudo: no
+ copy: src=files/openstack.cfg
+ dest=/home/ubuntu/openstack.cfg
+
+ - name: Deploy OpenStack services with Juju
+ script: scripts/juju-setup.py
+
+ - pause: prompt="Hit return when all services have started successfully"
+
+ - name: Set MTU for GRE tunneling
+ shell: "juju set quantum-gateway instance-mtu=1400"
+
+ - name: Use HTTPS for keystone authentication
+ shell: 'juju set keystone use-https=yes'
+
+ - name: Use HTTPS for all service endpoints
+ shell: 'juju set keystone https-service-endpoints=True'
+
+ - name: Use SSL for rabbitmq
+ shell: 'juju set rabbitmq-server ssl=on'
+
+ - name: Add all Juju relations between services
+ script: scripts/juju-relations.py
+
+ - pause: prompt="Wait for relations to be fully added"
+
+# Play: Use libvirt hooks to set up iptables
+- hosts: singapore-head
+ sudo: yes
+ tasks:
+ - name: Enable port forwarding for services
+ copy: src=files/{{ item }}
+ dest={{ item }}
+ mode=0755
+ notify:
+ - reload libvirt config
+ - run qemu hook
+ with_items:
+ - /etc/libvirt/hooks/daemon
+ - /etc/libvirt/hooks/qemu
+
+ handlers:
+ - name: reload libvirt config
+ shell: killall -HUP libvirtd
+
+ - name: run qemu hook
+ shell: /etc/libvirt/hooks/qemu start start
+
+# Play: Create credentials, set up some basic OpenStack
+- hosts: singapore-head
+ sudo: no
+ tasks:
+
+ - name: Get keystone admin password
+ shell: juju run --unit=keystone/0 "sudo cat /var/lib/keystone/keystone.passwd"
+ register: keystone_password
+
+ - shell: uvt-kvm ip keystone
+ register: keystone_ip
+
+ - name: Create credentials
+ template: src=templates/admin-openrc.sh.j2
+ dest=/home/ubuntu/admin-openrc.sh
+
+ - name: Copy credentials to nova-cloud-controller
+ shell: "scp admin-openrc.sh nova-cloud-controller:"
+
+ - name: Get public key
+ shell: cat /home/ubuntu/.ssh/id_rsa.pub
+ register: sshkey
+
+- hosts: singapore-compute
+ sudo: yes
+ vars:
+ control_net: 192.168.122.0/24
+ gateway: 198.71.44.85
+ tasks:
+ - name: Add route via /etc/rc.local
+ template: src=templates/etc/rc.local
+ dest=/etc/rc.local
+ mode=0755
+ notify:
+ - run /etc/rc.local
+
+ - name: Add key
+ authorized_key: user=ubuntu
+ key="{{ hostvars['opencloud3.sing.internet2.edu']['sshkey']['stdout'] }}"
+
+ handlers:
+ - name: run /etc/rc.local
+ shell: /etc/rc.local
+
+# What's left
+# - Adding compute nodes
+# - Add machine
+# - Deploy charm
+# - Remove virbr0
+#
+# - Creating br-ex and br-nat networks
+# - But this should perhaps be done by OpenCloud?
\ No newline at end of file
diff --git a/tasks/vm-ips.yml b/tasks/vm-ips.yml
new file mode 100644
index 0000000..519535f
--- /dev/null
+++ b/tasks/vm-ips.yml
@@ -0,0 +1,30 @@
+---
+- shell: uvt-kvm ip juju
+ register: juju_ip
+
+- shell: uvt-kvm ip mysql
+ register: mysql_ip
+
+- shell: uvt-kvm ip rabbitmq-server
+ register: rabbitmq_ip
+
+- shell: uvt-kvm ip keystone
+ register: keystone_ip
+
+- shell: uvt-kvm ip glance
+ register: glance_ip
+
+- shell: uvt-kvm ip nova-cloud-controller
+ register: novacc_ip
+
+- shell: uvt-kvm ip quantum-gateway
+ register: quantum_ip
+
+- shell: uvt-kvm ip openstack-dashboard
+ register: horizon_ip
+
+- shell: uvt-kvm ip nagios
+ register: nagios_ip
+
+- shell: uvt-kvm ip ceilometer
+ register: ceilometer_ip
\ No newline at end of file
diff --git a/templates/admin-openrc.sh.j2 b/templates/admin-openrc.sh.j2
new file mode 100644
index 0000000..260f035
--- /dev/null
+++ b/templates/admin-openrc.sh.j2
@@ -0,0 +1,5 @@
+export OS_USERNAME=admin
+export OS_PASSWORD={{ keystone_password.stdout }}
+export OS_TENANT_NAME=admin
+export OS_AUTH_URL=https://{{ keystone_ip.stdout }}:5000/v2.0
+export OS_REGION_NAME=RegionOne
diff --git a/templates/environments.yaml.j2 b/templates/environments.yaml.j2
new file mode 100644
index 0000000..710afa1
--- /dev/null
+++ b/templates/environments.yaml.j2
@@ -0,0 +1,7 @@
+default: manual
+environments:
+ manual:
+ type: manual
+ bootstrap-host: {{ juju_ip.stdout }}
+ bootstrap-user: ubuntu
+ default-series: trusty
diff --git a/templates/etc/ansible/hosts.j2 b/templates/etc/ansible/hosts.j2
new file mode 100644
index 0000000..742a35a
--- /dev/null
+++ b/templates/etc/ansible/hosts.j2
@@ -0,0 +1,14 @@
+[localhost]
+127.0.0.1 hostname={{ ansible_fqdn }}
+
+[services]
+juju
+mysql
+rabbitmq-server
+keystone
+glance
+nova-cloud-controller
+quantum-gateway
+openstack-dashboard
+ceilometer
+nagios
diff --git a/templates/etc/cron.d/ansible-pull.j2 b/templates/etc/cron.d/ansible-pull.j2
new file mode 100644
index 0000000..12f3f6d
--- /dev/null
+++ b/templates/etc/cron.d/ansible-pull.j2
@@ -0,0 +1,2 @@
+# Cron job to git clone/pull a repo and then run locally
+{{ schedule }} {{ cron_user }} ansible-pull -o -d {{ workdir }} -U {{ repo_url }} >>{{ logfile }} 2>&1
diff --git a/templates/etc/hosts.j2 b/templates/etc/hosts.j2
new file mode 100644
index 0000000..f153b6e
--- /dev/null
+++ b/templates/etc/hosts.j2
@@ -0,0 +1,17 @@
+127.0.0.1 localhost
+127.0.1.1 ubuntu
+{{ juju_ip.stdout }} juju
+{{ mysql_ip.stdout }} mysql
+{{ rabbitmq_ip.stdout }} rabbitmq-server
+{{ keystone_ip.stdout }} keystone
+{{ glance_ip.stdout }} glance
+{{ novacc_ip.stdout }} nova-cloud-controller
+{{ quantum_ip.stdout }} quantum-gateway
+{{ horizon_ip.stdout }} openstack-dashboard
+{{ ceilometer_ip.stdout }} ceilometer
+{{ nagios_ip.stdout }} nagios
+
+# The following lines are desirable for IPv6 capable hosts
+::1 localhost ip6-localhost ip6-loopback
+ff02::1 ip6-allnodes
+ff02::2 ip6-allrouters
diff --git a/templates/etc/logrotate.d/ansible-pull.j2 b/templates/etc/logrotate.d/ansible-pull.j2
new file mode 100644
index 0000000..e396f31
--- /dev/null
+++ b/templates/etc/logrotate.d/ansible-pull.j2
@@ -0,0 +1,7 @@
+{{ logfile }} {
+ rotate 7
+ daily
+ compress
+ missingok
+ notifempty
+}
diff --git a/templates/etc/rc.local b/templates/etc/rc.local
new file mode 100644
index 0000000..7eb7ab1
--- /dev/null
+++ b/templates/etc/rc.local
@@ -0,0 +1,14 @@
+#!/bin/sh -e
+#
+# rc.local
+#
+# This script is executed at the end of each multiuser runlevel.
+# Make sure that the script will "exit 0" on success or any other
+# value on error.
+#
+# In order to enable or disable this script just change the execution
+# bits.
+
+route add -net {{ control_net }} gw {{ gateway }} || true
+
+exit 0
\ No newline at end of file