CORD-912
OpenCloud support
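
Splits the monolithic install playbooks into per-step playbooks and adds
an opencloud inventory. As a sketch of how one of the new playbooks
might be invoked (assuming the repo root as the working directory and
ansible-playbook on the PATH):

  ansible-playbook -i inventory/opencloud prep-headnode-playbook.yml
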
Change-Id: I170597cacd76c84c795a7bf4c8e77e068ebcd72a
diff --git a/build-core-image-playbook.yml b/build-core-image-playbook.yml
new file mode 100644
index 0000000..cb5f7a0
--- /dev/null
+++ b/build-core-image-playbook.yml
@@ -0,0 +1,17 @@
+---
+# build-core-image-playbook.yml
+
+- name: Include vars
+ hosts: build
+ tasks:
+ - name: Include variables
+ include_vars: "{{ item }}"
+ with_items:
+ - "profile_manifests/{{ cord_profile }}.yml"
+ - profile_manifests/local_vars.yml
+
+- name: Build the XOS core image
+ hosts: build
+ roles:
+ - xos-core-build
+
diff --git a/build-platform-install-playbook.yml b/build-platform-install-playbook.yml
index 358e300..b459cfa 100644
--- a/build-platform-install-playbook.yml
+++ b/build-platform-install-playbook.yml
@@ -1,17 +1,5 @@
---
# build-platform-install-playbook.yml
-- name: Include vars
- hosts: build
- tasks:
- - name: Include variables
- include_vars: "{{ item }}"
- with_items:
- - "profile_manifests/{{ cord_profile }}.yml"
- - profile_manifests/local_vars.yml
-
-- name: Build the core image
- hosts: build
- roles:
- - xos-core-build
+- include: build-core-image-playbook.yml
diff --git a/copy-profile-playbook.yml b/copy-profile-playbook.yml
index 154ba50..70d8dfb 100644
--- a/copy-profile-playbook.yml
+++ b/copy-profile-playbook.yml
@@ -21,7 +21,7 @@
roles:
- { role: copy-profile, become: yes }
- { role: ssh-install, become: yes }
- - { role: glance-images, become: yes }
+ - { role: glance-images, become: yes, when: on_maas }
- { role: copy-credentials, become: yes, when: on_maas }
- name: Install ssh keys when using MaaS
diff --git a/cord-compute-maas-playbook.yml b/cord-compute-maas-playbook.yml
index 043d316..4da725f 100644
--- a/cord-compute-maas-playbook.yml
+++ b/cord-compute-maas-playbook.yml
@@ -11,34 +11,11 @@
- "profile_manifests/{{ cord_profile }}.yml"
- profile_manifests/local_vars.yml
-- name: Configure compute hosts to use DNS server
- hosts: all
- become: yes
- roles:
- - { role: dns-configure, when: not on_maas }
+- include: prep-computenode-playbook.yml
+- include: deploy-computenode-playbook.yml
-- name: Prep systems
- hosts: compute
- become: yes
- roles:
- - common-prep
- - { role: cloudlab-prep, when: on_cloudlab }
-
-- name: Configure head node (for sshkey)
+- name: Enable compute nodes in XOS (MaaS)
hosts: head
roles:
- - { role: head-prep, become: yes }
-
-- name: Configure compute nodes
- hosts: compute
- become: yes
- roles:
- - compute-prep
-
-- name: Deploy compute nodes, create configuration
- hosts: head
- roles:
- - juju-compute-setup
- - compute-node-config
- compute-node-enable-maas
diff --git a/cord-compute-playbook.yml b/cord-compute-playbook.yml
index 2ccb405..75de950 100644
--- a/cord-compute-playbook.yml
+++ b/cord-compute-playbook.yml
@@ -2,50 +2,7 @@
# cord-compute-playbook.yml
# Installs and configures compute nodes
-- name: Include vars
- hosts: all
- tasks:
- - name: Include variables
- include_vars: "{{ item }}"
- with_items:
- - "profile_manifests/{{ cord_profile }}.yml"
- - profile_manifests/local_vars.yml
-
-- name: Configure compute hosts to use DNS server
- hosts: all
- become: yes
- roles:
- - { role: dns-configure, when: not on_maas }
-
-- name: Prep systems
- hosts: compute
- become: yes
- roles:
- - common-prep
- - { role: cloudlab-prep, when: on_cloudlab }
-
-- name: Configure head node (for sshkey)
- hosts: head
- roles:
- - { role: head-prep, become: yes }
-
-- name: Configure compute nodes
- hosts: compute
- become: yes
- roles:
- - compute-prep
-
-- name: Deploy compute nodes, create configuration
- hosts: head
- roles:
- - juju-compute-setup
- - compute-node-config
-
-- include: add-onboard-containers-playbook.yml
-
-- name: Enable compute nodes in XOS
- hosts: xos_ui
- connection: docker
- roles:
- - compute-node-enable
+- include: prep-computenode-playbook.yml
+- include: deploy-computenode-playbook.yml
+- include: onboard-openstack-playbook.yml
diff --git a/deploy-computenode-playbook.yml b/deploy-computenode-playbook.yml
new file mode 100644
index 0000000..db75306
--- /dev/null
+++ b/deploy-computenode-playbook.yml
@@ -0,0 +1,19 @@
+---
+# deploy-computenode-playbook.yml
+# Deploys compute nodes with Juju, creates config
+
+- name: Include vars
+ hosts: all
+ tasks:
+ - name: Include variables
+ include_vars: "{{ item }}"
+ with_items:
+ - "profile_manifests/{{ cord_profile }}.yml"
+ - profile_manifests/local_vars.yml
+
+- name: Deploy compute nodes, create configuration
+ hosts: head
+ roles:
+ - juju-compute-setup
+ - compute-node-config
+
diff --git a/deploy-openstack-playbook.yml b/deploy-openstack-playbook.yml
index fa44a2d..68d28cb 100644
--- a/deploy-openstack-playbook.yml
+++ b/deploy-openstack-playbook.yml
@@ -3,7 +3,7 @@
# Deploys OpenStack in LXD containers on the CORD head node
- name: Include vars
- hosts: all, localhost
+ hosts: all
tasks:
- name: Include variables
include_vars: "{{ item }}"
@@ -11,21 +11,20 @@
- "profile_manifests/{{ cord_profile }}.yml"
- profile_manifests/local_vars.yml
-- name: Configure head node, create containers
+- name: Create LXD containers
hosts: head
+ become: yes
roles:
- - { role: head-prep, become: yes }
- create-lxd
-- name: Start OpenStack install
+- name: Finish container configuration
+ hosts: head
+ roles:
+ - lxd-finish
+
+- name: Juju OpenStack install
hosts: head
roles:
- juju-setup
-
-# Put plays here that will execute in parallel to
-# the OpenStack installation
-
-- name: Finish OpenStack install
- hosts: head
- roles:
- juju-finish
+
diff --git a/glance-images-playbook.yml b/glance-images-playbook.yml
new file mode 100644
index 0000000..1880c1f
--- /dev/null
+++ b/glance-images-playbook.yml
@@ -0,0 +1,17 @@
+---
+# glance-images-playbook.yml
+# Downloads Glance images required by the profile
+
+- name: Include vars
+ hosts: head
+ tasks:
+ - name: Include variables
+ include_vars: "{{ item }}"
+ with_items:
+ - "profile_manifests/{{ cord_profile }}.yml"
+ - profile_manifests/local_vars.yml
+
+- name: Download images for Glance
+ hosts: head
+ roles:
+ - glance-images
diff --git a/inventory/opencloud b/inventory/opencloud
new file mode 100644
index 0000000..96d3bcf
--- /dev/null
+++ b/inventory/opencloud
@@ -0,0 +1,19 @@
+; opencloud-in-a-box inventory
+
+[all:vars]
+cord_profile=opencloud
+virtual_install=True
+
+[config]
+localhost ansible_connection=local
+
+[head]
+head1
+
+[compute]
+compute1
+compute2
+
+[build]
+localhost ansible_connection=local
+
diff --git a/launch-xos-playbook.yml b/launch-xos-playbook.yml
index 8b1a0fd..6d6d281 100644
--- a/launch-xos-playbook.yml
+++ b/launch-xos-playbook.yml
@@ -1,28 +1,9 @@
---
# launch-xos-playbook.yml
-- name: Include vars
- hosts: all
- tasks:
- - name: Include variables
- include_vars: "{{ item }}"
- with_items:
- - "profile_manifests/{{ cord_profile }}.yml"
- - profile_manifests/local_vars.yml
-
-# for docker, docker-compose
- include: devel-tools-playbook.yml
-- name: Bring up XOS containers
- hosts: head
- roles:
- - xos-up
+- include: start-xos-playbook.yml
-- include: add-onboard-containers-playbook.yml
+- include: onboard-profile-playbook.yml
-- name: Check to see if XOS UI is ready, apply profile config
- hosts: xos_ui
- connection: docker
- roles:
- - xos-ready
- - xos-config
diff --git a/onboard-openstack-playbook.yml b/onboard-openstack-playbook.yml
new file mode 100644
index 0000000..dbc5469
--- /dev/null
+++ b/onboard-openstack-playbook.yml
@@ -0,0 +1,13 @@
+---
+# onboard-openstack-playbook.yml
+# Onboard OpenStack into XOS
+
+- include: add-onboard-containers-playbook.yml
+
+- name: Check to see if XOS UI is ready, enable compute nodes in XOS
+ hosts: xos_ui
+ connection: docker
+ roles:
+ - xos-ready
+ - compute-node-enable
+
diff --git a/onboard-profile-playbook.yml b/onboard-profile-playbook.yml
new file mode 100644
index 0000000..9c7e28f
--- /dev/null
+++ b/onboard-profile-playbook.yml
@@ -0,0 +1,13 @@
+---
+# onboard-profile-playbook.yml
+# Configure XOS with profile-specific config
+
+- include: add-onboard-containers-playbook.yml
+
+- name: Check to see if XOS UI is ready, apply profile config
+ hosts: xos_ui
+ connection: docker
+ roles:
+ - xos-ready
+ - xos-config
+
diff --git a/prep-buildnode-playbook.yml b/prep-buildnode-playbook.yml
new file mode 100644
index 0000000..62d27ad
--- /dev/null
+++ b/prep-buildnode-playbook.yml
@@ -0,0 +1,18 @@
+---
+# prep-buildnode-playbook.yml
+# Prep build node to create artifacts
+
+- name: Include vars
+ hosts: all
+ tasks:
+ - name: Include variables
+ include_vars: "{{ item }}"
+ with_items:
+ - "profile_manifests/{{ cord_profile }}.yml"
+ - profile_manifests/local_vars.yml
+
+- name: Install Docker on build node and configure it
+ hosts: build
+ roles:
+ - docker-install
+
diff --git a/prep-computenode-playbook.yml b/prep-computenode-playbook.yml
new file mode 100644
index 0000000..93ce811
--- /dev/null
+++ b/prep-computenode-playbook.yml
@@ -0,0 +1,38 @@
+---
+# prep-computenode-playbook.yml
+# Prepares a compute node on a CORD pod
+
+- name: Include vars
+ hosts: compute
+ tasks:
+ - name: Include variables
+ include_vars: "{{ item }}"
+ with_items:
+ - "profile_manifests/{{ cord_profile }}.yml"
+ - profile_manifests/local_vars.yml
+
+- name: Common prep, install SSL certificates
+ hosts: compute
+ become: yes
+ roles:
+ - common-prep
+ - { role: pki-install, when: not on_maas }
+
+- name: Configure management network
+ hosts: compute
+ become: yes
+ roles:
+ - { role: interface-config, when: not on_maas }
+
+- name: Use head node DNS server
+ hosts: compute
+ become: yes
+ roles:
+ - { role: dns-configure, when: not on_maas }
+
+- name: Prep compute node
+ hosts: compute
+ become: yes
+ roles:
+ - compute-prep
+
diff --git a/prep-headnode-playbook.yml b/prep-headnode-playbook.yml
new file mode 100644
index 0000000..759a3c3
--- /dev/null
+++ b/prep-headnode-playbook.yml
@@ -0,0 +1,66 @@
+---
+# prep-headnode-playbook.yml
+# Preps the head node of a CORD pod for the rest of the install
+
+- name: Include vars
+ hosts: head, config
+ tasks:
+ - name: Include variables
+ include_vars: "{{ item }}"
+ with_items:
+ - "profile_manifests/{{ cord_profile }}.yml"
+ - profile_manifests/local_vars.yml
+
+- name: Prep head node
+ hosts: head
+ roles:
+ - { role: head-prep, become: yes }
+
+- name: Common prep, install certificates
+ become: yes
+ hosts: head
+ roles:
+ - common-prep
+ - pki-install
+
+- name: Install docker and configure it
+ hosts: head
+ roles:
+ - docker-install
+
+- name: Configure management network
+ hosts: head
+ become: yes
+ roles:
+ - { role: interface-config, when: not on_maas }
+
+- name: Configure networking and DNS server on head node
+ hosts: head
+ become: yes
+ roles:
+ - { role: head-mgmtbr, when: not on_maas }
+ - { role: dns-unbound, when: not on_maas }
+ - { role: dns-nsd, when: not on_maas }
+
+# needs to be in another set of plays so that nsd/unbound restart handlers run
+- name: Configure DNS resolution, DHCP on head node
+ hosts: head
+ become: yes
+ roles:
+ - { role: dns-configure, when: not on_maas }
+ - { role: dhcpd, when: not on_maas }
+
+- name: Install apt-cacher-ng
+ hosts: head
+ become: yes
+ roles:
+ - apt-cacher-ng
+
+- name: Install Java, Elastic Stack, and Apache proxy
+ hosts: head
+ become: yes
+ roles:
+ - java-oracle
+ - elasticstack
+ - apache-proxy
+
diff --git a/prep-platform-playbook.yml b/prep-platform-playbook.yml
index d6367f6..09a3741 100644
--- a/prep-platform-playbook.yml
+++ b/prep-platform-playbook.yml
@@ -1,36 +1,9 @@
---
-# cord-prep-platform.yml
+# prep-platform-playbook.yml
# Prepares the CORD head node for installing OpenStack, ONOS, and XOS
-- name: Include vars
- hosts: all
- tasks:
- - name: Include variables
- include_vars: "{{ item }}"
- with_items:
- - "profile_manifests/{{ cord_profile }}.yml"
- - profile_manifests/local_vars.yml
+# Transitional note - this should only be called by gradle, not make. The
+# included playbooks should be called individually by make.
-- name: DNS Server and apt-cacher-ng Setup
- hosts: head
- become: yes
- roles:
- - { role: dns-nsd, when: not on_maas }
- - { role: dns-unbound, when: not on_maas }
- - apt-cacher-ng
-
-- name: Configure all hosts to use DNS server
- hosts: head, compute
- become: yes
- roles:
- - { role: dns-configure, when: not on_maas }
-
-- name: Prep systems
- hosts: head, compute
- become: yes
- roles:
- - common-prep
- - pki-install
- - { role: cloudlab-prep, when: on_cloudlab }
-
-- include: deploy-elasticstack-playbook.yml
+- include: prep-headnode-playbook.yml
+- include: prep-computenode-playbook.yml
diff --git a/prereqs-check-playbook.yml b/prereqs-check-playbook.yml
new file mode 100644
index 0000000..11377bd
--- /dev/null
+++ b/prereqs-check-playbook.yml
@@ -0,0 +1,10 @@
+---
+# prereqs-check-playbook.yml
+# Checks that the virtual machine host is suitable
+
+- name: Check Prereqs
+ hosts: localhost
+ connection: local
+ roles:
+ - prereqs-common
+
diff --git a/profile_manifests/opencloud.yml b/profile_manifests/opencloud.yml
index ab9eea9..dd549ee 100644
--- a/profile_manifests/opencloud.yml
+++ b/profile_manifests/opencloud.yml
@@ -65,11 +65,6 @@
altnames:
- "DNS:xos-core.{{ site_suffix }}"
-# docker registry users
-docker_registry_users:
- - name: "{{ xos_admin_user }}"
- password: "{{ xos_admin_pass }}"
-
# Network/DNS settings
site_suffix: generic.infra.opencloud.us
diff --git a/roles/common-prep/defaults/main.yml b/roles/common-prep/defaults/main.yml
index 0bcd73c..7bfbb56 100644
--- a/roles/common-prep/defaults/main.yml
+++ b/roles/common-prep/defaults/main.yml
@@ -1,3 +1,4 @@
---
+# file: common-prep/defaults/main.yml
-run_dist_upgrade: true
+run_dist_upgrade: False
diff --git a/roles/common-prep/handlers/main.yml b/roles/common-prep/handlers/main.yml
index 537ccb3..9161ccd 100644
--- a/roles/common-prep/handlers/main.yml
+++ b/roles/common-prep/handlers/main.yml
@@ -1,9 +1,10 @@
---
-# file: roles/common-prep/handlers/main.yml
+# file: common-prep/handlers/main.yml
# from https://support.ansible.com/hc/en-us/articles/201958037-Reboot-a-server-and-wait-for-it-to-come-back
- name: restart host
- shell: sleep 2 && shutdown -r now "Ansible updates triggered"
+ shell: >
+ sleep 2 && shutdown -r now "Ansible updates triggered"
async: 1
poll: 0
ignore_errors: true
@@ -12,8 +13,10 @@
- name: wait for host
become: false
local_action:
- wait_for host={{ inventory_hostname }}
+ wait_for
+ host={{ inventory_hostname }}
port=22
- delay=60 timeout=600
- state=started
+ search_regex=OpenSSH
+ delay=60
+ timeout=600
diff --git a/roles/common-prep/tasks/main.yml b/roles/common-prep/tasks/main.yml
index 39bced7..51b0997 100644
--- a/roles/common-prep/tasks/main.yml
+++ b/roles/common-prep/tasks/main.yml
@@ -1,5 +1,5 @@
---
-# file: roles/common-prep/tasks/main.yml
+# file: common-prep/tasks/main.yml
- name: Upgrade system to current using apt
when: run_dist_upgrade
@@ -20,10 +20,9 @@
- name: Install standard packages
apt:
- pkg={{ item }}
- state=present
- update_cache=yes
- cache_valid_time=3600
+ pkg: "{{ item }}"
+ update_cache: yes
+ cache_valid_time: 3600
with_items:
- tmux
- vim
@@ -39,12 +38,13 @@
- jove
- name: Enable vim syntax highlighting
- lineinfile: dest=/etc/vim/vimrc
- regexp="^\s*syntax on"
- line="syntax on"
+ lineinfile:
+ dest: '/etc/vim/vimrc'
+ regexp: '^\s*syntax on'
+ line: 'syntax on'
- name: Configure tmux
copy:
- src=tmux.conf
- dest="{{ ansible_user_dir }}/.tmux.conf"
+ src: tmux.conf
+ dest: "{{ ansible_user_dir }}/.tmux.conf"
diff --git a/roles/compute-node-config/defaults/main.yml b/roles/compute-node-config/defaults/main.yml
index dffca62..cfbaa76 100644
--- a/roles/compute-node-config/defaults/main.yml
+++ b/roles/compute-node-config/defaults/main.yml
@@ -1,12 +1,16 @@
---
# compute-node-config/defaults/main.yml
+# default site/deployment placeholder names
+site_name: sitename
+deployment_type: deploymenttype
+
# location of cord_profile on head node
head_cord_profile_dir: /opt/cord_profile
-# name of the external interface on compute nodes. Should have the ansible_ prefix.
+# name of the external interface on compute nodes
# `fabric` is default in R-CORD
-compute_external_interface: ansible_fabric
+compute_external_interface: fabric
# service configs referenced here are likely located in cord-profile/templates
# used in openstack-compute-vtn.yaml.j2, referencing network in management-net.yaml.j2
@@ -16,4 +20,3 @@
# used in openstack-compute-vtn.yaml.j2, referencing service in fabric.yaml.j2
use_fabric: False
-
diff --git a/roles/compute-node-config/templates/openstack-compute-vtn.yaml.j2 b/roles/compute-node-config/templates/openstack-compute-vtn.yaml.j2
index 0e89cdc..88ab8d1 100644
--- a/roles/compute-node-config/templates/openstack-compute-vtn.yaml.j2
+++ b/roles/compute-node-config/templates/openstack-compute-vtn.yaml.j2
@@ -27,14 +27,9 @@
{% endif %}
# VTN networking for OpenStack Compute Nodes
-{% for node in groups["compute"] %}
-{% if (('ipv4' in hostvars[node]['ansible_fabric']) or
- ('ipv4' in hostvars[node]['ansible_br_int'])) %}
-{% if ('ipv4' in hostvars[node]['ansible_fabric']) %}
-{% set node_interface = hostvars[node]['ansible_fabric'] %}
-{% else %}
-{% set node_interface = hostvars[node]['ansible_br_int'] %}
-{% endif %}
+{% for node in groups['compute'] %}
+{% if 'ipv4' in hostvars[node]['ansible_' ~ compute_external_interface] %}
+{% set node_interface = hostvars[node]['ansible_' ~ compute_external_interface] %}
# Compute node, fully defined in compute-nodes.yaml
{{ hostvars[node]['ansible_hostname'] }}:
diff --git a/roles/compute-node-config/templates/openstack-compute.yaml.j2 b/roles/compute-node-config/templates/openstack-compute.yaml.j2
index 7325aa4..0a4fb9c 100644
--- a/roles/compute-node-config/templates/openstack-compute.yaml.j2
+++ b/roles/compute-node-config/templates/openstack-compute.yaml.j2
@@ -24,9 +24,8 @@
no-update: true
# OpenStack compute nodes
-{% for node in groups["compute"] %}
-{% if (('ipv4' in hostvars[node]['ansible_fabric']) or
- ('ipv4' in hostvars[node]['ansible_br_int'])) %}
+{% for node in groups['compute'] %}
+{% if 'ipv4' in hostvars[node]['ansible_' ~ compute_external_interface] %}
{{ hostvars[node]['ansible_hostname'] }}:
type: tosca.nodes.Node
requirements:
diff --git a/roles/compute-prep/defaults/main.yml b/roles/compute-prep/defaults/main.yml
new file mode 100644
index 0000000..dc0af11
--- /dev/null
+++ b/roles/compute-prep/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# compute-prep/defaults/main.yml
+
+ssh_pki_dir: "{{ playbook_dir }}/ssh_pki"
+
+pod_sshkey_name: "headnode"
+
diff --git a/roles/compute-prep/handlers/main.yml b/roles/compute-prep/handlers/main.yml
index d7e4f7b..881dee7 100644
--- a/roles/compute-prep/handlers/main.yml
+++ b/roles/compute-prep/handlers/main.yml
@@ -1,9 +1,5 @@
---
# file: roles/compute-prep/handlers/main.yml
-- name: run rc.local
- command: /etc/rc.local
-
-- name: update-ca-certifictes on compute node
+- name: update-ca-certificates
command: update-ca-certificates
-
diff --git a/roles/compute-prep/tasks/main.yml b/roles/compute-prep/tasks/main.yml
index 1607e5f..0a5274b 100644
--- a/roles/compute-prep/tasks/main.yml
+++ b/roles/compute-prep/tasks/main.yml
@@ -1,62 +1,52 @@
---
-# file: roles/compute-prep/tasks/main.yml
+# file: compute-prep/tasks/main.yml
- name: Install packages
apt:
name: "{{ item }}"
- state: present
update_cache: yes
cache_valid_time: 3600
with_items:
- python-yaml
+ - python-pip
- name: Add ubuntu user
user:
name: ubuntu
groups: adm
-- name: Copy over CA certs
+- name: Add pod public key to user accounts
+ when: not on_maas
+ authorized_key:
+ user: "{{ item }}"
+ key: "{{ lookup('file', '{{ ssh_pki_dir }}/client_certs/{{ pod_sshkey_name }}_sshkey.pub') }}"
+ with_items:
+ - root
+ - ubuntu
+
+- name: Add pod public key to user accounts (MaaS)
+ when: on_maas
+ authorized_key:
+ user: "{{ item }}"
+ key: "{{ lookup('file', '/opt/cord_profile/node_key.pub') }}"
+ with_items:
+ - root
+ - ubuntu
+
+- name: Copy over CA certs from head node (MaaS)
+ when: on_maas
synchronize:
src: "/usr/local/share/ca-certificates/"
dest: "/usr/local/share/ca-certificates/"
notify:
- - update-ca-certifictes on compute node
-
-- name: List certs in /usr/local/share/ca-certificates/
- command: "ls -la /usr/local/share/ca-certificates/"
- register: certs_on_compute
- tags:
- - skip_ansible_lint # diagnostics
-
-- name: Output from listing certs
- debug: var=certs_on_compute
-
-- name: Add head node ubuntu user key
- authorized_key:
- user: ubuntu
- key: "{{ hostvars[groups['head'][0]]['sshkey']['stdout'] }}"
-
-- name: Add head node root user key
- authorized_key:
- user: root
- key: "{{ hostvars[groups['head'][0]]['sshkey']['stdout'] }}"
-
-- name: Add route via /etc/rc.local
- when: not on_maas
- template:
- src=rc.local.j2
- dest=/etc/rc.local
- mode=0755
- notify:
- - run rc.local
+ - update-ca-certificates
- name: Create /var/lib/nova dir
file:
- path=/var/lib/nova
- state=directory
+ path: /var/lib/nova
+ state: directory
-- name: Remove superfluous requests package
+- name: Remove requests package which causes SSL errors
pip:
- name=requests
- state=absent
-
+ name: "requests"
+ state: absent
diff --git a/roles/compute-prep/templates/rc.local.j2 b/roles/compute-prep/templates/rc.local.j2
deleted file mode 100644
index 85d9dec..0000000
--- a/roles/compute-prep/templates/rc.local.j2
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/sh -e
-#
-# rc.local
-#
-# This script is executed at the end of each multiuser runlevel.
-# Make sure that the script will "exit 0" on success or any other
-# value on error.
-#
-# In order to enable or disable this script just change the execution
-# bits.
-
-{% set head_host = groups['head'][0] -%}
-{% set head_ip = hostvars[head_host]['ansible_default_ipv4']['address'] -%}
-{% set virt_network = virt_nets[0]['ipv4_prefix'] ~ '.0' -%}
-{% set virt_netmask = "255.255.255.0" -%}
-
-route add -net {{ virt_network }} netmask {{ virt_netmask }} gw {{ head_ip }} || true
-
-exit 0
diff --git a/roles/cord-profile/tasks/main.yml b/roles/cord-profile/tasks/main.yml
index 2edba24..c005276 100644
--- a/roles/cord-profile/tasks/main.yml
+++ b/roles/cord-profile/tasks/main.yml
@@ -33,6 +33,13 @@
mode: 0600
remote_src: True
+- name: Copy ssh public key to node_key.pub file
+ copy:
+ src: "{{ ssh_pki_dir }}/client_certs/{{ pod_sshkey_name }}_sshkey.pub"
+ dest: "{{ config_cord_profile_dir }}/node_key.pub"
+ mode: 0600
+ remote_src: True
+
- name: Copy ssh private key to key_import directory for services that require it
copy:
src: "{{ ssh_pki_dir }}/client_certs/{{ pod_sshkey_name }}_sshkey"
diff --git a/roles/create-lxd/defaults/main.yml b/roles/create-lxd/defaults/main.yml
index 86dcbc2..891ed45 100644
--- a/roles/create-lxd/defaults/main.yml
+++ b/roles/create-lxd/defaults/main.yml
@@ -1,5 +1,12 @@
---
# create-lxd/defaults/main.yml
-apt_cacher_name: apt-cache
+ssh_pki_dir: "{{ playbook_dir }}/ssh_pki"
+pod_sshkey_name: "headnode"
+
+head_lxd_list: []
+
+# For programmatic generation of MAC addresses for LXD containers
+hwaddr_prefix: "c2a4"
+mgmt_ipv4_first_octets: "192.168.200"
diff --git a/roles/create-lxd/tasks/main.yml b/roles/create-lxd/tasks/main.yml
index 1b1dd7a..ecf095a 100644
--- a/roles/create-lxd/tasks/main.yml
+++ b/roles/create-lxd/tasks/main.yml
@@ -1,57 +1,44 @@
---
# file: create-lxd/tasks/main.yml
-- name: Ensure DIG
- become: yes
- apt:
- name: dnsutils=1:9*
- state: present
-- name: Enable trusty-backports
- become: yes
+- name: Enable trusty-backports apt repository
apt_repository:
repo: "{{ item }}"
state: present
with_items:
- - "deb http://us.archive.ubuntu.com/ubuntu/ trusty-backports main restricted universe"
- - "deb-src http://us.archive.ubuntu.com/ubuntu/ trusty-backports main restricted universe"
+ - "deb http://archive.ubuntu.com/ubuntu trusty-backports main restricted universe multiverse"
-- name: Ensure LXD
- become: yes
+- name: Install LXD from trusty-backports
apt:
name: lxd
- state: present
- update_cache: yes
default_release: trusty-backports
+ update_cache: yes
+ cache_valid_time: 3600
-# For lxd_profile, has to be run as normal user
-- name: slurp user's SSH public key on remote machine to create LXD profile
- slurp:
- src: "{{ ansible_user_dir }}/.ssh/id_rsa.pub"
- register: slurped_pubkey
-
-- name: Create openstack LXD profile
- become: yes
+- name: Create LXD profiles for OpenStack services
lxd_profile:
- name: openstack
+ name: "openstack-{{ item.name }}"
state: present
config:
user.user-data: |
#cloud-config
ssh_authorized_keys:
- - "{{ slurped_pubkey['content'] | b64decode }}"
- description: 'OpenStack services on CORD'
+ - "{{ lookup('file', ssh_pki_dir ~ '/client_certs/{{ pod_sshkey_name }}_sshkey.pub') }}"
+ description: 'OpenStack service {{ item.name }} for CORD'
devices:
eth0:
- nictype: bridged
- parent: mgmtbr
type: nic
+ parent: mgmtbr
+ nictype: bridged
+ # ipv4.address: "{{ mgmt_ipv4_first_octets }}.{{ item.ipv4_last_octet }}"
+ hwaddr: "{{ item.hwaddr | default(hwaddr_prefix ~ ((mgmt_ipv4_first_octets ~ '.' ~ item.ipv4_last_octet) | ip4_hex)) | hwaddr('unix') }}"
certs:
type: disk
path: /usr/local/share/ca-certificates/cord/
source: /usr/local/share/ca-certificates/
+ with_items: "{{ head_lxd_list }}"
- name: Create containers for the OpenStack services
- become: yes
lxd_container:
name: "{{ item.name }}"
architecture: x86_64
@@ -62,7 +49,7 @@
server: https://cloud-images.ubuntu.com/releases
protocol: simplestreams
alias: "{{ ansible_distribution_release }}"
- profiles: ["openstack"]
+ profiles: ["openstack-{{ item.name }}"]
wait_for_ipv4_addresses: true
timeout: 600
with_items: "{{ head_lxd_list }}"
@@ -98,60 +85,18 @@
with_items: "{{ head_lxd_list }}"
- name: Ensure /etc/ansible directory exists
- become: yes
- file: path=/etc/ansible state=directory
+ file:
+ path: /etc/ansible
+ state: directory
+ owner: root
+ group: root
+ mode: 0755
-- name: Create /etc/ansible/hosts file
- become: yes
+- name: Create /etc/ansible/hosts file with containers list
template:
- src=ansible_hosts.j2
- dest=/etc/ansible/hosts
+ src: ansible_hosts.j2
+ dest: /etc/ansible/hosts
+ owner: root
+ group: root
+ mode: 0644
-- name: Verify that we can log into every container
- command: ansible containers -m ping -u ubuntu
- tags:
- - skip_ansible_lint # connectivity check
-
-- name: Verify that containers have external connectivity
- command: ansible containers -m uri -u ubuntu -a "url=http://www.google.com"
- tags:
- - skip_ansible_lint # connectivity check
-
-- name: Have containers use the apt-cache
- command: ansible containers -b -u ubuntu -m lineinfile -a "dest=/etc/apt/apt.conf.d/02apt-cacher-ng create=yes mode=0644 owner=root group=root regexp='^Acquire' line='Acquire::http { Proxy \"http://{{ apt_cacher_name }}:{{ apt_cacher_port | default('3142') }}\"; };'"
- tags:
- - skip_ansible_lint # running a sub job
-
-- name: Update apt cache
- command: ansible containers -m apt -b -u ubuntu -a "update_cache=yes cache_valid_time=3600"
- tags:
- - skip_ansible_lint # running a sub job
-
-- name: Update software in all the containers
- when: run_dist_upgrade
- command: ansible containers -m apt -b -u ubuntu -a "upgrade=dist"
- tags:
- - skip_ansible_lint # running a sub job
-
-- name: Update CA certificates in containers
- command: ansible containers -m shell -b -u ubuntu -a "update-ca-certificates"
- tags:
- - skip_ansible_lint # running a sub job
-
-- name: Create containers' eth0 interface config file for DNS config via resolvconf program
- when: not on_maas
- template:
- src=eth0.cfg.j2
- dest={{ ansible_user_dir }}/eth0.cfg
-
-- name: Copy eth0 interface config file to all containers
- when: not on_maas
- command: ansible containers -b -u ubuntu -m copy -a "src={{ ansible_user_dir }}/eth0.cfg dest=/etc/network/interfaces.d/eth0.cfg owner=root group=root mode=0644"
-
-- name: Restart eth0 interface on all containers
- when: not on_maas
- command: ansible containers -b -u ubuntu -m shell -a "ifdown eth0 ; ifup eth0"
-
-- name: Verify that we can log into every container after restarting network interfaces
- when: not on_maas
- command: ansible containers -m ping -u ubuntu
diff --git a/roles/create-lxd/templates/eth0.cfg.j2 b/roles/create-lxd/templates/eth0.cfg.j2
deleted file mode 100644
index 0235b8a..0000000
--- a/roles/create-lxd/templates/eth0.cfg.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-# The primary network interface
-auto eth0
-iface eth0 inet dhcp
-{% if unbound_listen_on_default %}
- dns-nameservers{% for host in groups['head'] %} {{ hostvars[host].ansible_default_ipv4.address }}{% endfor %}
-{% endif %}
-{% if dns_servers is defined %}
- dns-nameservers{% for ns in dns_servers %} {{ ns }}{% endfor %}
-{% endif %}
-{% if dns_search is defined %}
- dns-search{% for searchdom in dns_search %} {{ searchdom }}{% endfor %}
-{% endif %}
diff --git a/roles/head-mgmtbr/defaults/main.yml b/roles/head-mgmtbr/defaults/main.yml
new file mode 100644
index 0000000..3b9cbc1
--- /dev/null
+++ b/roles/head-mgmtbr/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+# head-mgmtbr/defaults/main.yml
+
+# public internet-facing NAT interface
+mgmtbr_nat_interface: eth0
+
+# management interface bridged to mgmtbr
+mgmtbr_ext_interface: eth1
diff --git a/roles/head-mgmtbr/tasks/main.yml b/roles/head-mgmtbr/tasks/main.yml
new file mode 100644
index 0000000..27cf505
--- /dev/null
+++ b/roles/head-mgmtbr/tasks/main.yml
@@ -0,0 +1,31 @@
+---
+# head-mgmtbr/tasks/main.yml
+
+- name: Create mgmtbr bridge configuration
+ template:
+ src: "mgmtbr.cfg.j2"
+ dest: /etc/network/interfaces.d/mgmtbr.cfg
+ owner: root
+ group: root
+ mode: 0644
+ register: mgmtbr_config
+
+- name: Bring up mgmtbr if reconfigured
+ when: mgmtbr_config.changed and ansible_mgmtbr is not defined
+ command: ifup mgmtbr
+ tags:
+ - skip_ansible_lint # needs to be run here or the next steps will fail
+
+- name: Configure NAT for mgmtbr
+ iptables:
+ table: nat
+ chain: POSTROUTING
+ out_interface: "{{ mgmtbr_nat_interface }}"
+ jump: MASQUERADE
+
+- name: Configure forwarding for mgmtbr
+ iptables:
+ chain: FORWARD
+ in_interface: mgmtbr
+ jump: ACCEPT
+
diff --git a/roles/head-mgmtbr/templates/mgmtbr.cfg.j2 b/roles/head-mgmtbr/templates/mgmtbr.cfg.j2
new file mode 100644
index 0000000..8e9e3b3
--- /dev/null
+++ b/roles/head-mgmtbr/templates/mgmtbr.cfg.j2
@@ -0,0 +1,11 @@
+auto mgmtbr
+iface mgmtbr inet static
+ address {{ nsd_zones[0].ipv4_first_octets }}.1
+ network {{ nsd_zones[0].ipv4_first_octets }}.0
+ netmask 255.255.255.0
+ broadcast {{ nsd_zones[0].ipv4_first_octets }}.255
+ gateway {{ nsd_zones[0].ipv4_first_octets }}.1
+ bridge_ports {{ mgmtbr_ext_interface }}
+ dns-search {{ site_suffix }}
+ dns-nameservers {{ dns_servers | join(" ") }}
+
diff --git a/roles/head-prep/defaults/main.yml b/roles/head-prep/defaults/main.yml
index 8e379dd..17a4e22 100644
--- a/roles/head-prep/defaults/main.yml
+++ b/roles/head-prep/defaults/main.yml
@@ -1,4 +1,9 @@
---
+# head-prep/defaults/main.yml
on_maas: false
+ssh_pki_dir: "{{ playbook_dir }}/ssh_pki"
+ssh_keytype: rsa
+pod_sshkey_name: "headnode"
+
diff --git a/roles/head-prep/tasks/main.yml b/roles/head-prep/tasks/main.yml
index d934c2d..2a63cdd 100644
--- a/roles/head-prep/tasks/main.yml
+++ b/roles/head-prep/tasks/main.yml
@@ -3,18 +3,18 @@
- name: Install prerequisites for using PPA repos
apt:
- name={{ item }}
- update_cache=yes
+ name: "{{ item }}"
+ update_cache: yes
+ cache_valid_time: 3600
with_items:
- python-pycurl
- software-properties-common
-- name: Add Ansible/Juju repositories
+- name: Add Ansible PPA
apt_repository:
repo={{ item }}
with_items:
- - "{{ juju_apt_repo | default('ppa:juju/stable') }}"
- # - "{{ ansible_apt_repo | default('ppa:ansible/ansible') }}"
+ - "{{ ansible_apt_repo | default('ppa:ansible/ansible') }}"
register: result
until: result | success
retries: 3
@@ -22,27 +22,29 @@
- name: Install packages
apt:
- name={{ item }}
- state=present
- update_cache=yes
+ name: "{{ item }}"
+ update_cache: yes
+ cache_valid_time: 3600
with_items:
- - uvtool
+ - bridge-utils
+ - dnsutils
- git
- - bzr
- juju-core
- - python-pip
- - python-novaclient
- - python-neutronclient
- - python-keystoneclient
- - python-glanceclient
- - python-lxml
- - virt-top
- libssl-dev
+ - libvirt-bin
- python-dev
+ - python-glanceclient
+ - python-keystoneclient
+ - python-lxml
+ - python-neutronclient
+ - python-novaclient
+ - python-pip
- sshpass
+ - virt-top
- name: Install Ansible via pip
- pip: name=ansible version=2.2.2.0
+ pip: name=ansible version=2.2.3.0
+
# - name: Make sure Ansible is newest version
# apt:
# name: "ansible"
@@ -50,44 +52,58 @@
# update_cache: yes
# cache_valid_time: 3600
# tags:
-# - skip_ansible_lint # ansible-lint complains about latest, need this or old built in 1.5.x version may be used if already installed.
+# - skip_ansible_lint # ansible-lint complains about latest, need this as distro provided 1.5.x version may be used if already installed.
- name: Install Python packages
pip:
- name={{ item}}
- state=present
+ name: "{{ item }}"
with_items:
- - urllib3
- - pyopenssl
- ndg-httpsclient
+ - passlib
- pyasn1
+ - pyopenssl
+ - urllib3
+ - gitpython
+ - graphviz
-- name: Prep user account by adding to libvirtd group and generating SSH key
- user:
- name={{ ansible_user_id }}
- generate_ssh_key=yes
- groups="libvirtd" append=yes
+- name: Add pod ssh private key to head node user
+ copy:
+ src: "{{ ssh_pki_dir }}/client_certs/{{ pod_sshkey_name }}_sshkey"
+ dest: "{{ ansible_user_dir }}/.ssh/id_{{ ssh_keytype }}"
+ owner: "{{ ansible_user_id }}"
+ group: "{{ ansible_user_gid }}"
+ mode: 0600
-- name: Register public key in variable
- shell: cat {{ ansible_user_dir }}/.ssh/id_rsa.pub
- register: sshkey
- tags:
- - skip_ansible_lint # FIXME: this should be done a different way
+- name: Add pod ssh public key to head node user
+ copy:
+ src: "{{ ssh_pki_dir }}/client_certs/{{ pod_sshkey_name }}_sshkey.pub"
+ dest: "{{ ansible_user_dir }}/.ssh/id_{{ ssh_keytype }}.pub"
+ owner: "{{ ansible_user_id }}"
+ group: "{{ ansible_user_gid }}"
+ mode: 0644
-- name: Add public key to this user account
- authorized_key:
- user={{ ansible_user_id }}
- key="{{ sshkey.stdout }}"
+- name: Add pod ssh signed public key to head node user
+ copy:
+ src: "{{ ssh_pki_dir }}/client_certs/{{ pod_sshkey_name }}_sshkey-cert.pub"
+ dest: "{{ ansible_user_dir }}/.ssh/id_{{ ssh_keytype }}-cert.pub"
+ owner: "{{ ansible_user_id }}"
+ group: "{{ ansible_user_gid }}"
+ mode: 0644
- name: Disable host key checking in ~/.ssh/config
lineinfile:
- dest={{ ansible_user_dir }}/.ssh/config
- line="StrictHostKeyChecking no"
- create=yes
- owner={{ ansible_user_id }} mode=0600
+ dest: "{{ ansible_user_dir }}/.ssh/config"
+ line: "StrictHostKeyChecking no"
+ create: yes
+ owner: "{{ ansible_user_id }}"
+ group: "{{ ansible_user_gid }}"
+ mode: 0600
- name: Disable host key checking in ~/.ansible.cfg
copy:
- src=ansible.cfg
- dest={{ ansible_user_dir }}/.ansible.cfg
- owner={{ ansible_user_id }} mode=0644
+ src: ansible.cfg
+ dest: "{{ ansible_user_dir }}/.ansible.cfg"
+ owner: "{{ ansible_user_id }}"
+ group: "{{ ansible_user_gid }}"
+ mode: 0644
+
diff --git a/roles/lxd-finish/defaults/main.yml b/roles/lxd-finish/defaults/main.yml
new file mode 100644
index 0000000..17836e2
--- /dev/null
+++ b/roles/lxd-finish/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+# lxd-finish/defaults/main.yml
+
+apt_cacher_name: apt-cache
+
diff --git a/roles/lxd-finish/tasks/main.yml b/roles/lxd-finish/tasks/main.yml
new file mode 100644
index 0000000..4c02bce
--- /dev/null
+++ b/roles/lxd-finish/tasks/main.yml
@@ -0,0 +1,51 @@
+---
+# lxd-finish/tasks/main.yml
+
+- name: Verify that we can log into every container
+ command: >
+ ansible containers -u ubuntu
+ -m ping
+ tags:
+ - skip_ansible_lint # connectivity check
+
+- name: Verify that containers have external connectivity
+ command: >
+ ansible containers -u ubuntu
+ -m uri
+ -a "url=http://www.google.com"
+ tags:
+ - skip_ansible_lint # connectivity check
+
+- name: Update CA certificates in containers
+ command: >
+ ansible containers -b -u ubuntu
+ -m command
+ -a "update-ca-certificates"
+ tags:
+ - skip_ansible_lint # running a sub job
+
+- name: Have containers use the apt-cache
+ command: >
+ ansible containers -b -u ubuntu
+ -m lineinfile
+ -a "dest=/etc/apt/apt.conf.d/02apt-cacher-ng create=yes mode=0644 owner=root group=root regexp='^Acquire' line='Acquire::http { Proxy \"http://{{ apt_cacher_name }}:{{ apt_cacher_port | default('3142') }}\"; };'"
+ tags:
+ - skip_ansible_lint # running a sub job
+
+- name: Update apt cache
+ command: >
+ ansible containers -b -u ubuntu
+ -m apt
+ -a "update_cache=yes cache_valid_time=3600"
+ tags:
+ - skip_ansible_lint # running a sub job
+
+- name: Update software in all the containers
+ when: run_dist_upgrade
+ command: >
+ ansible containers -b -u ubuntu
+ -m apt
+ -a "upgrade=dist"
+ tags:
+ - skip_ansible_lint # running a sub job
+
diff --git a/start-xos-playbook.yml b/start-xos-playbook.yml
new file mode 100644
index 0000000..1ae0aff
--- /dev/null
+++ b/start-xos-playbook.yml
@@ -0,0 +1,18 @@
+---
+# start-xos-playbook.yml
+# Start and configure XOS docker containers
+
+- name: Include vars
+ hosts: all
+ tasks:
+ - name: Include variables
+ include_vars: "{{ item }}"
+ with_items:
+ - "profile_manifests/{{ cord_profile }}.yml"
+ - profile_manifests/local_vars.yml
+
+- name: Start XOS containers with docker-compose
+ hosts: head
+ roles:
+ - xos-up
+