playbook to refresh compute nodes, timeouts lengthened, docs
added profile_name file to cord_profile dir
fix syntax error
Change-Id: I94392a7f3018aabee4c4fb0eb781fd152aa3745b
diff --git a/README.md b/README.md
index a50de6a..e089f36 100644
--- a/README.md
+++ b/README.md
@@ -49,15 +49,24 @@
### Creating a development environment on you machine
-If you are doing work that does not involve `openstack` you can create a development environment on your local machine. This environment is mostly designed to do `GUI`, `APIs` and `modeling` related work. It can also be useful to test a `synchronizer` whose job is sinchronizing data using REST APIs.
+If you are doing work that does not involve `openstack` you can create a
+development environment inside a VM on your local machine. This environment is
+mostly designed to do `GUI`, `APIs` and `modeling` related work. It can also be
+useful to test a `synchronizer` whose job is synchronizing data using REST
+APIs.
-To do that we provided a `Vagrant` vm.
-From this folder just execute `vagrant up head-node`. We'll create an Ubuntu based VM with all the `cord` code shared from your local machine, so that you can make your changes locally and quickly test the outcome.
+To do that we provide a `Vagrant` VM. From this folder just execute `vagrant
+up head-node`. We'll create an Ubuntu based VM with all the `cord` code shared
+from your local machine, so that you can make your changes locally and quickly
+test the outcome.
-Once the `vm` is created you can connect to it with `vagrant ssh head-node` and then from the `~/cord/build/platform-install` execute the profile you are interested in.
-For instance you can spin up the `frontend` configuration with: `ansible-playbook -i inventory/frontend deploy-xos-playbook.yml`.
+Once the `vm` is created you can connect to it with `vagrant ssh head-node` and
+then from the `~/cord/build/platform-install` execute the profile you are
+interested in. For instance you can spin up the `frontend` configuration with:
+`ansible-playbook -i inventory/frontend deploy-xos-playbook.yml`.
-_Note that the `cord-bootstrap.sh` script is automatically invoked by the provisioning script and the `vagrant` vm requires VirtualBox_
+_Note that the `cord-bootstrap.sh` script is automatically invoked by the
+provisioning script and the `vagrant` vm requires VirtualBox_
### Credentials
@@ -156,19 +165,20 @@
to set up a virtual multi-node R-CORD pod on a single host.
If you've already built a CiaB and want to go through the dev loop described
-above on the head/production node, when running `ansible-playbook`, you must
-pass the path to the configuration file that was generated during the build to
-the `ansible-playbook` command:
+above on the head/production node, when running `ansible-playbook` you must
+pass the path to the configuration file that was generated by Gradle during the
+build:
```
-ansible-playbook -i inventory/rcord --extra-vars @../genconfig/config.yml -playbook.yml
+ansible-playbook -i inventory/head-localhost --extra-vars @../genconfig/config.yml playbook.yml
```
You may find the following shell aliases to be helpful during development:
```
-alias xos-teardown="pushd /opt/cord/build/platform-install; ansible-playbook -i inventory/rcord --extra-vars @../genconfig/config.yml teardown-playbook.yml"
-alias xos-deploy="pushd /opt/cord/build/platform-install; ansible-playbook -i inventory/rcord --extra-vars @../genconfig/config.yml deploy-xos-playbook.yml"
+alias xos-teardown="pushd /opt/cord/build/platform-install; ansible-playbook -i inventory/head-localhost --extra-vars @/opt/cord/build/genconfig/config.yml teardown-playbook.yml"
+alias xos-deploy="pushd /opt/cord/build/platform-install; ansible-playbook -i inventory/head-localhost --extra-vars @/opt/cord/build/genconfig/config.yml deploy-xos-playbook.yml"
+alias compute-node-refresh="pushd /opt/cord/build/platform-install; ansible-playbook -i /etc/maas/ansible/pod-inventory --extra-vars=@/opt/cord/build/genconfig/config.yml compute-node-refresh-playbook.yml"
alias xos-cleanup-images="docker rmi xosproject/xos-ui xosproject/xos"
```
diff --git a/compute-node-refresh-playbook.yml b/compute-node-refresh-playbook.yml
new file mode 100644
index 0000000..4fb618b
--- /dev/null
+++ b/compute-node-refresh-playbook.yml
@@ -0,0 +1,29 @@
+---
+# compute-node-refresh-playbook.yml
+# Re-adds compute nodes to XOS after a teardown cycle
+# This is analogous to the legacy `make vtn` command in service-profile
+# On physical/CiaB w/MaaS:
+# ansible-playbook -i /etc/maas/ansible/pod-inventory --extra-vars=@/opt/cord/build/genconfig/config.yml compute-node-refresh-playbook.yml
+
+- name: Include vars
+ hosts: all
+ tasks:
+ - name: Include variables
+ include_vars: "{{ item }}"
+ with_items:
+ - "profile_manifests/{{ cord_profile }}.yml"
+ - profile_manifests/local_vars.yml
+
+- name: Recreate compute node configuration
+ hosts: head
+ roles:
+ - compute-node-config
+
+- include: add-onboard-containers-playbook.yml
+
+- name: Enable compute nodes in XOS
+ hosts: xos_ui
+ connection: docker
+ roles:
+ - compute-node-enable
+
diff --git a/roles/compute-node-config/templates/openstack-compute-vtn.yaml.j2 b/roles/compute-node-config/templates/openstack-compute-vtn.yaml.j2
index b43e1e3..0e89cdc 100644
--- a/roles/compute-node-config/templates/openstack-compute-vtn.yaml.j2
+++ b/roles/compute-node-config/templates/openstack-compute-vtn.yaml.j2
@@ -28,7 +28,13 @@
# VTN networking for OpenStack Compute Nodes
{% for node in groups["compute"] %}
-{% if 'ipv4' in hostvars[node]['ansible_fabric'] %}
+{% if (('ipv4' in hostvars[node]['ansible_fabric']) or
+ ('ipv4' in hostvars[node]['ansible_br_int'])) %}
+{% if ('ipv4' in hostvars[node]['ansible_fabric']) %}
+{% set node_interface = hostvars[node]['ansible_fabric'] %}
+{% else %}
+{% set node_interface = hostvars[node]['ansible_br_int'] %}
+{% endif %}
# Compute node, fully defined in compute-nodes.yaml
{{ hostvars[node]['ansible_hostname'] }}:
@@ -43,7 +49,7 @@
type: tosca.nodes.Tag
properties:
name: bridgeId
- value: of:0000{{ hostvars[node]['ansible_fabric']['macaddress'] | hwaddr('bare') }}
+ value: of:0000{{ node_interface['macaddress'] | hwaddr('bare') }}
requirements:
- target:
node: {{ hostvars[node]['ansible_hostname'] }}
@@ -71,7 +77,7 @@
type: tosca.nodes.Tag
properties:
name: dataPlaneIp
- value: {{ ( hostvars[node]['ansible_fabric']['ipv4']['address'] ~ '/' ~ hostvars[node]['ansible_fabric']['ipv4']['netmask'] ) | ipaddr('cidr') }}
+ value: {{ ( node_interface['ipv4']['address'] ~ '/' ~ node_interface['ipv4']['netmask'] ) | ipaddr('cidr') }}
requirements:
- target:
node: {{ hostvars[node]['ansible_hostname'] }}
diff --git a/roles/compute-node-config/templates/openstack-compute.yaml.j2 b/roles/compute-node-config/templates/openstack-compute.yaml.j2
index b0849dc..7325aa4 100644
--- a/roles/compute-node-config/templates/openstack-compute.yaml.j2
+++ b/roles/compute-node-config/templates/openstack-compute.yaml.j2
@@ -25,7 +25,8 @@
# OpenStack compute nodes
{% for node in groups["compute"] %}
-{% if 'ipv4' in hostvars[node]['ansible_fabric'] %}
+{% if (('ipv4' in hostvars[node]['ansible_fabric']) or
+ ('ipv4' in hostvars[node]['ansible_br_int'])) %}
{{ hostvars[node]['ansible_hostname'] }}:
type: tosca.nodes.Node
requirements:
diff --git a/roles/compute-node-enable/tasks/main.yml b/roles/compute-node-enable/tasks/main.yml
index 5f3557c..f746241 100644
--- a/roles/compute-node-enable/tasks/main.yml
+++ b/roles/compute-node-enable/tasks/main.yml
@@ -2,7 +2,10 @@
# compute-node-enable/tasks/main.yml
- name: Load TOSCA to add OpenStack compute nodes
- command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} {{ cord_profile_dir }}/openstack-compute.yaml"
+ command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} {{ cord_profile_dir }}/{{ item }}"
+ with_items:
+ - openstack.yaml
+ - openstack-compute.yaml
tags:
- skip_ansible_lint # TOSCA loading should be idempotent
@@ -11,7 +14,10 @@
seconds: 20
- name: Load TOSCA to enable VTN on OpenStack compute nodes
- command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} {{ cord_profile_dir }}/openstack-compute-vtn.yaml"
+ command: "python /opt/xos/tosca/run.py {{ xos_admin_user }} {{ cord_profile_dir }}/{{ item }}"
+ with_items:
+ - vtn-service.yaml
+ - openstack-compute-vtn.yaml
tags:
- skip_ansible_lint # TOSCA loading should be idempotent
diff --git a/roles/cord-profile/tasks/main.yml b/roles/cord-profile/tasks/main.yml
index 628947b..aa17d52 100644
--- a/roles/cord-profile/tasks/main.yml
+++ b/roles/cord-profile/tasks/main.yml
@@ -11,6 +11,12 @@
owner: "{{ ansible_user_id }}"
group: "{{ ansible_user_gid }}"
+- name: Create cord_profile/profile_name with the name of the profile
+ copy:
+ dest: "{{ cord_profile_dir }}/profile_name"
+ content: "{{ cord_profile }}"
+ mode: 0644
+
- name: Create subdirectories inside cord_profile directory
file:
path: "{{ cord_profile_dir }}/{{ item }}"
diff --git a/roles/xos-onboarding/tasks/main.yml b/roles/xos-onboarding/tasks/main.yml
index 841125b..9f02a01 100644
--- a/roles/xos-onboarding/tasks/main.yml
+++ b/roles/xos-onboarding/tasks/main.yml
@@ -66,7 +66,7 @@
register: xos_onboard_status
until: '"true" in xos_onboard_status.content'
retries: 60
- delay: 5
+ delay: 10
with_items: "{{ xos_libraries }}"
- name: Wait for services to be onboarded
@@ -77,7 +77,7 @@
register: xos_onboard_status
until: '"true" in xos_onboard_status.content'
retries: 60
- delay: 5
+ delay: 10
with_items: "{{ xos_services }}"
- name: Wait for XOS to be onboarded after service onboarding
@@ -88,5 +88,5 @@
register: xos_onboard_status
until: '"true" in xos_onboard_status.content'
retries: 60
- delay: 5
+ delay: 10