Enable reboot after apt dist-upgrade; ansible-lint fixes

Second round of changes: ansible-lint now passes cleanly; runtime testing is
still needed.
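
The dist-upgrade reboot is driven by a stat of /var/run/reboot-required whose
changed state notifies the existing restart/wait handlers; run_dist_upgrade
defaults to true in the new common-prep defaults. As in the role change below:

    - name: Reboot if required after dist_upgrade
      when: run_dist_upgrade
      stat:
        path: /var/run/reboot-required
      register: reboot_required
      changed_when: reboot_required.stat.exists
      notify:
        - restart host
        - wait for host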

prereqs-common: an assert built on lookup('dig') fails on the first miss and
cannot be retried with until/retries, so the shell + dig checks stay and are
tagged skip_ansible_lint.
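
Roughly the shape of the check that is kept (task name and shell line
paraphrased from roles/prereqs-common/tasks/main.yml):

    - name: DNS Lookup Check
      shell: "dig +short {{ dns_check_domain }} | grep {{ dns_check_ipv4 }}"
      register: dns_lookup_check_result
      until: dns_lookup_check_result.rc == 0
      retries: 3
      delay: 1
      tags:
        - skip_ansible_lint # assert + lookup('dig') fails quickly and won't loop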

compute-diag: look up diag_dir via the first host in the 'head' group
(groups['head'][0]) rather than 'all' for the target hosts in the single-node
case.
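
The per-host output directory now comes from the head node's facts, and each
collection command is skipped once its output file exists (abridged from
roles/compute-diag/tasks/main.yml):

    - name: Create diag_dir subdir
      file:
        dest: "/tmp/{{ hostvars[groups['head'][0]]['diag_dir'] }}/{{ inventory_hostname }}"
        state: directory
      register: compute_diag_dir

    - name: Compute node diag collection
      shell: "{{ item }} > /tmp/{{ hostvars[groups['head'][0]]['diag_dir'] }}/{{ inventory_hostname }}/{{ item | regex_replace('[^\\w-]', '_') }}"
      args:
        creates: "{{ compute_diag_dir.path }}/{{ item | regex_replace('[^\\w-]', '_') }}"
      with_items:
        - "date"
        - "arp -n"
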
Change-Id: Ie530204b989a73828f45508fcdd4374a3362c764
diff --git a/roles/common-prep/defaults/main.yml b/roles/common-prep/defaults/main.yml
new file mode 100644
index 0000000..0bcd73c
--- /dev/null
+++ b/roles/common-prep/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+
+run_dist_upgrade: true
diff --git a/roles/common-prep/tasks/main.yml b/roles/common-prep/tasks/main.yml
index e2368e9..39bced7 100644
--- a/roles/common-prep/tasks/main.yml
+++ b/roles/common-prep/tasks/main.yml
@@ -8,14 +8,12 @@
update_cache: yes
cache_valid_time: 3600
-- name: Determine if a reboot if required
+- name: Reboot if required after dist_upgrade
when: run_dist_upgrade
- stat: path=/var/run/reboot-required
- register: reboot-required
-
-- name: Perform a reboot if required
- when: run_dist_upgrade and reboot-required.exists is defined
- debug: msg="System will reboot"
+ stat:
+ path: /var/run/reboot-required
+ register: reboot_required
+ changed_when: reboot_required.stat.exists
notify:
- restart host
- wait for host
diff --git a/roles/compute-diag/tasks/main.yml b/roles/compute-diag/tasks/main.yml
index 1f25508..1bbf755 100644
--- a/roles/compute-diag/tasks/main.yml
+++ b/roles/compute-diag/tasks/main.yml
@@ -1,13 +1,16 @@
---
# compute-diag/tasks/main.yml
-#
+
- name: Create diag_dir subdir
file:
- dest="/tmp/{{ hostvars[groups['head'][0]]['diag_dir'] }}/{{ inventory_hostname }}"
- state=directory
+ dest: "/tmp/{{ hostvars[groups['head'][0]]['diag_dir'] }}/{{ inventory_hostname }}"
+ state: directory
+ register: compute_diag_dir
- name: Compute node diag collection
shell: "{{ item }} > /tmp/{{ hostvars[groups['head'][0]]['diag_dir'] }}/{{ inventory_hostname }}/{{ item | regex_replace('[^\\w-]', '_')}}"
+ args:
+    creates: "{{ compute_diag_dir.path }}/{{ item | regex_replace('[^\\w-]', '_') }}"
with_items:
- "date"
- "arp -n"
diff --git a/roles/config-virt/tasks/main.yml b/roles/config-virt/tasks/main.yml
index 89c0c7d..8937824 100644
--- a/roles/config-virt/tasks/main.yml
+++ b/roles/config-virt/tasks/main.yml
@@ -1,9 +1,18 @@
---
# roles/config-virt/tasks/main.yml
+- name: Check to see if we already have a uvtool image
+ find:
+ path: "/var/lib/uvtool/libvirt/images"
+ patterns: "x-uvt-b64-*"
+ register: uvtool_image
+
- name: Get ubuntu image for uvtool
+ when: "{{ uvtool_image.matched < 1 }}"
command: uvt-simplestreams-libvirt sync --source http://cloud-images.ubuntu.com/daily \
release={{ ansible_distribution_release }} arch=amd64
+ args:
+ creates: "/var/lib/uvtool/libvirt/images/x-uvt-b64-*"
async: 1200
poll: 0
register: uvt_sync
@@ -50,6 +59,7 @@
with_items: '{{ virt_nets }}'
- name: Wait for uvt-kvm image to be available
+ when: "{{ uvtool_image.matched < 1 }}"
async_status: jid={{ uvt_sync.ansible_job_id }}
register: uvt_sync_result
until: uvt_sync_result.finished
diff --git a/roles/create-vms/tasks/main.yml b/roles/create-vms/tasks/main.yml
index 266bad8..77244f0 100644
--- a/roles/create-vms/tasks/main.yml
+++ b/roles/create-vms/tasks/main.yml
@@ -2,9 +2,10 @@
# file: create-vms/tasks/main.yml
- name: create Virtual Machines with uvt-kvm
- shell: uvt-kvm create {{ item.name }} release={{ ansible_distribution_release }} \
+ command: uvt-kvm create {{ item.name }} release={{ ansible_distribution_release }} \
--cpu={{ item.cpu }} --memory={{ item.memMB }} --disk={{ item.diskGB }} --bridge="mgmtbr"
- creates=/var/lib/uvtool/libvirt/images/{{ item.name }}.qcow
+ args:
+ creates: "/var/lib/uvtool/libvirt/images/{{ item.name }}.qcow"
with_items: "{{ head_vm_list }}"
- name: Have VMs autostart on reboot
@@ -51,16 +52,24 @@
- name: Verify that we can log into every VM
command: ansible services -m ping -u ubuntu
+ tags:
+ - skip_ansible_lint # connectivity check
- name: Have VM's use the apt-cache
command: ansible services -b -u ubuntu -m lineinfile -a "dest=/etc/apt/apt.conf.d/02apt-cacher-ng create=yes mode=0644 owner=root group=root regexp='^Acquire' line='Acquire::http { Proxy \"http://{{ apt_cacher_name }}:{{ apt_cacher_port | default('3142') }}\"; };'"
+ tags:
+ - skip_ansible_lint # running a sub job
- name: Update apt cache
command: ansible services -m apt -b -u ubuntu -a "update_cache=yes cache_valid_time=3600"
+ tags:
+ - skip_ansible_lint # running a sub job
- name: Update software in all the VMs
when: run_dist_upgrade
command: ansible services -m apt -b -u ubuntu -a "upgrade=dist"
+ tags:
+ - skip_ansible_lint # running a sub job
- name: Create VM's eth0 interface config file for DNS config via resolvconf program
when: not on_maas
@@ -84,6 +93,8 @@
- name: Enable root ssh login on VM's that require it
command: ansible {{ item.name }} -b -u ubuntu -m authorized_key -a "user='root' key='{{ sshkey.stdout }}'"
with_items: "{{ head_vm_list | selectattr('root_ssh_login', 'defined') | list }}"
+ tags:
+ - skip_ansible_lint # FIXME, ssh key mangling
- name: Copy over docker installation playbook and docker apt-key
copy:
@@ -95,4 +106,7 @@
- name: Install docker in VM's that require it
command: ansible-playbook "{{ ansible_user_dir }}/docker-install-playbook.yml"
+ tags:
+ - skip_ansible_lint # running a sub job
+
diff --git a/roles/dell-virt/tasks/main.yml b/roles/dell-virt/tasks/main.yml
index cfc60a6..1d153f3 100644
--- a/roles/dell-virt/tasks/main.yml
+++ b/roles/dell-virt/tasks/main.yml
@@ -21,6 +21,8 @@
shell: lsmod | grep kvm_
ignore_errors: true
register: virtualization_enabled
+ tags:
+ - skip_ansible_lint # just used to register result
- name: Enable virtualization in BIOS
command: /opt/dell/toolkit/bin/syscfg --virtualization=enable
diff --git a/roles/dns-configure/tasks/main.yml b/roles/dns-configure/tasks/main.yml
index 1481bbe..07b0d5d 100644
--- a/roles/dns-configure/tasks/main.yml
+++ b/roles/dns-configure/tasks/main.yml
@@ -10,4 +10,6 @@
- name: Check that VM's can be found in DNS
shell: "dig +short {{ item.name }}.{{ site_suffix }} | grep {{ item.ipv4_last_octet }}"
with_items: "{{ head_vm_list }}"
+ tags:
+ - skip_ansible_lint # purely a way to pass/fail config done so far. Ansible needs a "dns_query" module
diff --git a/roles/head-diag/tasks/main.yml b/roles/head-diag/tasks/main.yml
index 1dd4a9b..8a8c750 100644
--- a/roles/head-diag/tasks/main.yml
+++ b/roles/head-diag/tasks/main.yml
@@ -14,6 +14,8 @@
- name: Head node diag collection
shell: "{{ item }} > ~/{{ diag_dir }}/head/{{ item | regex_replace('[^\\w-]', '_')}}"
+ args:
+ creates: "~/{{ diag_dir }}/head/{{ item | regex_replace('[^\\w-]', '_')}}"
with_items:
- "ifconfig -a"
- "route -n"
@@ -26,6 +28,8 @@
- name: Juju diag collection
shell: "{{ item }} > ~/{{ diag_dir }}/juju/{{ item | regex_replace('[^\\w-]', '_')}}"
+ args:
+ creates: "~/{{ diag_dir }}/juju/{{ item | regex_replace('[^\\w-]', '_')}}"
with_items:
- "juju status --format=summary"
- "juju status --format=json"
@@ -34,6 +38,7 @@
shell: "source ~/admin-openrc.sh && {{ item }} > ~/{{ diag_dir }}/openstack/{{ item | regex_replace('[^\\w-]', '_')}}"
args:
executable: "/bin/bash"
+ creates: "~/{{ diag_dir }}/openstack/{{ item | regex_replace('[^\\w-]', '_')}}"
with_items:
- "glance image-list"
- "nova list --all-tenants"
@@ -46,11 +51,16 @@
- name: ONOS diag collection - REST API
shell: "curl -X GET -u karaf:karaf http://onos-cord-1:8181/onos/v1/{{ item }} | python -m json.tool > ~/{{ diag_dir }}/onos/rest_{{ item | regex_replace('[^\\w-]', '_') }}"
+ args:
+ creates: "~/{{ diag_dir }}/onos/rest_{{ item | regex_replace('[^\\w-]', '_')}}"
+ warn: False # get_url or uri can't easily redirect to a file
with_items:
- "hosts"
- name: ONOS diag collection - ONOS CLI
shell: "sshpass -p 'karaf' ssh -p 8101 karaf@onos-cord {{ item }} > ~/{{ diag_dir }}/onos/{{ item | regex_replace('[^\\w-]', '_') }}"
+ args:
+ creates: "~/{{ diag_dir }}/onos/{{ item | regex_replace('[^\\w-]', '_')}}"
with_items:
- "apps -s -a"
- "bundle:list"
@@ -66,6 +76,8 @@
- name: XOS diag collection
shell: "ssh ubuntu@xos-1 \"{{ item }}\" > ~/{{ diag_dir }}/xos/{{ item | regex_replace('[^\\w-]', '_')}}"
+ args:
+ creates: "~/{{ diag_dir }}/xos/{{ item | regex_replace('[^\\w-]', '_')}}"
with_items:
- "docker ps"
- "arp -n"
@@ -73,6 +85,8 @@
- name: Copy/run/retrieve XOS docker logs
command: "{{ item }}"
+ tags:
+ - skip_ansible_lint # don't know the name of docker containers for all configurations
with_items:
- "scp {{ role_path }}/files/docker_logs.sh ubuntu@xos-1:~/docker_logs.sh"
- "ssh ubuntu@xos-1 'bash ~/docker_logs.sh'"
diff --git a/roles/head-prep/defaults/main.yml b/roles/head-prep/defaults/main.yml
new file mode 100644
index 0000000..8e379dd
--- /dev/null
+++ b/roles/head-prep/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+
+on_maas: false
+
diff --git a/roles/head-prep/tasks/main.yml b/roles/head-prep/tasks/main.yml
index b2b300e..0fd1a7b 100644
--- a/roles/head-prep/tasks/main.yml
+++ b/roles/head-prep/tasks/main.yml
@@ -45,6 +45,8 @@
- name: Register public key in variable
shell: cat {{ ansible_user_dir }}/.ssh/id_rsa.pub
register: sshkey
+ tags:
+ - skip_ansible_lint # FIXME: this should be done a different way
- name: Add public key to this user account
authorized_key:
@@ -64,19 +66,19 @@
dest={{ ansible_user_dir }}/.ansible.cfg
owner={{ ansible_user_id }} mode=0644
-- name: Copy node key (MaaS)
- when: on_maas
+- name: Copy node key
+ when: not on_maas
copy:
- src={{ maas_node_key }}
+ src={{ ansible_user_dir }}/.ssh/id_rsa
dest={{ ansible_user_dir }}/node_key
owner={{ ansible_user }}
mode=0600
remote_src=True
-- name: Copy node key (without MaaS)
- when: not on_maas
+- name: Copy node key (MaaS)
+ when: on_maas
copy:
- src={{ ansible_user_dir }}/.ssh/id_rsa
+ src={{ maas_node_key }}
dest={{ ansible_user_dir }}/node_key
owner={{ ansible_user }}
mode=0600
diff --git a/roles/juju-compute-setup/defaults/main.yml b/roles/juju-compute-setup/defaults/main.yml
index 2c0208b..f6cabc4 100644
--- a/roles/juju-compute-setup/defaults/main.yml
+++ b/roles/juju-compute-setup/defaults/main.yml
@@ -6,4 +6,4 @@
juju_config_path: /usr/local/src/juju_config.yml
charm_versions: {}
-
+
diff --git a/roles/juju-compute-setup/tasks/main.yml b/roles/juju-compute-setup/tasks/main.yml
index 0222775..7473a06 100644
--- a/roles/juju-compute-setup/tasks/main.yml
+++ b/roles/juju-compute-setup/tasks/main.yml
@@ -16,6 +16,7 @@
# iptables -A FORWARD -i mgmtbr -o eth0 -s <vmnet> -d <extnet> -j ACCEPT
- name: Add machines to Juju
+ when: "{{ groups['compute'] | difference( juju_machines.keys() ) | length }}"
command: "juju add-machine ssh:{{ item }}"
with_items: "{{ groups['compute'] | difference( juju_machines.keys() ) }}"
@@ -24,10 +25,10 @@
juju_facts:
- name: Deploy nova-compute service if needed
- command: "juju deploy {{ charm_versions[item] | default(item) }} --to {{ juju_machines[groups['compute'][0]]['machine_id'] }} --config={{ juju_config_path }}"
- with_items:
- - "nova-compute"
when: '"nova-compute" not in juju_services.keys()'
+ command: "juju deploy {{ charm_versions[item] | default(item) }} --to {{ juju_machines[groups['compute'][0]]['machine_id'] }} --config={{ juju_config_path }}"
+ with_items:
+ - "nova-compute"
- name: Create relations between nova-compute and other services if needed
command: "juju add-relation '{{ item.0.name }}' '{{ item.1 }}'"
@@ -36,15 +37,19 @@
with_subelements:
- "{{ compute_relations }}"
- relations
+ tags:
+ - skip_ansible_lint # benign to do this more than once, hard to check for
# run another time
- name: Obtain Juju Facts after deploying nova-compute
- juju_facts:
when: '"nova-compute" not in juju_services.keys()'
+ juju_facts:
- name: Add more nova-compute units
command: "juju add-unit nova-compute --to {{ juju_machines[item]['machine_id'] }}"
with_items: "{{ groups['compute'] | difference( juju_compute_nodes.keys() ) }}"
+ tags:
+ - skip_ansible_lint # benign to do this more than once, hard to check for
- name: Pause to let Juju settle
pause:
@@ -66,3 +71,6 @@
retries: 5
delay: 5
with_items: "{{ groups['compute'] }}"
+ tags:
+ - skip_ansible_lint # this really should be the os_server module, but ansible doesn't know about juju created openstack
+
diff --git a/roles/juju-setup/tasks/main.yml b/roles/juju-setup/tasks/main.yml
index 6bd790d..2646ec0 100644
--- a/roles/juju-setup/tasks/main.yml
+++ b/roles/juju-setup/tasks/main.yml
@@ -34,6 +34,7 @@
# list of active juju_services names: juju_services.keys()
- name: Add machines to Juju
+ when: "{{ head_vm_list | map(attribute='service') | list | reject('undefined') | map('format_string', '%s.'~site_suffix ) | difference( juju_machines.keys() ) | length }}"
command: "juju add-machine ssh:{{ item }}"
with_items: "{{ head_vm_list | map(attribute='service') | list | reject('undefined') | map('format_string', '%s.'~site_suffix ) | difference( juju_machines.keys() ) }}"
@@ -42,14 +43,16 @@
juju_facts:
- name: Deploy services that are hosted in their own VM
+ when: "{{ vm_service_list | difference( juju_services.keys() ) | length }}"
command: "juju deploy {{ charm_versions[item] | default(item) }} --to {{ juju_machines[item~'.'~site_suffix]['machine_id'] }} --config={{ juju_config_path }}"
with_items: "{{ vm_service_list | difference( juju_services.keys() ) }}"
- name: Deploy mongodb to ceilometer VM
- command: "juju deploy {{ charm_versions['mongodb'] | default('mongodb') }} --to {{ juju_machines['ceilometer.'~site_suffix]['machine_id'] }} --config={{ juju_config_path }}"
when: juju_services['mongodb'] is undefined
+ command: "juju deploy {{ charm_versions['mongodb'] | default('mongodb') }} --to {{ juju_machines['ceilometer.'~site_suffix]['machine_id'] }} --config={{ juju_config_path }}"
- name: Deploy services that don't have their own VM
+ when: "{{ standalone_service_list | difference( juju_services.keys() ) | length }}"
command: "juju deploy {{ charm_versions[item] | default(item) }} --config={{ juju_config_path }}"
with_items: "{{ standalone_service_list | difference( juju_services.keys() ) }}"
@@ -60,6 +63,8 @@
with_subelements:
- "{{ service_relations }}"
- relations
+ tags:
+ - skip_ansible_lint # benign to do this more than once, hard to check for
# run another time, so services will be in juju_services list
- name: Obtain Juju Facts after service creation
@@ -77,36 +82,46 @@
# secondary wait, as waiting on ports isn't enough. Probably only need one of these...
# 160*15s = 2400s = 40m max wait
- name: Wait for juju services to start
- action: command juju status --format=summary
+ command: juju status --format=summary
register: juju_summary
until: juju_summary.stdout.find("pending:") == -1
retries: 160
delay: 15
+ tags:
+ - skip_ansible_lint # checking/waiting on a system to be up
- name: Create admin-openrc.sh credentials file
template:
src=admin-openrc.sh.j2
dest={{ ansible_user_dir }}/admin-openrc.sh
-
- name: Copy nova-cloud-controller CA certificate to head
command: juju scp {{ juju_services['nova-cloud-controller']['units'].keys()[0] }}:/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt {{ ansible_user_dir }}
register: result
until: result | success
retries: 40
delay: 15
+ tags:
+    - skip_ansible_lint # checking/waiting on file availability
- name: Copy cert to system location
become: yes
- command: cp {{ ansible_user_dir }}/keystone_juju_ca_cert.crt /usr/local/share/ca-certificates
+ copy:
+ src: "{{ ansible_user_dir }}/keystone_juju_ca_cert.crt"
+ dest: "/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt"
+ remote_src: true
+ register: copied_cert
- name: update-ca-certificates
+ when: copied_cert.changed
become: yes
command: update-ca-certificates
- name: Move cert to all service VM's
+ when: copied_cert.changed
command: ansible services -b -u ubuntu -m copy -a "src={{ ansible_user_dir }}/keystone_juju_ca_cert.crt dest=/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt owner=root group=root mode=0644"
- name: update-ca-certificates in service VM's
+ when: copied_cert.changed
command: ansible services -b -u ubuntu -m command -a "update-ca-certificates"
diff --git a/roles/onos-load-apps/tasks/main.yml b/roles/onos-load-apps/tasks/main.yml
index 32a8a21..515c3c4 100644
--- a/roles/onos-load-apps/tasks/main.yml
+++ b/roles/onos-load-apps/tasks/main.yml
@@ -4,6 +4,8 @@
- name: Disable loading of CORD apps from Maven repo
command: ansible xos-1 -u ubuntu -m lineinfile \
-a "dest=~/service-profile/{{ xos_configuration }}/make-vtn-external-yaml.sh state=absent regexp='install_dependencies'"
+ tags:
+ - skip_ansible_lint # running a sub-job
- name: Create directory for CORD apps and load script
file:
@@ -26,6 +28,11 @@
with_items:
- onos-app
+- name: Wait for ONOS to be ready
+ wait_for:
+ host: "{{ onos_cord_vm_hostname }}"
+ port: 8181
+
- name: Download CORD apps from maven repo
maven_artifact:
repository_url: "{{ cord_apps_repo_url }}"
@@ -35,12 +42,10 @@
extension: "oar"
dest: "{{ ansible_user_dir }}/cord_apps/{{ item.name }}.oar"
with_items: "{{ cord_apps }}"
+ register: maven_artifact_dl
-- name: Wait for ONOS to be ready
- wait_for:
- host: "{{ onos_cord_vm_hostname }}"
- port: 8181
-
+# assumes no interruption between this and previous steps...
- name: Install CORD apps
+ when: maven_artifact_dl.changed
command: "{{ ansible_user_dir }}/cord_apps/cord_app_loader.sh"
diff --git a/roles/onos-vm-install/files/onos-setup-playbook.yml b/roles/onos-vm-install/files/onos-setup-playbook.yml
index fe33054..56bf06e 100644
--- a/roles/onos-vm-install/files/onos-setup-playbook.yml
+++ b/roles/onos-vm-install/files/onos-setup-playbook.yml
@@ -12,10 +12,11 @@
path: "{{ ansible_user_dir }}/cord"
state: directory
-# Should replace with http://docs.ansible.com/ansible/docker_module.html, when replacements are stable
- name: Pull docker image for ONOS
become: yes
command: "docker pull {{ onos_docker_image }}"
+ tags:
+ - skip_ansible_lint # Should replace with http://docs.ansible.com/ansible/docker_module.html, when replacements are stable
# Setup specific for onos-cord VM
- hosts: onos-cord-1
diff --git a/roles/onos-vm-install/tasks/main.yml b/roles/onos-vm-install/tasks/main.yml
index bd3d073..1f2eedc 100644
--- a/roles/onos-vm-install/tasks/main.yml
+++ b/roles/onos-vm-install/tasks/main.yml
@@ -26,4 +26,6 @@
async: 1800
poll: 0
register: onos_setup_playbook
+ tags:
+ - skip_ansible_lint # running a sub-job
diff --git a/roles/prereqs-common/tasks/main.yml b/roles/prereqs-common/tasks/main.yml
index aecde2c..642a1b4 100644
--- a/roles/prereqs-common/tasks/main.yml
+++ b/roles/prereqs-common/tasks/main.yml
@@ -23,6 +23,10 @@
until: dns_lookup_check_result.rc == 0
retries: 3
delay: 1
+ tags:
+ - skip_ansible_lint # tried assert + dig (below), but it fails quickly and won't loop
+ # assert:
+ # that: "{{ lookup('dig', dns_check_domain ) == dns_check_ipv4 }}"
- name: DNS Global Root Connectivity Check
shell: "dig @{{ item }} +trace +short {{ dns_check_domain }} | grep {{ dns_check_ipv4 }}"
@@ -31,6 +35,8 @@
until: dns_global_check_result.rc == 0
retries: 3
delay: 1
+ tags:
+ - skip_ansible_lint # too complex for lookup('dig', ...) to handle
- name: HTTP Download Check
get_url:
diff --git a/roles/simulate-fabric/files/simulate-fabric-playbook.yml b/roles/simulate-fabric/files/simulate-fabric-playbook.yml
index da1e876..ad326e1 100644
--- a/roles/simulate-fabric/files/simulate-fabric-playbook.yml
+++ b/roles/simulate-fabric/files/simulate-fabric-playbook.yml
@@ -67,6 +67,8 @@
command: "iptables -t nat -C POSTROUTING -s 10.168.0.0/16 ! -d 10.168.0.0/16 -j MASQUERADE"
register: iptables_check
failed_when: "iptables_check|failed and 'No chain/target/match by that name' not in iptables_check.stderr"
+ tags:
+ - skip_ansible_lint # FIXME: should use iptables module when it supports inversion of ranges
- name: Create iptables rule
when: "iptables_check.rc != 0"
diff --git a/roles/simulate-fabric/tasks/main.yml b/roles/simulate-fabric/tasks/main.yml
index c9e834b..3f919ff 100644
--- a/roles/simulate-fabric/tasks/main.yml
+++ b/roles/simulate-fabric/tasks/main.yml
@@ -13,4 +13,6 @@
- name: Setup simulated fabric on nova-compute-1 using playbook
command: ansible-playbook {{ ansible_user_dir }}/simulate-fabric-playbook.yml
+ tags:
+ - skip_ansible_lint # running a sub-job
diff --git a/roles/test-client-install/files/test-client-playbook.yml b/roles/test-client-install/files/test-client-playbook.yml
index 7526cf4..c802a83 100644
--- a/roles/test-client-install/files/test-client-playbook.yml
+++ b/roles/test-client-install/files/test-client-playbook.yml
@@ -27,10 +27,14 @@
- name: Create testclient
become: yes
shell: lxc-ls | grep testclient || lxc-create -t ubuntu -n testclient
+ tags:
+    - skip_ansible_lint # FIXME: should use the lxc_container module
- name: Start testclient
become: yes
shell: lxc-info -n testclient -s | grep RUNNING || lxc-start -n testclient
+ tags:
+    - skip_ansible_lint # FIXME: should use the lxc_container module
- name: Set up networking inside the testclient for testing sample CORD subscriber
become: yes
@@ -40,3 +44,5 @@
- "lxc-attach -n testclient -- bash -c 'ip link show eth0.222.111 || ip link add link eth0.222 name eth0.222.111 type vlan id 111'"
- "lxc-attach -n testclient -- ifconfig eth0.222 up"
- "lxc-attach -n testclient -- ifconfig eth0.222.111 up"
+ tags:
+ - skip_ansible_lint # non-trivial use case
diff --git a/roles/test-client-install/tasks/main.yml b/roles/test-client-install/tasks/main.yml
index d10512d..fdf4eaf 100644
--- a/roles/test-client-install/tasks/main.yml
+++ b/roles/test-client-install/tasks/main.yml
@@ -11,4 +11,6 @@
async: 3600
poll: 0
register: test_client_playbook
+ tags:
+ - skip_ansible_lint # running a sub-job
diff --git a/roles/test-exampleservice/tasks/main.yml b/roles/test-exampleservice/tasks/main.yml
index 2ae3813..92bf70b 100644
--- a/roles/test-exampleservice/tasks/main.yml
+++ b/roles/test-exampleservice/tasks/main.yml
@@ -6,6 +6,8 @@
- name: Onboard ExampleService and instantiate a VM
command: ansible xos-1 -u ubuntu -m shell \
-a "cd ~/service-profile/cord-pod; make exampleservice"
+ tags:
+ - skip_ansible_lint # running a sub job
- name: Pause 60 seconds (work around bug in synchronizer)
pause: seconds=60
@@ -13,6 +15,8 @@
- name: Re-run 'make vtn' (work around bug in synchronizer)
command: ansible xos-1 -u ubuntu -m shell \
-a "cd ~/service-profile/cord-pod; make vtn"
+ tags:
+ - skip_ansible_lint # running a sub job
- name: Wait for ExampleService VM to come up
shell: bash -c "source ~/admin-openrc.sh; nova list --all-tenants|grep 'exampleservice.*ACTIVE' > /dev/null"
@@ -20,18 +24,26 @@
until: result | success
retries: 10
delay: 60
+ tags:
+ - skip_ansible_lint # running a sub job
- name: Get ID of VM
shell: bash -c "source ~/admin-openrc.sh; nova list --all-tenants|grep mysite_exampleservice|cut -d '|' -f 2"
register: nova_id
+ tags:
+ - skip_ansible_lint # running a sub job
- name: Get mgmt IP of VM
shell: bash -c "source ~/admin-openrc.sh; nova interface-list {{ nova_id.stdout }}|grep -o -m 1 172.27.[[:digit:]]*.[[:digit:]]*"
register: mgmt_ip
+ tags:
+ - skip_ansible_lint # running a sub job
- name: Get public IP of VM
shell: bash -c "source ~/admin-openrc.sh; nova interface-list {{ nova_id.stdout }}|grep -o -m 1 10.168.[[:digit:]]*.[[:digit:]]*"
register: public_ip
+ tags:
+ - skip_ansible_lint # running a sub job
- name: Wait for Apache to come up inside VM
shell: ssh -o ProxyCommand="ssh -W %h:%p ubuntu@nova-compute-1" ubuntu@{{ mgmt_ip.stdout }} "ls /var/run/apache2/apache2.pid" > /dev/null
@@ -39,15 +51,21 @@
until: result | success
retries: 20
delay: 60
+ tags:
+ - skip_ansible_lint # running a sub job
- name: Install curl in testclient
command: ansible nova-compute-1 -u ubuntu -m shell \
-s -a "lxc-attach -n testclient -- apt-get -y install curl"
+ tags:
+ - skip_ansible_lint # running a sub job
- name: Test connectivity to ExampleService from test client
command: ansible nova-compute-1 -u ubuntu -m shell \
-s -a "lxc-attach -n testclient -- curl -s http://{{ public_ip.stdout }}"
register: curltest
+ tags:
+ - skip_ansible_lint # running a sub job
- name: Output from curl test
debug: var=curltest.stdout_lines
diff --git a/roles/test-prep/tasks/main.yml b/roles/test-prep/tasks/main.yml
deleted file mode 100644
index 1ebf604..0000000
--- a/roles/test-prep/tasks/main.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# roles/test-prep/tasks/main.yml
-
-- name: Add local resolver to /etc/resolv.conf
- lineinfile:
- dest=/etc/resolv.conf
- insertafter=".*DO NOT EDIT THIS FILE.*"
- line="nameserver 192.168.122.1"
-
diff --git a/roles/test-vsg/tasks/main.yml b/roles/test-vsg/tasks/main.yml
index 14ed325..eaad0a5 100644
--- a/roles/test-vsg/tasks/main.yml
+++ b/roles/test-vsg/tasks/main.yml
@@ -6,6 +6,8 @@
- name: Create a sample CORD subscriber
command: ansible xos-1 -u ubuntu -m shell \
-a "cd ~/service-profile/cord-pod; make cord-subscriber"
+ tags:
+ - skip_ansible_lint # running a sub job
- name: Pause 60 seconds (work around bug in synchronizer)
pause: seconds=60
@@ -13,6 +15,8 @@
- name: Re-run 'make vtn' (work around bug in synchronizer)
command: ansible xos-1 -u ubuntu -m shell \
-a "cd ~/service-profile/cord-pod; make vtn"
+ tags:
+ - skip_ansible_lint # running a sub job
- name: Wait for vSG VM to come up
shell: bash -c "source ~/admin-openrc.sh; nova list --all-tenants|grep 'vsg.*ACTIVE' > /dev/null"
@@ -20,14 +24,20 @@
until: result | success
retries: 10
delay: 60
+ tags:
+ - skip_ansible_lint # running a sub job
- name: Get ID of VM
shell: bash -c "source ~/admin-openrc.sh; nova list --all-tenants|grep mysite_vsg|cut -d '|' -f 2"
register: nova_id
+ tags:
+ - skip_ansible_lint # running a sub job
- name: Get mgmt IP of VM
shell: bash -c "source ~/admin-openrc.sh; nova interface-list {{ nova_id.stdout }}|grep -o -m 1 172.27.[[:digit:]]*.[[:digit:]]*"
register: mgmt_ip
+ tags:
+ - skip_ansible_lint # running a sub job
- name: Wait for Docker container inside VM to come up
shell: ssh -o ProxyCommand="ssh -W %h:%p ubuntu@nova-compute-1" ubuntu@{{ mgmt_ip.stdout }} "sudo docker ps|grep vcpe" > /dev/null
@@ -35,15 +45,21 @@
until: result | success
retries: 20
delay: 60
+ tags:
+ - skip_ansible_lint # running a sub job
- name: Run dhclient inside testclient to get IP address from vSG
command: ansible nova-compute-1 -u ubuntu -m shell \
-s -a "lxc-attach -n testclient -- dhclient eth0.222.111"
+ tags:
+ - skip_ansible_lint # running a sub job
- name: Test external connectivity in test client
command: ansible nova-compute-1 -u ubuntu -m shell \
-s -a "lxc-attach -n testclient -- ping -c 3 8.8.8.8"
register: pingtest
+ tags:
+ - skip_ansible_lint # running a sub job
- name: Output from ping test
- debug: var=pingtest.stdout_lines
\ No newline at end of file
+ debug: var=pingtest.stdout_lines
diff --git a/roles/xos-compute-setup/tasks/main.yml b/roles/xos-compute-setup/tasks/main.yml
index b2689a7..a7b8414 100644
--- a/roles/xos-compute-setup/tasks/main.yml
+++ b/roles/xos-compute-setup/tasks/main.yml
@@ -5,3 +5,5 @@
- name: ssh to XOS VM and run 'make new-nodes'
command: ssh ubuntu@xos "cd {{ service_profile_repo_dest }}/{{ xos_configuration }}/; make new-nodes"
+ tags:
+ - skip_ansible_lint # running a sub-job
diff --git a/roles/xos-install/tasks/main.yml b/roles/xos-install/tasks/main.yml
index 22a045a..7008edb 100644
--- a/roles/xos-install/tasks/main.yml
+++ b/roles/xos-install/tasks/main.yml
@@ -2,24 +2,28 @@
# tasks for xos-install role
- name: checkout XOS repo
- git: repo={{ xos_repo_url }}
- dest={{ xos_repo_dest }}
- version={{ xos_repo_branch }}
+ git:
+ repo: "{{ xos_repo_url }}"
+ dest: "{{ xos_repo_dest }}"
+ version: "{{ xos_repo_branch }}"
- name: checkout service-profile repo
- git: repo={{ service_profile_repo_url }}
- dest={{ service_profile_repo_dest }}
- version={{ service_profile_repo_branch }}
+ git:
+ repo: "{{ service_profile_repo_url }}"
+ dest: "{{ service_profile_repo_dest }}"
+ version: "{{ service_profile_repo_branch }}"
- name: Rebuild XOS containers
when: xos_container_rebuild
- command: make {{ item }}
- chdir="{{ service_profile_repo_dest }}/{{ xos_configuration }}/"
+ make:
+ target: "{{ item }}"
+ chdir: "{{ service_profile_repo_dest }}/{{ xos_configuration }}/"
with_items:
- common_cloudlab
- base
- name: Initial build of XOS
- command: make
- chdir="{{ service_profile_repo_dest }}/{{ xos_configuration }}/"
+ make:
+    # no target given, so the default make target is built
+ chdir: "{{ service_profile_repo_dest }}/{{ xos_configuration }}/"
diff --git a/roles/xos-start/tasks/main.yml b/roles/xos-start/tasks/main.yml
index 0c98c67..6c7ebb1 100644
--- a/roles/xos-start/tasks/main.yml
+++ b/roles/xos-start/tasks/main.yml
@@ -4,10 +4,14 @@
- name: Build XOS containers
command: ansible xos-1 -u ubuntu -m shell \
-a "bash -c \"set -o pipefail; cd {{ service_profile_repo_dest }}/{{ xos_configuration }}; make local_containers |& tee xos-build.out\""
+ tags:
+ - skip_ansible_lint
- name: Onboard services and start XOS
command: ansible xos-1 -u ubuntu -m shell \
-a "bash -c \"set -o pipefail; cd {{ service_profile_repo_dest }}/{{ xos_configuration }}; make xos |& tee xos-onboard.out\""
+ tags:
+ - skip_ansible_lint
- name: Pause to let XOS initialize
pause: seconds=120
@@ -15,14 +19,23 @@
- name: Initial VTN configuration
command: ansible xos-1 -u ubuntu -m shell \
-a "cd {{ service_profile_repo_dest }}/{{ xos_configuration }}; make vtn"
+ tags:
+ - skip_ansible_lint
- name: Initial fabric configuration
command: ansible xos-1 -u ubuntu -m shell \
-a "cd {{ service_profile_repo_dest }}/{{ xos_configuration }}; make fabric"
+ tags:
+ - skip_ansible_lint
- name: Pause to let ONOS initialize
pause: seconds=20
+ tags:
+ - skip_ansible_lint
- name: Configure CORD services
command: ansible xos-1 -u ubuntu -m shell \
-a "cd {{ service_profile_repo_dest }}/{{ xos_configuration }}; make cord"
+ tags:
+ - skip_ansible_lint
+
diff --git a/roles/xos-vm-install/files/xos-setup-cord-pod-playbook.yml b/roles/xos-vm-install/files/xos-setup-cord-pod-playbook.yml
index d258d54..5b646a5 100644
--- a/roles/xos-vm-install/files/xos-setup-cord-pod-playbook.yml
+++ b/roles/xos-vm-install/files/xos-setup-cord-pod-playbook.yml
@@ -63,6 +63,8 @@
with_items:
- xosproject/xos-postgres
- xosproject/cord-app-build
+ tags:
+ - skip_ansible_lint # FIXME: use new 2.2 docker modules when available
- name: Pull docker images for XOS
when: not xos_container_rebuild
@@ -77,3 +79,5 @@
chdir="{{ xos_repo_dest }}/containers/xos/"
with_items:
- base
+
+
diff --git a/roles/xos-vm-install/files/xos-setup-devel-playbook.yml b/roles/xos-vm-install/files/xos-setup-devel-playbook.yml
index 517f77d..f7d3851 100644
--- a/roles/xos-vm-install/files/xos-setup-devel-playbook.yml
+++ b/roles/xos-vm-install/files/xos-setup-devel-playbook.yml
@@ -62,6 +62,8 @@
command: docker pull {{ item }}
with_items:
- xosproject/xos-postgres
+ tags:
+ - skip_ansible_lint # Should replace with http://docs.ansible.com/ansible/docker_module.html, when replacements are stable
- name: Pull docker images for XOS
when: not xos_container_rebuild
diff --git a/roles/xos-vm-install/tasks/main.yml b/roles/xos-vm-install/tasks/main.yml
index a4fc803..5ee7905 100644
--- a/roles/xos-vm-install/tasks/main.yml
+++ b/roles/xos-vm-install/tasks/main.yml
@@ -18,4 +18,6 @@
async: 4800
poll: 0
register: xos_setup_playbook
+ tags:
+ - skip_ansible_lint # running a sub-job