Merge "OMEC dev cluster name change"
diff --git a/jenkins-scripts/README b/jenkins-scripts/README
index 63a49a6..0c74035 100644
--- a/jenkins-scripts/README
+++ b/jenkins-scripts/README
@@ -1,8 +1,23 @@
-The scripts in this directory are used by the Jenkins spin-up component
-for dynamic minions.
+These scripts are run at boot time of a VM to configure it as a Jenkins minion.
-The spinup script will be as follows (${system_type} will be replaced
-with the appropriate system_type script)
+See the upstream LF project for where they originated:
-git clone https://github.com/edgexfoundry/ci-management.git /ci-management
-/ci-management/jenkins-scripts/jenkins-init-script.sh
+ https://gerrit.linuxfoundation.org/infra/gitweb?p=releng/global-jjb.git;a=tree;f=jenkins-init-scripts;hb=HEAD
+
+Our versions differ from LF's in a number of ways:
+
+- More secure SSH defaults
+- Omit unneeded portions of the scripts
+
+On bootstrap a script is provided as EC2 User Data that will pull down this
+repo and run the init script. It should look like:
+
+ #!/bin/bash
+
+ # Clone the management repo
+ git clone https://gerrit.opencord.org/ci-management.git /ci-management
+ /ci-management/jenkins-scripts/jenkins-init-script.sh
+
+ # clean-up from the init run
+ rm -rf /ci-management
+
diff --git a/jenkins-scripts/basic_settings.sh b/jenkins-scripts/basic_settings.sh
index bf0eea0..47a3f4c 100755
--- a/jenkins-scripts/basic_settings.sh
+++ b/jenkins-scripts/basic_settings.sh
@@ -9,39 +9,31 @@
# http://www.eclipse.org/legal/epl-v10.html
##############################################################################
-case "$(facter operatingsystem)" in
- Ubuntu)
- apt-get update
- ;;
- *)
- # Do nothing on other distros for now
- ;;
-esac
-
+# set hostname
IPADDR=$(facter ipaddress)
HOSTNAME=$(facter hostname)
FQDN=$(facter fqdn)
echo "${IPADDR} ${HOSTNAME} ${FQDN}" >> /etc/hosts
-#Increase limits
+# Increase limits
cat <<EOF > /etc/security/limits.d/jenkins.conf
jenkins soft nofile 16000
jenkins hard nofile 16000
EOF
+# keepalive SSH sessions
cat <<EOSSH >> /etc/ssh/ssh_config
Host *
ServerAliveInterval 60
-
-# we don't want to do SSH host key checking on spin-up systems
-Host 10.30.122.*
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
EOSSH
+# create host-wide known hosts file
cat <<EOKNOWN > /etc/ssh/ssh_known_hosts
github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==
+[gerrit.opencord.org]:29418 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCceEPwEJ5m5tbiL/AB5mY8DfT9UuXsc0l5N4AMxI89g7Vnyb9XOnxubJo2ZmIwDKI6LM5uRCgfIAKmbNNfqA1CL3e/7XKvQ69rrjnM+5swXAvD4ElYppyyU0V9EufuH2AD7zh0VdzqE25TF4nm6A/2neCqcWI7paa8c2h3YbzvHw==
+[gerrit.onosproject.org]:29418 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDgqAmRpkpZoq8Efz4sslaQYnoNCOlPy7nS/72FkvWP06WbPUsutJznSw4moKTZcxHJADW5eanBHxJ3nI8jo/bXC0qHZfzXVeDCklR/Shq8pY3B7I+WLufq4OKEuYim/ahrAYUvSYyBnnz3fLc+DLLiBhL4BBqpd9ocJd/3HZv4wRAWYmfKMKzjF84u6rehe8ZDUoNICsA/K6Wy1bYQnyJOXVBYdxSkdUc6Er1NDu6W/ijZbcpEt+Y4sYoChxKAtsqcFkjaKFgJbotDGVLnWzZTu08PGtZTE+0UyIozSQvsy/7bGSrA7t0am2IRXz0fFNCq/qOWfkwVbt8hRbEIUk/5
+[gerrit.onosproject.org]:29418 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMBzs9fkmwgIqvYavMlIFz95RzDoSBQxHIeBj2BuDz0HLz2qrW2Q2Rksq4OwsAuSjRto3+9/BgIKv1ONnh21KMM=
+[gerrit.onosproject.org]:29418 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKkIOHzFGowb9yL7FcWD73YF/xDUQ23/As/HAP3flf/L
EOKNOWN
-# vim: sw=2 ts=2 sts=2 et :
diff --git a/jenkins-scripts/create_jenkins_user.sh b/jenkins-scripts/create_jenkins_user.sh
index 86a5c92..e69bbee 100755
--- a/jenkins-scripts/create_jenkins_user.sh
+++ b/jenkins-scripts/create_jenkins_user.sh
@@ -33,9 +33,15 @@
usermod -a -G mock jenkins
fi
+# create SSH config
mkdir /home/jenkins/.ssh
-mkdir /w
cp -r /home/${OS}/.ssh/authorized_keys /home/jenkins/.ssh/authorized_keys
+
# Generate ssh key for use by Robot jobs
echo -e 'y\n' | ssh-keygen -N "" -f /home/jenkins/.ssh/id_rsa -t rsa
+
+# /w is used as the Jenkins "Remote FS root" in the config
+mkdir /w
+
+# Have jenkins user own the ssh and remote fs root
chown -R jenkins:jenkins /home/jenkins/.ssh /w
diff --git a/jenkins-scripts/jenkins-init-script.sh b/jenkins-scripts/jenkins-init-script.sh
index 67987b5..8adf422 100755
--- a/jenkins-scripts/jenkins-init-script.sh
+++ b/jenkins-scripts/jenkins-init-script.sh
@@ -13,10 +13,15 @@
cd /ci-management/jenkins-scripts
chmod +x ./*.sh
-./system_type.sh
+# create system type script and source it
+./system_type.sh
source /tmp/system_type.sh
+
+# run basic settings
./basic_settings.sh
+
+# run per system-type script, if it exists
if [ -f "${SYSTEM_TYPE}.sh" ]
then
./"${SYSTEM_TYPE}.sh"
diff --git a/jjb/api-test.yaml b/jjb/api-test.yaml
index 145a4fa..7e7275e 100644
--- a/jjb/api-test.yaml
+++ b/jjb/api-test.yaml
@@ -29,7 +29,7 @@
parameters:
- string:
name: buildNode
- default: 'ubuntu16.04-basebuild-4c-8g'
+ default: 'ubuntu18.04-basebuild-4c-8g'
description: 'Name of the Jenkins node to run the job on'
- string:
diff --git a/jjb/bbr.yaml b/jjb/bbr.yaml
index f5bd5c9..14b4caa 100644
--- a/jjb/bbr.yaml
+++ b/jjb/bbr.yaml
@@ -48,5 +48,5 @@
parameters:
- string:
name: buildNode
- default: 'ubuntu16.04-basebuild-4c-8g'
+ default: 'ubuntu18.04-basebuild-4c-8g'
description: 'Name of the Jenkins node to run the job on'
diff --git a/jjb/bbsim-e2e-scale.yaml b/jjb/bbsim-e2e-scale.yaml
deleted file mode 100644
index 741941a..0000000
--- a/jjb/bbsim-e2e-scale.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-# bbsim end to end scale test job
-
-- project:
- name: bbsim-e2e-scale
-
- project-name: '{name}'
-
- jobs:
- - 'bbsim-e2e-setup'
-
-- job-template:
- id: 'bbsim-e2e-setup'
- name: 'bbsim-end-to-end-setup'
-
- description: |
- <!-- Managed by Jenkins Job Builder -->
- Created by {id} job-template from ci-management/jjb/bbsim-e2e-scale.yaml <br /><br />
- E2E Validation for Seba-in-a-Box
-
- properties:
- - cord-infra-properties:
- build-days-to-keep: '{build-days-to-keep}'
- artifact-num-to-keep: '{artifact-num-to-keep}'
-
- wrappers:
- - lf-infra-wrappers:
- build-timeout: '{build-timeout}'
- jenkins-ssh-credential: '{jenkins-ssh-credential}'
-
- parameters:
- - string:
- name: buildNode
- default: 'ubuntu16.04-basebuild-4c-8g'
- description: 'Name of the Jenkins node to run the job on'
-
- project-type: pipeline
- concurrent: false
-
- dsl: !include-raw-escape: pipeline/bbsim-scale.groovy
diff --git a/jjb/bbsim-validation.yaml b/jjb/bbsim-validation.yaml
deleted file mode 100644
index a8aa632..0000000
--- a/jjb/bbsim-validation.yaml
+++ /dev/null
@@ -1,151 +0,0 @@
----
-# bbsim test job
-
-- project:
- name: bbsim-test
-
- project-name: '{name}'
-
- jobs:
- - 'bbsim-validation-voltha-1.6'
- - 'bbsim-validation-master'
-
-- job-template:
- id: 'bbsim-validation-voltha-1.6'
- name: 'BBSIM-Validation-Voltha-1.6'
-
- description: |
- <!-- Managed by Jenkins Job Builder -->
- Created by {id} job-template from ci-management/jjb/bbsim-validation.yaml <br /><br />
- Tests run for validation of BBSIM. Tests reside in the helm chart template
-
- properties:
- - cord-infra-properties:
- build-days-to-keep: '{build-days-to-keep}'
- artifact-num-to-keep: '{artifact-num-to-keep}'
-
- wrappers:
- - lf-infra-wrappers:
- build-timeout: '{build-timeout}'
- jenkins-ssh-credential: '{jenkins-ssh-credential}'
-
- parameters:
- - string:
- name: buildNode
- default: 'qct-pod2-node1'
- description: 'Name of the Jenkins node to run the job on'
-
- - string:
- name: manifestUrl
- default: '{gerrit-server-url}/{cord-repo-manifest}'
- description: 'URL to the repo manifest'
-
- - string:
- name: manifestBranch
- default: 'master'
- description: 'Name of the repo branch to use'
-
- - string:
- name: deploymentConfig
- default: 'bbsim-voltha-1.6.yml'
- description: 'kubernetes configurations file'
-
- - string:
- name: OnuCount
- default: '16'
- description: 'Number of ONUs per pon port'
-
- - string:
- name: EmulationMode
- default: '--set emulation_mode=both'
- description: 'Emulation for BBSIM (both|aaa)'
-
- - string:
- name: TestTags
- default: ''
- description: 'Tests to include/exclude. eg "-e serviceinstances"'
-
- - string:
- name: TestTimeout
- default: '300s'
- description: 'timeout for each test case (increase as you scale up)'
-
- - bool:
- name: ArchiveLogs
- default: true
- description: 'Archive all pod logs after test run'
-
- project-type: pipeline
- concurrent: false
-
- dsl: !include-raw-escape: pipeline/bbsim-validation.groovy
-
-- job-template:
- id: 'bbsim-validation-master'
- name: 'BBSIM-Validation-Voltha-master'
-
- description: |
- <!-- Managed by Jenkins Job Builder -->
- Created by {id} job-template from ci-management/jjb/bbsim-validation.yaml <br /><br />
- Tests run for validation of BBSIM. Tests reside in the helm chart template
-
- properties:
- - cord-infra-properties:
- build-days-to-keep: '{build-days-to-keep}'
- artifact-num-to-keep: '{artifact-num-to-keep}'
-
- wrappers:
- - lf-infra-wrappers:
- build-timeout: '{build-timeout}'
- jenkins-ssh-credential: '{jenkins-ssh-credential}'
-
- parameters:
- - string:
- name: buildNode
- default: 'qct-pod2-node1'
- description: 'Name of the Jenkins node to run the job on'
-
- - string:
- name: manifestUrl
- default: '{gerrit-server-url}/{cord-repo-manifest}'
- description: 'URL to the repo manifest'
-
- - string:
- name: manifestBranch
- default: 'master'
- description: 'Name of the repo branch to use'
-
- - string:
- name: deploymentConfig
- default: 'bbsim-voltha-master.yml'
- description: 'kubernetes configurations file'
-
- - string:
- name: OnuCount
- default: '16'
- description: 'Number of ONUs per pon port'
-
- - string:
- name: EmulationMode
- default: '--set emulation_mode=both'
- description: 'Emulation for BBSIM (both|aaa)'
-
- - string:
- name: TestTags
- default: ''
- description: 'Tests to include/exclude. eg "-e serviceinstances"'
-
- - string:
- name: TestTimeout
- default: '300s'
- description: 'timeout for each test case (increase as you scale up)'
-
- - bool:
- name: ArchiveLogs
- default: true
- description: 'Archive all pod logs after test run'
-
- project-type: pipeline
- concurrent: false
-
- dsl: !include-raw-escape: pipeline/bbsim-validation.groovy
diff --git a/jjb/charts.yaml b/jjb/charts.yaml
index 9908e7c..9999d4b 100644
--- a/jjb/charts.yaml
+++ b/jjb/charts.yaml
@@ -45,7 +45,7 @@
choosing-strategy: 'gerrit'
jenkins-ssh-credential: '{jenkins-ssh-credential}'
- node: 'ubuntu16.04-basebuild-2c-4g'
+ node: 'ubuntu18.04-basebuild-2c-4g'
project-type: freestyle
concurrent: false
@@ -153,7 +153,7 @@
choosing-strategy: gerrit
jenkins-ssh-credential: '{jenkins-ssh-credential}'
- node: 'ubuntu16.04-basebuild-2c-4g'
+ node: 'ubuntu18.04-basebuild-2c-4g'
project-type: freestyle
concurrent: true
diff --git a/jjb/ci-management/ci-management.yaml b/jjb/ci-management/ci-management.yaml
index 476e37b..f73d9b5 100644
--- a/jjb/ci-management/ci-management.yaml
+++ b/jjb/ci-management/ci-management.yaml
@@ -3,20 +3,13 @@
archive-artifacts: '**/*.log'
branch: 'master'
build-timeout: '60'
- build-node: 'centos7-basebuild-1c-1g'
+ build-node: 'ubuntu18.04-basebuild-1c-1g'
jobs:
- '{project-name}-ci-jobs'
- - '{project-name}-packer-jobs':
- platforms:
- - 'centos-7'
- - 'ubuntu-16.04'
- templates:
- - basebuild
name: ci-management-jobs
project: 'ci-management'
project-name: 'ci-management'
jjb-version: 3.2.0
- packer-version: 1.2.4
diff --git a/jjb/ci-management/packer.yaml b/jjb/ci-management/packer.yaml
new file mode 100644
index 0000000..83b6d18
--- /dev/null
+++ b/jjb/ci-management/packer.yaml
@@ -0,0 +1,20 @@
+- project:
+ archive-artifacts: '**/*.log'
+ branch: 'master'
+ build-timeout: '60'
+ build-node: 'ubuntu18.04-basebuild-1c-1g'
+
+ jobs:
+ - '{project-name}-packer-jobs':
+ platforms:
+ - 'ubuntu-18.04'
+ templates:
+ - basebuild_1804
+
+ name: packer-jobs
+ project: 'ci-management'
+ project-name: 'ci-management-ami'
+
+ jjb-version: 3.2.0
+ packer-version: 1.6.5
+ packer-builder: aws
diff --git a/jjb/cord-test/att-workflow.yaml b/jjb/cord-test/att-workflow.yaml
index d5fee86..02d8aba 100644
--- a/jjb/cord-test/att-workflow.yaml
+++ b/jjb/cord-test/att-workflow.yaml
@@ -14,6 +14,8 @@
num-of-atomix: '0'
in-band-management: false
reinstall-olt: true
+ num-of-onus: ''
+ num-of-ponports: ''
jobs:
# onlab pod1 build
diff --git a/jjb/cord-test/nightly-build-pipeline.yaml b/jjb/cord-test/nightly-build-pipeline.yaml
index bd045b7..be17049 100644
--- a/jjb/cord-test/nightly-build-pipeline.yaml
+++ b/jjb/cord-test/nightly-build-pipeline.yaml
@@ -109,10 +109,15 @@
- string:
name: onuNumber
- default: '16'
+ default: '{num-of-onus}'
description: "Onus per PonPort"
- string:
+ name: ponNumber
+ default: '{num-of-ponports}'
+ description: "Number of PON Ports"
+
+ - string:
name: workFlow
default: '{work-flow}'
description: 'Installs and uses the specified work flow on the POD'
diff --git a/jjb/defaults.yaml b/jjb/defaults.yaml
index 0f133db..d7da8ca 100644
--- a/jjb/defaults.yaml
+++ b/jjb/defaults.yaml
@@ -56,7 +56,7 @@
# The most frequently used type of Build Executor. Name is per the LF global-jjb.
# See build node types under "Cloud > Amazon EC2" at
# https://jenkins.opencord.org/configure
- build-node: ubuntu16.04-basebuild-1c-1g
+ build-node: ubuntu18.04-basebuild-1c-1g
# CORD Gerrit server definition, set in configuration
gerrit-server-name: 'CORD Gerrit'
@@ -84,7 +84,7 @@
modern-branches-regexp: '^(master|cord-7.0|cord-6.1|cord-6.0|seba-1.0|voltha-2.3|voltha-2.4)$'
platform-branches-regexp: '^(master|cord-7.0|cord-6.1)$'
seba-branches-regexp: '^(master|cord-7.0|seba-1.0)$'
- kind-voltha-regexp: '^(voltha-2.6)$'
+ kind-voltha-regexp: '^(voltha-2.6|voltha-2.7)$'
# matchs all project repos
all-projects-regexp: '.*'
@@ -99,7 +99,7 @@
# matching repos that should be version tagged by the version-tag job
# (basically the same as imagebuilder projects + helm charts + tools
- version-tag-projects-regexp: '^(?!aether)(xos.*|.*helm-charts|automation-tools|cord-tester|chameleon|rcord|mcord|ecord|acordion|addressmanager|epc-service|exampleservice|fabric.*|globalxos|hippie-oss|hss_db|hypercache|internetemulator|kubernetes-service|monitoring|olt-service|onos-service|openstack|progran|sdn-controller|simpleexampleservice|templateservice|vEE|vEG|vBBU|venb|vHSS|vMME|vnaas|vPGWC|vPGWU|vrouter|vsg|vsg-hw|vSGW|vSM|vspgwc|vspgwu|vtn-service|vtr|.*-workflow-driver|ves-agent|voltha-bbsim|openolt|sadis-server|kafka-topic-exporter|pyvoltha||plyxproto|voltha-protos|alpine-grpc-base|cordctl|voltha-go|voltha-onos|device-management|cord-workflow.*|voltha-system-tests|openairinterface|omec-.*|bbsim|omci-sim|ponsim|pppoe.*|voltha-api-server|aaa|config|dhcpl2relay|igmp|igmpproxy|kafka-onos|mcast|olt|sadis|vtn|voltha-.*-adapter.*|voltha-lib-go|voltha-python-base|voltha-docker-tools|mn-stratum-siab|ofagent.*|bng|voltctl|openolt-scale-tester|nem-ondemand-proxy|multifabric|openolt-test|omci-lib-go|kind-voltha|voltha-docs|mac-learning|goloxi|device-management-interface|bbsim-sadis-server|olttopology)$'
+    version-tag-projects-regexp: '^(?!aether)(xos.*|.*helm-charts|automation-tools|cord-tester|chameleon|rcord|mcord|ecord|acordion|addressmanager|epc-service|exampleservice|fabric.*|globalxos|hippie-oss|hss_db|hypercache|internetemulator|kubernetes-service|monitoring|olt-service|onos-service|openstack|progran|sdn-controller|simpleexampleservice|templateservice|vEE|vEG|vBBU|venb|vHSS|vMME|vnaas|vPGWC|vPGWU|vrouter|vsg|vsg-hw|vSGW|vSM|vspgwc|vspgwu|vtn-service|vtr|.*-workflow-driver|ves-agent|voltha-bbsim|openolt|sadis-server|kafka-topic-exporter|pyvoltha|plyxproto|voltha-protos|alpine-grpc-base|cordctl|voltha-go|voltha-onos|device-management|cord-workflow.*|voltha-system-tests|openairinterface|omec-.*|bbsim|omci-sim|ponsim|pppoe.*|voltha-api-server|aaa|config|dhcpl2relay|igmp|igmpproxy|kafka-onos|mcast|olt|sadis|vtn|voltha-.*-adapter.*|voltha-lib-go|voltha-python-base|voltha-docker-tools|mn-stratum-siab|ofagent.*|bng|voltctl|openolt-scale-tester|nem-ondemand-proxy|multifabric|openolt-test|omci-lib-go|kind-voltha|voltha-docs|mac-learning|goloxi|device-management-interface|bbsim-sadis-server|olttopology|opendm-agent|opendevice-manager)$'
# List of all repos that contribute to the CORD guide
cord-guide-projects-regexp: '^(att-workflow-driver|cord-tester|cordctl|exampleservice|fabric|fabric-crossconnect|hippie-oss|kubernetes-service|olt-service|onos-service|openolt|openstack|rcord|simpleexampleservice|vrouter|vtn-service|xos|xos-gui|xos-tosca)$'
@@ -207,3 +207,7 @@
# karaf-home
# Use to grab the logs or onos-diagnostics
karaf-home: 'apache-karaf-4.2.9'
+
+ # maven version
+ # Have to setup JAVA_HOME correctly if we upgrade to mvn36
+ maven-version: mvn33
diff --git a/jjb/device-management.yaml b/jjb/device-management.yaml
index b4eb61b..08e62b5 100644
--- a/jjb/device-management.yaml
+++ b/jjb/device-management.yaml
@@ -1,27 +1,13 @@
---
# device-management tests
-- project:
- name: device-management-e2e
-
- project-name: '{name}'
-
- jobs:
- # Per-patchset Pod builds on Tucson pod
- - 'verify_physical_device-management_patchset_manual':
- name: 'verify_physical_device-management_patchset_manual'
- testvm: 'tucson-pod'
- config-pod: 'tucson-pod'
- branch: 'master'
- oltDebVersion: 'openolt_asfvolt16-2.4.6-5d9ab1a36e13e55ca3687af160cfd4f582317493.deb'
- profile: 'Default'
- time-trigger: "@daily"
-
- job-template:
id: 'device-management-patch-test'
name: 'verify_{project}_sanity-test{name-extension}'
extra-helm-flags: ''
skip-vote: false
+ volthaSystemTestsChange: ''
+ volthaHelmChartsChange: ''
description: |
<!-- Managed by Jenkins Job Builder -->
@@ -41,16 +27,11 @@
parameters:
- string:
name: buildNode
- default: 'ubuntu16.04-basebuild-4c-8g'
+ default: 'ubuntu18.04-basebuild-4c-8g'
description: 'Name of the Jenkins node to run the job on'
- string:
- name: manifestUrl
- default: '{gerrit-server-url}/{voltha-test-manifest-repo}'
- description: 'URL to the repo manifest'
-
- - string:
- name: manifestBranch
+ name: branch
default: 'master'
description: 'Name of the repo branch to use'
@@ -60,19 +41,24 @@
description: 'Name of the Gerrit project'
- string:
- name: gerritChangeNumber
- default: '$GERRIT_CHANGE_NUMBER'
- description: 'Changeset number in Gerrit'
-
- - string:
- name: gerritPatchsetNumber
- default: '$GERRIT_PATCHSET_NUMBER'
- description: 'PatchSet number in Gerrit'
+ name: gerritRefspec
+ default: '$GERRIT_REFSPEC'
+ description: 'PatchSet REFSPEC in Gerrit, example value: "refs/changes/79/18779/13"'
- string:
name: extraHelmFlags
default: '{extra-helm-flags}'
- description: 'Helm flags to pass to ./voltha up'
+ description: 'Helm flags to pass to every helm install command'
+
+ - string:
+ name: volthaSystemTestsChange
+ default: '{volthaSystemTestsChange}'
+ description: 'Download a change for gerrit in the voltha-system-tests repo, example value: "refs/changes/79/18779/13"'
+
+ - string:
+ name: volthaHelmChartsChange
+ default: '{volthaHelmChartsChange}'
+ description: 'Download a change for gerrit in the voltha-helm-charts repo, example value: "refs/changes/79/18779/13"'
project-type: pipeline
concurrent: true
@@ -106,145 +92,3 @@
failed: '{skip-vote}'
unstable: '{skip-vote}'
notbuilt: '{skip-vote}'
-
-# POD Per Patchset Pipeline Jobs
-
-- job-template:
- name: '{name}'
- id: verify_physical_device-management_patchset_manual
- description: |
- <!-- Managed by Jenkins Job Builder -->
- Automated build on POD {config-pod} using {pipeline-script} <br /><br />
- Created from job-template {id} from ci-management/jjb/device-management.yaml <br />
- Created by Andy Bavier, andy@opennetworking.org <br />
- Copyright (c) 2019 Open Networking Foundation (ONF)
- sandbox: true
- pipeline-script: 'device-management-physical-build-and-tests.groovy'
-
- properties:
- - cord-infra-properties:
- build-days-to-keep: '{build-days-to-keep}'
- artifact-num-to-keep: '{artifact-num-to-keep}'
-
- parameters:
- - string:
- name: buildNode
- default: '{testvm}'
- description: 'Pod management node'
-
- - string:
- name: manifestUrl
- default: '{gerrit-server-url}/{voltha-test-manifest-repo}'
- description: 'URL to the repo manifest'
-
- - string:
- name: manifestBranch
- default: master
- description: 'Name of the repo branch to use'
-
- - string:
- name: gerritProject
- default: 'device-management'
- description: 'Name of the Gerrit project'
-
- - string:
- name: gerritRefSpec
- default: '$GERRIT_REFSPEC'
- description: 'Refspec for the Gerrit patchset'
-
- - string:
- name: gerritEventCommentText
- default: '$GERRIT_EVENT_COMMENT_TEXT'
- description: 'Comment text from gerrit commit'
-
- - string:
- name: cordRepoUrl
- default: '{gerrit-server-url}'
- description: 'The URL of the CORD Project repository'
-
- - string:
- name: podName
- default: '{config-pod}'
-
- - string:
- name: deploymentConfigFile
- default: 'pod-configs/deployment-configs/{config-pod}.yaml'
- description: 'Path of deployment config file'
-
- - string:
- name: kindVolthaValuesFile
- default: 'pod-configs/kubernetes-configs/voltha/{config-pod}.yml'
- description: 'Path of kind-voltha values override file'
-
- - string:
- name: sadisConfigFile
- default: 'voltha/voltha-system-tests/tests/data/{config-pod}-sadis.json'
- description: 'Path of SADIS config to load'
-
- - string:
- name: localConfigDir
- default: null
- description: 'If specified, config file paths are relative to this dir; otherwise $WORKSPACE'
-
- - string:
- name: configRepo
- default: 'pod-configs'
- description: 'A repository containing the config files, will be checked out if specified'
-
- - string:
- name: oltDebVersion
- default: '{oltDebVersion}'
- description: 'OLT Software version to install'
-
- - string:
- name: branch
- default: '{branch}'
-
- - string:
- name: profile
- default: '{profile}'
- description: 'Technology Profile pushed to the ETCD'
-
- - string:
- name: notificationEmail
- default: 'andy@opennetworking.org'
- description: ''
-
- - bool:
- name: reinstallOlt
- default: true
- description: "Re-install OLT software"
-
- - string:
- name: extraRobotArgs
- default: '-i sanity'
- description: 'Arguments to pass to robot'
-
- project-type: pipeline
- concurrent: true
-
- dsl: !include-raw-escape: pipeline/{pipeline-script}
-
- triggers:
- - gerrit:
- server-name: '{gerrit-server-name}'
- dependency-jobs: '{dependency-jobs}'
- silent-start: false
- successful-message: "PASSED hardware test"
- failure-message: "FAILED hardware test"
- unstable-message: "UNSTABLE hardware test"
- trigger-on:
- - comment-added-contains-event:
- comment-contains-value: '^hardware test$'
- - comment-added-contains-event:
- comment-contains-value: '^hardware test with delay$'
- projects:
- - project-compare-type: REG_EXP
- project-pattern: '^device-management$'
- branches:
- - branch-compare-type: PLAIN
- branch-pattern: 'master'
- - timed: |
- TZ=America/Los_Angeles
- {time-trigger}
-
diff --git a/jjb/docker-publish.yaml b/jjb/docker-publish.yaml
index d264eb9..dc0185c 100644
--- a/jjb/docker-publish.yaml
+++ b/jjb/docker-publish.yaml
@@ -28,7 +28,7 @@
parameters:
- string:
name: buildNode
- default: 'ubuntu16.04-basebuild-1c-2g'
+ default: 'ubuntu18.04-basebuild-1c-2g'
description: 'Name of the Jenkins build executor to run the job on'
- string:
diff --git a/jjb/gui-unit.yaml b/jjb/gui-unit.yaml
index 6cb5c2e..bb10618 100644
--- a/jjb/gui-unit.yaml
+++ b/jjb/gui-unit.yaml
@@ -40,7 +40,7 @@
jenkins-ssh-credential: '{jenkins-ssh-credential}'
# `npm install` fails on 1Gb RAM
- node: 'ubuntu16.04-basebuild-1c-2g'
+ node: 'ubuntu18.04-basebuild-1c-2g'
project-type: freestyle
concurrent: true
diff --git a/jjb/make-unit.yaml b/jjb/make-unit.yaml
index cceeb5f..22393e2 100644
--- a/jjb/make-unit.yaml
+++ b/jjb/make-unit.yaml
@@ -54,12 +54,40 @@
- junit:
results: "**/*results.xml,**/*report.xml"
allow-empty-results: '{junit-allow-empty-results}'
- - xunit:
- types:
- - gtest:
- pattern: "**/*xunit.xml"
- deleteoutput: false
- skip-if-no-test-files: '{xunit-skip-if-no-test-files}'
+# NOTE: circa 2020-04-11, the Jenkins xUnit plugin version 3.x.x changed the
+# config XML to not be JJB compatible, replacing the previous XML <types> tag
+# with a <tools> tag.
+#
+# Temporarily switch to using raw XML to configure xUnit.
+#
+# The following xunit and XML should be equivalent, except that the variable
+# `xunit-skip-if-no-test-files` is assumed to always be true.
+#
+# - xunit:
+# types:
+# - gtest:
+# pattern: "**/*xunit.xml"
+# deleteoutput: false
+# skip-if-no-test-files: '{xunit-skip-if-no-test-files}'
+#
+ - raw:
+ xml: |
+ <xunit plugin="xunit">
+ <tools>
+ <GoogleTestType>
+ <pattern>**/*xunit.xml</pattern>
+ <failIfNotNew>true</failIfNotNew>
+ <deleteOutputFiles>false</deleteOutputFiles>
+ <skipNoTestFiles>True</skipNoTestFiles>
+ <stopProcessingIfError>true</stopProcessingIfError>
+ </GoogleTestType>
+ </tools>
+ <thresholds/>
+ <thresholdMode>1</thresholdMode>
+ <extraConfiguration>
+ <testTimeMargin>3000</testTimeMargin>
+ </extraConfiguration>
+ </xunit>
- cobertura:
report-file: "**/*coverage.xml"
targets:
diff --git a/jjb/maven.yaml b/jjb/maven.yaml
index 195c9e3..162209f 100644
--- a/jjb/maven.yaml
+++ b/jjb/maven.yaml
@@ -8,7 +8,7 @@
<!-- Managed by Jenkins Job Builder -->
Created by {id} job-template from ci-management/jjb/maven.yaml
- node: 'ubuntu16.04-basebuild-1c-2g'
+ node: 'ubuntu18.04-basebuild-1c-2g'
project-type: freestyle
parameters:
@@ -57,6 +57,7 @@
settings: 'cord-apps-maven-settings'
settings-type: cfp
goals: '-Pci-verify clean test install spotbugs:check'
+ maven-version: '{maven-version}'
publishers:
- junit:
@@ -81,7 +82,7 @@
<!-- Managed by Jenkins Job Builder -->
Created by {id} job-template from ci-management/jjb/maven.yaml
- node: 'ubuntu16.04-basebuild-1c-2g'
+ node: 'ubuntu18.04-basebuild-1c-2g'
project-type: freestyle
parameters:
@@ -137,3 +138,4 @@
settings: 'cord-apps-maven-settings'
settings-type: cfp
goals: '-Prelease clean deploy'
+ maven-version: '{maven-version}'
diff --git a/jjb/omec-ci.yaml b/jjb/omec-ci.yaml
index a1b609a..6e312ac 100644
--- a/jjb/omec-ci.yaml
+++ b/jjb/omec-ci.yaml
@@ -94,14 +94,14 @@
pipeline-file: 'Jenkinsfile-omec-install-ngic-rtc-vnf.groovy'
- 'omec-fossa':
pipeline-file: 'omec-fossa-scan.groovy'
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-1c-2g'
- 'omec-reuse':
pipeline-file: 'omec-reuse-scan.groovy'
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-1c-2g'
- 'docker-publish-github':
build-timeout: 30
docker-repo: 'omecproject'
- build-node: 'ubuntu16.04-basebuild-4c-8g'
+ build-node: 'ubuntu18.04-basebuild-4c-8g'
- 'omec-container'
# for c3po
@@ -131,14 +131,14 @@
pipeline-file: 'Jenkinsfile-omec-install-c3po-hss-vnf.groovy'
- 'omec-fossa':
pipeline-file: 'omec-fossa-scan.groovy'
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-1c-2g'
- 'omec-reuse':
pipeline-file: 'omec-reuse-scan.groovy'
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-1c-2g'
- 'docker-publish-github':
build-timeout: 30
docker-repo: 'omecproject'
- build-node: 'ubuntu16.04-basebuild-8c-15g'
+ build-node: 'ubuntu18.04-basebuild-8c-15g'
- 'omec-container'
- 'omec-container-remote':
pod: 'ci-4g'
@@ -168,14 +168,14 @@
pipeline-file: 'Jenkinsfile-omec-install-openmme-vnf.groovy'
- 'omec-fossa':
pipeline-file: 'omec-fossa-scan.groovy'
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-1c-2g'
- 'omec-reuse':
pipeline-file: 'omec-reuse-scan.groovy'
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-1c-2g'
- 'docker-publish-github':
build-timeout: 30
docker-repo: 'omecproject'
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-1c-2g'
# for nucleus
- project:
@@ -200,17 +200,17 @@
pipeline-file: 'Jenkinsfile-omec-install-Nucleus-vnf.groovy'
- 'omec-fossa':
pipeline-file: 'omec-fossa-scan.groovy'
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-1c-2g'
- 'omec-reuse':
pipeline-file: 'omec-reuse-scan.groovy'
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-1c-2g'
- 'omec-cppcheck':
pipeline-file: 'omec-cppcheck.groovy'
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-1c-2g'
- 'docker-publish-github':
build-timeout: 60
docker-repo: 'omecproject'
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-1c-2g'
- 'omec-container'
- 'omec-container-remote':
pod: 'ci-4g'
@@ -233,10 +233,10 @@
jobs:
- 'omec-fossa':
pipeline-file: 'omec-fossa-scan.groovy'
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-1c-2g'
- 'omec-reuse':
pipeline-file: 'omec-reuse-scan.groovy'
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-1c-2g'
# for ignite
- project:
@@ -254,10 +254,10 @@
jobs:
- 'omec-fossa':
pipeline-file: 'omec-fossa-scan.groovy'
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-1c-2g'
- 'omec-reuse':
pipeline-file: 'omec-reuse-scan.groovy'
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-1c-2g'
# for upf-epc
- project:
@@ -274,14 +274,14 @@
jobs:
- 'omec-fossa':
pipeline-file: 'omec-fossa-scan.groovy'
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-1c-2g'
- 'omec-reuse':
pipeline-file: 'omec-reuse-scan.groovy'
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-1c-2g'
- 'docker-publish-github':
build-timeout: 30
docker-repo: 'omecproject'
- build-node: 'ubuntu16.04-basebuild-2c-4g'
+ build-node: 'ubuntu18.04-basebuild-2c-4g'
extraEnvironmentVars: CPU=haswell
- 'omec-container'
- 'omec-container-remote':
@@ -359,6 +359,8 @@
<!-- Managed by Jenkins Job Builder -->
Created by {id} job-template from ci-management/jjb/omec-ci.yaml<br />
+ disabled: true
+
properties:
- cord-infra-properties:
build-days-to-keep: '{build-days-to-keep}'
@@ -584,6 +586,8 @@
<!-- Managed by Jenkins Job Builder -->
Created by {id} job-template from ci-management/jjb/omec-ci.yaml<br />
+ disabled: true
+
properties:
- cord-infra-properties:
build-days-to-keep: '{build-days-to-keep}'
@@ -624,6 +628,8 @@
<!-- Managed by Jenkins Job Builder -->
Created by {id} job-template from ci-management/jjb/omec-ci.yaml<br />
+ disabled: true
+
properties:
- cord-infra-properties:
build-days-to-keep: '{build-days-to-keep}'
@@ -848,6 +854,8 @@
<!-- Managed by Jenkins Job Builder -->
Created from job-template {id} from ci-management/jjb/omec-ci.yaml <br />
+ disabled: true
+
properties:
- cord-infra-properties:
build-days-to-keep: '{build-days-to-keep}'
@@ -939,6 +947,8 @@
<!-- Managed by Jenkins Job Builder -->
Created from job-template {id} from ci-management/jjb/omec-ci.yaml <br />
+ disabled: true
+
properties:
- cord-infra-properties:
build-days-to-keep: '{build-days-to-keep}'
@@ -1000,6 +1010,8 @@
<!-- Managed by Jenkins Job Builder -->
Created from job-template {id} from ci-management/jjb/omec-ci.yaml <br />
+ disabled: true
+
properties:
- cord-infra-properties:
build-days-to-keep: '{build-days-to-keep}'
@@ -1056,6 +1068,8 @@
Created from job-template {id} from ci-management/jjb/omec-ci.yaml <br />
{desc}
+ disabled: true
+
properties:
- cord-infra-properties:
build-days-to-keep: '{build-days-to-keep}'
@@ -1161,6 +1175,8 @@
description: |
Created from job-template {id} from ci-management/jjb/omec-ci.yaml <br />
+ disabled: true
+
properties:
- cord-infra-properties:
build-days-to-keep: '{build-days-to-keep}'
diff --git a/jjb/onos-app-release.yaml b/jjb/onos-app-release.yaml
index 83e2eaa..e168556 100644
--- a/jjb/onos-app-release.yaml
+++ b/jjb/onos-app-release.yaml
@@ -30,11 +30,26 @@
parameters:
- string:
- name: app
+ name: appRepo
default: ''
description: 'Name of the app repository on the OpenCORD Gerrit server.\n e.g. vtn'
- string:
+ name: appName
+ default: ''
+ description: 'Name of the app in pom.xml API definition. \n e.g. olt'
+
+ - string:
+ name: apiVersion
+ default: ''
+ description: 'API version to release.\n e.g. 1.0.0'
+
+ - string:
+ name: nextApiVersion
+ default: ''
+ description: 'Next API version after release (the pipeline appends -SNAPSHOT).\n e.g. 1.1.0'
+
+ - string:
name: version
default: ''
description: 'Version to release.\n e.g. 1.0.0'
@@ -54,7 +69,7 @@
default: '{jdk-distribution}'
description: 'Distribution of the JDK to use with update-java-alternatives'
- node: 'ubuntu16.04-basebuild-1c-2g'
+ node: 'ubuntu18.04-basebuild-1c-2g'
project-type: pipeline
concurrent: true
diff --git a/jjb/pipeline/bbsim-scale.groovy b/jjb/pipeline/bbsim-scale.groovy
deleted file mode 100644
index 6a05cfd..0000000
--- a/jjb/pipeline/bbsim-scale.groovy
+++ /dev/null
@@ -1,63 +0,0 @@
-/* bbsim-scale test */
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
-
- options {
- timeout(time: 90, unit: 'MINUTES')
- }
-
- environment {
- VOLTHA_LOG_LEVEL="DEBUG"
- NAME="minimal"
- WITH_RADIUS="y"
- WITH_BBSIM="y"
- INSTALL_ONOS_APPS="y"
- CONFIG_SADIS="y"
- FANCY=0
- WITH_SIM_ADAPTERS="n"
- }
-
- stages {
-
- stage('Create K8s Cluster') {
- steps {
- sh """
- git clone https:/gerrit.opencord.org/kind-voltha
- cd kind-voltha/
- DEPLOY_K8S=y JUST_K8S=y ./voltha up
- """
- }
- }
-
- stage('Deploy Voltha') {
- steps {
- sh '''
- cd $WORKSPACE/kind-voltha/
- echo \$HELM_FLAG
- ./voltha up
- '''
- }
- }
-
-
- }
-
- post {
- always {
- sh '''
- WAIT_ON_DOWN=y ./voltha down
- cd $WORKSPACE/
- rm -rf kind-voltha/ voltha/ || true
- '''
- }
- failure {
- sh '''
- '''
- }
- }
-}
diff --git a/jjb/pipeline/device-management-mock-tests.groovy b/jjb/pipeline/device-management-mock-tests.groovy
deleted file mode 100644
index ac575a5..0000000
--- a/jjb/pipeline/device-management-mock-tests.groovy
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2017-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// voltha-2.x e2e tests
-// uses kind-voltha to deploy voltha-2.X
-// uses bbsim to simulate OLT/ONUs
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 90, unit: 'MINUTES')
- }
- environment {
- KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
- VOLTCONFIG="$HOME/.volt/config-minimal"
- PATH="$WORKSPACE/voltha/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- NAME="minimal"
- FANCY=0
- WITH_SIM_ADAPTERS="n"
- WITH_RADIUS="y"
- WITH_BBSIM="y"
- DEPLOY_K8S="y"
- VOLTHA_LOG_LEVEL="DEBUG"
- CONFIG_SADIS="n"
- ROBOT_MISC_ARGS="-d $WORKSPACE/RobotLogs"
- }
-
- stages {
-
- stage('Repo') {
- steps {
- step([$class: 'WsCleanup'])
- checkout(changelog: false, \
- poll: false,
- scm: [$class: 'RepoScm', \
- manifestRepositoryUrl: "${params.manifestUrl}", \
- manifestBranch: "${params.manifestBranch}", \
- currentBranch: true, \
- destinationDir: 'voltha', \
- forceSync: true,
- resetFirst: true, \
- quiet: true, \
- jobs: 4, \
- showAllChanges: true] \
- )
- }
- }
- stage('Patch') {
- steps {
- sh """
- pushd $WORKSPACE/
- echo "${gerritProject}" "${gerritChangeNumber}" "${gerritPatchsetNumber}"
- echo "${GERRIT_REFSPEC}"
- git clone https://gerrit.opencord.org/${gerritProject}
- cd "${gerritProject}"
- git fetch https://gerrit.opencord.org/${gerritProject} "${GERRIT_REFSPEC}" && git checkout FETCH_HEAD
- popd
- """
- }
- }
- stage('Create K8s Cluster') {
- steps {
- sh """
- cd $WORKSPACE/voltha/kind-voltha/
- JUST_K8S=y ./voltha up
- bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/voltha/kind-voltha/bin"
- """
- }
- }
-
- stage('Build Redfish Importer Image') {
- steps {
- sh """
- make -C $WORKSPACE/device-management/\$1 DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build-importer
- """
- }
- }
-
- stage('Build demo_test Image') {
- steps {
- sh """
- make -C $WORKSPACE/device-management/\$1/demo_test DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build
- """
- }
- }
-
- stage('Build mock-redfish-server Image') {
- steps {
- sh """
- make -C $WORKSPACE/device-management/\$1/mock-redfish-server DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build
- """
- }
- }
-
- stage('Push Images') {
- steps {
- sh '''
- docker images | grep citest
- for image in \$(docker images -f "reference=*/*citest" --format "{{.Repository}}"); do echo "Pushing \$image to nodes"; kind load docker-image \$image:citest --name voltha-\$NAME --nodes voltha-\$NAME-worker,voltha-\$NAME-worker2; done
- '''
- }
- }
- stage('Deploy Voltha') {
- steps {
- sh '''
- export EXTRA_HELM_FLAGS="--set log_agent.enabled=False ${extraHelmFlags} "
-
- cd $WORKSPACE/voltha/kind-voltha/
- echo \$EXTRA_HELM_FLAGS
- kail -n voltha -n default > $WORKSPACE/onos-voltha-combined.log &
- ./voltha up
- '''
- }
- }
-
- stage('Run E2E Tests') {
- steps {
- sh '''
- mkdir -p $WORKSPACE/RobotLogs
-
- # tell the kubernetes script to use images tagged citest and pullPolicy:Never
- sed -i 's/master/citest/g' $WORKSPACE/device-management/kubernetes/deploy*.yaml
- sed -i 's/imagePullPolicy: Always/imagePullPolicy: Never/g' $WORKSPACE/device-management/kubernetes/deploy*.yaml
- make -C $WORKSPACE/device-management functional-mock-test || true
- '''
- }
- }
- }
-
- post {
- always {
- sh '''
- set +e
- cp $WORKSPACE/voltha/kind-voltha/install-minimal.log $WORKSPACE/
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
- kubectl get nodes -o wide
- kubectl get pods -o wide
- kubectl get pods -n voltha -o wide
-
- sync
- pkill kail || true
- md5sum $WORKSPACE/voltha/kind-voltha/bin/voltctl
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep $1 $WORKSPACE/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep $1 $WORKSPACE/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
- extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
-
- gzip $WORKSPACE/onos-voltha-combined.log
-
- ## shut down kind-voltha
- cd $WORKSPACE/voltha/kind-voltha
- WAIT_ON_DOWN=y ./voltha down
- '''
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: 'RobotLogs/log*.html',
- otherFiles: '',
- outputFileName: 'RobotLogs/output*.xml',
- outputPath: '.',
- passThreshold: 80,
- reportFileName: 'RobotLogs/report*.html',
- unstableThreshold: 0]);
- archiveArtifacts artifacts: '*.log,*.gz'
- }
- }
-}
diff --git a/jjb/pipeline/device-management-physical-build-and-tests.groovy b/jjb/pipeline/device-management-physical-build-and-tests.groovy
deleted file mode 100644
index 6579cc4..0000000
--- a/jjb/pipeline/device-management-physical-build-and-tests.groovy
+++ /dev/null
@@ -1,373 +0,0 @@
-// Copyright 2019-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// deploy VOLTHA built from patchset on a physical pod and run e2e test
-// uses kind-voltha to deploy voltha-2.X
-
-// Need this so that deployment_config has global scope when it's read later
-deployment_config = null
-localDeploymentConfigFile = null
-localKindVolthaValuesFile = null
-localSadisConfigFile = null
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 90, unit: 'MINUTES')
- }
-
- environment {
- KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
- VOLTCONFIG="$HOME/.volt/config-minimal"
- PATH="$WORKSPACE/voltha/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- NAME="minimal"
- FANCY=0
- //VOL-2194 ONOS SSH and REST ports hardcoded to 30115/30120 in tests
- ONOS_SSH_PORT=30115
- ONOS_API_PORT=30120
- }
-
- stages {
- stage ('Initialize') {
- steps {
- sh returnStdout: false, script: """
- test -e $WORKSPACE/voltha/kind-voltha/voltha && cd $WORKSPACE/voltha/kind-voltha && ./voltha down
- cd $WORKSPACE
- rm -rf $WORKSPACE/*
- """
- script {
- if (env.configRepo && ! env.localConfigDir) {
- env.localConfigDir = "$WORKSPACE"
- sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/${configRepo}"
- }
- localDeploymentConfigFile = "${env.localConfigDir}/${params.deploymentConfigFile}"
- localKindVolthaValuesFile = "${env.localConfigDir}/${params.kindVolthaValuesFile}"
- localSadisConfigFile = "${env.localConfigDir}/${params.sadisConfigFile}"
- }
- }
- }
-
- stage('Repo') {
- steps {
- checkout(changelog: true,
- poll: false,
- scm: [$class: 'RepoScm',
- manifestRepositoryUrl: "${params.manifestUrl}",
- manifestBranch: "${params.manifestBranch}",
- currentBranch: true,
- destinationDir: 'voltha',
- forceSync: true,
- resetFirst: true,
- quiet: true,
- jobs: 4,
- showAllChanges: true]
- )
- }
- }
-
- stage('Patch') {
- steps {
- sh """
- pushd $WORKSPACE/
- git clone https://gerrit.opencord.org/${gerritProject}
- cd "${gerritProject}"
- if [[ ! -z "${gerritRefSpec}" ]]; then
- git fetch https://gerrit.opencord.org/${gerritProject} "${gerritRefSpec}" && git checkout FETCH_HEAD
- fi
- popd
- """
- }
- }
-
- stage('Check config files') {
- steps {
- script {
- try {
- deployment_config = readYaml file: "${localDeploymentConfigFile}"
- } catch (err) {
- echo "Error reading ${localDeploymentConfigFile}"
- throw err
- }
- sh returnStdout: false, script: """
- if [ ! -e ${localKindVolthaValuesFile} ]; then echo "${localKindVolthaValuesFile} not found"; exit 1; fi
- if [ ! -e ${localSadisConfigFile} ]; then echo "${localSadisConfigFile} not found"; exit 1; fi
- """
- }
- }
- }
-
- stage('Build olt addr list') {
- steps {
- script {
- sh """
- echo "ADDR_LIST:" > /tmp/robot_vars.yaml
- """
- deployment_config.olts.each { olt ->
- sh """
- echo " - ${olt.ip}:8888" >> /tmp/robot_vars.yaml
- """
- }
- sh """
- cat /tmp/robot_vars.yaml
- """
- }
- }
- }
-
- stage('Create KinD Cluster') {
- steps {
- sh returnStdout: false, script: """
- cd $WORKSPACE/voltha/kind-voltha/
- JUST_K8S=y ./voltha up
- """
- }
- }
-
- stage('Build Redfish Importer Image') {
- steps {
- sh """
- make -C $WORKSPACE/device-management/\$1 DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build-importer
- """
- }
- }
-
- stage('Build demo_test Image') {
- steps {
- sh """
- make -C $WORKSPACE/device-management/\$1/demo_test DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build
- """
- }
- }
-
- stage('Build mock-redfish-server Image') {
- steps {
- sh """
- make -C $WORKSPACE/device-management/\$1/mock-redfish-server DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build
- """
- }
- }
-
- stage('Push Images') {
- steps {
- sh '''
- docker images | grep citest
- for image in \$(docker images -f "reference=*/*citest" --format "{{.Repository}}"); do echo "Pushing \$image to nodes"; kind load docker-image \$image:citest --name voltha-\$NAME --nodes voltha-\$NAME-worker,voltha-\$NAME-worker2; done
- '''
- }
- }
-
- stage('Deploy Voltha') {
- environment {
- WITH_SIM_ADAPTERS="no"
- WITH_RADIUS="yes"
- DEPLOY_K8S="no"
- VOLTHA_LOG_LEVEL="DEBUG"
- }
- steps {
- script {
- sh returnStdout: false, script: """
- export EXTRA_HELM_FLAGS='--set log_agent.enabled=False -f ${localKindVolthaValuesFile} '
-
- cd $WORKSPACE/voltha/kind-voltha/
- echo \$EXTRA_HELM_FLAGS
- kail -n voltha -n default > $WORKSPACE/onos-voltha-combined.log &
- ./voltha up
- """
- }
- }
- }
-
- stage('Deploy Kafka Dump Chart') {
- steps {
- script {
- sh returnStdout: false, script: """
- helm repo add cord https://charts.opencord.org
- helm repo update
- helm del --purge voltha-kafka-dump || true
- helm install -n voltha-kafka-dump cord/voltha-kafka-dump
- """
- }
- }
- }
-
- stage('Push Tech-Profile') {
- when {
- expression { params.profile != "Default" }
- }
- steps {
- sh returnStdout: false, script: """
- etcd_container=\$(kubectl get pods -n voltha | grep voltha-etcd-cluster | awk 'NR==1{print \$1}')
- kubectl cp $WORKSPACE/voltha/voltha-system-tests/tests/data/TechProfile-${profile}.json voltha/\$etcd_container:/tmp/flexpod.json
- kubectl exec -it \$etcd_container -n voltha -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/XGS-PON/64'
- """
- }
- }
-
- stage('Push Sadis-config') {
- steps {
- sh returnStdout: false, script: """
- curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:$ONOS_API_PORT/onos/v1/network/configuration --data @${localSadisConfigFile}
- """
- }
- }
-
- stage('Reinstall OLT software') {
- when {
- expression { params.reinstallOlt }
- }
- steps {
- script {
- deployment_config.olts.each { olt ->
- sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'service openolt stop' || true"
- sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'killall dev_mgmt_daemon' || true"
- sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --remove asfvolt16 && dpkg --purge asfvolt16'"
- waitUntil {
- olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
- return olt_sw_present.toInteger() == 0
- }
- sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --install ${oltDebVersion}'"
- waitUntil {
- olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
- return olt_sw_present.toInteger() == 1
- }
- if ( olt.fortygig ) {
- // If the OLT is connected to a 40G switch interface, set the NNI port to be downgraded
- sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'echo port ce128 sp=40000 >> /broadcom/qax.soc ; /opt/bcm68620/svk_init.sh'"
- }
- }
- }
- }
- }
-
- stage('Restart OLT processes') {
- steps {
- script {
- deployment_config.olts.each { olt ->
- sh returnStdout: false, script: """
- ssh-keyscan -H ${olt.ip} >> ~/.ssh/known_hosts
- sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'service openolt stop' || true
- sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'killall dev_mgmt_daemon' || true
- sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'rm -f /var/log/openolt.log'
- sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'rm -f /var/log/dev_mgmt_daemon.log'
- sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'service dev_mgmt_daemon start &'
- sleep 5
- sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'service openolt start &'
- # restart redfish server
- sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'service psme stop' || true
- sleep 10
- sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'service psme start'
- sleep 10
- sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'ps auxw | grep -i psme'
- """
- // Note: ONU Discovery wait loop removed as it was not necessary
- }
- }
- }
- }
-
- stage('Run E2E Tests') {
- steps {
- sh '''
- mkdir -p $WORKSPACE/RobotLogs
- # tell the kubernetes script to use images tagged citest and pullPolicy:Never
- sed -i 's/master/citest/g' $WORKSPACE/device-management/kubernetes/deploy*.yaml
- sed -i 's/imagePullPolicy: Always/imagePullPolicy: Never/g' $WORKSPACE/device-management/kubernetes/deploy*.yaml
- # passing a list to robot framework on the command line is hard, so put the vars in a file
- make -C $WORKSPACE/device-management ROBOT_EXTRA_ARGS="-V /tmp/robot_vars.yaml" functional-physical-test-single || true
- '''
- }
- }
-
- stage('After-Test Delay') {
- when {
- expression { params.withPatchset }
- }
- steps {
- sh returnStdout: false, script: """
- # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
- REGEX="hardware test with delay\$"
- [[ "${gerritEventCommentText}" =~ \$REGEX ]] && sleep 10m || true
- """
- }
- }
- }
-
- post {
- always {
- sh returnStdout: false, script: '''
- set +e
- cp kind-voltha/install-minimal.log $WORKSPACE/
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\t'}{.imageID}{'\\n'}" | sort | uniq -c
- kubectl get nodes -o wide
- kubectl get pods -o wide
- kubectl get pods -n voltha -o wide
-
- sync
- pkill kail || true
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep $1 $WORKSPACE/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep $1 $WORKSPACE/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
- extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
-
- gzip $WORKSPACE/onos-voltha-combined.log
-
- ## collect events, the chart should be running by now
- kubectl get pods | grep -i voltha-kafka-dump | grep -i running
- if [[ $? == 0 ]]; then
- kubectl exec -it `kubectl get pods | grep -i voltha-kafka-dump | grep -i running | cut -f1 -d " "` ./voltha-dump-events.sh > $WORKSPACE/voltha-events.log
- fi
- '''
- script {
- deployment_config.olts.each { olt ->
- sh returnStdout: false, script: """
- sshpass -p ${olt.pass} scp ${olt.user}@${olt.ip}:/var/log/openolt.log $WORKSPACE/openolt-${olt.ip}.log || true
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.ip}.log # Remove escape sequences
- sshpass -p ${olt.pass} scp ${olt.user}@${olt.ip}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.ip}.log || true
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.ip}.log # Remove escape sequences
- """
- }
- }
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: 'device-management/demo_test/functional_test/log*.html',
- otherFiles: '',
- outputFileName: 'device-management/demo_test/functional_test/output*.xml',
- outputPath: '.',
- passThreshold: 100,
- reportFileName: 'device-management/demo_test/functional_test/report*.html',
- unstableThreshold: 0]);
- archiveArtifacts artifacts: '*.log,*.gz'
- }
- }
-}
diff --git a/jjb/pipeline/omec-postmerge.groovy b/jjb/pipeline/omec-postmerge.groovy
index aaac1b7..06e2be2 100644
--- a/jjb/pipeline/omec-postmerge.groovy
+++ b/jjb/pipeline/omec-postmerge.groovy
@@ -12,8 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// omec-postmerge.groovy
-// Combines docker-publish, deploy and test pipelines into one job that can be triggered by a GitHub PR merge
+// Builds and publishes OMEC docker images. Triggered by GitHub PR merge.
pipeline {
@@ -57,92 +56,6 @@
}
}
}
-
- stage ("Get Image Tags"){
- steps {
- script {
- hssdb_tag = sh returnStdout: true, script: """curl -s 'https://registry.hub.docker.com/v2/repositories/omecproject/c3po-hssdb/tags/' | jq '.results[] | select(.name | test("${c3poBranchName}-[0-9a-z]{7}\$")) | select(.last_updater_username=="onfauto") |.name' | head -1 | tr -d \\\""""
- hss_tag = sh returnStdout: true, script: """curl -s 'https://registry.hub.docker.com/v2/repositories/omecproject/c3po-hss/tags/' | jq '.results[] | select(.name | test("${c3poBranchName}-[0-9a-z]{7}\$")) | select(.last_updater_username=="onfauto") |.name' | head -1 | tr -d \\\""""
- mme_tag = sh returnStdout: true, script: """curl -s 'https://registry.hub.docker.com/v2/repositories/omecproject/nucleus/tags/' | jq '.results[] | select(.name | test("${nucleusBranchName}-[0-9a-z]{7}\$")) | select(.last_updater_username=="onfauto") |.name' | head -1 | tr -d \\\""""
- spgwc_tag = sh returnStdout: true, script: """curl -s 'https://registry.hub.docker.com/v2/repositories/omecproject/spgw/tags/' | jq '.results[] | select(.name | test("${spgwBranchName}-[0-9a-z]{7}\$")) | select(.last_updater_username=="onfauto") |.name' | head -1 | tr -d \\\""""
- bess_tag = sh returnStdout: true, script: """curl -s 'https://registry.hub.docker.com/v2/repositories/omecproject/upf-epc-bess/tags/' | jq '.results[] | select(.name | test("${upfBranchName}-[0-9a-z]{7}\$")) | select(.last_updater_username=="onfauto") |.name' | head -1 | tr -d \\\""""
- zmqiface_tag = sh returnStdout: true, script: """curl -s 'https://registry.hub.docker.com/v2/repositories/omecproject/upf-epc-cpiface/tags/' | jq '.results[] | select(.name | test("${upfBranchName}-[0-9a-z]{7}\$")) | select(.last_updater_username=="onfauto") |.name' | head -1 | tr -d \\\""""
- pfcpiface_tag = sh returnStdout: true, script: """curl -s 'https://registry.hub.docker.com/v2/repositories/omecproject/upf-epc-pfcpiface/tags/' | jq '.results[] | select(.name | test("${upfBranchName}-[0-9a-z]{7}\$")) | select(.last_updater_username=="onfauto") |.name' | head -1 | tr -d \\\""""
-
- hssdb_image = "${params.registryProxy}/c3po-hssdb:"+hssdb_tag
- hss_image = "${params.registryProxy}/c3po-hss:"+hss_tag
- mme_image = "${params.registryProxy}/nucleus:"+mme_tag
- spgwc_image = "${params.registryProxy}/spgw:"+spgwc_tag
- bess_image = "${params.registryProxy}/upf-epc-bess:"+bess_tag
- zmqiface_image = "${params.registryProxy}/upf-epc-cpiface:"+zmqiface_tag
- pfcpiface_image = "${params.registryProxy}/upf-epc-pfcpiface:"+pfcpiface_tag
-
- updatedImages = ""
- switch("${params.repoName}") {
- case "c3po":
- hssdb_image = "${params.registryProxy}/c3po-hssdb:${branchName}-${abbreviated_commit_hash}"
- hss_image = "${params.registryProxy}/c3po-hss:${branchName}-${abbreviated_commit_hash}"
- updatedImages += hssdb_image + ","
- updatedImages += hss_image
- break
- case "spgw":
- spgwc_image = "${params.registryProxy}/spgw:${branchName}-${abbreviated_commit_hash}"
- updatedImages += spgwc_image
- break
- case "Nucleus":
- mme_image = "${params.registryProxy}/nucleus:${branchName}-${abbreviated_commit_hash}"
- updatedImages += mme_image
- break
- case "upf-epc":
- bess_image = "${params.registryProxy}/upf-epc-bess:${branchName}-${abbreviated_commit_hash}"
- zmqiface_image = "${params.registryProxy}/upf-epc-cpiface:${branchName}-${abbreviated_commit_hash}"
- pfcpiface_image = "${params.registryProxy}/upf-epc-pfcpiface:${branchName}-${abbreviated_commit_hash}"
- updatedImages += bess_image + ","
- updatedImages += zmqiface_image + ","
- updatedImages += pfcpiface_image + ","
- updatedImages += bess_image + "-ivybridge,"
- updatedImages += zmqiface_image + "-ivybridge,"
- updatedImages += pfcpiface_image + "-ivybridge"
- break
- }
- }
- }
- }
-
- stage ("Deploy and Test"){
- options {
- lock(resource: 'aether-dev-cluster')
- }
-
- stages {
- stage ("Deploy OMEC"){
- steps {
- echo "Using hssdb image: ${hssdb_image}"
- echo "Using hss image: ${hss_image}"
- echo "Using mme image: ${mme_image}"
- echo "Using spgwc image: ${spgwc_image}"
- echo "Using bess image: ${bess_image}"
- echo "Using zmqiface image: ${zmqiface_image}"
- echo "Using pfcpiface image: ${pfcpiface_image}"
- build job: "omec_deploy_dev", parameters: [
- string(name: 'hssdbImage', value: "${hssdb_image.trim()}"),
- string(name: 'hssImage', value: "${hss_image.trim()}"),
- string(name: 'mmeImage', value: "${mme_image.trim()}"),
- string(name: 'spgwcImage', value: "${spgwc_image.trim()}"),
- string(name: 'bessImage', value: "${bess_image.trim()}"),
- string(name: 'zmqifaceImage', value: "${zmqiface_image.trim()}"),
- string(name: 'pfcpifaceImage', value: "${pfcpiface_image.trim()}"),
- ]
- }
- }
-
- stage ("Run NG40 Tests"){
- steps {
- build job: "omec_ng40-test_dev"
- }
- }
- }
- }
}
post {
failure {
diff --git a/jjb/pipeline/onos-app-release.groovy b/jjb/pipeline/onos-app-release.groovy
index f4b78bd..f8d87de 100644
--- a/jjb/pipeline/onos-app-release.groovy
+++ b/jjb/pipeline/onos-app-release.groovy
@@ -12,7 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-def app = '${app}'
+def appRepo = '${appRepo}'
+def appName = '${appName}'
+def apiVersion = '${apiVersion}'
+def nextApiVersion = '${nextApiVersion}'
def version = '${version}'
def nextVersion = '${nextVersion}'
def branch = '${branch}'
@@ -32,12 +35,17 @@
sh 'mvn versions:set -DnewVersion=' + newVersion + ' versions:commit'
}
+def changeApiVersion(def appName, def newApiVersion) {
+ // Update the top-level <*appName*.api.version> tag in the root pom.xml.
+ sh 'mvn versions:set-property -Dproperty=' + appName + '.api.version -DnewVersion=' + newApiVersion + ' -DallowSnapshots=true versions:commit'
+}
+
// TODO: use the declarative pipeline syntax, like all other groovy files.
// This implementation is based on the legacy cord-onos-publisher/Jenkinsfile.release
-node ('ubuntu16.04-basebuild-1c-2g') {
+node ('ubuntu18.04-basebuild-1c-2g') {
- sh 'echo Releasing ' + app + ' repository on ' + branch + ' branch'
- sh 'echo Releasing version ' + version + ' and starting ' + nextVersion + '-SNAPSHOT'
+ sh 'echo Releasing ' + appRepo + ' repository on ' + branch + ' branch'
+ sh 'echo Releasing version ' + version + ' with API version ' + apiVersion + ' and starting ' + nextVersion + '-SNAPSHOT with API version ' + nextApiVersion + '-SNAPSHOT'
// Set the JDK version
sh 'echo Using JDK distribution: ' + jdkDistro
@@ -68,15 +76,20 @@
cleanWs()
sshagent (credentials: ['gerrit-jenkins-user']) {
- git branch: branch, url: 'ssh://jenkins@gerrit.opencord.org:29418/' + app, credentialsId: 'gerrit-jenkins-user'
+ git branch: branch, url: 'ssh://jenkins@gerrit.opencord.org:29418/' + appRepo, credentialsId: 'gerrit-jenkins-user'
sh 'gitdir=$(git rev-parse --git-dir); scp -p -P 29418 jenkins@gerrit.opencord.org:hooks/commit-msg ${gitdir}/hooks/'
}
}
stage ('Move to release version') {
+ // Set the release version; if apiVersion is non-empty, also update the API version property.
+ // This allows releasing apps that don't have an api.version (e.g. bng, pppoe, kafka).
changeVersion(version)
- sh 'git add -A && git commit -m "Release version ' + version + '"'
+ if (apiVersion != "") {
+ changeApiVersion(appName, apiVersion)
+ }
+ sh 'git add -A && git commit -m "Release app version ' + version + ' with API version ' + apiVersion + '"'
}
stage ('Verify code') {
@@ -99,8 +112,12 @@
stage ('Move to next SNAPSHOT version') {
def snapshot = nextVersion + '-SNAPSHOT'
+ def apiSnapshot = nextApiVersion + '-SNAPSHOT'
changeVersion(snapshot)
- sh 'git add -A && git commit -m "Starting snapshot ' + snapshot + '"'
+ if (apiVersion != "") {
+ changeApiVersion(appName, apiSnapshot)
+ }
+ sh 'git add -A && git commit -m "Starting snapshot ' + snapshot + ' with API version ' + apiSnapshot + '"'
sshagent (credentials: ['gerrit-jenkins-user']) {
sh 'git push origin HEAD:refs/for/' + branch
}
diff --git a/jjb/pipeline/voltha-atest-provisioning.groovy b/jjb/pipeline/voltha-atest-provisioning.groovy
deleted file mode 100755
index 297c27d..0000000
--- a/jjb/pipeline/voltha-atest-provisioning.groovy
+++ /dev/null
@@ -1,106 +0,0 @@
-/* voltha-atest-provisioning pipeline */
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
-
- stages {
-
- stage ('Clean up') {
- steps {
- sh '''
- sudo rm -rf *
- sudo rm -rf /home/cord/cord*
- '''
- }
- }
-
- stage('Voltha Repo') {
- steps {
- checkout(changelog: false, \
- poll: false,
- scm: [$class: 'RepoScm', \
- manifestRepositoryUrl: "${params.manifestUrl}", \
- manifestBranch: "${params.manifestBranch}", \
- currentBranch: true, \
- destinationDir: 'cord', \
- forceSync: true,
- resetFirst: true, \
- quiet: true, \
- jobs: 4, \
- showAllChanges: true] \
- )
- }
- }
-
- stage ('Build Voltha and ONOS') {
- when { expression { return params.BuildVoltha } }
- steps {
- sh '''
- sudo service docker restart
- cd $WORKSPACE/cord/incubator/voltha
- repo download "${GERRIT_PROJECT}" "${gerritChangeNumber}/${gerritPatchsetNumber}"
- chmod +x env.sh
- source env.sh
- make fetch
- make clean
- make build
- '''
- }
- }
-
- stage ('Build BBSIM') {
- when { expression { return params.BuildBbsim } }
- steps {
- sh '''
- sudo service docker restart
- cd $WORKSPACE/cord/incubator/voltha-bbsim
- repo download "${GERRIT_PROJECT}" "${gerritChangeNumber}/${gerritPatchsetNumber}"
- make DOCKER_TAG=latest docker-build
- docker images | grep bbsim
- '''
- }
- }
-
- stage ('Start Voltha Test Suite') {
- steps {
- sh """
- cd $WORKSPACE/cord/incubator/voltha/tests/atests/common/
- ./run_robot.sh jenkinstest ${params.adapter} || true
- """
- }
- }
-
- stage('Publish') {
- steps {
- sh """
- if [ -d RobotLogs ]; then rm -r RobotLogs; fi; mkdir RobotLogs
- cp -r $WORKSPACE/cord/incubator/voltha/jenkinstest/ ./RobotLogs
- cp -r $WORKSPACE/cord/incubator/voltha/jenkinstest/voltha_test_results/*.log $WORKSPACE/
- """
-
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: 'RobotLogs/jenkinstest/log*.html',
- otherFiles: '',
- outputFileName: 'RobotLogs/jenkinstest/output*.xml',
- outputPath: '.',
- passThreshold: 100,
- reportFileName: 'RobotLogs/jenkinstest/report*.html',
- unstableThreshold: 0]);
- }
- }
- }
-
- post {
- always {
- archiveArtifacts artifacts: '*.log'
- step([$class: 'Mailer', notifyEveryUnstableBuild: true, recipients: "gdepatie@northforgeinc.com, kailash@opennetworking.org", sendToIndividuals: false])
- }
- }
-}
-
-
diff --git a/jjb/pipeline/voltha-automated-build.groovy b/jjb/pipeline/voltha-automated-build.groovy
deleted file mode 100644
index 3b2ac05..0000000
--- a/jjb/pipeline/voltha-automated-build.groovy
+++ /dev/null
@@ -1,68 +0,0 @@
-/* voltha-automated-build pipeline */
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
-
- stages {
-
- stage ('Cleanup workspace') {
- steps {
- sh 'rm -rf ./build ./component ./incubator ./onos-apps ./orchestration ./test ./.repo'
- }
- }
-
- stage('repo') {
- steps {
- checkout(changelog: false, \
- poll: false,
- scm: [$class: 'RepoScm', \
- manifestRepositoryUrl: "${params.manifestUrl}", \
- manifestBranch: "${params.manifestBranch}", \
- currentBranch: true, \
- destinationDir: 'cord', \
- forceSync: true,
- resetFirst: true, \
- quiet: true, \
- jobs: 4, \
- showAllChanges: true] \
- )
- }
- }
-
-
- stage ('Bring up voltha dev vm') {
- steps {
- sh '''
- pushd incubator/voltha
- vagrant up voltha
- popd
- '''
- }
- }
- stage ('Remove the pre-created venv-linux') {
- steps {
- sh 'vagrant ssh -c "rm -rf /cord/incubator/voltha/venv-linux"'
- }
- }
-
- stage ('Build voltha') {
- steps {
- sh 'vagrant ssh -c "cd /cord/incubator/voltha && source env.sh && make fetch-jenkins && make jenkins" voltha' }
- }
-
- stage ('Bring up voltha containers') {
- steps {
- sh 'vagrant ssh -c "cd /cord/incubator/voltha && source env.sh && docker-compose -f compose/docker-compose-docutests.yml up -d" voltha' }
- }
-
- stage ('Run Integration Tests') {
- steps {
- sh 'vagrant ssh -c "cd /cord/incubator/voltha && source env.sh && make jenkins-test" voltha' }
- }
-
- }
-}
diff --git a/jjb/pipeline/voltha-dt-physical-functional-tests-openonu-go.groovy b/jjb/pipeline/voltha-dt-physical-functional-tests-openonu-go.groovy
deleted file mode 100644
index f60bd3f..0000000
--- a/jjb/pipeline/voltha-dt-physical-functional-tests-openonu-go.groovy
+++ /dev/null
@@ -1,261 +0,0 @@
-// Copyright 2017-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-node {
- // Need this so that deployment_config has global scope when it's read later
- deployment_config = null
-}
-
-pipeline {
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 60, unit: 'MINUTES')
- }
-
- environment {
- KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
- VOLTCONFIG="$HOME/.volt/config-minimal"
- PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- }
-
- stages {
- stage('Clone kind-voltha') {
- steps {
- step([$class: 'WsCleanup'])
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/kind-voltha",
- refspec: "${kindVolthaChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
- stage('Clone voltha-system-tests') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/voltha-system-tests",
- refspec: "${volthaSystemTestsChange}"
- ]],
- branches: [[ name: "${branch}", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
- stage('Clone cord-tester') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/cord-tester",
- refspec: "${cordTesterChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "cord-tester"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
- // This checkout allows us to show changes in Jenkins
- // we only do this on master as we don't branch all the repos for all the releases
- // (we should compute the difference by tracking the container version, not the code)
- stage('Download All the VOLTHA repos') {
- when {
- expression {
- return "${branch}" == 'master';
- }
- }
- steps {
- checkout(changelog: true,
- poll: false,
- scm: [$class: 'RepoScm',
- manifestRepositoryUrl: "${params.manifestUrl}",
- manifestBranch: "${params.branch}",
- currentBranch: true,
- destinationDir: 'voltha',
- forceSync: true,
- resetFirst: true,
- quiet: true,
- jobs: 4,
- showAllChanges: true]
- )
- }
- }
- stage ('Initialize') {
- steps {
- sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/${configBaseDir}"
- script {
- deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
- }
- sh returnStdout: false, script: """
- mkdir -p $WORKSPACE/bin
- bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/bin"
- cd $WORKSPACE
- if [ "${params.branch}" != "master" ]; then
- cd $WORKSPACE/kind-voltha
- source releases/${params.branch}
- else
- VOLTCTL_VERSION=\$(curl -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')
- fi
-
- HOSTOS=\$(uname -s | tr "[:upper:]" "[:lower:"])
- HOSTARCH=\$(uname -m | tr "[:upper:]" "[:lower:"])
- if [ \$HOSTARCH == "x86_64" ]; then
- HOSTARCH="amd64"
- fi
- curl -o $WORKSPACE/bin/voltctl -sSL https://github.com/opencord/voltctl/releases/download/v\${VOLTCTL_VERSION}/voltctl-\${VOLTCTL_VERSION}-\${HOSTOS}-\${HOSTARCH}
- chmod 755 $WORKSPACE/bin/voltctl
- voltctl version --clientonly
-
-
- # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
- # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
- # We should change this. In the meantime here is a workaround.
- if [ "${params.branch}" == "master" ]; then
- set +e
-
-
- # Remove noise from voltha-core logs
- voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
- voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
- # Remove noise from openolt logs
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
- fi
- """
- }
- }
-
- stage('Functional Tests') {
- environment {
- ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
- ROBOT_FILE="Voltha_DT_PODTests.robot"
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/dt-workflow/FunctionalTests"
- }
- steps {
- timeout(time: 30, unit: 'MINUTES') {
- sh """
- cd $WORKSPACE/kind-voltha/scripts
- ./log-collector.sh > /dev/null &
- ./log-combine.sh > /dev/null &
-
- mkdir -p $ROBOT_LOGS_DIR
- if ( ${powerSwitch} ); then
- export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanityDt -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
- else
- export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i sanityDt -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
- fi
- make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
- """
- }
- }
- }
- }
- post {
- always {
- sh returnStdout: false, script: '''
- set +e
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
- kubectl get nodes -o wide
- kubectl get pods -n voltha -o wide
-
- sleep 60 # Wait for log-collector and log-combine to complete
-
- # Clean up "announcer" pod used by the tests if present
- kubectl delete pod announcer || true
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep '"level":"error"' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep 'ERROR' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
- extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
- extract_errors_python onos >> $WORKSPACE/error-report.log
-
- gzip error-report.log || true
-
- cd $WORKSPACE/kind-voltha/scripts/logger/combined/
- tar czf $WORKSPACE/container-logs.tgz *
-
- cd $WORKSPACE
- gzip *-combined.log || true
-
- # collect ETCD cluster logs
- mkdir -p $WORKSPACE/etcd
- printf '%s\n' $(kubectl get pods -l app=etcd -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I% bash -c "kubectl logs % > $WORKSPACE/etcd/%.log"
- '''
- script {
- deployment_config.olts.each { olt ->
- sh returnStdout: false, script: """
- sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log # Remove escape sequences
- sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log # Remove escape sequences
- sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/startup.log $WORKSPACE/startup-${olt.sship}.log || true
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/startup-${olt.sship}.log || true # Remove escape sequences
- sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt_process_watchdog.log $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true # Remove escape sequences
- """
- }
- }
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: '**/log*.html',
- otherFiles: '',
- outputFileName: '**/output*.xml',
- outputPath: 'RobotLogs',
- passThreshold: 100,
- reportFileName: '**/report*.html',
- unstableThreshold: 0
- ]);
- archiveArtifacts artifacts: '*.log,*.gz,*.tgz,etcd/*.log'
- }
- unstable {
- step([$class: 'Mailer', notifyEveryUnstableBuild: true, recipients: "${notificationEmail}", sendToIndividuals: false])
- }
- }
-}
diff --git a/jjb/pipeline/voltha-dt-physical-functional-tests.groovy b/jjb/pipeline/voltha-dt-physical-functional-tests.groovy
index a3f7083..41419f8 100644
--- a/jjb/pipeline/voltha-dt-physical-functional-tests.groovy
+++ b/jjb/pipeline/voltha-dt-physical-functional-tests.groovy
@@ -23,7 +23,7 @@
label "${params.buildNode}"
}
options {
- timeout(time: 340, unit: 'MINUTES')
+ timeout(time: 640, unit: 'MINUTES')
}
environment {
@@ -66,6 +66,14 @@
[$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
],
])
+ script {
+ sh(script:"""
+ if [ '${volthaSystemTestsChange}' != '' ] ; then
+ cd $WORKSPACE/voltha-system-tests;
+ git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
+ fi
+ """)
+ }
}
}
stage('Clone cord-tester') {
@@ -309,6 +317,14 @@
gzip *-combined.log || true
rm *-combined.log || true
+ # store information on running charts
+ helm ls > $WORKSPACE/helm-list.txt || true
+
+ # store information on the running pods
+ kubectl get pods --all-namespaces -o wide > $WORKSPACE/pods.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-images.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-imagesId.txt || true
+
# collect ETCD cluster logs
mkdir -p $WORKSPACE/etcd
printf '%s\n' $(kubectl get pods -l app=etcd -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I% bash -c "kubectl logs % > $WORKSPACE/etcd/%.log"
@@ -339,7 +355,7 @@
reportFileName: '**/report*.html',
unstableThreshold: 0
]);
- archiveArtifacts artifacts: '*.log,*.gz,*.tgz,etcd/*.log'
+ archiveArtifacts artifacts: '*.log,*.gz,*.tgz,etcd/*.log,*.txt'
}
unstable {
step([$class: 'Mailer', notifyEveryUnstableBuild: true, recipients: "${notificationEmail}", sendToIndividuals: false])
diff --git a/jjb/pipeline/voltha-openonu-go-tests.groovy b/jjb/pipeline/voltha-openonu-go-tests.groovy
deleted file mode 100755
index e86d61f..0000000
--- a/jjb/pipeline/voltha-openonu-go-tests.groovy
+++ /dev/null
@@ -1,521 +0,0 @@
-// Copyright 2017-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// voltha-2.x e2e tests
-// uses kind-voltha to deploy voltha-2.X
-// uses bbsim to simulate OLT/ONUs
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 90, unit: 'MINUTES')
- }
- environment {
- KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
- VOLTCONFIG="$HOME/.volt/config-minimal"
- PATH="$WORKSPACE/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- NAME="minimal"
- FANCY=0
- WITH_SIM_ADAPTERS="n"
- WITH_RADIUS="y"
- WITH_BBSIM="y"
- DEPLOY_K8S="y"
- VOLTHA_LOG_LEVEL="DEBUG"
- CONFIG_SADIS="external"
- BBSIM_CFG="configs/bbsim-sadis-att.yaml"
- ROBOT_MISC_ARGS="-d $WORKSPACE/RobotLogs"
- EXTRA_HELM_FLAGS=" --set global.image_registry=mirror.registry.opennetworking.org/ --set defaults.image_registry=mirror.registry.opennetworking.org/ "
- }
- stages {
- stage('Clone kind-voltha') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/kind-voltha",
- refspec: "${kindVolthaChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- sh """
- if [ '${kindVolthaChange}' != '' ] ; then
- cd $WORKSPACE/kind-voltha
- git fetch https://gerrit.opencord.org/kind-voltha ${kindVolthaChange} && git checkout FETCH_HEAD
- fi
- """
- }
- }
- stage('Clone voltha-system-tests') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/voltha-system-tests",
- refspec: "${volthaSystemTestsChange}"
- ]],
- branches: [[ name: "${branch}", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- sh """
- if [ '${volthaSystemTestsChange}' != '' ] ; then
- cd $WORKSPACE/voltha-system-tests
- git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
- fi
- """
- }
- }
- // If the repo under test is not kind-voltha
- // then download it and checkout the patch
- stage('Download Patch') {
- when {
- expression {
- return "${gerritProject}" != 'kind-voltha';
- }
- }
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/${gerritProject}",
- refspec: "${gerritRefspec}"
- ]],
- branches: [[ name: "${branch}", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "${gerritProject}"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- sh """
- pushd $WORKSPACE/${gerritProject}
- git fetch https://gerrit.opencord.org/${gerritProject} ${gerritRefspec} && git checkout FETCH_HEAD
- echo "Currently on commit: \n"
- git log -1 --oneline
- popd
- """
- }
- }
- // If the repo under test is kind-voltha we don't need to download it again,
- // as we already have it, simply checkout the patch
- stage('Checkout kind-voltha patch') {
- when {
- expression {
- return "${gerritProject}" == 'kind-voltha';
- }
- }
- steps {
- sh """
- cd $WORKSPACE/kind-voltha
- git fetch https://gerrit.opencord.org/kind-voltha ${gerritRefspec} && git checkout FETCH_HEAD
- """
- }
- }
- stage('Create K8s Cluster') {
- steps {
- sh """
- cd $WORKSPACE/kind-voltha/
-
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
-
- JUST_K8S=y ./voltha up
- bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/kind-voltha/bin"
- """
- }
- }
-
- stage('Build Images') {
- steps {
- sh """
- make -C $WORKSPACE/voltha-openonu-adapter-go DOCKER_REGISTRY=mirror.registry.opennetworking.org/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=citest docker-build
- """
- }
- }
-
- stage('Push Images') {
- steps {
- sh '''
- docker images | grep citest
- for image in \$(docker images -f "reference=*/*/*citest" --format "{{.Repository}}"); do echo "Pushing \$image to nodes"; kind load docker-image \$image:citest --name voltha-\$NAME --nodes voltha-\$NAME-control-plane,voltha-\$NAME-worker,voltha-\$NAME-worker2; done
- '''
- }
- }
- stage('Deploy Voltha') {
- steps {
- sh """
- export EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${extraHelmFlags} "
-
- IMAGES="adapter_open_onu_go"
-
- for I in \$IMAGES
- do
- EXTRA_HELM_FLAGS+="--set images.\$I.tag=citest,images.\$I.pullPolicy=Never "
- done
-
- cd $WORKSPACE/kind-voltha/
- echo \$EXTRA_HELM_FLAGS
-
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
-
- ./voltha up
- """
- }
- }
-
- stage('Run E2E Tests 1t8gem') {
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/1t8gem"
- }
- steps {
- sh '''
- cd $WORKSPACE/kind-voltha/
- #source $NAME-env.sh
- WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down
-
- export EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${extraHelmFlags} "
-
- IMAGES="adapter_open_onu_go"
-
- for I in \$IMAGES
- do
- EXTRA_HELM_FLAGS+="--set images.\$I.tag=citest,images.\$I.pullPolicy=Never "
- done
- #1t8gem
- mkdir -p $WORKSPACE/1t8gem
- _TAG=kail-1t8gem kail -n voltha -n default > $WORKSPACE/1t8gem/onos-voltha-combined.log &
-
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
-
- DEPLOY_K8S=n ./voltha up
-
- mkdir -p $ROBOT_LOGS_DIR/1t8gem
- export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR"
- export TARGET_1T8GEM=1t8gem-openonu-go-adapter-test
-
- if [ "${branch}" != "voltha-2.6" ]; then
- export NAME=voltha_voltha
- fi
-
- make -C $WORKSPACE/voltha-system-tests \$TARGET_1T8GEM || true
-
- # stop logging
- P_IDS="$(ps e -ww -A | grep "_TAG=kail-1t8gem" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
-
- # get pods information
- kubectl get pods -o wide --all-namespaces > $WORKSPACE/1t8gem/pods.txt || true
- '''
- }
- }
-
- stage('DT workflow') {
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/DTWorkflow"
- }
- steps {
- sh '''
- cd $WORKSPACE/kind-voltha/
- #source $NAME-env.sh
- WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down
-
- export EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${extraHelmFlags} "
-
- IMAGES="adapter_open_onu_go"
-
- for I in \$IMAGES
- do
- EXTRA_HELM_FLAGS+="--set images.\$I.tag=citest,images.\$I.pullPolicy=Never "
- done
-
- # Workflow-specific flags
- export WITH_RADIUS=no
- export WITH_EAPOL=no
- export WITH_DHCP=no
- export WITH_IGMP=no
- export CONFIG_SADIS="external"
- export BBSIM_CFG="configs/bbsim-sadis-dt.yaml"
-
- # start logging
- mkdir -p $WORKSPACE/dt
- _TAG=kail-dt kail -n voltha -n default > $WORKSPACE/dt/onos-voltha-combined.log &
-
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
-
- DEPLOY_K8S=n ./voltha up
-
- mkdir -p $ROBOT_LOGS_DIR
- export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR -e PowerSwitch"
-
- # By default, all tests tagged 'sanityDt' are run. This covers basic functionality
- # like running through the DT workflow for a single subscriber.
- export TARGET=sanity-kind-dt
-
- # If the Gerrit comment contains a line with "functional tests" then run the full
- # functional test suite. This covers tests tagged either 'sanityDt' or 'functionalDt'.
- # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
- REGEX="functional tests"
- if [[ "$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]]; then
- TARGET=functional-single-kind-dt
- fi
-
- make -C $WORKSPACE/voltha-system-tests \$TARGET || true
-
- # stop logging
- P_IDS="$(ps e -ww -A | grep "_TAG=kail-dt" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
-
- # get pods information
- kubectl get pods -o wide --all-namespaces > $WORKSPACE/dt/pods.txt || true
- '''
- }
- }
-
- stage('ATT workflow') {
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ATTWorkflow"
- }
- steps {
- sh '''
- cd $WORKSPACE/kind-voltha/
- WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down
-
- export EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${extraHelmFlags} "
-
- IMAGES="adapter_open_onu_go"
-
- for I in \$IMAGES
- do
- EXTRA_HELM_FLAGS+="--set images.\$I.tag=citest,images.\$I.pullPolicy=Never "
- done
-
- # Workflow-specific flags
- export WITH_RADIUS=yes
- export WITH_EAPOL=yes
- export WITH_BBSIM=yes
- export DEPLOY_K8S=yes
- export CONFIG_SADIS="external"
- export BBSIM_CFG="configs/bbsim-sadis-att.yaml"
-
- if [ "${gerritProject}" = "voltctl" ]; then
- export VOLTCTL_VERSION=$(cat $WORKSPACE/voltctl/VERSION)
- cp $WORKSPACE/voltctl/voltctl $WORKSPACE/kind-voltha/bin/voltctl
- md5sum $WORKSPACE/kind-voltha/bin/voltctl
- fi
-
- # start logging
- mkdir -p $WORKSPACE/att
- _TAG=kail-att kail -n voltha -n default > $WORKSPACE/att/onos-voltha-combined.log &
-
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
-
- DEPLOY_K8S=n ./voltha up
-
- mkdir -p $ROBOT_LOGS_DIR
- export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR -e PowerSwitch"
-
- # By default, all tests tagged 'sanity' are run. This covers basic functionality
- # like running through the ATT workflow for a single subscriber.
- export TARGET=sanity-single-kind
-
- # If the Gerrit comment contains a line with "functional tests" then run the full
- # functional test suite. This covers tests tagged either 'sanity' or 'functional'.
- # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
- REGEX="functional tests"
- if [[ "$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]]; then
- TARGET=functional-single-kind
- fi
-
- make -C $WORKSPACE/voltha-system-tests \$TARGET || true
-
- # stop logging
- P_IDS="$(ps e -ww -A | grep "_TAG=kail-att" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
-
- # get pods information
- kubectl get pods -o wide --all-namespaces > $WORKSPACE/att/pods.txt || true
- '''
- }
- }
-
- stage('TT workflow') {
- environment {
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/TTWorkflow"
- }
- steps {
- sh '''
- cd $WORKSPACE/kind-voltha/
- WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down
-
- export EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${extraHelmFlags} "
-
- IMAGES="adapter_open_onu_go"
-
- for I in \$IMAGES
- do
- EXTRA_HELM_FLAGS+="--set images.\$I.tag=citest,images.\$I.pullPolicy=Never "
- done
-
- # Workflow-specific flags
- export WITH_RADIUS=no
- export WITH_EAPOL=no
- export WITH_DHCP=yes
- export WITH_IGMP=yes
- export CONFIG_SADIS="external"
- export BBSIM_CFG="configs/bbsim-sadis-tt.yaml"
-
- # start logging
- mkdir -p $WORKSPACE/tt
- _TAG=kail-tt kail -n voltha -n default > $WORKSPACE/tt/onos-voltha-combined.log &
-
- if [ "${branch}" != "master" ]; then
- echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/kind-voltha/releases/${branch}"
- else
- echo "on master, using default settings for kind-voltha"
- fi
-
- DEPLOY_K8S=n ./voltha up
-
- mkdir -p $ROBOT_LOGS_DIR
- export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR -e PowerSwitch"
-
- # By default, all tests tagged 'sanityTt' are run. This covers basic functionality
- # like running through the TT workflow for a single subscriber.
- export TARGET=sanity-kind-tt
-
- # If the Gerrit comment contains a line with "functional tests" then run the full
- # functional test suite. This covers tests tagged either 'sanityTt' or 'functionalTt'.
- # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
- REGEX="functional tests"
- if [[ "$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]]; then
- TARGET=functional-single-kind-tt
- fi
-
- make -C $WORKSPACE/voltha-system-tests \$TARGET || true
-
- # stop logging
- P_IDS="$(ps e -ww -A | grep "_TAG=kail-tt" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
-
- # get pods information
- kubectl get pods -o wide --all-namespaces > $WORKSPACE/tt/pods.txt || true
- '''
- }
- }
- }
- post {
- always {
- sh '''
- set +e
- # get pods information
- kubectl get pods -o wide --all-namespaces
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}"
- helm ls
-
- sync
- pkill kail || true
- md5sum $WORKSPACE/kind-voltha/bin/voltctl
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep $1 $WORKSPACE/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep $1 $WORKSPACE/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log || true
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log || true
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log || true
- extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log || true
-
- gzip $WORKSPACE/onos-voltha-combined.log || true
- '''
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: 'RobotLogs/*/log*.html',
- otherFiles: '',
- outputFileName: 'RobotLogs/*/output*.xml',
- outputPath: '.',
- passThreshold: 100,
- reportFileName: 'RobotLogs/*/report*.html',
- unstableThreshold: 0]);
- archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.txt'
- }
- }
-}
diff --git a/jjb/pipeline/voltha-physical-functional-tests-openonu-go.groovy b/jjb/pipeline/voltha-physical-functional-tests-openonu-go.groovy
deleted file mode 100644
index 6850bc2..0000000
--- a/jjb/pipeline/voltha-physical-functional-tests-openonu-go.groovy
+++ /dev/null
@@ -1,273 +0,0 @@
-// Copyright 2017-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-node {
- // Need this so that deployment_config has global scope when it's read later
- deployment_config = null
-}
-
-pipeline {
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 380, unit: 'MINUTES')
- }
-
- environment {
- KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
- VOLTCONFIG="$HOME/.volt/config-minimal"
- PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- }
- stages {
- stage('Clone kind-voltha') {
- steps {
- step([$class: 'WsCleanup'])
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/kind-voltha",
- refspec: "${kindVolthaChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
- stage('Clone voltha-system-tests') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/voltha-system-tests",
- refspec: "${volthaSystemTestsChange}"
- ]],
- branches: [[ name: "${branch}", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
- stage('Clone cord-tester') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/cord-tester",
- refspec: "${cordTesterChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "cord-tester"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
- stage('Download All the VOLTHA repos') {
- when {
- expression {
- return "${branch}" == 'master';
- }
- }
- steps {
- checkout(changelog: true,
- poll: false,
- scm: [$class: 'RepoScm',
- manifestRepositoryUrl: "${params.manifestUrl}",
- manifestBranch: "${params.branch}",
- currentBranch: true,
- destinationDir: 'voltha',
- forceSync: true,
- resetFirst: true,
- quiet: true,
- jobs: 4,
- showAllChanges: true]
- )
- }
- }
- stage ('Initialize') {
- steps {
- sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/${configBaseDir}"
- script {
- deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
- }
- sh returnStdout: false, script: """
- mkdir -p $WORKSPACE/bin
- bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/bin"
- cd $WORKSPACE
- if [ "${params.branch}" != "master" ]; then
- cd $WORKSPACE/kind-voltha
- source releases/${params.branch}
- else
- VOLTCTL_VERSION=\$(curl -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')
- fi
-
- HOSTOS=\$(uname -s | tr "[:upper:]" "[:lower:"])
- HOSTARCH=\$(uname -m | tr "[:upper:]" "[:lower:"])
- if [ \$HOSTARCH == "x86_64" ]; then
- HOSTARCH="amd64"
- fi
- curl -o $WORKSPACE/bin/voltctl -sSL https://github.com/opencord/voltctl/releases/download/v\${VOLTCTL_VERSION}/voltctl-\${VOLTCTL_VERSION}-\${HOSTOS}-\${HOSTARCH}
- chmod 755 $WORKSPACE/bin/voltctl
- voltctl version --clientonly
-
- if [ "${params.branch}" == "master" ]; then
- # Default kind-voltha config doesn't work on ONF demo pod for accessing kvstore.
- # The issue is that the mgmt node is also one of the k8s nodes and so port forwarding doesn't work.
- # We should change this. In the meantime here is a workaround.
- set +e
-
- # Remove noise from voltha-core logs
- voltctl log level set WARN read-write-core#github.com/opencord/voltha-go/db/model
- voltctl log level set WARN read-write-core#github.com/opencord/voltha-lib-go/v3/pkg/kafka
- # Remove noise from openolt logs
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/db
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/probe
- voltctl log level set WARN adapter-open-olt#github.com/opencord/voltha-lib-go/v3/pkg/kafka
- fi
- """
- }
- }
-
- stage('Functional Tests') {
- environment {
- ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
- ROBOT_FILE="Voltha_PODTests.robot"
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/FunctionalTests"
- }
- steps {
- sh """
- cd $WORKSPACE/kind-voltha/scripts
- ./log-collector.sh > /dev/null &
- ./log-combine.sh > /dev/null &
-
- mkdir -p $ROBOT_LOGS_DIR
- if ( ${powerSwitch} ); then
- export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanity -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
- else
- export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i sanity -i functional -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
- fi
- make -C $WORKSPACE/voltha-system-tests voltha-test || true
- """
- }
- }
-
- stage('Dataplane Tests') {
- environment {
- ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
- ROBOT_FILE="Voltha_PODTests.robot"
- ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/DataplaneTests"
- }
- steps {
- sh """
- mkdir -p $ROBOT_LOGS_DIR
- export ROBOT_MISC_ARGS="--removekeywords wuks -i dataplane -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
- make -C $WORKSPACE/voltha-system-tests voltha-test || true
- """
- }
- }
-
- }
- post {
- always {
- sh returnStdout: false, script: '''
- set +e
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
- kubectl get nodes -o wide
- kubectl get pods -n voltha -o wide
- kubectl get pods -o wide
-
- sleep 60 # Wait for log-collector and log-combine to complete
-
- # Clean up "announcer" pod used by the tests if present
- kubectl delete pod announcer || true
-
- ## Pull out errors from log files
- extract_errors_go() {
- echo
- echo "Error summary for $1:"
- grep '"level":"error"' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_python() {
- echo
- echo "Error summary for $1:"
- grep 'ERROR' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
- echo
- }
-
- extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
- extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
- extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
- extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
- extract_errors_python onos >> $WORKSPACE/error-report.log
-
- gzip error-report.log || true
- rm error-report.log || true
-
- cd $WORKSPACE/kind-voltha/scripts/logger/combined/
- tar czf $WORKSPACE/container-logs.tgz *
- rm * || true
-
- cd $WORKSPACE
- gzip *-combined.log || true
- rm *-combined.log || true
-
- # collect ETCD cluster logs
- mkdir -p $WORKSPACE/etcd
- printf '%s\n' $(kubectl get pods -l app=etcd -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I% bash -c "kubectl logs % > $WORKSPACE/etcd/%.log"
- '''
- script {
- deployment_config.olts.each { olt ->
- sh returnStdout: false, script: """
- sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt.log $WORKSPACE/openolt-${olt.sship}.log || true
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt-${olt.sship}.log # Remove escape sequences
- sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/dev_mgmt_daemon.log $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log || true
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/dev_mgmt_daemon-${olt.sship}.log # Remove escape sequences
- sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/startup.log $WORKSPACE/startup-${olt.sship}.log || true
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/startup-${olt.sship}.log || true # Remove escape sequences
- sshpass -p ${olt.pass} scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no ${olt.user}@${olt.sship}:/var/log/openolt_process_watchdog.log $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true
- sed -i 's/\\x1b\\[[0-9;]*[a-zA-Z]//g' $WORKSPACE/openolt_process_watchdog-${olt.sship}.log || true # Remove escape sequences
- """
- }
- }
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: '**/log*.html',
- otherFiles: '',
- outputFileName: '**/output*.xml',
- outputPath: 'RobotLogs',
- passThreshold: 100,
- reportFileName: '**/report*.html',
- unstableThreshold: 0
- ]);
- archiveArtifacts artifacts: '*.log,*.gz,*.tgz,etcd/*.log'
- }
- unstable {
- step([$class: 'Mailer', notifyEveryUnstableBuild: true, recipients: "${notificationEmail}", sendToIndividuals: false])
- }
- }
-}
diff --git a/jjb/pipeline/voltha-physical-functional-tests.groovy b/jjb/pipeline/voltha-physical-functional-tests.groovy
index 09e8c45..a2d9c8d 100644
--- a/jjb/pipeline/voltha-physical-functional-tests.groovy
+++ b/jjb/pipeline/voltha-physical-functional-tests.groovy
@@ -283,6 +283,15 @@
gzip *-combined.log || true
rm *-combined.log || true
+ # store information on running charts
+ helm ls > $WORKSPACE/helm-list.txt || true
+
+ # store information on the running pods
+ kubectl get pods --all-namespaces -o wide > $WORKSPACE/pods.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-images.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-imagesId.txt || true
+
+
# collect ETCD cluster logs
mkdir -p $WORKSPACE/etcd
printf '%s\n' $(kubectl get pods -l app=etcd -o=jsonpath="{.items[*]['metadata.name']}") | xargs -I% bash -c "kubectl logs % > $WORKSPACE/etcd/%.log"
diff --git a/jjb/pipeline/voltha-physical-soak-dt-tests.groovy b/jjb/pipeline/voltha-physical-soak-dt-tests.groovy
index 396cac9..ccb918a 100644
--- a/jjb/pipeline/voltha-physical-soak-dt-tests.groovy
+++ b/jjb/pipeline/voltha-physical-soak-dt-tests.groovy
@@ -32,32 +32,100 @@
PATH="$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
}
+
stages {
- stage ('Initialize') {
+ stage('Clone kind-voltha') {
steps {
step([$class: 'WsCleanup'])
- sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/${configBaseDir}"
- sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/kind-voltha"
+ checkout([
+ $class: 'GitSCM',
+ userRemoteConfigs: [[
+ url: "https://gerrit.opencord.org/kind-voltha",
+ refspec: "${kindVolthaChange}"
+ ]],
+ branches: [[ name: "master", ]],
+ extensions: [
+ [$class: 'WipeWorkspace'],
+ [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
+ [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
+ ],
+ ])
+ }
+ }
+ stage('Clone voltha-system-tests') {
+ steps {
+ checkout([
+ $class: 'GitSCM',
+ userRemoteConfigs: [[
+ url: "https://gerrit.opencord.org/voltha-system-tests",
+ refspec: "${volthaSystemTestsChange}"
+ ]],
+ branches: [[ name: "${branch}", ]],
+ extensions: [
+ [$class: 'WipeWorkspace'],
+ [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
+ [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
+ ],
+ ])
script {
- deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
+ sh(script:"""
+ if [ '${volthaSystemTestsChange}' != '' ] ; then
+ cd $WORKSPACE/voltha-system-tests;
+ git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
+ fi
+ """)
}
- // This checkout allows us to show changes in Jenkins
- checkout(changelog: true,
- poll: false,
- scm: [$class: 'RepoScm',
- manifestRepositoryUrl: "${params.manifestUrl}",
- manifestBranch: "${params.branch}",
- currentBranch: true,
- destinationDir: 'voltha',
- forceSync: true,
- resetFirst: true,
- quiet: true,
- jobs: 4,
- showAllChanges: true]
- )
+ }
+ }
+ stage('Clone cord-tester') {
+ steps {
+ checkout([
+ $class: 'GitSCM',
+ userRemoteConfigs: [[
+ url: "https://gerrit.opencord.org/cord-tester",
+ refspec: "${cordTesterChange}"
+ ]],
+ branches: [[ name: "master", ]],
+ extensions: [
+ [$class: 'WipeWorkspace'],
+ [$class: 'RelativeTargetDirectory', relativeTargetDir: "cord-tester"],
+ [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
+ ],
+ ])
+ }
+ }
+ // This checkout allows us to show changes in Jenkins
+ // we only do this on master as we don't branch all the repos for all the releases
+ // (we should compute the difference by tracking the container version, not the code)
+ stage('Download All the VOLTHA repos') {
+ when {
+ expression {
+ return "${branch}" == 'master';
+ }
+ }
+ steps {
+ checkout(changelog: true,
+ poll: false,
+ scm: [$class: 'RepoScm',
+ manifestRepositoryUrl: "${params.manifestUrl}",
+ manifestBranch: "${params.branch}",
+ currentBranch: true,
+ destinationDir: 'voltha',
+ forceSync: true,
+ resetFirst: true,
+ quiet: true,
+ jobs: 4,
+ showAllChanges: true]
+ )
+ }
+ }
+ stage ('Initialize') {
+ steps {
+ sh returnStdout: false, script: "git clone -b master ${cordRepoUrl}/${configBaseDir}"
+ script {
+ deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
+ }
sh returnStdout: false, script: """
- cd voltha
- git clone -b master ${cordRepoUrl}/cord-tester
mkdir -p $WORKSPACE/bin
bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/bin"
cd $WORKSPACE
@@ -104,18 +172,18 @@
}
steps {
sh """
- cd $WORKSPACE/voltha/kind-voltha/scripts
+ cd $WORKSPACE/kind-voltha/scripts
./log-collector.sh > /dev/null &
./log-combine.sh > /dev/null &
mkdir -p $ROBOT_LOGS_DIR
if [ "${params.testType}" == "Functional" ]; then
if ( ${powerSwitch} ); then
- export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i sanityDt -i functionalDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
+ export ROBOT_MISC_ARGS="--removekeywords wuks -i PowerSwitch -i soak -e dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
else
- export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i sanityDt -i functionalDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
+ export ROBOT_MISC_ARGS="--removekeywords wuks -e PowerSwitch -i soak -e dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v teardown_device:False -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
fi
- make -C $WORKSPACE/voltha/voltha-system-tests voltha-dt-test || true
+ make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
fi
"""
}
@@ -131,12 +199,8 @@
sh """
mkdir -p $ROBOT_LOGS_DIR
if [ "${params.testType}" == "Failure" ]; then
- if ( ${powerSwitch} ); then
- export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalDt -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
- else
- export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalDt -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
- fi
- make -C $WORKSPACE/voltha/voltha-system-tests voltha-dt-test || true
+ export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i soak -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
+ make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
fi
"""
}
@@ -152,8 +216,8 @@
sh """
mkdir -p $ROBOT_LOGS_DIR
if [ "${params.testType}" == "Dataplane" ]; then
- export ROBOT_MISC_ARGS="--removekeywords wuks -i dataplaneDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
- make -C $WORKSPACE/voltha/voltha-system-tests voltha-dt-test || true
+ export ROBOT_MISC_ARGS="--removekeywords wuks -i BandwidthProfileUDPDt -i TechProfileDt -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE"
+ make -C $WORKSPACE/voltha-system-tests voltha-dt-test || true
fi
"""
}
@@ -178,14 +242,14 @@
extract_errors_go() {
echo
echo "Error summary for $1:"
- grep '"level":"error"' $WORKSPACE/voltha/kind-voltha/scripts/logger/combined/$1*
+ grep '"level":"error"' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
echo
}
extract_errors_python() {
echo
echo "Error summary for $1:"
- grep 'ERROR' $WORKSPACE/voltha/kind-voltha/scripts/logger/combined/$1*
+ grep 'ERROR' $WORKSPACE/kind-voltha/scripts/logger/combined/$1*
echo
}
@@ -195,7 +259,7 @@
extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
extract_errors_python onos >> $WORKSPACE/error-report.log
- cd $WORKSPACE/voltha/kind-voltha/scripts/logger/combined/
+ cd $WORKSPACE/kind-voltha/scripts/logger/combined/
tar czf $WORKSPACE/container-logs.tgz *
cd $WORKSPACE
diff --git a/jjb/pipeline/voltha-publish.groovy b/jjb/pipeline/voltha-publish.groovy
deleted file mode 100644
index 7af9a53..0000000
--- a/jjb/pipeline/voltha-publish.groovy
+++ /dev/null
@@ -1,85 +0,0 @@
-/* voltha-publish pipeline */
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- stages {
-
- stage('repo') {
- steps {
- checkout(changelog: false, \
- poll: false,
- scm: [$class: 'RepoScm', \
- manifestRepositoryUrl: "${params.manifestUrl}", \
- manifestBranch: "${params.manifestBranch}", \
- manifestGroup: 'voltha', \
- currentBranch: true, \
- destinationDir: 'cord', \
- forceSync: true, \
- resetFirst: true, \
- quiet: true, \
- jobs: 4, \
- showAllChanges: true] \
- )
- }
- }
-
- stage('build'){
- steps {
- sh """
- #!/usr/bin/env bash
-
- pushd cord/incubator/voltha
- if [ "${params.manifestBranch}" == "master" ]
- then
- TAG="latest"
- else
- TAG="${params.manifestBranch}"
- fi
- VOLTHA_BUILD=docker DOCKER_CACHE_ARG=--no-cache TAG=\$TAG make build
- popd
- """
- }
- }
-
- stage('push'){
- steps {
- withDockerRegistry([credentialsId: 'docker-artifact-push-credentials', url: '']) {
- sh """
- #!/usr/bin/env bash
-
- pushd cord/incubator/voltha
- if [ "${params.manifestBranch}" == "master" ]
- then
- TAG="latest"
- else
- TAG="${params.manifestBranch}"
- fi
-
- # Check for SemVer in VERSION (only numbers and dots)
- RELEASETAG=\$(cat voltha/VERSION|tr -d ' '|egrep '^[0-9]+(\\.[0-9]+)*\$'||true)
- if [ "\$RELEASETAG" != "" ]
- then
- VOLTHA_BUILD=docker TAG=\$TAG TARGET_REPOSITORY=voltha/ TARGET_TAG=\$RELEASETAG make push
- else
- VOLTHA_BUILD=docker TAG=\$TAG TARGET_REPOSITORY=voltha/ TARGET_TAG=\$TAG make push
- fi
- popd
- """
- }
- }
- }
- }
-
- post {
- failure {
- emailext (
- subject: "$PROJECT_NAME - Build # $BUILD_NUMBER - $BUILD_STATUS",
- body: "Check console output at $BUILD_URL to view the results.",
- to: "${params.failureEmail}"
- )
- }
- }
-}
diff --git a/jjb/pipeline/voltha-scale-multi-stack.groovy b/jjb/pipeline/voltha-scale-multi-stack.groovy
index 22b1549..8264387 100644
--- a/jjb/pipeline/voltha-scale-multi-stack.groovy
+++ b/jjb/pipeline/voltha-scale-multi-stack.groovy
@@ -101,6 +101,9 @@
cd $WORKSPACE
rm -rf $WORKSPACE/*
+
+ # remove orphaned port-forward from different namespaces
+ ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
"""
}
}
@@ -255,12 +258,6 @@
sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.olt
fi
- if [ ${withMibTemplate} = true ] ; then
- rm -f BBSM-12345123451234512345-00000000000001-v1.json
- wget https://raw.githubusercontent.com/opencord/voltha-openonu-adapter/master/templates/BBSM-12345123451234512345-00000000000001-v1.json
- cat BBSM-12345123451234512345-00000000000001-v1.json | kubectl -n \$INFRA_NS exec -it \$(kubectl -n \$INFRA_NS get pods -l app=etcd | awk 'NR==2{print \$1}') -- etcdctl put service/voltha/omci_mibs/templates/BBSM/12345123451234512345/00000000000001
- fi
-
if [ ${withPcap} = true ] && [ ${volthaStacks} -eq 1 ] ; then
# Start the tcp-dump in ofagent
export OF_AGENT=\$(kubectl -n \$INFRA_NS get pods -l app=ofagent -o name)
@@ -362,6 +359,14 @@
done
fi
done
+ '''
+ // compressing the logs to save space on Jenkins
+ sh '''
+ cd $LOG_FOLDER
+ tar -czf logs.tar.gz *.log
+ rm *.log
+ '''
+ sh '''
if [ ${withPcap} = true ] && [ ${volthaStacks} -eq 1 ]; then
# stop ofAgent tcpdump
@@ -546,7 +551,7 @@
python tests/scale/sizing.py -o $WORKSPACE/plots || true
fi
'''
- archiveArtifacts artifacts: 'kind-voltha/install-*.log,execution-time-*.txt,logs/**/*,RobotLogs/**/*,plots/*,etcd-metrics/*'
+ archiveArtifacts artifacts: 'kind-voltha/install-*.log,execution-time-*.txt,logs/**/*.txt,logs/**/*.tar.gz,RobotLogs/**/*,plots/*,etcd-metrics/*'
}
}
}
diff --git a/jjb/pipeline/voltha-scale-test-2.6.groovy b/jjb/pipeline/voltha-scale-test-2.6.groovy
deleted file mode 100644
index db5107e..0000000
--- a/jjb/pipeline/voltha-scale-test-2.6.groovy
+++ /dev/null
@@ -1,679 +0,0 @@
-// Copyright 2019-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// deploy VOLTHA using kind-voltha and performs a scale test
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 60, unit: 'MINUTES')
- }
- environment {
- JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
- KUBECONFIG="$HOME/.kube/config"
- VOLTCONFIG="$HOME/.volt/config"
- SSHPASS="karaf"
- PATH="$PATH:$WORKSPACE/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- SCHEDULE_ON_CONTROL_NODES="yes"
- FANCY=0
- WITH_SIM_ADAPTERS="no"
- WITH_RADIUS="${withRadius}"
- WITH_BBSIM="yes"
- LEGACY_BBSIM_INDEX="no"
- DEPLOY_K8S="no"
- CONFIG_SADIS="external"
- WITH_KAFKA="kafka.default.svc.cluster.local"
- WITH_ETCD="etcd.default.svc.cluster.local"
- VOLTHA_ETCD_PORT=9999
-
- // install everything in the default namespace
- VOLTHA_NS="default"
- ADAPTER_NS="default"
- INFRA_NS="default"
- BBSIM_NS="default"
-
- // configurable options
- WITH_EAPOL="${withEapol}"
- WITH_DHCP="${withDhcp}"
- WITH_IGMP="${withIgmp}"
- VOLTHA_LOG_LEVEL="${logLevel}"
- NUM_OF_BBSIM="${olts}"
- NUM_OF_OPENONU="${openonuAdapterReplicas}"
- NUM_OF_ONOS="${onosReplicas}"
- NUM_OF_ATOMIX="${atomixReplicas}"
- WITH_PPROF="${withProfiling}"
- EXTRA_HELM_FLAGS="${extraHelmFlags} " // note that the trailing space is required to separate the parameters from appends done later
- VOLTHA_CHART="${volthaChart}"
- VOLTHA_BBSIM_CHART="${bbsimChart}"
- VOLTHA_ADAPTER_OPEN_OLT_CHART="${openoltAdapterChart}"
- VOLTHA_ADAPTER_OPEN_ONU_CHART="${openonuAdapterChart}"
- ONOS_CLASSIC_CHART="${onosChart}"
- RADIUS_CHART="${radiusChart}"
-
- APPS_TO_LOG="etcd kafka onos-onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server"
- LOG_FOLDER="$WORKSPACE/logs"
-
- GERRIT_PROJECT="${GERRIT_PROJECT}"
- }
-
- stages {
- stage ('Cleanup') {
- steps {
- timeout(time: 11, unit: 'MINUTES') {
- sh returnStdout: false, script: """
- helm repo add stable https://charts.helm.sh/stable
- helm repo add onf https://charts.opencord.org
- helm repo add cord https://charts.opencord.org
- helm repo add onos https://charts.onosproject.org
- helm repo add atomix https://charts.atomix.io
- helm repo add bbsim-sadis https://ciena.github.io/bbsim-sadis-server/charts
- helm repo update
-
- # removing ETCD port forward
- P_ID="\$(ps e -ww -A | grep "_TAG=etcd-port-forward" | grep -v grep | awk '{print \$1}')"
- if [ -n "\$P_ID" ]; then
- kill -9 \$P_ID
- fi
-
- NAMESPACES="voltha1 voltha2 infra default"
- for NS in \$NAMESPACES
- do
- for hchart in \$(helm list -n \$NS -q | grep -E -v 'docker-registry|kafkacat');
- do
- echo "Purging chart: \${hchart}"
- helm delete -n \$NS "\${hchart}"
- done
- done
-
- test -e $WORKSPACE/kind-voltha/voltha && cd $WORKSPACE/kind-voltha && ./voltha down
-
- # remove orphaned port-forward from different namespaces
- ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9
-
- cd $WORKSPACE
- rm -rf $WORKSPACE/*
- """
- }
- }
- }
- stage('Clone kind-voltha') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/kind-voltha",
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- }
- }
- stage('Clone voltha-system-tests') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/voltha-system-tests",
- refspec: "${volthaSystemTestsChange}"
- ]],
- branches: [[ name: "${release}", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- script {
- sh(script:"""
- if [ '${volthaSystemTestsChange}' != '' ] ; then
- cd $WORKSPACE/voltha-system-tests;
- git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
- fi
- """)
- }
- }
- }
- stage('Build patch') {
- when {
- expression {
- return params.GERRIT_PROJECT
- }
- }
- steps {
- sh """
- git clone https://\$GERRIT_HOST/\$GERRIT_PROJECT
- cd \$GERRIT_PROJECT
- git fetch https://\$GERRIT_HOST/\$GERRIT_PROJECT \$GERRIT_REFSPEC && git checkout FETCH_HEAD
-
- DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-build
- DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-push
- """
- }
- }
- stage('Deploy common infrastructure') {
- // includes monitoring, kafka, etcd
- steps {
- sh '''
- helm install kafka $HOME/teone/helm-charts/kafka --set replicaCount=${kafkaReplicas} --set persistence.enabled=false \
- --set zookeeper.replicaCount=${kafkaReplicas} --set zookeeper.persistence.enabled=false \
- --set prometheus.kafka.enabled=true,prometheus.operator.enabled=true,prometheus.jmx.enabled=true,prometheus.operator.serviceMonitor.namespace=default
-
- # the ETCD chart use "auth" for resons different than BBsim, so strip that away
- ETCD_FLAGS=$(echo ${extraHelmFlags} | sed -e 's/--set auth=false / /g') | sed -e 's/--set auth=true / /g'
- ETCD_FLAGS+=" --set auth.rbac.enabled=false,persistence.enabled=false,statefulset.replicaCount=${etcdReplicas}"
- ETCD_FLAGS+=" --set memoryMode=${inMemoryEtcdStorage} "
- helm install -f $WORKSPACE/kind-voltha/values.yaml --set replicas=${etcdReplicas} etcd $HOME/teone/helm-charts/etcd $ETCD_FLAGS
-
- if [ ${withMonitoring} = true ] ; then
- helm install nem-monitoring cord/nem-monitoring \
- -f $HOME/voltha-scale/grafana.yaml \
- --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
- --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
- fi
- '''
- }
- }
- stage('Deploy Voltha') {
- steps {
- script {
- sh returnStdout: false, script: """
-
- cd $WORKSPACE/kind-voltha/
-
- export EXTRA_HELM_FLAGS+=' '
-
- # Load the release defaults
- if [ '${release.trim()}' != 'master' ]; then
- source $WORKSPACE/kind-voltha/releases/${release}
- EXTRA_HELM_FLAGS+=" ${extraHelmFlags} "
- fi
-
- # BBSim custom image handling
- if [ '${bbsimImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'bbsim' ]; then
- IFS=: read -r bbsimRepo bbsimTag <<< '${bbsimImg.trim()}'
- EXTRA_HELM_FLAGS+="--set images.bbsim.repository=\$bbsimRepo,images.bbsim.tag=\$bbsimTag "
- fi
-
- # VOLTHA and ofAgent custom image handling
- # NOTE to override the rw-core image in a released version you must set the ofAgent image too
- # TODO split ofAgent and voltha-go
- if [ '${rwCoreImg.trim()}' != '' ] && [ '${ofAgentImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-go' ]; then
- IFS=: read -r rwCoreRepo rwCoreTag <<< '${rwCoreImg.trim()}'
- IFS=: read -r ofAgentRepo ofAgentTag <<< '${ofAgentImg.trim()}'
- EXTRA_HELM_FLAGS+="--set images.rw_core.repository=\$rwCoreRepo,images.rw_core.tag=\$rwCoreTag,images.ofagent.repository=\$ofAgentRepo,images.ofagent.tag=\$ofAgentTag "
- fi
-
- # OpenOLT custom image handling
- if [ '${openoltAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openolt-adapter' ]; then
- IFS=: read -r openoltAdapterRepo openoltAdapterTag <<< '${openoltAdapterImg.trim()}'
- EXTRA_HELM_FLAGS+="--set images.adapter_open_olt.repository=\$openoltAdapterRepo,images.adapter_open_olt.tag=\$openoltAdapterTag "
- fi
-
- # OpenONU custom image handling
- if [ '${openonuAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter' ]; then
- IFS=: read -r openonuAdapterRepo openonuAdapterTag <<< '${openonuAdapterImg.trim()}'
- EXTRA_HELM_FLAGS+="--set images.adapter_open_onu.repository=\$openonuAdapterRepo,images.adapter_open_onu.tag=\$openonuAdapterTag "
- fi
-
- # OpenONU GO custom image handling
- if [ '${openonuAdapterGoImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter-go' ]; then
- IFS=: read -r openonuAdapterGoRepo openonuAdapterGoTag <<< '${openonuAdapterGoImg.trim()}'
- EXTRA_HELM_FLAGS+="--set images.adapter_open_onu_go.repository=\$openonuAdapterGoRepo,images.adapter_open_onu_go.tag=\$openonuAdapterGoTag "
- fi
-
- # ONOS custom image handling
- if [ '${onosImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-onos' ]; then
- IFS=: read -r onosRepo onosTag <<< '${onosImg.trim()}'
- EXTRA_HELM_FLAGS+="--set images.onos.repository=\$onosRepo,images.onos.tag=\$onosTag "
- fi
-
- # set BBSim parameters
- EXTRA_HELM_FLAGS+='--set enablePerf=true,pon=${pons},onu=${onus} '
-
- # disable the securityContext, this is a development cluster
- EXTRA_HELM_FLAGS+='--set securityContext.enabled=false '
-
- # No persistent-volume-claims in Atomix
- EXTRA_HELM_FLAGS+="--set atomix.persistence.enabled=false "
-
- echo "Installing with the following extra arguments:"
- echo $EXTRA_HELM_FLAGS
-
- # if it's newer than voltha-2.4 set the correct BBSIM_CFG
- if [ '${release.trim()}' != 'voltha-2.4' ]; then
- export BBSIM_CFG="$WORKSPACE/kind-voltha/configs/bbsim-sadis-${workflow}.yaml"
- fi
-
- # Use custom built images
-
- if [ '\$GERRIT_PROJECT' == 'voltha-go' ]; then
- EXTRA_HELM_FLAGS+="--set images.rw_core.repository=${dockerRegistry}/voltha/voltha-rw-core,images.rw_core.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'voltha-openolt-adapter' ]; then
- EXTRA_HELM_FLAGS+="--set images.adapter_open_olt.repository=${dockerRegistry}/voltha/voltha-openolt-adapter,images.adapter_open_olt.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter' ]; then
- EXTRA_HELM_FLAGS+="--set images.adapter_open_onu.repository=${dockerRegistry}/voltha/voltha-openonu-adapter,images.adapter_open_onu.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter-go' ]; then
- EXTRA_HELM_FLAGS+="--set images.adapter_open_onu_go.repository=${dockerRegistry}/voltha/voltha-openonu-adapter-go,images.adapter_open_onu_go.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'ofagent-go' ]; then
- EXTRA_HELM_FLAGS+="--set images.ofagent.repository=${dockerRegistry}/voltha/voltha-ofagent-go,images.ofagent.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'voltha-onos' ]; then
- EXTRA_HELM_FLAGS+="--set images.onos.repository=${dockerRegistry}/voltha/voltha-onos,images.onos.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'bbsim' ]; then
- EXTRA_HELM_FLAGS+="--set images.bbsim.repository=${dockerRegistry}/voltha/bbsim,images.bbsim.tag=voltha-scale "
- fi
-
- ./voltha up
-
- # Forward the ETCD port onto $VOLTHA_ETCD_PORT
- _TAG=etcd-port-forward kubectl port-forward --address 0.0.0.0 -n default service/etcd $VOLTHA_ETCD_PORT:2379&
- """
- }
- sh returnStdout: false, script: '''
- # start logging with kail
-
- mkdir -p $LOG_FOLDER
-
- list=($APPS_TO_LOG)
- for app in "${list[@]}"
- do
- echo "Starting logs for: ${app}"
- _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
- done
- '''
- }
- }
- stage('Configuration') {
- steps {
- script {
- sh returnStdout: false, script: """
- #Setting link discovery
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}
-
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true
-
-
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 1000
-
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500
-
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.onosproject
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.opencord
-
-
- kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false
-
- # Set Flows/Ports/Meters poll frequency
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
-
- if [ ${withFlows} = false ]; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.olt
- fi
-
- if [ ${withMibTemplate} = true ] ; then
- rm -f BBSM-12345123451234512345-00000000000001-v1.json
- wget https://raw.githubusercontent.com/opencord/voltha-openonu-adapter/master/templates/BBSM-12345123451234512345-00000000000001-v1.json
- cat BBSM-12345123451234512345-00000000000001-v1.json | kubectl exec -it \$(kubectl get pods |grep etcd | awk 'NR==1{print \$1}') -- etcdctl put service/voltha/omci_mibs/templates/BBSM/12345123451234512345/00000000000001
- fi
-
- if [ ${withPcap} = true ] ; then
- # Start the tcp-dump in ofagent
- export OF_AGENT=\$(kubectl get pods -l app=ofagent -o name)
- kubectl exec \$OF_AGENT -- apk update
- kubectl exec \$OF_AGENT -- apk add tcpdump
- kubectl exec \$OF_AGENT -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
- _TAG=ofagent-tcpdump kubectl exec \$OF_AGENT -- tcpdump -nei eth0 -w out.pcap&
-
- # Start the tcp-dump in radius
- export RADIUS=\$(kubectl get pods -l app=radius -o name)
- kubectl exec \$RADIUS -- apt-get update
- kubectl exec \$RADIUS -- apt-get install -y tcpdump
- _TAG=radius-tcpdump kubectl exec \$RADIUS -- tcpdump -w out.pcap&
-
- # Start the tcp-dump in ONOS
- for i in \$(seq 0 \$ONOSES); do
- INSTANCE="onos-onos-classic-\$i"
- kubectl exec \$INSTANCE -- apt-get update
- kubectl exec \$INSTANCE -- apt-get install -y tcpdump
- kubectl exec \$INSTANCE -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
- _TAG=\$INSTANCE kubectl exec \$INSTANCE -- /usr/bin/tcpdump -nei eth0 port 1812 -w out.pcap&
- done
- fi
- """
- }
- }
- }
- stage('Run Test') {
- steps {
- sh '''
- mkdir -p $WORKSPACE/RobotLogs
- cd $WORKSPACE/voltha-system-tests
- make vst_venv
- '''
- sh '''
- if [ ${withProfiling} = true ] ; then
- mkdir -p $LOG_FOLDER/pprof
- echo $PATH
- #Creating Python script for ONU Detection
- cat << EOF > $WORKSPACE/pprof.sh
-timestamp() {
- date +"%T"
-}
-
-i=0
-while [[ true ]]; do
- ((i++))
- ts=$(timestamp)
- go tool pprof -png http://127.0.0.1:6060/debug/pprof/heap > $LOG_FOLDER/pprof/rw-core-heap-\\$i-\\$ts.png
- go tool pprof -png http://127.0.0.1:6060/debug/pprof/goroutine > $LOG_FOLDER/pprof/rw-core-goroutine-\\$i-\\$ts.png
- curl -o $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof http://127.0.0.1:6060/debug/pprof/profile?seconds=10
- go tool pprof -png $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.png
-
- go tool pprof -png http://127.0.0.1:6061/debug/pprof/heap > $LOG_FOLDER/pprof/openolt-heap-\\$i-\\$ts.png
- go tool pprof -png http://127.0.0.1:6061/debug/pprof/goroutine > $LOG_FOLDER/pprof/openolt-goroutine-\\$i-\\$ts.png
- curl -o $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof http://127.0.0.1:6061/debug/pprof/profile?seconds=10
- go tool pprof -png $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.png
-
- go tool pprof -png http://127.0.0.1:6062/debug/pprof/heap > $LOG_FOLDER/pprof/ofagent-heap-\\$i-\\$ts.png
- go tool pprof -png http://127.0.0.1:6062/debug/pprof/goroutine > $LOG_FOLDER/pprof/ofagent-goroutine-\\$i-\\$ts.png
- curl -o $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof http://127.0.0.1:6062/debug/pprof/profile?seconds=10
- go tool pprof -png $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.png
-
- sleep 10
-done
-EOF
-
- _TAG="pprof"
- _TAG=$_TAG bash $WORKSPACE/pprof.sh &
- fi
- '''
- timeout(time: 15, unit: 'MINUTES') {
- sh '''
- ROBOT_PARAMS="-v olt:${olts} \
- -v pon:${pons} \
- -v onu:${onus} \
- -v workflow:${workflow} \
- -v withEapol:${withEapol} \
- -v withDhcp:${withDhcp} \
- -v withIgmp:${withIgmp} \
- --noncritical non-critical \
- -e teardown "
-
- if [ ${withEapol} = false ] ; then
- ROBOT_PARAMS+="-e authentication "
- fi
-
- if [ ${withDhcp} = false ] ; then
- ROBOT_PARAMS+="-e dhcp "
- fi
-
- if [ ${provisionSubscribers} = false ] ; then
- # if we're not considering subscribers then we don't care about authentication and dhcp
- ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
- fi
-
- if [ ${withFlows} = false ] ; then
- ROBOT_PARAMS+="-i setup -i activation "
- fi
-
- cd $WORKSPACE/voltha-system-tests
- source ./vst_venv/bin/activate
- robot -d $WORKSPACE/RobotLogs \
- $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
- '''
- }
- }
- }
- }
- post {
- always {
- // collect result, done in the "post" step so it's executed even in the
- // event of a timeout in the tests
- sh '''
-
- # stop the kail processes
- list=($APPS_TO_LOG)
- for app in "${list[@]}"
- do
- echo "Stopping logs for: ${app}"
- _TAG="kail-$app"
- P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
- done
-
- if [ ${withPcap} = true ] ; then
- # stop ofAgent tcpdump
- P_ID="\$(ps e -ww -A | grep "_TAG=ofagent-tcpdump" | grep -v grep | awk '{print \$1}')"
- if [ -n "\$P_ID" ]; then
- kill -9 \$P_ID
- fi
-
- # stop radius tcpdump
- P_ID="\$(ps e -ww -A | grep "_TAG=radius-tcpdump" | grep -v grep | awk '{print \$1}')"
- if [ -n "\$P_ID" ]; then
- kill -9 \$P_ID
- fi
-
- # stop onos tcpdump
- LIMIT=$(($NUM_OF_ONOS - 1))
- for i in $(seq 0 $LIMIT); do
- INSTANCE="onos-onos-classic-$i"
- P_ID="\$(ps e -ww -A | grep "_TAG=$INSTANCE" | grep -v grep | awk '{print \$1}')"
- if [ -n "\$P_ID" ]; then
- kill -9 \$P_ID
- fi
- done
-
- # copy the file
- export OF_AGENT=$(kubectl get pods -l app=ofagent | awk 'NR==2{print $1}') || true
- kubectl cp $OF_AGENT:out.pcap $LOG_FOLDER/ofagent.pcap || true
-
- export RADIUS=$(kubectl get pods -l app=radius | awk 'NR==2{print $1}') || true
- kubectl cp $RADIUS:out.pcap $LOG_FOLDER/radius.pcap || true
-
- LIMIT=$(($NUM_OF_ONOS - 1))
- for i in $(seq 0 $LIMIT); do
- INSTANCE="onos-onos-classic-$i"
- kubectl cp $INSTANCE:out.pcap $LOG_FOLDER/$INSTANCE.pcap || true
- done
- fi
-
- cd voltha-system-tests
- source ./vst_venv/bin/activate
- python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time.txt || true
- cat $WORKSPACE/execution-time.txt
- '''
- sh '''
- if [ ${withProfiling} = true ] ; then
- _TAG="pprof"
- P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
- fi
- '''
- plot([
- csvFileName: 'scale-test.csv',
- csvSeries: [
- [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- ],
- group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
- ])
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: 'RobotLogs/log.html',
- otherFiles: '',
- outputFileName: 'RobotLogs/output.xml',
- outputPath: '.',
- passThreshold: 100,
- reportFileName: 'RobotLogs/report.html',
- unstableThreshold: 0]);
- // get all the logs from kubernetes PODs
- sh returnStdout: false, script: '''
-
- # store information on running charts
- helm ls > $LOG_FOLDER/helm-list.txt || true
-
- # store information on the running pods
- kubectl get pods -o wide > $LOG_FOLDER/pods.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true
-
- # copy the ONOS logs directly from the container to avoid the color codes
- printf '%s\n' $(kubectl get pods -l app=onos-onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
-
- # get radius logs out of the container
- kubectl cp $(kubectl get pods -l app=radius --no-headers | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER//radius.log || true
- '''
- // dump all the BBSim(s) ONU information
- sh '''
- BBSIM_IDS=$(kubectl get pods | grep bbsim | grep -v server | awk '{print $1}')
- IDS=($BBSIM_IDS)
-
- for bbsim in "${IDS[@]}"
- do
- kubectl exec -t $bbsim -- bbsimctl onu list > $LOG_FOLDER/$bbsim-device-list.txt || true
- kubectl exec -t $bbsim -- bbsimctl service list > $LOG_FOLDER/$bbsim-service-list.txt || true
- done
- '''
- // get DHCP server stats
- sh '''
- BBSIM_IDS=$(kubectl get pods | grep bbsim | grep -v server | awk '{print $1}')
- IDS=($BBSIM_IDS)
-
- for bbsim in "${IDS[@]}"
- do
- kubectl exec -t $bbsim -- dhcpd -lf /var/lib/dhcp/dhcpd.leases -play /tmp/dhcplog 2>&1 | tee $LOG_FOLDER/$bbsim-dhcp-replay.txt || true
- kubectl cp $bbsim:/tmp/dhcplog $LOG_FOLDER/$bbsim-dhcp-logs || true
- kubectl cp $bbsim:/var/lib/dhcp/dhcpd.leases $LOG_FOLDER/$bbsim-dhcp-leases || true
- done
- '''
- // get ONOS debug infos
- sh '''
-
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt || true
-
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt || true
-
- if [ ${withFlows} = true ] ; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt || true
- fi
-
- if [ ${provisionSubscribers} = true ]; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt || true
- fi
-
- if [ ${withEapol} = true ] ; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt || true
- fi
-
- if [ ${withDhcp} = true ] ; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt || true
- fi
- '''
- // collect etcd metrics
- sh '''
- mkdir -p $WORKSPACE/etcd-metrics
- curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
- curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
- curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
- curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
- curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time-sum.json || true
- curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time-bucket.json || true
- curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_wal_fsync_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-wal-fsync-time-bucket.json || true
- curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_network_peer_round_trip_time_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-network-peer-round-trip-time-seconds.json || true
-
- '''
- // get VOLTHA debug infos
- script {
- try {
- sh '''
- voltctl -m 8MB device list -o json > $LOG_FOLDER/device-list.json || true
- python -m json.tool $LOG_FOLDER/device-list.json > $LOG_FOLDER/voltha-devices-list.json || true
- rm $LOG_FOLDER/device-list.json || true
- voltctl -m 8MB device list > $LOG_FOLDER/voltha-devices-list.txt || true
-
- printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device flows # > $LOG_FOLDER/voltha-device-flows-#.txt" || true
- printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/voltha-device-ports-#.txt" || true
-
- printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice flows # > $LOG_FOLDER/voltha-logicaldevice-flows-#.txt" || true
- printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice port list # > $LOG_FOLDER/voltha-logicaldevice-ports-#.txt" || true
- '''
- } catch(e) {
- sh '''
- echo "Can't get device list from voltclt"
- '''
- }
- }
- // get cpu usage by container
- sh '''
- if [ ${withMonitoring} = true ] ; then
- cd $WORKSPACE/voltha-system-tests
- source ./vst_venv/bin/activate
- sleep 60 # we have to wait for prometheus to collect all the information
- python tests/scale/sizing.py -o $WORKSPACE/plots || true
- fi
- '''
- archiveArtifacts artifacts: 'kind-voltha/install-minimal.log,execution-time.txt,logs/*,logs/pprof/*,RobotLogs/*,plots/*,etcd-metrics/*'
- }
- }
-}
diff --git a/jjb/pipeline/voltha-scale-test-etcd-kafka-bitnami.groovy b/jjb/pipeline/voltha-scale-test-etcd-kafka-bitnami.groovy
deleted file mode 100644
index 5327012..0000000
--- a/jjb/pipeline/voltha-scale-test-etcd-kafka-bitnami.groovy
+++ /dev/null
@@ -1,689 +0,0 @@
-// Copyright 2019-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// deploy VOLTHA using kind-voltha and performs a scale test
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 60, unit: 'MINUTES')
- }
- environment {
- JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
- KUBECONFIG="$HOME/.kube/config"
- VOLTCONFIG="$HOME/.volt/config"
- SSHPASS="karaf"
- PATH="$PATH:$WORKSPACE/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- SCHEDULE_ON_CONTROL_NODES="yes"
- FANCY=0
- WITH_SIM_ADAPTERS="no"
- WITH_RADIUS="${withRadius}"
- WITH_BBSIM="yes"
- LEGACY_BBSIM_INDEX="no"
- DEPLOY_K8S="no"
- CONFIG_SADIS="external"
- WITH_KAFKA="kafka.default.svc.cluster.local"
- WITH_ETCD="etcd.default.svc.cluster.local"
- VOLTHA_ETCD_PORT=9999
-
- // install everything in the default namespace
- VOLTHA_NS="default"
- ADAPTER_NS="default"
- INFRA_NS="default"
- BBSIM_NS="default"
-
- // configurable options
- WITH_EAPOL="${withEapol}"
- WITH_DHCP="${withDhcp}"
- WITH_IGMP="${withIgmp}"
- VOLTHA_LOG_LEVEL="${logLevel}"
- NUM_OF_BBSIM="${olts}"
- NUM_OF_OPENONU="${openonuAdapterReplicas}"
- NUM_OF_ONOS="${onosReplicas}"
- NUM_OF_ATOMIX="${atomixReplicas}"
- WITH_PPROF="${withProfiling}"
- EXTRA_HELM_FLAGS="${extraHelmFlags} " // note that the trailing space is required to separate the parameters from appends done later
- VOLTHA_CHART="${volthaChart}"
- VOLTHA_BBSIM_CHART="${bbsimChart}"
- VOLTHA_ADAPTER_OPEN_OLT_CHART="${openoltAdapterChart}"
- VOLTHA_ADAPTER_OPEN_ONU_CHART="${openonuAdapterChart}"
- ONOS_CLASSIC_CHART="${onosChart}"
- RADIUS_CHART="${radiusChart}"
-
- APPS_TO_LOG="etcd kafka onos-onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server"
- LOG_FOLDER="$WORKSPACE/logs"
-
- GERRIT_PROJECT="${GERRIT_PROJECT}"
- }
-
- stages {
- stage ('Cleanup') {
- steps {
- timeout(time: 11, unit: 'MINUTES') {
- sh returnStdout: false, script: """
- helm repo add stable https://charts.helm.sh/stable
- helm repo add onf https://charts.opencord.org
- helm repo add cord https://charts.opencord.org
- helm repo add onos https://charts.onosproject.org
- helm repo add atomix https://charts.atomix.io
- helm repo add bbsim-sadis https://ciena.github.io/bbsim-sadis-server/charts
- helm repo update
-
- # removing ETCD port forward
- P_ID="\$(ps e -ww -A | grep "_TAG=etcd-port-forward" | grep -v grep | awk '{print \$1}')"
- if [ -n "\$P_ID" ]; then
- kill -9 \$P_ID
- fi
-
- NAMESPACES="voltha1 voltha2 infra default"
- for NS in \$NAMESPACES
- do
- for hchart in \$(helm list -n \$NS -q | grep -E -v 'docker-registry|kafkacat');
- do
- echo "Purging chart: \${hchart}"
- helm delete -n \$NS "\${hchart}"
- done
- done
-
- test -e $WORKSPACE/kind-voltha/voltha && cd $WORKSPACE/kind-voltha && ./voltha down
-
- # remove orphaned port-forward from different namespaces
- ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9
-
- cd $WORKSPACE
- rm -rf $WORKSPACE/*
- """
- }
- }
- }
- stage('Clone kind-voltha') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/kind-voltha",
- refspec: "${kindVolthaChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- script {
- sh(script:"""
- if [ '${kindVolthaChange}' != '' ] ; then
- cd $WORKSPACE/kind-voltha;
- git fetch https://gerrit.opencord.org/kind-voltha ${kindVolthaChange} && git checkout FETCH_HEAD
- fi
- """)
- }
- }
- }
- stage('Clone voltha-system-tests') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/voltha-system-tests",
- refspec: "${volthaSystemTestsChange}"
- ]],
- branches: [[ name: "${release}", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- script {
- sh(script:"""
- if [ '${volthaSystemTestsChange}' != '' ] ; then
- cd $WORKSPACE/voltha-system-tests;
- git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
- fi
- """)
- }
- }
- }
- stage('Build patch') {
- when {
- expression {
- return params.GERRIT_PROJECT
- }
- }
- steps {
- sh """
- git clone https://\$GERRIT_HOST/\$GERRIT_PROJECT
- cd \$GERRIT_PROJECT
- git fetch https://\$GERRIT_HOST/\$GERRIT_PROJECT \$GERRIT_REFSPEC && git checkout FETCH_HEAD
-
- DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-build
- DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-push
- """
- }
- }
- stage('Deploy common infrastructure') {
- // includes monitoring, kafka, etcd
- steps {
- sh '''
- helm repo add bitnami https://charts.bitnami.com/bitnami
- helm install kafka bitnami/kafka --set replicaCount=${kafkaReplicas} --set persistence.enabled=false \
- --set zookeeper.replicaCount=${kafkaReplicas} --set zookeeper.persistence.enabled=false \
- --set prometheus.kafka.enabled=true,prometheus.operator.enabled=true,prometheus.jmx.enabled=true,prometheus.operator.serviceMonitor.namespace=default
-
- # the ETCD chart use "auth" for resons different than BBsim, so strip that away
- helm repo add bitnami https://charts.bitnami.com/bitnami
- ETCD_FLAGS=$(echo ${extraHelmFlags} | sed -e 's/--set auth=false / /g') | sed -e 's/--set auth=true / /g'
- ETCD_FLAGS+=" --set auth.rbac.enabled=false,persistence.enabled=false,statefulset.replicaCount=${etcdReplicas}"
- ETCD_FLAGS+=" --set memoryMode=${inMemoryEtcdStorage} "
- helm install -f $WORKSPACE/kind-voltha/values.yaml --set replicas=${etcdReplicas} etcd bitnami/etcd $ETCD_FLAGS
-
- if [ ${withMonitoring} = true ] ; then
- helm install nem-monitoring cord/nem-monitoring \
- -f $HOME/voltha-scale/grafana.yaml \
- --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
- --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
- fi
- '''
- }
- }
- stage('Deploy Voltha') {
- steps {
- script {
- sh returnStdout: false, script: """
-
- cd $WORKSPACE/kind-voltha/
-
- export EXTRA_HELM_FLAGS+=' '
-
- # Load the release defaults
- if [ '${release.trim()}' != 'master' ]; then
- source $WORKSPACE/kind-voltha/releases/${release}
- EXTRA_HELM_FLAGS+=" ${extraHelmFlags} "
- fi
-
- # BBSim custom image handling
- if [ '${bbsimImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'bbsim' ]; then
- IFS=: read -r bbsimRepo bbsimTag <<< '${bbsimImg.trim()}'
- EXTRA_HELM_FLAGS+="--set images.bbsim.repository=\$bbsimRepo,images.bbsim.tag=\$bbsimTag "
- fi
-
- # VOLTHA and ofAgent custom image handling
- # NOTE to override the rw-core image in a released version you must set the ofAgent image too
- # TODO split ofAgent and voltha-go
- if [ '${rwCoreImg.trim()}' != '' ] && [ '${ofAgentImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-go' ]; then
- IFS=: read -r rwCoreRepo rwCoreTag <<< '${rwCoreImg.trim()}'
- IFS=: read -r ofAgentRepo ofAgentTag <<< '${ofAgentImg.trim()}'
- EXTRA_HELM_FLAGS+="--set images.rw_core.repository=\$rwCoreRepo,images.rw_core.tag=\$rwCoreTag,images.ofagent.repository=\$ofAgentRepo,images.ofagent.tag=\$ofAgentTag "
- fi
-
- # OpenOLT custom image handling
- if [ '${openoltAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openolt-adapter' ]; then
- IFS=: read -r openoltAdapterRepo openoltAdapterTag <<< '${openoltAdapterImg.trim()}'
- EXTRA_HELM_FLAGS+="--set images.adapter_open_olt.repository=\$openoltAdapterRepo,images.adapter_open_olt.tag=\$openoltAdapterTag "
- fi
-
- # OpenONU custom image handling
- if [ '${openonuAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter' ]; then
- IFS=: read -r openonuAdapterRepo openonuAdapterTag <<< '${openonuAdapterImg.trim()}'
- EXTRA_HELM_FLAGS+="--set images.adapter_open_onu.repository=\$openonuAdapterRepo,images.adapter_open_onu.tag=\$openonuAdapterTag "
- fi
-
- # OpenONU GO custom image handling
- if [ '${openonuAdapterGoImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter-go' ]; then
- IFS=: read -r openonuAdapterGoRepo openonuAdapterGoTag <<< '${openonuAdapterGoImg.trim()}'
- EXTRA_HELM_FLAGS+="--set images.adapter_open_onu_go.repository=\$openonuAdapterGoRepo,images.adapter_open_onu_go.tag=\$openonuAdapterGoTag "
- fi
-
- # ONOS custom image handling
- if [ '${onosImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-onos' ]; then
- IFS=: read -r onosRepo onosTag <<< '${onosImg.trim()}'
- EXTRA_HELM_FLAGS+="--set images.onos.repository=\$onosRepo,images.onos.tag=\$onosTag "
- fi
-
- # set BBSim parameters
- EXTRA_HELM_FLAGS+='--set enablePerf=true,pon=${pons},onu=${onus} '
-
- # disable the securityContext, this is a development cluster
- EXTRA_HELM_FLAGS+='--set securityContext.enabled=false '
-
- # No persistent-volume-claims in Atomix
- EXTRA_HELM_FLAGS+="--set atomix.persistence.enabled=false "
-
- echo "Installing with the following extra arguments:"
- echo $EXTRA_HELM_FLAGS
-
- # if it's newer than voltha-2.4 set the correct BBSIM_CFG
- if [ '${release.trim()}' != 'voltha-2.4' ]; then
- export BBSIM_CFG="$WORKSPACE/kind-voltha/configs/bbsim-sadis-${workflow}.yaml"
- fi
-
- # Use custom built images
-
- if [ '\$GERRIT_PROJECT' == 'voltha-go' ]; then
- EXTRA_HELM_FLAGS+="--set images.rw_core.repository=${dockerRegistry}/voltha/voltha-rw-core,images.rw_core.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'voltha-openolt-adapter' ]; then
- EXTRA_HELM_FLAGS+="--set images.adapter_open_olt.repository=${dockerRegistry}/voltha/voltha-openolt-adapter,images.adapter_open_olt.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter' ]; then
- EXTRA_HELM_FLAGS+="--set images.adapter_open_onu.repository=${dockerRegistry}/voltha/voltha-openonu-adapter,images.adapter_open_onu.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter-go' ]; then
- EXTRA_HELM_FLAGS+="--set images.adapter_open_onu_go.repository=${dockerRegistry}/voltha/voltha-openonu-adapter-go,images.adapter_open_onu_go.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'ofagent-go' ]; then
- EXTRA_HELM_FLAGS+="--set images.ofagent.repository=${dockerRegistry}/voltha/voltha-ofagent-go,images.ofagent.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'voltha-onos' ]; then
- EXTRA_HELM_FLAGS+="--set images.onos.repository=${dockerRegistry}/voltha/voltha-onos,images.onos.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'bbsim' ]; then
- EXTRA_HELM_FLAGS+="--set images.bbsim.repository=${dockerRegistry}/voltha/bbsim,images.bbsim.tag=voltha-scale "
- fi
-
- ./voltha up
-
- # Forward the ETCD port onto $VOLTHA_ETCD_PORT
- _TAG=etcd-port-forward kubectl port-forward --address 0.0.0.0 -n default service/etcd $VOLTHA_ETCD_PORT:2379&
- """
- }
- sh returnStdout: false, script: '''
- # start logging with kail
-
- mkdir -p $LOG_FOLDER
-
- list=($APPS_TO_LOG)
- for app in "${list[@]}"
- do
- echo "Starting logs for: ${app}"
- _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
- done
-
- _TAG=kail-etcd-bintami kail -l app.kubernetes.io/name=etcd --since 1h > $LOG_FOLDER/etcd-bitnami.log&
-
- '''
- // bbsim-sadis server takes a while to cache the subscriber entries
- // wait for that before starting the tests
- sleep(120)
- }
- }
- stage('Configuration') {
- steps {
- script {
- sh returnStdout: false, script: """
- kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false
-
- #Setting link discovery
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 1000
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500
-
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.onosproject
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.opencord
-
- # Set Flows/Ports/Meters poll frequency
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
-
- if [ ${withFlows} = false ]; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.olt
- fi
-
- if [ ${withMibTemplate} = true ] ; then
- rm -f BBSM-12345123451234512345-00000000000001-v1.json
- wget https://raw.githubusercontent.com/opencord/voltha-openonu-adapter/master/templates/BBSM-12345123451234512345-00000000000001-v1.json
- cat BBSM-12345123451234512345-00000000000001-v1.json | kubectl exec -i \$(kubectl get pods |grep etcd | awk 'NR==1{print \$1}') -- etcdctl put service/voltha/omci_mibs/templates/BBSM/12345123451234512345/00000000000001
- fi
-
- if [ ${withPcap} = true ] ; then
- # Start the tcp-dump in ofagent
- export OF_AGENT=\$(kubectl get pods -l app=ofagent -o name)
- kubectl exec \$OF_AGENT -- apk update
- kubectl exec \$OF_AGENT -- apk add tcpdump
- kubectl exec \$OF_AGENT -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
- _TAG=ofagent-tcpdump kubectl exec \$OF_AGENT -- tcpdump -nei eth0 -w out.pcap&
-
- # Start the tcp-dump in radius
- export RADIUS=\$(kubectl get pods -l app=radius -o name)
- kubectl exec \$RADIUS -- apt-get update
- kubectl exec \$RADIUS -- apt-get install -y tcpdump
- _TAG=radius-tcpdump kubectl exec \$RADIUS -- tcpdump -w out.pcap&
-
- # Start the tcp-dump in ONOS
- for i in \$(seq 0 \$ONOSES); do
- INSTANCE="onos-onos-classic-\$i"
- kubectl exec \$INSTANCE -- apt-get update
- kubectl exec \$INSTANCE -- apt-get install -y tcpdump
- kubectl exec \$INSTANCE -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
- _TAG=\$INSTANCE kubectl exec \$INSTANCE -- /usr/bin/tcpdump -nei eth0 port 1812 -w out.pcap&
- done
- fi
- """
- }
- }
- }
- stage('Run Test') {
- steps {
- sh '''
- mkdir -p $WORKSPACE/RobotLogs
- cd $WORKSPACE/voltha-system-tests
- make vst_venv
- '''
- sh '''
- if [ ${withProfiling} = true ] ; then
- mkdir -p $LOG_FOLDER/pprof
- echo $PATH
- #Creating Python script for ONU Detection
- cat << EOF > $WORKSPACE/pprof.sh
-timestamp() {
- date +"%T"
-}
-
-i=0
-while [[ true ]]; do
- ((i++))
- ts=$(timestamp)
- go tool pprof -png http://127.0.0.1:6060/debug/pprof/heap > $LOG_FOLDER/pprof/rw-core-heap-\\$i-\\$ts.png
- go tool pprof -png http://127.0.0.1:6060/debug/pprof/goroutine > $LOG_FOLDER/pprof/rw-core-goroutine-\\$i-\\$ts.png
- curl -o $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof http://127.0.0.1:6060/debug/pprof/profile?seconds=10
- go tool pprof -png $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.png
-
- go tool pprof -png http://127.0.0.1:6061/debug/pprof/heap > $LOG_FOLDER/pprof/openolt-heap-\\$i-\\$ts.png
- go tool pprof -png http://127.0.0.1:6061/debug/pprof/goroutine > $LOG_FOLDER/pprof/openolt-goroutine-\\$i-\\$ts.png
- curl -o $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof http://127.0.0.1:6061/debug/pprof/profile?seconds=10
- go tool pprof -png $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.png
-
- go tool pprof -png http://127.0.0.1:6062/debug/pprof/heap > $LOG_FOLDER/pprof/ofagent-heap-\\$i-\\$ts.png
- go tool pprof -png http://127.0.0.1:6062/debug/pprof/goroutine > $LOG_FOLDER/pprof/ofagent-goroutine-\\$i-\\$ts.png
- curl -o $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof http://127.0.0.1:6062/debug/pprof/profile?seconds=10
- go tool pprof -png $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.png
-
- sleep 10
-done
-EOF
-
- _TAG="pprof"
- _TAG=$_TAG bash $WORKSPACE/pprof.sh &
- fi
- '''
- timeout(time: 15, unit: 'MINUTES') {
- sh '''
- ROBOT_PARAMS="-v olt:${olts} \
- -v pon:${pons} \
- -v onu:${onus} \
- -v workflow:${workflow} \
- -v withEapol:${withEapol} \
- -v withDhcp:${withDhcp} \
- -v withIgmp:${withIgmp} \
- --noncritical non-critical \
- -e teardown "
-
- if [ ${withEapol} = false ] ; then
- ROBOT_PARAMS+="-e authentication "
- fi
-
- if [ ${withDhcp} = false ] ; then
- ROBOT_PARAMS+="-e dhcp "
- fi
-
- if [ ${provisionSubscribers} = false ] ; then
- # if we're not considering subscribers then we don't care about authentication and dhcp
- ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
- fi
-
- if [ ${withFlows} = false ] ; then
- ROBOT_PARAMS+="-i setup -i activation "
- fi
-
- cd $WORKSPACE/voltha-system-tests
- source ./vst_venv/bin/activate
- robot -d $WORKSPACE/RobotLogs \
- $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
- '''
- }
- }
- }
- }
- post {
- always {
- // collect result, done in the "post" step so it's executed even in the
- // event of a timeout in the tests
- sh '''
-
- # stop the kail processes
- list=($APPS_TO_LOG)
- for app in "${list[@]}"
- do
- echo "Stopping logs for: ${app}"
- _TAG="kail-$app"
- P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
- done
-
- if [ ${withPcap} = true ] ; then
- # stop ofAgent tcpdump
- P_ID="\$(ps e -ww -A | grep "_TAG=ofagent-tcpdump" | grep -v grep | awk '{print \$1}')"
- if [ -n "\$P_ID" ]; then
- kill -9 \$P_ID
- fi
-
- # stop radius tcpdump
- P_ID="\$(ps e -ww -A | grep "_TAG=radius-tcpdump" | grep -v grep | awk '{print \$1}')"
- if [ -n "\$P_ID" ]; then
- kill -9 \$P_ID
- fi
-
- # stop onos tcpdump
- LIMIT=$(($NUM_OF_ONOS - 1))
- for i in $(seq 0 $LIMIT); do
- INSTANCE="onos-onos-classic-$i"
- P_ID="\$(ps e -ww -A | grep "_TAG=$INSTANCE" | grep -v grep | awk '{print \$1}')"
- if [ -n "\$P_ID" ]; then
- kill -9 \$P_ID
- fi
- done
-
- # copy the file
- export OF_AGENT=$(kubectl get pods -l app=ofagent | awk 'NR==2{print $1}') || true
- kubectl cp $OF_AGENT:out.pcap $LOG_FOLDER/ofagent.pcap || true
-
- export RADIUS=$(kubectl get pods -l app=radius | awk 'NR==2{print $1}') || true
- kubectl cp $RADIUS:out.pcap $LOG_FOLDER/radius.pcap || true
-
- LIMIT=$(($NUM_OF_ONOS - 1))
- for i in $(seq 0 $LIMIT); do
- INSTANCE="onos-onos-classic-$i"
- kubectl cp $INSTANCE:out.pcap $LOG_FOLDER/$INSTANCE.pcap || true
- done
- fi
-
- cd voltha-system-tests
- source ./vst_venv/bin/activate
- python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time.txt || true
- cat $WORKSPACE/execution-time.txt
- '''
- sh '''
- if [ ${withProfiling} = true ] ; then
- _TAG="pprof"
- P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
- fi
- '''
- plot([
- csvFileName: 'scale-test.csv',
- csvSeries: [
- [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- ],
- group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
- ])
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: 'RobotLogs/log.html',
- otherFiles: '',
- outputFileName: 'RobotLogs/output.xml',
- outputPath: '.',
- passThreshold: 100,
- reportFileName: 'RobotLogs/report.html',
- unstableThreshold: 0]);
- // get all the logs from kubernetes PODs
- sh returnStdout: false, script: '''
-
- # store information on running charts
- helm ls > $LOG_FOLDER/helm-list.txt || true
-
- # store information on the running pods
- kubectl get pods -o wide > $LOG_FOLDER/pods.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true
-
- # copy the ONOS logs directly from the container to avoid the color codes
- printf '%s\n' $(kubectl get pods -l app=onos-onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
-
- # get radius logs out of the container
- kubectl cp $(kubectl get pods -l app=radius --no-headers | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER//radius.log || true
- '''
- // dump all the BBSim(s) ONU information
- sh '''
- BBSIM_IDS=$(kubectl get pods | grep bbsim | grep -v server | awk '{print $1}')
- IDS=($BBSIM_IDS)
-
- for bbsim in "${IDS[@]}"
- do
- kubectl exec -t $bbsim -- bbsimctl onu list > $LOG_FOLDER/$bbsim-device-list.txt || true
- kubectl exec -t $bbsim -- bbsimctl service list > $LOG_FOLDER/$bbsim-service-list.txt || true
- done
- '''
- // get DHCP server stats
- sh '''
- BBSIM_IDS=$(kubectl get pods | grep bbsim | grep -v server | awk '{print $1}')
- IDS=($BBSIM_IDS)
-
- for bbsim in "${IDS[@]}"
- do
- kubectl exec -t $bbsim -- dhcpd -lf /var/lib/dhcp/dhcpd.leases -play /tmp/dhcplog 2>&1 | tee $LOG_FOLDER/$bbsim-dhcp-replay.txt || true
- kubectl cp $bbsim:/tmp/dhcplog $LOG_FOLDER/$bbsim-dhcp-logs || true
- kubectl cp $bbsim:/var/lib/dhcp/dhcpd.leases $LOG_FOLDER/$bbsim-dhcp-leases || true
- done
- '''
- // get ONOS debug infos
- sh '''
-
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt || true
-
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt || true
-
- if [ ${withFlows} = true ] ; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt || true
- fi
-
- if [ ${provisionSubscribers} = true ]; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt || true
- fi
-
- if [ ${withEapol} = true ] ; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt || true
- fi
-
- if [ ${withDhcp} = true ] ; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt || true
- fi
- '''
- // collect etcd metrics
- sh '''
- mkdir -p $WORKSPACE/etcd-metrics
- curl -s -X GET -G http://10.90.0.201:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
- curl -s -X GET -G http://10.90.0.201:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
- curl -s -X GET -G http://10.90.0.201:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
- curl -s -X GET -G http://10.90.0.201:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time-sum.json || true
- curl -s -X GET -G http://10.90.0.201:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time-bucket.json || true
- curl -s -X GET -G http://10.90.0.201:31301/api/v1/query --data-urlencode 'query=etcd_disk_wal_fsync_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-wal-fsync-time-bucket.json || true
- curl -s -X GET -G http://10.90.0.201:31301/api/v1/query --data-urlencode 'query=etcd_network_peer_round_trip_time_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-network-peer-round-trip-time-seconds.json || true
- '''
- // get VOLTHA debug infos
- script {
- try {
- sh '''
- voltctl -m 8MB device list -o json > $LOG_FOLDER/device-list.json || true
- python -m json.tool $LOG_FOLDER/device-list.json > $LOG_FOLDER/voltha-devices-list.json || true
- rm $LOG_FOLDER/device-list.json || true
- voltctl -m 8MB device list > $LOG_FOLDER/voltha-devices-list.txt || true
-
- printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device flows # > $LOG_FOLDER/voltha-device-flows-#.txt" || true
- printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/voltha-device-ports-#.txt" || true
-
- printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice flows # > $LOG_FOLDER/voltha-logicaldevice-flows-#.txt" || true
- printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice port list # > $LOG_FOLDER/voltha-logicaldevice-ports-#.txt" || true
- '''
- } catch(e) {
- sh '''
- echo "Can't get device list from voltclt"
- '''
- }
- }
- // get cpu usage by container
- sh '''
- if [ ${withMonitoring} = true ] ; then
- cd $WORKSPACE/voltha-system-tests
- source ./vst_venv/bin/activate
- sleep 60 # we have to wait for prometheus to collect all the information
- python tests/scale/sizing.py -o $WORKSPACE/plots || true
- fi
- '''
- archiveArtifacts artifacts: 'kind-voltha/install-minimal.log,execution-time.txt,logs/*,logs/pprof/*,RobotLogs/*,plots/*,etcd-metrics/*'
- }
- }
-}
diff --git a/jjb/pipeline/voltha-scale-test.groovy b/jjb/pipeline/voltha-scale-test.groovy
deleted file mode 100644
index 7959f48..0000000
--- a/jjb/pipeline/voltha-scale-test.groovy
+++ /dev/null
@@ -1,704 +0,0 @@
-// Copyright 2019-present Open Networking Foundation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// deploy VOLTHA and performs a scale test
-
-pipeline {
-
- /* no label, executor is determined by JJB */
- agent {
- label "${params.buildNode}"
- }
- options {
- timeout(time: 60, unit: 'MINUTES')
- }
- environment {
- JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
- KUBECONFIG="$HOME/.kube/config"
- VOLTCONFIG="$HOME/.volt/config"
- SSHPASS="karaf"
- // PATH="$PATH:$WORKSPACE/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
- // SCHEDULE_ON_CONTROL_NODES="yes"
- // FANCY=0
- // WITH_SIM_ADAPTERS="no"
- // WITH_RADIUS="${withRadius}"
- // WITH_BBSIM="yes"
- // LEGACY_BBSIM_INDEX="no"
- // DEPLOY_K8S="no"
- // CONFIG_SADIS="external"
- // WITH_KAFKA="kafka.default.svc.cluster.local"
- // WITH_ETCD="etcd.default.svc.cluster.local"
- // VOLTHA_ETCD_PORT=9999
-
- // configurable options
- // WITH_EAPOL="${withEapol}"
- // WITH_DHCP="${withDhcp}"
- // WITH_IGMP="${withIgmp}"
- VOLTHA_LOG_LEVEL="${logLevel}"
- NUM_OF_BBSIM="${olts}"
- NUM_OF_OPENONU="${openonuAdapterReplicas}"
- NUM_OF_ONOS="${onosReplicas}"
- NUM_OF_ATOMIX="${atomixReplicas}"
- // WITH_PPROF="${withProfiling}"
- EXTRA_HELM_FLAGS="${extraHelmFlags} " // note that the trailing space is required to separate the parameters from appends done later
- // VOLTHA_CHART="${volthaChart}"
- // VOLTHA_BBSIM_CHART="${bbsimChart}"
- // VOLTHA_ADAPTER_OPEN_OLT_CHART="${openoltAdapterChart}"
- // VOLTHA_ADAPTER_OPEN_ONU_CHART="${openonuAdapterChart}"
- // ONOS_CLASSIC_CHART="${onosChart}"
- // RADIUS_CHART="${radiusChart}"
-
- APPS_TO_LOG="etcd kafka onos-onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server"
- LOG_FOLDER="$WORKSPACE/logs"
-
- GERRIT_PROJECT="${GERRIT_PROJECT}"
- }
-
- stages {
- stage ('Cleanup') {
- steps {
- timeout(time: 11, unit: 'MINUTES') {
- sh returnStdout: false, script: '''
- helm repo add onf https://charts.opencord.org
- helm repo update
-
- NAMESPACES="voltha1 voltha2 infra default"
- for NS in $NAMESPACES
- do
- for hchart in $(helm list -n $NS -q | grep -E -v 'docker-registry|kafkacat');
- do
- echo "Purging chart: ${hchart}"
- helm delete -n $NS "${hchart}"
- done
- done
-
- # wait for pods to be removed
- echo -ne "\nWaiting for PODs to be removed..."
- PODS=$(kubectl get pods --all-namespaces --no-headers | grep -v -E "kube|cattle|registry" | wc -l)
- while [[ $PODS != 0 ]]; do
- sleep 5
- echo -ne "."
- PODS=$(kubectl get pods --all-namespaces --no-headers | grep -v -E "kube|cattle|registry" | wc -l)
- done
-
- # remove orphaned port-forward from different namespaces
- ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9
-
- cd $WORKSPACE
- rm -rf $WORKSPACE/*
- '''
- }
- }
- }
- stage('Clone voltha-system-tests') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/voltha-system-tests",
- refspec: "${volthaSystemTestsChange}"
- ]],
- branches: [[ name: "${release}", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- script {
- sh(script:"""
- if [ '${volthaSystemTestsChange}' != '' ] ; then
- cd $WORKSPACE/voltha-system-tests;
- git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
- fi
- """)
- }
- }
- }
- stage('Clone voltha-helm-charts') {
- steps {
- checkout([
- $class: 'GitSCM',
- userRemoteConfigs: [[
- url: "https://gerrit.opencord.org/voltha-helm-charts",
- refspec: "${volthaHelmChartsChange}"
- ]],
- branches: [[ name: "master", ]],
- extensions: [
- [$class: 'WipeWorkspace'],
- [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-helm-charts"],
- [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
- ],
- ])
- script {
- sh(script:"""
- if [ '${volthaHelmChartsChange}' != '' ] ; then
- cd $WORKSPACE/voltha-helm-charts;
- git fetch https://gerrit.opencord.org/voltha-helm-charts ${volthaHelmChartsChange} && git checkout FETCH_HEAD
- fi
- """)
- }
- }
- }
- stage('Build patch') {
- when {
- expression {
- return params.GERRIT_PROJECT
- }
- }
- steps {
- sh """
- git clone https://\$GERRIT_HOST/\$GERRIT_PROJECT
- cd \$GERRIT_PROJECT
- git fetch https://\$GERRIT_HOST/\$GERRIT_PROJECT \$GERRIT_REFSPEC && git checkout FETCH_HEAD
-
- DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-build
- DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-push
- """
- }
- }
- stage('Deploy common infrastructure') {
- // includes monitoring, kafka, etcd
- steps {
- sh '''
- helm install kafka $HOME/teone/helm-charts/kafka --set replicaCount=${kafkaReplicas},replicas=${kafkaReplicas} --set persistence.enabled=false \
- --set zookeeper.replicaCount=${kafkaReplicas} --set zookeeper.persistence.enabled=false \
- --set prometheus.kafka.enabled=true,prometheus.operator.enabled=true,prometheus.jmx.enabled=true,prometheus.operator.serviceMonitor.namespace=default
-
- # the ETCD chart use "auth" for resons different than BBsim, so strip that away
- ETCD_FLAGS=$(echo ${extraHelmFlags} | sed -e 's/--set auth=false / /g') | sed -e 's/--set auth=true / /g'
- ETCD_FLAGS+=" --set auth.rbac.enabled=false,persistence.enabled=false,statefulset.replicaCount=${etcdReplicas}"
- ETCD_FLAGS+=" --set memoryMode=${inMemoryEtcdStorage} "
- helm install --set replicas=${etcdReplicas} etcd $HOME/teone/helm-charts/etcd $ETCD_FLAGS
-
- if [ ${withMonitoring} = true ] ; then
- helm install nem-monitoring onf/nem-monitoring \
- -f $HOME/voltha-scale/grafana.yaml \
- --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
- --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
- fi
- '''
- }
- }
- stage('Deploy Voltha') {
- steps {
- script {
- sh returnStdout: false, script: """
-
- export EXTRA_HELM_FLAGS+=' '
-
- # BBSim custom image handling
- if [ '${bbsimImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'bbsim' ]; then
- IFS=: read -r bbsimRepo bbsimTag <<< '${bbsimImg.trim()}'
- EXTRA_HELM_FLAGS+="--set images.bbsim.repository=\$bbsimRepo,images.bbsim.tag=\$bbsimTag "
- fi
-
- # VOLTHA custom image handling
- if [ '${rwCoreImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-go' ]; then
- IFS=: read -r rwCoreRepo rwCoreTag <<< '${rwCoreImg.trim()}'
- EXTRA_HELM_FLAGS+="--set voltha.images.rw_core.repository=\$rwCoreRepo,voltha.images.rw_core.tag=\$rwCoreTag "
- fi
-
- # ofAgent custom image handling
- if [ '${ofAgentImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'of-agent' ]; then
- IFS=: read -r ofAgentRepo ofAgentTag <<< '${ofAgentImg.trim()}'
- EXTRA_HELM_FLAGS+="--set voltha.images.ofagent.repository=\$ofAgentRepo,voltha.images.ofagent.tag=\$ofAgentTag "
- fi
-
- # OpenOLT custom image handling
- if [ '${openoltAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openolt-adapter' ]; then
- IFS=: read -r openoltAdapterRepo openoltAdapterTag <<< '${openoltAdapterImg.trim()}'
- EXTRA_HELM_FLAGS+="--set voltha-adapter-openolt.images.adapter_open_olt.repository=\$openoltAdapterRepo,voltha-adapter-openolt.images.adapter_open_olt.tag=\$openoltAdapterTag "
- fi
-
- # OpenONU custom image handling
- if [ '${openonuAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter' ]; then
- IFS=: read -r openonuAdapterRepo openonuAdapterTag <<< '${openonuAdapterImg.trim()}'
- EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu.repository=\$openonuAdapterRepo,voltha-adapter-openonu.images.adapter_open_onu.tag=\$openonuAdapterTag "
- fi
-
- # OpenONU GO custom image handling
- if [ '${openonuAdapterGoImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter-go' ]; then
- IFS=: read -r openonuAdapterGoRepo openonuAdapterGoTag <<< '${openonuAdapterGoImg.trim()}'
- EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu_go.repository=\$openonuAdapterGoRepo,voltha-adapter-openonu.images.adapter_open_onu_go.tag=\$openonuAdapterGoTag "
- fi
-
- # ONOS custom image handling
- if [ '${onosImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-onos' ]; then
- IFS=: read -r onosRepo onosTag <<< '${onosImg.trim()}'
- EXTRA_HELM_FLAGS+="--set onos-classic.images.onos.repository=\$onosRepo,onos-classic.images.onos.tag=\$onosTag "
- fi
-
- # set BBSim parameters
- EXTRA_HELM_FLAGS+='--set enablePerf=true,pon=${pons},onu=${onus} '
-
- # disable the securityContext, this is a development cluster
- EXTRA_HELM_FLAGS+='--set securityContext.enabled=false '
-
- # No persistent-volume-claims in Atomix
- EXTRA_HELM_FLAGS+="--set onos-classic.atomix.persistence.enabled=false "
-
- echo "Installing with the following extra arguments:"
- echo $EXTRA_HELM_FLAGS
-
-
-
- # Use custom built images
-
- if [ '\$GERRIT_PROJECT' == 'voltha-go' ]; then
- EXTRA_HELM_FLAGS+="--set voltha.images.rw_core.repository=${dockerRegistry}/voltha/voltha-rw-core,voltha.images.rw_core.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'voltha-openolt-adapter' ]; then
- EXTRA_HELM_FLAGS+="--set voltha-openolt-adapter.images.adapter_open_olt.repository=${dockerRegistry}/voltha/voltha-openolt-adapter,voltha-openolt-adapter.images.adapter_open_olt.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter' ]; then
- EXTRA_HELM_FLAGS+="--set voltha-openonu-adapter.images.adapter_open_onu.repository=${dockerRegistry}/voltha/voltha-openonu-adapter,voltha-openonu-adapter.images.adapter_open_onu.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter-go' ]; then
- EXTRA_HELM_FLAGS+="--set voltha-openonu-adapter-go.images.adapter_open_onu_go.repository=${dockerRegistry}/voltha/voltha-openonu-adapter-go,voltha-openonu-adapter-go.images.adapter_open_onu_go.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'ofagent-go' ]; then
- EXTRA_HELM_FLAGS+="--set voltha.images.ofagent.repository=${dockerRegistry}/voltha/voltha-ofagent-go,ofagent-go.images.ofagent.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'voltha-onos' ]; then
- EXTRA_HELM_FLAGS+="--set onos-classic.images.onos.repository=${dockerRegistry}/voltha/voltha-onos,onos-classic.images.onos.tag=voltha-scale "
- fi
-
- if [ '\$GERRIT_PROJECT' == 'bbsim' ]; then
- EXTRA_HELM_FLAGS+="--set images.bbsim.repository=${dockerRegistry}/voltha/bbsim,images.bbsim.tag=voltha-scale "
- fi
-
- helm upgrade --install voltha-infra onf/voltha-infra \$EXTRA_HELM_FLAGS \
- --set onos-classic.replicas=${onosReplicas},onos-classic.atomix.replicas=${atomixReplicas} \
- --set etcd.enabled=false,kafka.enabled=false \
- --set global.log_level=${logLevel} \
- -f $WORKSPACE/voltha-helm-charts/examples/${workflow}-values.yaml
-
- helm upgrade --install voltha1 onf/voltha-stack \$EXTRA_HELM_FLAGS \
- --set global.stack_name=voltha1 \
- --set global.voltha_infra_name=voltha-infra \
- --set global.voltha_infra_namespace=default \
- --set global.log_level=${logLevel} \
- --set voltha.services.kafka.adapter.address=kafka.default.svc:9092 \
- --set voltha.services.kafka.cluster.address=kafka.default.svc:9092 \
- --set voltha.services.etcd.address=etcd.default.svc:2379 \
- --set voltha-adapter-openolt.services.kafka.adapter.address=kafka.default.svc:9092 \
- --set voltha-adapter-openolt.services.kafka.cluster.address=kafka.default.svc:9092 \
- --set voltha-adapter-openolt.services.etcd.address=etcd.default.svc:2379 \
- --set voltha-adapter-openonu.services.kafka.adapter.address=kafka.default.svc:9092 \
- --set voltha-adapter-openonu.services.kafka.cluster.address=kafka.default.svc:9092 \
- --set voltha-adapter-openonu.services.etcd.address=etcd.default.svc:2379
- # TODO having to set all of these values is annoying, is there a better solution?
-
-
- for i in {0..${olts.toInteger() - 1}}; do
- stackId=1
- helm upgrade --install bbsim\$i onf/bbsim \$EXTRA_HELM_FLAGS \
- --set olt_id="\${stackId}\${i}" \
- --set onu=${onus},pon=${pons} \
- --set global.log_level=${logLevel.toLowerCase()} \
- -f $WORKSPACE/voltha-helm-charts/examples/${workflow}-values.yaml
- done
-
- echo -ne "\nWaiting for VOLTHA to start..."
- voltha=\$(kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
- while [[ \$voltha != 0 ]]; do
- sleep 5
- echo -ne "."
- voltha=\$(kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
- done
-
- # forward ONOS and VOLTHA ports
- _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n default svc/voltha-infra-onos-classic-hs 8101:8101&
- _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n default svc/voltha-infra-onos-classic-hs 8181:8181&
- _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n default svc/voltha1-voltha-api 55555:55555&
- """
- }
- sh returnStdout: false, script: '''
- # start logging with kail
-
- mkdir -p $LOG_FOLDER
-
- list=($APPS_TO_LOG)
- for app in "${list[@]}"
- do
- echo "Starting logs for: ${app}"
- _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
- done
- '''
- }
- }
- stage('Configuration') {
- steps {
- script {
- sh returnStdout: false, script: """
- #Setting link discovery
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}
-
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true
-
-
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 1000
-
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500
-
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.onosproject
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.opencord
-
-
- kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false
-
- # Set Flows/Ports/Meters poll frequency
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
-
- if [ ${withFlows} = false ]; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.olt
- fi
-
- if [ ${withPcap} = true ] ; then
- # Start the tcp-dump in ofagent
- export OF_AGENT=\$(kubectl get pods -l app=ofagent -o name)
- kubectl exec \$OF_AGENT -- apk update
- kubectl exec \$OF_AGENT -- apk add tcpdump
- kubectl exec \$OF_AGENT -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
- _TAG=ofagent-tcpdump kubectl exec \$OF_AGENT -- tcpdump -nei eth0 -w out.pcap&
-
- # Start the tcp-dump in radius
- export RADIUS=\$(kubectl get pods -l app=radius -o name)
- kubectl exec \$RADIUS -- apt-get update
- kubectl exec \$RADIUS -- apt-get install -y tcpdump
- _TAG=radius-tcpdump kubectl exec \$RADIUS -- tcpdump -w out.pcap&
-
- # Start the tcp-dump in ONOS
- for i in \$(seq 0 \$ONOSES); do
- INSTANCE="onos-onos-classic-\$i"
- kubectl exec \$INSTANCE -- apt-get update
- kubectl exec \$INSTANCE -- apt-get install -y tcpdump
- kubectl exec \$INSTANCE -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
- _TAG=\$INSTANCE kubectl exec \$INSTANCE -- /usr/bin/tcpdump -nei eth0 port 1812 -w out.pcap&
- done
- fi
- """
- }
- }
- }
- stage('Run Test') {
- steps {
- sh '''
- mkdir -p $WORKSPACE/RobotLogs
- cd $WORKSPACE/voltha-system-tests
- make vst_venv
- '''
- sh '''
- if [ ${withProfiling} = true ] ; then
- mkdir -p $LOG_FOLDER/pprof
- echo $PATH
- #Creating Python script for ONU Detection
- cat << EOF > $WORKSPACE/pprof.sh
-timestamp() {
- date +"%T"
-}
-
-i=0
-while [[ true ]]; do
- ((i++))
- ts=$(timestamp)
- go tool pprof -png http://127.0.0.1:6060/debug/pprof/heap > $LOG_FOLDER/pprof/rw-core-heap-\\$i-\\$ts.png
- go tool pprof -png http://127.0.0.1:6060/debug/pprof/goroutine > $LOG_FOLDER/pprof/rw-core-goroutine-\\$i-\\$ts.png
- curl -o $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof http://127.0.0.1:6060/debug/pprof/profile?seconds=10
- go tool pprof -png $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.png
-
- go tool pprof -png http://127.0.0.1:6061/debug/pprof/heap > $LOG_FOLDER/pprof/openolt-heap-\\$i-\\$ts.png
- go tool pprof -png http://127.0.0.1:6061/debug/pprof/goroutine > $LOG_FOLDER/pprof/openolt-goroutine-\\$i-\\$ts.png
- curl -o $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof http://127.0.0.1:6061/debug/pprof/profile?seconds=10
- go tool pprof -png $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.png
-
- go tool pprof -png http://127.0.0.1:6062/debug/pprof/heap > $LOG_FOLDER/pprof/ofagent-heap-\\$i-\\$ts.png
- go tool pprof -png http://127.0.0.1:6062/debug/pprof/goroutine > $LOG_FOLDER/pprof/ofagent-goroutine-\\$i-\\$ts.png
- curl -o $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof http://127.0.0.1:6062/debug/pprof/profile?seconds=10
- go tool pprof -png $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.png
-
- sleep 10
-done
-EOF
-
- _TAG="pprof"
- _TAG=$_TAG bash $WORKSPACE/pprof.sh &
- fi
- '''
- timeout(time: 15, unit: 'MINUTES') {
- sh '''
- ROBOT_PARAMS="-v olt:${olts} \
- -v pon:${pons} \
- -v onu:${onus} \
- -v workflow:${workflow} \
- -v withEapol:${withEapol} \
- -v withDhcp:${withDhcp} \
- -v withIgmp:${withIgmp} \
- --noncritical non-critical \
- -e teardown "
-
- if [ ${withEapol} = false ] ; then
- ROBOT_PARAMS+="-e authentication "
- fi
-
- if [ ${withDhcp} = false ] ; then
- ROBOT_PARAMS+="-e dhcp "
- fi
-
- if [ ${provisionSubscribers} = false ] ; then
- # if we're not considering subscribers then we don't care about authentication and dhcp
- ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
- fi
-
- if [ ${withFlows} = false ] ; then
- ROBOT_PARAMS+="-i setup -i activation "
- fi
-
- cd $WORKSPACE/voltha-system-tests
- source ./vst_venv/bin/activate
- robot -d $WORKSPACE/RobotLogs \
- $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
- '''
- }
- }
- }
- }
- post {
- always {
- // collect result, done in the "post" step so it's executed even in the
- // event of a timeout in the tests
- sh '''
-
- # stop the kail processes
- list=($APPS_TO_LOG)
- for app in "${list[@]}"
- do
- echo "Stopping logs for: ${app}"
- _TAG="kail-$app"
- P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
- done
-
- if [ ${withPcap} = true ] ; then
- # stop ofAgent tcpdump
- P_ID="\$(ps e -ww -A | grep "_TAG=ofagent-tcpdump" | grep -v grep | awk '{print \$1}')"
- if [ -n "\$P_ID" ]; then
- kill -9 \$P_ID
- fi
-
- # stop radius tcpdump
- P_ID="\$(ps e -ww -A | grep "_TAG=radius-tcpdump" | grep -v grep | awk '{print \$1}')"
- if [ -n "\$P_ID" ]; then
- kill -9 \$P_ID
- fi
-
- # stop onos tcpdump
- LIMIT=$(($NUM_OF_ONOS - 1))
- for i in $(seq 0 $LIMIT); do
- INSTANCE="onos-onos-classic-$i"
- P_ID="\$(ps e -ww -A | grep "_TAG=$INSTANCE" | grep -v grep | awk '{print \$1}')"
- if [ -n "\$P_ID" ]; then
- kill -9 \$P_ID
- fi
- done
-
- # copy the file
- export OF_AGENT=$(kubectl get pods -l app=ofagent | awk 'NR==2{print $1}') || true
- kubectl cp $OF_AGENT:out.pcap $LOG_FOLDER/ofagent.pcap || true
-
- export RADIUS=$(kubectl get pods -l app=radius | awk 'NR==2{print $1}') || true
- kubectl cp $RADIUS:out.pcap $LOG_FOLDER/radius.pcap || true
-
- LIMIT=$(($NUM_OF_ONOS - 1))
- for i in $(seq 0 $LIMIT); do
- INSTANCE="onos-onos-classic-$i"
- kubectl cp $INSTANCE:out.pcap $LOG_FOLDER/$INSTANCE.pcap || true
- done
- fi
-
- cd voltha-system-tests
- source ./vst_venv/bin/activate
- python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time.txt || true
- cat $WORKSPACE/execution-time.txt
- '''
- sh '''
- if [ ${withProfiling} = true ] ; then
- _TAG="pprof"
- P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
- if [ -n "$P_IDS" ]; then
- echo $P_IDS
- for P_ID in $P_IDS; do
- kill -9 $P_ID
- done
- fi
- fi
- '''
- plot([
- csvFileName: 'scale-test.csv',
- csvSeries: [
- [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
- ],
- group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
- ])
- step([$class: 'RobotPublisher',
- disableArchiveOutput: false,
- logFileName: 'RobotLogs/log.html',
- otherFiles: '',
- outputFileName: 'RobotLogs/output.xml',
- outputPath: '.',
- passThreshold: 100,
- reportFileName: 'RobotLogs/report.html',
- unstableThreshold: 0]);
- // get all the logs from kubernetes PODs
- sh returnStdout: false, script: '''
-
- # store information on running charts
- helm ls > $LOG_FOLDER/helm-list.txt || true
-
- # store information on the running pods
- kubectl get pods -o wide > $LOG_FOLDER/pods.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true
-
- # copy the ONOS logs directly from the container to avoid the color codes
- printf '%s\n' $(kubectl get pods -l app=onos-onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
-
- # get radius logs out of the container
- kubectl cp $(kubectl get pods -l app=radius --no-headers | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER//radius.log || true
- '''
- // dump all the BBSim(s) ONU information
- sh '''
- BBSIM_IDS=$(kubectl get pods | grep bbsim | grep -v server | awk '{print $1}')
- IDS=($BBSIM_IDS)
-
- for bbsim in "${IDS[@]}"
- do
- kubectl exec -t $bbsim -- bbsimctl onu list > $LOG_FOLDER/$bbsim-device-list.txt || true
- kubectl exec -t $bbsim -- bbsimctl service list > $LOG_FOLDER/$bbsim-service-list.txt || true
- done
- '''
- // get DHCP server stats
- sh '''
- BBSIM_IDS=$(kubectl get pods | grep bbsim | grep -v server | awk '{print $1}')
- IDS=($BBSIM_IDS)
-
- for bbsim in "${IDS[@]}"
- do
- kubectl exec -t $bbsim -- dhcpd -lf /var/lib/dhcp/dhcpd.leases -play /tmp/dhcplog 2>&1 | tee $LOG_FOLDER/$bbsim-dhcp-replay.txt || true
- kubectl cp $bbsim:/tmp/dhcplog $LOG_FOLDER/$bbsim-dhcp-logs || true
- kubectl cp $bbsim:/var/lib/dhcp/dhcpd.leases $LOG_FOLDER/$bbsim-dhcp-leases || true
- done
- '''
- // get ONOS debug infos
- sh '''
-
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt || true
-
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt || true
-
- if [ ${withFlows} = true ] ; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt || true
- fi
-
- if [ ${provisionSubscribers} = true ]; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt || true
- fi
-
- if [ ${withEapol} = true ] ; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt || true
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt || true
- fi
-
- if [ ${withDhcp} = true ] ; then
- sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt || true
- fi
- '''
- // collect etcd metrics
- sh '''
- mkdir -p $WORKSPACE/etcd-metrics
- curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
- curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
- curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
- curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
- curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time-sum.json || true
- curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time-bucket.json || true
- curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_wal_fsync_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-wal-fsync-time-bucket.json || true
- curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_network_peer_round_trip_time_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-network-peer-round-trip-time-seconds.json || true
-
- '''
- // get VOLTHA debug infos
- script {
- try {
- sh '''
- voltctl -m 8MB device list -o json > $LOG_FOLDER/device-list.json || true
- python -m json.tool $LOG_FOLDER/device-list.json > $LOG_FOLDER/voltha-devices-list.json || true
- rm $LOG_FOLDER/device-list.json || true
- voltctl -m 8MB device list > $LOG_FOLDER/voltha-devices-list.txt || true
-
- printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device flows # > $LOG_FOLDER/voltha-device-flows-#.txt" || true
- printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/voltha-device-ports-#.txt" || true
-
- printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice flows # > $LOG_FOLDER/voltha-logicaldevice-flows-#.txt" || true
- printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice port list # > $LOG_FOLDER/voltha-logicaldevice-ports-#.txt" || true
- '''
- } catch(e) {
- sh '''
- echo "Can't get device list from voltclt"
- '''
- }
- }
- // get cpu usage by container
- sh '''
- if [ ${withMonitoring} = true ] ; then
- cd $WORKSPACE/voltha-system-tests
- source ./vst_venv/bin/activate
- sleep 60 # we have to wait for prometheus to collect all the information
- python tests/scale/sizing.py -o $WORKSPACE/plots || true
- fi
- '''
- archiveArtifacts artifacts: 'execution-time.txt,logs/*,logs/pprof/*,RobotLogs/*,plots/*,etcd-metrics/*'
- }
- }
-}
diff --git a/jjb/pipeline/voltha-tt-physical-functional-tests.groovy b/jjb/pipeline/voltha-tt-physical-functional-tests.groovy
index d286789..93486ab 100644
--- a/jjb/pipeline/voltha-tt-physical-functional-tests.groovy
+++ b/jjb/pipeline/voltha-tt-physical-functional-tests.groovy
@@ -66,6 +66,14 @@
[$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
],
])
+ script {
+ sh(script:"""
+ if [ '${volthaSystemTestsChange}' != '' ] ; then
+ cd $WORKSPACE/voltha-system-tests;
+ git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
+ fi
+ """)
+ }
}
}
stage('Clone cord-tester') {
@@ -178,6 +186,26 @@
"""
}
}
+
+ stage('Failure/Recovery Tests') {
+ environment {
+ ROBOT_CONFIG_FILE="$WORKSPACE/${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
+ ROBOT_FILE="Voltha_TT_FailureScenarios.robot"
+ ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/tt-workflow/FailureScenarios"
+ }
+ steps {
+ sh """
+ mkdir -p $ROBOT_LOGS_DIR
+ if ( ${powerSwitch} ); then
+ export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -i PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
+ else
+ export ROBOT_MISC_ARGS="--removekeywords wuks -L TRACE -i functionalTT -e PowerSwitch -e bbsim -e notready -d $ROBOT_LOGS_DIR -v POD_NAME:${configFileName} -v KUBERNETES_CONFIGS_DIR:$WORKSPACE/${configBaseDir}/${configKubernetesDir} -v container_log_dir:$WORKSPACE -v OLT_ADAPTER_APP_LABEL:${oltAdapterAppLabel}"
+ fi
+ make -C $WORKSPACE/voltha-system-tests voltha-tt-test || true
+ """
+ }
+ }
+
}
post {
always {
@@ -225,6 +253,15 @@
cd $WORKSPACE
gzip *-combined.log || true
rm *-combined.log || true
+
+ # store information on running charts
+ helm ls > $WORKSPACE/helm-list.txt || true
+
+ # store information on the running pods
+ kubectl get pods --all-namespaces -o wide > $WORKSPACE/pods.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-images.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $WORKSPACE/pod-imagesId.txt || true
+
'''
script {
deployment_config.olts.each { olt ->
diff --git a/jjb/pipeline/voltha/master/bbsim-tests.groovy b/jjb/pipeline/voltha/master/bbsim-tests.groovy
index 6027cf5..a67d32a 100644
--- a/jjb/pipeline/voltha/master/bbsim-tests.groovy
+++ b/jjb/pipeline/voltha/master/bbsim-tests.groovy
@@ -23,30 +23,38 @@
remote: 'https://gerrit.opencord.org/ci-management.git'
])
-def customImageFlags(image) {
- return "--set images.${image}.tag=citest,images.${image}.pullPolicy=Never "
-}
-
def test_workflow(name) {
- stage('Deploy - '+ name + ' workflow') {
- def extraHelmFlags = "${extraHelmFlags} --set global.log_level=DEBUG,onu=1,pon=1 "
+ timeout(time: 10, unit: 'MINUTES') {
+ stage('Deploy - '+ name + ' workflow') {
+ def extraHelmFlags = "${extraHelmFlags} --set global.log_level=DEBUG,onu=1,pon=1 "
- if (gerritProject != "") {
- extraHelmFlags = extraHelmFlags + customImageFlags("${gerritProject}")
- }
+ if (gerritProject != "") {
+ extraHelmFlags = extraHelmFlags + getVolthaImageFlags("${gerritProject}")
+ }
- volthaDeploy([workflow: name, extraHelmFlags: extraHelmFlags])
- // start logging
- sh """
- mkdir -p $WORKSPACE/${name}
- _TAG=kail-${name} kail -n infra -n voltha > $WORKSPACE/${name}/onos-voltha-combined.log &
- """
- // forward ONOS and VOLTHA ports
- sh """
- _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8101:8101&
- _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8181:8181&
- _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha svc/voltha-voltha-api 55555:55555&
- """
+ def localCharts = false
+ if (gerritProject == "voltha-helm-charts") {
+ localCharts = true
+ }
+
+ volthaDeploy([
+ workflow: name,
+ extraHelmFlags:extraHelmFlags,
+ localCharts: localCharts,
+ dockerRegistry: "mirror.registry.opennetworking.org"
+ ])
+ // start logging
+ sh """
+ mkdir -p $WORKSPACE/${name}
+ _TAG=kail-${name} kail -n infra -n voltha > $WORKSPACE/${name}/onos-voltha-combined.log &
+ """
+ // forward ONOS and VOLTHA ports
+ sh """
+ _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8101:8101&
+ _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8181:8181&
+ _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha svc/voltha-voltha-api 55555:55555&
+ """
+ }
}
stage('Test VOLTHA - '+ name + ' workflow') {
sh """
@@ -90,14 +98,10 @@
// remove port-forwarding
sh """
# remove orphaned port-forward from different namespaces
- ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9
+ ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
"""
// collect pod details
- sh """
- kubectl get pods --all-namespaces -o wide > \$WORKSPACE/${name}/pods.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee \$WORKSPACE/att/pod-images.txt || true
- kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee \$WORKSPACE/att/pod-imagesId.txt || true
- """
+ getPodsInfo("$WORKSPACE/${name}")
helmTeardown(['infra', 'voltha'])
}
}
@@ -109,7 +113,7 @@
label "${params.buildNode}"
}
options {
- timeout(time: 30, unit: 'MINUTES')
+ timeout(time: 35, unit: 'MINUTES')
}
environment {
PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
@@ -145,16 +149,46 @@
loadToKind()
}
}
+ stage('Replace voltctl') {
+ // if the project is voltctl override the downloaded one with the built one
+ when {
+ expression {
+ return gerritProject == "voltctl"
+ }
+ }
+ steps{
+ sh """
+ mv `ls $WORKSPACE/voltctl/release/voltctl-*-linux-amd*` $WORKSPACE/bin/voltctl
+ chmod +x $WORKSPACE/bin/voltctl
+ """
+ }
+ }
stage('Run Test') {
steps {
- test_workflow("att")
- test_workflow("dt")
- test_workflow("tt")
+ timeout(time: 30, unit: 'MINUTES') {
+ test_workflow("att")
+ test_workflow("dt")
+ test_workflow("tt")
+ }
}
}
}
post {
+ aborted {
+ getPodsInfo("$WORKSPACE/failed")
+ sh """
+ kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.log || true
+ """
+ archiveArtifacts artifacts: '**/*.log,**/*.txt'
+ }
+ failure {
+ getPodsInfo("$WORKSPACE/failed")
+ sh """
+ kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.logs || true
+ """
+ archiveArtifacts artifacts: '**/*.log,**/*.txt'
+ }
always {
sh '''
gzip $WORKSPACE/att/onos-voltha-combined.log || true
@@ -170,7 +204,7 @@
passThreshold: 100,
reportFileName: 'RobotLogs/*/report*.html',
unstableThreshold: 0]);
- archiveArtifacts artifacts: '*.log,**/*.log,**/*.gz,*.gz'
+ archiveArtifacts artifacts: '*.log,**/*.log,**/*.gz,*.gz,*.txt,**/*.txt'
}
}
}
diff --git a/jjb/pipeline/voltha/master/device-management-mock-tests.groovy b/jjb/pipeline/voltha/master/device-management-mock-tests.groovy
new file mode 100644
index 0000000..8a15ac6
--- /dev/null
+++ b/jjb/pipeline/voltha/master/device-management-mock-tests.groovy
@@ -0,0 +1,172 @@
+// Copyright 2017-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// NOTE we are importing the library even if it's global so that it's
+// easier to change the keywords during a replay
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+
+def localCharts = false
+
+pipeline {
+
+ /* no label, executor is determined by JJB */
+ agent {
+ label "${params.buildNode}"
+ }
+ options {
+ timeout(time: 90, unit: 'MINUTES')
+ }
+ environment {
+ KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
+ PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ROBOT_MISC_ARGS="-d $WORKSPACE/RobotLogs"
+ }
+
+ stages {
+
+ stage('Download Code') {
+ steps {
+ getVolthaCode([
+ branch: "${branch}",
+ gerritProject: "${gerritProject}",
+ gerritRefspec: "${gerritRefspec}",
+ volthaSystemTestsChange: "${volthaSystemTestsChange}",
+ volthaHelmChartsChange: "${volthaHelmChartsChange}",
+ ])
+ }
+ }
+ stage('Build Redfish Importer Image') {
+ steps {
+ sh """
+ make -C $WORKSPACE/device-management/\$1 DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build-importer
+ """
+ }
+ }
+ stage('Build demo_test Image') {
+ steps {
+ sh """
+ make -C $WORKSPACE/device-management/\$1/demo_test DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build
+ """
+ }
+ }
+ stage('Build mock-redfish-server Image') {
+ steps {
+ sh """
+ make -C $WORKSPACE/device-management/\$1/mock-redfish-server DOCKER_REPOSITORY=opencord/ DOCKER_TAG=citest docker-build
+ """
+ }
+ }
+ stage('Create K8s Cluster') {
+ steps {
+ createKubernetesCluster([nodes: 3])
+ }
+ }
+ stage('Load image in kind nodes') {
+ steps {
+ loadToKind()
+ }
+ }
+ stage('Deploy Voltha') {
+ steps {
+ script {
+ if (branch != "master" || volthaHelmChartsChange != "") {
+ // if we're using a release or testing changes in the charts, then use the local clone
+ localCharts = true
+ }
+ }
+ volthaDeploy([
+ workflow: "att",
+ extraHelmFlags: extraHelmFlags,
+ dockerRegistry: "mirror.registry.opennetworking.org",
+ localCharts: localCharts,
+ ])
+ // start logging
+ sh """
+ mkdir -p $WORKSPACE/att
+ _TAG=kail-att kail -n infra -n voltha -n default > $WORKSPACE/att/onos-voltha-combined.log &
+ """
+ // forward ONOS and VOLTHA ports
+ sh """
+ _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8101:8101&
+ _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8181:8181&
+ _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha svc/voltha-voltha-api 55555:55555&
+ """
+ }
+ }
+
+ stage('Run E2E Tests') {
+ steps {
+ sh '''
+ mkdir -p $WORKSPACE/RobotLogs
+
+ # tell the kubernetes script to use images tagged citest and pullPolicy:Never
+ sed -i 's/master/citest/g' $WORKSPACE/device-management/kubernetes/deploy*.yaml
+ sed -i 's/imagePullPolicy: Always/imagePullPolicy: Never/g' $WORKSPACE/device-management/kubernetes/deploy*.yaml
+ make -C $WORKSPACE/device-management functional-mock-test || true
+ '''
+ }
+ }
+ }
+
+ post {
+ always {
+ sh '''
+ set +e
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
+ kubectl get nodes -o wide
+ kubectl get pods -o wide --all-namespaces
+
+ sync
+ pkill kail || true
+
+ ## Pull out errors from log files
+ extract_errors_go() {
+ echo
+ echo "Error summary for $1:"
+ grep $1 $WORKSPACE/att/onos-voltha-combined.log | grep '"level":"error"' | cut -d ' ' -f 2- | jq -r '.msg'
+ echo
+ }
+
+ extract_errors_python() {
+ echo
+ echo "Error summary for $1:"
+ grep $1 $WORKSPACE/att/onos-voltha-combined.log | grep 'ERROR' | cut -d ' ' -f 2-
+ echo
+ }
+
+ extract_errors_go voltha-rw-core > $WORKSPACE/error-report.log
+ extract_errors_go adapter-open-olt >> $WORKSPACE/error-report.log
+ extract_errors_python adapter-open-onu >> $WORKSPACE/error-report.log
+ extract_errors_python voltha-ofagent >> $WORKSPACE/error-report.log
+
+ gzip $WORKSPACE/att/onos-voltha-combined.log
+ '''
+ step([$class: 'RobotPublisher',
+ disableArchiveOutput: false,
+ logFileName: 'RobotLogs/log*.html',
+ otherFiles: '',
+ outputFileName: 'RobotLogs/output*.xml',
+ outputPath: '.',
+ passThreshold: 80,
+ reportFileName: 'RobotLogs/report*.html',
+ unstableThreshold: 0]);
+ archiveArtifacts artifacts: '**/*.log,**/*.gz'
+ }
+ }
+}
diff --git a/jjb/pipeline/voltha/master/periodic-bbsim-tests.groovy b/jjb/pipeline/voltha/master/periodic-bbsim-tests.groovy
new file mode 100755
index 0000000..d05a406
--- /dev/null
+++ b/jjb/pipeline/voltha/master/periodic-bbsim-tests.groovy
@@ -0,0 +1,250 @@
+// Copyright 2021-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// voltha-2.x e2e tests for openonu-go
+// uses bbsim to simulate OLT/ONUs
+
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+
+def clusterName = "kind-ci"
+
+def execute_test(testTarget, workflow, teardown, testSpecificHelmFlags = "") {
+ def infraNamespace = "default"
+ def volthaNamespace = "voltha"
+ def robotLogsDir = "RobotLogs"
+ stage('Cleanup') {
+ if (teardown) {
+ timeout(15) {
+ script {
+ helmTeardown(["default", infraNamespace, volthaNamespace])
+ }
+ timeout(1) {
+ sh returnStdout: false, script: '''
+ # remove orphaned port-forward from different namespaces
+ ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
+ '''
+ }
+ }
+ }
+ }
+ stage('Deploy Voltha') {
+ if (teardown) {
+ timeout(20) {
+ script {
+
+ sh """
+ mkdir -p $WORKSPACE/${testTarget}-components
+ _TAG=kail-startup kail -n infra -n voltha > $WORKSPACE/${testTarget}-components/onos-voltha-startup-combined.log &
+ """
+
+ // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
+ def localCharts = false
+ if (volthaHelmChartsChange != "") {
+ localCharts = true
+ }
+
+ // NOTE temporary workaround expose ONOS node ports
+ def localHelmFlags = extraHelmFlags + " --set global.log_level=${logLevel.toUpperCase()} " +
+ " --set onos-classic.onosSshPort=30115 " +
+ " --set onos-classic.onosApiPort=30120 " +
+ " --set onos-classic.onosOfPort=31653 " +
+ " --set onos-classic.individualOpenFlowNodePorts=true " + testSpecificHelmFlags
+
+ if (gerritProject != "") {
+ localHelmFlags = "${localHelmFlags} " + getVolthaImageFlags("${gerritProject}")
+ }
+
+ volthaDeploy([
+ infraNamespace: infraNamespace,
+ volthaNamespace: volthaNamespace,
+ workflow: workflow.toLowerCase(),
+ extraHelmFlags: localHelmFlags,
+ localCharts: localCharts,
+ bbsimReplica: olts.toInteger(),
+ dockerRegistry: registry,
+ ])
+ }
+
+ // stop logging
+ sh """
+ P_IDS="\$(ps e -ww -A | grep "_TAG=kail-startup" | grep -v grep | awk '{print \$1}')"
+ if [ -n "\$P_IDS" ]; then
+ echo \$P_IDS
+ for P_ID in \$P_IDS; do
+ kill -9 \$P_ID
+ done
+ fi
+ """
+ }
+ sh """
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-infra-etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd 2379:2379; done"&
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-infra-kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
+ bbsimDmiPortFwd=50075
+ for i in {0..${olts.toInteger() - 1}}; do
+          JENKINS_NODE_COOKIE="dontKillMe" _TAG="bbsim\${i}" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/bbsim\${i} \${bbsimDmiPortFwd}:50075; done"&
+ ((bbsimDmiPortFwd++))
+ done
+ ps aux | grep port-forward
+ """
+ }
+ }
+ stage('Run test ' + testTarget + ' on ' + workflow + ' workFlow') {
+ // start logging
+ sh """
+ mkdir -p $WORKSPACE/${testTarget}-components
+ _TAG=kail-${workflow} kail -n infra -n voltha > $WORKSPACE/${testTarget}-components/onos-voltha-combined.log &
+ """
+ sh """
+ mkdir -p $WORKSPACE/${robotLogsDir}/${testTarget}-robot
+ export ROBOT_MISC_ARGS="-d $WORKSPACE/${robotLogsDir}/${testTarget}-robot "
+ ROBOT_MISC_ARGS+="-v ONOS_SSH_PORT:30115 -v ONOS_REST_PORT:30120 -v INFRA_NAMESPACE:${infraNamespace}"
+ export KVSTOREPREFIX=voltha/voltha_voltha
+
+ make -C $WORKSPACE/voltha-system-tests ${testTarget} || true
+ """
+ // stop logging
+ sh """
+ P_IDS="\$(ps e -ww -A | grep "_TAG=kail-${workflow}" | grep -v grep | awk '{print \$1}')"
+ if [ -n "\$P_IDS" ]; then
+ echo \$P_IDS
+ for P_ID in \$P_IDS; do
+ kill -9 \$P_ID
+ done
+ fi
+ """
+ getPodsInfo("$WORKSPACE/${testTarget}-components")
+ }
+}
+
+def collectArtifacts(exitStatus) {
+ getPodsInfo("$WORKSPACE/${exitStatus}")
+ sh """
+ kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/${exitStatus}/voltha.log || true
+ """
+ archiveArtifacts artifacts: '**/*.log,**/*.gz,**/*.txt,**/*.html'
+ sh '''
+ sync
+ pkill kail || true
+ which voltctl
+ md5sum $(which voltctl)
+ '''
+ step([$class: 'RobotPublisher',
+ disableArchiveOutput: false,
+ logFileName: "RobotLogs/*/log*.html",
+ otherFiles: '',
+ outputFileName: "RobotLogs/*/output*.xml",
+ outputPath: '.',
+ passThreshold: 100,
+ reportFileName: "RobotLogs/*/report*.html",
+ unstableThreshold: 0]);
+}
+
+pipeline {
+
+ /* no label, executor is determined by JJB */
+ agent {
+ label "${params.buildNode}"
+ }
+ options {
+ timeout(time: "${timeout}", unit: 'MINUTES')
+ }
+ environment {
+ KUBECONFIG="$HOME/.kube/kind-${clusterName}"
+ VOLTCONFIG="$HOME/.volt/config"
+ PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ROBOT_MISC_ARGS="-e PowerSwitch ${params.extraRobotArgs}"
+ DIAGS_PROFILE="VOLTHA_PROFILE"
+ }
+ stages {
+ stage('Download Code') {
+ steps {
+ getVolthaCode([
+ branch: "${branch}",
+ gerritProject: "${gerritProject}",
+ gerritRefspec: "${gerritRefspec}",
+ volthaSystemTestsChange: "${volthaSystemTestsChange}",
+ volthaHelmChartsChange: "${volthaHelmChartsChange}",
+ ])
+ }
+ }
+ stage('Build patch') {
+ // build the patch only if gerritProject is specified
+ when {
+ expression {
+ return !gerritProject.isEmpty()
+ }
+ }
+ steps {
+ // NOTE that the correct patch has already been checked out
+ // during the getVolthaCode step
+ buildVolthaComponent("${gerritProject}")
+ }
+ }
+ stage('Create K8s Cluster') {
+ steps {
+ script {
+ def clusterExists = sh returnStdout: true, script: """
+ kind get clusters | grep ${clusterName} | wc -l
+ """
+ if (clusterExists.trim() == "0") {
+ createKubernetesCluster([nodes: 3, name: clusterName])
+ }
+ }
+ }
+ }
+ stage('Load image in kind nodes') {
+ when {
+ expression {
+ return !gerritProject.isEmpty()
+ }
+ }
+ steps {
+ loadToKind()
+ }
+ }
+ stage('Parse and execute tests') {
+ steps {
+ script {
+ def tests = readYaml text: testTargets
+
+ for(int i = 0;i<tests.size();i++) {
+ def test = tests[i]
+ def target = test["target"]
+ def workflow = test["workflow"]
+ def flags = test["flags"]
+ def teardown = test["teardown"].toBoolean()
+ println "Executing test ${target} on workflow ${workflow} with extra flags ${flags}"
+ execute_test(target, workflow, teardown, flags)
+ }
+ }
+ }
+ }
+ }
+ post {
+ aborted {
+ collectArtifacts("aborted")
+ }
+ failure {
+ collectArtifacts("failed")
+ }
+ always {
+ collectArtifacts("always")
+ }
+ }
+}
diff --git a/jjb/pipeline/voltha/master/physical-build.groovy b/jjb/pipeline/voltha/master/physical-build.groovy
new file mode 100644
index 0000000..05a059e
--- /dev/null
+++ b/jjb/pipeline/voltha/master/physical-build.groovy
@@ -0,0 +1,359 @@
+// Copyright 2017-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// used to deploy VOLTHA and configure ONOS physical PODs
+
+// NOTE we are importing the library even if it's global so that it's
+// easier to change the keywords during a replay
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+
+def infraNamespace = "infra"
+def volthaNamespace = "voltha"
+
+pipeline {
+
+ /* no label, executor is determined by JJB */
+ agent {
+ label "${params.buildNode}"
+ }
+ options {
+ timeout(time: 35, unit: 'MINUTES')
+ }
+ environment {
+ PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
+ KUBECONFIG="$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf"
+ }
+
+ stages{
+ stage('Download Code') {
+ steps {
+ getVolthaCode([
+ branch: "${branch}",
+ volthaHelmChartsChange: "${volthaHelmChartsChange}",
+ ])
+ }
+ }
+ stage ("Parse deployment configuration file") {
+ steps {
+ sh returnStdout: true, script: "rm -rf ${configBaseDir}"
+ sh returnStdout: true, script: "git clone -b master ${cordRepoUrl}/${configBaseDir}"
+ script {
+ if ( params.workFlow == "DT" ) {
+ deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
+ }
+ else if ( params.workFlow == "TT" )
+ {
+ deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
+ }
+ else
+ {
+ deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
+ }
+ }
+ }
+ }
+ stage('Clean up') {
+ steps {
+ timeout(15) {
+ script {
+ helmTeardown(["default", infraNamespace, volthaNamespace])
+ }
+ timeout(1) {
+ sh returnStdout: false, script: '''
+ # remove orphaned port-forward from different namespaces
+ ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
+ '''
+ }
+ }
+ }
+ }
+ stage('Install Voltha') {
+ steps {
+ timeout(20) {
+ script {
+ // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
+ def localCharts = false
+ if (volthaHelmChartsChange != "") {
+ localCharts = true
+ }
+
+ // should the config file be suffixed with the workflow? see "deployment_config"
+ def extraHelmFlags = "-f $WORKSPACE/${configBaseDir}/${configKubernetesDir}/voltha/${configFileName}.yml --set global.log_level=${logLevel} "
+
+ if (workFlow.toLowerCase() == "dt") {
+ extraHelmFlags += " --set radius.enabled=false "
+ }
+ if (workFlow.toLowerCase() == "tt") {
+ extraHelmFlags += " --set radius.enabled=false --set global.incremental_evto_update=true "
+ }
+
+ // NOTE temporary workaround expose ONOS node ports (pod-config needs to be updated to contain these values)
+ extraHelmFlags = extraHelmFlags + " --set onos-classic.onosSshPort=30115 " +
+ "--set onos-classic.onosApiPort=30120 " +
+ "--set onos-classic.onosOfPort=31653 " +
+ "--set onos-classic.individualOpenFlowNodePorts=true "
+
+ volthaDeploy([
+ workflow: workFlow.toLowerCase(),
+ extraHelmFlags: extraHelmFlags,
+ localCharts: localCharts,
+ kubeconfig: "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf",
+ onosReplica: params.NumOfOnos,
+ atomixReplica: params.NumOfAtomix,
+ // NOTE does this needs to be configured?
+ kafkaReplica: 3,
+ etcdReplica: 3,
+ bbsimReplica: 0,
+ ])
+ }
+ sh """
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG="voltha-api" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG="etcd" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd ${params.VolthaEtcdPort}:2379; done"&
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG="kafka" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
+ ps aux | grep port-forward
+ """
+ getPodsInfo("$WORKSPACE")
+ }
+ }
+ }
+ stage('Push Tech-Profile') {
+ steps {
+ script {
+ if ( params.configurePod && params.profile != "Default" ) {
+ for(int i=0; i < deployment_config.olts.size(); i++) {
+ def tech_prof_directory = "XGS-PON"
+ // If no debian package is specified we default to GPON for the ADTRAN OLT.
+ if (!deployment_config.olts[i].containsKey("oltDebVersion") || deployment_config.olts[i].oltDebVersion.contains("asgvolt64")){
+ tech_prof_directory = "GPON"
+ }
+ timeout(1) {
+ sh returnStatus: true, script: """
+ export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
+ etcd_container=\$(kubectl get pods -n ${infraNamespace} | grep etcd | awk 'NR==1{print \$1}')
+ if [[ "${workFlow}" == "TT" ]]; then
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-HSIA.json \$etcd_container:/tmp/hsia.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-VoIP.json \$etcd_container:/tmp/voip.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65'
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST.json \$etcd_container:/tmp/mcast.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66'
+ else
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/TechProfile-${profile}.json \$etcd_container:/tmp/flexpod.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64'
+ fi
+ """
+ }
+ timeout(1) {
+ sh returnStatus: true, script: """
+ export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
+ etcd_container=\$(kubectl get pods -n ${infraNamespace} | grep etcd | awk 'NR==1{print \$1}')
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'ETCDCTL_API=3 etcdctl get --prefix service/voltha/technology_profiles/${tech_prof_directory}/64'
+ """
+ }
+ }
+ }
+ }
+ }
+ }
+ stage('Push MIB templates') {
+ steps {
+ sh """
+ export KUBECONFIG=$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf
+ etcd_container=\$(kubectl get pods -n ${infraNamespace} | grep etcd | awk 'NR==1{print \$1}')
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/MIB_Alpha.json \$etcd_container:/tmp/MIB_Alpha.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Alpha.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/go_templates/BRCM/BVM4K00BRA0915-0083/5023_020O02414'
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Alpha.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/templates/BRCM/BVM4K00BRA0915-0083/5023_020O02414'
+ kubectl cp -n ${infraNamespace} $WORKSPACE/voltha-system-tests/tests/data/MIB_Scom.json \$etcd_container:/tmp/MIB_Scom.json
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Scom.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/go_templates/SCOM/Glasfaser-Modem/090140.1.0.304'
+ kubectl exec -n ${infraNamespace} -it \$etcd_container -- /bin/sh -c 'cat /tmp/MIB_Scom.json | ETCDCTL_API=3 etcdctl put service/voltha/omci_mibs/templates/SCOM/Glasfaser-Modem/090140.1.0.304'
+ """
+ }
+ }
+ stage('Push Sadis-config') {
+ steps {
+ timeout(1) {
+ sh returnStatus: true, script: """
+ if [[ "${workFlow}" == "DT" ]]; then
+ curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-DT.json
+ elif [[ "${workFlow}" == "TT" ]]; then
+ curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-TT.json
+ else
+ # this is the ATT case, rename the file in *-sadis-ATT.json so that we can avoid special cases and just load the file
+ curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis.json
+ fi
+ """
+ }
+ }
+ }
+ stage('Switch Configurations in ONOS') {
+ steps {
+ script {
+ if ( deployment_config.fabric_switches.size() > 0 ) {
+ timeout(1) {
+ def netcfg = "$WORKSPACE/${configBaseDir}/${configToscaDir}/voltha/${configFileName}-onos-netcfg-switch.json"
+ if (params.inBandManagement){
+ netcfg = "$WORKSPACE/${configBaseDir}/${configToscaDir}/voltha/${configFileName}-onos-netcfg-switch-inband.json"
+ }
+ sh """
+ curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @${netcfg}
+ curl -sSL --user karaf:karaf -X POST http://${deployment_config.nodes[0].ip}:30120/onos/v1/applications/org.onosproject.segmentrouting/active
+ """
+ }
+ timeout(1) {
+ waitUntil {
+ sr_active_out = sh returnStatus: true, script: """
+ sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.dhcpl2relay"
+ sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.aaa"
+ sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.olt"
+ sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.FlowObjectiveManager"
+ sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager"
+ curl -sSL --user karaf:karaf -X GET http://${deployment_config.nodes[0].ip}:30120/onos/v1/applications/org.onosproject.segmentrouting | jq '.state' | grep ACTIVE
+ sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled false"
+ sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.net.flow.impl.FlowRuleManager purgeOnDisconnection false"
+ sshpass -p karaf ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "cfg set org.onosproject.net.meter.impl.MeterManager purgeOnDisconnection false"
+ """
+ return sr_active_out == 0
+ }
+ }
+ timeout(5) {
+ for(int i=0; i < deployment_config.hosts.src.size(); i++) {
+ for(int j=0; j < deployment_config.olts.size(); j++) {
+ def aggPort = -1
+ if(deployment_config.olts[j].serial == deployment_config.hosts.src[i].olt){
+ aggPort = deployment_config.olts[j].aggPort
+ if(aggPort == -1){
+ throw new Exception("Upstream port for the olt is not configured, field aggPort is empty")
+ }
+ sh """
+ sleep 30 # NOTE why are we sleeping?
+ curl -X POST --user karaf:karaf --header 'Content-Type: application/json' --header 'Accept: application/json' -d '{"deviceId": "${deployment_config.fabric_switches[0].device_id}", "vlanId": "${deployment_config.hosts.src[i].s_tag}", "endpoints": [${deployment_config.fabric_switches[0].bngPort},${aggPort}]}' 'http://${deployment_config.nodes[0].ip}:30120/onos/segmentrouting/xconnect'
+ """
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ stage('Reinstall OLT software') {
+ steps {
+ script {
+ if ( params.reinstallOlt ) {
+ for(int i=0; i < deployment_config.olts.size(); i++) {
+ // NOTE what is oltDebVersion23? is that for VOLTHA-2.3? do we still need this differentiation?
+ sh returnStdout: true, script: """
+ if [[ "${branch}" != "master" ]] && [[ "${params.inBandManagement}" == "true" ]]; then
+ ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'kill -9 `pgrep -f "[b]ash /opt/openolt/openolt_dev_mgmt_daemon_process_watchdog"` || true'
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} "dpkg --install ${deployment_config.olts[i].oltDebVersion23}"
+ fi
+ if [[ "${branch}" != "master" ]] && [[ "${params.inBandManagement}" == "false" ]]; then
+ ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} "dpkg --install ${deployment_config.olts[i].oltDebVersion23}"
+ fi
+ if [[ "${branch}" == "master" ]] && [[ "${params.inBandManagement}" == "true" ]]; then
+ ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'kill -9 `pgrep -f "[b]ash /opt/openolt/openolt_dev_mgmt_daemon_process_watchdog"` || true'
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} "dpkg --install ${deployment_config.olts[i].oltDebVersion}"
+ fi
+ if [[ "${branch}" == "master" ]] && [[ "${params.inBandManagement}" == "false" ]]; then
+ ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} "dpkg --install ${deployment_config.olts[i].oltDebVersion}"
+ fi
+ sleep 10
+ """
+ timeout(5) {
+ waitUntil {
+ olt_sw_present = sh returnStdout: true, script: """
+                if [[ "${deployment_config.olts[i].oltDebVersion}" == *"asfvolt16"* ]]; then
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep asfvolt16 | wc -l'
+ else
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'dpkg --list | grep asgvolt64 | wc -l'
+ fi
+ if (${deployment_config.olts[i].fortygig}); then
+ if [[ "${params.inBandManagement}" == "true" ]]; then
+ ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'mkdir -p /opt/openolt/'
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp /root/watchdog-script/* /opt/openolt/'
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp /root/bal_cli_appl/example_user_appl /broadcom'
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'cp in-band-startup-script/* /etc/init.d/'
+ fi
+ fi
+ """
+ return olt_sw_present.toInteger() > 0
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ stage('Restart OLT processes') {
+ steps {
+ script {
+ for(int i=0; i < deployment_config.olts.size(); i++) {
+ int waitTimerForOltUp = 360
+ if ( params.inBandManagement ) {
+ waitTimerForOltUp = 540
+ }
+ timeout(15) {
+ sh returnStdout: true, script: """
+ ssh-keyscan -H ${deployment_config.olts[i].sship} >> ~/.ssh/known_hosts
+ sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; rm -f /var/log/openolt_process_watchdog.log; reboot -f > /dev/null &' || true
+ sleep ${waitTimerForOltUp}
+ """
+ }
+ timeout(15) {
+ waitUntil {
+ devprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep dev_mgmt_daemon | wc -l'"
+ return devprocess.toInteger() > 0
+ }
+ }
+ timeout(15) {
+ waitUntil {
+ openoltprocess = sh returnStdout: true, script: "sshpass -p ${deployment_config.olts[i].pass} ssh -l ${deployment_config.olts[i].user} ${deployment_config.olts[i].sship} 'ps -ef | grep openolt | wc -l'"
+ return openoltprocess.toInteger() > 0
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ post {
+ aborted {
+ getPodsInfo("$WORKSPACE/failed")
+ sh """
+ kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.log || true
+ """
+ archiveArtifacts artifacts: '**/*.log,**/*.txt'
+ }
+ failure {
+ getPodsInfo("$WORKSPACE/failed")
+ sh """
+      kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.log || true
+ """
+ archiveArtifacts artifacts: '**/*.log,**/*.txt'
+ }
+ always {
+ archiveArtifacts artifacts: '*.txt'
+ }
+ }
+}
diff --git a/jjb/pipeline/voltha/master/software-upgrades.groovy b/jjb/pipeline/voltha/master/software-upgrades.groovy
new file mode 100644
index 0000000..aca2d7f
--- /dev/null
+++ b/jjb/pipeline/voltha/master/software-upgrades.groovy
@@ -0,0 +1,247 @@
+// Copyright 2021-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// voltha-2.x e2e tests
+// uses bbsim to simulate OLT/ONUs
+// NOTE we are importing the library even if it's global so that it's
+// easier to change the keywords during a replay
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+def test_software_upgrade(name) {
+ stage('Deploy Voltha - '+ name) {
+ def extraHelmFlags = "${extraHelmFlags} --set global.log_level=DEBUG,onu=1,pon=1 --set onos-classic.replicas=3,onos-classic.atomix.replicas=3 "
+ if ("${name}" == "onos-app-upgrade" || "${name}" == "onu-software-upgrade") {
+ extraHelmFlags = extraHelmFlags + "--set global.image_tag=master --set onos-classic.image.tag=master "
+ }
+ if ("${name}" == "voltha-component-upgrade") {
+ extraHelmFlags = extraHelmFlags + "--set images.onos_config_loader.tag=master-onos-config-loader --set onos-classic.image.tag=master "
+ }
+ extraHelmFlags = extraHelmFlags + """ --set voltha.services.controller[0].service=voltha-infra-onos-classic-0.voltha-infra-onos-classic-hs.infra.svc \
+ --set voltha.services.controller[0].port=6653 \
+ --set voltha.services.controller[0].address=voltha-infra-onos-classic-0.voltha-infra-onos-classic-hs.infra.svc:6653 \
+ --set voltha.services.controller[1].service=voltha-infra-onos-classic-1.voltha-infra-onos-classic-hs.infra.svc \
+ --set voltha.services.controller[1].port=6653 \
+ --set voltha.services.controller[1].address=voltha-infra-onos-classic-1.voltha-infra-onos-classic-hs.infra.svc:6653 \
+ --set voltha.services.controller[2].service=voltha-infra-onos-classic-2.voltha-infra-onos-classic-hs.infra.svc \
+ --set voltha.services.controller[2].port=6653 \
+ --set voltha.services.controller[2].address=voltha-infra-onos-classic-2.voltha-infra-onos-classic-hs.infra.svc:6653 """
+ //ONOS custom image handling
+ if ( onosImg.trim() != '' ) {
+ String[] split;
+ onosImg = onosImg.trim()
+ split = onosImg.split(':')
+ extraHelmFlags = extraHelmFlags + "--set onos-classic.image.repository=" + split[0] +",onos-classic.image.tag=" + split[1] + " "
+ }
+ def localCharts = false
+ // Currently only testing with ATT workflow
+ // TODO: Support for other workflows
+ volthaDeploy([workflow: "att", extraHelmFlags: extraHelmFlags, localCharts: localCharts])
+ // start logging
+ sh """
+ rm -rf $WORKSPACE/${name} || true
+ mkdir -p $WORKSPACE/${name}
+ _TAG=kail-${name} kail -n infra -n voltha > $WORKSPACE/${name}/onos-voltha-combined.log &
+ """
+ // forward ONOS and VOLTHA ports
+ sh """
+ _TAG=onos-port-forward bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8101:8101; done &"
+ _TAG=onos-port-forward bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8181:8181; done &"
+ _TAG=port-forward-voltha-api bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n voltha svc/voltha-voltha-api 55555:55555; done &"
+ """
+ sh """
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 log:set DEBUG org.opencord
+ """
+ }
+ stage('Test - '+ name) {
+ sh """
+ ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/${name}"
+ mkdir -p \$ROBOT_LOGS_DIR
+ if [[ ${name} == 'onos-app-upgrade' ]]; then
+ export ONOS_APPS_UNDER_TEST+=''
+ if [ ${aaaVer.trim()} != '' ] && [ ${aaaOarUrl.trim()} != '' ]; then
+ ONOS_APPS_UNDER_TEST+="org.opencord.aaa,${aaaVer.trim()},${aaaOarUrl.trim()}*"
+ fi
+ if [ ${oltVer.trim()} != '' ] && [ ${oltOarUrl.trim()} != '' ]; then
+ ONOS_APPS_UNDER_TEST+="org.opencord.olt,${oltVer.trim()},${oltOarUrl.trim()}*"
+ fi
+ if [ ${dhcpl2relayVer.trim()} != '' ] && [ ${dhcpl2relayOarUrl.trim()} != '' ]; then
+ ONOS_APPS_UNDER_TEST+="org.opencord.dhcpl2relay,${dhcpl2relayVer.trim()},${dhcpl2relayOarUrl.trim()}*"
+ fi
+ if [ ${igmpproxyVer.trim()} != '' ] && [ ${igmpproxyOarUrl.trim()} != '' ]; then
+ ONOS_APPS_UNDER_TEST+="org.opencord.igmpproxy,${igmpproxyVer.trim()},${igmpproxyOarUrl.trim()}*"
+ fi
+ if [ ${sadisVer.trim()} != '' ] && [ ${sadisOarUrl.trim()} != '' ]; then
+ ONOS_APPS_UNDER_TEST+="org.opencord.sadis,${sadisVer.trim()},${sadisOarUrl.trim()}*"
+ fi
+ if [ ${mcastVer.trim()} != '' ] && [ ${mcastOarUrl.trim()} != '' ]; then
+ ONOS_APPS_UNDER_TEST+="org.opencord.mcast,${mcastVer.trim()},${mcastOarUrl.trim()}*"
+ fi
+ if [ ${kafkaVer.trim()} != '' ] && [ ${kafkaOarUrl.trim()} != '' ]; then
+ ONOS_APPS_UNDER_TEST+="org.opencord.kafka,${kafkaVer.trim()},${kafkaOarUrl.trim()}*"
+ fi
+ export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v onos_apps_under_test:\$ONOS_APPS_UNDER_TEST -e PowerSwitch"
+ export TARGET=onos-app-upgrade-test
+ fi
+ if [[ ${name} == 'voltha-component-upgrade' ]]; then
+ export VOLTHA_COMPS_UNDER_TEST+=''
+ if [ ${adapterOpenOltImage.trim()} != '' ]; then
+ VOLTHA_COMPS_UNDER_TEST+="adapter-open-olt,adapter-open-olt,${adapterOpenOltImage.trim()}*"
+ fi
+ if [ ${adapterOpenOnuImage.trim()} != '' ]; then
+ VOLTHA_COMPS_UNDER_TEST+="adapter-open-onu,adapter-open-onu,${adapterOpenOnuImage.trim()}*"
+ fi
+ if [ ${rwCoreImage.trim()} != '' ]; then
+ VOLTHA_COMPS_UNDER_TEST+="rw-core,voltha,${rwCoreImage.trim()}*"
+ fi
+ if [ ${ofAgentImage.trim()} != '' ]; then
+ VOLTHA_COMPS_UNDER_TEST+="ofagent,ofagent,${ofAgentImage.trim()}*"
+ fi
+ export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v voltha_comps_under_test:\$VOLTHA_COMPS_UNDER_TEST -e PowerSwitch"
+ export TARGET=voltha-comp-upgrade-test
+ fi
+ if [[ ${name} == 'onu-software-upgrade' ]]; then
+ export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v onu_image_name:${onuImageName.trim()} -v onu_image_url:${onuImageUrl.trim()} -v onu_image_version:${onuImageVersion.trim()} -v onu_image_crc:${onuImageCrc.trim()} -v onu_image_local_dir:${onuImageLocalDir.trim()} -e PowerSwitch"
+ export TARGET=onu-upgrade-test
+ fi
+ export VOLTCONFIG=$HOME/.volt/config-minimal
+ export KUBECONFIG=$HOME/.kube/kind-config-voltha-minimal
+ # Run the specified tests
+ make -C $WORKSPACE/voltha-system-tests \$TARGET || true
+ """
+ // stop logging
+ sh """
+ P_IDS="\$(ps e -ww -A | grep "_TAG=kail-${name}" | grep -v grep | awk '{print \$1}')"
+ if [ -n "\$P_IDS" ]; then
+ echo \$P_IDS
+ for P_ID in \$P_IDS; do
+ kill -9 \$P_ID
+ done
+ fi
+ """
+ // remove port-forwarding
+ sh """
+ # remove orphaned port-forward from different namespaces
+ ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
+ """
+ // collect pod details
+ get_pods_info("$WORKSPACE/${name}")
+ helmTeardown(['infra', 'voltha'])
+ }
+}
+def get_pods_info(dest) {
+ // collect pod details, this is here in case of failure
+ sh """
+ mkdir -p ${dest} || true
+ kubectl get pods --all-namespaces -o wide > ${dest}/pods.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee ${dest}/pod-images.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee ${dest}/pod-imagesId.txt || true
+ kubectl describe pods --all-namespaces -l app.kubernetes.io/part-of=voltha > ${dest}/voltha-pods-describe.txt
+ kubectl describe pods -n infra -l app=onos-classic > ${dest}/onos-pods-describe.txt
+ helm ls --all-namespaces > ${dest}/helm-charts.txt
+ """
+ sh '''
+ # copy the ONOS logs directly from the container to avoid the color codes
+ printf '%s\\n' $(kubectl get pods -n infra -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c 'kubectl -n infra cp #:apache-karaf-4.2.9/data/log/karaf.log ''' + dest + '''/#.log' || true
+ '''
+}
+pipeline {
+ /* no label, executor is determined by JJB */
+ agent {
+ label "${params.buildNode}"
+ }
+ options {
+ timeout(time: 40, unit: 'MINUTES')
+ }
+ environment {
+ PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
+ KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
+ SSHPASS="karaf"
+ }
+ stages{
+ stage('Download Code') {
+ steps {
+ getVolthaCode([
+ branch: "${branch}",
+ volthaSystemTestsChange: "${volthaSystemTestsChange}",
+ volthaHelmChartsChange: "${volthaHelmChartsChange}",
+ ])
+ }
+ }
+ stage('Cleanup') {
+ steps {
+ // remove port-forwarding
+ sh """
+ # remove orphaned port-forward from different namespaces
+ ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
+ """
+ helmTeardown(['infra', 'voltha'])
+ }
+ }
+ stage('Install latest voltctl') {
+ steps {
+ sh """
+ mkdir -p $WORKSPACE/bin || true
+ # install voltctl
+ HOSTOS="\$(uname -s | tr "[:upper:]" "[:lower:]")"
+ HOSTARCH="\$(uname -m | tr "[:upper:]" "[:lower:]")"
+ if [ "\$HOSTARCH" == "x86_64" ]; then
+ HOSTARCH="amd64"
+ fi
+ VC_VERSION="\$(curl --fail -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')"
+ curl -Lo $WORKSPACE/bin/voltctl https://github.com/opencord/voltctl/releases/download/v\$VC_VERSION/voltctl-\$VC_VERSION-\$HOSTOS-\$HOSTARCH
+ chmod +x $WORKSPACE/bin/voltctl
+ """
+ }
+ }
+ stage('Create K8s Cluster') {
+ steps {
+ createKubernetesCluster([nodes: 3])
+ }
+ }
+ stage('Run Test') {
+ steps {
+ test_software_upgrade("onos-app-upgrade")
+ test_software_upgrade("voltha-component-upgrade")
+ test_software_upgrade("onu-software-upgrade")
+ }
+ }
+ }
+ post {
+ aborted {
+ get_pods_info("$WORKSPACE/failed")
+ }
+ failure {
+ get_pods_info("$WORKSPACE/failed")
+ }
+ always {
+ sh '''
+ gzip $WORKSPACE/onos-app-upgrade/onos-voltha-combined.log || true
+ gzip $WORKSPACE/voltha-component-upgrade/onos-voltha-combined.log || true
+ gzip $WORKSPACE/onu-software-upgrade/onos-voltha-combined.log || true
+ '''
+ step([$class: 'RobotPublisher',
+ disableArchiveOutput: false,
+ logFileName: 'RobotLogs/*/log*.html',
+ otherFiles: '',
+ outputFileName: 'RobotLogs/*/output*.xml',
+ outputPath: '.',
+ passThreshold: 100,
+ reportFileName: 'RobotLogs/*/report*.html',
+ unstableThreshold: 0]);
+ archiveArtifacts artifacts: '*.log,**/*.log,**/*.gz,*.gz,*.txt,**/*.txt'
+ }
+ }
+}
diff --git a/jjb/pipeline/voltha/master/tucson-build-and-test.groovy b/jjb/pipeline/voltha/master/tucson-build-and-test.groovy
new file mode 100644
index 0000000..e6e1bbd
--- /dev/null
+++ b/jjb/pipeline/voltha/master/tucson-build-and-test.groovy
@@ -0,0 +1,312 @@
+
+// Copyright 2017-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// used to deploy VOLTHA and configure ONOS physical PODs
+// NOTE we are importing the library even if it's global so that it's
+// easier to change the keywords during a replay
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+def infraNamespace = "infra"
+def volthaNamespace = "voltha"
+def clusterName = "kind-ci"
+pipeline {
+ /* no label, executor is determined by JJB */
+ agent {
+ label "${params.buildNode}"
+ }
+ options {
+ timeout(time: 120, unit: 'MINUTES')
+ }
+ environment {
+ PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
+ KUBECONFIG="$HOME/.kube/kind-${clusterName}"
+ VOLTCONFIG="$HOME/.volt/config"
+ }
+ stages{
+ stage('Download Code') {
+ steps {
+ getVolthaCode([
+ branch: "${branch}",
+ gerritProject: "${gerritProject}",
+ gerritRefspec: "${gerritRefspec}",
+ volthaSystemTestsChange: "${volthaSystemTestsChange}",
+ volthaHelmChartsChange: "${volthaHelmChartsChange}",
+ ])
+ }
+ }
+ stage ("Parse deployment configuration file") {
+ steps {
+ sh returnStdout: true, script: "rm -rf ${configBaseDir}"
+ sh returnStdout: true, script: "git clone -b master ${cordRepoUrl}/${configBaseDir}"
+ script {
+ if ( params.workflow.toUpperCase() == "DT" ) {
+ deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
+ }
+ else if ( params.workflow.toUpperCase() == "TT" ) {
+ deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
+ }
+ else {
+ deployment_config = readYaml file: "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
+ }
+ }
+ }
+ }
+ stage('Clean up') {
+ steps {
+ timeout(15) {
+ script {
+ helmTeardown(["default", infraNamespace, volthaNamespace])
+ }
+ timeout(1) {
+ sh returnStdout: false, script: '''
+ # remove orphaned port-forward from different namespaces
+ ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
+ '''
+ }
+ }
+ }
+ }
+ stage('Build patch') {
+ steps {
+ // NOTE that the correct patch has already been checked out
+ // during the getVolthaCode step
+ buildVolthaComponent("${gerritProject}")
+ }
+ }
+ stage('Create K8s Cluster') {
+ steps {
+ script {
+ def clusterExists = sh returnStdout: true, script: """
+ kind get clusters | grep ${clusterName} | wc -l
+ """
+ if (clusterExists.trim() == "0") {
+ createKubernetesCluster([nodes: 3, name: clusterName])
+ }
+ }
+ }
+ }
+ stage('Load image in kind nodes') {
+ steps {
+ loadToKind()
+ }
+ }
+ stage('Install Voltha') {
+ steps {
+ timeout(20) {
+ script {
+ imageFlags = getVolthaImageFlags(gerritProject)
+ // if we're downloading a voltha-helm-charts patch, then install from a local copy of the charts
+ def localCharts = false
+ if (volthaHelmChartsChange != "" || gerritProject == "voltha-helm-charts") {
+ localCharts = true
+ }
+ def flags = "-f $WORKSPACE/${configBaseDir}/${configKubernetesDir}/voltha/${configFileName}.yml ${imageFlags} "
+ // NOTE temporary workaround expose ONOS node ports (pod-config needs to be updated to contain these values)
+ flags = flags + "--set onos-classic.onosSshPort=30115 " +
+ "--set onos-classic.onosApiPort=30120 " +
+ "--set onos-classic.onosOfPort=31653 " +
+ "--set onos-classic.individualOpenFlowNodePorts=true " + extraHelmFlags
+ volthaDeploy([
+ workflow: workFlow.toLowerCase(),
+ extraHelmFlags: flags,
+ localCharts: localCharts,
+ kubeconfig: "$WORKSPACE/${configBaseDir}/${configKubernetesDir}/${configFileName}.conf",
+ onosReplica: 3,
+ atomixReplica: 3,
+ kafkaReplica: 3,
+ etcdReplica: 3,
+ ])
+ }
+ // start logging
+ sh """
+ mkdir -p $WORKSPACE/${workFlow}
+ _TAG=kail-${workFlow} kail -n infra -n voltha > $WORKSPACE/${workFlow}/onos-voltha-combined.log &
+ """
+ sh """
+ JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${volthaNamespace} svc/voltha-voltha-api 55555:55555; done"&
+ JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-etcd 2379:2379; done"&
+ JENKINS_NODE_COOKIE="dontKillMe" bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n ${infraNamespace} svc/voltha-infra-kafka 9092:9092; done"&
+ ps aux | grep port-forward
+ """
+ getPodsInfo("$WORKSPACE")
+ }
+ }
+ }
+ stage('Deploy Kafka Dump Chart') {
+ steps {
+ script {
+ sh returnStdout: false, script: """
+ helm repo add cord https://charts.opencord.org
+ helm repo update
+ if helm version -c --short|grep v2 -q; then
+ helm install -n voltha-kafka-dump cord/voltha-kafka-dump
+ else
+ helm install voltha-kafka-dump cord/voltha-kafka-dump
+ fi
+ """
+ }
+ }
+ }
+ stage('Push Tech-Profile') {
+ when {
+ expression { params.profile != "Default" }
+ }
+ steps {
+ sh returnStdout: false, script: """
+ etcd_container=\$(kubectl get pods -n voltha | grep voltha-etcd-cluster | awk 'NR==1{print \$1}')
+ kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-${profile}.json voltha/\$etcd_container:/tmp/flexpod.json
+ kubectl exec -it \$etcd_container -n voltha -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/XGS-PON/64'
+ """
+ }
+ }
+
+ stage('Push Sadis-config') {
+ steps {
+ sh returnStdout: false, script: """
+ ssh-keygen -R [${deployment_config.nodes[0].ip}]:30115
+ ssh-keyscan -p 30115 -H ${deployment_config.nodes[0].ip} >> ~/.ssh/known_hosts
+ sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.dhcpl2relay"
+ sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.aaa"
+ sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set TRACE org.opencord.olt"
+ sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.FlowObjectiveManager"
+ sshpass -p karaf ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@${deployment_config.nodes[0].ip} "log:set DEBUG org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager"
+
+ if [[ "${workFlow.toUpperCase()}" == "DT" ]]; then
+ curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-DT.json
+ elif [[ "${workFlow.toUpperCase()}" == "TT" ]]; then
+ curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis-TT.json
+ else
+ # this is the ATT case, rename the file in *-sadis-ATT.json so that we can avoid special cases and just load the file
+ curl -sSL --user karaf:karaf -X POST -H Content-Type:application/json http://${deployment_config.nodes[0].ip}:30120/onos/v1/network/configuration --data @$WORKSPACE/voltha-system-tests/tests/data/${configFileName}-sadis.json
+ fi
+ """
+ }
+ }
+ stage('Reinstall OLT software') {
+ when {
+ expression { params.reinstallOlt }
+ }
+ steps {
+ script {
+ deployment_config.olts.each { olt ->
+ sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --remove asfvolt16 && dpkg --purge asfvolt16'"
+ waitUntil {
+ olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
+ return olt_sw_present.toInteger() == 0
+ }
+ if ( params.branch == 'voltha-2.3' ) {
+ oltDebVersion = oltDebVersionVoltha23
+ } else {
+ oltDebVersion = oltDebVersionMaster
+ }
+ sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --install ${oltDebVersion}'"
+ waitUntil {
+ olt_sw_present = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'dpkg --list | grep asfvolt16 | wc -l'"
+ return olt_sw_present.toInteger() == 1
+ }
+ if ( olt.fortygig ) {
+ // If the OLT is connected to a 40G switch interface, set the NNI port to be downgraded
+ sh returnStdout: false, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'echo port ce128 sp=40000 >> /broadcom/qax.soc ; /opt/bcm68620/svk_init.sh'"
+ }
+ }
+ }
+ }
+ }
+
+ stage('Restart OLT processes') {
+ steps {
+ script {
+ deployment_config.olts.each { olt ->
+ sh returnStdout: false, script: """
+ ssh-keyscan -H ${olt.ip} >> ~/.ssh/known_hosts
+ sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'rm -f /var/log/openolt.log; rm -f /var/log/dev_mgmt_daemon.log; reboot'
+ sleep 120
+ """
+ waitUntil {
+ onu_discovered = sh returnStdout: true, script: "sshpass -p ${olt.pass} ssh -l ${olt.user} ${olt.ip} 'grep \"onu discover indication\" /var/log/openolt.log | wc -l'"
+ return onu_discovered.toInteger() > 0
+ }
+ }
+ }
+ }
+ }
+ stage('Run E2E Tests') {
+ steps {
+ script {
+ if ( params.workflow.toUpperCase() == "DT" ) {
+ robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}-DT.yaml"
+ }
+ else if ( params.workflow.toUpperCase() == "TT" ) {
+ robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}-TT.yaml"
+ }
+ else {
+ robotConfigFile = "${configBaseDir}/${configDeploymentDir}/${configFileName}.yaml"
+ }
+ }
+ sh returnStdout: false, script: """
+ mkdir -p $WORKSPACE/RobotLogs
+
+ export ROBOT_CONFIG_FILE="$WORKSPACE/${robotConfigFile}"
+ export ROBOT_MISC_ARGS="${params.extraRobotArgs} --removekeywords wuks -d $WORKSPACE/RobotLogs -v container_log_dir:$WORKSPACE "
+ export ROBOT_FILE="Voltha_PODTests.robot"
+
+ # If the Gerrit comment contains a line with "functional tests" then run the full
+ # functional test suite. This covers tests tagged either 'sanity' or 'functional'.
+ # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
+ REGEX="functional tests"
+ if [[ "${gerritComment}" =~ \$REGEX ]]; then
+ ROBOT_MISC_ARGS+="-i functional"
+ fi
+ # Likewise for dataplane tests
+ REGEX="dataplane tests"
+ if [[ "${gerritComment}" =~ \$REGEX ]]; then
+ ROBOT_MISC_ARGS+="-i dataplane"
+ fi
+
+ make -C $WORKSPACE/voltha-system-tests voltha-test || true
+ """
+ }
+ }
+ }
+ post {
+ always {
+ // stop logging
+ sh """
+ P_IDS="\$(ps e -ww -A | grep "_TAG=kail-${workFlow}" | grep -v grep | awk '{print \$1}')"
+ if [ -n "\$P_IDS" ]; then
+ echo \$P_IDS
+ for P_ID in \$P_IDS; do
+ kill -9 \$P_ID
+ done
+ fi
+ gzip $WORKSPACE/${workFlow}/onos-voltha-combined.log || true
+ """
+ step([$class: 'RobotPublisher',
+ disableArchiveOutput: false,
+ logFileName: 'RobotLogs/log*.html',
+ otherFiles: '',
+ outputFileName: 'RobotLogs/output*.xml',
+ outputPath: '.',
+ passThreshold: 100,
+ reportFileName: 'RobotLogs/report*.html',
+ unstableThreshold: 0]);
+ archiveArtifacts artifacts: '**/*.txt,**/*.gz,*.gz'
+ }
+ }
+}
+
+// refs/changes/06/24206/5
diff --git a/jjb/pipeline/voltha/master/voltha-scale-multi-stack.groovy b/jjb/pipeline/voltha/master/voltha-scale-multi-stack.groovy
new file mode 100644
index 0000000..fb5e75c
--- /dev/null
+++ b/jjb/pipeline/voltha/master/voltha-scale-multi-stack.groovy
@@ -0,0 +1,463 @@
+// Copyright 2019-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// deploy VOLTHA using kind-voltha and performs a scale test
+
+// NOTE we are importing the library even if it's global so that it's
+// easier to change the keywords during a replay
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+
+def ofAgentConnections(numOfOnos, releaseName, namespace) {
+ def params = " "
+ numOfOnos.times {
+ params += "--set voltha.services.controller[${it}].address=${releaseName}-onos-classic-${it}.${releaseName}-onos-classic-hs.${namespace}.svc:6653 "
+ }
+ return params
+}
+
+pipeline {
+
+ /* no label, executor is determined by JJB */
+ agent {
+ label "${params.buildNode}"
+ }
+ options {
+ timeout(time: 120, unit: 'MINUTES')
+ }
+ environment {
+ JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
+ KUBECONFIG="$HOME/.kube/config"
+ SSHPASS="karaf"
+ PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
+
+ APPS_TO_LOG="etcd kafka onos-onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server"
+ LOG_FOLDER="$WORKSPACE/logs"
+ }
+
+ stages {
+ stage ('Cleanup') {
+ steps {
+ timeout(time: 11, unit: 'MINUTES') {
+ sh """
+ # remove orphaned port-forward from different namespaces
+ ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
+ """
+ script {
+ def namespaces = ["infra"]
+ // FIXME we may have leftovers from more VOLTHA stacks (eg: run1 had 10 stacks, run2 had 2 stacks)
+ volthaStacks.toInteger().times {
+ namespaces += "voltha${it + 1}"
+ }
+ helmTeardown(namespaces)
+ }
+ sh returnStdout: false, script: """
+ helm repo add onf https://charts.opencord.org
+ helm repo add cord https://charts.opencord.org
+ helm repo update
+
+ # remove all port-forward from different namespaces
+ ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
+ """
+ }
+ }
+ }
+ stage('Download Code') {
+ steps {
+ getVolthaCode([
+ branch: "${release}",
+ volthaSystemTestsChange: "${volthaSystemTestsChange}",
+ //volthaHelmChartsChange: "${volthaHelmChartsChange}",
+ ])
+ }
+ }
+ stage('Deploy common infrastructure') {
+ // includes monitoring
+ steps {
+ sh '''
+ if [ ${withMonitoring} = true ] ; then
+ helm install -n infra nem-monitoring cord/nem-monitoring \
+ -f $HOME/voltha-scale/grafana.yaml \
+ --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
+ --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
+ fi
+ '''
+ }
+ }
+ stage('Deploy VOLTHA infrastructure') {
+ steps {
+ sh returnStdout: false, script: '''
+
+ helm install kafka -n infra $HOME/teone/helm-charts/kafka --set replicaCount=${kafkaReplicas},replicas=${kafkaReplicas} --set persistence.enabled=false \
+ --set zookeeper.replicaCount=${kafkaReplicas} --set zookeeper.persistence.enabled=false \
+ --set prometheus.kafka.enabled=true,prometheus.operator.enabled=true,prometheus.jmx.enabled=true,prometheus.operator.serviceMonitor.namespace=default
+
+ # the ETCD chart use "auth" for resons different than BBsim, so strip that away
+ ETCD_FLAGS=$(echo ${extraHelmFlags} | sed -e 's/--set auth=false / /g' -e 's/--set auth=true / /g')
+ ETCD_FLAGS+=" --set auth.rbac.enabled=false,persistence.enabled=false,statefulset.replicaCount=${etcdReplicas}"
+ ETCD_FLAGS+=" --set memoryMode=${inMemoryEtcdStorage} "
+ helm install -n infra --set replicas=${etcdReplicas} etcd $HOME/teone/helm-charts/etcd $ETCD_FLAGS
+
+ helm upgrade --install -n infra voltha-infra onf/voltha-infra \
+ -f $WORKSPACE/voltha-helm-charts/examples/${workflow}-values.yaml \
+ --set onos-classic.replicas=${onosReplicas},onos-classic.atomix.replicas=${atomixReplicas} \
+ --set radius.enabled=${withEapol} \
+ --set kafka.enabled=false \
+ --set etcd.enabled=false
+ '''
+ }
+ }
+ stage('Deploy Voltha') {
+ steps {
+ deploy_voltha_stacks(params.volthaStacks)
+ }
+ }
+ stage('Start logging') {
+ steps {
+ sh returnStdout: false, script: '''
+ # start logging with kail
+
+ mkdir -p $LOG_FOLDER
+
+ list=($APPS_TO_LOG)
+ for app in "${list[@]}"
+ do
+ echo "Starting logs for: ${app}"
+ _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
+ done
+ '''
+ }
+ }
+ stage('Configuration') {
+ steps {
+ script {
+ sh returnStdout: false, script: """
+
+ # forward ETCD port
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG=etcd-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/etcd 9999:2379; done 2>&1 " &
+
+ # forward ONOS ports
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG=onos-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/voltha-infra-onos-classic-hs 8101:8101; done 2>&1 " &
+ JENKINS_NODE_COOKIE="dontKillMe" _TAG=onos-port-forward /bin/bash -c "while true; do kubectl -n infra port-forward --address 0.0.0.0 service/voltha-infra-onos-classic-hs 8181:8181; done 2>&1 " &
+
+ # make sure the the port-forward has started before moving forward
+ sleep 5
+ """
+ sh returnStdout: false, script: """
+ # TODO this needs to be repeated per stack
+ # kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false
+
+ #Setting link discovery
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 900
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.opencord.olt.impl.Olt provisionDelay 1000
+
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.onosproject
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg log:set ${logLevel} org.opencord
+
+ # Set Flows/Ports/Meters poll frequency
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
+
+ if [ ${withFlows} = false ]; then
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 app deactivate org.opencord.olt
+ fi
+ """
+ }
+ }
+ }
+ stage('Setup Test') {
+ steps {
+ sh '''
+ mkdir -p $WORKSPACE/RobotLogs
+ cd $WORKSPACE/voltha-system-tests
+ make vst_venv
+ '''
+ }
+ }
+ stage('Run Test') {
+ steps {
+ test_voltha_stacks(params.volthaStacks)
+ }
+ }
+ }
+ post {
+ always {
+ // collect result, done in the "post" step so it's executed even in the
+ // event of a timeout in the tests
+ sh '''
+
+ # stop the kail processes
+ list=($APPS_TO_LOG)
+ for app in "${list[@]}"
+ do
+ echo "Stopping logs for: ${app}"
+ _TAG="kail-$app"
+ P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
+ if [ -n "$P_IDS" ]; then
+ echo $P_IDS
+ for P_ID in $P_IDS; do
+ kill -9 $P_ID
+ done
+ fi
+ done
+ '''
+ // compressing the logs to save space on Jenkins
+ sh '''
+ cd $LOG_FOLDER
+ tar -czf logs.tar.gz *.log
+ rm *.log
+ '''
+ plot([
+ csvFileName: 'scale-test.csv',
+ csvSeries: [
+ [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ ],
+ group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (Stacks: ${params.volthaStacks}, OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
+ ])
+ step([$class: 'RobotPublisher',
+ disableArchiveOutput: false,
+ logFileName: 'RobotLogs/**/log.html',
+ otherFiles: '',
+ outputFileName: 'RobotLogs/**/output.xml',
+ outputPath: '.',
+ passThreshold: 100,
+ reportFileName: 'RobotLogs/**/report.html',
+ unstableThreshold: 0]);
+ // get all the logs from kubernetes PODs
+ sh returnStdout: false, script: '''
+
+ # store information on running charts
+ helm ls --all-namespaces > $LOG_FOLDER/helm-list.txt || true
+
+ # store information on the running pods
+ kubectl get pods --all-namespaces -o wide > $LOG_FOLDER/pods.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true
+
+ # copy the ONOS logs directly from the container to avoid the color codes
+ printf '%s\n' $(kubectl -n \$INFRA_NS get pods -l app=onos-onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl -n \$INFRA_NS cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
+
+ # get radius logs out of the container
+            kubectl -n \$INFRA_NS cp $(kubectl -n \$INFRA_NS get pods -l app=radius --no-headers | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER/radius.log || true
+ '''
+ // dump all the BBSim(s) ONU information
+ script {
+ for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
+ stack_ns="voltha"+i
+ sh """
+ BBSIM_IDS=\$(kubectl -n ${stack_ns} get pods | grep bbsim | grep -v server | awk '{print \$1}')
+ IDS=(\$BBSIM_IDS)
+
+ for bbsim in "\${IDS[@]}"
+ do
+ kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl onu list > $LOG_FOLDER/${stack_ns}/\$bbsim-device-list.txt || true
+ kubectl -n ${stack_ns} exec -t \$bbsim -- bbsimctl service list > $LOG_FOLDER/${stack_ns}/\$bbsim-service-list.txt || true
+ done
+ """
+ }
+ }
+ // get ONOS debug infos
+ sh '''
+
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt || true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt || true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt || true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt || true
+
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt || true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt || true
+
+ if [ ${withFlows} = true ] ; then
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt || true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt || true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt || true
+ fi
+
+ if [ ${provisionSubscribers} = true ]; then
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt || true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt || true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt || true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt || true
+ fi
+
+ if [ ${withEapol} = true ] ; then
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt || true
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt || true
+ fi
+
+ if [ ${withDhcp} = true ] ; then
+ sshpass -e ssh -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt || true
+ fi
+ '''
+ // collect etcd metrics
+ sh '''
+ mkdir -p $WORKSPACE/etcd-metrics
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
+ '''
+ // get VOLTHA debug infos
+ script {
+ for (int i = 1; i <= params.volthaStacks.toInteger(); i++) {
+ stack_ns="voltha"+i
+ voltcfg="~/.volt/config-voltha"+i
+ try {
+ sh """
+
+            _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555 > /dev/null 2>&1 &
+
+ voltctl -m 8MB device list -o json > $LOG_FOLDER/${stack_ns}/device-list.json || true
+ python -m json.tool $LOG_FOLDER/${stack_ns}/device-list.json > $LOG_FOLDER/${stack_ns}/voltha-devices-list.json || true
+ rm $LOG_FOLDER/${stack_ns}/device-list.json || true
+ voltctl -m 8MB device list > $LOG_FOLDER/${stack_ns}/voltha-devices-list.txt || true
+
+ DEVICE_LIST=
+            printf '%s\n' \$(voltctl -m 8MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device flows # > $LOG_FOLDER/${stack_ns}/voltha-device-flows-#.txt" || true
+ printf '%s\n' \$(voltctl -m 8MB device list | grep olt | awk '{print \$1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/${stack_ns}/voltha-device-ports-#.txt" || true
+
+ printf '%s\n' \$(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice flows # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-flows-#.txt" || true
+ printf '%s\n' \$(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice port list # > $LOG_FOLDER/${stack_ns}/voltha-logicaldevice-ports-#.txt" || true
+
+ # remove VOLTHA port-forward
+ ps aux | grep port-forw | grep voltha-api | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
+ """
+ } catch(e) {
+ sh '''
+              echo "Can't get device list from voltctl"
+ '''
+ }
+ }
+ }
+ // get cpu usage by container
+ sh '''
+ if [ ${withMonitoring} = true ] ; then
+ cd $WORKSPACE/voltha-system-tests
+ source ./vst_venv/bin/activate
+ sleep 60 # we have to wait for prometheus to collect all the information
+ python tests/scale/sizing.py -o $WORKSPACE/plots || true
+ fi
+ '''
+ archiveArtifacts artifacts: 'kind-voltha/install-*.log,execution-time-*.txt,logs/**/*.txt,logs/**/*.tar.gz,RobotLogs/**/*,plots/*,etcd-metrics/*'
+ }
+ }
+}
+
+def deploy_voltha_stacks(numberOfStacks) {
+ for (int i = 1; i <= numberOfStacks.toInteger(); i++) {
+ stage("Deploy VOLTHA stack " + i) {
+ // ${logLevel}
+ def extraHelmFlags = "${extraHelmFlags} --set global.log_level=${logLevel},enablePerf=true,onu=${onus},pon=${pons} "
+ extraHelmFlags += " --set securityContext.enabled=false,atomix.persistence.enabled=false "
+
+ // FIXME having to set all of these values is annoying, is there a better solution?
+ def volthaHelmFlags = extraHelmFlags +
+ "--set voltha.services.kafka.adapter.address=kafka.infra.svc:9092 " +
+ "--set voltha.services.kafka.cluster.address=kafka.infra.svc:9092 " +
+ "--set voltha.services.etcd.address=etcd.infra.svc:2379 " +
+ "--set voltha-adapter-openolt.services.kafka.adapter.address=kafka.infra.svc:9092 " +
+ "--set voltha-adapter-openolt.services.kafka.cluster.address=kafka.infra.svc:9092 " +
+ "--set voltha-adapter-openolt.services.etcd.address=etcd.infra.svc:2379 " +
+ "--set voltha-adapter-openonu.services.kafka.adapter.address=kafka.infra.svc:9092 " +
+ "--set voltha-adapter-openonu.services.kafka.cluster.address=kafka.infra.svc:9092 " +
+ "--set voltha-adapter-openonu.services.etcd.address=etcd.infra.svc:2379" +
+ ofAgentConnections(onosReplicas.toInteger(), "voltha-infra", "infra")
+
+ volthaStackDeploy([
+ bbsimReplica: olts.toInteger(),
+ infraNamespace: "infra",
+ volthaNamespace: "voltha${i}",
+ stackName: "voltha${i}",
+ stackId: i,
+ workflow: workflow,
+ extraHelmFlags: volthaHelmFlags
+ ])
+ }
+ }
+}
+
+def test_voltha_stacks(numberOfStacks) {
+ for (int i = 1; i <= numberOfStacks.toInteger(); i++) {
+ stage("Test VOLTHA stack " + i) {
+ timeout(time: 15, unit: 'MINUTES') {
+ sh """
+
+ # we are restarting the voltha-api port-forward for each stack, no need to have a different voltconfig file
+ voltctl -s 127.0.0.1:55555 config > $HOME/.volt/config
+ export VOLTCONFIG=$HOME/.volt/config
+
+          _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha${i} svc/voltha${i}-voltha-api 55555:55555 > /dev/null 2>&1 &
+
+ ROBOT_PARAMS="-v stackId:${i} \
+ -v olt:${olts} \
+ -v pon:${pons} \
+ -v onu:${onus} \
+ -v workflow:${workflow} \
+ -v withEapol:${withEapol} \
+ -v withDhcp:${withDhcp} \
+ -v withIgmp:${withIgmp} \
+ --noncritical non-critical \
+ -e igmp \
+ -e teardown "
+
+ if [ ${withEapol} = false ] ; then
+ ROBOT_PARAMS+="-e authentication "
+ fi
+
+ if [ ${withDhcp} = false ] ; then
+ ROBOT_PARAMS+="-e dhcp "
+ fi
+
+ if [ ${provisionSubscribers} = false ] ; then
+ # if we're not considering subscribers then we don't care about authentication and dhcp
+ ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
+ fi
+
+ if [ ${withFlows} = false ] ; then
+ ROBOT_PARAMS+="-i setup -i activation "
+ fi
+
+ cd $WORKSPACE/voltha-system-tests
+ source ./vst_venv/bin/activate
+ robot -d $WORKSPACE/RobotLogs/voltha${i} \
+ \$ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
+
+ # collect results
+ python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/voltha${i}/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time-voltha${i}.txt || true
+ cat $WORKSPACE/execution-time-voltha${i}.txt
+ """
+ sh """
+ # remove VOLTHA port-forward
+ ps aux | grep port-forw | grep voltha-api | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 2>&1 > /dev/null || true
+ """
+ }
+ }
+ }
+}
diff --git a/jjb/pipeline/voltha/master/voltha-scale-test.groovy b/jjb/pipeline/voltha/master/voltha-scale-test.groovy
new file mode 100644
index 0000000..35604f8
--- /dev/null
+++ b/jjb/pipeline/voltha/master/voltha-scale-test.groovy
@@ -0,0 +1,773 @@
+// Copyright 2019-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// deploy VOLTHA and performs a scale test
+
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+
+// this function generates the correct parameters for ofAgent
+// to connect to multiple ONOS instances
+def ofAgentConnections(numOfOnos, releaseName, namespace) {
+ def params = " "
+ numOfOnos.times {
+ params += "--set voltha.services.controller[${it}].address=${releaseName}-onos-classic-${it}.${releaseName}-onos-classic-hs.${namespace}.svc:6653 "
+ }
+ return params
+}
+
+pipeline {
+
+ /* no label, executor is determined by JJB */
+ agent {
+ label "${params.buildNode}"
+ }
+ options {
+ timeout(time: 60, unit: 'MINUTES')
+ }
+ environment {
+ JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
+ KUBECONFIG="$HOME/.kube/config"
+ VOLTCONFIG="$HOME/.volt/config"
+ SSHPASS="karaf"
+ VOLTHA_LOG_LEVEL="${logLevel}"
+ NUM_OF_BBSIM="${olts}"
+ NUM_OF_OPENONU="${openonuAdapterReplicas}"
+ NUM_OF_ONOS="${onosReplicas}"
+ NUM_OF_ATOMIX="${atomixReplicas}"
+ EXTRA_HELM_FLAGS=" "
+
+ APPS_TO_LOG="etcd kafka onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server onos-config-loader"
+ LOG_FOLDER="$WORKSPACE/logs"
+
+ GERRIT_PROJECT="${GERRIT_PROJECT}"
+ }
+
+ stages {
+ stage ('Cleanup') {
+ steps {
+ timeout(time: 11, unit: 'MINUTES') {
+ script {
+ helmTeardown(["default"])
+ }
+ sh returnStdout: false, script: '''
+ helm repo add onf https://charts.opencord.org
+ helm repo update
+
+ # remove orphaned port-forward from different namespaces
+ ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
+
+ cd $WORKSPACE
+ rm -rf $WORKSPACE/*
+ '''
+ }
+ }
+ }
+ stage('Download Code') {
+ steps {
+ getVolthaCode([
+ branch: "${release}",
+ volthaSystemTestsChange: "${volthaSystemTestsChange}",
+ volthaHelmChartsChange: "${volthaHelmChartsChange}",
+ ])
+ }
+ }
+ stage('Build patch') {
+ when {
+ expression {
+ return params.GERRIT_PROJECT
+ }
+ }
+ steps {
+ sh """
+ git clone https://\$GERRIT_HOST/\$GERRIT_PROJECT
+ cd \$GERRIT_PROJECT
+ git fetch https://\$GERRIT_HOST/\$GERRIT_PROJECT \$GERRIT_REFSPEC && git checkout FETCH_HEAD
+
+ DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-build
+ DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-push
+ """
+ }
+ }
+ stage('Deploy common infrastructure') {
+ // includes monitoring, kafka, etcd
+ steps {
+ sh '''
+ helm install kafka $HOME/teone/helm-charts/kafka --set replicaCount=${kafkaReplicas},replicas=${kafkaReplicas} --set persistence.enabled=false \
+ --set zookeeper.replicaCount=${kafkaReplicas} --set zookeeper.persistence.enabled=false \
+ --set prometheus.kafka.enabled=true,prometheus.operator.enabled=true,prometheus.jmx.enabled=true,prometheus.operator.serviceMonitor.namespace=default
+
+                        # the ETCD chart uses "auth" for reasons different than BBSim, so strip that away
+                        ETCD_FLAGS=$(echo ${extraHelmFlags} | sed -e 's/--set auth=false / /g' -e 's/--set auth=true / /g')
+ ETCD_FLAGS+=" --set auth.rbac.enabled=false,persistence.enabled=false,statefulset.replicaCount=${etcdReplicas}"
+ ETCD_FLAGS+=" --set memoryMode=${inMemoryEtcdStorage} "
+ helm install --set replicas=${etcdReplicas} etcd $HOME/teone/helm-charts/etcd $ETCD_FLAGS
+
+ if [ ${withMonitoring} = true ] ; then
+ helm install nem-monitoring onf/nem-monitoring \
+ -f $HOME/voltha-scale/grafana.yaml \
+ --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
+ --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
+ fi
+ '''
+ }
+ }
+ stage('Deploy Voltha') {
+ steps {
+ timeout(time: 10, unit: 'MINUTES') {
+ script {
+ sh returnStdout: false, script: '''
+ # start logging with kail
+
+ mkdir -p $LOG_FOLDER
+
+ list=($APPS_TO_LOG)
+ for app in "${list[@]}"
+ do
+ echo "Starting logs for: ${app}"
+ _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
+ done
+ '''
+ def returned_flags = sh (returnStdout: true, script: """
+
+ export EXTRA_HELM_FLAGS+=' '
+
+ # BBSim custom image handling
+          if [ '${bbsimImg.trim()}' != '' ] && [ "\$GERRIT_PROJECT" != 'bbsim' ]; then
+ IFS=: read -r bbsimRepo bbsimTag <<< '${bbsimImg.trim()}'
+ EXTRA_HELM_FLAGS+="--set images.bbsim.repository=\$bbsimRepo,images.bbsim.tag=\$bbsimTag "
+ fi
+
+ # VOLTHA custom image handling
+          if [ '${rwCoreImg.trim()}' != '' ] && [ "\$GERRIT_PROJECT" != 'voltha-go' ]; then
+ IFS=: read -r rwCoreRepo rwCoreTag <<< '${rwCoreImg.trim()}'
+ EXTRA_HELM_FLAGS+="--set voltha.images.rw_core.repository=\$rwCoreRepo,voltha.images.rw_core.tag=\$rwCoreTag "
+ fi
+
+ # ofAgent custom image handling
+          if [ '${ofAgentImg.trim()}' != '' ] && [ "\$GERRIT_PROJECT" != 'of-agent' ]; then
+ IFS=: read -r ofAgentRepo ofAgentTag <<< '${ofAgentImg.trim()}'
+ EXTRA_HELM_FLAGS+="--set voltha.images.ofagent.repository=\$ofAgentRepo,voltha.images.ofagent.tag=\$ofAgentTag "
+ fi
+
+ # OpenOLT custom image handling
+          if [ '${openoltAdapterImg.trim()}' != '' ] && [ "\$GERRIT_PROJECT" != 'voltha-openolt-adapter' ]; then
+ IFS=: read -r openoltAdapterRepo openoltAdapterTag <<< '${openoltAdapterImg.trim()}'
+ EXTRA_HELM_FLAGS+="--set voltha-adapter-openolt.images.adapter_open_olt.repository=\$openoltAdapterRepo,voltha-adapter-openolt.images.adapter_open_olt.tag=\$openoltAdapterTag "
+ fi
+
+ # OpenONU custom image handling
+          if [ '${openonuAdapterImg.trim()}' != '' ] && [ "\$GERRIT_PROJECT" != 'voltha-openonu-adapter' ]; then
+ IFS=: read -r openonuAdapterRepo openonuAdapterTag <<< '${openonuAdapterImg.trim()}'
+ EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu.repository=\$openonuAdapterRepo,voltha-adapter-openonu.images.adapter_open_onu.tag=\$openonuAdapterTag "
+ fi
+
+ # OpenONU GO custom image handling
+          if [ '${openonuAdapterGoImg.trim()}' != '' ] && [ "\$GERRIT_PROJECT" != 'voltha-openonu-adapter-go' ]; then
+ IFS=: read -r openonuAdapterGoRepo openonuAdapterGoTag <<< '${openonuAdapterGoImg.trim()}'
+ EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu_go.repository=\$openonuAdapterGoRepo,voltha-adapter-openonu.images.adapter_open_onu_go.tag=\$openonuAdapterGoTag "
+ fi
+
+ # ONOS custom image handling
+          if [ '${onosImg.trim()}' != '' ] && [ "\$GERRIT_PROJECT" != 'voltha-onos' ]; then
+ IFS=: read -r onosRepo onosTag <<< '${onosImg.trim()}'
+ EXTRA_HELM_FLAGS+="--set onos-classic.image.repository=\$onosRepo,onos-classic.image.tag=\$onosTag "
+ fi
+
+ # set BBSim parameters
+ EXTRA_HELM_FLAGS+='--set enablePerf=true,pon=${pons},onu=${onus} '
+
+ # disable the securityContext, this is a development cluster
+ EXTRA_HELM_FLAGS+='--set securityContext.enabled=false '
+
+ # No persistent-volume-claims in Atomix
+ EXTRA_HELM_FLAGS+="--set onos-classic.atomix.persistence.enabled=false "
+
+ # Use custom built images
+
+          if [ "\$GERRIT_PROJECT" == 'voltha-go' ]; then
+ EXTRA_HELM_FLAGS+="--set voltha.images.rw_core.repository=${dockerRegistry}/voltha/voltha-rw-core,voltha.images.rw_core.tag=voltha-scale "
+ fi
+
+          if [ "\$GERRIT_PROJECT" == 'voltha-openolt-adapter' ]; then
+            EXTRA_HELM_FLAGS+="--set voltha-adapter-openolt.images.adapter_open_olt.repository=${dockerRegistry}/voltha/voltha-openolt-adapter,voltha-adapter-openolt.images.adapter_open_olt.tag=voltha-scale "
+ fi
+
+          if [ "\$GERRIT_PROJECT" == 'voltha-openonu-adapter' ]; then
+            EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu.repository=${dockerRegistry}/voltha/voltha-openonu-adapter,voltha-adapter-openonu.images.adapter_open_onu.tag=voltha-scale "
+ fi
+
+          if [ "\$GERRIT_PROJECT" == 'voltha-openonu-adapter-go' ]; then
+            EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu_go.repository=${dockerRegistry}/voltha/voltha-openonu-adapter-go,voltha-adapter-openonu.images.adapter_open_onu_go.tag=voltha-scale "
+ fi
+
+          if [ "\$GERRIT_PROJECT" == 'ofagent-go' ]; then
+            EXTRA_HELM_FLAGS+="--set voltha.images.ofagent.repository=${dockerRegistry}/voltha/voltha-ofagent-go,voltha.images.ofagent.tag=voltha-scale "
+ fi
+
+          if [ "\$GERRIT_PROJECT" == 'voltha-onos' ]; then
+ EXTRA_HELM_FLAGS+="--set onos-classic.image.repository=${dockerRegistry}/voltha/voltha-onos,onos-classic.image.tag=voltha-scale "
+ fi
+
+          if [ "\$GERRIT_PROJECT" == 'bbsim' ]; then
+ EXTRA_HELM_FLAGS+="--set images.bbsim.repository=${dockerRegistry}/voltha/bbsim,images.bbsim.tag=voltha-scale "
+ fi
+ echo \$EXTRA_HELM_FLAGS
+
+ """).trim()
+
+ def extraHelmFlags = returned_flags
+ // The added space before params.extraHelmFlags is required due to the .trim() above
+ def infraHelmFlags =
+ " --set etcd.enabled=false,kafka.enabled=false" +
+ " --set global.log_level=${logLevel} " +
+ "--set onos-classic.onosSshPort=30115 " +
+ "--set onos-classic.onosApiPort=30120 " +
+ extraHelmFlags + " " + params.extraHelmFlags
+
+ println "Passing the following parameters to the VOLTHA infra deploy: ${infraHelmFlags}."
+
+ def localCharts = false
+ if (volthaHelmChartsChange != "") {
+ localCharts = true
+ }
+
+ volthaInfraDeploy([
+ workflow: workflow,
+ infraNamespace: "default",
+ extraHelmFlags: infraHelmFlags,
+ localCharts: localCharts,
+ onosReplica: onosReplicas,
+ atomixReplica: atomixReplicas,
+ ])
+
+ def stackHelmFlags = "${ofAgentConnections(onosReplicas.toInteger(), "voltha-infra", "default")} " +
+ "--set voltha.services.kafka.adapter.address=kafka.default.svc:9092 " +
+ "--set voltha.services.kafka.cluster.address=kafka.default.svc:9092 " +
+ "--set voltha.services.etcd.address=etcd.default.svc:2379 " +
+ "--set voltha-adapter-openolt.services.kafka.adapter.address=kafka.default.svc:9092 " +
+ "--set voltha-adapter-openolt.services.kafka.cluster.address=kafka.default.svc:9092 " +
+ "--set voltha-adapter-openolt.services.etcd.address=etcd.default.svc:2379 " +
+ "--set voltha-adapter-openonu.services.kafka.adapter.address=kafka.default.svc:9092 " +
+ "--set voltha-adapter-openonu.services.kafka.cluster.address=kafka.default.svc:9092 " +
+ "--set voltha-adapter-openonu.services.etcd.address=etcd.default.svc:2379"
+
+ stackHelmFlags += " --set onu=${onus},pon=${pons} --set global.log_level=${logLevel.toLowerCase()} "
+ stackHelmFlags += " --set voltha.ingress.enabled=true --set voltha.ingress.enableVirtualHosts=true --set voltha.fullHostnameOverride=voltha.scale1.dev "
+ stackHelmFlags += extraHelmFlags + " " + params.extraHelmFlags
+
+ volthaStackDeploy([
+ bbsimReplica: olts.toInteger(),
+ infraNamespace: "default",
+ volthaNamespace: "default",
+ stackName: "voltha1", // TODO support custom charts
+ workflow: workflow,
+ extraHelmFlags: stackHelmFlags,
+ localCharts: false,
+ ])
+ sh """
+ set +x
+
+ echo -ne "\nWaiting for VOLTHA and ONOS to start..."
+ voltha=\$(kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
+ onos=\$(kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l)
+ while [[ \$voltha != 0 || \$onos != 0 ]]; do
+ sleep 5
+ echo -ne "."
+ voltha=\$(kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
+ onos=\$(kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l)
+ done
+ echo -ne "\nVOLTHA and ONOS pods ready\n"
+ kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l
+ kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l
+ """
+ start_port_forward(olts)
+ }
+ }
+ }
+ }
+ stage('Configuration') {
+ steps {
+ script {
+ def tech_prof_directory = "XGS-PON"
+ sh returnStdout: false, script: """
+ #Setting link discovery
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}
+
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true
+
+
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 900
+
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500
+
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.opencord.olt.impl.Olt provisionDelay 1000
+
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set ${logLevel} org.onosproject
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set ${logLevel} org.opencord
+
+ # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.opencord.cordmcast
+ # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.onosproject.mcast
+ # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.opencord.igmpproxy
+ # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.opencord.olt
+ # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.onosproject.net.flowobjective.impl.FlowObjectiveManager
+
+ kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false
+
+ # Set Flows/Ports/Meters/Groups poll frequency
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.group.impl.OpenFlowGroupProvider groupPollInterval ${onosGroupInterval}
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.FlowObjectiveManager numThreads ${flowObjWorkerThreads}
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager objectiveTimeoutMs 300000
+
+ if [ ${withFlows} = false ]; then
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 app deactivate org.opencord.olt
+ fi
+
+ if [ '${workflow}' = 'tt' ]; then
+ etcd_container=\$(kubectl get pods --all-namespaces | grep etcd | awk 'NR==1{print \$2}')
+ kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-HSIA.json \$etcd_container:/tmp/hsia.json
+ put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64')
+ kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-VoIP.json \$etcd_container:/tmp/voip.json
+ put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65')
+ kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST.json \$etcd_container:/tmp/mcast.json
+ put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66')
+ fi
+
+ if [ ${withPcap} = true ] ; then
+ # Start the tcp-dump in ofagent
+ export OF_AGENT=\$(kubectl get pods -l app=ofagent -o name)
+ kubectl exec \$OF_AGENT -- apk update
+ kubectl exec \$OF_AGENT -- apk add tcpdump
+ kubectl exec \$OF_AGENT -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
+ _TAG=ofagent-tcpdump kubectl exec \$OF_AGENT -- tcpdump -nei eth0 -w out.pcap&
+
+ # Start the tcp-dump in radius
+ export RADIUS=\$(kubectl get pods -l app=radius -o name)
+ kubectl exec \$RADIUS -- apt-get update
+ kubectl exec \$RADIUS -- apt-get install -y tcpdump
+ _TAG=radius-tcpdump kubectl exec \$RADIUS -- tcpdump -w out.pcap&
+
+ # Start the tcp-dump in ONOS
+ for i in \$(seq 0 \$ONOSES); do
+ INSTANCE="onos-onos-classic-\$i"
+ kubectl exec \$INSTANCE -- apt-get update
+ kubectl exec \$INSTANCE -- apt-get install -y tcpdump
+ kubectl exec \$INSTANCE -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
+ _TAG=\$INSTANCE kubectl exec \$INSTANCE -- /usr/bin/tcpdump -nei eth0 port 1812 -w out.pcap&
+ done
+ fi
+ """
+ }
+ }
+ }
+ stage('Load MIB Template') {
+ when {
+ expression {
+ return params.withMibTemplate
+ }
+ }
+ steps {
+ sh """
+ # load MIB template
+ wget https://raw.githubusercontent.com/opencord/voltha-openonu-adapter-go/master/templates/BBSM-12345123451234512345-00000000000001-v1.json
+ cat BBSM-12345123451234512345-00000000000001-v1.json | kubectl exec -it \$(kubectl get pods |grep etcd | awk 'NR==1{print \$1}') -- etcdctl put service/voltha/omci_mibs/go_templates/BBSM/12345123451234512345/00000000000001
+ """
+ }
+ }
+ stage('Run Test') {
+ steps {
+ sh '''
+ mkdir -p $WORKSPACE/RobotLogs
+ cd $WORKSPACE/voltha-system-tests
+ make vst_venv
+ '''
+ sh '''
+ if [ ${withProfiling} = true ] ; then
+ mkdir -p $LOG_FOLDER/pprof
+ echo $PATH
+                # Create bash script that periodically collects pprof profiles
+ cat << EOF > $WORKSPACE/pprof.sh
+timestamp() {
+ date +"%T"
+}
+
+i=0
+while [[ true ]]; do
+ ((i++))
+ ts=$(timestamp)
+ go tool pprof -png http://127.0.0.1:6060/debug/pprof/heap > $LOG_FOLDER/pprof/rw-core-heap-\\$i-\\$ts.png
+ go tool pprof -png http://127.0.0.1:6060/debug/pprof/goroutine > $LOG_FOLDER/pprof/rw-core-goroutine-\\$i-\\$ts.png
+ curl -o $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof http://127.0.0.1:6060/debug/pprof/profile?seconds=10
+ go tool pprof -png $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.png
+
+ go tool pprof -png http://127.0.0.1:6061/debug/pprof/heap > $LOG_FOLDER/pprof/openolt-heap-\\$i-\\$ts.png
+ go tool pprof -png http://127.0.0.1:6061/debug/pprof/goroutine > $LOG_FOLDER/pprof/openolt-goroutine-\\$i-\\$ts.png
+ curl -o $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof http://127.0.0.1:6061/debug/pprof/profile?seconds=10
+ go tool pprof -png $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.png
+
+ go tool pprof -png http://127.0.0.1:6062/debug/pprof/heap > $LOG_FOLDER/pprof/ofagent-heap-\\$i-\\$ts.png
+ go tool pprof -png http://127.0.0.1:6062/debug/pprof/goroutine > $LOG_FOLDER/pprof/ofagent-goroutine-\\$i-\\$ts.png
+ curl -o $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof http://127.0.0.1:6062/debug/pprof/profile?seconds=10
+ go tool pprof -png $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.png
+
+ sleep 10
+done
+EOF
+
+ _TAG="pprof"
+ _TAG=$_TAG bash $WORKSPACE/pprof.sh &
+ fi
+ '''
+ timeout(time: 15, unit: 'MINUTES') {
+ sh '''
+ ROBOT_PARAMS="--exitonfailure \
+ -v olt:${olts} \
+ -v pon:${pons} \
+ -v onu:${onus} \
+ -v ONOS_SSH_PORT:30115 \
+ -v ONOS_REST_PORT:30120 \
+ -v workflow:${workflow} \
+ -v withEapol:${withEapol} \
+ -v withDhcp:${withDhcp} \
+ -v withIgmp:${withIgmp} \
+ --noncritical non-critical \
+ -e igmp -e teardown "
+
+ if [ ${withEapol} = false ] ; then
+ ROBOT_PARAMS+="-e authentication "
+ fi
+
+ if [ ${withDhcp} = false ] ; then
+ ROBOT_PARAMS+="-e dhcp "
+ fi
+
+ if [ ${provisionSubscribers} = false ] ; then
+ # if we're not considering subscribers then we don't care about authentication and dhcp
+ ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
+ fi
+
+ if [ ${withFlows} = false ] ; then
+ ROBOT_PARAMS+="-i setup -i activation "
+ fi
+
+ cd $WORKSPACE/voltha-system-tests
+ source ./vst_venv/bin/activate
+ robot -d $WORKSPACE/RobotLogs \
+ $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
+ '''
+ }
+ }
+ }
+ stage('Run Igmp Tests') {
+ environment {
+ ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/IgmpTests"
+ }
+ when {
+ expression {
+ return params.withIgmp
+ }
+ }
+ steps {
+ sh returnStdout: false, script: """
+ # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.onosproject.store.group.impl
+ """
+ sh '''
+ set +e
+ mkdir -p $ROBOT_LOGS_DIR
+ cd $WORKSPACE/voltha-system-tests
+ make vst_venv
+ '''
+ timeout(time: 11, unit: 'MINUTES') {
+ sh '''
+ ROBOT_PARAMS="--exitonfailure \
+ -v olt:${olts} \
+ -v pon:${pons} \
+ -v onu:${onus} \
+ -v workflow:${workflow} \
+ -v withEapol:${withEapol} \
+ -v withDhcp:${withDhcp} \
+ -v withIgmp:${withIgmp} \
+ -v ONOS_SSH_PORT:30115 \
+ -v ONOS_REST_PORT:30120 \
+ --noncritical non-critical \
+ -i igmp \
+ -e setup -e activation -e flow-before \
+ -e authentication -e provision -e flow-after \
+ -e dhcp -e teardown "
+ cd $WORKSPACE/voltha-system-tests
+ source ./vst_venv/bin/activate
+ robot -d $ROBOT_LOGS_DIR \
+ $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
+ '''
+ }
+ }
+ }
+ }
+ post {
+ always {
+ // collect result, done in the "post" step so it's executed even in the
+ // event of a timeout in the tests
+ sh '''
+
+ # stop the kail processes
+ list=($APPS_TO_LOG)
+ for app in "${list[@]}"
+ do
+ echo "Stopping logs for: ${app}"
+ _TAG="kail-$app"
+ P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
+ if [ -n "$P_IDS" ]; then
+ echo $P_IDS
+ for P_ID in $P_IDS; do
+ kill -9 $P_ID
+ done
+ fi
+ done
+
+ if [ ${withPcap} = true ] ; then
+ # stop ofAgent tcpdump
+ P_ID="\$(ps e -ww -A | grep "_TAG=ofagent-tcpdump" | grep -v grep | awk '{print \$1}')"
+ if [ -n "\$P_ID" ]; then
+ kill -9 \$P_ID
+ fi
+
+ # stop radius tcpdump
+ P_ID="\$(ps e -ww -A | grep "_TAG=radius-tcpdump" | grep -v grep | awk '{print \$1}')"
+ if [ -n "\$P_ID" ]; then
+ kill -9 \$P_ID
+ fi
+
+ # stop onos tcpdump
+ LIMIT=$(($NUM_OF_ONOS - 1))
+ for i in $(seq 0 $LIMIT); do
+ INSTANCE="onos-onos-classic-$i"
+ P_ID="\$(ps e -ww -A | grep "_TAG=$INSTANCE" | grep -v grep | awk '{print \$1}')"
+ if [ -n "\$P_ID" ]; then
+ kill -9 \$P_ID
+ fi
+ done
+
+ # copy the file
+ export OF_AGENT=$(kubectl get pods -l app=ofagent | awk 'NR==2{print $1}') || true
+ kubectl cp $OF_AGENT:out.pcap $LOG_FOLDER/ofagent.pcap || true
+
+ export RADIUS=$(kubectl get pods -l app=radius | awk 'NR==2{print $1}') || true
+ kubectl cp $RADIUS:out.pcap $LOG_FOLDER/radius.pcap || true
+
+ LIMIT=$(($NUM_OF_ONOS - 1))
+ for i in $(seq 0 $LIMIT); do
+ INSTANCE="onos-onos-classic-$i"
+ kubectl cp $INSTANCE:out.pcap $LOG_FOLDER/$INSTANCE.pcap || true
+ done
+ fi
+
+ cd voltha-system-tests
+ source ./vst_venv/bin/activate
+ python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time.txt || true
+ cat $WORKSPACE/execution-time.txt
+ '''
+ sh '''
+ if [ ${withProfiling} = true ] ; then
+ _TAG="pprof"
+ P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
+ if [ -n "$P_IDS" ]; then
+ echo $P_IDS
+ for P_ID in $P_IDS; do
+ kill -9 $P_ID
+ done
+ fi
+ fi
+ '''
+ plot([
+ csvFileName: 'scale-test.csv',
+ csvSeries: [
+ [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ ],
+ group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
+ ])
+ step([$class: 'RobotPublisher',
+ disableArchiveOutput: false,
+ logFileName: '**/log*.html',
+ otherFiles: '',
+ outputFileName: '**/output*.xml',
+ outputPath: 'RobotLogs',
+ passThreshold: 100,
+ reportFileName: '**/report*.html',
+ unstableThreshold: 0]);
+ // get all the logs from kubernetes PODs
+ sh returnStdout: false, script: '''
+
+ # store information on running charts
+ helm ls > $LOG_FOLDER/helm-list.txt || true
+
+ # store information on the running pods
+ kubectl get pods -o wide > $LOG_FOLDER/pods.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true
+
+ # copy the ONOS logs directly from the container to avoid the color codes
+ printf '%s\n' $(kubectl get pods -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
+
+ # get ONOS cfg from the 3 nodes
+ # kubectl exec -t voltha-infra-onos-classic-0 -- sh /root/onos/apache-karaf-4.2.9/bin/client cfg get > ~/voltha-infra-onos-classic-0-cfg.txt || true
+ # kubectl exec -t voltha-infra-onos-classic-1 -- sh /root/onos/apache-karaf-4.2.9/bin/client cfg get > ~/voltha-infra-onos-classic-1-cfg.txt || true
+ # kubectl exec -t voltha-infra-onos-classic-2 -- sh /root/onos/apache-karaf-4.2.9/bin/client cfg get > ~/voltha-infra-onos-classic-2-cfg.txt || true
+
+ # kubectl exec -t voltha-infra-onos-classic-0 -- sh /root/onos/apache-karaf-4.2.9/bin/client obj-next-ids > ~/voltha-infra-onos-classic-0-next-objs.txt || true
+ # kubectl exec -t voltha-infra-onos-classic-1 -- sh /root/onos/apache-karaf-4.2.9/bin/client obj-next-ids > ~/voltha-infra-onos-classic-1-next-objs.txt || true
+ # kubectl exec -t voltha-infra-onos-classic-2 -- sh /root/onos/apache-karaf-4.2.9/bin/client obj-next-ids > ~/voltha-infra-onos-classic-2-next-objs.txt || true
+
+ # get radius logs out of the container
+ kubectl cp $(kubectl get pods -l app=radius --no-headers | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER/radius.log || true
+ '''
+ // dump all the BBSim(s) ONU information
+ sh '''
+ BBSIM_IDS=$(kubectl get pods | grep bbsim | grep -v server | awk '{print $1}')
+ IDS=($BBSIM_IDS)
+
+ for bbsim in "${IDS[@]}"
+ do
+ kubectl exec -t $bbsim -- bbsimctl onu list > $LOG_FOLDER/$bbsim-device-list.txt || true
+ kubectl exec -t $bbsim -- bbsimctl service list > $LOG_FOLDER/$bbsim-service-list.txt || true
+ kubectl exec -t $bbsim -- bbsimctl olt resources GEM_PORT > $LOG_FOLDER/$bbsim-flows-gem-ports.txt || true
+ kubectl exec -t $bbsim -- bbsimctl olt resources ALLOC_ID > $LOG_FOLDER/$bbsim-flows-alloc-ids.txt || true
+ kubectl exec -t $bbsim -- bbsimctl olt pons > $LOG_FOLDER/$bbsim-pon-resources.txt || true
+ done
+ '''
+ script {
+ // first make sure the port-forward is still running,
+      // sometimes Jenkins kills it regardless of the JENKINS_NODE_COOKIE=dontKillMe
+ def running = sh (
+ script: 'ps aux | grep port-forw | grep -E "onos|voltha" | grep -v grep | wc -l',
+ returnStdout: true
+ ).trim()
+ // if any of the voltha-api, onos-rest, onos-ssh port-forwards are not there
+ // kill all and restart
+ if (running != "3") {
+ start_port_forward(olts)
+ }
+ }
+ // get ONOS debug infos
+ sh '''
+
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg get > $LOG_FOLDER/onos-cfg.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 netcfg > $LOG_FOLDER/onos-netcfg.txt
+
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt
+
+ if [ ${withFlows} = true ] ; then
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt
+ fi
+
+ if [ ${provisionSubscribers} = true ]; then
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt
+ fi
+
+ if [ ${withEapol} = true ] ; then
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt
+ fi
+
+ if [ ${withDhcp} = true ] ; then
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt
+ fi
+
+ if [ ${withIgmp} = true ] ; then
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 mcast-host-routes > $LOG_FOLDER/onos-mcast-host-routes.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 mcast-host-show > $LOG_FOLDER/onos-mcast-host-show.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 groups > $LOG_FOLDER/onos-groups.txt
+ fi
+ '''
+ // collect etcd metrics
+ sh '''
+ mkdir -p $WORKSPACE/etcd-metrics
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time-sum.json || true
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time-bucket.json || true
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_wal_fsync_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-wal-fsync-time-bucket.json || true
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_network_peer_round_trip_time_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-network-peer-round-trip-time-seconds.json || true
+
+ '''
+ // get VOLTHA debug infos
+ script {
+ try {
+ sh '''
+ voltctl -m 8MB device list -o json > $LOG_FOLDER/device-list.json || true
+ python -m json.tool $LOG_FOLDER/device-list.json > $LOG_FOLDER/voltha-devices-list.json || true
+ rm $LOG_FOLDER/device-list.json || true
+ voltctl -m 8MB device list > $LOG_FOLDER/voltha-devices-list.txt || true
+
+ printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device flows # > $LOG_FOLDER/voltha-device-flows-#.txt" || true
+ printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/voltha-device-ports-#.txt" || true
+
+ printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice flows # > $LOG_FOLDER/voltha-logicaldevice-flows-#.txt" || true
+ printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice port list # > $LOG_FOLDER/voltha-logicaldevice-ports-#.txt" || true
+ '''
+ } catch(e) {
+ sh '''
+ echo "Can't get device list from voltclt"
+ '''
+ }
+ }
+ // get cpu usage by container
+ sh '''
+ if [ ${withMonitoring} = true ] ; then
+ cd $WORKSPACE/voltha-system-tests
+ source ./vst_venv/bin/activate
+ sleep 60 # we have to wait for prometheus to collect all the information
+ python tests/scale/sizing.py -o $WORKSPACE/plots || true
+ fi
+ '''
+ archiveArtifacts artifacts: 'execution-time.txt,logs/*,logs/pprof/*,RobotLogs/**/*,plots/*,etcd-metrics/*'
+ }
+ }
+}
+
+def start_port_forward(olts) {
+ sh """
+ bbsimRestPortFwd=50071
+ for i in {0..${olts.toInteger() - 1}}; do
+ daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward --address 0.0.0.0 -n default svc/bbsim\${i} \${bbsimRestPortFwd}:50071
+ ((bbsimRestPortFwd++))
+ done
+ """
+}
diff --git a/jjb/pipeline/voltha/voltha-2.7/bbsim-tests.groovy b/jjb/pipeline/voltha/voltha-2.7/bbsim-tests.groovy
new file mode 100644
index 0000000..c72c0f9
--- /dev/null
+++ b/jjb/pipeline/voltha/voltha-2.7/bbsim-tests.groovy
@@ -0,0 +1,253 @@
+// Copyright 2017-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// voltha-2.x e2e tests
+// uses bbsim to simulate OLT/ONUs
+
+// NOTE we are importing the library even if it's global so that it's
+// easier to change the keywords during a replay
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+
+// TODO move this in a keyword so it can be shared across pipelines
+def customImageFlags(project) {
+ def chart = "unknown"
+ def image = "unknown"
+ switch(project) {
+ case "ofagent-go":
+ chart = "voltha"
+ image = "ofagent"
+ break
+ case "voltha-go":
+ chart = "voltha"
+ image = "rw_core"
+ break
+ case "voltha-openonu-adapter-go":
+ chart = "voltha-adapter-openonu"
+ image = "adapter_open_onu_go"
+ break
+ // TODO remove after 2.7
+ case "voltha-openonu-adapter":
+ chart = "voltha-adapter-openonu"
+ image = "adapter_open_onu"
+ break
+ // TODO end
+ case "voltha-openolt-adapter":
+ chart = "voltha-adapter-openolt"
+ image = "adapter_open_olt"
+ break
+ case "bbsim":
+      // BBSIM has a different format than voltha, return directly
+ return "--set images.bbsim.tag=citest,images.bbsim.pullPolicy=Never"
+ break
+ default:
+ break
+ }
+
+ return "--set ${chart}.images.${image}.tag=citest,${chart}.images.${image}.pullPolicy=Never "
+}
+
+def test_workflow(name) {
+ stage('Deploy - '+ name + ' workflow') {
+ def extraHelmFlags = "${extraHelmFlags} --set global.log_level=DEBUG,onu=1,pon=1 "
+
+ if (gerritProject != "") {
+ extraHelmFlags = extraHelmFlags + customImageFlags("${gerritProject}")
+ }
+
+ def localCharts = false
+ if (gerritProject == "voltha-helm-charts") {
+ localCharts = true
+ }
+
+ volthaDeploy([workflow: name, extraHelmFlags: extraHelmFlags, localCharts: localCharts])
+ // start logging
+ sh """
+ mkdir -p $WORKSPACE/${name}
+ _TAG=kail-${name} kail -n infra -n voltha > $WORKSPACE/${name}/onos-voltha-combined.log &
+ """
+ // forward ONOS and VOLTHA ports
+ sh """
+ _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8101:8101&
+ _TAG=onos-port-forward kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8181:8181&
+ _TAG=voltha-port-forward kubectl port-forward --address 0.0.0.0 -n voltha svc/voltha-voltha-api 55555:55555&
+ """
+ }
+ stage('Test VOLTHA - '+ name + ' workflow') {
+ sh """
+ ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/${name.toUpperCase()}Workflow"
+ mkdir -p \$ROBOT_LOGS_DIR
+ export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -e PowerSwitch"
+
+ # By default, all tests tagged 'sanity' are run. This covers basic functionality
+ # like running through the ATT workflow for a single subscriber.
+ export TARGET=sanity-kind-${name}
+
+ # If the Gerrit comment contains a line with "functional tests" then run the full
+ # functional test suite. This covers tests tagged either 'sanity' or 'functional'.
+ # Note: Gerrit comment text will be prefixed by "Patch set n:" and a blank line
+ REGEX="functional tests"
+ if [[ "\$GERRIT_EVENT_COMMENT_TEXT" =~ \$REGEX ]]; then
+ export TARGET=functional-single-kind-${name}
+ fi
+
+ if [[ "${gerritProject}" == "bbsim" ]]; then
+ echo "Running BBSim specific Tests"
+ export TARGET=sanity-bbsim-${name}
+ fi
+
+ export VOLTCONFIG=$HOME/.volt/config
+ export KUBECONFIG=$HOME/.kube/config
+
+ # Run the specified tests
+ make -C $WORKSPACE/voltha-system-tests \$TARGET || true
+ """
+ // stop logging
+ sh """
+ P_IDS="\$(ps e -ww -A | grep "_TAG=kail-${name}" | grep -v grep | awk '{print \$1}')"
+ if [ -n "\$P_IDS" ]; then
+ echo \$P_IDS
+ for P_ID in \$P_IDS; do
+ kill -9 \$P_ID
+ done
+ fi
+ """
+ // remove port-forwarding
+ sh """
+ # remove orphaned port-forward from different namespaces
+ ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
+ """
+ // collect pod details
+ get_pods_info("$WORKSPACE/${name}")
+ helmTeardown(['infra', 'voltha'])
+ }
+}
+
+def get_pods_info(dest) {
+ // collect pod details, this is here in case of failure
+ sh """
+ mkdir -p ${dest}
+ kubectl get pods --all-namespaces -o wide | tee ${dest}/pods.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee ${dest}/pod-images.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee ${dest}/pod-imagesId.txt || true
+ kubectl describe pods --all-namespaces -l app.kubernetes.io/part-of=voltha > ${dest}/pods-describe.txt
+ helm ls --all-namespaces | tee ${dest}/helm-charts.txt
+ """
+}
+
+pipeline {
+
+ /* no label, executor is determined by JJB */
+ agent {
+ label "${params.buildNode}"
+ }
+ options {
+ timeout(time: 35, unit: 'MINUTES')
+ }
+ environment {
+ PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
+ KUBECONFIG="$HOME/.kube/kind-config-${clusterName}"
+ }
+
+ stages{
+ stage('Download Code') {
+ steps {
+ getVolthaCode([
+ branch: "${branch}",
+ gerritProject: "${gerritProject}",
+ gerritRefspec: "${gerritRefspec}",
+ volthaSystemTestsChange: "${volthaSystemTestsChange}",
+ volthaHelmChartsChange: "${volthaHelmChartsChange}",
+ ])
+ }
+ }
+ stage('Build patch') {
+ steps {
+ // NOTE that the correct patch has already been checked out
+ // during the getVolthaCode step
+ buildVolthaComponent("${gerritProject}")
+ }
+ }
+ stage('Create K8s Cluster') {
+ steps {
+ createKubernetesCluster([nodes: 3])
+ }
+ }
+ stage('Load image in kind nodes') {
+ steps {
+ loadToKind()
+ }
+ }
+ stage('Replace voltctl') {
+ // if the project is voltctl override the downloaded one with the built one
+ when {
+ expression {
+ return gerritProject == "voltctl"
+ }
+ }
+ steps{
+ sh """
+ mv `ls $WORKSPACE/voltctl/release/voltctl-*-linux-amd*` $WORKSPACE/bin/voltctl
+ chmod +x $WORKSPACE/bin/voltctl
+ """
+ }
+ }
+ stage('Run Test') {
+ steps {
+ timeout(time: 30, unit: 'MINUTES') {
+ test_workflow("att")
+ test_workflow("dt")
+ test_workflow("tt")
+ }
+ }
+ }
+ }
+
+ post {
+ aborted {
+ get_pods_info("$WORKSPACE/failed")
+ sh """
+ kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.log
+ """
+ archiveArtifacts artifacts: '**/*.log,**/*.txt'
+ }
+ failure {
+ get_pods_info("$WORKSPACE/failed")
+ sh """
+ kubectl logs -n voltha -l app.kubernetes.io/part-of=voltha > $WORKSPACE/failed/voltha.logs
+ """
+ archiveArtifacts artifacts: '**/*.log,**/*.txt'
+ }
+ always {
+ sh '''
+ gzip $WORKSPACE/att/onos-voltha-combined.log || true
+ gzip $WORKSPACE/dt/onos-voltha-combined.log || true
+ gzip $WORKSPACE/tt/onos-voltha-combined.log || true
+ '''
+ step([$class: 'RobotPublisher',
+ disableArchiveOutput: false,
+ logFileName: 'RobotLogs/*/log*.html',
+ otherFiles: '',
+ outputFileName: 'RobotLogs/*/output*.xml',
+ outputPath: '.',
+ passThreshold: 100,
+ reportFileName: 'RobotLogs/*/report*.html',
+ unstableThreshold: 0]);
+ archiveArtifacts artifacts: '*.log,**/*.log,**/*.gz,*.gz,*.txt,**/*.txt'
+ }
+ }
+}
diff --git a/jjb/pipeline/voltha/voltha-2.7/software-upgrades.groovy b/jjb/pipeline/voltha/voltha-2.7/software-upgrades.groovy
new file mode 100644
index 0000000..7bdf278
--- /dev/null
+++ b/jjb/pipeline/voltha/voltha-2.7/software-upgrades.groovy
@@ -0,0 +1,243 @@
+// Copyright 2021-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// voltha-2.x e2e tests
+// uses bbsim to simulate OLT/ONUs
+// NOTE we are importing the library even if it's global so that it's
+// easier to change the keywords during a replay
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+def test_software_upgrade(name) {
+ stage('Deploy Voltha - '+ name) {
+ def extraHelmFlags = "${extraHelmFlags} --set global.log_level=DEBUG,onu=1,pon=1 --set onos-classic.replicas=3,onos-classic.atomix.replicas=3 "
+
+ extraHelmFlags = extraHelmFlags + """ --set voltha.services.controller[0].service=voltha-infra-onos-classic-0.voltha-infra-onos-classic-hs.infra.svc \
+ --set voltha.services.controller[0].port=6653 \
+ --set voltha.services.controller[0].address=voltha-infra-onos-classic-0.voltha-infra-onos-classic-hs.infra.svc:6653 \
+ --set voltha.services.controller[1].service=voltha-infra-onos-classic-1.voltha-infra-onos-classic-hs.infra.svc \
+ --set voltha.services.controller[1].port=6653 \
+ --set voltha.services.controller[1].address=voltha-infra-onos-classic-1.voltha-infra-onos-classic-hs.infra.svc:6653 \
+ --set voltha.services.controller[2].service=voltha-infra-onos-classic-2.voltha-infra-onos-classic-hs.infra.svc \
+ --set voltha.services.controller[2].port=6653 \
+ --set voltha.services.controller[2].address=voltha-infra-onos-classic-2.voltha-infra-onos-classic-hs.infra.svc:6653 """
+ //ONOS custom image handling
+ if ( onosImg.trim() != '' ) {
+ String[] split;
+ onosImg = onosImg.trim()
+ split = onosImg.split(':')
+ extraHelmFlags = extraHelmFlags + "--set onos-classic.image.repository=" + split[0] +",onos-classic.image.tag=" + split[1] + " "
+ }
+
+ // Currently only testing with ATT workflow
+ // TODO: Support for other workflows
+ // NOTE localCharts is set to "true" so that we use the locally cloned version of the chart (set to voltha-2.7)
+ volthaDeploy([workflow: "att", extraHelmFlags: extraHelmFlags, localCharts: true])
+ // start logging
+ sh """
+ rm -rf $WORKSPACE/${name} || true
+ mkdir -p $WORKSPACE/${name}
+ _TAG=kail-${name} kail -n infra -n voltha > $WORKSPACE/${name}/onos-voltha-combined.log &
+ """
+ // forward ONOS and VOLTHA ports
+ sh """
+ _TAG=onos-port-forward bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8101:8101; done &"
+ _TAG=onos-port-forward bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n infra svc/voltha-infra-onos-classic-hs 8181:8181; done &"
+ _TAG=port-forward-voltha-api bash -c "while true; do kubectl port-forward --address 0.0.0.0 -n voltha svc/voltha-voltha-api 55555:55555; done &"
+ """
+ sh """
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 8101 karaf@127.0.0.1 log:set DEBUG org.opencord
+ """
+ }
+ stage('Test - '+ name) {
+ sh """
+ ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/${name}"
+ mkdir -p \$ROBOT_LOGS_DIR
+ if [[ ${name} == 'onos-app-upgrade' ]]; then
+ export ONOS_APPS_UNDER_TEST+=''
+          if [ "${aaaVer.trim()}" != '' ] && [ "${aaaOarUrl.trim()}" != '' ]; then
+            ONOS_APPS_UNDER_TEST+="org.opencord.aaa,${aaaVer.trim()},${aaaOarUrl.trim()}*"
+          fi
+          if [ "${oltVer.trim()}" != '' ] && [ "${oltOarUrl.trim()}" != '' ]; then
+            ONOS_APPS_UNDER_TEST+="org.opencord.olt,${oltVer.trim()},${oltOarUrl.trim()}*"
+          fi
+          if [ "${dhcpl2relayVer.trim()}" != '' ] && [ "${dhcpl2relayOarUrl.trim()}" != '' ]; then
+            ONOS_APPS_UNDER_TEST+="org.opencord.dhcpl2relay,${dhcpl2relayVer.trim()},${dhcpl2relayOarUrl.trim()}*"
+          fi
+          if [ "${igmpproxyVer.trim()}" != '' ] && [ "${igmpproxyOarUrl.trim()}" != '' ]; then
+            ONOS_APPS_UNDER_TEST+="org.opencord.igmpproxy,${igmpproxyVer.trim()},${igmpproxyOarUrl.trim()}*"
+          fi
+          if [ "${sadisVer.trim()}" != '' ] && [ "${sadisOarUrl.trim()}" != '' ]; then
+            ONOS_APPS_UNDER_TEST+="org.opencord.sadis,${sadisVer.trim()},${sadisOarUrl.trim()}*"
+          fi
+          if [ "${mcastVer.trim()}" != '' ] && [ "${mcastOarUrl.trim()}" != '' ]; then
+            ONOS_APPS_UNDER_TEST+="org.opencord.mcast,${mcastVer.trim()},${mcastOarUrl.trim()}*"
+          fi
+          if [ "${kafkaVer.trim()}" != '' ] && [ "${kafkaOarUrl.trim()}" != '' ]; then
+            ONOS_APPS_UNDER_TEST+="org.opencord.kafka,${kafkaVer.trim()},${kafkaOarUrl.trim()}*"
+          fi
+ export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v onos_apps_under_test:\$ONOS_APPS_UNDER_TEST -e PowerSwitch"
+ export TARGET=onos-app-upgrade-test
+ fi
+ if [[ ${name} == 'voltha-component-upgrade' ]]; then
+ export VOLTHA_COMPS_UNDER_TEST+=''
+          if [ "${adapterOpenOltImage.trim()}" != '' ]; then
+            VOLTHA_COMPS_UNDER_TEST+="adapter-open-olt,adapter-open-olt,${adapterOpenOltImage.trim()}*"
+          fi
+          if [ "${adapterOpenOnuImage.trim()}" != '' ]; then
+            VOLTHA_COMPS_UNDER_TEST+="adapter-open-onu,adapter-open-onu,${adapterOpenOnuImage.trim()}*"
+          fi
+          if [ "${rwCoreImage.trim()}" != '' ]; then
+            VOLTHA_COMPS_UNDER_TEST+="rw-core,voltha,${rwCoreImage.trim()}*"
+          fi
+          if [ "${ofAgentImage.trim()}" != '' ]; then
+            VOLTHA_COMPS_UNDER_TEST+="ofagent,ofagent,${ofAgentImage.trim()}*"
+          fi
+ export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v voltha_comps_under_test:\$VOLTHA_COMPS_UNDER_TEST -e PowerSwitch"
+ export TARGET=voltha-comp-upgrade-test
+ fi
+ if [[ ${name} == 'onu-software-upgrade' ]]; then
+ export ROBOT_MISC_ARGS="-d \$ROBOT_LOGS_DIR -v onu_image_name:${onuImageName.trim()} -v onu_image_url:${onuImageUrl.trim()} -v onu_image_version:${onuImageVersion.trim()} -v onu_image_crc:${onuImageCrc.trim()} -v onu_image_local_dir:${onuImageLocalDir.trim()} -e PowerSwitch"
+ export TARGET=onu-upgrade-test
+ fi
+ export VOLTCONFIG=$HOME/.volt/config-minimal
+ export KUBECONFIG=$HOME/.kube/kind-config-voltha-minimal
+ # Run the specified tests
+ make -C $WORKSPACE/voltha-system-tests \$TARGET || true
+ """
+ // stop logging
+ sh """
+ P_IDS="\$(ps e -ww -A | grep "_TAG=kail-${name}" | grep -v grep | awk '{print \$1}')"
+ if [ -n "\$P_IDS" ]; then
+ echo \$P_IDS
+ for P_ID in \$P_IDS; do
+ kill -9 \$P_ID
+ done
+ fi
+ """
+ // remove port-forwarding
+ sh """
+ # remove orphaned port-forward from different namespaces
+ ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
+ """
+ // collect pod details
+ get_pods_info("$WORKSPACE/${name}")
+ helmTeardown(['infra', 'voltha'])
+ }
+}
+def get_pods_info(dest) {
+ // collect pod details, this is here in case of failure
+ sh """
+ mkdir -p ${dest} || true
+ kubectl get pods --all-namespaces -o wide > ${dest}/pods.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee ${dest}/pod-images.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee ${dest}/pod-imagesId.txt || true
+ kubectl describe pods --all-namespaces -l app.kubernetes.io/part-of=voltha > ${dest}/voltha-pods-describe.txt
+ kubectl describe pods -n infra -l app=onos-classic > ${dest}/onos-pods-describe.txt
+ helm ls --all-namespaces > ${dest}/helm-charts.txt
+ """
+ sh '''
+ # copy the ONOS logs directly from the container to avoid the color codes
+ printf '%s\\n' $(kubectl get pods -n infra -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c 'kubectl -n infra cp #:apache-karaf-4.2.9/data/log/karaf.log ''' + dest + '''/#.log' || true
+ '''
+}
+pipeline {
+ /* no label, executor is determined by JJB */
+ agent {
+ label "${params.buildNode}"
+ }
+ options {
+ timeout(time: 40, unit: 'MINUTES')
+ }
+ environment {
+ PATH="$PATH:$WORKSPACE/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin"
+ KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
+ SSHPASS="karaf"
+ }
+ stages{
+ stage('Download Code') {
+ steps {
+ getVolthaCode([
+ branch: "${branch}",
+ volthaSystemTestsChange: "${volthaSystemTestsChange}",
+ volthaHelmChartsChange: "${volthaHelmChartsChange}",
+ ])
+ }
+ }
+ stage('Cleanup') {
+ steps {
+ // remove port-forwarding
+ sh """
+ # remove orphaned port-forward from different namespaces
+ ps aux | grep port-forw | grep -v grep | awk '{print \$2}' | xargs --no-run-if-empty kill -9 || true
+ """
+ helmTeardown(['infra', 'voltha'])
+ }
+ }
+ stage('Install latest voltctl') {
+ steps {
+ sh """
+ mkdir -p $WORKSPACE/bin || true
+ # install voltctl
+            HOSTOS="\$(uname -s | tr "[:upper:]" "[:lower:]")"
+            HOSTARCH="\$(uname -m | tr "[:upper:]" "[:lower:]")"
+ if [ "\$HOSTARCH" == "x86_64" ]; then
+ HOSTARCH="amd64"
+ fi
+ VC_VERSION="\$(curl --fail -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')"
+ curl -Lo $WORKSPACE/bin/voltctl https://github.com/opencord/voltctl/releases/download/v\$VC_VERSION/voltctl-\$VC_VERSION-\$HOSTOS-\$HOSTARCH
+ chmod +x $WORKSPACE/bin/voltctl
+ """
+ }
+ }
+ stage('Create K8s Cluster') {
+ steps {
+ createKubernetesCluster([nodes: 3])
+ }
+ }
+ stage('Run Test') {
+ steps {
+ test_software_upgrade("onos-app-upgrade")
+ test_software_upgrade("voltha-component-upgrade")
+ test_software_upgrade("onu-software-upgrade")
+ }
+ }
+ }
+ post {
+ aborted {
+ get_pods_info("$WORKSPACE/failed")
+ }
+ failure {
+ get_pods_info("$WORKSPACE/failed")
+ }
+ always {
+ sh '''
+ gzip $WORKSPACE/onos-app-upgrade/onos-voltha-combined.log || true
+ gzip $WORKSPACE/voltha-component-upgrade/onos-voltha-combined.log || true
+ gzip $WORKSPACE/onu-software-upgrade/onos-voltha-combined.log || true
+ '''
+ step([$class: 'RobotPublisher',
+ disableArchiveOutput: false,
+ logFileName: 'RobotLogs/*/log*.html',
+ otherFiles: '',
+ outputFileName: 'RobotLogs/*/output*.xml',
+ outputPath: '.',
+ passThreshold: 100,
+ reportFileName: 'RobotLogs/*/report*.html',
+ unstableThreshold: 0]);
+ archiveArtifacts artifacts: '*.log,**/*.log,**/*.gz,*.gz,*.txt,**/*.txt'
+ }
+ }
+}
diff --git a/jjb/pipeline/voltha-DMI-bbsim-tests.groovy b/jjb/pipeline/voltha/voltha-2.7/voltha-DMI-bbsim-tests.groovy
similarity index 100%
rename from jjb/pipeline/voltha-DMI-bbsim-tests.groovy
rename to jjb/pipeline/voltha/voltha-2.7/voltha-DMI-bbsim-tests.groovy
diff --git a/jjb/pipeline/voltha-bbsim-tests.groovy b/jjb/pipeline/voltha/voltha-2.7/voltha-bbsim-tests.groovy
similarity index 100%
rename from jjb/pipeline/voltha-bbsim-tests.groovy
rename to jjb/pipeline/voltha/voltha-2.7/voltha-bbsim-tests.groovy
diff --git a/jjb/pipeline/voltha-dt-physical-build-and-tests.groovy b/jjb/pipeline/voltha/voltha-2.7/voltha-dt-physical-build-and-tests.groovy
similarity index 100%
rename from jjb/pipeline/voltha-dt-physical-build-and-tests.groovy
rename to jjb/pipeline/voltha/voltha-2.7/voltha-dt-physical-build-and-tests.groovy
diff --git a/jjb/pipeline/voltha-go-multi-tests.groovy b/jjb/pipeline/voltha/voltha-2.7/voltha-go-multi-tests.groovy
similarity index 100%
rename from jjb/pipeline/voltha-go-multi-tests.groovy
rename to jjb/pipeline/voltha/voltha-2.7/voltha-go-multi-tests.groovy
diff --git a/jjb/pipeline/voltha-go-tests.groovy b/jjb/pipeline/voltha/voltha-2.7/voltha-go-tests.groovy
similarity index 100%
rename from jjb/pipeline/voltha-go-tests.groovy
rename to jjb/pipeline/voltha/voltha-2.7/voltha-go-tests.groovy
diff --git a/jjb/pipeline/voltha-nightly-tests-bbsim.groovy b/jjb/pipeline/voltha/voltha-2.7/voltha-nightly-tests-bbsim.groovy
similarity index 94%
rename from jjb/pipeline/voltha-nightly-tests-bbsim.groovy
rename to jjb/pipeline/voltha/voltha-2.7/voltha-nightly-tests-bbsim.groovy
index b2727af..d86aac9 100644
--- a/jjb/pipeline/voltha-nightly-tests-bbsim.groovy
+++ b/jjb/pipeline/voltha/voltha-2.7/voltha-nightly-tests-bbsim.groovy
@@ -23,7 +23,7 @@
label "${params.buildNode}"
}
options {
- timeout(time: 190, unit: 'MINUTES')
+ timeout(time: 300, unit: 'MINUTES')
}
environment {
KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
@@ -75,7 +75,7 @@
$class: 'GitSCM',
userRemoteConfigs: [[
url: "https://gerrit.opencord.org/voltha-system-tests",
- // refspec: "${volthaSystemTestsChange}"
+ refspec: "${volthaSystemTestsChange}"
]],
branches: [[ name: "${branch}", ]],
extensions: [
@@ -84,6 +84,14 @@
[$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
],
])
+ script {
+ sh(script:"""
+ if [ '${volthaSystemTestsChange}' != '' ] ; then
+ cd $WORKSPACE/voltha-system-tests;
+ git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
+ fi
+ """)
+ }
}
}
@@ -95,6 +103,7 @@
echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
source "$WORKSPACE/kind-voltha/releases/${branch}"
else
+ export INFRA_NS="infra"
echo "on master, using default settings for kind-voltha"
fi
diff --git a/jjb/pipeline/voltha-openonu-go-test-bbsim.groovy b/jjb/pipeline/voltha/voltha-2.7/voltha-openonu-go-test-bbsim.groovy
similarity index 85%
rename from jjb/pipeline/voltha-openonu-go-test-bbsim.groovy
rename to jjb/pipeline/voltha/voltha-2.7/voltha-openonu-go-test-bbsim.groovy
index 7a1ab22..a82b9f1 100755
--- a/jjb/pipeline/voltha-openonu-go-test-bbsim.groovy
+++ b/jjb/pipeline/voltha/voltha-2.7/voltha-openonu-go-test-bbsim.groovy
@@ -119,7 +119,7 @@
mkdir -p $ROBOT_LOGS_DIR/1t1gem
export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR"
- export NAME=voltha_voltha
+ export KVSTOREPREFIX=voltha_voltha
make -C $WORKSPACE/voltha-system-tests ${makeTarget} || true
@@ -145,9 +145,15 @@
steps {
sh '''
cd $WORKSPACE/kind-voltha/
- #source $NAME-env.sh
WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down
+ export EXTRA_HELM_FLAGS=""
+ if [ "${branch}" != "master" ]; then
+ echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
+ source "$WORKSPACE/kind-voltha/releases/${branch}"
+ else
+ echo "on master, using default settings for kind-voltha"
+ fi
export EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${extraHelmFlags} "
# start logging
@@ -158,7 +164,7 @@
mkdir -p $ROBOT_LOGS_DIR/1t4gem
export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR"
- export NAME=voltha_voltha
+ export KVSTOREPREFIX=voltha_voltha
make -C $WORKSPACE/voltha-system-tests ${make1t4gemTestTarget} || true
@@ -184,9 +190,15 @@
steps {
sh '''
cd $WORKSPACE/kind-voltha/
- #source $NAME-env.sh
WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down
+ export EXTRA_HELM_FLAGS=""
+ if [ "${branch}" != "master" ]; then
+ echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
+ source "$WORKSPACE/kind-voltha/releases/${branch}"
+ else
+ echo "on master, using default settings for kind-voltha"
+ fi
export EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${extraHelmFlags} "
# start logging
@@ -197,7 +209,7 @@
mkdir -p $ROBOT_LOGS_DIR/1t8gem
export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR"
- export NAME=voltha_voltha
+ export KVSTOREPREFIX=voltha_voltha
make -C $WORKSPACE/voltha-system-tests ${make1t8gemTestTarget} || true
@@ -224,11 +236,16 @@
steps {
sh '''
cd $WORKSPACE/kind-voltha/
- #source $NAME-env.sh
WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down
+ export EXTRA_HELM_FLAGS=""
+ if [ "${branch}" != "master" ]; then
+ echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
+ source "$WORKSPACE/kind-voltha/releases/${branch}"
+ else
+ echo "on master, using default settings for kind-voltha"
+ fi
export EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${extraHelmFlags} "
-
export EXTRA_HELM_FLAGS+="--set pon=2,onu=2,controlledActivation=only-onu "
# start logging
@@ -259,16 +276,21 @@
}
stage('Reconcile DT workflow') {
- when { beforeAgent true; expression { return "${olts}" == "1" } }
environment {
ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ReconcileDT"
}
steps {
sh '''
cd $WORKSPACE/kind-voltha/
- #source $NAME-env.sh
WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down
+ export EXTRA_HELM_FLAGS=""
+ if [ "${branch}" != "master" ]; then
+ echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
+ source "$WORKSPACE/kind-voltha/releases/${branch}"
+ else
+ echo "on master, using default settings for kind-voltha"
+ fi
export EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${extraHelmFlags} "
# Workflow-specific flags
@@ -288,10 +310,7 @@
mkdir -p $ROBOT_LOGS_DIR
export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR -e PowerSwitch"
- export TARGET=reconcile-openonu-go-adapter-test-dt
-
-
- make -C $WORKSPACE/voltha-system-tests \$TARGET || true
+ make -C $WORKSPACE/voltha-system-tests ${makeReconcileDtTestTarget} || true
# stop logging
P_IDS="$(ps e -ww -A | grep "_TAG=kail-reconcile-dt" | grep -v grep | awk '{print $1}')"
@@ -309,7 +328,6 @@
}
stage('Reconcile ATT workflow') {
- when { beforeAgent true; expression { return "${olts}" == "1" } }
environment {
ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ReconcileATT"
}
@@ -318,6 +336,13 @@
cd $WORKSPACE/kind-voltha/
WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down
+ export EXTRA_HELM_FLAGS=""
+ if [ "${branch}" != "master" ]; then
+ echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
+ source "$WORKSPACE/kind-voltha/releases/${branch}"
+ else
+ echo "on master, using default settings for kind-voltha"
+ fi
export EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${extraHelmFlags} "
# Workflow-specific flags
@@ -343,10 +368,7 @@
mkdir -p $ROBOT_LOGS_DIR
export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR -e PowerSwitch"
- export TARGET=reconcile-openonu-go-adapter-test
-
-
- make -C $WORKSPACE/voltha-system-tests \$TARGET || true
+ make -C $WORKSPACE/voltha-system-tests ${makeReconcileTestTarget} || true
# stop logging
P_IDS="$(ps e -ww -A | grep "_TAG=kail-reconcile-att" | grep -v grep | awk '{print $1}')"
@@ -364,7 +386,6 @@
}
stage('Reconcile TT workflow') {
- when { beforeAgent true; expression { return "${olts}" == "1" } }
environment {
ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/ReconcileTT"
}
@@ -373,6 +394,13 @@
cd $WORKSPACE/kind-voltha/
WAIT_ON_DOWN=y DEPLOY_K8S=n ./voltha down
+ export EXTRA_HELM_FLAGS=""
+ if [ "${branch}" != "master" ]; then
+ echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
+ source "$WORKSPACE/kind-voltha/releases/${branch}"
+ else
+ echo "on master, using default settings for kind-voltha"
+ fi
export EXTRA_HELM_FLAGS+="--set log_agent.enabled=False ${extraHelmFlags} "
# Workflow-specific flags
@@ -392,9 +420,7 @@
mkdir -p $ROBOT_LOGS_DIR
export ROBOT_MISC_ARGS="-d $ROBOT_LOGS_DIR -e PowerSwitch"
- export TARGET=reconcile-openonu-go-adapter-test-tt
-
- make -C $WORKSPACE/voltha-system-tests \$TARGET || true
+ make -C $WORKSPACE/voltha-system-tests ${makeReconcileTtTestTarget} || true
# stop logging
P_IDS="$(ps e -ww -A | grep "_TAG=kail-reconcile-tt" | grep -v grep | awk '{print $1}')"
diff --git a/jjb/pipeline/voltha-physical-build-and-tests.groovy b/jjb/pipeline/voltha/voltha-2.7/voltha-physical-build-and-tests.groovy
similarity index 87%
rename from jjb/pipeline/voltha-physical-build-and-tests.groovy
rename to jjb/pipeline/voltha/voltha-2.7/voltha-physical-build-and-tests.groovy
index f72b15d..e719a08 100644
--- a/jjb/pipeline/voltha-physical-build-and-tests.groovy
+++ b/jjb/pipeline/voltha/voltha-2.7/voltha-physical-build-and-tests.groovy
@@ -15,6 +15,14 @@
// deploy VOLTHA built from patchset on a physical pod and run e2e test
// uses kind-voltha to deploy voltha-2.X
+// NOTE we are importing the library even if it's global so that it's
+// easier to change the keywords during a replay
+library identifier: 'cord-jenkins-libraries@master',
+ retriever: modernSCM([
+ $class: 'GitSCMSource',
+ remote: 'https://gerrit.opencord.org/ci-management.git'
+])
+
// Need this so that deployment_config has global scope when it's read later
deployment_config = null
localDeploymentConfigFile = null
@@ -43,7 +51,7 @@
environment {
KUBECONFIG="$HOME/.kube/kind-config-voltha-minimal"
VOLTCONFIG="$HOME/.volt/config-minimal"
- PATH="$WORKSPACE/voltha/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ PATH="$WORKSPACE/bin:$WORKSPACE/kind-voltha/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
NAME="minimal"
FANCY=0
//VOL-2194 ONOS SSH and REST ports hardcoded to 30115/30120 in tests
@@ -55,7 +63,7 @@
stage ('Initialize') {
steps {
sh returnStdout: false, script: """
- test -e $WORKSPACE/voltha/kind-voltha/voltha && cd $WORKSPACE/voltha/kind-voltha && ./voltha down
+ test -e $WORKSPACE/kind-voltha/voltha && cd $WORKSPACE/kind-voltha && ./voltha down
cd $WORKSPACE
rm -rf $WORKSPACE/*
"""
@@ -71,33 +79,27 @@
}
}
- stage('Repo') {
+ stage('Download Code') {
steps {
- checkout(changelog: true,
- poll: false,
- scm: [$class: 'RepoScm',
- manifestRepositoryUrl: "${params.manifestUrl}",
- manifestBranch: "${params.branch}",
- currentBranch: true,
- destinationDir: 'voltha',
- forceSync: true,
- resetFirst: true,
- quiet: true,
- jobs: 4,
- showAllChanges: true]
- )
- }
- }
-
- stage('Get Patch') {
- when {
- expression { params.manualBranch == "" }
- }
- steps {
- sh returnStdout: false, script: """
- cd voltha
- repo download "${gerritProject}" "${gerritChangeNumber}/${gerritPatchsetNumber}"
- """
+ checkout([
+ $class: 'GitSCM',
+ userRemoteConfigs: [[
+ url: "https://gerrit.opencord.org/kind-voltha",
+ ]],
+ branches: [[ name: "master", ]],
+ extensions: [
+ [$class: 'WipeWorkspace'],
+ [$class: 'RelativeTargetDirectory', relativeTargetDir: "kind-voltha"],
+ [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
+ ],
+ ])
+ getVolthaCode([
+ branch: "${branch}",
+ gerritProject: "${gerritProject}",
+ gerritRefspec: "${gerritRefspec}",
+ volthaSystemTestsChange: "${volthaSystemTestsChange}",
+ volthaHelmChartsChange: "${volthaHelmChartsChange}",
+ ])
}
}
@@ -118,38 +120,42 @@
}
}
+ stage('Build patch') {
+ steps {
+ // NOTE that the correct patch has already been checked out
+ // during the getVolthaCode step
+ buildVolthaComponent("${gerritProject}")
+ }
+ }
+
stage('Create KinD Cluster') {
steps {
sh returnStdout: false, script: """
if [ "${branch}" != "master" ]; then
echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/voltha/kind-voltha/releases/${branch}"
+ source "$WORKSPACE/kind-voltha/releases/${branch}"
else
echo "on master, using default settings for kind-voltha"
fi
-
- cd $WORKSPACE/voltha/kind-voltha/
+ cd $WORKSPACE/kind-voltha/
JUST_K8S=y ./voltha up
"""
}
}
- stage('Build and Push Images') {
+ stage('Load image in kind nodes') {
when {
expression { params.manualBranch == "" }
}
steps {
sh returnStdout: false, script: """
-
if [ "${branch}" != "master" ]; then
echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/voltha/kind-voltha/releases/${branch}"
+ source "$WORKSPACE/kind-voltha/releases/${branch}"
else
echo "on master, using default settings for kind-voltha"
fi
-
if ! [[ "${gerritProject}" =~ ^(voltha-system-tests|kind-voltha|voltha-helm-charts)\$ ]]; then
- make -C $WORKSPACE/voltha/${gerritProject} DOCKER_REPOSITORY=voltha/ DOCKER_TAG=citest docker-build
docker images | grep citest
for image in \$(docker images -f "reference=*/*citest" --format "{{.Repository}}")
do
@@ -174,7 +180,7 @@
sh returnStdout: false, script: """
if [ "${branch}" != "master" ]; then
echo "on branch: ${branch}, sourcing kind-voltha/releases/${branch}"
- source "$WORKSPACE/voltha/kind-voltha/releases/${branch}"
+ source "$WORKSPACE/kind-voltha/releases/${branch}"
else
echo "on master, using default settings for kind-voltha"
fi
@@ -206,7 +212,7 @@
done
if [ "${gerritProject}" = "voltha-helm-charts" ]; then
- export CHART_PATH=$WORKSPACE/voltha/voltha-helm-charts
+ export CHART_PATH=$WORKSPACE/voltha-helm-charts
export VOLTHA_CHART=\$CHART_PATH/voltha
export VOLTHA_ADAPTER_OPEN_OLT_CHART=\$CHART_PATH/voltha-adapter-openolt
export VOLTHA_ADAPTER_OPEN_ONU_CHART=\$CHART_PATH/voltha-adapter-openonu
@@ -215,7 +221,7 @@
helm dep update \$VOLTHA_ADAPTER_OPEN_ONU_CHART
fi
- cd $WORKSPACE/voltha/kind-voltha/
+ cd $WORKSPACE/kind-voltha/
echo \$EXTRA_HELM_FLAGS
kail -n voltha -n default > $WORKSPACE/onos-voltha-combined.log &
./voltha up
@@ -257,7 +263,7 @@
steps {
sh returnStdout: false, script: """
etcd_container=\$(kubectl get pods -n voltha | grep voltha-etcd-cluster | awk 'NR==1{print \$1}')
- kubectl cp $WORKSPACE/voltha/voltha-system-tests/tests/data/TechProfile-${profile}.json voltha/\$etcd_container:/tmp/flexpod.json
+ kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-${profile}.json voltha/\$etcd_container:/tmp/flexpod.json
kubectl exec -it \$etcd_container -n voltha -- /bin/sh -c 'cat /tmp/flexpod.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/XGS-PON/64'
"""
}
@@ -335,7 +341,6 @@
}
steps {
sh returnStdout: false, script: """
- cd voltha
mkdir -p $WORKSPACE/RobotLogs
# If the Gerrit comment contains a line with "functional tests" then run the full
@@ -351,7 +356,7 @@
ROBOT_MISC_ARGS+="-i dataplane"
fi
- make -C $WORKSPACE/voltha/voltha-system-tests voltha-test || true
+ make -C $WORKSPACE/voltha-system-tests voltha-test || true
"""
}
}
@@ -374,7 +379,7 @@
always {
sh returnStdout: false, script: '''
set +e
- cp $WORKSPACE/voltha/kind-voltha/install-minimal.log $WORKSPACE/
+ cp $WORKSPACE/kind-voltha/install-minimal.log $WORKSPACE/
kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq
kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq
kubectl get nodes -o wide
diff --git a/jjb/pipeline/voltha/voltha-2.7/voltha-scale-test.groovy b/jjb/pipeline/voltha/voltha-2.7/voltha-scale-test.groovy
new file mode 100644
index 0000000..11bb1c4
--- /dev/null
+++ b/jjb/pipeline/voltha/voltha-2.7/voltha-scale-test.groovy
@@ -0,0 +1,801 @@
+// Copyright 2019-present Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// deploy VOLTHA and performs a scale test
+
+// this function generates the correct parameters for ofAgent
+// to connect to multiple ONOS instances
+def ofAgentConnections(numOfOnos, releaseName, namespace) {
+ def params = " "
+ numOfOnos.times {
+ params += "--set voltha.services.controller[${it}].address=${releaseName}-onos-classic-${it}.${releaseName}-onos-classic-hs.${namespace}.svc:6653 "
+ }
+ return params
+}
+
+pipeline {
+
+ /* no label, executor is determined by JJB */
+ agent {
+ label "${params.buildNode}"
+ }
+ options {
+ timeout(time: 60, unit: 'MINUTES')
+ }
+ environment {
+ JENKINS_NODE_COOKIE="dontKillMe" // do not kill processes after the build is done
+ KUBECONFIG="$HOME/.kube/config"
+ VOLTCONFIG="$HOME/.volt/config-2.7" // voltha-2.7 does not have ingress and still relies on port-forwarding
+ SSHPASS="karaf"
+ VOLTHA_LOG_LEVEL="${logLevel}"
+ NUM_OF_BBSIM="${olts}"
+ NUM_OF_OPENONU="${openonuAdapterReplicas}"
+ NUM_OF_ONOS="${onosReplicas}"
+ NUM_OF_ATOMIX="${atomixReplicas}"
+ EXTRA_HELM_FLAGS="${extraHelmFlags} " // note that the trailing space is required to separate the parameters from appends done later
+
+ APPS_TO_LOG="etcd kafka onos-classic adapter-open-onu adapter-open-olt rw-core ofagent bbsim radius bbsim-sadis-server onos-config-loader"
+ LOG_FOLDER="$WORKSPACE/logs"
+
+ GERRIT_PROJECT="${GERRIT_PROJECT}"
+ }
+
+ stages {
+ stage ('Cleanup') {
+ steps {
+ timeout(time: 11, unit: 'MINUTES') {
+ sh returnStdout: false, script: '''
+ helm repo add onf https://charts.opencord.org
+ helm repo update
+
+ NAMESPACES="voltha1 voltha2 infra default"
+ for NS in $NAMESPACES
+ do
+ for hchart in $(helm list -n $NS -q | grep -E -v 'docker-registry|kafkacat');
+ do
+ echo "Purging chart: ${hchart}"
+ helm delete -n $NS "${hchart}"
+ done
+ done
+
+ # wait for pods to be removed
+ echo -ne "\nWaiting for PODs to be removed..."
+ PODS=$(kubectl get pods --all-namespaces --no-headers | grep -v -E "kube|cattle|registry|fleet|ingress-nginx" | wc -l)
+ while [[ $PODS != 0 ]]; do
+ sleep 5
+ echo -ne "."
+ PODS=$(kubectl get pods --all-namespaces --no-headers | grep -v -E "kube|cattle|registry|fleet|ingress-nginx" | wc -l)
+ done
+
+ # remove orphaned port-forward from different namespaces
+ ps aux | grep port-forw | grep -v grep | awk '{print $2}' | xargs --no-run-if-empty kill -9 || true
+
+ cd $WORKSPACE
+ rm -rf $WORKSPACE/*
+ '''
+ }
+ }
+ }
+ stage('Clone voltha-system-tests') {
+ steps {
+ checkout([
+ $class: 'GitSCM',
+ userRemoteConfigs: [[
+ url: "https://gerrit.opencord.org/voltha-system-tests",
+ refspec: "${volthaSystemTestsChange}"
+ ]],
+ branches: [[ name: "${release}", ]],
+ extensions: [
+ [$class: 'WipeWorkspace'],
+ [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-system-tests"],
+ [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
+ ],
+ ])
+ script {
+ sh(script:"""
+ if [ '${volthaSystemTestsChange}' != '' ] ; then
+ cd $WORKSPACE/voltha-system-tests;
+ git fetch https://gerrit.opencord.org/voltha-system-tests ${volthaSystemTestsChange} && git checkout FETCH_HEAD
+ fi
+ """)
+ }
+ }
+ }
+ stage('Clone voltha-helm-charts') {
+ steps {
+ checkout([
+ $class: 'GitSCM',
+ userRemoteConfigs: [[
+ url: "https://gerrit.opencord.org/voltha-helm-charts",
+ refspec: "${volthaHelmChartsChange}"
+ ]],
+ branches: [[ name: "${release}", ]],
+ extensions: [
+ [$class: 'WipeWorkspace'],
+ [$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-helm-charts"],
+ [$class: 'CloneOption', depth: 0, noTags: false, reference: '', shallow: false],
+ ],
+ ])
+ script {
+ sh(script:"""
+ if [ '${volthaHelmChartsChange}' != '' ] ; then
+ cd $WORKSPACE/voltha-helm-charts;
+ git fetch https://gerrit.opencord.org/voltha-helm-charts ${volthaHelmChartsChange} && git checkout FETCH_HEAD
+ fi
+ """)
+ }
+ }
+ }
+ stage('Build patch') {
+ when {
+ expression {
+ return params.GERRIT_PROJECT
+ }
+ }
+ steps {
+ sh """
+ git clone https://\$GERRIT_HOST/\$GERRIT_PROJECT
+ cd \$GERRIT_PROJECT
+ git fetch https://\$GERRIT_HOST/\$GERRIT_PROJECT \$GERRIT_REFSPEC && git checkout FETCH_HEAD
+
+ DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-build
+ DOCKER_REGISTRY=${dockerRegistry}/ DOCKER_REPOSITORY=voltha/ DOCKER_TAG=voltha-scale make docker-push
+ """
+ }
+ }
+ stage('Deploy common infrastructure') {
+ // includes monitoring, kafka, etcd
+ steps {
+ sh '''
+ helm install kafka $HOME/teone/helm-charts/kafka --set replicaCount=${kafkaReplicas},replicas=${kafkaReplicas} --set persistence.enabled=false \
+ --set zookeeper.replicaCount=${kafkaReplicas} --set zookeeper.persistence.enabled=false \
+ --set prometheus.kafka.enabled=true,prometheus.operator.enabled=true,prometheus.jmx.enabled=true,prometheus.operator.serviceMonitor.namespace=default
+
+ # the ETCD chart uses "auth" for reasons different than BBSim, so strip that away
+ ETCD_FLAGS=$(echo ${extraHelmFlags} | sed -e 's/--set auth=false / /g') | sed -e 's/--set auth=true / /g'
+ ETCD_FLAGS+=" --set auth.rbac.enabled=false,persistence.enabled=false,statefulset.replicaCount=${etcdReplicas}"
+ ETCD_FLAGS+=" --set memoryMode=${inMemoryEtcdStorage} "
+ helm install --set replicas=${etcdReplicas} etcd $HOME/teone/helm-charts/etcd $ETCD_FLAGS
+
+ if [ ${withMonitoring} = true ] ; then
+ helm install nem-monitoring onf/nem-monitoring \
+ -f $HOME/voltha-scale/grafana.yaml \
+ --set prometheus.alertmanager.enabled=false,prometheus.pushgateway.enabled=false \
+ --set kpi_exporter.enabled=false,dashboards.xos=false,dashboards.onos=false,dashboards.aaa=false,dashboards.voltha=false
+ fi
+ '''
+ }
+ }
+ stage('Deploy Voltha') {
+ steps {
+ timeout(time: 10, unit: 'MINUTES') {
+ script {
+ sh returnStdout: false, script: '''
+ # start logging with kail
+
+ mkdir -p $LOG_FOLDER
+
+ list=($APPS_TO_LOG)
+ for app in "${list[@]}"
+ do
+ echo "Starting logs for: ${app}"
+ _TAG=kail-$app kail -l app=$app --since 1h > $LOG_FOLDER/$app.log&
+ done
+ '''
+ sh returnStdout: false, script: """
+
+ export EXTRA_HELM_FLAGS+=' '
+
+ # BBSim custom image handling
+ if [ '${bbsimImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'bbsim' ]; then
+ IFS=: read -r bbsimRepo bbsimTag <<< '${bbsimImg.trim()}'
+ EXTRA_HELM_FLAGS+="--set images.bbsim.repository=\$bbsimRepo,images.bbsim.tag=\$bbsimTag "
+ fi
+
+ # VOLTHA custom image handling
+ if [ '${rwCoreImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-go' ]; then
+ IFS=: read -r rwCoreRepo rwCoreTag <<< '${rwCoreImg.trim()}'
+ EXTRA_HELM_FLAGS+="--set voltha.images.rw_core.repository=\$rwCoreRepo,voltha.images.rw_core.tag=\$rwCoreTag "
+ fi
+
+ # ofAgent custom image handling
+ if [ '${ofAgentImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'of-agent' ]; then
+ IFS=: read -r ofAgentRepo ofAgentTag <<< '${ofAgentImg.trim()}'
+ EXTRA_HELM_FLAGS+="--set voltha.images.ofagent.repository=\$ofAgentRepo,voltha.images.ofagent.tag=\$ofAgentTag "
+ fi
+
+ # OpenOLT custom image handling
+ if [ '${openoltAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openolt-adapter' ]; then
+ IFS=: read -r openoltAdapterRepo openoltAdapterTag <<< '${openoltAdapterImg.trim()}'
+ EXTRA_HELM_FLAGS+="--set voltha-adapter-openolt.images.adapter_open_olt.repository=\$openoltAdapterRepo,voltha-adapter-openolt.images.adapter_open_olt.tag=\$openoltAdapterTag "
+ fi
+
+ # OpenONU custom image handling
+ if [ '${openonuAdapterImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter' ]; then
+ IFS=: read -r openonuAdapterRepo openonuAdapterTag <<< '${openonuAdapterImg.trim()}'
+ EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu.repository=\$openonuAdapterRepo,voltha-adapter-openonu.images.adapter_open_onu.tag=\$openonuAdapterTag "
+ fi
+
+ # OpenONU GO custom image handling
+ if [ '${openonuAdapterGoImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-openonu-adapter-go' ]; then
+ IFS=: read -r openonuAdapterGoRepo openonuAdapterGoTag <<< '${openonuAdapterGoImg.trim()}'
+ EXTRA_HELM_FLAGS+="--set voltha-adapter-openonu.images.adapter_open_onu_go.repository=\$openonuAdapterGoRepo,voltha-adapter-openonu.images.adapter_open_onu_go.tag=\$openonuAdapterGoTag "
+ fi
+
+ # ONOS custom image handling
+ if [ '${onosImg.trim()}' != '' ] && [ '\$GERRIT_PROJECT' != 'voltha-onos' ]; then
+ IFS=: read -r onosRepo onosTag <<< '${onosImg.trim()}'
+ EXTRA_HELM_FLAGS+="--set onos-classic.image.repository=\$onosRepo,onos-classic.image.tag=\$onosTag "
+ fi
+
+ # set BBSim parameters
+ EXTRA_HELM_FLAGS+='--set enablePerf=true,pon=${pons},onu=${onus} '
+
+ # disable the securityContext, this is a development cluster
+ EXTRA_HELM_FLAGS+='--set securityContext.enabled=false '
+
+ # No persistent-volume-claims in Atomix
+ EXTRA_HELM_FLAGS+="--set onos-classic.atomix.persistence.enabled=false "
+
+ echo "Installing with the following extra arguments:"
+ echo $EXTRA_HELM_FLAGS
+
+
+
+ # Use custom built images
+
+ if [ '\$GERRIT_PROJECT' == 'voltha-go' ]; then
+ EXTRA_HELM_FLAGS+="--set voltha.images.rw_core.repository=${dockerRegistry}/voltha/voltha-rw-core,voltha.images.rw_core.tag=voltha-scale "
+ fi
+
+ if [ '\$GERRIT_PROJECT' == 'voltha-openolt-adapter' ]; then
+ EXTRA_HELM_FLAGS+="--set voltha-openolt-adapter.images.adapter_open_olt.repository=${dockerRegistry}/voltha/voltha-openolt-adapter,voltha-openolt-adapter.images.adapter_open_olt.tag=voltha-scale "
+ fi
+
+ if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter' ]; then
+ EXTRA_HELM_FLAGS+="--set voltha-openonu-adapter.images.adapter_open_onu.repository=${dockerRegistry}/voltha/voltha-openonu-adapter,voltha-openonu-adapter.images.adapter_open_onu.tag=voltha-scale "
+ fi
+
+ if [ '\$GERRIT_PROJECT' == 'voltha-openonu-adapter-go' ]; then
+ EXTRA_HELM_FLAGS+="--set voltha-openonu-adapter-go.images.adapter_open_onu_go.repository=${dockerRegistry}/voltha/voltha-openonu-adapter-go,voltha-openonu-adapter-go.images.adapter_open_onu_go.tag=voltha-scale "
+ fi
+
+ if [ '\$GERRIT_PROJECT' == 'ofagent-go' ]; then
+ EXTRA_HELM_FLAGS+="--set voltha.images.ofagent.repository=${dockerRegistry}/voltha/voltha-ofagent-go,ofagent-go.images.ofagent.tag=voltha-scale "
+ fi
+
+ if [ '\$GERRIT_PROJECT' == 'voltha-onos' ]; then
+ EXTRA_HELM_FLAGS+="--set onos-classic.image.repository=${dockerRegistry}/voltha/voltha-onos,onos-classic.image.tag=voltha-scale "
+ fi
+
+ if [ '\$GERRIT_PROJECT' == 'bbsim' ]; then
+ EXTRA_HELM_FLAGS+="--set images.bbsim.repository=${dockerRegistry}/voltha/bbsim,images.bbsim.tag=voltha-scale "
+ fi
+
+ helm upgrade --install voltha-infra onf/voltha-infra \$EXTRA_HELM_FLAGS \
+ --set onos-classic.replicas=${onosReplicas},onos-classic.atomix.replicas=${atomixReplicas} \
+ --set etcd.enabled=false,kafka.enabled=false \
+ --set global.log_level=${logLevel} \
+ -f $WORKSPACE/voltha-helm-charts/examples/${workflow}-values.yaml \
+ --set onos-classic.onosSshPort=30115 --set onos-classic.onosApiPort=30120 \
+ --version 0.1.13
+
+ helm upgrade --install voltha1 onf/voltha-stack \$EXTRA_HELM_FLAGS \
+ --set global.stack_name=voltha1 \
+ --set global.voltha_infra_name=voltha-infra \
+ --set global.voltha_infra_namespace=default \
+ --set global.log_level=${logLevel} \
+ ${ofAgentConnections(onosReplicas.toInteger(), "voltha-infra", "default")} \
+ --set voltha.services.kafka.adapter.address=kafka.default.svc:9092 \
+ --set voltha.services.kafka.cluster.address=kafka.default.svc:9092 \
+ --set voltha.services.etcd.address=etcd.default.svc:2379 \
+ --set voltha-adapter-openolt.services.kafka.adapter.address=kafka.default.svc:9092 \
+ --set voltha-adapter-openolt.services.kafka.cluster.address=kafka.default.svc:9092 \
+ --set voltha-adapter-openolt.services.etcd.address=etcd.default.svc:2379 \
+ --set voltha-adapter-openonu.services.kafka.adapter.address=kafka.default.svc:9092 \
+ --set voltha-adapter-openonu.services.kafka.cluster.address=kafka.default.svc:9092 \
+ --set voltha-adapter-openonu.services.etcd.address=etcd.default.svc:2379 \
+ --version 0.1.17
+
+
+ for i in {0..${olts.toInteger() - 1}}; do
+ stackId=1
+ helm upgrade --install bbsim\$i onf/bbsim \$EXTRA_HELM_FLAGS \
+ --set olt_id="\${stackId}\${i}" \
+ --set onu=${onus},pon=${pons} \
+ --set global.log_level=${logLevel.toLowerCase()} \
+ -f $WORKSPACE/voltha-helm-charts/examples/${workflow}-values.yaml \
+ --version 4.2.0
+ done
+ """
+ sh """
+ set +x
+
+ echo -ne "\nWaiting for VOLTHA and ONOS to start..."
+ voltha=\$(kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
+ onos=\$(kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l)
+ while [[ \$voltha != 0 || \$onos != 0 ]]; do
+ sleep 5
+ echo -ne "."
+ voltha=\$(kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l)
+ onos=\$(kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l)
+ done
+ echo -ne "\nVOLTHA and ONOS pods ready\n"
+ kubectl get pods --all-namespaces -l app.kubernetes.io/part-of=voltha --no-headers | grep "0/" | wc -l
+ kubectl get pods --all-namespaces -l app=onos-classic --no-headers | grep "0/" | wc -l
+ """
+ start_port_forward(olts)
+ }
+ }
+ }
+ }
+ stage('Configuration') {
+ steps {
+ script {
+ def tech_prof_directory = "XGS-PON"
+ sh returnStdout: false, script: """
+ # Setting link discovery
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.lldp.impl.LldpLinkProvider enabled ${withLLDP}
+
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager allowExtraneousRules true
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flow.impl.FlowRuleManager importExtraneousRules true
+
+
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxBatchMillis 900
+
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.net.flowobjective.impl.InOrderFlowObjectiveManager accumulatorMaxIdleMillis 500
+
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.opencord.olt.impl.Olt provisionDelay 1000
+
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set ${logLevel} org.onosproject
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set ${logLevel} org.opencord
+
+ # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.opencord.cordmcast
+ # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.onosproject.mcast
+ # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.opencord.igmpproxy
+ # sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 log:set DEBUG org.opencord.olt
+
+ kubectl exec \$(kubectl get pods | grep -E "bbsim[0-9]" | awk 'NR==1{print \$1}') -- bbsimctl log ${logLevel.toLowerCase()} false
+
+ # Set Flows/Ports/Meters poll frequency
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.flow.impl.OpenFlowRuleProvider flowPollFrequency ${onosStatInterval}
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg set org.onosproject.provider.of.device.impl.OpenFlowDeviceProvider portStatsPollFrequency ${onosStatInterval}
+
+ if [ ${withFlows} = false ]; then
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 app deactivate org.opencord.olt
+ fi
+
+ if [ '${workflow}' = 'tt' ]; then
+ etcd_container=\$(kubectl get pods --all-namespaces | grep etcd | awk 'NR==1{print \$2}')
+ kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-HSIA.json \$etcd_container:/tmp/hsia.json
+ put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/hsia.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/64')
+ kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-VoIP.json \$etcd_container:/tmp/voip.json
+ put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/voip.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/65')
+ kubectl cp $WORKSPACE/voltha-system-tests/tests/data/TechProfile-TT-MCAST.json \$etcd_container:/tmp/mcast.json
+ put_result=\$(kubectl exec -it \$etcd_container -- /bin/sh -c 'cat /tmp/mcast.json | ETCDCTL_API=3 etcdctl put service/voltha/technology_profiles/${tech_prof_directory}/66')
+ fi
+
+ if [ ${withPcap} = true ] ; then
+ # Start the tcp-dump in ofagent
+ export OF_AGENT=\$(kubectl get pods -l app=ofagent -o name)
+ kubectl exec \$OF_AGENT -- apk update
+ kubectl exec \$OF_AGENT -- apk add tcpdump
+ kubectl exec \$OF_AGENT -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
+ _TAG=ofagent-tcpdump kubectl exec \$OF_AGENT -- tcpdump -nei eth0 -w out.pcap&
+
+ # Start the tcp-dump in radius
+ export RADIUS=\$(kubectl get pods -l app=radius -o name)
+ kubectl exec \$RADIUS -- apt-get update
+ kubectl exec \$RADIUS -- apt-get install -y tcpdump
+ _TAG=radius-tcpdump kubectl exec \$RADIUS -- tcpdump -w out.pcap&
+
+ # Start the tcp-dump in ONOS
+ for i in \$(seq 0 \$ONOSES); do
+ INSTANCE="onos-onos-classic-\$i"
+ kubectl exec \$INSTANCE -- apt-get update
+ kubectl exec \$INSTANCE -- apt-get install -y tcpdump
+ kubectl exec \$INSTANCE -- mv /usr/sbin/tcpdump /usr/bin/tcpdump
+ _TAG=\$INSTANCE kubectl exec \$INSTANCE -- /usr/bin/tcpdump -nei eth0 port 1812 -w out.pcap&
+ done
+ fi
+ """
+ }
+ }
+ }
+ stage('Load MIB Template') {
+ when {
+ expression {
+ return params.withMibTemplate
+ }
+ }
+ steps {
+ sh """
+ # load MIB template
+ wget https://raw.githubusercontent.com/opencord/voltha-openonu-adapter-go/master/templates/BBSM-12345123451234512345-00000000000001-v1.json
+ cat BBSM-12345123451234512345-00000000000001-v1.json | kubectl exec -it \$(kubectl get pods |grep etcd | awk 'NR==1{print \$1}') -- etcdctl put service/voltha/omci_mibs/go_templates/BBSM/12345123451234512345/00000000000001
+ """
+ }
+ }
+ stage('Run Test') {
+ steps {
+ sh '''
+ mkdir -p $WORKSPACE/RobotLogs
+ cd $WORKSPACE/voltha-system-tests
+ make vst_venv
+ '''
+ sh '''
+ if [ ${withProfiling} = true ] ; then
+ mkdir -p $LOG_FOLDER/pprof
+ echo $PATH
+ # Creating bash script that periodically collects pprof profiles
+ cat << EOF > $WORKSPACE/pprof.sh
+timestamp() {
+ date +"%T"
+}
+
+i=0
+while [[ true ]]; do
+ ((i++))
+ ts=$(timestamp)
+ go tool pprof -png http://127.0.0.1:6060/debug/pprof/heap > $LOG_FOLDER/pprof/rw-core-heap-\\$i-\\$ts.png
+ go tool pprof -png http://127.0.0.1:6060/debug/pprof/goroutine > $LOG_FOLDER/pprof/rw-core-goroutine-\\$i-\\$ts.png
+ curl -o $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof http://127.0.0.1:6060/debug/pprof/profile?seconds=10
+ go tool pprof -png $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/rw-core-profile-\\$i-\\$ts.png
+
+ go tool pprof -png http://127.0.0.1:6061/debug/pprof/heap > $LOG_FOLDER/pprof/openolt-heap-\\$i-\\$ts.png
+ go tool pprof -png http://127.0.0.1:6061/debug/pprof/goroutine > $LOG_FOLDER/pprof/openolt-goroutine-\\$i-\\$ts.png
+ curl -o $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof http://127.0.0.1:6061/debug/pprof/profile?seconds=10
+ go tool pprof -png $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/openolt-profile-\\$i-\\$ts.png
+
+ go tool pprof -png http://127.0.0.1:6062/debug/pprof/heap > $LOG_FOLDER/pprof/ofagent-heap-\\$i-\\$ts.png
+ go tool pprof -png http://127.0.0.1:6062/debug/pprof/goroutine > $LOG_FOLDER/pprof/ofagent-goroutine-\\$i-\\$ts.png
+ curl -o $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof http://127.0.0.1:6062/debug/pprof/profile?seconds=10
+ go tool pprof -png $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.pprof > $LOG_FOLDER/pprof/ofagent-profile-\\$i-\\$ts.png
+
+ sleep 10
+done
+EOF
+
+ _TAG="pprof"
+ _TAG=$_TAG bash $WORKSPACE/pprof.sh &
+ fi
+ '''
+ timeout(time: 15, unit: 'MINUTES') {
+ sh '''
+ ROBOT_PARAMS="--exitonfailure \
+ -v olt:${olts} \
+ -v pon:${pons} \
+ -v onu:${onus} \
+ -v workflow:${workflow} \
+ -v withEapol:${withEapol} \
+ -v withDhcp:${withDhcp} \
+ -v withIgmp:${withIgmp} \
+ -v ONOS_SSH_PORT:30115 \
+ -v ONOS_REST_PORT:30120 \
+ --noncritical non-critical \
+ -e igmp -e teardown "
+
+ if [ ${withEapol} = false ] ; then
+ ROBOT_PARAMS+="-e authentication "
+ fi
+
+ if [ ${withDhcp} = false ] ; then
+ ROBOT_PARAMS+="-e dhcp "
+ fi
+
+ if [ ${provisionSubscribers} = false ] ; then
+ # if we're not considering subscribers then we don't care about authentication and dhcp
+ ROBOT_PARAMS+="-e authentication -e provision -e flow-after -e dhcp "
+ fi
+
+ if [ ${withFlows} = false ] ; then
+ ROBOT_PARAMS+="-i setup -i activation "
+ fi
+
+ cd $WORKSPACE/voltha-system-tests
+ source ./vst_venv/bin/activate
+ robot -d $WORKSPACE/RobotLogs \
+ $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
+ '''
+ }
+ }
+ }
+ stage('Run Igmp Tests') {
+ environment {
+ ROBOT_LOGS_DIR="$WORKSPACE/RobotLogs/IgmpTests"
+ }
+ when {
+ expression {
+ return params.withIgmp
+ }
+ }
+ steps {
+ sh '''
+ set +e
+ mkdir -p $ROBOT_LOGS_DIR
+ cd $WORKSPACE/voltha-system-tests
+ make vst_venv
+ '''
+ timeout(time: 11, unit: 'MINUTES') {
+ sh '''
+ ROBOT_PARAMS="--exitonfailure \
+ -v olt:${olts} \
+ -v pon:${pons} \
+ -v onu:${onus} \
+ -v workflow:${workflow} \
+ -v withEapol:${withEapol} \
+ -v withDhcp:${withDhcp} \
+ -v withIgmp:${withIgmp} \
+ --noncritical non-critical \
+ -i igmp \
+ -e setup -e activation -e flow-before \
+ -e authentication -e provision -e flow-after \
+ -e dhcp -e teardown "
+ cd $WORKSPACE/voltha-system-tests
+ source ./vst_venv/bin/activate
+ robot -d $ROBOT_LOGS_DIR \
+ $ROBOT_PARAMS tests/scale/Voltha_Scale_Tests.robot
+ '''
+ }
+ }
+ }
+ }
+ post {
+ always {
+ // collect result, done in the "post" step so it's executed even in the
+ // event of a timeout in the tests
+ sh '''
+
+ # stop the kail processes
+ list=($APPS_TO_LOG)
+ for app in "${list[@]}"
+ do
+ echo "Stopping logs for: ${app}"
+ _TAG="kail-$app"
+ P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
+ if [ -n "$P_IDS" ]; then
+ echo $P_IDS
+ for P_ID in $P_IDS; do
+ kill -9 $P_ID
+ done
+ fi
+ done
+
+ if [ ${withPcap} = true ] ; then
+ # stop ofAgent tcpdump
+ P_ID="\$(ps e -ww -A | grep "_TAG=ofagent-tcpdump" | grep -v grep | awk '{print \$1}')"
+ if [ -n "\$P_ID" ]; then
+ kill -9 \$P_ID
+ fi
+
+ # stop radius tcpdump
+ P_ID="\$(ps e -ww -A | grep "_TAG=radius-tcpdump" | grep -v grep | awk '{print \$1}')"
+ if [ -n "\$P_ID" ]; then
+ kill -9 \$P_ID
+ fi
+
+ # stop onos tcpdump
+ LIMIT=$(($NUM_OF_ONOS - 1))
+ for i in $(seq 0 $LIMIT); do
+ INSTANCE="onos-onos-classic-$i"
+ P_ID="\$(ps e -ww -A | grep "_TAG=$INSTANCE" | grep -v grep | awk '{print \$1}')"
+ if [ -n "\$P_ID" ]; then
+ kill -9 \$P_ID
+ fi
+ done
+
+ # copy the file
+ export OF_AGENT=$(kubectl get pods -l app=ofagent | awk 'NR==2{print $1}') || true
+ kubectl cp $OF_AGENT:out.pcap $LOG_FOLDER/ofagent.pcap || true
+
+ export RADIUS=$(kubectl get pods -l app=radius | awk 'NR==2{print $1}') || true
+ kubectl cp $RADIUS:out.pcap $LOG_FOLDER/radius.pcap || true
+
+ LIMIT=$(($NUM_OF_ONOS - 1))
+ for i in $(seq 0 $LIMIT); do
+ INSTANCE="onos-onos-classic-$i"
+ kubectl cp $INSTANCE:out.pcap $LOG_FOLDER/$INSTANCE.pcap || true
+ done
+ fi
+
+ cd voltha-system-tests
+ source ./vst_venv/bin/activate
+ python tests/scale/collect-result.py -r $WORKSPACE/RobotLogs/output.xml -p $WORKSPACE/plots > $WORKSPACE/execution-time.txt || true
+ cat $WORKSPACE/execution-time.txt
+ '''
+ sh '''
+ if [ ${withProfiling} = true ] ; then
+ _TAG="pprof"
+ P_IDS="$(ps e -ww -A | grep "_TAG=$_TAG" | grep -v grep | awk '{print $1}')"
+ if [ -n "$P_IDS" ]; then
+ echo $P_IDS
+ for P_ID in $P_IDS; do
+ kill -9 $P_ID
+ done
+ fi
+ fi
+ '''
+ plot([
+ csvFileName: 'scale-test.csv',
+ csvSeries: [
+ [file: 'plots/plot-voltha-onus.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-onos-ports.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-voltha-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-voltha-openolt-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-onos-flows-before.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-onos-auth.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-voltha-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-voltha-openolt-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-onos-flows-after.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ [file: 'plots/plot-onos-dhcp.txt', displayTableFlag: false, exclusionValues: '', inclusionFlag: 'OFF', url: ''],
+ ],
+ group: 'Voltha-Scale-Numbers', numBuilds: '20', style: 'line', title: "Scale Test (OLTs: ${olts}, PONs: ${pons}, ONUs: ${onus})", yaxis: 'Time (s)', useDescr: true
+ ])
+ step([$class: 'RobotPublisher',
+ disableArchiveOutput: false,
+ logFileName: '**/log*.html',
+ otherFiles: '',
+ outputFileName: '**/output*.xml',
+ outputPath: 'RobotLogs',
+ passThreshold: 100,
+ reportFileName: '**/report*.html',
+ unstableThreshold: 0]);
+ // get all the logs from kubernetes PODs
+ sh returnStdout: false, script: '''
+
+ # store information on running charts
+ helm ls > $LOG_FOLDER/helm-list.txt || true
+
+ # store information on the running pods
+ kubectl get pods -o wide > $LOG_FOLDER/pods.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-images.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee $LOG_FOLDER/pod-imagesId.txt || true
+
+ # copy the ONOS logs directly from the container to avoid the color codes
+ printf '%s\n' $(kubectl get pods -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl cp #:${karafHome}/data/log/karaf.log $LOG_FOLDER/#.log" || true
+
+ # get ONOS cfg from the 3 nodes
+ printf '%s\n' $(kubectl get pods -l app=onos-classic -o=jsonpath="{.items[*]['metadata.name']}") | xargs --no-run-if-empty -I# bash -c "kubectl exec -it # -- ${karafHome}/bin/client cfg get > $LOG_FOLDER/#.cfg" || true
+
+
+ # get radius logs out of the container
+ kubectl cp $(kubectl get pods -l app=radius --no-headers | awk '{print $1}'):/var/log/freeradius/radius.log $LOG_FOLDER/radius.log || true
+ '''
+ // dump all the BBSim(s) ONU information
+ sh '''
+ BBSIM_IDS=$(kubectl get pods | grep bbsim | grep -v server | awk '{print $1}')
+ IDS=($BBSIM_IDS)
+
+ for bbsim in "${IDS[@]}"
+ do
+ kubectl exec -t $bbsim -- bbsimctl onu list > $LOG_FOLDER/$bbsim-device-list.txt || true
+ kubectl exec -t $bbsim -- bbsimctl service list > $LOG_FOLDER/$bbsim-service-list.txt || true
+ kubectl exec -t $bbsim -- bbsimctl olt resources GEM_PORT > $LOG_FOLDER/$bbsim-flows-gem-ports.txt || true
+ kubectl exec -t $bbsim -- bbsimctl olt resources ALLOC_ID > $LOG_FOLDER/$bbsim-flows-alloc-ids.txt || true
+ kubectl exec -t $bbsim -- bbsimctl olt pons > $LOG_FOLDER/$bbsim-pon-resources.txt || true
+ done
+ '''
+ script {
+ // first make sure the port-forward is still running,
+ // sometimes Jenkins kills it regardless of the JENKINS_NODE_COOKIE=dontKillMe
+ def running = sh (
+ script: 'ps aux | grep port-forw | grep -E "onos|voltha" | grep -v grep | wc -l',
+ returnStdout: true
+ ).trim()
+ // if any of the voltha-api, onos-rest, onos-ssh port-forwards are not there
+ // kill all and restart
+ if (running != "3") {
+ start_port_forward(olts)
+ }
+ }
+ // get ONOS debug infos
+ sh '''
+
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 apps -a -s > $LOG_FOLDER/onos-apps.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 nodes > $LOG_FOLDER/onos-nodes.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 masters > $LOG_FOLDER/onos-masters.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 roles > $LOG_FOLDER/onos-roles.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 cfg get > $LOG_FOLDER/onos-cfg.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 netcfg > $LOG_FOLDER/onos-netcfg.txt
+
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 ports > $LOG_FOLDER/onos-ports-list.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 hosts > $LOG_FOLDER/onos-hosts-list.txt
+
+ if [ ${withFlows} = true ] ; then
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-olts > $LOG_FOLDER/onos-olt-list.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 flows -s > $LOG_FOLDER/onos-flows-list.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 meters > $LOG_FOLDER/onos-meters-list.txt
+ fi
+
+ if [ ${provisionSubscribers} = true ]; then
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-programmed-subscribers > $LOG_FOLDER/onos-programmed-subscribers.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-programmed-meters > $LOG_FOLDER/onos-programmed-meters.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-bpmeter-mappings > $LOG_FOLDER/onos-bpmeter-mappings.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 volt-failed-subscribers > $LOG_FOLDER/onos-failed-subscribers.txt
+ fi
+
+ if [ ${withEapol} = true ] ; then
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 aaa-users > $LOG_FOLDER/onos-aaa-users.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 aaa-statistics > $LOG_FOLDER/onos-aaa-statistics.txt
+ fi
+
+ if [ ${withDhcp} = true ] ; then
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 dhcpl2relay-allocations > $LOG_FOLDER/onos-dhcp-allocations.txt
+ fi
+
+ if [ ${withIgmp} = true ] ; then
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 mcast-host-routes > $LOG_FOLDER/onos-mcast-host-routes.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 mcast-host-show > $LOG_FOLDER/onos-mcast-host-show.txt
+ sshpass -e ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 30115 karaf@127.0.0.1 groups > $LOG_FOLDER/onos-groups.txt
+ fi
+ '''
+ // collect etcd metrics
+ sh '''
+ mkdir -p $WORKSPACE/etcd-metrics
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_keys_total' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-key-count.json || true
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=grpc_server_handled_total{grpc_service="etcdserverpb.KV"}' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-rpc-count.json || true
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_debugging_mvcc_db_total_size_in_bytes' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-db-size.json || true
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time.json || true
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_sum' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time-sum.json || true
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_backend_commit_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-backend-write-time-bucket.json || true
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_disk_wal_fsync_duration_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-wal-fsync-time-bucket.json || true
+ curl -s -X GET -G http://10.90.0.101:31301/api/v1/query --data-urlencode 'query=etcd_network_peer_round_trip_time_seconds_bucket' | jq '.data' > $WORKSPACE/etcd-metrics/etcd-network-peer-round-trip-time-seconds.json || true
+
+ '''
+ // get VOLTHA debug infos
+ script {
+ try {
+ sh '''
+ voltctl -m 8MB device list -o json > $LOG_FOLDER/device-list.json || true
+ python -m json.tool $LOG_FOLDER/device-list.json > $LOG_FOLDER/voltha-devices-list.json || true
+ rm $LOG_FOLDER/device-list.json || true
+ voltctl -m 8MB device list > $LOG_FOLDER/voltha-devices-list.txt || true
+
+ printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device flows # > $LOG_FOLDER/voltha-device-flows-#.txt" || true
+ printf '%s\n' $(voltctl -m 8MB device list | grep olt | awk '{print $1}') | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB device port list --format 'table{{.PortNo}}\t{{.Label}}\t{{.Type}}\t{{.AdminState}}\t{{.OperStatus}}' # > $LOG_FOLDER/voltha-device-ports-#.txt" || true
+
+ printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice flows # > $LOG_FOLDER/voltha-logicaldevice-flows-#.txt" || true
+ printf '%s\n' $(voltctl -m 8MB logicaldevice list -q) | xargs --no-run-if-empty -I# bash -c "voltctl -m 8MB logicaldevice port list # > $LOG_FOLDER/voltha-logicaldevice-ports-#.txt" || true
+ '''
+ } catch(e) {
+ sh '''
+ echo "Can't get device list from voltclt"
+ '''
+ }
+ }
+ // get cpu usage by container
+ sh '''
+ if [ ${withMonitoring} = true ] ; then
+ cd $WORKSPACE/voltha-system-tests
+ source ./vst_venv/bin/activate
+ sleep 60 # we have to wait for prometheus to collect all the information
+ python tests/scale/sizing.py -o $WORKSPACE/plots || true
+ fi
+ '''
+ archiveArtifacts artifacts: 'execution-time.txt,logs/*,logs/pprof/*,RobotLogs/**/*,plots/*,etcd-metrics/*'
+ }
+ }
+}
+
+def start_port_forward(olts) {
+ sh """
+ daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward --address 0.0.0.0 -n default svc/voltha1-voltha-api 55555:55555
+
+ bbsimRestPortFwd=50071
+ for i in {0..${olts.toInteger() - 1}}; do
+ daemonize -E JENKINS_NODE_COOKIE="dontKillMe" /usr/local/bin/kubectl port-forward --address 0.0.0.0 -n default svc/bbsim\${i} \${bbsimRestPortFwd}:50071
+ ((bbsimRestPortFwd++))
+ done
+ """
+}
diff --git a/jjb/pipeline/voltha-system-test-bbsim.groovy b/jjb/pipeline/voltha/voltha-2.7/voltha-system-test-bbsim.groovy
similarity index 100%
rename from jjb/pipeline/voltha-system-test-bbsim.groovy
rename to jjb/pipeline/voltha/voltha-2.7/voltha-system-test-bbsim.groovy
diff --git a/jjb/shell/licensecheck.sh b/jjb/shell/licensecheck.sh
index 4708dc7..160ee8b 100755
--- a/jjb/shell/licensecheck.sh
+++ b/jjb/shell/licensecheck.sh
@@ -108,6 +108,8 @@
! -path "*git*" \
! -path "*swagger*" \
! -path "*.drawio" \
+ ! -name "*.pb.h" \
+ ! -name "*.pb.cc" \
-print0 )
exit ${fail_licensecheck}
diff --git a/jjb/shell/tagcollisionreject.sh b/jjb/shell/tagcollisionreject.sh
index 4a791b6..c1ab545 100755
--- a/jjb/shell/tagcollisionreject.sh
+++ b/jjb/shell/tagcollisionreject.sh
@@ -60,7 +60,7 @@
VERSIONFILE="pom.xml"
else
echo "ERROR: No versioning file found!"
- exit 1
+ fail_validation=1
fi
}
@@ -83,16 +83,87 @@
# check if the version is already a tag in git
function is_git_tag_duplicated {
- for existing_tag in $(git tag)
+ for existing_tag in $existing_tags
do
if [ "$TAG_VERSION" = "$existing_tag" ]
then
echo "ERROR: Duplicate tag: $existing_tag"
- exit 2
+ fail_validation=2
fi
done
}
+# from https://github.com/cloudflare/semver_bash/blob/master/semver.sh
+function semverParseInto() {
+ local RE='[^0-9]*\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)\([0-9A-Za-z-]*\)'
+ #MAJOR
+ eval $2=`echo $1 | sed -e "s#$RE#\1#"`
+ #MINOR
+ eval $3=`echo $1 | sed -e "s#$RE#\2#"`
+ #PATCH
+ eval $4=`echo $1 | sed -e "s#$RE#\3#"`
+ #SPECIAL
+ eval $5=`echo $1 | sed -e "s#$RE#\4#"`
+}
+
+# if it's a -dev version check if a previous tag has been created (to avoid going from 2.7.0-dev to 2.7.1-dev)
+function is_valid_version {
+ local MAJOR=0 MINOR=0 PATCH=0 SPECIAL=""
+ local C_MAJOR=0 C_MINOR=0 C_PATCH=0 C_SPECIAL="" # these are used in the inner loops to compare
+
+ semverParseInto $NEW_VERSION MAJOR MINOR PATCH SPECIAL
+
+ found_parent=false
+
+ # if minor == 0, check that there was a release with MAJOR-1.X.X
+ if [[ "$MINOR" == 0 ]]; then
+ new_major=$(( $MAJOR - 1 ))
+ parent_version="$new_major.x.x"
+ for existing_tag in $existing_tags
+ do
+ semverParseInto $existing_tag C_MAJOR C_MINOR C_PATCH C_SPECIAL
+ if [[ "$new_major" == "$C_MAJOR" ]]; then
+ found_parent=true
+ fi
+ done
+
+ # if patch == 0, check that there was a release with MAJOR.MINOR-1.X
+ elif [[ "$PATCH" == 0 ]]; then
+ new_minor=$(( $MINOR - 1 ))
+ parent_version="$MAJOR.$new_minor.x"
+ for existing_tag in $existing_tags
+ do
+ semverParseInto $existing_tag C_MAJOR C_MINOR C_PATCH C_SPECIAL
+ if [[ "$new_minor" == "$C_MINOR" ]]; then
+ found_parent=true
+ fi
+ done
+
+ # if patch != 0 check that there was a release with MAJOR.MINOR.PATCH-1
+ elif [[ "$PATCH" != 0 ]]; then
+ new_patch=$(( $PATCH - 1 ))
+ parent_version="$MAJOR.$MINOR.$new_patch"
+ for existing_tag in $existing_tags
+ do
+ semverParseInto $existing_tag C_MAJOR C_MINOR C_PATCH C_SPECIAL
+ if [[ "$MAJOR" == "$C_MAJOR" && "$MINOR" == "$C_MINOR" && "$new_patch" == "$C_PATCH" ]]
+ then
+ found_parent=true
+ fi
+ done
+ fi
+
+ # if we are at the beginning there is no parent, but that's fine
+ if [[ "$MAJOR" == 0 ]]; then
+ found_parent=true
+ fi
+
+ if [[ $found_parent == false ]]; then
+ echo "Invalid $NEW_VERSION version. Expected parent version $parent_version does not exist."
+ fail_validation=1
+ fi
+}
+
# check if Dockerfiles have a released version as their parent
function dockerfile_parentcheck {
while IFS= read -r -d '' dockerfile
@@ -107,20 +178,31 @@
for df_parent in "${df_parents[@]}"
do
- df_pattern="FROM ([^:]*):(.*)"
+ df_pattern="[FfRrOoMm] +(--platform=[^ ]+ +)?([^@: ]+)(:([^: ]+)|@sha[^ ]+)?"
if [[ "$df_parent" =~ $df_pattern ]]
then
- p_image="${BASH_REMATCH[1]}"
- p_version="${BASH_REMATCH[2]}"
+ p_image="${BASH_REMATCH[2]}"
+ p_sha=${BASH_REMATCH[3]}
+ p_version="${BASH_REMATCH[4]}"
- if [[ "${p_version}" =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]
+ echo "IMAGE: '${p_image}'"
+ echo "VERSION: '$p_version'"
+ echo "SHA: '$p_sha'"
+
+ if [[ "${p_image}" == "scratch" ]]
+ then
+ echo " OK: Using the versionless 'scratch' parent: '$df_parent'"
+ elif [[ "${p_image}:${p_version}" == "gcr.io/distroless/static:nonroot" ]]
+ then
+ echo " OK: Using static distroless image with nonroot: '${p_image}:${p_version}'"
+ elif [[ "${p_version}" =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]
then
echo " OK: Parent '$p_image:$p_version' is a released SemVer version"
- elif [[ "${p_version}" =~ ^.*@sha256:[0-9a-f]{64}.*$ ]]
+ elif [[ "${p_sha}" =~ ^@sha256:[0-9a-f]{64}.*$ ]]
then
# allow sha256 hashes to be used as version specifiers
- echo " OK: Parent '$p_image:$p_version' is using a specific sha256 hash as a version"
+ echo " OK: Parent '$p_image$p_sha' is using a specific sha256 hash as a version"
elif [[ "${p_version}" =~ ^.*([0-9]+)\.([0-9]+).*$ ]]
then
# handle non-SemVer versions that have a Major.Minor version specifier in the name
@@ -128,16 +210,15 @@
# 'postgres:10.3-alpine'
# 'openjdk:8-jre-alpine3.8'
echo " OK: Parent '$p_image:$p_version' is using a non-SemVer, but sufficient, version"
+ elif [[ -z "${p_version}" ]]
+ then
+ echo " ERROR: Parent '$p_image' is NOT using a specific version"
+ fail_validation=1
else
- echo " ERROR: Parent '$p_image:$p_version' is NOT using an specific version"
+ echo " ERROR: Parent '$p_image:$p_version' is NOT using a specific version"
fail_validation=1
fi
- elif [[ "$df_parent" =~ ^FROM\ scratch$ ]]
- then
- # Handle the parent-less `FROM scratch` case:
- # https://docs.docker.com/develop/develop-images/baseimages/
- echo " OK: Using the versionless 'scratch' parent: '$df_parent'"
else
echo " ERROR: Couldn't find a parent image in $df_parent"
fi
@@ -151,12 +232,15 @@
git remote -v
echo "Branches:"
-git branch -v
+branches=$(git branch -v)
+echo $branches
echo "Existing git tags:"
-git tag -n
+existing_tags=$(git tag -l)
+echo $existing_tags
read_version
+is_valid_version
check_if_releaseversion
# perform checks if a released version
diff --git a/jjb/shell/versiontag.sh b/jjb/shell/versiontag.sh
index 05e7a44..8c14ddd 100755
--- a/jjb/shell/versiontag.sh
+++ b/jjb/shell/versiontag.sh
@@ -108,20 +108,31 @@
for df_parent in "${df_parents[@]}"
do
- df_pattern="FROM ([^:]*):(.*)"
+ df_pattern="[FfRrOoMm] +(--platform=[^ ]+ +)?([^@: ]+)(:([^: ]+)|@sha[^ ]+)?"
if [[ "$df_parent" =~ $df_pattern ]]
then
- p_image="${BASH_REMATCH[1]}"
- p_version="${BASH_REMATCH[2]}"
+ p_image="${BASH_REMATCH[2]}"
+ p_sha=${BASH_REMATCH[3]}
+ p_version="${BASH_REMATCH[4]}"
- if [[ "${p_version}" =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]
+ echo "IMAGE: '${p_image}'"
+ echo "VERSION: '$p_version'"
+ echo "SHA: '$p_sha'"
+
+ if [[ "${p_image}" == "scratch" ]]
+ then
+ echo " OK: Using the versionless 'scratch' parent: '$df_parent'"
+ elif [[ "${p_image}:${p_version}" == "gcr.io/distroless/static:nonroot" ]]
+ then
+ echo " OK: Using static distroless image with nonroot: '${p_image}:${p_version}'"
+ elif [[ "${p_version}" =~ ^([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]
then
echo " OK: Parent '$p_image:$p_version' is a released SemVer version"
- elif [[ "${p_version}" =~ ^.*@sha256:[0-9a-f]{64}.*$ ]]
+ elif [[ "${p_sha}" =~ ^@sha256:[0-9a-f]{64}.*$ ]]
then
# allow sha256 hashes to be used as version specifiers
- echo " OK: Parent '$p_image:$p_version' is using a specific sha256 hash as a version"
+ echo " OK: Parent '$p_image$p_sha' is using a specific sha256 hash as a version"
elif [[ "${p_version}" =~ ^.*([0-9]+)\.([0-9]+).*$ ]]
then
# handle non-SemVer versions that have a Major.Minor version specifier in the name
@@ -129,16 +140,15 @@
# 'postgres:10.3-alpine'
# 'openjdk:8-jre-alpine3.8'
echo " OK: Parent '$p_image:$p_version' is using a non-SemVer, but sufficient, version"
+ elif [[ -z "${p_version}" ]]
+ then
+ echo " ERROR: Parent '$p_image' is NOT using a specific version"
+ fail_validation=1
else
- echo " ERROR: Parent '$p_image:$p_version' is NOT using an specific version"
+ echo " ERROR: Parent '$p_image:$p_version' is NOT using a specific version"
fail_validation=1
fi
- elif [[ "$df_parent" =~ ^FROM\ scratch$ ]]
- then
- # Handle the parent-less `FROM scratch` case:
- # https://docs.docker.com/develop/develop-images/baseimages/
- echo " OK: Using the versionless 'scratch' parent: '$df_parent'"
else
echo " ERROR: Couldn't find a parent image in $df_parent"
fi
diff --git a/jjb/software-upgrades.yaml b/jjb/software-upgrades.yaml
new file mode 100644
index 0000000..687face
--- /dev/null
+++ b/jjb/software-upgrades.yaml
@@ -0,0 +1,251 @@
+---
+# voltha 2.X tests
+
+- project:
+ name: software-upgrades
+ project-name: '{name}'
+
+ jobs:
+ - 'software-upgrades-test':
+ name: 'periodic-software-upgrade-test-bbsim'
+ pipeline-script: 'voltha/master/software-upgrades.groovy'
+ build-node: 'ubuntu18.04-basebuild-8c-15g'
+ code-branch: 'master'
+ aaa-version: '2.4.0.SNAPSHOT'
+ aaa-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/aaa-app/2.4.0-SNAPSHOT/aaa-app-2.4.0-20210504.145538-2.oar'
+ olt-version: '4.5.0.SNAPSHOT'
+ olt-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/olt-app/4.5.0-SNAPSHOT/olt-app-4.5.0-20210504.162620-3.oar'
+ dhcpl2relay-version: '2.5.0.SNAPSHOT'
+ dhcpl2relay-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/dhcpl2relay-app/2.5.0-SNAPSHOT/dhcpl2relay-app-2.5.0-20210504.145526-3.oar'
+ igmpproxy-version: '2.3.0.SNAPSHOT'
+ igmpproxy-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/onos-app-igmpproxy-app/2.3.0-SNAPSHOT/onos-app-igmpproxy-app-2.3.0-20210504.145529-2.oar'
+ sadis-version: '5.4.0.SNAPSHOT'
+ sadis-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/sadis-app/5.4.0-SNAPSHOT/sadis-app-5.4.0-20210504.124302-5.oar'
+ mcast-version: '2.4.0.SNAPSHOT'
+ mcast-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/mcast-app/2.4.0-SNAPSHOT/mcast-app-2.4.0-20210504.145514-3.oar'
+ kafka-version: '2.7.0.SNAPSHOT'
+ kafka-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/kafka/2.7.0-SNAPSHOT/kafka-2.7.0-20210504.153949-3.oar'
+ adapter-open-olt-image: 'voltha/voltha-openolt-adapter:3.3.3'
+ adapter-open-onu-image: 'voltha/voltha-openonu-adapter-go:1.2.11'
+ rw-core-image: 'voltha/voltha-rw-core:2.9.1'
+ ofagent-image: 'voltha/voltha-ofagent-go:1.6.1'
+ onu-image-name: 'software-image.img'
+ onu-image-url: 'http://bbsim0:50074/images'
+ onu-image-version: 'v1.0.0'
+ onu-image-crc: '0'
+ onu-image-local-dir: '/tmp'
+ time-trigger: "H H/23 * * *"
+
+ - 'software-upgrades-test':
+ name: 'periodic-software-upgrade-test-bbsim-2.7'
+ pipeline-script: 'voltha/voltha-2.7/software-upgrades.groovy'
+ build-node: 'ubuntu18.04-basebuild-8c-15g'
+ code-branch: 'voltha-2.7'
+ aaa-version: '2.3.0'
+ aaa-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/aaa-app/2.3.0/aaa-app-2.3.0.oar'
+ olt-version: '4.4.0'
+ olt-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/olt-app/4.4.0/olt-app-4.4.0.oar'
+ dhcpl2relay-version: '2.4.0'
+ dhcpl2relay-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/dhcpl2relay-app/2.4.0/dhcpl2relay-app-2.4.0.oar'
+ igmpproxy-version: '2.2.0'
+ igmpproxy-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/onos-app-igmpproxy-app/2.2.0/onos-app-igmpproxy-app-2.2.0.oar'
+ sadis-version: '5.3.0'
+ sadis-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/sadis-app/5.3.0/sadis-app-5.3.0.oar'
+ mcast-version: '2.3.2'
+ mcast-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/mcast-app/2.3.2/mcast-app-2.3.2.oar'
+ kafka-version: '2.6.0'
+ kafka-oar-url: 'https://oss.sonatype.org/content/groups/public/org/opencord/kafka/2.6.0/kafka-2.6.0.oar'
+ adapter-open-olt-image: 'voltha/voltha-openolt-adapter:3.1.8'
+ adapter-open-onu-image: 'voltha/voltha-openonu-adapter-go:1.2.11'
+ rw-core-image: 'voltha/voltha-rw-core:2.7.0'
+ ofagent-image: 'voltha/voltha-ofagent-go:1.5.2'
+ onu-image-name: 'software-image.img'
+ onu-image-url: 'http://bbsim0:50074/images'
+ onu-image-version: 'v1.0.0'
+ onu-image-crc: '0'
+ onu-image-local-dir: '/tmp'
+ time-trigger: "H H/23 * * *"
+
+- job-template:
+ id: 'software-upgrades-test'
+ name: '{name}'
+ sandbox: true
+ volthaSystemTestsChange: ''
+ volthaHelmChartsChange: ''
+ kindVolthaChange: ''
+
+ description: |
+ <!-- Managed by Jenkins Job Builder -->
+ Created by {id} job-template from ci-management/jjb/software-upgrades.yaml <br /><br />
+ E2E Validation for Voltha 2.X
+ properties:
+ - cord-infra-properties:
+ build-days-to-keep: '{build-days-to-keep}'
+ artifact-num-to-keep: '{artifact-num-to-keep}'
+
+ wrappers:
+ - lf-infra-wrappers:
+ build-timeout: '{build-timeout}'
+ jenkins-ssh-credential: '{jenkins-ssh-credential}'
+
+ parameters:
+ - string:
+ name: buildNode
+ default: '{build-node}'
+ description: 'Name of the Jenkins node to run the job on'
+
+ - string:
+ name: extraHelmFlags
+ default: ''
+ description: 'Helm flags to pass to every helm install command'
+
+ - string:
+ name: volthaSystemTestsChange
+ default: ''
+ description: 'Download a change for gerrit in the voltha-system-tests repo, example value: "refs/changes/79/18779/13"'
+
+ - string:
+ name: volthaHelmChartsChange
+ default: ''
+ description: 'Download a change for gerrit in the voltha-helm-charts repo, example value: "refs/changes/79/18779/13"'
+
+ - string:
+ name: branch
+ default: '{code-branch}'
+ description: 'Name of the branch to use'
+
+ # deprecated params (not used in master, remove after 2.6 support is dropped)
+ - string:
+ name: kindVolthaChange
+ default: ''
+ description: 'Download a change for gerrit in the kind-voltha repo, example value: "refs/changes/32/19132/1"'
+
+ - string:
+ name: onosImg
+ default: ''
+ description: 'ONOS Image to use'
+
+ - string:
+ name: aaaVer
+ default: '{aaa-version}'
+ description: 'ONOS AAA App Version to Test Upgrade'
+
+ - string:
+ name: aaaOarUrl
+ default: '{aaa-oar-url}'
+ description: 'ONOS AAA App OAR File Url'
+
+ - string:
+ name: oltVer
+ default: '{olt-version}'
+ description: 'ONOS OLT App Version to Test Upgrade'
+
+ - string:
+ name: oltOarUrl
+ default: '{olt-oar-url}'
+ description: 'ONOS OLT App OAR File Url'
+
+ - string:
+ name: dhcpl2relayVer
+ default: '{dhcpl2relay-version}'
+ description: 'ONOS DHCP L2 Relay App Version to Test Upgrade'
+
+ - string:
+ name: dhcpl2relayOarUrl
+ default: '{dhcpl2relay-oar-url}'
+ description: 'ONOS DHCP L2 Relay App OAR File Url'
+
+ - string:
+ name: igmpproxyVer
+ default: '{igmpproxy-version}'
+ description: 'ONOS Igmp Proxy App Version to Test Upgrade'
+
+ - string:
+ name: igmpproxyOarUrl
+ default: '{igmpproxy-oar-url}'
+ description: 'ONOS Igmp Proxy App OAR File Url'
+
+ - string:
+ name: sadisVer
+ default: '{sadis-version}'
+ description: 'ONOS Sadis App Version to Test Upgrade'
+
+ - string:
+ name: sadisOarUrl
+ default: '{sadis-oar-url}'
+ description: 'ONOS Sadis App OAR File Url'
+
+ - string:
+ name: mcastVer
+ default: '{mcast-version}'
+ description: 'ONOS MCast App Version to Test Upgrade'
+
+ - string:
+ name: mcastOarUrl
+ default: '{mcast-oar-url}'
+ description: 'ONOS MCast App OAR File Url'
+
+ - string:
+ name: kafkaVer
+ default: '{kafka-version}'
+ description: 'ONOS Kafka App Version to Test Upgrade'
+
+ - string:
+ name: kafkaOarUrl
+ default: '{kafka-oar-url}'
+ description: 'ONOS Kafka App OAR File Url'
+
+ - string:
+ name: adapterOpenOltImage
+ default: '{adapter-open-olt-image}'
+ description: 'Voltha Adapter Open OLT Component Image'
+
+ - string:
+ name: adapterOpenOnuImage
+ default: '{adapter-open-onu-image}'
+ description: 'Voltha Adapter Open ONU Component Image'
+
+ - string:
+ name: rwCoreImage
+ default: '{rw-core-image}'
+ description: 'Voltha RW Core Component Image'
+
+ - string:
+ name: ofAgentImage
+ default: '{ofagent-image}'
+ description: 'Voltha Ofagent Component Image'
+
+ - string:
+ name: onuImageName
+ default: '{onu-image-name}'
+ description: 'Name of ONU Image to Upgrade'
+
+ - string:
+ name: onuImageUrl
+ default: '{onu-image-url}'
+ description: 'Url of ONU Image to Upgrade'
+
+ - string:
+ name: onuImageVersion
+ default: '{onu-image-version}'
+ description: 'Version of ONU Image to Upgrade'
+
+ - string:
+ name: onuImageCrc
+ default: '{onu-image-crc}'
+ description: 'CRC of ONU Image to Upgrade'
+
+ - string:
+ name: onuImageLocalDir
+ default: '{onu-image-local-dir}'
+ description: 'Local Dir of ONU Image to Upgrade'
+
+ project-type: pipeline
+ concurrent: true
+
+ dsl: !include-raw-escape: pipeline/{pipeline-script}
+
+ triggers:
+ - timed: |
+ TZ=America/Los_Angeles
+ {time-trigger}
diff --git a/jjb/triggered-api-test.yaml b/jjb/triggered-api-test.yaml
index 853fd5a..8e4cc33 100644
--- a/jjb/triggered-api-test.yaml
+++ b/jjb/triggered-api-test.yaml
@@ -30,7 +30,7 @@
parameters:
- string:
name: buildNode
- default: 'ubuntu16.04-basebuild-4c-8g'
+ default: 'ubuntu18.04-basebuild-4c-8g'
description: 'Name of the Jenkins node to run the job on'
- string:
diff --git a/jjb/verify/abstract-olt.yaml b/jjb/verify/abstract-olt.yaml
index 3aab818..fcd9bff 100644
--- a/jjb/verify/abstract-olt.yaml
+++ b/jjb/verify/abstract-olt.yaml
@@ -51,7 +51,7 @@
jenkins-ssh-credential: '{jenkins-ssh-credential}'
basedir: '{project}'
- node: 'ubuntu16.04-basebuild-1c-2g'
+ node: 'ubuntu18.04-basebuild-1c-2g'
project-type: freestyle
concurrent: true
diff --git a/jjb/verify/bbsim-sadis-server.yaml b/jjb/verify/bbsim-sadis-server.yaml
index 4abebd2..2ce60d3 100644
--- a/jjb/verify/bbsim-sadis-server.yaml
+++ b/jjb/verify/bbsim-sadis-server.yaml
@@ -8,8 +8,8 @@
jobs:
- 'verify-bbsim-sadis-server-jobs':
branch-regexp: '{all-branches-regexp}'
- - 'verify-bbsim-sadis-server-jobs-voltha-2.6':
- name-extension: '-voltha-2.6'
+ - 'verify-bbsim-sadis-server-jobs-voltha-2.7':
+ name-extension: '-voltha-2.7'
branch-regexp: '{kind-voltha-regexp}'
- 'verify-bbsim-sadis-server-jobs-master':
branch-regexp: '^master$'
@@ -23,22 +23,23 @@
- 'tag-collision-reject':
dependency-jobs: 'verify_bbsim-sadis-server_licensed'
- 'make-unit-test':
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-1c-2g'
dest-gopath: "github.com/opencord"
unit-test-targets: 'lint sca test'
unit-test-keep-going: 'true'
- job-group:
- name: 'verify-bbsim-sadis-server-jobs-voltha-2.6'
+ name: 'verify-bbsim-sadis-server-jobs-voltha-2.7'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha-bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
- job-group:
name: 'verify-bbsim-sadis-server-jobs-master'
jobs:
- 'voltha-patch-test':
pipeline-script: 'voltha/master/bbsim-tests.groovy'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
- job-group:
name: 'publish-bbsim-sadis-server-jobs'
diff --git a/jjb/verify/bbsim.yaml b/jjb/verify/bbsim.yaml
index 14553c0..37dfbae 100644
--- a/jjb/verify/bbsim.yaml
+++ b/jjb/verify/bbsim.yaml
@@ -8,9 +8,9 @@
jobs:
- 'verify-bbsim-jobs':
branch-regexp: '{all-branches-regexp}'
- - 'verify-bbsim-jobs-voltha-2.6':
- name-extension: '-voltha-2.6'
- override-branch: 'voltha-2.6'
+ - 'verify-bbsim-jobs-voltha-2.7':
+ name-extension: '-voltha-2.7'
+ override-branch: 'voltha-2.7'
branch-regexp: '{kind-voltha-regexp}'
- 'verify-bbsim-jobs-master':
branch-regexp: '^master$'
@@ -24,22 +24,23 @@
- 'tag-collision-reject':
dependency-jobs: 'verify_bbsim_licensed'
- 'make-unit-test':
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-4c-8g'
dest-gopath: "github.com/opencord"
unit-test-targets: 'lint sca test'
unit-test-keep-going: 'true'
- job-group:
- name: 'verify-bbsim-jobs-voltha-2.6'
+ name: 'verify-bbsim-jobs-voltha-2.7'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha-bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
- job-group:
name: 'verify-bbsim-jobs-master'
jobs:
- 'voltha-patch-test':
pipeline-script: 'voltha/master/bbsim-tests.groovy'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
- job-group:
name: 'publish-bbsim-jobs'
diff --git a/jjb/verify/device-management.yaml b/jjb/verify/device-management.yaml
index 3921ad9..0179a40 100644
--- a/jjb/verify/device-management.yaml
+++ b/jjb/verify/device-management.yaml
@@ -19,9 +19,9 @@
dependency-jobs: 'verify_device-management_licensed'
- 'make-unit-test':
junit-allow-empty-results: true
- build-node: 'ubuntu16.04-basebuild-2c-4g'
+ build-node: 'ubuntu18.04-basebuild-2c-4g'
- 'device-management-patch-test':
- pipeline-script: 'device-management-mock-tests.groovy'
+ pipeline-script: 'voltha/master/device-management-mock-tests.groovy'
- job-group:
name: 'publish-device-management-jobs'
diff --git a/jjb/verify/kind-voltha.yaml b/jjb/verify/kind-voltha.yaml
index 952b1c0..8b43798 100644
--- a/jjb/verify/kind-voltha.yaml
+++ b/jjb/verify/kind-voltha.yaml
@@ -17,10 +17,8 @@
- 'make-unit-test':
unit-test-targets: 'test'
junit-allow-empty-results: true
- # Remove kind-voltha test on master once we are not depending on it anymore
- 'voltha-patch-test':
- pipeline-script: 'voltha-bbsim-tests.groovy'
- - 'voltha-patch-test':
- pipeline-script: 'voltha-bbsim-tests.groovy'
- name-extension: '-2.6'
- override-branch: 'voltha-2.6'
+ pipeline-script: 'voltha/voltha-2.7/voltha-bbsim-tests.groovy'
+ name-extension: '-2.7'
+ override-branch: 'voltha-2.7'
+ kindVolthaChange: '$GERRIT_REFSPEC'
diff --git a/jjb/verify/ofagent-go.yaml b/jjb/verify/ofagent-go.yaml
index f3475ee..9483d6e 100644
--- a/jjb/verify/ofagent-go.yaml
+++ b/jjb/verify/ofagent-go.yaml
@@ -8,9 +8,9 @@
jobs:
- 'verify-ofagent-go-jobs':
branch-regexp: '{all-branches-regexp}'
- - 'verify-ofagent-jobs-voltha-2.6':
- name-extension: '-voltha-2.6'
- override-branch: 'voltha-2.6'
+ - 'verify-ofagent-jobs-voltha-2.7':
+ name-extension: '-voltha-2.7'
+ override-branch: 'voltha-2.7'
branch-regexp: '{kind-voltha-regexp}'
- 'verify-ofagent-jobs-master':
branch-regexp: '^master$'
@@ -24,23 +24,24 @@
- 'tag-collision-reject':
dependency-jobs: 'verify_ofagent-go_licensed'
- 'make-unit-test':
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-4c-8g'
dest-gopath: "github.com/opencord"
unit-test-targets: 'lint sca test'
unit-test-keep-going: 'true'
junit-allow-empty-results: true
- job-group:
- name: 'verify-ofagent-jobs-voltha-2.6'
+ name: 'verify-ofagent-jobs-voltha-2.7'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha-bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
- job-group:
name: 'verify-ofagent-jobs-master'
jobs:
- 'voltha-patch-test':
pipeline-script: 'voltha/master/bbsim-tests.groovy'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
- job-group:
name: 'publish-ofagent-go-jobs'
diff --git a/jjb/verify/ofagent-py.yaml b/jjb/verify/ofagent-py.yaml
index 733cbf7..59a231a 100644
--- a/jjb/verify/ofagent-py.yaml
+++ b/jjb/verify/ofagent-py.yaml
@@ -22,7 +22,7 @@
unit-test-keep-going: 'true'
junit-allow-empty-results: true
- 'voltha-patch-test':
- pipeline-script: 'voltha-bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
- job-group:
name: 'publish-ofagent-py-jobs'
diff --git a/jjb/verify/opendevice-manager.yaml b/jjb/verify/opendevice-manager.yaml
new file mode 100644
index 0000000..14f09ae
--- /dev/null
+++ b/jjb/verify/opendevice-manager.yaml
@@ -0,0 +1,34 @@
+---
+# verification jobs for 'opendevice-manager' repo
+
+- project:
+ name: opendevice-manager
+ project: '{name}'
+
+ jobs:
+ - 'verify-opendevice-manager-jobs':
+ branch-regexp: '{all-branches-regexp}'
+ - 'publish-voltha-opendevice-manager-jobs':
+ branch-regexp: '{all-branches-regexp}'
+
+- job-group:
+ name: 'verify-opendevice-manager-jobs'
+ jobs:
+ - 'verify-licensed'
+ - 'tag-collision-reject'
+ - 'make-unit-test':
+ build-node: 'ubuntu18.04-basebuild-2c-4g'
+ unit-test-targets: 'test'
+ # opendm-agent uses gtest which outputs in xunit format
+ junit-allow-empty-results: true
+ xunit-skip-if-no-test-files: false
+
+- job-group:
+ name: 'publish-voltha-opendevice-manager-jobs'
+ jobs:
+ - 'docker-publish':
+ build-timeout: 30
+ docker-repo: 'voltha'
+ dependency-jobs: 'version-tag'
+
+
diff --git a/jjb/verify/opendm-agent.yaml b/jjb/verify/opendm-agent.yaml
new file mode 100644
index 0000000..2d5a258
--- /dev/null
+++ b/jjb/verify/opendm-agent.yaml
@@ -0,0 +1,22 @@
+---
+# verification jobs for 'opendm-agent' repo
+
+- project:
+ name: opendm-agent
+ project: '{name}'
+
+ jobs:
+ - 'verify-opendm-agent-jobs':
+ branch-regexp: '{all-branches-regexp}'
+
+- job-group:
+ name: 'verify-opendm-agent-jobs'
+ jobs:
+ - 'verify-licensed'
+ - 'tag-collision-reject'
+ - 'make-unit-test':
+ build-node: 'ubuntu18.04-basebuild-2c-4g'
+ unit-test-targets: 'test'
+ # opendm-agent uses gtest which outputs in xunit format
+ junit-allow-empty-results: true
+ xunit-skip-if-no-test-files: false
diff --git a/jjb/verify/openolt-test.yaml b/jjb/verify/openolt-test.yaml
index c8ef753..67d0f99 100644
--- a/jjb/verify/openolt-test.yaml
+++ b/jjb/verify/openolt-test.yaml
@@ -16,7 +16,7 @@
- 'verify-licensed'
- 'tag-collision-reject'
- 'make-unit-test':
- build-node: 'ubuntu16.04-basebuild-2c-4g'
+ build-node: 'ubuntu18.04-basebuild-2c-4g'
build-timeout: 60
unit-test-targets: 'lint docker-build'
junit-allow-empty-results: true
diff --git a/jjb/verify/openolt.yaml b/jjb/verify/openolt.yaml
index ef8ab38..04ae7b4 100644
--- a/jjb/verify/openolt.yaml
+++ b/jjb/verify/openolt.yaml
@@ -15,7 +15,7 @@
- 'verify-licensed'
- 'tag-collision-reject'
- 'make-unit-test':
- build-node: 'ubuntu16.04-basebuild-2c-4g'
+ build-node: 'ubuntu18.04-basebuild-2c-4g'
unit-test-targets: 'test'
# openolt uses gtest which outputs in xunit format
junit-allow-empty-results: true
diff --git a/jjb/verify/osam.yaml b/jjb/verify/osam.yaml
index a89f1cc..a09a43d 100644
--- a/jjb/verify/osam.yaml
+++ b/jjb/verify/osam.yaml
@@ -51,7 +51,7 @@
jenkins-ssh-credential: '{jenkins-ssh-credential}'
basedir: '{basedir}'
- node: 'ubuntu16.04-basebuild-1c-2g'
+ node: 'ubuntu18.04-basebuild-1c-2g'
project-type: freestyle
concurrent: true
@@ -91,6 +91,7 @@
- maven-target:
goals: "test"
pom: "pom.xml"
+ maven-version: '{maven-version}'
publishers:
- postbuildscript:
diff --git a/jjb/verify/ponsim.yaml b/jjb/verify/ponsim.yaml
index 68cf602..527c0cd 100644
--- a/jjb/verify/ponsim.yaml
+++ b/jjb/verify/ponsim.yaml
@@ -18,7 +18,7 @@
- 'tag-collision-reject':
dependency-jobs: 'verify_ponsim_licensed'
- 'make-unit-test':
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-1c-2g'
dest-gopath: "github.com/opencord"
unit-test-targets: 'test docker-build'
unit-test-keep-going: 'true'
diff --git a/jjb/verify/sadis-server.yaml b/jjb/verify/sadis-server.yaml
index 3d1b591..5edbd53 100644
--- a/jjb/verify/sadis-server.yaml
+++ b/jjb/verify/sadis-server.yaml
@@ -53,7 +53,7 @@
jenkins-ssh-credential: '{jenkins-ssh-credential}'
basedir: '{project}'
- node: 'ubuntu16.04-basebuild-1c-2g'
+ node: 'ubuntu18.04-basebuild-1c-2g'
project-type: freestyle
concurrent: true
diff --git a/jjb/verify/up4.yaml b/jjb/verify/up4.yaml
index 17788a6..087b66b 100644
--- a/jjb/verify/up4.yaml
+++ b/jjb/verify/up4.yaml
@@ -13,7 +13,7 @@
stream: "master"
# As we run integration tests with 3 ONOS instances,
# we need a beefy node.
- build-node: "ubuntu16.04-basebuild-8c-15g"
+ build-node: "ubuntu18.04-basebuild-8c-15g"
- job-group:
name: "up4-jobs"
@@ -73,6 +73,10 @@
- lf-infra-wrappers:
build-timeout: "{build-timeout}"
jenkins-ssh-credential: "{jenkins-ssh-credential}"
+ - credentials-binding:
+ - text:
+ credential-id: codecov-up4
+ variable: CODECOV_TOKEN
- job-template:
id: "up4-postmerge"
@@ -124,3 +128,7 @@
- lf-infra-wrappers:
build-timeout: "{build-timeout}"
jenkins-ssh-credential: "{jenkins-ssh-credential}"
+ - credentials-binding:
+ - text:
+ credential-id: codecov-up4
+ variable: CODECOV_TOKEN
diff --git a/jjb/verify/voltctl.yaml b/jjb/verify/voltctl.yaml
index d7fca71..220e0ce 100644
--- a/jjb/verify/voltctl.yaml
+++ b/jjb/verify/voltctl.yaml
@@ -20,18 +20,19 @@
- 'tag-collision-reject':
dependency-jobs: 'verify_voltctl_licensed'
- 'make-unit-test':
- build-node: 'ubuntu16.04-basebuild-2c-4g'
+ build-node: 'ubuntu18.04-basebuild-2c-4g'
unit-test-targets: 'lint sca test'
unit-test-keep-going: 'true'
dependency-jobs: 'verify_voltctl_tag-collision'
- 'voltha-patch-test':
pipeline-script: 'voltha/master/bbsim-tests.groovy'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
- job-group:
name: 'post-submit-voltctl-jobs'
jobs:
- 'github-release':
- build-node: 'ubuntu16.04-basebuild-2c-4g'
+ build-node: 'ubuntu18.04-basebuild-2c-4g'
dependency-jobs: 'version-tag'
github-organization: 'opencord'
artifact-glob: 'release/*'
diff --git a/jjb/verify/voltha-api-server.yaml b/jjb/verify/voltha-api-server.yaml
index a0dea40..fe82286 100644
--- a/jjb/verify/voltha-api-server.yaml
+++ b/jjb/verify/voltha-api-server.yaml
@@ -20,13 +20,13 @@
- 'tag-collision-reject':
dependency-jobs: 'verify_voltha-api-server_licensed'
- 'make-unit-test':
- build-node: 'ubuntu16.04-basebuild-2c-4g'
+ build-node: 'ubuntu18.04-basebuild-2c-4g'
dest-gopath: "github.com/opencord"
unit-test-targets: 'lint sca test'
unit-test-keep-going: 'true'
junit-allow-empty-results: true
- 'voltha-patch-test':
- pipeline-script: 'voltha-bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
- job-group:
name: 'publish-voltha-api-server-jobs'
diff --git a/jjb/verify/voltha-bbsim.yaml b/jjb/verify/voltha-bbsim.yaml
index 1ad61e9..5cdf7ad 100644
--- a/jjb/verify/voltha-bbsim.yaml
+++ b/jjb/verify/voltha-bbsim.yaml
@@ -21,7 +21,7 @@
unit-test-targets: 'test'
dest-gopath: "github.com/opencord"
junit-allow-empty-results: true
- build-node: 'ubuntu16.04-basebuild-1c-2g'
+ build-node: 'ubuntu18.04-basebuild-1c-2g'
- job-group:
name: 'publish-voltha-bbsim-jobs'
diff --git a/jjb/verify/voltha-go.yaml b/jjb/verify/voltha-go.yaml
index bfefd07..e4a4576 100644
--- a/jjb/verify/voltha-go.yaml
+++ b/jjb/verify/voltha-go.yaml
@@ -8,9 +8,9 @@
jobs:
- 'verify-voltha-go-jobs':
branch-regexp: '{all-branches-regexp}'
- - 'verify-voltha-go-jobs-voltha-2.6':
- name-extension: '-voltha-2.6'
- override-branch: 'voltha-2.6'
+ - 'verify-voltha-go-jobs-voltha-2.7':
+ name-extension: '-voltha-2.7'
+ override-branch: 'voltha-2.7'
branch-regexp: '{kind-voltha-regexp}'
- 'verify-voltha-go-jobs-master':
branch-regexp: '^master$'
@@ -24,7 +24,7 @@
- 'tag-collision-reject':
dependency-jobs: 'verify_voltha-go_licensed'
- 'make-unit-test':
- build-node: 'ubuntu16.04-basebuild-2c-4g'
+ build-node: 'ubuntu18.04-basebuild-4c-8g'
build-timeout: 20
dest-gopath: "github.com/opencord"
name-extension: "-lint"
@@ -32,23 +32,24 @@
unit-test-keep-going: 'true'
junit-allow-empty-results: true
- 'make-unit-test':
- build-node: 'ubuntu16.04-basebuild-2c-4g'
+ build-node: 'ubuntu18.04-basebuild-2c-4g'
dest-gopath: "github.com/opencord"
name-extension: "-tests"
unit-test-targets: 'test'
unit-test-keep-going: 'true'
- job-group:
- name: 'verify-voltha-go-jobs-voltha-2.6'
+ name: 'verify-voltha-go-jobs-voltha-2.7'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha-bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
- job-group:
name: 'verify-voltha-go-jobs-master'
jobs:
- 'voltha-patch-test':
pipeline-script: 'voltha/master/bbsim-tests.groovy'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
- job-group:
name: 'publish-voltha-go-jobs'
diff --git a/jjb/verify/voltha-helm-charts.yaml b/jjb/verify/voltha-helm-charts.yaml
index 77b8dc4..ebe7c50 100644
--- a/jjb/verify/voltha-helm-charts.yaml
+++ b/jjb/verify/voltha-helm-charts.yaml
@@ -8,9 +8,9 @@
jobs:
- 'verify-voltha-helm-charts-jobs':
branch-regexp: '{all-branches-regexp}'
- - 'verify-voltha-helm-charts-jobs-voltha-2.6':
- name-extension: '-voltha-2.6'
- override-branch: 'voltha-2.6'
+ - 'verify-voltha-helm-charts-jobs-voltha-2.7':
+ name-extension: '-voltha-2.7'
+ override-branch: 'voltha-2.7'
branch-regexp: '{kind-voltha-regexp}'
- 'verify-voltha-helm-charts-jobs-master':
branch-regexp: '^master$'
@@ -25,10 +25,10 @@
dependency-jobs: 'verify_voltha-helm-charts_tag-collision'
- job-group:
- name: 'verify-voltha-helm-charts-jobs-voltha-2.6'
+ name: 'verify-voltha-helm-charts-jobs-voltha-2.7'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha-bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
- job-group:
name: 'verify-voltha-helm-charts-jobs-master'
diff --git a/jjb/verify/voltha-lib-go.yaml b/jjb/verify/voltha-lib-go.yaml
index 51d0405..f6e2b7a 100644
--- a/jjb/verify/voltha-lib-go.yaml
+++ b/jjb/verify/voltha-lib-go.yaml
@@ -16,7 +16,7 @@
- 'tag-collision-reject':
dependency-jobs: 'verify_voltha-lib-go_licensed'
- 'make-unit-test':
- build-node: 'ubuntu16.04-basebuild-2c-4g'
+ build-node: 'ubuntu18.04-basebuild-4c-8g'
build-timeout: 20
dest-gopath: "github.com/opencord"
unit-test-targets: 'lint sca test'
diff --git a/jjb/verify/voltha-onos.yaml b/jjb/verify/voltha-onos.yaml
index 5d5dd88..d708a43 100644
--- a/jjb/verify/voltha-onos.yaml
+++ b/jjb/verify/voltha-onos.yaml
@@ -8,9 +8,9 @@
jobs:
- 'verify-voltha-onos-jobs':
branch-regexp: '{all-branches-regexp}'
- - 'verify-voltha-onos-jobs-voltha-2.6':
- name-extension: '-voltha-2.6'
- override-branch: 'voltha-2.6'
+ - 'verify-voltha-onos-jobs-voltha-2.7':
+ name-extension: '-voltha-2.7'
+ override-branch: 'voltha-2.7'
branch-regexp: '{kind-voltha-regexp}'
- 'verify-voltha-onos-jobs-master':
branch-regexp: '^master$'
@@ -25,16 +25,17 @@
dependency-jobs: 'verify_voltha-onos_licensed'
- job-group:
- name: 'verify-voltha-onos-jobs-voltha-2.6'
+ name: 'verify-voltha-onos-jobs-voltha-2.7'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha-bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
- job-group:
name: 'verify-voltha-onos-jobs-master'
jobs:
- 'voltha-patch-test':
pipeline-script: 'voltha/master/bbsim-tests.groovy'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
- job-group:
name: 'publish-voltha-onos-jobs'
diff --git a/jjb/verify/voltha-openolt-adapter.yaml b/jjb/verify/voltha-openolt-adapter.yaml
index 6e845a8..ff3587b 100644
--- a/jjb/verify/voltha-openolt-adapter.yaml
+++ b/jjb/verify/voltha-openolt-adapter.yaml
@@ -8,9 +8,9 @@
jobs:
- 'verify-voltha-openolt-adapter-jobs':
branch-regexp: '{all-branches-regexp}'
- - 'verify-voltha-openolt-adapter-jobs-voltha-2.6':
- name-extension: '-voltha-2.6'
- override-branch: 'voltha-2.6'
+ - 'verify-voltha-openolt-adapter-jobs-voltha-2.7':
+ name-extension: '-voltha-2.7'
+ override-branch: 'voltha-2.7'
branch-regexp: '{kind-voltha-regexp}'
- 'verify-voltha-openolt-adapter-jobs-master':
branch-regexp: '^master$'
@@ -29,26 +29,27 @@
unit-test-targets: 'lint sca'
unit-test-keep-going: 'true'
junit-allow-empty-results: true
- build-node: 'ubuntu16.04-basebuild-2c-4g'
+ build-node: 'ubuntu18.04-basebuild-4c-8g'
- 'make-unit-test':
dest-gopath: "github.com/opencord"
name-extension: "-tests"
unit-test-targets: 'test'
unit-test-keep-going: 'true'
junit-allow-empty-results: true
- build-node: 'ubuntu16.04-basebuild-2c-4g'
+ build-node: 'ubuntu18.04-basebuild-2c-4g'
- job-group:
- name: 'verify-voltha-openolt-adapter-jobs-voltha-2.6'
+ name: 'verify-voltha-openolt-adapter-jobs-voltha-2.7'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha-bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
- job-group:
name: 'verify-voltha-openolt-adapter-jobs-master'
jobs:
- 'voltha-patch-test':
pipeline-script: 'voltha/master/bbsim-tests.groovy'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
- job-group:
name: 'publish-voltha-openolt-adapter-jobs'
diff --git a/jjb/verify/voltha-openonu-adapter-go.yaml b/jjb/verify/voltha-openonu-adapter-go.yaml
index de5ca20..e4e67e3 100644
--- a/jjb/verify/voltha-openonu-adapter-go.yaml
+++ b/jjb/verify/voltha-openonu-adapter-go.yaml
@@ -8,9 +8,9 @@
jobs:
- 'verify-voltha-openonu-adapter-go-jobs':
branch-regexp: '{all-branches-regexp}'
- - 'verify-voltha-openonu-adapter-go-jobs-voltha-2.6':
- name-extension: '-voltha-2.6'
- override-branch: 'voltha-2.6'
+ - 'verify-voltha-openonu-adapter-go-jobs-voltha-2.7':
+ name-extension: '-voltha-2.7'
+ override-branch: 'voltha-2.7'
branch-regexp: '{kind-voltha-regexp}'
- 'verify-voltha-openonu-adapter-go-jobs-master':
branch-regexp: '^master$'
@@ -28,26 +28,27 @@
unit-test-targets: 'lint sca'
unit-test-keep-going: 'true'
junit-allow-empty-results: true
- build-node: 'ubuntu16.04-basebuild-2c-4g'
+ build-node: 'ubuntu18.04-basebuild-4c-8g'
- 'make-unit-test':
dest-gopath: "github.com/opencord"
name-extension: "-tests"
unit-test-targets: 'test'
unit-test-keep-going: 'true'
junit-allow-empty-results: true
- build-node: 'ubuntu16.04-basebuild-2c-4g'
+ build-node: 'ubuntu18.04-basebuild-2c-4g'
- job-group:
- name: 'verify-voltha-openonu-adapter-go-jobs-voltha-2.6'
+ name: 'verify-voltha-openonu-adapter-go-jobs-voltha-2.7'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha-bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
- job-group:
name: 'verify-voltha-openonu-adapter-go-jobs-master'
jobs:
- 'voltha-patch-test':
pipeline-script: 'voltha/master/bbsim-tests.groovy'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
- job-group:
name: 'publish-voltha-openonu-adapter-go-jobs'
diff --git a/jjb/verify/voltha-openonu-adapter.yaml b/jjb/verify/voltha-openonu-adapter.yaml
index 65afee2..21a1968 100644
--- a/jjb/verify/voltha-openonu-adapter.yaml
+++ b/jjb/verify/voltha-openonu-adapter.yaml
@@ -1,5 +1,6 @@
---
# verification jobs for 'voltha-openonu-adapter' repo
+# NOTE: this component is deprecated; should we keep running the tests?
- project:
name: voltha-openonu-adapter
@@ -8,9 +9,9 @@
jobs:
- 'verify-voltha-openonu-adapter-jobs':
branch-regexp: '{all-branches-regexp}'
- - 'verify-voltha-openonu-adapter-jobs-voltha-2.6':
- name-extension: '-voltha-2.6'
- override-branch: 'voltha-2.6'
+ - 'verify-voltha-openonu-adapter-jobs-voltha-2.7':
+ name-extension: '-voltha-2.7'
+ override-branch: 'voltha-2.7'
branch-regexp: '{kind-voltha-regexp}'
- 'verify-voltha-openonu-adapter-jobs-master':
branch-regexp: '^master$'
@@ -35,16 +36,17 @@
build-timeout: 15
- job-group:
- name: 'verify-voltha-openonu-adapter-jobs-voltha-2.6'
+ name: 'verify-voltha-openonu-adapter-jobs-voltha-2.7'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha-bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
- job-group:
name: 'verify-voltha-openonu-adapter-jobs-master'
jobs:
- 'voltha-patch-test':
pipeline-script: 'voltha/master/bbsim-tests.groovy'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
- job-group:
name: 'publish-voltha-openonu-adapter-jobs'
diff --git a/jjb/verify/voltha-protos.yaml b/jjb/verify/voltha-protos.yaml
index aefda8b..14ebf9a 100644
--- a/jjb/verify/voltha-protos.yaml
+++ b/jjb/verify/voltha-protos.yaml
@@ -19,7 +19,7 @@
- 'verify-licensed'
- 'tag-collision-reject':
- 'make-unit-test':
- build-node: 'ubuntu16.04-basebuild-2c-4g'
+ build-node: 'ubuntu18.04-basebuild-2c-4g'
build-timeout: 20
unit-test-targets: 'test'
unit-test-keep-going: 'true'
diff --git a/jjb/verify/voltha-system-tests.yaml b/jjb/verify/voltha-system-tests.yaml
index 937c388..b385e2f 100644
--- a/jjb/verify/voltha-system-tests.yaml
+++ b/jjb/verify/voltha-system-tests.yaml
@@ -7,12 +7,15 @@
jobs:
- 'verify-voltha-system-tests-jobs':
+ build-node: 'ubuntu18.04-basebuild-4c-8g'
branch-regexp: '{all-branches-regexp}'
- - 'verify-voltha-system-tests-jobs-voltha-2.6':
- name-extension: '-voltha-2.6'
- override-branch: 'voltha-2.6'
+ - 'verify-voltha-system-tests-jobs-voltha-2.7':
+ build-node: 'ubuntu18.04-basebuild-4c-8g'
+ name-extension: '-voltha-2.7'
+ override-branch: 'voltha-2.7'
branch-regexp: '{kind-voltha-regexp}'
- 'verify-voltha-system-tests-jobs-master':
+ build-node: 'ubuntu18.04-basebuild-4c-8g'
branch-regexp: '^master$'
- job-group:
@@ -27,13 +30,14 @@
junit-allow-empty-results: true
- job-group:
- name: 'verify-voltha-system-tests-jobs-voltha-2.6'
+ name: 'verify-voltha-system-tests-jobs-voltha-2.7'
jobs:
- 'voltha-patch-test':
- pipeline-script: 'voltha-bbsim-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.7/bbsim-tests.groovy'
- job-group:
name: 'verify-voltha-system-tests-jobs-master'
jobs:
- 'voltha-patch-test':
pipeline-script: 'voltha/master/bbsim-tests.groovy'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
diff --git a/jjb/verify/xos.yaml b/jjb/verify/xos.yaml
index ff385f5..b33d7d2 100644
--- a/jjb/verify/xos.yaml
+++ b/jjb/verify/xos.yaml
@@ -31,7 +31,7 @@
jobs:
- 'python-unit-test':
build-timeout: 45
- build-node: 'ubuntu16.04-basebuild-2c-4g'
+ build-node: 'ubuntu18.04-basebuild-2c-4g'
- 'api-test':
pipeline-script: 'xos-core.groovy'
diff --git a/jjb/voltha-atest-provisioning.yaml b/jjb/voltha-atest-provisioning.yaml
deleted file mode 100644
index 703baf4..0000000
--- a/jjb/voltha-atest-provisioning.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
----
-# VOLTHA-Automated-Tests
-
-- project:
- name: voltha-atests
-
- project-name: 'voltha'
-
- jobs:
- - 'voltha-atest-provisioning'
-
-- job-template:
- id: voltha-atest-provisioning
- name: 'voltha-atest-provisioning'
- description: |
- Created by {id} job-template from ci-management/jjb/voltha-atest-provisioning.yaml
- Voltha automated tests run per commit. Voltha testing with ponsim to validate voltha+ponsim
-
- triggers:
- - cord-infra-gerrit-trigger-patchset:
- gerrit-server-name: '{gerrit-server-name}'
- project-regexp: 'voltha'
- branch-regexp: '^(master|voltha-1.7)$'
- dependency-jobs: '{dependency-jobs}'
- file-include-regexp: '{all-files-regexp}'
-
- properties:
- - cord-infra-properties:
- build-days-to-keep: '{build-days-to-keep}'
- artifact-num-to-keep: '{artifact-num-to-keep}'
-
- wrappers:
- - lf-infra-wrappers:
- build-timeout: '{build-timeout}'
- jenkins-ssh-credential: '{jenkins-ssh-credential}'
-
- parameters:
- - string:
- name: buildNode
- default: 'qct-pod3-voltha-testing'
- description: 'Name of the Jenkins node to run the job on'
-
- - string:
- name: manifestUrl
- default: '{gerrit-server-url}/{cord-repo-manifest}'
- description: 'URL to the repo manifest'
-
- - string:
- name: manifestBranch
- default: 'master'
- description: 'Name of the repo branch to use'
-
- - string:
- name: gerritChangeNumber
- default: '$GERRIT_CHANGE_NUMBER'
- description: 'Changeset number in Gerrit'
-
- - string:
- name: gerritPatchsetNumber
- default: '$GERRIT_PATCHSET_NUMBER'
- description: 'PatchSet number in Gerrit'
-
- - string:
- name: adapter
- default: ''
- description: 'BLANK for ponsim'
- - bool:
- name: BuildVoltha
- default: true
- description: 'Build modified voltha component'
-
- - bool:
- name: BuildBbsim
- default: false
- description: 'Build modified bbsim component'
-
- project-type: pipeline
- concurrent: false
-
- dsl: !include-raw-escape: pipeline/voltha-atest-provisioning.groovy
diff --git a/jjb/voltha-automated-build.yaml b/jjb/voltha-automated-build.yaml
deleted file mode 100644
index a852607..0000000
--- a/jjb/voltha-automated-build.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-# Run build+test of voltha
-
-- project:
- name: voltha-automated-build
-
- project-name: '{name}'
-
- jobs:
- - 'voltha-build'
-
-- job-template:
- id: voltha-build
- name: 'voltha-automated-build'
- description: |
- Created by {id} job-template from ci-management/jjb/voltha-automated-build.yaml
-
- triggers:
- - timed: |
- TZ=America/Los_Angeles
- H 2 * * *
-
- properties:
- - cord-infra-properties:
- build-days-to-keep: '{build-days-to-keep}'
- artifact-num-to-keep: '{artifact-num-to-keep}'
-
- wrappers:
- - lf-infra-wrappers:
- build-timeout: '60'
- jenkins-ssh-credential: '{jenkins-ssh-credential}'
-
- parameters:
- - string:
- name: buildNode
- default: 'onf-build'
- description: 'Name of the Jenkins node to run the job on'
-
- - string:
- name: manifestUrl
- default: '{gerrit-server-url}/{cord-repo-manifest}'
- description: 'URL to the repo manifest'
-
- - string:
- name: manifestBranch
- default: 'master'
- description: 'Name of the repo branch to use'
-
- project-type: pipeline
- concurrent: true
-
- dsl: !include-raw-escape: pipeline/voltha-automated-build.groovy
-
diff --git a/jjb/voltha-e2e.yaml b/jjb/voltha-e2e.yaml
index e185c53..c89c0d4 100755
--- a/jjb/voltha-e2e.yaml
+++ b/jjb/voltha-e2e.yaml
@@ -16,43 +16,61 @@
make-target-multipleolt: bbsim-multiolt-kind
make-target-1t4gemtest: 1t4gem-openonu-go-adapter-test
make-target-1t8gemtest: 1t8gem-openonu-go-adapter-test
+ make-target-reconciletest: reconcile-openonu-go-adapter-test-att
+ make-target-reconciledttest: reconcile-openonu-go-adapter-test-dt
+ make-target-reconciletttest: reconcile-openonu-go-adapter-test-tt
jobs:
- 'voltha-periodic-test':
name: 'periodic-voltha-test-bbsim'
- pipeline-script: 'voltha-nightly-tests-bbsim.groovy'
- build-node: 'qct-pod4-node2'
- make-target: functional-single-kind
- make-target-failtest: bbsim-failurescenarios
- make-target-errortest: bbsim-errorscenarios
- make-target-alarmtest: bbsim-alarms-kind
- make-target-multipleolt: bbsim-multiolt-kind
- withAlarms: true
code-branch: 'master'
- olts: 1
- onus: 2
- pons: 2
- time-trigger: "H H/12 * * *"
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
+ time-trigger: "H H/23 * * *"
+ testTargets: |
+ - target: functional-single-kind
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: bbsim-alarms-kind
+ workflow: att
+ flags: ""
+ teardown: false
+ - target: bbsim-failurescenarios
+ workflow: att
+ flags: ""
+ teardown: false
+ - target: bbsim-errorscenarios
+ workflow: att
+ flags: ""
+ teardown: false
- 'voltha-periodic-test':
name: 'periodic-voltha-multiple-olts-test-bbsim'
- pipeline-script: 'voltha-nightly-tests-bbsim.groovy'
- build-node: 'qct-pod4-node2'
- make-target: functional-multi-olt
- make-target-failtest: bbsim-multiolt-failurescenarios
- make-target-errortest: bbsim-multiolt-errorscenarios
- make-target-alarmtest: bbsim-alarms-kind
- make-target-multipleolt: bbsim-multiolt-kind
- withAlarms: false
code-branch: 'master'
olts: 2
- onus: 2
- pons: 2
- time-trigger: "H H/12 * * *"
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set onu=2,pon=2'
+ time-trigger: "H H/23 * * *"
+ testTargets: |
+ - target: functional-multi-olt
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: bbsim-multiolt-failurescenarios
+ workflow: att
+ flags: ""
+ teardown: false
+ - target: bbsim-multiolt-errorscenarios
+ workflow: att
+ flags: ""
+ teardown: false
+ - target: bbsim-multiolt-kind
+ workflow: att
+ flags: ""
+ teardown: false
- - 'voltha-periodic-test':
- name: 'periodic-voltha-2.6-multiple-olts-test-bbsim'
- pipeline-script: 'voltha-nightly-tests-bbsim.groovy'
+ - 'voltha-periodic-test-kind-voltha-based':
+ name: 'periodic-voltha-multiple-olts-test-bbsim-2.7'
+ pipeline-script: 'voltha/voltha-2.7/voltha-nightly-tests-bbsim.groovy'
build-node: 'qct-pod4-node2'
make-target: functional-multi-olt
make-target-failtest: bbsim-multiolt-failurescenarios
@@ -60,178 +78,616 @@
make-target-alarmtest: bbsim-alarms-kind
make-target-multipleolt: bbsim-multiolt-kind
withAlarms: false
- code-branch: 'voltha-2.6'
+ code-branch: 'voltha-2.7'
olts: 2
onus: 2
pons: 2
- time-trigger: "H H/12 * * *"
+ time-trigger: "H H * * *"
+ # openonu Go periodic tests
- 'voltha-periodic-test':
name: 'periodic-voltha-openonu-go-test-bbsim'
- pipeline-script: 'voltha-openonu-go-test-bbsim.groovy'
- build-node: 'ubuntu16.04-basebuild-4c-8g'
+ code-branch: 'master'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
+ time-trigger: "H H/12 * * *"
+ logLevel: 'DEBUG'
+ testTargets: |
+ - target: 1t1gem-openonu-go-adapter-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: 1t4gem-openonu-go-adapter-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: 1t8gem-openonu-go-adapter-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: mib-upload-templating-openonu-go-adapter-test
+ workflow: att
+ flags: "--set pon=2,onu=2,controlledActivation=only-onu"
+ teardown: true
+ - target: reconcile-openonu-go-adapter-test-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: reconcile-openonu-go-adapter-test-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: reconcile-openonu-go-adapter-test-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+ - target: openonu-go-adapter-omci-hardening-passed-test
+ workflow: att
+ flags: "--set omci_response_rate=9 --set omci_timeout=1s"
+ teardown: true
+ - target: openonu-go-adapter-omci-hardening-failed-test
+ workflow: att
+ flags: "--set omci_response_rate=7"
+ teardown: true
+
+ - 'voltha-periodic-test':
+ name: 'patchset-voltha-openonu-go-test-bbsim'
+ trigger-comment: "voltha test openonu singleolt"
+ code-branch: '$GERRIT_BRANCH'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
+ gerrit-project: '$GERRIT_PROJECT'
+ gerritRefspec: '$GERRIT_REFSPEC'
+ logLevel: 'DEBUG'
+ testTargets: |
+ - target: 1t1gem-openonu-go-adapter-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: 1t4gem-openonu-go-adapter-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: 1t8gem-openonu-go-adapter-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: mib-upload-templating-openonu-go-adapter-test
+ workflow: att
+ flags: "--set pon=2,onu=2,controlledActivation=only-onu"
+ teardown: true
+ - target: reconcile-openonu-go-adapter-test-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: reconcile-openonu-go-adapter-test-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: reconcile-openonu-go-adapter-test-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+ - target: openonu-go-adapter-omci-hardening-passed-test
+ workflow: att
+ flags: "--set omci_response_rate=9 --set omci_timeout=1s"
+ teardown: true
+ - target: openonu-go-adapter-omci-hardening-failed-test
+ workflow: att
+ flags: "--set omci_response_rate=7"
+ teardown: true
+
+ - 'voltha-periodic-test-kind-voltha-based':
+ name: 'periodic-voltha-openonu-go-test-bbsim-2.7'
+ pipeline-script: 'voltha/voltha-2.7/voltha-openonu-go-test-bbsim.groovy'
+ build-node: 'ubuntu18.04-basebuild-8c-15g'
make-target: openonu-go-adapter-test
make-target-1t4gemtest: 1t4gem-openonu-go-adapter-test
make-target-1t8gemtest: 1t8gem-openonu-go-adapter-test
+ make-target-reconciletest: reconcile-openonu-go-adapter-test
+ make-target-reconciledttest: reconcile-openonu-go-adapter-test-dt
+ make-target-reconciletttest: reconcile-openonu-go-adapter-test-tt
withAlarms: false
- code-branch: 'master'
- time-trigger: "H H/12 * * *"
+ code-branch: 'voltha-2.7'
+ time-trigger: "H H/23 * * *"
- 'voltha-periodic-test':
name: 'periodic-voltha-multiple-olts-openonu-go-test-bbsim'
- pipeline-script: 'voltha-openonu-go-test-bbsim.groovy'
- build-node: 'ubuntu16.04-basebuild-4c-8g'
+ code-branch: 'master'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set onu=2,pon=2'
+ olts: 2
+ logLevel: 'DEBUG'
+ testTargets: |
+ - target: 1t1gem-openonu-go-adapter-multi-olt-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: 1t4gem-openonu-go-adapter-multi-olt-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: 1t8gem-openonu-go-adapter-multi-olt-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: reconcile-openonu-go-adapter-multi-olt-test-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: reconcile-openonu-go-adapter-multi-olt-test-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: reconcile-openonu-go-adapter-multi-olt-test-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+ time-trigger: "H H/12 * * *"
+
+ - 'voltha-periodic-test':
+ name: 'patchset-voltha-multiple-olts-openonu-go-test-bbsim'
+ trigger-comment: "voltha test openonu multiolt"
+ code-branch: '$GERRIT_BRANCH'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set onu=2,pon=2'
+ gerrit-project: '$GERRIT_PROJECT'
+ gerritRefspec: '$GERRIT_REFSPEC'
+ logLevel: 'DEBUG'
+ testTargets: |
+ - target: 1t1gem-openonu-go-adapter-multi-olt-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: 1t4gem-openonu-go-adapter-multi-olt-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: 1t8gem-openonu-go-adapter-multi-olt-test
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: reconcile-openonu-go-adapter-multi-olt-test-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: reconcile-openonu-go-adapter-multi-olt-test-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: reconcile-openonu-go-adapter-multi-olt-test-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+ olts: 2
+
+ - 'voltha-periodic-test':
+ name: 'periodic-voltha-pm-data-test-bbsim'
+ code-branch: 'master'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set externalAccess.enabled=true,externalAccess.service.type=NodePort,externalAccess.service.nodePorts[0]=30201,externalAccess.service.domain=127.0.0.1'
+ time-trigger: "H H/23 * * *"
+ logLevel: 'DEBUG'
+ testTargets: |
+ - target: voltha-pm-data-single-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: voltha-pm-data-single-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: voltha-pm-data-single-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+ timeout: 140
+
+ - 'voltha-periodic-test':
+ name: 'patchset-voltha-pm-data-test-bbsim'
+ trigger-comment: "voltha test pm data singleolt"
+ code-branch: '$GERRIT_BRANCH'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set externalAccess.enabled=true,externalAccess.service.type=NodePort,externalAccess.service.nodePorts[0]=30201,externalAccess.service.domain=127.0.0.1'
+ code-branch: '$GERRIT_BRANCH'
+ gerrit-project: '$GERRIT_PROJECT'
+ gerritRefspec: '$GERRIT_REFSPEC'
+ logLevel: 'DEBUG'
+ testTargets: |
+ - target: voltha-pm-data-single-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: voltha-pm-data-single-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: voltha-pm-data-single-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+ timeout: 140
+
+ - 'voltha-periodic-test':
+ name: 'periodic-voltha-multiple-olts-pm-data-test-bbsim'
+ code-branch: 'master'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set onu=2,pon=2 --set externalAccess.enabled=true,externalAccess.service.type=NodePort,externalAccess.service.nodePorts[0]=30201,externalAccess.service.domain=127.0.0.1'
+ olts: 2
+ timeout: 180
+ logLevel: 'DEBUG'
+ testTargets: |
+ - target: voltha-pm-data-multiolt-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: voltha-pm-data-multiolt-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: voltha-pm-data-multiolt-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+ time-trigger: "H H/23 * * *"
+
+ - 'voltha-periodic-test':
+ name: 'patchset-voltha-multiple-olts-pm-data-test-bbsim'
+ trigger-comment: "voltha test pm data multiolt"
+ code-branch: '$GERRIT_BRANCH'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set onu=2,pon=2 --set externalAccess.enabled=true,externalAccess.service.type=NodePort,externalAccess.service.nodePorts[0]=30201,externalAccess.service.domain=127.0.0.1'
+ gerrit-project: '$GERRIT_PROJECT'
+ gerritRefspec: '$GERRIT_REFSPEC'
+ logLevel: 'DEBUG'
+ testTargets: |
+ - target: voltha-pm-data-multiolt-kind-att
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: voltha-pm-data-multiolt-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
+ - target: voltha-pm-data-multiolt-kind-tt
+ workflow: tt
+ flags: ""
+ teardown: true
+ olts: 2
+ timeout: 180
+
+ - 'voltha-periodic-test-kind-voltha-based':
+ name: 'periodic-voltha-multiple-olts-openonu-go-test-bbsim-2.7'
+ pipeline-script: 'voltha/voltha-2.7/voltha-openonu-go-test-bbsim.groovy'
+ build-node: 'ubuntu18.04-basebuild-8c-15g'
make-target: openonu-go-adapter-multi-olt-test
make-target-1t4gemtest: 1t4gem-openonu-go-adapter-multi-olt-test
make-target-1t8gemtest: 1t8gem-openonu-go-adapter-multi-olt-test
+ make-target-reconciletest: reconcile-openonu-go-adapter-multi-olt-test
+ make-target-reconciledttest: reconcile-openonu-go-adapter-multi-olt-test-dt
+ make-target-reconciletttest: reconcile-openonu-go-adapter-multi-olt-test-tt
withAlarms: false
- code-branch: 'master'
+ code-branch: 'voltha-2.7'
olts: 2
onus: 2
pons: 2
- time-trigger: "H H/12 * * *"
+ time-trigger: "H H/23 * * *"
- 'voltha-periodic-test':
name: 'periodic-voltha-test-DMI'
- pipeline-script: 'voltha-DMI-bbsim-tests.groovy'
- build-node: 'qct-pod4-node2'
- make-target: bbsim-dmi-hw-management-test
- withAlarms: false
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
code-branch: 'master'
- time-trigger: "H H/12 * * *"
+ time-trigger: "H H/23 * * *"
+ testTargets: |
+ - target: bbsim-dmi-hw-management-test
+ workflow: att
+ flags: ""
+ teardown: true
- - 'voltha-periodic-test':
- name: 'periodic-voltha-test-DMI-2.6'
- pipeline-script: 'voltha-DMI-bbsim-tests.groovy'
+ - 'voltha-periodic-test-kind-voltha-based':
+ name: 'periodic-voltha-test-DMI-2.7'
+ pipeline-script: 'voltha/voltha-2.7/voltha-DMI-bbsim-tests.groovy'
build-node: 'qct-pod4-node2'
make-target: bbsim-dmi-hw-management-test
withAlarms: false
- code-branch: 'voltha-2.6'
- time-trigger: "H H/12 * * *"
+ code-branch: 'voltha-2.7'
+ time-trigger: "H H/23 * * *"
- - 'voltha-periodic-test':
- name: 'periodic-voltha-test-bbsim-2.6'
- pipeline-script: 'voltha-nightly-tests-bbsim.groovy'
+ - 'voltha-periodic-test-kind-voltha-based':
+ name: 'periodic-voltha-test-bbsim-2.7'
+ pipeline-script: 'voltha/voltha-2.7/voltha-nightly-tests-bbsim.groovy'
build-node: 'qct-pod4-node2'
make-target: functional-single-kind
make-target-failtest: bbsim-failurescenarios
make-target-errortest: bbsim-errorscenarios
make-target-alarmtest: bbsim-alarms-kind
withAlarms: true
- code-branch: 'voltha-2.6'
- onus: 2
- pons: 2
- time-trigger: "H H/12 * * *"
+ code-branch: 'voltha-2.7'
+ time-trigger: "H H * * *"
- 'voltha-periodic-test':
name: 'periodic-voltha-etcd-test'
- pipeline-script: 'voltha-system-test-bbsim.groovy'
- build-node: 'ubuntu16.04-basebuild-4c-8g'
+ build-node: 'ubuntu18.04-basebuild-4c-8g'
code-branch: 'master'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master --set onu=2,pon=2'
+ time-trigger: "H H/12 * * *"
+ testTargets: |
+ - target: sanity-multi-kind
+ workflow: att
+ flags: ""
+ teardown: true
+
+ - 'voltha-periodic-test-kind-voltha-based':
+ name: 'periodic-voltha-etcd-test-2.7'
+ pipeline-script: 'voltha/voltha-2.7/voltha-system-test-bbsim.groovy'
+ build-node: 'ubuntu18.04-basebuild-4c-8g'
+ code-branch: 'voltha-2.7'
make-target: sanity-multi-kind
onus: 2
pons: 2
time-trigger: "H H/12 * * *"
- 'voltha-periodic-test':
- name: 'periodic-voltha-etcd-test-2.6'
- pipeline-script: 'voltha-system-test-bbsim.groovy'
- build-node: 'ubuntu16.04-basebuild-4c-8g'
- code-branch: 'voltha-2.6'
- make-target: sanity-multi-kind
- onus: 2
- pons: 2
- time-trigger: "H H/12 * * *"
-
-
- - 'voltha-periodic-test':
name: 'periodic-voltha-sanity-test-multi-runs'
- pipeline-script: 'voltha-go-multi-tests.groovy'
- build-node: 'qct-pod4-node2'
code-branch: 'master'
- make-target: sanity-kind
- onus: 1
- pons: 1
- test-runs: 5
- time-trigger: "H H/12 * * *"
+ time-trigger: "H H/23 * * *"
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
+ testTargets: |
+ - target: sanity-kind
+ workflow: att
+ flags: ""
+ teardown: true
+ - target: sanity-kind
+ workflow: att
+ flags: ""
+ teardown: false
+ - target: sanity-kind
+ workflow: att
+ flags: ""
+ teardown: false
+ - target: sanity-kind
+ workflow: att
+ flags: ""
+ teardown: false
+ - target: sanity-kind
+ workflow: att
+ flags: ""
+ teardown: false
- - 'voltha-periodic-test':
- name: 'periodic-voltha-sanity-test-multi-runs-2.6'
- pipeline-script: 'voltha-go-multi-tests.groovy'
+ - 'voltha-periodic-test-kind-voltha-based':
+ name: 'periodic-voltha-sanity-test-multi-runs-2.7'
+ pipeline-script: 'voltha/voltha-2.7/voltha-go-multi-tests.groovy'
build-node: 'qct-pod4-node2'
- code-branch: 'voltha-2.6'
+ code-branch: 'voltha-2.7'
make-target: sanity-kind
onus: 1
pons: 1
test-runs: 5
- time-trigger: "H H/12 * * *"
+ time-trigger: "H H/23 * * *"
- 'voltha-periodic-test':
name: 'nightly-voltha-DTflow-sanity-test'
- build-node: 'ubuntu16.04-basebuild-4c-8g'
+ build-node: 'ubuntu18.04-basebuild-4c-8g'
code-branch: 'master'
- make-target: sanity-kind-dt
- onus: 1
- pons: 1
time-trigger: "@daily"
- work-flow: DT
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
+ testTargets: |
+ - target: sanity-kind-dt
+ workflow: dt
+ flags: ""
+ teardown: true
- # Per-patchset Pod builds on Tucson pod
+ # Per-patchset Pod builds on Tucson pod (master)
- 'verify_physical_voltha_patchset_auto':
name: 'verify_physical_voltha_patchset_auto'
- build-node: 'tucson-pod'
- config-pod: 'tucson-pod'
- oltDebVersionMaster: 'openolt_asfvolt16-3.3.2-f7feb4b828467ccc99104b56b29dc7a19aa2008b-40G-NNI.deb'
- oltDebVersionVoltha23: 'openolt_asfvolt16-3.2.0-fc10f0d035181d3125ffc6e7a60bf5328fcf5bfa-40G-NNI.deb'
- profile: 'Default'
default-test-args: '-i sanityORDeleteOLT -i PowerSwitch -X'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
- # Per-patchset Pod builds on Tucson pod
+ # Per-patchset Pod builds on Tucson pod (voltha-2.7)
+ - 'verify_physical_voltha_patchset_auto':
+ name: 'verify_physical_voltha_patchset_auto_voltha-2.7'
+ oltDebVersionMaster: 'openolt_asfvolt16-3.4.1-dev-c5dcc7f2b27df1c42868b3a79a5416808511bb1d-40G-NNI.deb'
+ oltDebVersionVoltha23: 'openolt_asfvolt16-3.3.3-1a5d68b50d8bcc5ba6cb1630d3294c30c37cd2f5-40G-NNI.deb'
+ default-test-args: '-i sanityORDeleteOLT -i PowerSwitch -X'
+ pipeline-script: 'voltha/voltha-2.7/voltha-physical-build-and-tests.groovy'
+ branch-pattern: voltha-2.7
+
+ # Per-patchset Pod builds on Tucson pod (master)
- 'verify_physical_voltha_patchset_manual':
name: 'verify_physical_voltha_patchset_manual'
- build-node: 'tucson-pod'
- config-pod: 'tucson-pod'
- oltDebVersionMaster: 'openolt_asfvolt16-3.3.2-f7feb4b828467ccc99104b56b29dc7a19aa2008b-40G-NNI.deb'
- oltDebVersionVoltha23: 'openolt_asfvolt16-3.2.0-fc10f0d035181d3125ffc6e7a60bf5328fcf5bfa-40G-NNI.deb'
profile: 'Default'
trigger-string: 'hardware test'
default-test-args: '-i sanityORDeleteOLT -i PowerSwitch -X'
+ branch-pattern: master
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
- # Per-patchset Pod builds on Tucson pod
+ # Per-patchset Pod builds on Tucson pod (master)
- 'verify_physical_voltha_patchset_manual':
name: 'verify_physical_voltha_patchset_manual_DT'
- build-node: 'tucson-pod'
- config-pod: 'tucson-pod-DT'
- oltDebVersionMaster: 'openolt_asfvolt16-3.3.2-f7feb4b828467ccc99104b56b29dc7a19aa2008b-40G-NNI.deb'
- oltDebVersionVoltha23: 'openolt_asfvolt16-3.2.0-fc10f0d035181d3125ffc6e7a60bf5328fcf5bfa-40G-NNI.deb'
+ workflow: 'dt'
profile: 'Default'
- pipeline-script: 'voltha-dt-physical-build-and-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.7/voltha-dt-physical-build-and-tests.groovy'
trigger-string: 'DT hardware test'
default-test-args: '-i sanityDt -i PowerSwitch -X'
+ branch-pattern: master
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
+ # Per-patchset Pod builds on Tucson pod (voltha-2.7)
+ - 'verify_physical_voltha_patchset_manual':
+ name: 'verify_physical_voltha_patchset_manual_voltha-2.7'
+ oltDebVersionMaster: 'openolt_asfvolt16-3.4.1-dev-c5dcc7f2b27df1c42868b3a79a5416808511bb1d-40G-NNI.deb'
+ oltDebVersionVoltha23: 'openolt_asfvolt16-3.3.3-1a5d68b50d8bcc5ba6cb1630d3294c30c37cd2f5-40G-NNI.deb'
+ profile: 'Default'
+ pipeline-script: 'voltha/voltha-2.7/voltha-physical-build-and-tests.groovy'
+ trigger-string: 'hardware test'
+ default-test-args: '-i sanityORDeleteOLT -i PowerSwitch -X'
+ branch-pattern: voltha-2.7
+
+
+ # Per-patchset Pod builds on Tucson pod (voltha-2.7)
+ - 'verify_physical_voltha_patchset_manual':
+ name: 'verify_physical_voltha_patchset_manual_DT_voltha-2.7'
+ workflow: 'dt'
+ oltDebVersionMaster: 'openolt_asfvolt16-3.4.1-dev-c5dcc7f2b27df1c42868b3a79a5416808511bb1d-40G-NNI.deb'
+ oltDebVersionVoltha23: 'openolt_asfvolt16-3.3.3-1a5d68b50d8bcc5ba6cb1630d3294c30c37cd2f5-40G-NNI.deb'
+ profile: 'Default'
+ pipeline-script: 'voltha/voltha-2.7/voltha-physical-build-and-tests.groovy'
+ trigger-string: 'DT hardware test'
+ default-test-args: '-i sanityDt -i PowerSwitch -X'
+ branch-pattern: voltha-2.7
# Manual build job for Tucson pod
# Allow local testing without disrupting above job
- 'build_physical_voltha_manual':
name: 'build_tucson-pod_manual'
- build-node: 'tucson-pod'
config-pod: 'tucson-pod'
manualBranch: 'master'
- oltDebVersionMaster: 'openolt_asfvolt16-3.3.2-f7feb4b828467ccc99104b56b29dc7a19aa2008b-40G-NNI.deb'
- oltDebVersionVoltha23: 'openolt_asfvolt16-3.2.0-fc10f0d035181d3125ffc6e7a60bf5328fcf5bfa-40G-NNI.deb'
+ oltDebVersionMaster: 'openolt_asfvolt16-3.4.1-dev-c5dcc7f2b27df1c42868b3a79a5416808511bb1d-40G-NNI.deb'
+ oltDebVersionVoltha23: 'openolt_asfvolt16-3.3.3-1a5d68b50d8bcc5ba6cb1630d3294c30c37cd2f5-40G-NNI.deb'
profile: 'Default'
default-test-args: '-i sanityORDeleteOLT -i PowerSwitch -X'
+ extraHelmFlags: '--set global.image_tag=master --set onos-classic.image.tag=master'
- job-template:
id: 'voltha-periodic-test'
name: '{name}'
- pipeline-script: 'voltha-go-tests.groovy'
+ pipeline-script: 'voltha/master/periodic-bbsim-tests.groovy'
+ build-node: 'ubuntu18.04-basebuild-8c-15g'
+ robot-args: ''
+ gerrit-project: ''
+ gerritRefspec: ''
+ volthaSystemTestsChange: ''
+ volthaHelmChartsChange: ''
+ extraHelmFlags: ''
+ registry: mirror.registry.opennetworking.org
+ sandbox: true
+ olts: 1
+ timeout: 130
+ logLevel: 'INFO'
+
+ trigger-comment: vv7CBoQQYYonvaN8xcru
+ time-trigger: 0 0 29 2 *
+
+ description: |
+ <!-- Managed by Jenkins Job Builder -->
+ Created by {id} job-template from ci-management/jjb/voltha-e2e.yaml <br /><br />
+ E2E Validation for Voltha 2.X
+
+ properties:
+ - cord-infra-properties:
+ build-days-to-keep: '{big-build-days-to-keep}'
+ artifact-num-to-keep: '{big-artifact-num-to-keep}'
+
+ wrappers:
+ - lf-infra-wrappers:
+ build-timeout: '{build-timeout}'
+ jenkins-ssh-credential: '{jenkins-ssh-credential}'
+
+ parameters:
+ - string:
+ name: buildNode
+ default: '{build-node}'
+ description: 'Name of the Jenkins node to run the job on'
+
+ - string:
+ name: extraHelmFlags
+ default: '{extraHelmFlags}'
+ description: 'Helm flags (passed to each deployment)'
+
+ # test configuration
+ # this is a parameter to drive the test execution, VOLTHA is redeployed each time with
+ # the provided configuration and then the make target is invoked,
+ # example value (has to be valid YAML):
+ # testTargets: |
+ # - target: 1t1gem-openonu-go-adapter-test
+ # workflow: att
+ # flags: ""
+ # teardown: true
+ - text:
+ name: testTargets
+ default: '{testTargets}'
+ description: 'Test configuration, see the ci-management job definition for more info'
+
+ - string:
+ name: branch
+ default: '{code-branch}'
+ description: 'Repo manifest branch for code checkout'
+
+ - string:
+ name: gerritProject
+ default: '{gerrit-project}'
+ description: 'Name of the Gerrit project'
+
+ - string:
+ name: gerritRefspec
+ default: ''
+ description: 'PatchSet REFSPEC in Gerrit, example value: "refs/changes/79/18779/13"'
+
+ - string:
+ name: extraRobotArgs
+ default: '{robot-args}'
+ description: 'Arguments to pass to robot'
+
+ - string:
+ name: volthaSystemTestsChange
+ default: '{volthaSystemTestsChange}'
+ description: 'Download a change for gerrit in the voltha-system-tests repo, example value: "refs/changes/79/18779/13"'
+
+ - string:
+ name: volthaHelmChartsChange
+ default: '{volthaHelmChartsChange}'
+ description: 'Download a change for gerrit in the voltha-helm-charts repo, example value: "refs/changes/79/18779/13"'
+
+ - string:
+ name: olts
+ default: '{olts}'
+ description: 'How many BBSim instances to run'
+
+ - string:
+ name: registry
+ default: '{registry}'
+ description: 'Which registry to use (amazon vs menlo)'
+
+ - string:
+ name: logLevel
+ default: '{logLevel}'
+ description: 'Log level for all the components'
+
+ - string:
+ name: timeout
+ default: '{timeout}'
+ description: 'Timeout of pipeline job [minutes]'
+
+ project-type: pipeline
+ concurrent: true
+
+ dsl: !include-raw-escape: pipeline/{pipeline-script}
+
+ triggers:
+ # patchset jobs will set this to "0 0 29 2 *" (feb 29th, it's once every 4 years)
+ - timed: |
+ TZ=America/Los_Angeles
+ {time-trigger}
+ # periodic jobs will set this to vv7CBoQQYYonvaN8xcru (hopefully no-one will comment with that)
+ - gerrit:
+ server-name: '{gerrit-server-name}'
+ silent-start: false
+ successful-message: "PASSED openonu-go test"
+ failure-message: "FAILED openonu-go test"
+ unstable-message: "UNSTABLE openonu-go test"
+ trigger-on:
+ - comment-added-contains-event:
+ comment-contains-value: '.*{trigger-comment}.*'
+ projects:
+ - project-compare-type: REG_EXP
+ project-pattern: '^(voltha-go|voltha-openolt-adapter|voltha-openonu-adapter-go|ofagent-go|voltha-onos|bbsim)$'
+ branches:
+ - branch-compare-type: REG_EXP
+ branch-pattern: '{all-branches-regexp}'
+
+- job-template:
+ id: 'voltha-periodic-test-kind-voltha-based'
+ name: '{name}'
+ pipeline-script: 'voltha/voltha-2.7/voltha-go-tests.groovy'
test-runs: 1
robot-args: ''
gerrit-project: ''
work-flow: ''
volthaSystemTestsChange: ''
+ volthaHelmChartsChange: ''
kindVolthaChange: ''
extraHelmFlags: ''
sandbox: true
@@ -261,7 +717,7 @@
- string:
name: extraHelmFlags
- default: '--set onu={onus},pon={pons},{extraHelmFlags}'
+ default: '--set onu={onus},pon={pons} {extraHelmFlags}'
description: 'Helm flags to pass to ./voltha up'
- bool:
@@ -305,6 +761,21 @@
description: 'Makefile target to invoke during 1t8gem test'
- string:
+ name: makeReconcileTestTarget
+ default: '{make-target-reconciletest}'
+ description: 'Makefile target to invoke during reconcile test'
+
+ - string:
+ name: makeReconcileDtTestTarget
+ default: '{make-target-reconciledttest}'
+ description: 'Makefile target to invoke during reconcile dt test'
+
+ - string:
+ name: makeReconcileTtTestTarget
+ default: '{make-target-reconciletttest}'
+ description: 'Makefile target to invoke during reconcile tt test'
+
+ - string:
name: manifestUrl
default: '{gerrit-server-url}/{voltha-test-manifest-repo}'
description: 'Repo manifest URL for code checkout'
@@ -377,13 +848,15 @@
- job-template:
id: 'voltha-patch-test'
name: 'verify_{project}_sanity-test{name-extension}'
+ build-node: 'ubuntu18.04-basebuild-4c-8g'
override-branch: '$GERRIT_BRANCH'
sandbox: true
build-timeout: 20
volthaSystemTestsChange: ''
volthaHelmChartsChange: ''
- kindVolthaChange: ''
+ extraHelmFlags: ''
branch-regexp: '{all-branches-regexp}'
+ kindVolthaChange: '' # this is only needed to test kind-voltha patches
description: |
<!-- Managed by Jenkins Job Builder -->
@@ -403,7 +876,7 @@
parameters:
- string:
name: buildNode
- default: 'ubuntu16.04-basebuild-4c-8g'
+ default: '{build-node}'
description: 'Name of the Jenkins node to run the job on'
- string:
@@ -418,13 +891,8 @@
- string:
name: extraHelmFlags
- default: ''
- description: 'Helm flags to pass to ./voltha up'
-
- - string:
- name: volthaSystemTestsChange
- default: '{volthaSystemTestsChange}'
- description: 'Download a change for gerrit in the voltha-system-tests repo, example value: "refs/changes/79/18779/13"'
+ default: '{extraHelmFlags}'
+ description: 'Helm flags to pass to every helm command'
- string:
name: volthaSystemTestsChange
@@ -441,11 +909,11 @@
default: '{override-branch}'
description: 'Name of the branch to use'
- # deprecated params (not used in master, remove after 2.6 support is dropped)
+ # Used in the 2.7 based pipeline, can be removed after 2.8
- string:
name: kindVolthaChange
default: '{kindVolthaChange}'
- description: 'Download a change for gerrit in the kind-voltha repo, example value: "refs/changes/32/19132/1"'
+ description: 'Download a change for gerrit in the kind-voltha repo, example value: "refs/changes/32/19132/1" (only used to test kind-voltha changes in 2.7)'
project-type: pipeline
concurrent: true
@@ -476,25 +944,9 @@
pattern: '{all-files-regexp}'
# POD Per Patchset Pipeline Jobs
-
-- job-template:
- name: '{name}'
- id: verify_physical_voltha_patchset_auto
- description: |
- <!-- Managed by Jenkins Job Builder -->
- Automated build on POD {config-pod} using {pipeline-script} <br /><br />
- Created from job-template {id} from ci-management/jjb/voltha-e2e.yaml <br />
- Created by Andy Bavier, andy@opennetworking.org <br />
- Copyright (c) 2019 Open Networking Foundation (ONF)
- sandbox: true
- pipeline-script: 'voltha-physical-build-and-tests.groovy'
- default-test-args: '-i sanityORDeleteOLT -X'
-
- properties:
- - cord-infra-properties:
- build-days-to-keep: '{build-days-to-keep}'
- artifact-num-to-keep: '{artifact-num-to-keep}'
-
+# to use these parameters in a job: `<<: *voltha-physical-patchset-parameters`
+- voltha-physical-patchset-parameters: &voltha-physical-patchset-parameters
+ name: voltha-physical-patchset-parameters
parameters:
- string:
name: buildNode
@@ -502,155 +954,6 @@
description: 'Pod management node'
- string:
- name: manifestUrl
- default: '{gerrit-server-url}/{voltha-test-manifest-repo}'
- description: 'URL to the repo manifest'
-
- - string:
- name: branch
- default: '$GERRIT_BRANCH'
- description: 'Name of the branch to use'
-
- - string:
- name: gerritProject
- default: '$GERRIT_PROJECT'
- description: 'Name of the Gerrit project'
-
- - string:
- name: gerritChangeNumber
- default: '$GERRIT_CHANGE_NUMBER'
- description: 'Changeset number in Gerrit'
-
- - string:
- name: gerritPatchsetNumber
- default: '$GERRIT_PATCHSET_NUMBER'
- description: 'PatchSet number in Gerrit'
-
- - string:
- name: cordRepoUrl
- default: '{gerrit-server-url}'
- description: 'The URL of the CORD Project repository'
-
- - string:
- name: podName
- default: '{config-pod}'
-
- - string:
- name: deploymentConfigFile
- default: 'pod-configs/deployment-configs/{config-pod}.yaml'
- description: 'Path of deployment config file'
-
- - string:
- name: kindVolthaValuesFile
- default: 'pod-configs/kubernetes-configs/voltha/{config-pod}.yml'
- description: 'Path of kind-voltha values override file'
-
- - string:
- name: sadisConfigFile
- default: 'voltha/voltha-system-tests/tests/data/{config-pod}-sadis.json'
- description: 'Path of SADIS config to load'
-
- - string:
- name: localConfigDir
- default: null
- description: 'If specified, config file paths are relative to this dir; otherwise $WORKSPACE'
-
- - string:
- name: configRepo
- default: 'pod-configs'
- description: 'A repository containing the config files, will be checked out if specified'
-
- - string:
- name: oltDebVersionMaster
- default: '{oltDebVersionMaster}'
- description: 'OLT Software version to install for master branch builds'
-
- - string:
- name: oltDebVersion
- default: '{oltDebVersionVoltha23}'
- description: 'OLT Software version to install for voltha-2.3 branch builds'
-
- - string:
- name: profile
- default: '{profile}'
- description: 'Technology Profile pushed to the ETCD'
-
- - string:
- name: notificationEmail
- default: 'andy@opennetworking.org'
- description: ''
-
- - bool:
- name: reinstallOlt
- default: true
- description: "Re-install OLT software"
-
- - string:
- name: manualBranch
- default: '{manualBranch}'
- description: "If a non-empty string, build manually as a specific branch, not with a specific patchset"
-
- - string:
- name: extraRobotArgs
- default: '{default-test-args}'
- description: 'Arguments to pass to robot'
-
- project-type: pipeline
- concurrent: true
-
- dsl: !include-raw-escape: pipeline/{pipeline-script}
-
- triggers:
- - gerrit:
- server-name: '{gerrit-server-name}'
- dependency-jobs: '{dependency-jobs}'
- silent-start: false
- successful-message: "PASSED hardware test"
- failure-message: "FAILED hardware test"
- unstable-message: "UNSTABLE hardware test"
- trigger-on:
- - comment-added-event:
- approval-category: 'Code-Review'
- approval-value: '+2'
- projects:
- - project-compare-type: REG_EXP
- project-pattern: '^(voltha-openolt-adapter|voltha-openonu-adapter|voltha-openonu-adapter-go|voltha-go)$'
- branches:
- - branch-compare-type: REG_EXP
- branch-pattern: '{all-branches-regexp}'
-
-
-- job-template:
- name: '{name}'
- id: verify_physical_voltha_patchset_manual
- description: |
- <!-- Managed by Jenkins Job Builder -->
- Automated build on POD {config-pod} using {pipeline-script} <br /><br />
- Created from job-template {id} from ci-management/jjb/voltha-e2e.yaml <br />
- Created by Andy Bavier, andy@opennetworking.org <br />
- Copyright (c) 2019 Open Networking Foundation (ONF)
- sandbox: true
- pipeline-script: 'voltha-physical-build-and-tests.groovy'
- trigger-string: 'hardware test'
- default-test-args: '-i sanityORDeleteOLT -X'
-
- properties:
- - cord-infra-properties:
- build-days-to-keep: '{build-days-to-keep}'
- artifact-num-to-keep: '{artifact-num-to-keep}'
-
- parameters:
- - string:
- name: buildNode
- default: '{build-node}'
- description: 'Pod management node'
-
- - string:
- name: manifestUrl
- default: '{gerrit-server-url}/{voltha-test-manifest-repo}'
- description: 'URL to the repo manifest'
-
- - string:
name: branch
default: '$GERRIT_BRANCH'
description: 'Name of the repo branch to use'
@@ -661,14 +964,14 @@
description: 'Name of the Gerrit project'
- string:
- name: gerritChangeNumber
- default: '$GERRIT_CHANGE_NUMBER'
- description: 'Changeset number in Gerrit'
+ name: gerritRefspec
+ default: '$GERRIT_REFSPEC'
+ description: 'RefSpec number in Gerrit'
- string:
- name: gerritPatchsetNumber
- default: '$GERRIT_PATCHSET_NUMBER'
- description: 'PatchSet number in Gerrit'
+ name: gerritComment
+ default: '$GERRIT_EVENT_COMMENT_TEXT'
+ description: 'RefSpec number in Gerrit'
- string:
name: cordRepoUrl
@@ -676,35 +979,35 @@
description: 'The URL of the CORD Project repository'
- string:
+ name: configBaseDir
+ default: 'pod-configs'
+ description: 'The directory inside the POD configs repository'
+
+ - string:
+ name: configDeploymentDir
+ default: 'deployment-configs'
+ description: 'The deployment configs folder'
+
+ - string:
+ name: configKubernetesDir
+ default: 'kubernetes-configs'
+ description: 'The kubernetes config folder'
+
+ - string:
+ name: configFileName
+ default: '{config-pod}'
+ description: 'The config file'
+
+ - string:
name: podName
default: '{config-pod}'
- string:
- name: deploymentConfigFile
- default: 'pod-configs/deployment-configs/{config-pod}.yaml'
- description: 'Path of deployment config file'
-
- - string:
- name: kindVolthaValuesFile
- default: 'pod-configs/kubernetes-configs/voltha/{config-pod}.yml'
- description: 'Path of kind-voltha values override file'
-
- - string:
name: sadisConfigFile
- default: 'voltha/voltha-system-tests/tests/data/{config-pod}-sadis.json'
+ default: 'voltha-system-tests/tests/data/{config-pod}-sadis.json'
description: 'Path of SADIS config to load'
- string:
- name: localConfigDir
- default: null
- description: 'If specified, config file paths are relative to this dir; otherwise $WORKSPACE'
-
- - string:
- name: configRepo
- default: 'pod-configs'
- description: 'A repository containing the config files, will be checked out if specified'
-
- - string:
name: oltDebVersionMaster
default: '{oltDebVersionMaster}'
description: 'OLT Software version to install for master branch builds'
@@ -730,15 +1033,156 @@
description: "Re-install OLT software"
- string:
- name: manualBranch
- default: '{manualBranch}'
- description: "If a non-empty string, build manually on a specific branch, not with a specific patchset"
-
- - string:
name: extraRobotArgs
default: '{default-test-args}'
description: 'Arguments to pass to robot'
+ - string:
+ name: volthaSystemTestsChange
+ default: '{volthaSystemTestsChange}'
+ description: 'Download a change for gerrit in the voltha-system-tests repo, example value: "refs/changes/79/18779/13"'
+
+ - string:
+ name: volthaHelmChartsChange
+ default: '{volthaHelmChartsChange}'
+ description: 'Download a change for gerrit in the voltha-helm-charts repo, example value: "refs/changes/79/18779/13"'
+
+ - string:
+ name: workflow
+ default: '{workflow}'
+ description: 'Installs and uses the specified work flow on the POD'
+
+ - string:
+ name: extraHelmFlags
+ default: '{extraHelmFlags}'
+ description: 'Helm flags (passed to each helm command)'
+
+ # deprecated parameters, remove after VOLTHA-2.8 is released
+ - string:
+ name: deploymentConfigFile
+ default: 'pod-configs/deployment-configs/{config-pod}.yaml'
+ description: 'Path of deployment config file'
+
+ - string:
+ name: kindVolthaValuesFile
+ default: 'pod-configs/kubernetes-configs/voltha/{config-pod}.yml'
+ description: 'Path of kind-voltha values override file'
+
+ - string:
+ name: manifestUrl
+ default: '{gerrit-server-url}/{voltha-test-manifest-repo}'
+ description: 'URL to the repo manifest'
+
+ - string:
+ name: gerritChangeNumber
+ default: '$GERRIT_CHANGE_NUMBER'
+ description: 'Changeset number in Gerrit'
+
+ - string:
+ name: gerritPatchsetNumber
+ default: '$GERRIT_PATCHSET_NUMBER'
+ description: 'PatchSet number in Gerrit'
+
+ - string:
+ name: localConfigDir
+ default: null
+ description: 'If specified, config file paths are relative to this dir; otherwise $WORKSPACE'
+
+ - string:
+ name: configRepo
+ default: 'pod-configs'
+ description: 'A repository containing the config files, will be checked out if specified'
+
+ - string:
+ name: manualBranch
+ default: '{manualBranch}'
+ description: "If a non-empty string, build manually as a specific branch, not with a specific patchset"
+
+- job-template:
+ name: '{name}'
+ id: verify_physical_voltha_patchset_auto
+ description: |
+ <!-- Managed by Jenkins Job Builder -->
+ Automated build on POD {config-pod} using {pipeline-script} <br /><br />
+ Created from job-template {id} from ci-management/jjb/voltha-e2e.yaml <br />
+ Created by Andy Bavier, andy@opennetworking.org <br />
+ Copyright (c) 2019 Open Networking Foundation (ONF)
+ sandbox: true
+ pipeline-script: 'voltha/master/tucson-build-and-test.groovy'
+ default-test-args: '-i sanityORDeleteOLT -X'
+ branch-pattern: '{all-branches-regexp}'
+ build-node: 'tucson-pod'
+ config-pod: 'tucson-pod'
+ workflow: 'att'
+ profile: 'Default'
+ oltDebVersionMaster: 'openolt_asfvolt16-3.4.1-dev-c5dcc7f2b27df1c42868b3a79a5416808511bb1d-40G-NNI.deb'
+ oltDebVersionVoltha23: 'openolt_asfvolt16-3.3.3-1a5d68b50d8bcc5ba6cb1630d3294c30c37cd2f5-40G-NNI.deb'
+ volthaSystemTestsChange: ''
+ volthaHelmChartsChange: ''
+ extraHelmFlags: ''
+
+ properties:
+ - cord-infra-properties:
+ build-days-to-keep: '{build-days-to-keep}'
+ artifact-num-to-keep: '{artifact-num-to-keep}'
+
+ <<: *voltha-physical-patchset-parameters
+
+ project-type: pipeline
+ concurrent: true
+
+ dsl: !include-raw-escape: pipeline/{pipeline-script}
+
+ triggers:
+ - gerrit:
+ server-name: '{gerrit-server-name}'
+ dependency-jobs: '{dependency-jobs}'
+ silent-start: false
+ successful-message: "PASSED hardware test"
+ failure-message: "FAILED hardware test"
+ unstable-message: "UNSTABLE hardware test"
+ trigger-on:
+ - comment-added-event:
+ approval-category: 'Code-Review'
+ approval-value: '+2'
+ projects:
+ - project-compare-type: REG_EXP
+ project-pattern: '^(voltha-openolt-adapter|voltha-openonu-adapter|voltha-openonu-adapter-go|voltha-go)$'
+ branches:
+ - branch-compare-type: REG_EXP
+ branch-pattern: '{branch-pattern}'
+
+- job-template:
+ name: '{name}'
+ id: verify_physical_voltha_patchset_manual
+ description: |
+ <!-- Managed by Jenkins Job Builder -->
+ Automated build on POD {config-pod} using {pipeline-script} <br /><br />
+ Created from job-template {id} from ci-management/jjb/voltha-e2e.yaml <br />
+ Created by Andy Bavier, andy@opennetworking.org <br />
+ Copyright (c) 2019 Open Networking Foundation (ONF)
+ sandbox: true
+ build-node: 'tucson-pod'
+ config-pod: 'tucson-pod'
+ oltDebVersionMaster: 'openolt_asfvolt16-3.4.1-dev-c5dcc7f2b27df1c42868b3a79a5416808511bb1d-40G-NNI.deb'
+ oltDebVersionVoltha23: 'openolt_asfvolt16-3.3.3-1a5d68b50d8bcc5ba6cb1630d3294c30c37cd2f5-40G-NNI.deb'
+ pipeline-script: 'voltha/master/tucson-build-and-test.groovy'
+ trigger-string: 'hardware test'
+ default-test-args: '-i sanityORDeleteOLT -X'
+ branch-pattern: '{all-branches-regexp}'
+ volthaSystemTestsChange: ''
+ volthaHelmChartsChange: ''
+ workflow: 'att'
+ extraHelmFlags: ''
+
+ properties:
+ - cord-infra-properties:
+ build-days-to-keep: '{build-days-to-keep}'
+ artifact-num-to-keep: '{artifact-num-to-keep}'
+
+ <<: *voltha-physical-patchset-parameters
+
+
project-type: pipeline
concurrent: true
@@ -762,8 +1206,7 @@
project-pattern: '^(voltha-go|voltha-openolt-adapter|voltha-openonu-adapter|voltha-openonu-adapter-go|voltha-api-server|voltha-system-tests|ofagent-py|ofagent-go|voltha-onos|kind-voltha|voltha-helm-charts)$'
branches:
- branch-compare-type: REG_EXP
- branch-pattern: '{all-branches-regexp}'
-
+ branch-pattern: '{branch-pattern}'
- job-template:
name: '{name}'
@@ -775,7 +1218,7 @@
Created by Andy Bavier, andy@opennetworking.org <br />
Copyright (c) 2019 Open Networking Foundation (ONF)
sandbox: true
- pipeline-script: 'voltha-physical-build-and-tests.groovy'
+ pipeline-script: 'voltha/voltha-2.7/voltha-physical-build-and-tests.groovy'
properties:
- cord-infra-properties:
@@ -814,6 +1257,11 @@
description: 'PatchSet number in Gerrit'
- string:
+ name: gerritRefspec
+ default: '$GERRIT_REFSPEC'
+ description: 'RefSpec number in Gerrit'
+
+ - string:
name: cordRepoUrl
default: '{gerrit-server-url}'
description: 'The URL of the CORD Project repository'
@@ -834,7 +1282,7 @@
- string:
name: sadisConfigFile
- default: 'voltha/voltha-system-tests/tests/data/{config-pod}-sadis.json'
+ default: 'voltha-system-tests/tests/data/{config-pod}-sadis.json'
description: 'Path of SADIS config to load'
- string:
diff --git a/jjb/voltha-publish.yaml b/jjb/voltha-publish.yaml
deleted file mode 100644
index d8b8246..0000000
--- a/jjb/voltha-publish.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
----
-# CORD docker image building + dockerhub publishing tasks
-
-- project:
- name: voltha-publish
-
- # add repos that have documentation to the project list in both jobs
- jobs:
- - 'voltha-publish':
- project-regexp: 'voltha'
- branch-regexp: '^(master|voltha-1.6|voltha-1.7)$'
- build-node: 'ubuntu16.04-basebuild-1c-2g'
- dependency-jobs: ''
-
-- job-template:
- id: voltha-publish
- name: 'voltha-publish'
- description: |
- Created by {id} job-template from ci-management/jjb/voltha-publish.yaml
-
- triggers:
- - cord-infra-gerrit-trigger-merge:
- gerrit-server-name: '{gerrit-server-name}'
- project-regexp: '{project-regexp}'
- branch-regexp: '{branch-regexp}'
- file-include-regexp: '{all-files-regexp}'
- dependency-jobs: '{dependency-jobs}'
-
- properties:
- - cord-infra-properties:
- build-days-to-keep: '{build-days-to-keep}'
- artifact-num-to-keep: '{artifact-num-to-keep}'
-
- wrappers:
- - lf-infra-wrappers:
- build-timeout: '{build-timeout}'
- jenkins-ssh-credential: '{jenkins-ssh-credential}'
-
- parameters:
- - string:
- name: buildNode
- default: '{build-node}'
- description: 'Name of the Jenkins node to run the job on'
-
- - string:
- name: manifestUrl
- default: '{gerrit-server-url}/{cord-repo-manifest}'
- description: 'URL to the repo manifest'
-
- - string:
- name: manifestBranch
- default: '$GERRIT_BRANCH'
- description: 'Name of the repo branch to use'
-
- - string:
- name: failureEmail
- default: '{failure-email-address}, $GERRIT_PATCHSET_UPLOADER_EMAIL'
- description: 'On job failure, send an email to these addresses'
-
- project-type: pipeline
- concurrent: false
-
- dsl: !include-raw-escape: pipeline/voltha-publish.groovy
diff --git a/jjb/voltha-scale.yaml b/jjb/voltha-scale.yaml
index a4466e0..606c3bc 100644
--- a/jjb/voltha-scale.yaml
+++ b/jjb/voltha-scale.yaml
@@ -9,91 +9,15 @@
jobs:
# name format is <job-template>-<olts>-<pon>-<onus>-<setup>
- # OpenONU PY Scale
- - 'voltha-scale-measurements':
- name: 'voltha-openonu-py-scale-measurements-2.6-2-16-32-dt-subscribers'
- 'disable-job': false
- build-node: 'onf-pod1-head-node'
- time-trigger: "H H/4 * * *"
- olts: 2
- pons: 16
- onus: 32
- withFlows: true
- provisionSubscribers: true
- workflow: dt
- release: voltha-2.6
- withEapol: false
- withDhcp: false
- withIgmp: false
- openonuAdapterReplicas: 8
- extraHelmFlags: '--set use_openonu_adapter_go=false'
- bbsimImg: ''
- rwCoreImg: ''
- ofAgentImg: ''
- openoltAdapterImg: ''
- openonuAdapterImg: ''
- openonuAdapterGoImg: ''
- onosImg: ''
-
- - 'voltha-scale-measurements':
- name: 'voltha-openonu-py-scale-measurements-2.6-2-16-32-att-subscribers'
- 'disable-job': false
- build-node: 'onf-pod1-head-node'
- time-trigger: "H H/4 * * *"
- olts: 2
- pons: 16
- onus: 32
- withFlows: true
- provisionSubscribers: true
- release: voltha-2.6
- withEapol: true
- withDhcp: true
- withIgmp: false
- openonuAdapterReplicas: 8
- extraHelmFlags: '--set use_openonu_adapter_go=false,authRetry=true,dhcpRetry=true'
- bbsimImg: ''
- rwCoreImg: ''
- ofAgentImg: ''
- openoltAdapterImg: ''
- openonuAdapterImg: ''
- openonuAdapterGoImg: ''
- onosImg: ''
-
- - 'voltha-scale-measurements':
- name: 'voltha-openonu-py-scale-measurements-2.6-2-16-32-tt-subscribers'
- 'disable-job': false
- build-node: 'onf-pod1-head-node'
- time-trigger: "H H/4 * * *"
- olts: 2
- pons: 16
- onus: 32
- withFlows: true
- workflow: tt
- provisionSubscribers: true
- release: voltha-2.6
- withEapol: false
- withDhcp: true
- withIgmp: false
- openonuAdapterReplicas: 8
- extraHelmFlags: '--set use_openonu_adapter_go=false,dhcpRetry=true,defaults.incremental_evto_update=true'
- bbsimImg: ''
- rwCoreImg: ''
- ofAgentImg: ''
- openoltAdapterImg: ''
- openonuAdapterImg: ''
- openonuAdapterGoImg: ''
- onosImg: ''
-
# this job will be used to test custom images without disrupting tests on master
- 'voltha-scale-measurements':
name: 'voltha-scale-measurements-master-experimental'
- build-node: 'voltha-scale-2'
- pipeline-script: 'voltha-scale-test-etcd-kafka-bitnami.groovy'
+ build-node: 'voltha-scale-1'
'disable-job': false
# trigger on Feb 29th (a.k.a only trigger it manually)
time-trigger: "H 0 29 2 *"
withMonitoring: true
- olts: 2
+ olts: 1
pons: 16
onus: 32
withFlows: true
@@ -103,21 +27,20 @@
withIgmp: false
extraHelmFlags: '--set authRetry=true,dhcpRetry=true'
withPcap: false
- openoltAdapterImg: gcgirish/voltha-openolt-adapter:writeThroughCache
- 'voltha-scale-measurements':
name: 'voltha-scale-measurements-master-experimental-multi-stack'
build-node: 'voltha-scale-2'
- pipeline-script: 'voltha-scale-multi-stack.groovy'
- 'disable-job': false
+ pipeline-script: 'voltha/master/voltha-scale-multi-stack.groovy'
+ 'disable-job': true
# trigger on Feb 29th (a.k.a only trigger it manually)
time-trigger: "H 0 29 2 *"
withMonitoring: true
logLevel: WARN
- volthaStacks: 10
+ volthaStacks: 2
olts: 2
- pons: 16
- onus: 32
+ pons: 2
+ onus: 2
withFlows: true
provisionSubscribers: true
workflow: dt
@@ -125,60 +48,11 @@
withDhcp: false
withIgmp: false
-
- # jobs for 512 ONUs with a 8 openonu-adapters and clustered ONOS (1 OLT)
- - 'voltha-scale-measurements':
- name: 'voltha-scale-measurements-master-1-16-32-att-subscribers'
- 'disable-job': false
- build-node: 'onf-pod1-head-node'
- time-trigger: "H H/4 * * *"
- olts: 1
- pons: 16
- onus: 32
- withFlows: true
- provisionSubscribers: true
- withEapol: true
- withDhcp: true
- withIgmp: false
- extraHelmFlags: '--set authRetry=true,dhcpRetry=true'
-
- - 'voltha-scale-measurements':
- name: 'voltha-scale-measurements-master-1-16-32-tt-subscribers'
- 'disable-job': false
- build-node: 'onf-pod1-head-node'
- time-trigger: "H H/4 * * *"
- olts: 1
- pons: 16
- onus: 32
- withFlows: true
- provisionSubscribers: true
- workflow: tt
- withEapol: false
- withDhcp: true
- withIgmp: true
- extraHelmFlags: '--set authRetry=true,dhcpRetry=true'
-
- - 'voltha-scale-measurements':
- name: 'voltha-scale-measurements-master-1-16-32-dt-subscribers'
- 'disable-job': false
- build-node: 'onf-pod1-head-node'
- time-trigger: "H H/4 * * *"
- olts: 1
- pons: 16
- onus: 32
- withFlows: true
- provisionSubscribers: true
- workflow: dt
- withEapol: false
- withDhcp: false
- withIgmp: false
- extraHelmFlags: '--set authRetry=true,dhcpRetry=true'
-
- # jobs for 1024 ONUs with a 8 openonu-adapters and clustered ONOS (2 OLTs)
+ # jobs for 1024 ONUs with openonu-go and clustered ONOS (2 OLTs)
- 'voltha-scale-measurements':
name: 'voltha-scale-measurements-master-2-16-32-att-subscribers'
'disable-job': false
- build-node: 'onf-pod1-head-node'
+ build-node: 'voltha-scale-1'
time-trigger: "H H/4 * * *"
olts: 2
pons: 16
@@ -188,12 +62,13 @@
withEapol: true
withDhcp: true
withIgmp: false
- extraHelmFlags: '--set authRetry=true,dhcpRetry=true'
+ extraHelmFlags: '--set authRetry=true,dhcpRetry=true --set global.log_correlation.enabled=true -f /home/jenkins/voltha-scale/voltha-values.yaml'
+ onosImg: andreacampanella/voltha-onos:olt-group-pkt-req-mcast-fix
- 'voltha-scale-measurements':
name: 'voltha-scale-measurements-master-2-16-32-dt-subscribers'
'disable-job': false
- build-node: 'onf-pod1-head-node'
+ build-node: 'voltha-scale-1'
time-trigger: "H H/4 * * *"
olts: 2
pons: 16
@@ -204,12 +79,12 @@
withEapol: false
withDhcp: false
withIgmp: false
- extraHelmFlags: '--set authRetry=true,dhcpRetry=true'
+ extraHelmFlags: '--set authRetry=true,dhcpRetry=true --set global.log_correlation.enabled=true -f /home/jenkins/voltha-scale/voltha-values.yaml '
- 'voltha-scale-measurements':
name: 'voltha-scale-measurements-master-2-16-32-tt-subscribers'
'disable-job': false
- build-node: 'onf-pod1-head-node'
+ build-node: 'voltha-scale-1'
time-trigger: "H H/4 * * *"
olts: 2
pons: 16
@@ -222,14 +97,15 @@
withIgmp: true
onosReplicas: 3
atomixReplicas: 3
- extraHelmFlags: '--set authRetry=true,dhcpRetry=true'
+ extraHelmFlags: '--set authRetry=true,dhcpRetry=true --set global.rw_core.core_timeout=60s --set global.log_correlation.enabled=true -f /home/jenkins/voltha-scale/voltha-values.yaml '
+ onosImg: andreacampanella/voltha-onos:olt-group-pkt-req-mcast-fix
# multi-stack jobs
- 'voltha-scale-measurements':
name: 'voltha-scale-measurements-master-10-stacks-2-16-32-att-subscribers'
build-node: 'voltha-scale-2'
- pipeline-script: 'voltha-scale-multi-stack.groovy'
'disable-job': false
+ pipeline-script: 'voltha/master/voltha-scale-multi-stack.groovy'
time-trigger: "H H/4 * * *"
withMonitoring: false
logLevel: WARN
@@ -243,12 +119,13 @@
withEapol: true
withDhcp: true
withIgmp: false
+ extraHelmFlags: "-f /home/jenkins/voltha-scale/voltha-values.yaml "
- 'voltha-scale-measurements':
name: 'voltha-scale-measurements-master-10-stacks-2-16-32-dt-subscribers'
build-node: 'voltha-scale-2'
- pipeline-script: 'voltha-scale-multi-stack.groovy'
'disable-job': false
+ pipeline-script: 'voltha/master/voltha-scale-multi-stack.groovy'
time-trigger: "H H/4 * * *"
withMonitoring: false
logLevel: WARN
@@ -262,12 +139,13 @@
withEapol: false
withDhcp: false
withIgmp: false
+ extraHelmFlags: "-f /home/jenkins/voltha-scale/voltha-values.yaml "
- 'voltha-scale-measurements':
name: 'voltha-scale-measurements-master-10-stacks-2-16-32-tt-subscribers'
build-node: 'voltha-scale-2'
- pipeline-script: 'voltha-scale-multi-stack.groovy'
'disable-job': false
+ pipeline-script: 'voltha/master/voltha-scale-multi-stack.groovy'
time-trigger: "H H/4 * * *"
withMonitoring: false
logLevel: WARN
@@ -281,15 +159,16 @@
withEapol: false
withDhcp: true
withIgmp: true
+ extraHelmFlags: "-f /home/jenkins/voltha-scale/voltha-values.yaml "
- # voltha-2.6 Jobs
+ # voltha-2.7 Jobs
- 'voltha-scale-measurements':
- name: 'voltha-scale-measurements-voltha-2.6-1-16-32-att-subscribers'
- pipeline-script: 'voltha-scale-test-2.6.groovy'
+ name: 'voltha-scale-measurements-voltha-2.7-2-16-32-att-subscribers'
'disable-job': false
- build-node: 'onf-pod1-head-node'
+ pipeline-script: 'voltha/voltha-2.7/voltha-scale-test.groovy'
+ build-node: 'voltha-scale-1'
time-trigger: "H H/4 * * *"
- olts: 1
+ olts: 2
pons: 16
onus: 32
withFlows: true
@@ -298,7 +177,7 @@
withDhcp: true
withIgmp: false
extraHelmFlags: '--set defaults.rw_core.timeout=30s '
- release: voltha-2.6
+ release: voltha-2.7
bbsimImg: ''
rwCoreImg: ''
ofAgentImg: ''
@@ -308,10 +187,10 @@
onosImg: ''
- 'voltha-scale-measurements':
- name: 'voltha-scale-measurements-voltha-2.6-2-16-32-dt-subscribers'
- pipeline-script: 'voltha-scale-test-2.6.groovy'
+ name: 'voltha-scale-measurements-voltha-2.7-2-16-32-dt-subscribers'
'disable-job': false
- build-node: 'onf-pod1-head-node'
+ pipeline-script: 'voltha/voltha-2.7/voltha-scale-test.groovy'
+ build-node: 'voltha-scale-1'
time-trigger: "H H/4 * * *"
olts: 2
pons: 16
@@ -323,7 +202,7 @@
withDhcp: false
withIgmp: false
extraHelmFlags: '--set defaults.rw_core.timeout=30s '
- release: voltha-2.6
+ release: voltha-2.7
bbsimImg: ''
rwCoreImg: ''
ofAgentImg: ''
@@ -333,12 +212,12 @@
onosImg: ''
- 'voltha-scale-measurements':
- name: 'voltha-scale-measurements-voltha-2.6-1-16-32-tt-subscribers'
- pipeline-script: 'voltha-scale-test-2.6.groovy'
+ name: 'voltha-scale-measurements-voltha-2.7-2-16-32-tt-subscribers'
'disable-job': false
- build-node: 'onf-pod1-head-node'
+ pipeline-script: 'voltha/voltha-2.7/voltha-scale-test.groovy'
+ build-node: 'voltha-scale-1'
time-trigger: "H H/4 * * *"
- olts: 1
+ olts: 2
pons: 16
onus: 32
withFlows: true
@@ -348,7 +227,7 @@
withDhcp: true
withIgmp: true
extraHelmFlags: '--set defaults.rw_core.timeout=30s '
- release: voltha-2.6
+ release: voltha-2.7
bbsimImg: ''
rwCoreImg: ''
ofAgentImg: ''
@@ -361,8 +240,8 @@
- 'voltha-scale-measurements':
name: 'voltha-scale-measurements-2.6-10-stacks-2-16-32-att-subscribers'
build-node: 'voltha-scale-2'
+ 'disable-job': true
pipeline-script: 'voltha-scale-multi-stack.groovy'
- 'disable-job': false
time-trigger: "H H/4 * * *"
withMonitoring: false
logLevel: WARN
@@ -377,12 +256,19 @@
withEapol: true
withDhcp: true
withIgmp: false
+ bbsimImg: ''
+ rwCoreImg: ''
+ ofAgentImg: ''
+ openoltAdapterImg: ''
+ openonuAdapterImg: ''
+ openonuAdapterGoImg: ''
+ onosImg: ''
- 'voltha-scale-measurements':
name: 'voltha-scale-measurements-2.6-10-stacks-2-16-32-dt-subscribers'
build-node: 'voltha-scale-2'
pipeline-script: 'voltha-scale-multi-stack.groovy'
- 'disable-job': false
+ 'disable-job': true
time-trigger: "H H/4 * * *"
withMonitoring: false
logLevel: WARN
@@ -397,12 +283,19 @@
withEapol: false
withDhcp: false
withIgmp: false
+ bbsimImg: ''
+ rwCoreImg: ''
+ ofAgentImg: ''
+ openoltAdapterImg: ''
+ openonuAdapterImg: ''
+ openonuAdapterGoImg: ''
+ onosImg: ''
- 'voltha-scale-measurements':
name: 'voltha-scale-measurements-2.6-10-stacks-2-16-32-tt-subscribers'
build-node: 'voltha-scale-2'
pipeline-script: 'voltha-scale-multi-stack.groovy'
- 'disable-job': false
+ 'disable-job': true
time-trigger: "H H/4 * * *"
withMonitoring: false
logLevel: WARN
@@ -417,11 +310,18 @@
withEapol: false
withDhcp: true
withIgmp: true
+ bbsimImg: ''
+ rwCoreImg: ''
+ ofAgentImg: ''
+ openoltAdapterImg: ''
+ openonuAdapterImg: ''
+ openonuAdapterGoImg: ''
+ onosImg: ''
# per patchset job
- 'voltha-scale-measurements':
name: 'voltha-scale-measurements-patchset-1-16-32-att-subscribers'
- build-node: 'onf-pod1-head-node'
+ build-node: 'voltha-scale-1'
olts: 1
pons: 16
onus: 32
@@ -435,7 +335,7 @@
- 'voltha-scale-measurements':
name: 'voltha-scale-measurements-patchset-1-16-32-tt-subscribers'
- build-node: 'onf-pod1-head-node'
+ build-node: 'voltha-scale-1'
olts: 1
pons: 16
onus: 32
@@ -450,7 +350,7 @@
- 'voltha-scale-measurements':
name: 'voltha-scale-measurements-patchset-1-16-32-dt-subscribers'
- build-node: 'onf-pod1-head-node'
+ build-node: 'voltha-scale-1'
olts: 1
pons: 16
onus: 32
@@ -477,7 +377,7 @@
# development matrix
- 'voltha-scale-matrix':
name: 'voltha-scale-matrix-voltha-master'
- build-node: 'onf-pod1-head-node'
+ build-node: 'voltha-scale-1'
onosReplicas: 3
atomixReplicas: 3
kafkaReplicas: 3
@@ -487,7 +387,7 @@
# list of parameters for the VOLTHA Jobs,
# used as anchor so that can be shared across multiple jobs
-# to use in a job: `parameters: *voltha-build-job-parameters`
+# to use in a job: `parameters: *voltha-scale-job-parameters`
- voltha-scale-job-parameters: &voltha-scale-job-parameters
name: voltha-scale-job-parameters
parameters:
@@ -499,7 +399,7 @@
- string:
name: release
default: '{release}'
- description: 'Version of the code to test (matches a branch in kind-voltha and voltha-system-tests repos)'
+ description: 'Version of the code to test (matches a branch in voltha-helm-charts and voltha-system-tests repos)'
- string:
name: buildNode
@@ -627,16 +527,21 @@
description: 'How often ONOS should poll for ports, flows and meters'
- string:
+ name: onosGroupInterval
+ default: '{onosGroupInterval}'
+ description: 'How often ONOS should poll for groups'
+
+ - string:
+ name: flowObjWorkerThreads
+ default: '{flowObjWorkerThreads}'
+ description: 'How many threads are used by the FlowObjectiveManager worker pool'
+
+ - string:
name: bbsimImg
default: '{bbsimImg}'
description: 'Custom image selection for BBSIM (repo:tag)'
- string:
- name: bbsimChart
- default: '{bbsimChart}'
- description: 'BBSim chart name (or location on file system)'
-
- - string:
name: rwCoreImg
default: '{rwCoreImg}'
description: 'Custom image selection for VOLTHA (repo:tag)'
@@ -647,21 +552,11 @@
description: 'Custom image selection for OfAgent (repo:tag), only supports the go version'
- string:
- name: volthaChart
- default: '{volthaChart}'
- description: 'VOLTHA chart name (or location on file system)'
-
- - string:
name: openoltAdapterImg
default: '{openoltAdapterImg}'
description: 'Custom image selection for Openolt Adapter (repo:tag)'
- string:
- name: openoltAdapterChart
- default: '{openoltAdapterChart}'
- description: 'OpenOLT chart name (or location on file system)'
-
- - string:
name: openonuAdapterImg
default: '{openonuAdapterImg}'
description: 'Custom image selection for Openonu Adapter (repo:tag)'
@@ -672,26 +567,11 @@
description: 'Custom image selection for Openonu Go Adapter (repo:tag)'
- string:
- name: openonuAdapterChart
- default: '{openonuAdapterChart}'
- description: 'OpenONU chart name (or location on file system)'
-
- - string:
name: onosImg
default: '{onosImg}'
description: 'Custom image selection for Openonu Adapter (repo:tag)'
- string:
- name: onosChart
- default: '{onosChart}'
- description: 'ONOS chart name (or location on file system)'
-
- - string:
- name: radiusChart
- default: '{radiusChart}'
- description: 'freeradius chart name (or location on file system)'
-
- - string:
name: volthaSystemTestsChange
default: '{volthaSystemTestsChange}'
description: 'Download a change for gerrit in the voltha-system-tests repo, example value: "refs/changes/79/18779/13"'
@@ -699,7 +579,7 @@
- string:
name: volthaHelmChartsChange
default: '{volthaHelmChartsChange}'
- description: 'Download a change for gerrit in the kind-voltha repo, example value: "refs/changes/32/19132/1"'
+ description: 'Download a change for gerrit in the voltha-helm-charts repo, example value: "refs/changes/32/19132/1"'
- bool:
name: inMemoryEtcdStorage
@@ -716,6 +596,9 @@
name: kindVolthaChange
default: '{kindVolthaChange}'
description: 'Download a change for gerrit in the kind-voltha repo, example value: "refs/changes/32/19132/1"'
+ - bool:
+ name: withMibTemplate
+ default: true
- job-template:
id: 'voltha-scale-measurements'
@@ -739,7 +622,7 @@
jenkins-ssh-credential: '{jenkins-ssh-credential}'
# default values
- pipeline-script: 'voltha-scale-test.groovy'
+ pipeline-script: 'voltha/master/voltha-scale-test.groovy'
release: master
bbsimImg: voltha/bbsim:master
@@ -779,6 +662,8 @@
etcdReplicas: 3
extraHelmFlags: ''
onosStatInterval: 5
+ onosGroupInterval: 1
+ flowObjWorkerThreads: 12
volthaSystemTestsChange: ''
volthaHelmChartsChange: ''
kindVolthaChange: ''
@@ -841,7 +726,7 @@
jenkins-ssh-credential: '{jenkins-ssh-credential}'
# default values
- pipeline-script: 'voltha-scale-test.groovy'
+ pipeline-script: 'voltha/master/voltha-scale-test.groovy'
release: master
bbsimImg: voltha/bbsim:master
@@ -881,6 +766,8 @@
etcdReplicas: 1
extraHelmFlags: ''
onosStatInterval: 5
+ onosGroupInterval: 1
+ flowObjWorkerThreads: 12
volthaSystemTestsChange: ''
volthaHelmChartsChange: ''
kindVolthaChange: ''
diff --git a/jjb/voltha-test/voltha-nightly-jobs.yaml b/jjb/voltha-test/voltha-nightly-jobs.yaml
index 2b8a04e..e5ac6bc 100644
--- a/jjb/voltha-test/voltha-nightly-jobs.yaml
+++ b/jjb/voltha-test/voltha-nightly-jobs.yaml
@@ -19,6 +19,11 @@
description: 'DEPRECATED - use buildNode instead'
- string:
+ name: logLevel
+ default: '{logLevel}'
+ description: 'Log level for all the components'
+
+ - string:
name: cordRepoUrl
default: '{gerrit-server-url}'
description: 'The URL of the CORD Project repository'
@@ -72,6 +77,7 @@
default: '{work-flow}'
description: 'Installs and uses the specified work flow on the POD'
+ # openonu-go only supports a single replica, remove after 2.8
- string:
name: NumOfOpenOnu
default: '{num-of-openonu}'
@@ -82,6 +88,21 @@
default: '{num-of-onos}'
description: 'Installs the specified Number of ONOS instances'
+ - bool:
+ name: installBBSim
+ default: false
+ description: "Install the BBSim container"
+
+ - string:
+ name: onuNumber
+ default: '{num-of-onus}'
+ description: "Number of ONUs per PON port"
+
+ - string:
+ name: ponNumber
+ default: '{num-of-ponports}'
+ description: "Number of PON Ports"
+
- string:
name: NumOfAtomix
default: '{num-of-atomix}'
@@ -97,6 +118,7 @@
default: '{reinstall-olt}'
description: "Re-install olt software bringing up CORD"
+ # withKind is not used in the master branch pipeline, remove after 2.8
- bool:
name: withKind
default: '{with-kind}'
@@ -118,15 +140,23 @@
description: 'Download a change for gerrit in the voltha-system-tests repo, example value: "refs/changes/79/18779/13"'
- string:
+ name: volthaHelmChartsChange
+ default: '{volthaHelmChartsChange}'
+ description: 'Download a change for gerrit in the voltha-helm-charts repo, example value: "refs/changes/32/19132/1"'
+
+ # kind-voltha is not used in the master branch pipeline, remove after 2.8
+ - string:
name: kindVolthaChange
default: '{kindVolthaChange}'
description: 'Download a change for gerrit in the kind-voltha repo, example value: "refs/changes/32/19132/1"'
+ # NOTE is this needed/used?
- string:
name: cordTesterChange
default: '{cordTesterChange}'
description: 'Download a change for gerrit in the kind-voltha repo, example value: "refs/changes/32/19132/1"'
+ # this is used in the Adtran DT job
- string:
name: openoltAdapterChart
default: '{openoltAdapterChart}'
@@ -151,13 +181,56 @@
id: build_voltha_pod_release_timer
disabled: '{disable-job}'
description: |
- Manual Build on POD {config-pod}, using {Jenkinsfile} in {gerrit-server-url}/voltha-system-tests' <br /><br />
+ Manual Build on POD {config-pod}, using pipeline/{pipeline-script} in {gerrit-server-url}/ci-management <br /><br />
Created from job-template {id} from ci-management/jjb/voltha-test/voltha-nightly-jobs.yaml <br />
Created by QA (Suchitra Vemuri - suchitra@opennetworking.org ) <br />
Copyright (c) 2018 Open Networking Foundation (ONF)
<<: *voltha-pipe-job-boiler-plate
+ # default values
+ pipeline-script: 'voltha/master/physical-build.groovy'
+ VolthaEtcdPort: 2379
+ num-of-openonu: 1
+ num-of-onos: 3
+ num-of-atomix: 3
+ test-repo: 'voltha-system-tests'
+ release: 'master'
+ name-extension: ''
+ branch: 'master'
+ configurePod: true
+ volthaHelmChartsChange: ''
+ profile: 'Default'
+ logLevel: 'DEBUG'
+
+ <<: *voltha-build-job-parameters
+
+ concurrent: true
+ project-type: pipeline
+ dsl: !include-raw-escape: ../pipeline/{pipeline-script}
+
+ triggers:
+ - timed: |
+ TZ=America/Los_Angeles
+ H {time} * * *
+
+# this job template is defined to support VOLTHA-2.7 builds, remove after 2.8
+- job-template:
+ name: 'build_{config-pod}_{profile}{name-extension}_voltha_{release}'
+ id: build_voltha_pod_release_legacy
+ disabled: '{disable-job}'
+ description: |
+ Automatic Build on POD {config-pod}, using {Jenkinsfile} in {gerrit-server-url}/voltha-system-tests <br /><br />
+ Created from job-template {id} from ci-management/jjb/voltha-test/voltha-nightly-jobs.yaml <br />
+ Created by QA (Suchitra Vemuri - suchitra@opennetworking.org ) <br />
+ This job is triggered upon completion of a dependent _test job <br />
+ Copyright (c) 2020 Open Networking Foundation (ONF)
+
+ <<: *voltha-pipe-job-boiler-plate
VolthaEtcdPort: '2379'
+ release: '2.7'
+ branch: 'voltha-2.7'
+ volthaHelmChartsChange: '' # this is not supported in the VOLTHA-2.7 build, but the parameters are shared, ideally we should split them
+ logLevel: 'DEBUG'
<<: *voltha-build-job-parameters
@@ -176,36 +249,6 @@
TZ=America/Los_Angeles
H {time} * * *
-- job-template:
- name: 'build_{config-pod}_{profile}{name-extension}_voltha_{release}'
- id: build_voltha_pod_release_dependent
- disabled: '{disable-job}'
- description: |
- Automatic Build on POD {config-pod}, using {Jenkinsfile} in {gerrit-server-url}/voltha-system-tests' <br /><br />
- Created from job-template {id} from ci-management/jjb/voltha-test/voltha-nightly-jobs.yaml <br />
- Created by QA (Suchitra Vemuri - suchitra@opennetworking.org ) <br />
- This job is triggered upon completion of a dependent _test job <br />
- Copyright (c) 2020 Open Networking Foundation (ONF)
-
- <<: *voltha-pipe-job-boiler-plate
- VolthaEtcdPort: '2379'
-
- <<: *voltha-build-job-parameters
-
- concurrent: true
-
- pipeline-scm:
- script-path: '{Jenkinsfile}'
- scm:
- - git:
- url: '{gerrit-server-url}/voltha-system-tests'
- branches:
- - '{branch}'
- triggers:
- - reverse:
- jobs: '{trigger-after}'
- result: 'success'
-
# VOLTHA Test Job
# This job is automatically triggered after a build job has successfully completed
- job-template:
@@ -327,8 +370,8 @@
result: 'success'
-# VOLTHA Test Job
-# This job is automatically triggered after a build job has successfully completed
+# VOLTHA Soak Test Job
+# This job is triggered by a timer defined in the job
- job-template:
name: 'build_{config-pod}_{profile}_voltha{name-extension}_{release}_test'
id: build_voltha_pod_soak_test
diff --git a/jjb/cord-test/voltha.yaml b/jjb/voltha-test/voltha.yaml
similarity index 74%
rename from jjb/cord-test/voltha.yaml
rename to jjb/voltha-test/voltha.yaml
index 157b607..12a4a36 100644
--- a/jjb/cord-test/voltha.yaml
+++ b/jjb/voltha-test/voltha.yaml
@@ -10,7 +10,7 @@
with-kind: false
power-switch: False
- work-flow: ''
+ work-flow: 'ATT'
in-band-management: false
num-of-openonu: '1'
num-of-onos: '1'
@@ -21,7 +21,8 @@
kindVolthaChange: ''
cordTesterChange: ''
oltAdapterAppLabel: 'adapter-open-olt'
-
+ num-of-onus: ''
+ num-of-ponports: ''
jobs:
# flex OCP pod with olt/onu - manual test job, voltha master build job
@@ -33,6 +34,7 @@
num-of-openonu: '1'
num-of-onos: '3'
num-of-atomix: '3'
+ VolthaEtcdPort: 9999
test-repo: 'voltha-system-tests'
Jenkinsfile: 'Jenkinsfile-voltha-build'
profile: '1T4GEM'
@@ -69,16 +71,11 @@
- 'build_voltha_pod_release_timer':
build-node: 'qa-testvm-pod'
config-pod: 'flex-ocp-cord'
- release: 'master'
- branch: 'master'
- num-of-openonu: '1'
- num-of-onos: '3'
- num-of-atomix: '3'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- configurePod: true
profile: 'Default'
time: '4'
+ VolthaEtcdPort: 9999
+ num-of-onos: '3'
+ num-of-atomix: '3'
# flex pod1 test job - uses tech profile on voltha branch
- 'build_voltha_pod_test':
@@ -91,11 +88,11 @@
profile: 'Default'
# flex OCP pod with olt/onu - Released versions Default tech profile and timer based job
- - 'build_voltha_pod_release_timer':
+ - 'build_voltha_pod_release_legacy':
build-node: 'qa-testvm-pod'
config-pod: 'flex-ocp-cord'
- release: '2.6'
- branch: 'voltha-2.6'
+ release: '2.7'
+ branch: 'voltha-2.7'
num-of-openonu: '1'
num-of-onos: '3'
num-of-atomix: '3'
@@ -104,25 +101,24 @@
configurePod: true
profile: '1T4GEM'
time: '1'
+ VolthaEtcdPort: 9999
# flex pod1 test job - released versions: uses tech profile on voltha branch
- 'build_voltha_pod_test':
build-node: 'qa-testvm-pod'
config-pod: 'flex-ocp-cord'
- 'disable-job': false
- release: '2.6'
- branch: 'voltha-2.6'
+ release: '2.7'
+ branch: 'voltha-2.7'
power-switch: True
test-repo: 'voltha-system-tests'
profile: '1T4GEM'
-
# flex OCP pod with olt/onu - Released versions Default tech profile and timer based job
- - 'build_voltha_pod_release_timer':
+ - 'build_voltha_pod_release_legacy':
build-node: 'qa-testvm-pod'
config-pod: 'flex-ocp-cord'
- release: '2.6'
- branch: 'voltha-2.6'
+ release: '2.7'
+ branch: 'voltha-2.7'
num-of-openonu: '1'
num-of-onos: '3'
num-of-atomix: '3'
@@ -133,13 +129,14 @@
configurePod: true
profile: 'TP'
time: '22'
+ VolthaEtcdPort: 9999
# flex pod1 test job - released versions: uses tech profile on voltha branch
- 'build_voltha_pod_test':
build-node: 'qa-testvm-pod'
config-pod: 'flex-ocp-cord'
- release: '2.6'
- branch: 'voltha-2.6'
+ release: '2.7'
+ branch: 'voltha-2.7'
name-extension: '_TT'
work-flow: 'TT'
power-switch: True
@@ -153,16 +150,13 @@
config-pod: 'flex-ocp-cord'
release: 'master'
branch: 'master'
- num-of-openonu: '1'
- num-of-onos: '3'
- num-of-atomix: '3'
name-extension: '_TT'
work-flow: 'TT'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- configurePod: true
profile: 'TP'
time: '9'
+ VolthaEtcdPort: 9999
+ num-of-onos: '3'
+ num-of-atomix: '3'
# flex pod1 test job - released versions: uses tech profile on voltha branch
- 'build_voltha_pod_test':
@@ -177,79 +171,13 @@
test-repo: 'voltha-system-tests'
profile: 'TP'
-
- # Flex pod with xgs-pon olt/onu - master 1T4GEM tech profile and openonu go and timer based job
- - 'build_voltha_pod_release_timer':
- build-node: 'qa-testvm-pod'
- config-pod: 'flex-ocp-cord-openonupy'
- release: '2.6'
- branch: 'voltha-2.6'
- num-of-onos: '3'
- num-of-atomix: '3'
- name-extension: '_openonupy'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- configurePod: true
- profile: '1T4GEM'
- time: '18'
-
-
- # Flex POD test job - master versions, uses 1T4GEM tech profile and openonu go on voltha branch
- - 'build_voltha_pod_test':
- build-node: 'qa-testvm-pod'
- config-pod: 'flex-ocp-cord-openonupy'
- name-extension: '_openonupy'
- release: '2.6'
- branch: 'voltha-2.6'
- test-repo: 'voltha-system-tests'
- profile: '1T4GEM'
- power-switch: True
-
- # Flex pod with xgs-pon olt/onu - master TT workflow openonu go and timer based job
- - 'build_voltha_pod_release_timer':
- build-node: 'qa-testvm-pod'
- config-pod: 'flex-ocp-cord-openonupy'
- release: '2.6'
- branch: 'voltha-2.6'
- num-of-onos: '3'
- num-of-atomix: '3'
- name-extension: '_TT_openonupy'
- work-flow: 'TT'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- configurePod: true
- profile: 'TP'
- time: '14'
-
-
- # Flex POD test job - master versions, TT workflow and openonu go on voltha branch
- - 'build_voltha_pod_test':
- build-node: 'qa-testvm-pod'
- config-pod: 'flex-ocp-cord-openonupy'
- name-extension: '_TT_openonupy'
- release: '2.6'
- branch: 'voltha-2.6'
- test-repo: 'voltha-system-tests'
- work-flow: 'TT'
- pipeline-script: 'voltha-tt-physical-functional-tests.groovy'
- profile: 'TP'
- power-switch: True
-
# Menlo pod with olt/onu - 1T4GEM tech profile and timer based job
- 'build_voltha_pod_release_timer':
build-node: 'menlo-demo-pod'
config-pod: 'onf-demo-pod'
- release: 'master'
- branch: 'master'
- num-of-openonu: '1'
- num-of-onos: '3'
- num-of-atomix: '3'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- configurePod: true
profile: '1T4GEM'
in-band-management: true
- VolthaEtcdPort: '9999'
+ VolthaEtcdPort: 9999
time: '1'
# Menlo pod test job - master test job uses tech profile on voltha branch
@@ -266,18 +194,10 @@
- 'build_voltha_pod_release_timer':
build-node: 'menlo-demo-pod'
config-pod: 'onf-demo-pod'
- release: 'master'
- branch: 'master'
- num-of-openonu: '1'
- num-of-onos: '3'
- num-of-atomix: '3'
name-extension: '_DT'
work-flow: 'DT'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- configurePod: true
profile: '1T8GEM'
- VolthaEtcdPort: '9999'
+ VolthaEtcdPort: 9999
in-band-management: true
time: '4'
@@ -294,13 +214,12 @@
pipeline-script: 'voltha-dt-physical-functional-tests.groovy'
power-switch: True
-
- # Menlo pod with olt/onu - voltha-2.5 branch, Default tech profile and timer based job
- - 'build_voltha_pod_release_timer':
+ # Menlo pod with olt/onu - released branch, Default tech profile and timer based job
+ - 'build_voltha_pod_release_legacy':
build-node: 'menlo-demo-pod'
config-pod: 'onf-demo-pod'
- release: '2.6'
- branch: 'voltha-2.6'
+ release: '2.7'
+ branch: 'voltha-2.7'
name-extension: '_DT'
work-flow: 'DT'
num-of-openonu: '1'
@@ -310,7 +229,7 @@
Jenkinsfile: 'Jenkinsfile-voltha-build'
configurePod: true
profile: '1T8GEM'
- VolthaEtcdPort: '9999'
+ VolthaEtcdPort: 9999
in-band-management: true
time: '7'
@@ -318,8 +237,8 @@
- 'build_voltha_pod_test':
build-node: 'menlo-demo-pod'
config-pod: 'onf-demo-pod'
- release: '2.6'
- branch: 'voltha-2.6'
+ release: '2.7'
+ branch: 'voltha-2.7'
name-extension: '_DT'
work-flow: 'DT'
test-repo: 'voltha-system-tests'
@@ -341,6 +260,7 @@
num-of-onos: '3'
num-of-atomix: '3'
in-band-management: true
+
- 'build_pod_test':
build-node: 'menlo-demo-pod'
config-pod: 'onf-demo-pod'
@@ -373,41 +293,14 @@
test-repo: 'voltha-system-tests'
Jenkinsfile: 'Jenkinsfile-voltha-test'
-
- # Soak pod with xgs-pon olt/onu - master 1T4GEM tech profile and openonu go and timer based job
- - 'build_voltha_pod_release_timer':
- build-node: 'menlo-soak-pod'
- config-pod: 'onf-soak-pod-openonugo'
- 'disable-job': true
- release: 'master'
- branch: 'master'
- name-extension: '_openonugo'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- configurePod: true
- profile: '1T4GEM'
- time: '1'
-
-
- # Soak POD test job - master versions, uses 1T4GEM tech profile and openonu go on voltha branch
- - 'build_voltha_pod_test':
- build-node: 'menlo-soak-pod'
- config-pod: 'onf-soak-pod-openonugo'
- 'disable-job': true
- name-extension: '_openonugo'
- release: 'master'
- branch: 'master'
- test-repo: 'voltha-system-tests'
- profile: '1T4GEM'
-
# Menlo pod with olt/onu - Default tech profile and timer based job
# ONF Menlo Soak POD build job - voltha-master branch
# FIXME once the soak-pod is back use 'build_voltha_pod_release_timer'
- 'build_pod_manual':
build-node: 'menlo-soak-pod'
config-pod: 'onf-soak-pod'
- 'disable-job': true
release: 'master'
+ installBBSim: true
branch: 'master'
profile: '1T8GEM'
test-repo: 'voltha-system-tests'
@@ -416,6 +309,8 @@
reinstall-olt: false
Jenkinsfile: 'Jenkinsfile-voltha-build'
configurePod: true
+ num-of-onus: 16
+ num-of-ponports: 8
# ONF Menlo Soak POD test job - voltha-master branch
# FIXME once the soak-pod is back use 'build_voltha_pod_test'
@@ -433,7 +328,7 @@
- 'build_voltha_pod_soak_test':
build-node: 'menlo-soak-pod'
config-pod: 'onf-soak-pod'
- 'disable-job': true
+ disable-job: true
profile: '1T8GEM'
branch: 'master'
release: 'master'
@@ -476,28 +371,47 @@
pipeline-script: 'voltha-physical-soak-dt-tests.groovy'
time-trigger: "H H */3 * *"
- # Berlin pod with olt/onu - master versions timer based job , two OLTs
+ # Certification (Radisys) pod with olt/onu - master versions timer based job , two OLTs
- 'build_voltha_pod_release_timer':
- build-node: 'dt-berlin-community-pod'
- config-pod: 'dt-berlin-pod-multi-olt'
+ build-node: 'menlo-certification-pod'
+ config-pod: 'menlo-certification-pod-radisys-gpon'
+ disable-job: false
+ reinstall-olt: false
+ name-extension: '_DT'
+ work-flow: 'DT'
+ profile: '1T8GEM-unencrypted'
+ time: '1'
+
+ # Certification (Radisys) POD test job - master versions: uses tech profile on voltha branch
+ - 'build_voltha_pod_test':
+ build-node: 'menlo-certification-pod'
+ config-pod: 'menlo-certification-pod-radisys-gpon'
+ disable-job: false
release: 'master'
branch: 'master'
name-extension: '_DT'
work-flow: 'DT'
- num-of-openonu: '1'
- num-of-onos: '3'
- num-of-atomix: '3'
test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- configurePod: true
- profile: '1T8GEM'
- time: '21'
+ profile: '1T8GEM-unencrypted'
+ power-switch: false
+ pipeline-script: 'voltha-dt-physical-functional-tests.groovy'
+ # Berlin pod with olt/onu - master versions timer based job , two OLTs
+ - 'build_voltha_pod_release_timer':
+ build-node: 'dt-berlin-community-pod'
+ config-pod: 'dt-berlin-pod-multi-olt'
+ disable-job: true
+ name-extension: '_DT'
+ work-flow: 'DT'
+ profile: '1T8GEM'
+ # Update this value accordingly once the job is enabled
+ time: ''
# Berlin POD test job - master versions: uses tech profile on voltha branch
- 'build_voltha_pod_test':
build-node: 'dt-berlin-community-pod'
config-pod: 'dt-berlin-pod-multi-olt'
+ disable-job: true
release: 'master'
branch: 'master'
name-extension: '_DT'
@@ -507,12 +421,12 @@
power-switch: True
pipeline-script: 'voltha-dt-physical-functional-tests.groovy'
- # Berlin pod with olt/onu - voltha-2.6 timer based job , two OLTs
- - 'build_voltha_pod_release_timer':
+ # Berlin pod with olt/onu - voltha-2.7 timer based job , two OLTs
+ - 'build_voltha_pod_release_legacy':
build-node: 'dt-berlin-community-pod'
config-pod: 'dt-berlin-pod-multi-olt'
- release: '2.6'
- branch: 'voltha-2.6'
+ release: '2.7'
+ branch: 'voltha-2.7'
name-extension: '_DT'
work-flow: 'DT'
num-of-openonu: '1'
@@ -522,15 +436,14 @@
Jenkinsfile: 'Jenkinsfile-voltha-build'
configurePod: true
profile: '1T8GEM'
- time: '13'
+ time: '18'
-
- # Berlin POD test job - voltha-2.6 versions: two OLTs
+ # Berlin POD test job - voltha-2.7 versions: two OLTs
- 'build_voltha_pod_test':
build-node: 'dt-berlin-community-pod'
config-pod: 'dt-berlin-pod-multi-olt'
- release: '2.6'
- branch: 'voltha-2.6'
+ release: '2.7'
+ branch: 'voltha-2.7'
name-extension: '_DT'
work-flow: 'DT'
test-repo: 'voltha-system-tests'
@@ -538,12 +451,12 @@
power-switch: True
pipeline-script: 'voltha-dt-physical-functional-tests.groovy'
- # Berlin pod with olt/onu - voltha-2.6 Default tech profile and timer based job
- - 'build_voltha_pod_release_timer':
+ # Berlin pod with olt/onu - voltha-2.7 Default tech profile and timer based job
+ - 'build_voltha_pod_release_legacy':
build-node: 'dt-berlin-community-pod'
config-pod: 'dt-berlin-pod'
- release: '2.6'
- branch: 'voltha-2.6'
+ release: '2.7'
+ branch: 'voltha-2.7'
num-of-openonu: '1'
num-of-onos: '3'
num-of-atomix: '3'
@@ -552,15 +465,15 @@
configurePod: true
disable-job: true
profile: 'Default'
- time: '17'
+ # Update this value accordingly once the job is enabled
+ time: ''
-
- # Berlin POD test job - master versions: uses tech profile on voltha branch
+ # Berlin POD test job - released versions: uses tech profile on voltha branch
- 'build_voltha_pod_test':
build-node: 'dt-berlin-community-pod'
config-pod: 'dt-berlin-pod'
- release: '2.6'
- branch: 'voltha-2.6'
+ release: '2.7'
+ branch: 'voltha-2.7'
test-repo: 'voltha-system-tests'
profile: 'Default'
power-switch: True
@@ -569,20 +482,11 @@
- 'build_voltha_pod_release_timer':
build-node: 'dt-berlin-community-pod'
config-pod: 'dt-berlin-pod-gpon'
- release: 'master'
- branch: 'master'
- num-of-openonu: '1'
- num-of-onos: '3'
- num-of-atomix: '3'
name-extension: '_DT'
work-flow: 'DT'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- configurePod: true
profile: '1T8GEM'
time: '1'
-
# Berlin POD test job - master versions: uses 1T8GEM tech profile on voltha branch
- 'build_voltha_pod_test':
build-node: 'dt-berlin-community-pod'
@@ -596,12 +500,12 @@
power-switch: True
pipeline-script: 'voltha-dt-physical-functional-tests.groovy'
- # Berlin pod with adtran gpon olt/onu - master 1T8GEM tech profile and timer based job
- - 'build_voltha_pod_release_timer':
+ # Berlin pod with gpon olt/onu - released 1T8GEM tech profile and timer based job
+ - 'build_voltha_pod_release_legacy':
build-node: 'dt-berlin-community-pod'
- config-pod: 'dt-berlin-pod-gpon-adtran'
- release: '2.6'
- branch: 'voltha-2.6'
+ config-pod: 'dt-berlin-pod-gpon'
+ release: '2.7'
+ branch: 'voltha-2.7'
num-of-openonu: '1'
num-of-onos: '3'
num-of-atomix: '3'
@@ -611,51 +515,16 @@
Jenkinsfile: 'Jenkinsfile-voltha-build'
configurePod: true
profile: '1T8GEM'
- reinstall-olt: false
- openoltAdapterChart: '/home/community/adtran-2021-01-29/voltha-adapter-adtran-olt'
- time: '9'
+ time: '13'
-
- # Berlin POD adtran test job - master versions: uses 1T8GEM tech profile on voltha branch
- - 'build_voltha_pod_test':
- build-node: 'dt-berlin-community-pod'
- config-pod: 'dt-berlin-pod-gpon-adtran'
- name-extension: '_DT'
- work-flow: 'DT'
- release: '2.6'
- branch: 'voltha-2.6'
- test-repo: 'voltha-system-tests'
- profile: '1T8GEM'
- power-switch: False
- oltAdapterAppLabel: 'adapter-adtran-olt'
- pipeline-script: 'voltha-dt-physical-functional-tests.groovy'
-
- # Berlin pod with gpon olt/onu - voltha-2.5 1T8GEM tech profile and timer based job
- - 'build_voltha_pod_release_timer':
- build-node: 'dt-berlin-community-pod'
- config-pod: 'dt-berlin-pod-gpon'
- release: '2.6'
- branch: 'voltha-2.6'
- num-of-openonu: '1'
- num-of-onos: '3'
- num-of-atomix: '3'
- name-extension: '_DT'
- work-flow: 'DT'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
- configurePod: true
- profile: '1T8GEM'
- time: '5'
-
-
- # Berlin POD test job - voltha-2.5 versions: uses 1T8GEM tech profile on voltha branch
+ # Berlin POD test job - released versions: uses 1T8GEM tech profile on voltha branch
- 'build_voltha_pod_test':
build-node: 'dt-berlin-community-pod'
config-pod: 'dt-berlin-pod-gpon'
name-extension: '_DT'
work-flow: 'DT'
- release: '2.6'
- branch: 'voltha-2.6'
+ release: '2.7'
+ branch: 'voltha-2.7'
test-repo: 'voltha-system-tests'
profile: '1T8GEM'
power-switch: True
@@ -666,18 +535,12 @@
build-node: 'dt-berlin-community-pod'
config-pod: 'dt-berlin-pod-openonugo'
'disable-job': true
- release: 'master'
- branch: 'master'
- num-of-onos: '3'
- num-of-atomix: '3'
name-extension: '_DT_openonugo'
work-flow: 'DT'
- test-repo: 'voltha-system-tests'
- Jenkinsfile: 'Jenkinsfile-voltha-build'
configurePod: true
profile: '1T8GEM'
- time: '9'
-
+ # Update this value accordingly once the job is enabled
+ time: ''
# Berlin POD test job - master versions, uses 1T8GEM tech profile and openonu go on voltha branch
- 'build_voltha_pod_test':
@@ -712,3 +575,32 @@
branch: 'master'
test-repo: 'voltha-system-tests'
Jenkinsfile: 'Jenkinsfile-voltha-test'
+
+  # Berlin pod with adtran gpon olt/onu - voltha-2.6 1T8GEM tech profile and timer based job
+ - 'build_voltha_pod_release_timer':
+ build-node: 'dt-berlin-community-pod'
+ config-pod: 'dt-berlin-pod-gpon-adtran'
+ release: '2.6'
+ branch: 'voltha-2.6'
+ VolthaEtcdPort: 9999
+ name-extension: '_DT'
+ work-flow: 'DT'
+ profile: '1T8GEM'
+ reinstall-olt: false
+ Jenkinsfile: 'Jenkinsfile-voltha-build' # we are cloning voltha-system-test@2.6 that still has it
+ openoltAdapterChart: '/home/community/adtran-2021-01-29/voltha-adapter-adtran-olt'
+ time: '7'
+
+  # Berlin POD adtran test job - voltha-2.6 versions: uses 1T8GEM tech profile on voltha branch
+ - 'build_voltha_pod_test':
+ build-node: 'dt-berlin-community-pod'
+ config-pod: 'dt-berlin-pod-gpon-adtran'
+ name-extension: '_DT'
+ work-flow: 'DT'
+ release: '2.6'
+ branch: 'voltha-2.6'
+ test-repo: 'voltha-system-tests'
+ profile: '1T8GEM'
+ power-switch: True
+ oltAdapterAppLabel: 'adapter-adtran-olt'
+ pipeline-script: 'voltha-dt-physical-functional-tests.groovy'
diff --git a/jjb/voltha-unit-test.yaml b/jjb/voltha-unit-test.yaml
index c2132fa..f4225a3 100644
--- a/jjb/voltha-unit-test.yaml
+++ b/jjb/voltha-unit-test.yaml
@@ -37,7 +37,7 @@
choosing-strategy: gerrit
jenkins-ssh-credential: '{jenkins-ssh-credential}'
- node: 'ubuntu16.04-basebuild-4c-8g'
+ node: 'ubuntu18.04-basebuild-4c-8g'
project-type: freestyle
concurrent: true
diff --git a/jjb/xos-auth.yaml b/jjb/xos-auth.yaml
index 6b035f9..63a81a1 100644
--- a/jjb/xos-auth.yaml
+++ b/jjb/xos-auth.yaml
@@ -32,7 +32,7 @@
branch: '$GERRIT_BRANCH'
destination-dir: 'cord'
- node: 'ubuntu16.04-basebuild-4c-8g'
+ node: 'ubuntu18.04-basebuild-4c-8g'
project-type: freestyle
concurrent: true
diff --git a/jjb/xos-integration-tests.yaml b/jjb/xos-integration-tests.yaml
index c894947..b1cd814 100644
--- a/jjb/xos-integration-tests.yaml
+++ b/jjb/xos-integration-tests.yaml
@@ -33,7 +33,7 @@
parameters:
- string:
name: buildNode
- default: 'ubuntu16.04-basebuild-4c-8g'
+ default: 'ubuntu18.04-basebuild-4c-8g'
description: 'Name of the Jenkins node to run the job on'
- string:
@@ -92,7 +92,7 @@
parameters:
- string:
name: buildNode
- default: 'ubuntu16.04-basebuild-4c-8g'
+ default: 'ubuntu18.04-basebuild-4c-8g'
description: 'Name of the Jenkins node to run the job on'
- string:
@@ -151,7 +151,7 @@
parameters:
- string:
name: buildNode
- default: 'ubuntu16.04-basebuild-4c-8g'
+ default: 'ubuntu18.04-basebuild-4c-8g'
description: 'Name of the Jenkins node to run the job on'
- string:
@@ -210,7 +210,7 @@
parameters:
- string:
name: buildNode
- default: 'ubuntu16.04-basebuild-4c-8g'
+ default: 'ubuntu18.04-basebuild-4c-8g'
description: 'Name of the Jenkins node to run the job on'
- string:
diff --git a/jjb/xos-rest-gw-unit.yaml b/jjb/xos-rest-gw-unit.yaml
index 7b12ffe..5eac9ad 100644
--- a/jjb/xos-rest-gw-unit.yaml
+++ b/jjb/xos-rest-gw-unit.yaml
@@ -40,7 +40,7 @@
choosing-strategy: gerrit
jenkins-ssh-credential: '{jenkins-ssh-credential}'
- node: 'ubuntu16.04-basebuild-1c-2g'
+ node: 'ubuntu18.04-basebuild-1c-2g'
project-type: freestyle
concurrent: true
diff --git a/jjb/xos-synchronizer-update.yaml b/jjb/xos-synchronizer-update.yaml
index 2f49512..3ac10c9 100644
--- a/jjb/xos-synchronizer-update.yaml
+++ b/jjb/xos-synchronizer-update.yaml
@@ -28,7 +28,7 @@
parameters:
- string:
name: buildNode
- default: 'ubuntu16.04-basebuild-4c-8g'
+ default: 'ubuntu18.04-basebuild-4c-8g'
description: 'Name of the Jenkins node to run the job on'
- string:
diff --git a/jjb/xos-unit.yaml b/jjb/xos-unit.yaml
index 89fd6c1..12190b5 100644
--- a/jjb/xos-unit.yaml
+++ b/jjb/xos-unit.yaml
@@ -32,7 +32,7 @@
branch: 'master'
destination-dir: 'cord'
- node: 'ubuntu16.04-basebuild-1c-2g'
+ node: 'ubuntu18.04-basebuild-1c-2g'
project-type: freestyle
concurrent: true
diff --git a/jjb/xos-upgrade.yaml b/jjb/xos-upgrade.yaml
index 506d5dd..4889246 100644
--- a/jjb/xos-upgrade.yaml
+++ b/jjb/xos-upgrade.yaml
@@ -29,7 +29,7 @@
parameters:
- string:
name: buildNode
- default: 'ubuntu16.04-basebuild-4c-8g'
+ default: 'ubuntu18.04-basebuild-4c-8g'
description: 'Name of the Jenkins node to run the job on'
- string:
diff --git a/packer/provision/baseline.yaml b/packer/provision/baseline.yaml
new file mode 100644
index 0000000..2c43ecf
--- /dev/null
+++ b/packer/provision/baseline.yaml
@@ -0,0 +1,88 @@
+---
+- hosts: all
+ become_user: root
+ become_method: sudo
+
+ pre_tasks:
+ - include_role: name=lfit.system-update
+
+ - name: Install base packages
+ include_tasks: "{{item}}"
+ with_first_found:
+ - "install-base-pkgs-{{ansible_distribution}}.yaml"
+ - "install-base-pkgs-{{ansible_os_family}}.yaml"
+
+ - name: Allow jenkins user sudo access
+ copy:
+ dest: /etc/sudoers.d/89-jenkins-user-defaults
+ content: |
+ Defaults:jenkins !requiretty
+ jenkins ALL = NOPASSWD: /usr/sbin/update-alternatives, /usr/sbin/update-java-alternatives
+ validate: /usr/sbin/visudo -cf %s
+ become: yes
+
+ roles:
+ - lfit.lf-recommended-tools
+ - lfit.lf-dev-libs
+ - lfit.haveged-install
+ - lfit.java-install
+ - lfit.python-install
+ - lfit.shellcheck-install
+ - lfit.sysstat-install
+
+ post_tasks:
+    - name: Update /etc/nsswitch.conf to map hostname with IP
+      # Update /etc/nsswitch.conf to map hostname with IP instead of using `localhost`
+ # from /etc/hosts which is required by some of the Java API's to avoid
+ # Java UnknownHostException: "Name or service not known" error.
+ replace:
+ path: /etc/nsswitch.conf
+ regexp: '^hosts:(\s+.*)?$'
+ replace: 'hosts:\1 myhostname'
+ backup: yes
+ become: yes
+
+ - name: Disable periodic updates
+ block:
+ - name: Set all periodic update options to 0
+ replace:
+ path: /etc/apt/apt.conf.d/10periodic
+ regexp: "1"
+ replace: "0"
+ - name: Set all auto update options to 0
+ replace:
+ path: /etc/apt/apt.conf.d/20auto-upgrades
+ regexp: "1"
+ replace: "0"
+ - name: Disable unattended upgrades
+ lineinfile:
+ path: /etc/apt/apt.conf.d/10periodic
+ regexp: "^APT::Periodic::Unattended-Upgrade"
+ line: 'APT::Periodic::Unattended-Upgrade "0";'
+ create: yes
+ - name: Uninstall unattended upgrades
+ apt:
+ name: unattended-upgrades
+ state: absent
+ - name: Prevent unattended upgrades from being installed
+ dpkg_selections:
+ name: unattended-upgrades
+ selection: hold
+ - name: Disable apt-daily.* systemd services
+ systemd:
+ name: "{{service}}"
+ enabled: no
+ masked: yes
+ with_items:
+ - apt-daily.service
+ - apt-daily.timer
+ - apt-daily-upgrade.service
+ - apt-daily-upgrade.timer
+ loop_control:
+ loop_var: service
+ when: ansible_distribution == 'Ubuntu'
+ become: yes
+
+ - name: System Reseal
+ script: system-reseal.sh
+ become: true
diff --git a/packer/provision/install-base-pkgs-Ubuntu.yaml b/packer/provision/install-base-pkgs-Ubuntu.yaml
new file mode 100644
index 0000000..2ab6308
--- /dev/null
+++ b/packer/provision/install-base-pkgs-Ubuntu.yaml
@@ -0,0 +1,15 @@
+# SPDX-FileCopyrightText: 2020 Open Networking Foundation <info@opennetworking.org>
+#
+# SPDX-License-Identifier: LicenseRef-ONF-Member-Only-1.0
+
+---
+- name: Install base packages
+ apt:
+ name:
+ - build-essential
+ - devscripts
+ - dh-systemd
+ - equivs
+ - gdebi
+ state: latest
+ become: yes
diff --git a/packer/provision/local-builder.yaml b/packer/provision/local-builder.yaml
new file mode 100644
index 0000000..8315fa0
--- /dev/null
+++ b/packer/provision/local-builder.yaml
@@ -0,0 +1,6 @@
+# SPDX-FileCopyrightText: 2020 Open Networking Foundation <info@opennetworking.org>
+#
+# SPDX-License-Identifier: LicenseRef-ONF-Member-Only-1.0
+
+---
+- import_playbook: ../provision/baseline.yaml
diff --git a/packer/provision/onf-playbook.yaml b/packer/provision/onf-playbook.yaml
new file mode 100644
index 0000000..a30a228
--- /dev/null
+++ b/packer/provision/onf-playbook.yaml
@@ -0,0 +1,262 @@
+---
+# SPDX-FileCopyrightText: 2020 Open Networking Foundation <info@opennetworking.org>
+# SPDX-License-Identifier: LicenseRef-ONF-Member-Only-1.0
+
+- name: "Provision Packages for Jenkins image"
+ hosts: default
+ become: true
+
+ tasks:
+ - name: Add Java Amazon Corretto Jdk repo GPG key
+ apt_key:
+ url: https://apt.corretto.aws/corretto.key
+ state: present
+
+ - name: Add Java Amazon Corretto Jdk repo
+ apt_repository:
+ repo: deb https://apt.corretto.aws stable main
+ state: present
+
+ - name: Install apt packages
+ apt:
+ name:
+ - "facter"
+ - "libxml2-utils"
+ - "bzip2"
+ - "curl"
+ - "ebtables"
+ - "enchant"
+ - "ethtool"
+ - "git"
+ - "graphviz"
+ - "jq"
+ - "kafkacat"
+ - "less"
+ - "libpcap-dev"
+ - "libxml2-utils"
+ - "maven"
+ - "ruby"
+ - "screen"
+ - "socat"
+ - "ssh"
+ - "sshpass"
+ - "zip"
+          # below packages are required by npm
+ - "nodejs"
+ - "libssl1.0-dev"
+ - "nodejs-dev"
+ - "node-gyp"
+ - "npm"
+ - "java-1.8.0-amazon-corretto-jdk"
+ - "java-11-amazon-corretto-jdk"
+ state: "present"
+ update_cache: true
+ cache_valid_time: 3600
+
+ - name: Download repo launcher
+ get_url:
+ url: "https://gerrit.googlesource.com/git-repo/+/refs/tags/v2.12.2/repo?format=TEXT"
+ checksum: "sha256:f5afffcc9afae128efd2b325ff19544a0b78acceb33a2edf368ce2de94e8c33e"
+ dest: /tmp/repo.b64
+
+ - name: Decode and make repo launcher executable
+ shell:
+ cmd: |
+ base64 --decode /tmp/repo.b64 > /usr/local/bin/repo;
+ chmod 755 /usr/local/bin/repo
+ creates: /usr/local/bin/repo
+
+ - name: Download helm archive
+ get_url:
+ url: "https://get.helm.sh/helm-v3.5.2-linux-amd64.tar.gz"
+ checksum: "sha256:01b317c506f8b6ad60b11b1dc3f093276bb703281cb1ae01132752253ec706a2"
+ dest: "/tmp/helm.tgz"
+
+ - name: Unarchive helm
+ unarchive:
+ src: "/tmp/helm.tgz"
+ dest: "/tmp"
+ remote_src: yes
+
+ - name: Install helm binary
+ copy:
+ src: /tmp/linux-amd64/helm
+ dest: /usr/local/bin/helm
+ mode: "0755"
+ remote_src: yes
+
+ - name: Download/install kubectl binary
+ get_url:
+ url: "https://storage.googleapis.com/kubernetes-release/release/v1.18.15/bin/linux/amd64/kubectl"
+ checksum: "sha256:eb5a5dd0a72795942ab81d1e4331625e80a90002c8bb39b2cb15aa707a3812c6"
+ dest: /usr/local/bin/kubectl
+ mode: "0755"
+
+ - name: load /etc/docker/daemon.json from file
+ slurp:
+ src: /etc/docker/daemon.json
+ register: imported_var
+
+ - name: append more key/values
+ set_fact:
+ imported_var: "{{ imported_var.content|b64decode|from_json | default([]) | combine({ 'registry-mirrors': ['https://mirror.registry.opennetworking.org'] }) }}"
+
+ - name: write var to file
+ copy:
+ content: "{{ imported_var | to_nice_json }}"
+ dest: /etc/docker/daemon.json
+
+ - name: restart Docker service
+ systemd:
+ name: docker
+ state: restarted
+ daemon_reload: true
+
+ - name: Install multi python3 packages with version specifiers
+ pip:
+ name:
+ - ansible
+ - ansible-lint
+ - docker
+ - docker-compose
+ - git-review
+ - httpie
+ - netaddr
+ - pylint
+ - tox
+ - twine
+ - virtualenv
+ - yamllint
+ executable: pip3
+ - name: Install multi python2 packages with version specifiers
+ pip:
+ name:
+ - Jinja2
+ - coverage
+ - certifi
+ - cryptography
+ - git+https://github.com/linkchecker/linkchecker.git@v9.4.0
+ - graphviz
+ - isort
+ - more-itertools==5.0.0
+ - mock>2.0.0<2.1.0
+ - ndg-httpsclient
+ - nose2>0.9.0<0.10.0
+ - pyopenssl
+ - pexpect
+ - pyyaml>3.10.0<3.11.0
+ - requests>2.14.0<2.15.0
+ - robotframework
+ - robotframework-httplibrary
+ - robotframework-kafkalibrary
+ - robotframework-lint
+ - robotframework-requests
+ - robotframework-sshlibrary
+ - six
+ - urllib3
+
+ - name: Install multi ruby packages with version specifiers
+ gem:
+ name: mdl
+ version: 0.5.0
+
+ - name: Install gitbook-cli npm package with version specifiers
+ npm:
+ name: gitbook-cli
+ global: true
+
+ - name: Install markdownlint npm package with version specifiers
+ npm:
+ name: markdownlint
+ global: true
+
+ - name: Install typings npm package with version specifiers
+ npm:
+ name: typings
+ global: true
+
+ - name: Download minikube
+ get_url:
+ url: "https://storage.googleapis.com/minikube/releases/latest/minikube_1.18.0-0_amd64.deb"
+ checksum: "sha256:6e3918b601704014f3d0b0a09e3116f1ea528ac255525743a800b5f0b5856622"
+ dest: /tmp/minikube.deb
+
+ - name: Install minikube deb
+ apt:
+ deb: /tmp/minikube.deb
+
+ - name: Download protobuf
+ get_url:
+ url: "https://github.com/google/protobuf/releases/download/v3.7.0/protoc-3.7.0-linux-x86_64.zip"
+ checksum: "sha256:a1b8ed22d6dc53c5b8680a6f1760a305b33ef471bece482e92728f00ba2a2969"
+ dest: /tmp/protobuf.zip
+
+ - name: Unarchive protobuf
+ unarchive:
+ src: "/tmp/protobuf.zip"
+ dest: "/usr/local"
+ remote_src: yes
+
+ - name: Download pandoc
+ get_url:
+ url: "https://github.com/jgm/pandoc/releases/download/2.10.1/pandoc-2.10.1-1-amd64.deb"
+ checksum: "sha256:4515d6fe2bf8b82765d8dfa1e1b63ccb0ff3332d60389f948672eaa37932e936"
+ dest: /tmp/pandoc.deb
+
+ - name: Install pandoc deb
+ apt:
+ deb: /tmp/pandoc.deb
+
+ - name: Download yq
+ get_url:
+ url: "https://github.com/mikefarah/yq/releases/download/3.4.0/yq_linux_amd64"
+ checksum: "sha256:f6bd1536a743ab170b35c94ed4c7c4479763356bd543af5d391122f4af852460"
+ dest: /usr/local/bin/yq
+
+ - name: Change yq Permission
+ file:
+ path: /usr/local/bin/yq
+ mode: 0755
+
+ - name: Download hadolint
+ get_url:
+ url: "https://github.com/hadolint/hadolint/releases/download/v1.18.0/hadolint-Linux-x86_64"
+ checksum: "sha256:f9bc9de12438b463ca84e77fde70b07b155d4da07ca21bc3f4354a62c6199db4"
+ dest: /usr/local/bin/hadolint
+
+ - name: Change hadolint Permission
+ file:
+ path: /usr/local/bin/hadolint
+ mode: 0755
+
+ - name: Download github-release
+ get_url:
+ url: "https://github.com/github-release/github-release/releases/download/v0.10.0/linux-amd64-github-release.bz2"
+ checksum: "sha256:b360af98188c5988314d672bb604efd1e99daae3abfb64d04051ee17c77f84b6"
+ dest: /tmp/github-release.bz2
+
+
+ # Unarchive target doesn't support the bz2 format
+ - name: Unarchive github-release
+ shell:
+ cmd: |
+ bzip2 -d /tmp/github-release.bz2
+
+ - name: Install github-release binary
+ copy:
+ src: /tmp/github-release
+ dest: /usr/local/bin/github-release
+ mode: "0755"
+ remote_src: yes
+
+ - name: Recursively remove download files and folders
+ file:
+ path: "{{ item }}"
+ state: absent
+ with_items:
+ - /tmp/linux-amd64
+ - /tmp/helm.tgz
+ - /tmp/minikube.deb
+ - /tmp/protobuf.zip
+ - /tmp/pandoc.deb
+ - /tmp/repo.b64
diff --git a/packer/provision/system-reseal.sh b/packer/provision/system-reseal.sh
new file mode 100644
index 0000000..002edf5
--- /dev/null
+++ b/packer/provision/system-reseal.sh
@@ -0,0 +1,89 @@
+#!/bin/bash
+# SPDX-License-Identifier: EPL-1.0
+##############################################################################
+# Copyright (c) 2016 The Linux Foundation and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Eclipse Public License v1.0
+# which accompanies this distribution, and is available at
+# http://www.eclipse.org/legal/epl-v10.html
+##############################################################################
+# Cleans up VM as preparation for image snapshotting
+
+# vim: sw=2 ts=2 sts=2 et :
+
+rm -rf ~/.viminfo \
+ /etc/Pegasus/*.cnf \
+ /etc/Pegasus/*.crt \
+ /etc/Pegasus/*.csr \
+ /etc/Pegasus/*.pem \
+ /etc/Pegasus/*.srl \
+ /etc/ssh/ssh*key* \
+ /root/.ssh/* \
+ /root/anaconda-ks.cfg \
+ /root/anaconda-post.log \
+ /root/initial-setup-ks.cfg \
+ /root/install.log \
+ /root/install.log.syslog \
+ /tmp/packer \
+ /var/cache/fontconfig/* \
+ /var/cache/gdm/* \
+ /var/cache/man/* \
+ /var/lib/AccountService/users/* \
+ /var/lib/cloud/* \
+ /var/lib/fprint/* \
+ /var/lib/logrotate.status \
+ /var/log/*.log* \
+ /var/log/BackupPC/LOG \
+ /var/log/ConsoleKit/* \
+ /var/log/anaconda.syslog \
+ /var/log/anaconda/* \
+ /var/log/apache2/*_log \
+ /var/log/apache2/*_log-* \
+ /var/log/apt/* \
+ /var/log/aptitude* \
+ /var/log/audit/* \
+ /var/log/btmp* \
+ /var/log/ceph/*.log \
+ /var/log/chrony/*.log \
+ /var/log/cron* \
+ /var/log/cups/*_log \
+ /var/log/debug* \
+ /var/log/dmesg* \
+ /var/log/exim4/* \
+ /var/log/faillog* \
+ /var/log/gdm/* \
+ /var/log/glusterfs/*glusterd.vol.log \
+ /var/log/glusterfs/glusterfs.log \
+ /var/log/httpd/*log \
+ /var/log/installer/* \
+ /var/log/jetty/jetty-console.log \
+ /var/log/journal/* \
+ /var/log/lastlog* \
+ /var/log/libvirt/libvirtd.log \
+ /var/log/libvirt/lxc/*.log \
+ /var/log/libvirt/qemu/*.log \
+ /var/log/libvirt/uml/*.log \
+ /var/log/lightdm/* \
+ /var/log/mail/* \
+ /var/log/maillog* \
+ /var/log/messages* \
+ /var/log/ntp \
+ /var/log/ntpstats/* \
+ /var/log/ppp/connect-errors \
+ /var/log/rhsm/* \
+ /var/log/sa/* \
+ /var/log/secure* \
+ /var/log/setroubleshoot/*.log \
+ /var/log/spooler* \
+ /var/log/squid/*.log \
+ /var/log/syslog* \
+ /var/log/tallylog* \
+ /var/log/tuned/tuned.log \
+ /var/log/wtmp* \
+ /var/named/data/named.run
+
+# Force a system sync and sleep to get around any SSD issues
+echo "Forcing sync and sleep for 10sec"
+sync
+sleep 10
diff --git a/packer/templates/basebuild_1804.json b/packer/templates/basebuild_1804.json
new file mode 100644
index 0000000..0bd5455
--- /dev/null
+++ b/packer/templates/basebuild_1804.json
@@ -0,0 +1,97 @@
+{
+ "variables": {
+ "ansible_roles_path": ".galaxy",
+ "aws_access_key": null,
+ "aws_security_key": null,
+ "arch": "x86_64",
+ "cloud_user_data": null,
+ "distro": null,
+ "instance_type": "t2.medium",
+ "security_group_id": null,
+ "source_ami_filter_name": null,
+ "source_ami_filter_owner": null,
+ "source_ami_filter_product_code": "",
+ "vm_volume_size": "20",
+ "subnet_id": null,
+ "ssh_user": null
+ },
+ "builders": [
+ {
+ "name": "aws",
+ "access_key": "{{user `aws_access_key`}}",
+ "ami_name": "{{user `distro`}} - basebuild - {{user `arch`}} - {{isotime \"20060102-1504\"}}",
+ "instance_type": "{{user `instance_type`}}",
+ "region": "us-west-2",
+ "secret_key": "{{user `aws_security_key`}}",
+ "security_group_id": "{{user `security_group_id`}}",
+ "source_ami_filter": {
+ "filters": {
+ "name": "{{user `source_ami_filter_name`}}",
+ "product-code": "{{user `source_ami_filter_product_code`}}",
+ "architecture": "{{user `arch`}}",
+ "root-device-type": "ebs",
+ "virtualization-type": "hvm"
+ },
+ "most_recent": true,
+ "owners": ["{{user `source_ami_filter_owner`}}"]
+ },
+ "ssh_username": "{{user `ssh_user`}}",
+ "subnet_id": "{{user `subnet_id`}}",
+ "type": "amazon-ebs",
+ "launch_block_device_mappings": [
+ {
+ "device_name": "/dev/sda1",
+ "volume_size": "{{user `vm_volume_size`}}",
+ "volume_type": "gp2",
+ "delete_on_termination": true
+ }
+ ],
+ "user_data_file": "{{user `cloud_user_data`}}"
+ }
+ ],
+ "provisioners": [
+ {
+ "type": "shell-local",
+ "command": "./common-packer/ansible-galaxy.sh {{user `ansible_roles_path`}}"
+ },
+ {
+ "type": "ansible",
+ "user": "{{user `ssh_user`}}",
+ "playbook_file": "provision/local-builder.yaml",
+ "ansible_env_vars": [
+ "ANSIBLE_NOCOWS=1",
+ "ANSIBLE_PIPELINING=False",
+ "ANSIBLE_HOST_KEY_CHECKING=False",
+ "ANSIBLE_ROLES_PATH={{user `ansible_roles_path`}}",
+ "ANSIBLE_CALLBACK_WHITELIST=profile_tasks",
+ "ANSIBLE_STDOUT_CALLBACK=debug"
+ ]
+ },
+ {
+ "type": "ansible",
+ "user": "{{user `ssh_user`}}",
+ "playbook_file": "./common-packer/provision/local-docker.yaml",
+ "ansible_env_vars": [
+ "ANSIBLE_NOCOWS=1",
+ "ANSIBLE_PIPELINING=False",
+ "ANSIBLE_HOST_KEY_CHECKING=False",
+ "ANSIBLE_ROLES_PATH={{user `ansible_roles_path`}}",
+ "ANSIBLE_CALLBACK_WHITELIST=profile_tasks",
+ "ANSIBLE_STDOUT_CALLBACK=debug"
+ ]
+ },
+ {
+ "type": "ansible",
+ "user": "{{user `ssh_user`}}",
+ "playbook_file": "provision/onf-playbook.yaml",
+ "ansible_env_vars": [
+ "ANSIBLE_NOCOWS=1",
+ "ANSIBLE_PIPELINING=False",
+ "ANSIBLE_HOST_KEY_CHECKING=False",
+ "ANSIBLE_ROLES_PATH={{user `ansible_roles_path`}}",
+ "ANSIBLE_CALLBACK_WHITELIST=profile_tasks",
+ "ANSIBLE_STDOUT_CALLBACK=debug"
+ ]
+ }
+ ]
+}
diff --git a/packer/vars/ubuntu-18.04.json b/packer/vars/ubuntu-18.04.json
new file mode 100644
index 0000000..a1ee888
--- /dev/null
+++ b/packer/vars/ubuntu-18.04.json
@@ -0,0 +1,11 @@
+{
+ "source_ami_filter_name": "*ubuntu*18.04*",
+ "source_ami_filter_owner": "aws-marketplace",
+ "source_ami_filter_product_code": "3iplms73etrdhxdepv72l6ywj",
+
+ "ssh_user": "ubuntu",
+
+ "distro": "Ubuntu 18.04",
+ "arch": "x86_64",
+ "cloud_user_data": null
+}
diff --git a/vars/buildVolthaComponent.groovy b/vars/buildVolthaComponent.groovy
index c0e4163..29c9b9c 100644
--- a/vars/buildVolthaComponent.groovy
+++ b/vars/buildVolthaComponent.groovy
@@ -1,12 +1,28 @@
def call(String project) {
// project is the gerrit project name
- if (project != 'voltha-system-tests' &&
- project != 'voltha-helm-charts' &&
- project != '') {
+  // these are projects that are not required to be built
+ def ignoredProjects = [
+ '', // this is the case for a manual trigger on master, nothing to be built
+ 'voltha-system-tests',
+ 'voltha-helm-charts'
+ ]
+
+ // some projects have different make targets
+ def Map customMakeTargets = [
+ "voltctl": "release"
+ ]
+
+ def defaultMakeTarget = "docker-build"
+
+ if (!ignoredProjects.contains(project)) {
+
+ def makeTarget = customMakeTargets.get(project, defaultMakeTarget)
+
+ println "Building ${project} with make target ${makeTarget}."
sh """
- make -C $WORKSPACE/${project} DOCKER_REPOSITORY=voltha/ DOCKER_TAG=citest docker-build
+ make -C $WORKSPACE/${project} DOCKER_REPOSITORY=voltha/ DOCKER_TAG=citest ${makeTarget}
"""
} else {
println "The project ${project} does not require to be built."
diff --git a/vars/createKubernetesCluster.groovy b/vars/createKubernetesCluster.groovy
index fccbaed..a1a2bde 100644
--- a/vars/createKubernetesCluster.groovy
+++ b/vars/createKubernetesCluster.groovy
@@ -20,9 +20,26 @@
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
+- role: worker
+- role: worker
- role: control-plane
-- role: worker
-- role: worker
+ kubeadmConfigPatches:
+ - |
+ kind: InitConfiguration
+ nodeRegistration:
+ kubeletExtraArgs:
+ node-labels: "ingress-ready=true"
+ extraPortMappings:
+ - containerPort: 80
+ hostPort: 80
+ protocol: TCP
+ - containerPort: 443
+ hostPort: 443
+ protocol: TCP
+ - containerPort: 30115
+ hostPort: 30115
+ - containerPort: 30120
+ hostPort: 30120
"""
writeFile(file: 'kind.cfg', text: data)
@@ -41,9 +58,9 @@
if [ "\$HOSTARCH" == "x86_64" ]; then
HOSTARCH="amd64"
fi
- curl -Lo ./voltctl https://github.com/opencord/voltctl/releases/download/v1.3.1/voltctl-1.3.1-\$HOSTOS-\$HOSTARCH
- chmod +x ./voltctl
- mv ./voltctl $WORKSPACE/bin/
+ VC_VERSION="\$(curl --fail -sSL https://api.github.com/repos/opencord/voltctl/releases/latest | jq -r .tag_name | sed -e 's/^v//g')"
+ curl -Lo $WORKSPACE/bin/voltctl https://github.com/opencord/voltctl/releases/download/v\$VC_VERSION/voltctl-\$VC_VERSION-\$HOSTOS-\$HOSTARCH
+ chmod +x $WORKSPACE/bin/voltctl
# start the kind cluster
kind create cluster --name ${cfg.name} --config kind.cfg
@@ -59,10 +76,6 @@
mkdir -p $HOME/.kube
kind get kubeconfig --name ${cfg.name} > $HOME/.kube/config
- # add helm repositories
- helm repo add onf https://charts.opencord.org
- helm repo update
-
# download kail
bash <( curl -sfL https://raw.githubusercontent.com/boz/kail/master/godownloader.sh) -b "$WORKSPACE/bin"
"""
diff --git a/vars/getPodsInfo.groovy b/vars/getPodsInfo.groovy
new file mode 100644
index 0000000..28fc81d
--- /dev/null
+++ b/vars/getPodsInfo.groovy
@@ -0,0 +1,13 @@
+// This keyword will get all the kubernetes pods info needed for debugging
+// the only parameter required is the destination folder to store the collected information
+def call(String dest) {
+ sh """
+ mkdir -p ${dest}
+ kubectl get pods --all-namespaces -o wide | tee ${dest}/pods.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.image}{'\\n'}" | sort | uniq | tee ${dest}/pod-images.txt || true
+ kubectl get pods --all-namespaces -o jsonpath="{range .items[*].status.containerStatuses[*]}{.imageID}{'\\n'}" | sort | uniq | tee ${dest}/pod-imagesId.txt || true
+ kubectl describe pods --all-namespaces -l app.kubernetes.io/part-of=voltha > ${dest}/voltha-pods-describe.txt
+ kubectl describe pods --all-namespaces -l app=onos-classic > ${dest}/onos-pods-describe.txt
+ helm ls --all-namespaces | tee ${dest}/helm-charts.txt
+ """
+}
diff --git a/vars/getVolthaCode.groovy b/vars/getVolthaCode.groovy
index 815e719..d763429 100644
--- a/vars/getVolthaCode.groovy
+++ b/vars/getVolthaCode.groovy
@@ -82,7 +82,7 @@
userRemoteConfigs: [[
url: "https://gerrit.opencord.org/voltha-helm-charts",
]],
- branches: [[ name: "master", ]],
+ branches: [[ name: "${cfg.branch}", ]],
extensions: [
[$class: 'WipeWorkspace'],
[$class: 'RelativeTargetDirectory', relativeTargetDir: "voltha-helm-charts"],
diff --git a/vars/getVolthaImageFlags.groovy b/vars/getVolthaImageFlags.groovy
new file mode 100644
index 0000000..8770ff2
--- /dev/null
+++ b/vars/getVolthaImageFlags.groovy
@@ -0,0 +1,40 @@
+// returns the helm flags required to override a specific image
+def call(String project = "unknown", String tag = "citest", String pullPolicy = "Never") {
+ def chart = "unknown"
+ def image = "unknown"
+ switch(project) {
+ case "ofagent-go":
+ chart = "voltha"
+ image = "ofagent"
+ break
+ case "voltha-go":
+ chart = "voltha"
+ image = "rw_core"
+ break
+ case "voltha-openonu-adapter-go":
+ chart = "voltha-adapter-openonu"
+ image = "adapter_open_onu_go"
+ break
+ // TODO remove after 2.7
+ case "voltha-openonu-adapter":
+ chart = "voltha-adapter-openonu"
+ image = "adapter_open_onu"
+ break
+ // TODO end
+ case "voltha-openolt-adapter":
+ chart = "voltha-adapter-openolt"
+ image = "adapter_open_olt"
+ break
+ case "bbsim":
+      // BBSIM has a different format than voltha, return directly
+ return "--set images.bbsim.tag=${tag},images.bbsim.pullPolicy=${pullPolicy},images.bbsim.registry='' "
+ break
+ case "voltha-onos":
+ return "--set onos-classic.image.repository=voltha/voltha-onos,onos-classic.image.tag=citest,onos-classic.image.pullPolicy=${pullPolicy}"
+ default:
+ return ""
+ break
+ }
+
+ return "--set ${chart}.images.${image}.tag=${tag},${chart}.images.${image}.pullPolicy=${pullPolicy},${chart}.images.${image}.registry='' "
+}
diff --git a/vars/helmTeardown.groovy b/vars/helmTeardown.groovy
index 3dff0ab..71fd263 100644
--- a/vars/helmTeardown.groovy
+++ b/vars/helmTeardown.groovy
@@ -7,7 +7,7 @@
for(int i = 0;i<namespaces.size();i++) {
def n = namespaces[i]
sh """
- for hchart in \$(helm list -n ${n} -q | grep -E -v '${exc}');
+ for hchart in \$(helm list --all -n ${n} -q | grep -E -v '${exc}');
do
echo "Purging chart: \${hchart}"
helm delete -n ${n} "\${hchart}"
diff --git a/vars/loadToKind.groovy b/vars/loadToKind.groovy
index 7df6bc5..379b9f3 100644
--- a/vars/loadToKind.groovy
+++ b/vars/loadToKind.groovy
@@ -28,7 +28,7 @@
println "Loading image ${image} on Kind cluster ${cfg.name}"
sh """
- kind load docker-image ${image}:citest --name ${cfg.name} --nodes ${cfg.name}-worker,${cfg.name}-worker2
+ kind load docker-image ${image}:citest --name ${cfg.name} --nodes ${cfg.name}-control-plane,${cfg.name}-worker,${cfg.name}-worker2
"""
}
}
diff --git a/vars/volthaDeploy.groovy b/vars/volthaDeploy.groovy
index eb9faca..d8d679f 100644
--- a/vars/volthaDeploy.groovy
+++ b/vars/volthaDeploy.groovy
@@ -11,8 +11,13 @@
bbsimReplica: 1,
infraNamespace: "infra",
volthaNamespace: "voltha",
+ stackName: "voltha",
+ stackId: 1,
workflow: "att",
extraHelmFlags: "",
+      localCharts: false, // whether to use locally cloned charts or the upstream ones (for local we assume they are stored in $WORKSPACE/voltha-helm-charts)
+ dockerRegistry: "", // use a different docker registry for all images, eg: "mirror.registry.opennetworking.org"
+ kubeconfig: null, // location of the kubernetes config file, if null we assume it's stored in the $KUBECONFIG environment variable
]
if (!config) {
@@ -21,6 +26,27 @@
def cfg = defaultConfig + config
+ if (cfg.dockerRegistry != "") {
+ def registryFlags = " --set global.image_registry=${cfg.dockerRegistry}/ "
+ registryFlags += " --set etcd.image.registry=${cfg.dockerRegistry} "
+ registryFlags += " --set kafka.image.registry=${cfg.dockerRegistry} "
+ registryFlags += " --set kafka.zookeper.image.registry=${cfg.dockerRegistry} "
+ registryFlags += " --set onos-classic.image.repository=${cfg.dockerRegistry}/voltha/voltha-onos "
+ registryFlags += " --set onos-classic.atomix.image.repository=${cfg.dockerRegistry}/atomix/atomix "
+ registryFlags += " --set freeradius.images.radius.registry=${cfg.dockerRegistry}/ "
+
+ // we want to always leave the user provided flags at the end, to override changes
+ cfg.extraHelmFlags = registryFlags + " " + cfg.extraHelmFlags
+ }
+
+ // Add helm repositories
+ println "Updating helm repos"
+
+ sh """
+ helm repo add onf https://charts.opencord.org
+ helm repo update
+ """
+
println "Deploying VOLTHA with the following parameters: ${cfg}."
volthaInfraDeploy(cfg)
diff --git a/vars/volthaInfraDeploy.groovy b/vars/volthaInfraDeploy.groovy
index ecea1ad..58bd800 100644
--- a/vars/volthaInfraDeploy.groovy
+++ b/vars/volthaInfraDeploy.groovy
@@ -19,6 +19,8 @@
infraNamespace: "infra",
workflow: "att",
extraHelmFlags: "",
+ localCharts: false,
+ kubeconfig: null, // location of the kubernetes config file, if null we assume it's stored in the $KUBECONFIG environment variable
]
if (!config) {
@@ -27,15 +29,35 @@
def cfg = defaultConfig + config
+ def volthaInfraChart = "onf/voltha-infra"
+
+ if (cfg.localCharts) {
+ volthaInfraChart = "$WORKSPACE/voltha-helm-charts/voltha-infra"
+
+ sh """
+ pushd $WORKSPACE/voltha-helm-charts/voltha-infra
+ helm dep update
+ popd
+ """
+ }
+
println "Deploying VOLTHA Infra with the following parameters: ${cfg}."
+ def kubeconfig = cfg.kubeconfig
+ if (kubeconfig == null) {
+ kubeconfig = env.KUBECONFIG
+ }
+
sh """
kubectl create namespace ${cfg.infraNamespace} || true
- kubectl create configmap -n ${cfg.infraNamespace} kube-config "--from-file=kube_config=$KUBECONFIG" || true
+ kubectl create configmap -n ${cfg.infraNamespace} kube-config "--from-file=kube_config=${kubeconfig}" || true
"""
- // TODO support multiple replicas
+
sh """
- helm upgrade --install --create-namespace -n ${cfg.infraNamespace} voltha-infra onf/voltha-infra ${cfg.extraHelmFlags} \
- -f $WORKSPACE/voltha-helm-charts/examples/${cfg.workflow}-values.yaml
+ helm upgrade --install --create-namespace -n ${cfg.infraNamespace} voltha-infra ${volthaInfraChart} \
+ --set onos-classic.replicas=${cfg.onosReplica},onos-classic.atomix.replicas=${cfg.atomixReplica} \
+ --set kafka.replicaCount=${cfg.kafkaReplica},kafka.zookeeper.replicaCount=${cfg.kafkaReplica} \
+ --set etcd.statefulset.replicaCount=${cfg.etcdReplica} \
+ -f $WORKSPACE/voltha-helm-charts/examples/${cfg.workflow}-values.yaml ${cfg.extraHelmFlags}
"""
}
diff --git a/vars/volthaStackDeploy.groovy b/vars/volthaStackDeploy.groovy
index 78d90bf..b7e43fc 100644
--- a/vars/volthaStackDeploy.groovy
+++ b/vars/volthaStackDeploy.groovy
@@ -2,16 +2,14 @@
def call(Map config) {
// note that I can't define this outside the function as there's no global scope in Groovy
def defaultConfig = [
- onosReplica: 1,
- atomixReplica: 1,
- kafkaReplica: 1,
- etcdReplica: 1,
bbsimReplica: 1,
infraNamespace: "infra",
volthaNamespace: "voltha",
stackName: "voltha",
+ stackId: 1, // NOTE this is used to differentiate between BBSims across multiple stacks
workflow: "att",
extraHelmFlags: "",
+ localCharts: false,
]
if (!config) {
@@ -20,22 +18,55 @@
def cfg = defaultConfig + config
+ def volthaStackChart = "onf/voltha-stack"
+
+ if (cfg.localCharts) {
+ volthaStackChart = "$WORKSPACE/voltha-helm-charts/voltha-stack"
+
+ sh """
+ pushd $WORKSPACE/voltha-helm-charts/voltha-stack
+ helm dep update
+ popd
+ """
+ }
+
println "Deploying VOLTHA Stack with the following parameters: ${cfg}."
sh """
- helm upgrade --install --create-namespace -n ${cfg.volthaNamespace} ${cfg.stackName} onf/voltha-stack ${cfg.extraHelmFlags} \
+ helm upgrade --install --create-namespace -n ${cfg.volthaNamespace} ${cfg.stackName} ${volthaStackChart} \
--set global.stack_name=${cfg.stackName} \
--set global.voltha_infra_name=voltha-infra \
--set global.voltha_infra_namespace=${cfg.infraNamespace} \
+ ${cfg.extraHelmFlags}
"""
for(int i = 0;i<cfg.bbsimReplica;i++) {
- // TODO differentiate olt_id between different stacks
- sh """
- helm upgrade --install --create-namespace -n ${cfg.volthaNamespace} bbsim${i} onf/bbsim ${cfg.extraHelmFlags} \
- --set olt_id="1${i}" \
- -f $WORKSPACE/voltha-helm-charts/examples/${cfg.workflow}-values.yaml
- """
+ // NOTE we don't need to update the tag for DT
+ script {
+ sh """
+ rm -f $WORKSPACE/bbsimCfg${cfg.stackId}${i}.yaml
+ """
+ if (cfg.workflow == "att" || cfg.workflow == "tt") {
+ def startingStag = 900
+ def bbsimCfg = readYaml file: "$WORKSPACE/voltha-helm-charts/examples/${cfg.workflow}-values.yaml"
+ // NOTE we assume that the only service that needs a different s_tag is the first one in the list
+ bbsimCfg["servicesConfig"]["services"][0]["s_tag"] = startingStag + i
+ println "Using BBSim Service config ${bbsimCfg}"
+ writeYaml file: "$WORKSPACE/bbsimCfg${cfg.stackId}${i}.yaml", data: bbsimCfg
+ } else {
+ // NOTE if it's DT just copy the file over
+ sh """
+ cp $WORKSPACE/voltha-helm-charts/examples/${cfg.workflow}-values.yaml $WORKSPACE/bbsimCfg${cfg.stackId}${i}.yaml
+ """
+ }
+ }
+
+ sh """
+ helm upgrade --install --create-namespace -n ${cfg.volthaNamespace} bbsim${i} onf/bbsim \
+ --set olt_id="${cfg.stackId}${i}" \
+ -f $WORKSPACE/bbsimCfg${cfg.stackId}${i}.yaml \
+ ${cfg.extraHelmFlags}
+ """
}
println "Wait for VOLTHA Stack ${cfg.stackName} to start"