Update adapter pod and deployment names to reflect recent changes in master
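
This follows the rename in master of the OLT and ONU adapter workloads
from adapter-open-olt / adapter-open-onu to open-olt / open-onu; the
pod, deployment and app-label names hard-coded in the failure-scenario
suites are updated to match. As a rough sanity check on a running
cluster (a sketch only, assuming the voltha namespace these suites use):

    # hypothetical spot-check; actual names depend on the helm chart values in use
    kubectl -n voltha get deployments,pods | grep -E 'open-olt|open-onu'
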
Change-Id: I5314ecba046cd16f63442edf915280670be5c5e0
diff --git a/tests/dt-workflow/Voltha_DT_FailureScenarios.robot b/tests/dt-workflow/Voltha_DT_FailureScenarios.robot
index 407c22c..4c83f56 100644
--- a/tests/dt-workflow/Voltha_DT_FailureScenarios.robot
+++ b/tests/dt-workflow/Voltha_DT_FailureScenarios.robot
@@ -188,7 +188,7 @@
${podStatusOutput}= Run kubectl get pods -n ${NAMESPACE}
Log ${podStatusOutput}
${countBforRestart}= Run kubectl get pods -n ${NAMESPACE} | grep Running | wc -l
- ${podName} Set Variable adapter-open-olt
+ ${podName} Set Variable open-olt
Restart Pod ${NAMESPACE} ${podName}
Wait Until Keyword Succeeds ${waitforRestart} 2s Validate Pod Status ${podName} ${NAMESPACE}
... Running
@@ -231,12 +231,12 @@
... ${onu_device_id} onu=True onu_reason=omci-flows-pushed
END
# Scale down the open OLT adapter deployment to 0 PODs and once confirmed, scale it back to 1
- Scale K8s Deployment voltha adapter-open-olt 0
- Wait Until Keyword Succeeds ${timeout} 2s Pod Does Not Exist voltha adapter-open-olt
+ Scale K8s Deployment voltha open-olt 0
+ Wait Until Keyword Succeeds ${timeout} 2s Pod Does Not Exist voltha open-olt
# Scale up the open OLT adapter deployment and make sure both it and the ofagent deployment are back
- Scale K8s Deployment voltha adapter-open-olt 1
+ Scale K8s Deployment voltha open-olt 1
Wait Until Keyword Succeeds ${timeout} 2s
- ... Check Expected Available Deployment Replicas voltha adapter-open-olt 1
+ ... Check Expected Available Deployment Replicas voltha open-olt 1
# Ensure the device is available in ONOS, this represents system connectivity being restored
Wait Until Keyword Succeeds ${timeout} 2s Device Is Available In ONOS
diff --git a/tests/functional/Voltha_FailureScenarios.robot b/tests/functional/Voltha_FailureScenarios.robot
index 4d32d62..674ea30 100644
--- a/tests/functional/Voltha_FailureScenarios.robot
+++ b/tests/functional/Voltha_FailureScenarios.robot
@@ -185,7 +185,7 @@
${podStatusOutput}= Run kubectl get pods -n ${NAMESPACE}
Log ${podStatusOutput}
${countBforRestart}= Run kubectl get pods -n ${NAMESPACE} | grep Running | wc -l
- ${podName} Set Variable adapter-open-olt
+ ${podName} Set Variable open-olt
Restart Pod ${NAMESPACE} ${podName}
Wait Until Keyword Succeeds ${waitforRestart} 2s Validate Pod Status ${podName} ${NAMESPACE}
... Running
@@ -278,12 +278,12 @@
... ${ONOS_SSH_PORT} ${onu_port}
END
# Scale down the open OLT adapter deployment to 0 PODs and once confirmed, scale it back to 1
- Scale K8s Deployment voltha adapter-open-olt 0
- Wait Until Keyword Succeeds ${timeout} 2s Pod Does Not Exist voltha adapter-open-olt
+ Scale K8s Deployment voltha open-olt 0
+ Wait Until Keyword Succeeds ${timeout} 2s Pod Does Not Exist voltha open-olt
# Scale up the open OLT adapter deployment and make sure both it and the ofagent deployment are back
- Scale K8s Deployment voltha adapter-open-olt 1
+ Scale K8s Deployment voltha open-olt 1
Wait Until Keyword Succeeds ${timeout} 2s
- ... Check Expected Available Deployment Replicas voltha adapter-open-olt 1
+ ... Check Expected Available Deployment Replicas voltha open-olt 1
# Ensure the device is available in ONOS, this represents system connectivity being restored
Wait Until Keyword Succeeds ${timeout} 2s Device Is Available In ONOS
@@ -388,7 +388,7 @@
# Wait for adapter to resync
Sleep 60s
# Restart the onu
- ${podName} Set Variable adapter-open-onu
+ ${podName} Set Variable open-onu
Wait Until Keyword Succeeds ${timeout} 15s Delete K8s Pods By Label ${NAMESPACE} app ${podName}
# Validate ONU Ports
FOR ${I} IN RANGE 0 ${num_onus}