Updated ofagentRestart scenario for ATT, DT and TT to verify pod status during test teardown
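
Scaling the ofagent deployment back to one replica in the teardown
returns as soon as the Deployment is patched, before the replacement
pod is actually serving, so a failed restart could leak into the next
test. The teardown now additionally polls until the pod reaches the
Running phase (Validate Pods Status By Label) and then until it passes
its readiness probes (Pods Are Ready By Label); Running only means the
containers have started, while Ready also requires the readiness
checks to succeed.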

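Assuming the library keywords wrap the usual kubectl label-selector
queries, the two checks are roughly equivalent to:

    # pod phase check (expects "Running")
    kubectl get pods -n "$NAMESPACE" -l app=ofagent \
        -o jsonpath='{.items[*].status.phase}'
    # readiness check (expects every value to be "true")
    kubectl get pods -n "$NAMESPACE" -l app=ofagent \
        -o jsonpath='{.items[*].status.containerStatuses[*].ready}'
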
Change-Id: Ifa2c9eb46f1cc6fe44b6e0d16e156ff0529c7d91
diff --git a/tests/tt-workflow/Voltha_TT_FailureScenarios.robot b/tests/tt-workflow/Voltha_TT_FailureScenarios.robot
index 6895aeb..73857dc 100755
--- a/tests/tt-workflow/Voltha_TT_FailureScenarios.robot
+++ b/tests/tt-workflow/Voltha_TT_FailureScenarios.robot
@@ -346,6 +346,10 @@
     [Teardown]    Run Keywords    Collect Logs
     ...           AND             Stop Logging    ofagentRestart-TT
     ...           AND             Scale K8s Deployment    ${NAMESPACE}    ${STACK_NAME}-voltha-ofagent    1
+    ...           AND             Wait Until Keyword Succeeds    ${timeout}    2s
+    ...           Validate Pods Status By Label    ${NAMESPACE}    app    ofagent    Running
+    ...           AND             Wait Until Keyword Succeeds    ${timeout}    3s
+    ...           Pods Are Ready By Label    ${NAMESPACE}    app    ofagent
     # set timeout value
     ${waitforRestart}    Set Variable    120s
     ${podStatusOutput}=    Run    kubectl get pods -n ${NAMESPACE}
@@ -390,6 +394,7 @@
     Scale K8s Deployment    ${NAMESPACE}    ${STACK_NAME}-voltha-ofagent    1
     Wait Until Keyword Succeeds    ${waitforRestart}    2s    Validate Pod Status    ofagent    ${NAMESPACE}
     ...    Running
+    Wait Until Keyword Succeeds    ${timeout}    3s    Pods Are Ready By Label    ${NAMESPACE}    app    ${podName}
     Run Keyword If    ${has_dataplane}    Clean Up Linux
     Perform Sanity Tests TT    ${suppressaddsubscriber}
     Log to console    Pod ${podName} restarted and sanity checks passed successfully