[VOL-5435]VGC port changes

Change-Id: I43ea55db72d49463c945a8312168fb36bd737c6b
Signed-off-by: rbodapat <ramakrishna.bodapati@radisys.com>
diff --git a/libraries/vgc.robot b/libraries/vgc.robot
index 32d0900..74c9d31 100755
--- a/libraries/vgc.robot
+++ b/libraries/vgc.robot
@@ -423,11 +423,9 @@
         ${service_name}=    Set Variable    ${service[${I}]['name']}
         ${stag}=    Set Variable    ${service[${I}]['s_tag']}
         ${ctag}=    Set Variable    ${service[${I}]['c_tag']}
-        Run Keyword If    '${service_name}' == 'FTTB_SUBSCRIBER_TRAFFIC'
-        ...    Verify Subscriber Access Flows Added for DT FTTB    ${olt_of_id}
-        ...    ${onu_port}    ${nni_port}    ${stag}    ${ctag}
-        ...    Verify DPU MGMT Flows Added for DT FTTB    ${olt_of_id}
-        ...    ${onu_port}    ${nni_port}    ${stag}    ${ctag}
+        Run Keyword If    '${service_name}' == 'FTTB_SUBSCRIBER_TRAFFIC'    Run Keywords
+        ...    Verify Subscriber Access Flows Added for DT FTTB    ${olt_of_id}    ${onu_port}    ${nni_port}    ${stag}    ${ctag}
+        ...    AND    Verify DPU MGMT Flows Added for DT FTTB    ${olt_of_id}    ${onu_port}    ${nni_port}    ${stag}    ${ctag}
     END
 
 
diff --git a/tests/data/bbsim-kind-dt-fttb-1OLTx1PONx2ONUx2UNI.yaml b/tests/data/bbsim-kind-dt-fttb-1OLTx1PONx2ONUx2UNI.yaml
index 41f0593..bbc521e 100644
--- a/tests/data/bbsim-kind-dt-fttb-1OLTx1PONx2ONUx2UNI.yaml
+++ b/tests/data/bbsim-kind-dt-fttb-1OLTx1PONx2ONUx2UNI.yaml
@@ -21,7 +21,9 @@
 has_dataplane: false
 teardown_device: true
 ONOS_REST_PORT: 8181
+VGC_REST_PORT: 8181
 ONOS_SSH_PORT: 8101
+VGC_SSH_PORT: 8101
 OLT_PORT: 50060
 
 nodes:
diff --git a/tests/dt-workflow/Voltha_DT_FailureScenarios_VGC.robot b/tests/dt-workflow/Voltha_DT_FailureScenarios_VGC.robot
index 025cc1a..1cdb4d3 100644
--- a/tests/dt-workflow/Voltha_DT_FailureScenarios_VGC.robot
+++ b/tests/dt-workflow/Voltha_DT_FailureScenarios_VGC.robot
@@ -89,6 +89,11 @@
     ${countAfterRestart}=    Run    kubectl get pods -n ${NAMESPACE} | grep Running | wc -l
     Should Be Equal As Strings    ${countAfterRestart}    ${countBeforeRestart}
     Log to console    Pod ${podName} restarted and sanity checks passed successfully
+    # Once the onu adapter is restarted, it takes a bit of time for the OLTs/ONUs to reconcile; if the OLT is deleted
+    # before the ONUs are reconciled successfully there would be stale entries. This scenario is not handled in VOLTHA as
+    # of now, and there is no other way to check if the reconcile has happened for all the ONUs. Due to these limitations a
+    # sleep of 60s is introduced to give enough time for the onu adapter to reconcile the ONUs.
+    Sleep   60s
     Run Keyword If    '${SOAK_TEST}'=='False'    Delete All Devices and Verify
 
 Verify restart openolt-adapter container after subscriber provisioning for DT
@@ -117,6 +122,11 @@
     Log    ${podStatusOutput}
     ${countAfterRestart}=    Run    kubectl get pods -n ${NAMESPACE} | grep Running | wc -l
     Should Be Equal As Strings    ${countAfterRestart}    ${countBforRestart}
+    # Once the olt adapter is restarted, it takes a bit of time for the OLTs/ONUs to reconcile; if we try to delete the OLT
+    # before the OLTs are reconciled successfully there would be a reconcile error. This scenario is not handled in VOLTHA as
+    # of now, and there is no other way to check if the reconcile has happened for all the OLTs. Due to these limitations a
+    # sleep of 60s is introduced to give enough time for the OLT adapter to reconcile the OLTs.
+    Sleep   60s
     Log to console    Pod ${podName} restarted and sanity checks passed successfully
 
 Verify openolt adapter restart before subscriber provisioning for DT
@@ -139,7 +149,7 @@
         ${onu_device_id}=    Get Device ID From SN    ${src['onu']}
         Wait Until Keyword Succeeds    ${timeout}    5s
         ...    Validate Device        ENABLED    ACTIVE    REACHABLE
-        ...    ${onu_device_id}    onu=True    onu_reason=initial-mib-downloaded    by_dev_id=True
+        ...    ${onu_device_id}    onu=True    onu_reason=omci-flows-pushed    by_dev_id=True
     END
     # Scale down the open OLT adapter deployment to 0 PODs and once confirmed, scale it back to 1
     Scale K8s Deployment by Pod Label    ${NAMESPACE}    app    ${OLT_ADAPTER_APP_LABEL}    0
@@ -185,6 +195,11 @@
         ...    ${src['ip']}    ${src['user']}    ${src['pass']}    ${src['container_type']}    ${src['container_name']}
         ...    ${dst['dp_iface_name']}    ${dst['ip']}    ${dst['user']}    ${dst['pass']}    ${dst['container_type']}
         ...    ${dst['container_name']}
+    # Once the olt adapter is restarted, it takes a bit of time for the OLTs/ONUs to reconcile; if we try to delete the OLT
+    # before the OLTs are reconciled successfully there would be a reconcile error. This scenario is not handled in VOLTHA as
+    # of now, and there is no other way to check if the reconcile has happened for all the OLTs. Due to these limitations a
+    # sleep of 60s is introduced to give enough time for the OLT adapter to reconcile the OLTs.
+    Sleep   60s
     END
 
 Sanity E2E Test for OLT/ONU on POD With Core Fail and Restart for DT
@@ -216,7 +231,7 @@
         ${onu_device_id}=    Get Device ID From SN    ${src['onu']}
         # Bring up the device and verify it authenticates
         Wait Until Keyword Succeeds    360s    5s    Validate Device    ENABLED    ACTIVE    REACHABLE
-        ...    ${onu_device_id}    onu=True    onu_reason=initial-mib-downloaded    by_dev_id=True
+        ...    ${onu_device_id}    onu=True    onu_reason=omci-flows-pushed    by_dev_id=True
     END
 
     # Scale down the rw-core deployment to 0 PODs and once confirmed, scale it back to 1
@@ -224,13 +239,13 @@
     Wait Until Keyword Succeeds    ${timeout}    2s    Pod Does Not Exist    voltha    voltha-voltha-rw-core
     # Ensure the ofagent POD goes "not-ready" as expected
     Wait Until keyword Succeeds    ${timeout}    2s
-    ...    Check Expected Available Deployment Replicas    voltha    voltha-voltha-ofagent    0
+    ...    Check Expected Available Deployment Replicas    voltha    voltha-voltha-go-controller    1
     # Scale up the core deployment and make sure both it and the ofagent deployment are back
     Scale K8s Deployment    voltha    voltha-voltha-rw-core    1
     Wait Until Keyword Succeeds    ${timeout}    2s
     ...    Check Expected Available Deployment Replicas    voltha    voltha-voltha-rw-core    1
     Wait Until Keyword Succeeds    ${timeout}    2s
-    ...    Check Expected Available Deployment Replicas    voltha    voltha-voltha-ofagent    1
+    ...    Check Expected Available Deployment Replicas    voltha    voltha-voltha-go-controller    1
     # For some reason scaling down and up the POD behind a service causes the port forward to stop working,
     # so restart the port forwarding for the API service
     Restart VOLTHA Port Forward    voltha-api
@@ -364,6 +379,11 @@
         Run Keyword If    ${has_dataplane}    Check Ping Result    True    ${ping_output}
     END
     # Verify Control Plane Functionality by Deleting and Re-adding the Subscriber
+    # Once the onu adapter is restarted, it takes a bit of time for the OLTs/ONUs to reconcile; if the OLT is deleted
+    # before the ONUs are reconciled successfully there would be stale entries. This scenario is not handled in VOLTHA as
+    # of now, and there is no other way to check if the reconcile has happened for all the ONUs. Due to these limitations a
+    # sleep of 60s is introduced to give enough time for the onu adapter to reconcile the ONUs.
+    Sleep   60s
     Verify Control Plane After Pod Restart DT
 
 Verify restart openolt-adapter container for DT
@@ -420,6 +440,11 @@
         Run Keyword If    ${has_dataplane}    Check Ping Result    True    ${ping_output}
     END
     # Verify Control Plane Functionality by Deleting and Re-adding the Subscriber
+    # Once the olt adapter is restarted, it takes a bit of time for the OLTs/ONUs to reconcile; if we try to delete the OLT
+    # before the OLTs are reconciled successfully there would be a reconcile error. This scenario is not handled in VOLTHA as
+    # of now, and there is no other way to check if the reconcile has happened for all the OLTs. Due to these limitations a
+    # sleep of 60s is introduced to give enough time for the OLT adapter to reconcile the OLTs.
+    Sleep   60s
     Verify Control Plane After Pod Restart DT
 
 Verify restart rw-core container for DT
@@ -479,6 +504,11 @@
         Run Keyword If    ${has_dataplane}    Check Ping Result    True    ${ping_output}
     END
     # Verify Control Plane Functionality by Deleting and Re-adding the Subscriber
+    # Once the rw core is restarted, it takes a bit of time for the OLTs/ONUs to reconcile; if we try to delete the OLT
+    # before the OLTs are reconciled successfully there would be a reconcile error. This scenario is not handled in VOLTHA as
+    # of now, and there is no other way to check if the reconcile has happened for all the OLTs. Due to these limitations a
+    # sleep of 60s is introduced to give enough time for the rw core to reconcile the OLTs.
+    Sleep   60s
     Verify Control Plane After Pod Restart DT
 
 *** Keywords ***
diff --git a/tests/dt-workflow/Voltha_DT_PODTests_VGC.robot b/tests/dt-workflow/Voltha_DT_PODTests_VGC.robot
index 9923717..fd70309 100755
--- a/tests/dt-workflow/Voltha_DT_PODTests_VGC.robot
+++ b/tests/dt-workflow/Voltha_DT_PODTests_VGC.robot
@@ -261,63 +261,6 @@
     Run Keyword If    ${has_dataplane}    Clean Up Linux
     Perform Sanity Test DT
 
-Test Disable and Enable OLT for DT
-    [Documentation]    Validates E2E Ping Connectivity and object states for the given scenario:
-    ...    Assuming that all the ONUs are DHCP/pingable (i.e. assuming sanityDt test was executed)
-    ...    Perform disable on the OLT and validate that the pings do not succeed
-    ...    Perform enable on the OLT and validate that the pings are successful
-    [Tags]    functionalDt    DisableEnableOLTDt   soak
-    [Setup]    Start Logging    DisableEnableOLTDt
-    [Teardown]    Run Keywords    Run Keyword If    ${logging}    Collect Logs
-    ...           AND             Stop Logging    DisableEnableOLTDt
-    # Disable and Validate OLT Device
-    FOR   ${I}    IN RANGE    0    ${olt_count}
-        ${olt_serial_number}=    Get From Dictionary    ${olt_ids}[${I}]    sn
-        ${olt_device_id}=    Get OLTDeviceID From OLT List    ${olt_serial_number}
-        ${rc}    ${output}=    Run and Return Rc and Output
-        ...    voltctl -c ${VOLTCTL_CONFIG} device disable ${olt_device_id}
-        Should Be Equal As Integers    ${rc}    0
-        Wait Until Keyword Succeeds    ${timeout}    5s    Validate OLT Device    DISABLED    UNKNOWN    REACHABLE
-        ...    ${olt_serial_number}
-    END
-    # Validate ONUs
-    FOR    ${I}    IN RANGE    0    ${num_all_onus}
-        ${src}=    Set Variable    ${hosts.src[${I}]}
-        ${dst}=    Set Variable    ${hosts.dst[${I}]}
-        ${of_id}=    Get ofID From OLT List    ${src['olt']}
-        ${onu_port}=    Wait Until Keyword Succeeds    ${timeout}    2s
-        ...    Get ONU Port in VGC    ${src['onu']}    ${of_id}    ${src['uni_id']}
-        ${onu_device_id}=    Get Device ID From SN    ${src['onu']}
-        Wait Until Keyword Succeeds   ${timeout}    2s
-        ...    Verify UNI Port Is Disabled   ${VGC_SSH_IP}    ${VGC_SSH_PORT}    ${src['onu']}    ${src['uni_id']}
-        Run Keyword If    ${has_dataplane}    Run Keyword And Continue On Failure
-        ...    Wait Until Keyword Succeeds    ${timeout}    2s
-        ...    Check Ping    False    ${dst['dp_iface_ip_qinq']}    ${src['dp_iface_name']}
-        ...    ${src['ip']}    ${src['user']}    ${src['pass']}    ${src['container_type']}    ${src['container_name']}
-        # Remove Subscriber Access (To replicate DT workflow)
-        ${onu_port_name}=    Catenate    SEPARATOR=-    ${src['onu']}    ${src['uni_id']}
-	Wait Until Keyword Succeeds    ${timeout}    2s
-        ...    Delete Request    VGC    services/${onu_port_name}
-        # Delete ONU Device (To replicate DT workflow)
-        Delete Device    ${onu_device_id}
-    END
-    Sleep    5s
-    # Enable the OLT back and check ONU, OLT status are back to "ACTIVE"
-    FOR   ${I}    IN RANGE    0    ${olt_count}
-        ${olt_serial_number}=    Get From Dictionary    ${olt_ids}[${I}]    sn
-        ${olt_device_id}=    Get OLTDeviceID From OLT List    ${olt_serial_number}
-        Enable Device    ${olt_device_id}
-        Wait Until Keyword Succeeds    ${timeout}    5s    Validate OLT Device    ENABLED    ACTIVE    REACHABLE
-        ...    ${olt_serial_number}
-        #TODO: Update for PON_OLT ETHERNET_NNI
-        #Wait Until Keyword Succeeds    ${timeout}    5s    Validate OLT Port Types
-        #...    PON_OLT    ETHERNET_NNI
-    END
-    # Waiting extra time for the ONUs to come up
-    Sleep    60s
-    Run Keyword If    ${has_dataplane}    Clean Up Linux
-    Perform Sanity Test DT
-
 Test Delete and ReAdd OLT for DT
     [Documentation]    Validates E2E Ping Connectivity and object states for the given scenario:
     ...    Assuming that all the ONUs are DHCP/pingable (i.e. assuming sanityDt test was executed)