VOL-1532: BBSim OLT preprovisioning test case
VOL-1533: BBSim OLT/ONU Discovery

Fix issue with pod list no longer printing to console

Change-Id: I9309aee43306e8ca36d0b59e6942db51a929eca7
diff --git a/tests/atests/build/Makefile b/tests/atests/build/Makefile
index 06d53a5..6bbe23c 100644
--- a/tests/atests/build/Makefile
+++ b/tests/atests/build/Makefile
@@ -23,7 +23,8 @@
 
 KAFKA_CHART_VERSION  ?= 0.8.8
 
-/all: $(M)/voltha_ponsim_running
+ponsim: $(M)/voltha-running $(M)/adapter-ponsim $(M)/voltha-adapter-running
+bbsim:  $(M)/voltha-running $(M)/adapter-bbsim $(M)/voltha-adapter-running
 
 $(M)/setup: 
 	echo "MYDIR = ${MYDIR}"
@@ -111,13 +112,19 @@
 	$(HOME)/cord/helm-charts/scripts/wait_for_pods.sh voltha
 	touch $@
 
-$(M)/ponsim: | $(M)/voltha-running
+$(M)/adapter-bbsim: | $(M)/voltha-running
+	cd $(HOME)/cord/helm-charts; helm upgrade --install ponnet ponnet
+	$(HOME)/cord/helm-charts/scripts/wait_for_pods.sh kube-system
+	cd $(HOME)/cord/helm-charts; helm upgrade --install bbsim bbsim -f configs/seba-ponsim.yaml --set images.bbsim.repository=voltha/voltha-bbsim,images.bbsim.tag=latest,images.bbsim.pullPolicy=Never
+	touch $@
+
+$(M)/adapter-ponsim: | $(M)/voltha-running
 	cd $(HOME)/cord/helm-charts; helm upgrade --install ponnet ponnet
 	$(HOME)/cord/helm-charts/scripts/wait_for_pods.sh kube-system
 	cd $(HOME)/cord/helm-charts; helm upgrade --install ponsimv2 ponsimv2 -f configs/seba-ponsim.yaml --set images.olt.repository=voltha-ponsim,images.olt.tag=latest,images.olt.pullPolicy=Never,images.onu.repository=voltha-ponsim,images.onu.tag=latest,images.onu.pullPolicy=Never,images.rg.repository=voltha-tester,images.rg.tag=latest,images.rg.pullPolicy=Never
 	touch $@
 
-$(M)/pon0_fwd: | $(M)/ponsim
+$(M)/pon0-fwd: | $(M)/voltha-running
 	echo 8 > /tmp/pon0_group_fwd_mask
 	until sudo cp /tmp/pon0_group_fwd_mask /sys/class/net/pon0/bridge/group_fwd_mask; \
 	do \
@@ -127,7 +134,7 @@
 	rm /tmp/pon0_group_fwd_mask
 	touch $@
 	
-$(M)/voltha_ponsim_running: | $(M)/pon0_fwd
+$(M)/voltha-adapter-running: | $(M)/pon0-fwd
 	$(HOME)/cord/helm-charts/scripts/wait_for_pods.sh
 	until http -a karaf:karaf --ignore-stdin --check-status GET http://127.0.0.1:30120/onos/v1/configuration/org.opencord.olt.impl.Olt; \
 	do \
@@ -139,35 +146,12 @@
 	touch $@
 	echo "Voltha Test Framework Ready!"
 
-$(M)/authenticate: $(M)/voltha_ponsim_running
-	timeout 60s bash -c "until http -a admin@opencord.org:letmein GET http://127.0.0.1:30001/xosapi/v1/att-workflow-driver/attworkflowdriverserviceinstances |jq '.items[0].authentication_state'|grep AWAITING; do echo 'waiting for att-workflow-driver to be in AWAITING state'; sleep 5; done"
-	kubectl -n voltha exec $(shell kubectl -n voltha get pod|grep "^rg-"|cut -d' ' -f1) -- wpa_supplicant -i eth0 -Dwired -c /etc/wpa_supplicant/wpa_supplicant.conf -B
-	timeout 60s bash -c "until http -a admin@opencord.org:letmein GET http://127.0.0.1:30001/xosapi/v1/att-workflow-driver/attworkflowdriverserviceinstances |jq '.items[0].authentication_state'|grep APPROVED; do echo 'waiting for att-workflow-driver to be in APPROVED state'; sleep 5; done"
-	touch $@
-	
-$(M)/dhclient: $(M)/authenticate
-	sudo iptables -P FORWARD ACCEPT
-	timeout 60s bash -c "until http -a admin@opencord.org:letmein GET http://127.0.0.1:30001/xosapi/v1/fabric-crossconnect/fabriccrossconnectserviceinstances |jq '.items[0].backend_status'|grep OK; do echo 'waiting for fabric-crossconnect to be synchronized';sleep 5; done"
-	kubectl -n voltha exec $(shell kubectl -n voltha get pod|grep "^rg-"|cut -d' ' -f1) -- dhclient
-	kubectl -n voltha exec $(shell kubectl -n voltha get pod|grep "^rg-"|cut -d' ' -f1) -- dhclient -r
-	kubectl -n voltha exec $(shell kubectl -n voltha get pod|grep "^rg-"|cut -d' ' -f1) -- dhclient
-	touch $@
-
-$(M)/pingtest: $(M)/dhclient
-	kubectl -n voltha exec $(shell kubectl -n voltha get pod|grep "^rg-"|cut -d' ' -f1) -- ping -c 3 172.18.0.10
-	touch $@
-
-run-tests: $(M)/pingtest
-
 remove-chart-milestones:
 	cd $(M); sudo rm -f setup kafka kafka-running onos voltha etcd-operator-ready etcd-cluster \
-		voltha-running ponsim pon0_fwd voltha_ponsim_running
+		voltha-running adapter-ponsim adapter-bbsim pon0-fwd voltha-adapter-running
 remove-kube-milestones:
 	cd $(M); sudo rm -f kubeadm helm-init
 
-remove-test-milestones:
-	cd $(M); sudo rm -f authenticate dhclient pingtest
-
 teardown-charts: remove-chart-milestones
 	helm delete --purge $(shell helm ls -q)
 
diff --git a/tests/atests/build/devices_json b/tests/atests/build/devices_json
index 74c0aca..302403b 100644
--- a/tests/atests/build/devices_json
+++ b/tests/atests/build/devices_json
@@ -61,5 +61,23 @@
       "uplink":"0",
       "vlan":"2"
     }
+  },
+  "of:0000626273696d76":{
+    "basic":{
+      "driver":"voltha"
+    },
+    "accessDevice":{
+      "uplink":"0",
+      "vlan":"2"
+    }
+  },
+  "of:0001626273696d76":{
+    "basic":{
+      "driver":"voltha"
+    },
+    "accessDevice":{
+      "uplink":"0",
+      "vlan":"2"
+    }
   }
 }
diff --git a/tests/atests/build/sadis_json b/tests/atests/build/sadis_json
index fcded75..901bfb6 100644
--- a/tests/atests/build/sadis_json
+++ b/tests/atests/build/sadis_json
@@ -15,7 +15,151 @@
           "sTag":44,
           "nasPortId":"PSMO12345678"
         },
-        {  
+        {
+          "id":"BBSM00000100",
+          "cTag":55,
+          "sTag":66,
+          "nasPortId":"BBSM00000100",
+          "technologyProfileId":64,
+          "upstreamBandwidthProfile":"High-Speed-Internet",
+          "downstreamBandwidthProfile":"User1-Specific"
+        },
+        {
+          "id":"BBSM00000101",
+          "cTag":56,
+          "sTag":66,
+          "nasPortId":"BBSM00000101",
+          "technologyProfileId":64,
+          "upstreamBandwidthProfile":"High-Speed-Internet",
+          "downstreamBandwidthProfile":"User1-Specific"
+        },
+        {
+          "id":"BBSM00000102",
+          "cTag":57,
+          "sTag":66,
+          "nasPortId":"BBSM00000102",
+          "technologyProfileId":64,
+          "upstreamBandwidthProfile":"High-Speed-Internet",
+          "downstreamBandwidthProfile":"User1-Specific"
+        },
+        {
+          "id":"BBSM00000103",
+          "cTag":58,
+          "sTag":66,
+          "nasPortId":"BBSM00000103",
+          "technologyProfileId":64,
+          "upstreamBandwidthProfile":"High-Speed-Internet",
+          "downstreamBandwidthProfile":"User1-Specific"
+        },
+        {
+          "id":"BBSM00000104",
+          "cTag":60,
+          "sTag":77,
+          "nasPortId":"BBSM00000104",
+          "technologyProfileId":64,
+          "upstreamBandwidthProfile":"High-Speed-Internet",
+          "downstreamBandwidthProfile":"User1-Specific"
+        },
+        {
+          "id":"BBSM00000105",
+          "cTag":61,
+          "sTag":77,
+          "nasPortId":"BBSM00000105",
+          "technologyProfileId":64,
+          "upstreamBandwidthProfile":"High-Speed-Internet",
+          "downstreamBandwidthProfile":"User1-Specific"
+        },
+        {
+          "id":"BBSM00000106",
+          "cTag":62,
+          "sTag":77,
+          "nasPortId":"BBSM00000106",
+          "technologyProfileId":64,
+          "upstreamBandwidthProfile":"High-Speed-Internet",
+          "downstreamBandwidthProfile":"User1-Specific"
+        },
+        {
+          "id":"BBSM00000107",
+          "cTag":63,
+          "sTag":77,
+          "nasPortId":"BBSM00000107",
+          "technologyProfileId":64,
+          "upstreamBandwidthProfile":"High-Speed-Internet",
+          "downstreamBandwidthProfile":"User1-Specific"
+        },
+        {
+          "id":"BBSM00000108",
+          "cTag":70,
+          "sTag":88,
+          "nasPortId":"BBSM00000108",
+          "technologyProfileId":64,
+          "upstreamBandwidthProfile":"High-Speed-Internet",
+          "downstreamBandwidthProfile":"User1-Specific"
+        },
+        {
+          "id":"BBSM00000109",
+          "cTag":71,
+          "sTag":88,
+          "nasPortId":"BBSM00000109",
+          "technologyProfileId":64,
+          "upstreamBandwidthProfile":"High-Speed-Internet",
+          "downstreamBandwidthProfile":"User1-Specific"
+        },
+        {
+          "id":"BBSM0000010a",
+          "cTag":72,
+          "sTag":88,
+          "nasPortId":"BBSM0000010a",
+          "technologyProfileId":64,
+          "upstreamBandwidthProfile":"High-Speed-Internet",
+          "downstreamBandwidthProfile":"User1-Specific"
+        },
+        {
+          "id":"BBSM0000010b",
+          "cTag":73,
+          "sTag":88,
+          "nasPortId":"BBSM0000010b",
+          "technologyProfileId":64,
+          "upstreamBandwidthProfile":"High-Speed-Internet",
+          "downstreamBandwidthProfile":"User1-Specific"
+        },
+        {
+          "id":"BBSM0000010c",
+          "cTag":80,
+          "sTag":99,
+          "nasPortId":"BBSM0000010c",
+          "technologyProfileId":64,
+          "upstreamBandwidthProfile":"High-Speed-Internet",
+          "downstreamBandwidthProfile":"User1-Specific"
+        },
+        {
+          "id":"BBSM0000010d",
+          "cTag":81,
+          "sTag":99,
+          "nasPortId":"BBSM0000010d",
+          "technologyProfileId":64,
+          "upstreamBandwidthProfile":"High-Speed-Internet",
+          "downstreamBandwidthProfile":"User1-Specific"
+        },
+        {
+          "id":"BBSM0000010e",
+          "cTag":82,
+          "sTag":99,
+          "nasPortId":"BBSM0000010e",
+          "technologyProfileId":64,
+          "upstreamBandwidthProfile":"High-Speed-Internet",
+          "downstreamBandwidthProfile":"User1-Specific"
+        },
+        {
+          "id":"BBSM0000010f",
+          "cTag":83,
+          "sTag":99,
+          "nasPortId":"BBSM0000010f",
+          "technologyProfileId":64,
+          "upstreamBandwidthProfile":"High-Speed-Internet",
+          "downstreamBandwidthProfile":"User1-Specific"
+        },
+        {
           "id":"1d1d1d1d1d1d11",
           "hardwareIdentifier":"00:1b:22:00:b1:78",
           "ipAddress":"192.168.1.252",
@@ -24,6 +168,45 @@
         {  
           "id":"olt.voltha.svc:50060",
           "uplinkPort":2
+        },
+        {
+          "id":"BBSIMOLT000",
+          "uplinkPort":65536
+        }
+      ]
+    },
+    "bandwidthprofile":{
+      "integration":{
+        "cache":{
+          "enabled":true,
+          "maxsize":40,
+          "ttl":"PT1m"
+        }
+      },
+      "entries":[
+        {
+          "id":"High-Speed-Internet",
+          "cir":200000000,
+          "cbs":348000,
+          "eir":10000000,
+          "ebs":348000,
+          "air":10000000
+        },
+        {
+          "id":"User1-Specific",
+          "cir":300000000,
+           "cbs":348000,
+          "eir":20000000,
+          "ebs":348000,
+         "air":30000000
+        },
+        {
+          "id":"Default",
+          "cir":300000000,
+          "cbs":348000,
+          "eir":20000000,
+          "ebs":348000,
+          "air":30000000
         }
       ]
     }
diff --git a/tests/atests/common/auto_test.py b/tests/atests/common/auto_test.py
index d93e7b5..6e21842 100755
--- a/tests/atests/common/auto_test.py
+++ b/tests/atests/common/auto_test.py
@@ -31,12 +31,20 @@
 import logging
 
 DEFAULT_LOG_DIR = '/tmp/voltha_test_results'
+DEFAULT_ADAPTER = 'ponsim'
 logging.basicConfig(level=logging.INFO)
 
 
 def dir_init(log_dir=DEFAULT_LOG_DIR, voltha_dir=os.environ['VOLTHA_BASE']):
-    logging.info(__file__)
     """
+
+    :param log_dir: default log dir
+    :param voltha_dir: voltha base dir
+    :return: root_dir, voltha_dir, log_dir
+    """
+    logging.info(__file__)
+
+    """   
     Init automated testing environment and return three directories: root dir,
     voltha sources dir and log dir
     """
@@ -58,6 +66,31 @@
     return root_dir, voltha_dir, log_dir
 
 
+def adapter_init(adapter=DEFAULT_ADAPTER):
+    """
+
+    :param adapter: ponsim or bbsim
+    :return: olt_type, onu_type, olt_host_ip, onu_count
+    """
+    if adapter == 'ponsim':
+        olt_type = 'ponsim_olt'
+        onu_type = 'ponsim_onu'
+        olt_host_ip = 'olt.voltha.svc'
+        onu_count = 1
+    elif adapter == 'bbsim':
+        olt_type = 'openolt'
+        onu_type = 'brcm_openomci_onu'
+        olt_host_ip = 'bbsim.voltha.svc'
+        onu_count = 16
+    else:
+        olt_type = None
+        onu_type = None
+        olt_host_ip = None
+        onu_count = 0
+
+    return olt_type, onu_type, olt_host_ip, onu_count
+
+
 #
 # MAIN
 #
@@ -69,20 +102,21 @@
     parser = argparse.ArgumentParser(description='VOLTHA Automated Testing')
     parser.add_argument('-l', dest='logDir', default=DEFAULT_LOG_DIR,
                         help='log directory (default: %s).' % DEFAULT_LOG_DIR)
+    parser.add_argument('-a', dest='adapter', choices=['ponsim', 'bbsim'], default=DEFAULT_ADAPTER,
+                        help='adapter (default: %s).' % DEFAULT_ADAPTER)
     args = parser.parse_args()
 
     ROOT_DIR, VOLTHA_DIR, LOG_DIR = dir_init(args.logDir)
+    OLT_TYPE, ONU_TYPE, OLT_HOST_IP, ONU_COUNT = adapter_init(args.adapter)
     
-    volthaMngr.voltha_initialize(ROOT_DIR, VOLTHA_DIR, LOG_DIR)
+    volthaMngr.voltha_initialize(ROOT_DIR, VOLTHA_DIR, LOG_DIR, args.adapter)
 
-    preprovisioning.run_test('olt.voltha.svc', 50060, 'ponsim_olt', 'ponsim_onu', LOG_DIR)
-    
-    discovery.run_test('olt.voltha.svc', 'ponsim_olt', 'ponsim_onu', LOG_DIR)
+    preprovisioning.run_test(OLT_HOST_IP, 50060, OLT_TYPE, ONU_TYPE, ONU_COUNT, LOG_DIR)
+    time.sleep(60)
+    discovery.run_test(OLT_HOST_IP, OLT_TYPE, ONU_TYPE, ONU_COUNT, LOG_DIR)
+    if args.adapter == 'ponsim':
+        authentication.run_test(ROOT_DIR, VOLTHA_DIR, LOG_DIR)
 
-    authentication.run_test(ROOT_DIR, VOLTHA_DIR, LOG_DIR)
+        dhcp.run_test(ROOT_DIR, VOLTHA_DIR, LOG_DIR)
 
-    dhcp.run_test(ROOT_DIR, VOLTHA_DIR, LOG_DIR)
-
-    unicast.run_test(ROOT_DIR, VOLTHA_DIR, LOG_DIR)
-
-    time.sleep(5)
+        unicast.run_test(ONU_TYPE, ONU_COUNT, ROOT_DIR, VOLTHA_DIR, LOG_DIR)
diff --git a/tests/atests/common/build.sh b/tests/atests/common/build.sh
index 23e9adb..6c0b7ed 100755
--- a/tests/atests/common/build.sh
+++ b/tests/atests/common/build.sh
@@ -16,29 +16,28 @@
 SRC_DIR="$( cd -P "$( dirname "${BASH_SOURCE[0]}" )" && pwd)"
 BUILD_DIR="$SRC_DIR/../build"
 
-cd $BUILD_DIR
-
-if [ $# -ne 1 ]
+cd ${BUILD_DIR}
+if [[ $# -ne 2 ]]
   then
-    echo "No arguments supplied"
+    echo "Wrong number of arguments supplied"
     exit 1
 fi
-if [ -z "$1" ]
+if [[ -z "${1}" || -z "${2}" ]]
   then
     echo "Empty argument supplied"
     exit 1
 fi
-if [ $1 == "clear" ]
+if [[ "${1}" == "clear" ]]
   then
     sudo make reset-kubeadm
-elif [ $1 == "start" ]
+elif [[ "${1}" == "start" ]]
   then
     sudo service docker restart
-    sudo make -f Makefile
-elif [ $1 == "stop" ]
+    sudo make -f Makefile ${2}
+elif [[ "${1}" == "stop" ]]
   then
     pods=$( /usr/bin/kubectl get pods --all-namespaces 2>&1 | grep -c -e refused -e resource )
-    if  [ $pods -eq 0 ]
+    if  [[ ${pods} -eq 0 ]]
       then
         sudo make teardown-charts
     fi
diff --git a/tests/atests/common/dhcp.py b/tests/atests/common/dhcp.py
index d52234a..a9acd8e 100644
--- a/tests/atests/common/dhcp.py
+++ b/tests/atests/common/dhcp.py
@@ -70,13 +70,14 @@
 
     def should_now_have_two_dhcp_flows(self):
         testCaseUtils.send_command_to_onos_cli(testCaseUtils.get_dir(self, 'log'),
-                                               'voltha_onos_flows.log', 'flows')
+                                               'voltha_onos_flows.log', 'flows -s')
         statusLines = testCaseUtils.get_fields_from_grep_command(self, 'IP_PROTO:17', 'voltha_onos_flows.log')
         assert statusLines, 'No DHCP Detection flows'
         lines = statusLines.splitlines()
+        assert len(lines) == 2, 'Expected 2 DHCP Detection Flows but result was %s' % len(lines)
         for line in lines:
             self.__fields = testCaseUtils.parse_fields(line, ',')
-            inPortStr = self.__fields[10].strip()
+            inPortStr = self.__fields[5].strip()
             selector, delimiter, inPort = inPortStr.partition('=[')
             assert (inPort == 'IN_PORT:2' or inPort == 'IN_PORT:128'), 'DHCP detection flows not associated with expected ports'
 
diff --git a/tests/atests/common/discovery.py b/tests/atests/common/discovery.py
index 1c5234a..3f0804f 100755
--- a/tests/atests/common/discovery.py
+++ b/tests/atests/common/discovery.py
@@ -38,6 +38,7 @@
         self.__logicalDeviceType = None
         self.__oltType = None
         self.__onuType = None
+        self.__onuCount = None
         self.__fields = []
         self.__logicalDeviceId = None
         self.__oltDeviceId = None
@@ -47,17 +48,21 @@
     def d_set_log_dirs(self, log_dir):
         testCaseUtils.config_dirs(self, log_dir)
 
-    def d_configure(self, logical_device_type, olt_type, onu_type):
+    def d_configure(self, logical_device_type, olt_type, onu_type, onu_count):
         self.__logicalDeviceType = logical_device_type
         self.__oltType = olt_type
         self.__onuType = onu_type
+        self.__onuCount = onu_count
 
     def logical_device(self):
         logging.info('Logical Device Info')
-        statusLines = testCaseUtils.get_fields_from_grep_command(self, self.__logicalDeviceType, 'voltha_devices_after_enable.log')
-        assert statusLines, 'No Logical Devices listed under devices'
+        testCaseUtils.send_command_to_voltha_cli(testCaseUtils.get_dir(self, 'log'),
+                                                 'voltha_logical_devices.log', 'logical_devices')
+        testCaseUtils.print_log_file(self, 'voltha_logical_devices.log')
+        statusLines = testCaseUtils.get_fields_from_grep_command(self, '-i olt', 'voltha_logical_devices.log')
+        assert statusLines, 'No Logical Device listed under logical devices'
         self.__fields = testCaseUtils.parse_fields(statusLines, '|')
-        self.__logicalDeviceId = self.__fields[4].strip()
+        self.__logicalDeviceId = self.__fields[1].strip()
         testCaseUtils.send_command_to_voltha_cli(testCaseUtils.get_dir(self, 'log'),
                                                  'voltha_logical_device.log', 'logical_device ' + self.__logicalDeviceId,
                                                  'voltha_logical_device_ports.log', 'ports', 'voltha_logical_device_flows.log', 'flows')
@@ -69,18 +74,16 @@
 
     def logical_device_ports_should_exist(self):
         statusLines = testCaseUtils.get_fields_from_grep_command(self, self.__oltDeviceId, 'voltha_logical_device_ports.log')
-        assert statusLines, 'No Olt device listed under logical device ports'
+        assert statusLines, 'No Olt ports listed under logical device ports'
         self.__fields = testCaseUtils.parse_fields(statusLines, '|')
         portType = self.__fields[1].strip()
-        assert portType == 'nni', 'Port type for %s does not match expected nni' % self.__oltDeviceId
+        assert portType.count('nni') == 1, 'Port type for %s does not match expected nni' % self.__oltDeviceId
         for onuDeviceId in self.__onuDeviceIds:
             statusLines = testCaseUtils.get_fields_from_grep_command(self, onuDeviceId, 'voltha_logical_device_ports.log')
             assert statusLines, 'No Onu device %s listed under logical device ports' % onuDeviceId
-            lines = statusLines.splitlines()
-            for line in lines:
-                self.__fields = testCaseUtils.parse_fields(line, '|')
-                portType = self.__fields[1].strip()
-                assert portType == 'uni-128', 'Port type for %s does not match expected uni-128' % onuDeviceId
+            self.__fields = testCaseUtils.parse_fields(statusLines, '|')
+            portType = self.__fields[1].strip()
+            assert portType.count('uni') == 1, 'Port type for %s does not match expected uni' % onuDeviceId
 
     def logical_device_should_have_at_least_one_flow(self):
         statusLines = testCaseUtils.get_fields_from_grep_command(self, 'Flows', 'voltha_logical_device_flows.log')
@@ -107,6 +110,7 @@
         statusLines = testCaseUtils.get_fields_from_grep_command(self, self.__onuType, 'voltha_devices_after_enable.log')
         assert statusLines, 'No Onu listed under devices'
         lines = statusLines.splitlines()
+        assert len(lines) == self.__onuCount, 'Onu count mismatch found: %s, should be: %s' % (len(lines), self.__onuCount)
         for line in lines:
             self.__fields = testCaseUtils.parse_fields(line, '|')
             onuDeviceId = self.__fields[1].strip()
@@ -127,7 +131,7 @@
             self.__fields = testCaseUtils.parse_fields(line, '|')
             assert (self.check_states(self.__oltDeviceId) is True), 'States of %s does match expected ' % self.__oltDeviceId
             portType = self.__fields[3].strip()
-            assert (portType == 'ETHERNET_NNI' or portType == 'PON_OLT'),\
+            assert (portType == 'ETHERNET_NNI' or portType == 'PON_OLT' or portType == 'ETHERNET_UNI'),\
                 'Port type for %s does not match expected ETHERNET_NNI or PON_OLT' % self.__oltDeviceId
             if portType == 'PON_OLT':
                 self.__peers = self.__fields[7].strip()
@@ -160,10 +164,12 @@
                     
     def check_states(self, device_id):
         result = True
-        adminState = self.__fields[4].strip()
-        assert adminState == 'ENABLED', 'Admin State of %s not ENABLED' % device_id
-        operatorStatus = self.__fields[5].strip()
-        assert operatorStatus == 'ACTIVE', 'Operator Status of %s not ACTIVE' % device_id
+        stateMatchCount = 0
+        for field in self.__fields:
+            field_no_space = field.strip()
+            if field_no_space == 'ENABLED' or field_no_space == 'ACTIVE':
+                stateMatchCount += 1
+        assert stateMatchCount == 2, 'State of %s is not ENABLED or ACTIVE' % device_id
         return result
 
     def olt_should_have_at_least_one_flow(self):
@@ -185,10 +191,10 @@
                 assert int(plainNumber) > 0, 'Zero number of flows for Onu %s' % onuDeviceId
                       
 
-def run_test(logical_device_type, olt_type, onu_type, log_dir):
+def run_test(logical_device_type, olt_type, onu_type, onu_count, log_dir):
     discovery = Discovery()
     discovery.d_set_log_dirs(log_dir)
-    discovery.d_configure(logical_device_type, olt_type, onu_type)
+    discovery.d_configure(logical_device_type, olt_type, onu_type, onu_count)
     discovery.olt_discovery()
     discovery.onu_discovery()
     discovery.logical_device()
diff --git a/tests/atests/common/preprovisioning.py b/tests/atests/common/preprovisioning.py
index a7f8a7f..fb68634 100755
--- a/tests/atests/common/preprovisioning.py
+++ b/tests/atests/common/preprovisioning.py
@@ -39,23 +39,25 @@
         self.__oltPort = None
         self.__oltType = None
         self.__onuType = None
+        self.__onuCount = None
         self.__fields = []
         self.__oltDeviceId = None
         
     def p_set_log_dirs(self, log_dir):
         testCaseUtils.config_dirs(self, log_dir)
 
-    def p_configure(self, olt_ip_address, olt_port, olt_type, onu_type):
+    def p_configure(self, olt_ip_address, olt_port, olt_type, onu_type, onu_count):
         self.__oltIpAddress = olt_ip_address
         self.__oltPort = olt_port
         self.__oltType = olt_type
         self.__onuType = onu_type
+        self.__onuCount = onu_count
 
     def preprovision_olt(self):
         logging.info('Do PROVISIONING')
         testCaseUtils.send_command_to_voltha_cli(testCaseUtils.get_dir(self, 'log'),
-                                                 'voltha_preprovision_olt.log', 'preprovision_olt -t ponsim_olt -H %s:%s' %
-                                                 (self.__oltIpAddress, self.__oltPort))
+                                                 'voltha_preprovision_olt.log', 'preprovision_olt -t %s -H %s:%s' %
+                                                 (self.__oltType, self.__oltIpAddress, self.__oltPort))
         time.sleep(5)
         
     def status_should_be_success_after_preprovision_command(self):
@@ -84,12 +86,12 @@
            
     def check_states(self, dev_type):
         result = True
-        adminState = self.__fields[7].strip()
-        assert adminState == 'ENABLED', 'Admin State of %s not ENABLED' % dev_type
-        operatorStatus = self.__fields[8].strip()
-        assert operatorStatus == 'ACTIVE', 'Operator Status of %s not ACTIVE' % dev_type
-        connectStatus = self.__fields[9].strip()
-        assert connectStatus == 'REACHABLE', 'Connect Status of %s not REACHABLE' % dev_type
+        stateMatchCount = 0
+        for field in self.__fields:
+            field_no_space = field.strip()
+            if field_no_space == 'ENABLED' or field_no_space == 'ACTIVE' or field_no_space == 'DISCOVERED' or field_no_space == 'REACHABLE':
+                stateMatchCount += 1
+        assert stateMatchCount == 3, 'State of %s is not ENABLED, ACTIVE or DISCOVERED and REACHABLE' % dev_type
         return result
 
     def check_olt_fields_after_enabling(self):
@@ -97,18 +99,17 @@
         assert statusLines, 'No Olt listed under devices'
         self.__fields = testCaseUtils.parse_fields(statusLines, '|')
         assert self.check_states(self.__oltType), 'States of %s does match expected' % self.__oltType
-        hostPort = self.__fields[11].strip()
-        assert hostPort, 'hostPort field is empty'
-        hostPortFields = hostPort.split(":")
-        assert hostPortFields[0] == self.__oltIpAddress or hostPortFields[1] == str(self.__oltPort), \
-            'Olt IP or Port does not match'
-                      
+        for field in self.__fields:
+            if field.strip() == self.__oltIpAddress + ':' + str(self.__oltPort):
+                hostPortCount = True
+        assert hostPortCount, 'hostPort field is empty or Olt IP and/or Port does not match'
+
     def check_onu_fields_after_enabling(self):        
         statusLines = testCaseUtils.get_fields_from_grep_command(self, self.__onuType, 'voltha_devices_after_enable.log')
         assert statusLines, 'No Onu listed under devices'
         lines = statusLines.splitlines()
         lenLines = len(lines)
-        assert lenLines == 1, 'Fixed single onu does not match, ONU Count was %d' % lenLines
+        assert lenLines == self.__onuCount, ' Discovered onu(s) does not match, ONU Count was %d' % lenLines
         for line in lines:
             self.__fields = testCaseUtils.parse_fields(line, '|')
             assert (self.check_states(self.__onuType) is True), 'States of %s does match expected' % self.__onuType
@@ -128,10 +129,10 @@
         testCaseUtils.print_log_file(self, 'voltha_devices_after_enable.log')
 
 
-def run_test(olt_ip_address, olt_port, olt_type, onu_type, log_dir):
+def run_test(olt_ip_address, olt_port, olt_type, onu_type, onu_count, log_dir):
     preprovisioning = Preprovisioning()
     preprovisioning.p_set_log_dirs(log_dir)
-    preprovisioning.p_configure(olt_ip_address, olt_port, olt_type, onu_type)
+    preprovisioning.p_configure(olt_ip_address, olt_port, olt_type, onu_type, onu_count)
     preprovisioning.preprovision_olt()
     preprovisioning.status_should_be_success_after_preprovision_command()
     preprovisioning.query_devices_before_enabling()
diff --git a/tests/atests/common/run_robot.sh b/tests/atests/common/run_robot.sh
index b86a267..84e6bcd 100755
--- a/tests/atests/common/run_robot.sh
+++ b/tests/atests/common/run_robot.sh
@@ -16,7 +16,13 @@
 SRC_DIR="$( cd -P "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 VOLTHA_DIR="$SRC_DIR/../../.."
 
-echo "Run Robot Framework TEST. Log: $1"
-cd $VOLTHA_DIR
+echo "Run Robot Framework TEST. Log: $1, Adapter: ${2:-ponsim}"
+cd ${VOLTHA_DIR}
 source env.sh
-robot -d $1 -v LOG_DIR:$1/voltha_test_results ./tests/atests/robot/voltha_automated_test_suite.robot
+if [[ "${2:-ponsim}" == "ponsim" ]]
+  then
+    robot -d $1 -v LOG_DIR:$1/voltha_test_results -v ADAPTER:ponsim ./tests/atests/robot/voltha_automated_test_suite.robot
+elif [[ "${2}" == "bbsim" ]]
+    then
+      robot -d $1 -v LOG_DIR:$1/voltha_test_results -v ADAPTER:bbsim -e ponsim ./tests/atests/robot/voltha_automated_test_suite.robot
+fi
\ No newline at end of file
diff --git a/tests/atests/common/unicast.py b/tests/atests/common/unicast.py
index 66db06f..69d3d1a 100644
--- a/tests/atests/common/unicast.py
+++ b/tests/atests/common/unicast.py
@@ -22,7 +22,7 @@
 import testCaseUtils
 import logging
 import subprocess
-import commands
+import json
 
 
 class Unicast(object):
@@ -42,12 +42,20 @@
         self.__rgName = testCaseUtils.discover_rg_pod_name()
         self.__fields = None
         self.__tcpdumpPid = None
+        self.__onuType = None
+        self.__onuCount = None
+        self.__onuSerialNum = []
         self.__sadisCTag = None
         self.__sadisSTag = None
+        self.__datastore = None
 
     def u_set_log_dirs(self, root_dir, voltha_dir, log_dir):
         testCaseUtils.config_dirs(self, log_dir, root_dir, voltha_dir)
 
+    def u_configure(self, onu_type, onu_count):
+        self.__onuType = onu_type
+        self.__onuCount = onu_count
+
     def execute_ping_test(self):
         logging.info('Ping 1.2.3.4 IP Test')
         process_output = open('%s/%s' % (testCaseUtils.get_dir(self, 'log'), self.PING_TEST_FILENAME), 'w')
@@ -144,34 +152,47 @@
             tagCount = line.count('802.1Q')
             assert tagCount == 2, 'Found a non double tagged packet'
 
-    def retrieve_stag_and_ctag_from_sadis_entries(self):
-        logging.info('Retrieving sTag and cTag from Sadis entries')
-        ctagGrepCommand = "grep %s %s/tests/atests/build/sadis_json" % ('cTag', testCaseUtils.get_dir(self, 'voltha'))
-        statusLines = commands.getstatusoutput(ctagGrepCommand)[1]
-        assert statusLines, 'No cTag found in sadis_json'
-        self.__sadisCTag = statusLines.split(':')[1].strip(',')
-        stagGrepCommand = "grep %s %s/tests/atests/build/sadis_json" % ('sTag', testCaseUtils.get_dir(self, 'voltha'))
-        statusLines = commands.getstatusoutput(stagGrepCommand)[1]
-        assert statusLines, 'No sTag found in sadis_json'
-        self.__sadisSTag = statusLines.split(':')[1].strip(',')
+    def retrieve_onu_serial_numbers(self):
+        logging.info('Onu Serial Number Discovery')
+        statusLines = testCaseUtils.get_fields_from_grep_command(self, self.__onuType, 'voltha_devices_after_enable.log')
+        assert statusLines, 'No Onu listed under devices'
+        lines = statusLines.splitlines()
+        assert len(lines) == self.__onuCount, 'Onu count mismatch found: %s, should be: %s' % (len(lines), self.__onuCount)
+        for line in lines:
+            self.__fields = testCaseUtils.parse_fields(line, '|')
+            onuSerialNum = self.__fields[5].strip()
+            self.__onuSerialNum.append(onuSerialNum)
+
+    def retrieve_stag_and_ctag_for_onu(self, onu_serial_num):
+        entries = self.__datastore['org.opencord.sadis']['sadis']['entries']
+        for entry in entries:
+            entry_id = entry['id']
+            if entry_id == onu_serial_num:
+                self.__sadisCTag = entry['cTag']
+                self.__sadisSTag = entry['sTag']
+
+    def read_sadis_entries_from_sadis_json(self):
+        with open('%s/tests/atests/build/sadis_json' % testCaseUtils.get_dir(self, 'voltha'), 'r') as sadis:
+            self.__datastore = json.load(sadis)
 
     def stag_and_ctag_should_match_sadis_file(self, ctag, stag):
-        assert ctag == self.__sadisCTag and stag == self.__sadisSTag, 'cTag and/or sTag do not match value in sadis file\n \
+        assert ctag == str(self.__sadisCTag) and stag == str(self.__sadisSTag), 'cTag and/or sTag do not match value in sadis file\n \
             vlan cTag = %s, sadis cTag = %s : vlan sTag = %s, sadis sTag = %s' % (ctag, self.__sadisCTag, stag, self.__sadisSTag)
 
+    def manage_onu_testing(self):
+        for onuSerial in self.__onuSerialNum:
+            self.retrieve_stag_and_ctag_for_onu(onuSerial)
+            self.execute_ping_test()
+            self.ping_test_should_have_failed()
+            self.should_have_q_in_q_vlan_tagging()
+            self.stag_and_ctag_should_match_sadis_entry()
 
-def run_test(root_dir, voltha_dir, log_dir):
+
+def run_test(onu_type, onu_count, root_dir, voltha_dir, log_dir):
 
     unicast = Unicast()
     unicast.u_set_log_dirs(root_dir, voltha_dir, log_dir)
-    unicast.execute_ping_test()
-    unicast.should_have_q_in_q_vlan_tagging()
-    unicast.retrieve_stag_and_ctag_from_sadis_entries()
-    unicast.stag_and_ctag_should_match_sadis_entry()
-
-
-
-
-
-
-
+    unicast.u_configure(onu_type, onu_count)
+    unicast.read_sadis_entries_from_sadis_json()
+    unicast.retrieve_onu_serial_numbers()
+    unicast.manage_onu_testing()
diff --git a/tests/atests/common/volthaMngr.py b/tests/atests/common/volthaMngr.py
index 20b45bc..1751fa4 100755
--- a/tests/atests/common/volthaMngr.py
+++ b/tests/atests/common/volthaMngr.py
@@ -25,11 +25,15 @@
 import logging
 
 
+
 class VolthaMngr(object):
 
     """
     This class implements voltha startup/shutdown callable helper functions
     """
+
+    DEFAULT_ADAPTER = 'ponsim'
+
     def __init__(self):
         self.dirs = dict()
         self.dirs['root'] = None
@@ -38,25 +42,25 @@
         
     def v_set_log_dirs(self, root_dir, voltha_dir, log_dir):
         testCaseUtils.config_dirs(self, log_dir, root_dir, voltha_dir)
-        
-    def start_all_pods(self):
-        proc1 = subprocess.Popen([testCaseUtils.get_dir(self, 'root') + '/build.sh', 'start'],
+
+    def start_all_pods(self, adapter=DEFAULT_ADAPTER):
+        proc1 = subprocess.Popen([testCaseUtils.get_dir(self, 'root') + '/build.sh', 'start', adapter],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
         output = proc1.communicate()[0]
         print(output)
         proc1.stdout.close()
 
-    def stop_all_pods(self):
-        proc1 = subprocess.Popen([testCaseUtils.get_dir(self, 'root') + '/build.sh', 'stop'],
+    def stop_all_pods(self, adapter=DEFAULT_ADAPTER):
+        proc1 = subprocess.Popen([testCaseUtils.get_dir(self, 'root') + '/build.sh', 'stop', adapter],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
         output = proc1.communicate()[0]
         print(output)
         proc1.stdout.close()
         
-    def reset_kube_adm(self):
-        proc1 = subprocess.Popen([testCaseUtils.get_dir(self, 'root') + '/build.sh', 'clear'],
+    def reset_kube_adm(self, adapter=DEFAULT_ADAPTER):
+        proc1 = subprocess.Popen([testCaseUtils.get_dir(self, 'root') + '/build.sh', 'clear', adapter],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
         output = proc1.communicate()[0]
@@ -123,11 +127,11 @@
     return allRunningPods
 
 
-def voltha_initialize(root_dir, voltha_dir, log_dir):
+def voltha_initialize(root_dir, voltha_dir, log_dir, adapter):
     voltha = VolthaMngr()
     voltha.v_set_log_dirs(root_dir, voltha_dir, log_dir)
-    voltha.stop_all_pods()
-    voltha.reset_kube_adm()
-    voltha.start_all_pods()
+    voltha.stop_all_pods(adapter)
+    voltha.reset_kube_adm(adapter)
+    voltha.start_all_pods(adapter)
     voltha.alter_onos_net_cfg()
     voltha.collect_pod_logs()
diff --git a/tests/atests/robot/voltha_automated_test_suite.robot b/tests/atests/robot/voltha_automated_test_suite.robot
index 4acb91f..71ec642 100755
--- a/tests/atests/robot/voltha_automated_test_suite.robot
+++ b/tests/atests/robot/voltha_automated_test_suite.robot
@@ -36,12 +36,12 @@
 ${LOG_DIR}              /tmp/voltha_test_results
 ${ROOT_DIR}             ${EMPTY}
 ${VOLTHA_DIR}           ${EMPTY}
-${ONOS_SSH_PORT}        8101
-${OLT_IP_ADDR}          olt.voltha.svc
 ${OLT_PORT_ID}          50060
-${LOGICAL_TYPE}         olt.voltha.svc
-${OLT_TYPE}             ponsim_olt
-${ONU_TYPE}             ponsim_onu
+${OLT_TYPE}             ${EMPTY}
+${ONU_TYPE}             ${EMPTY}
+${OLT_HOST_IP}          ${EMPTY}
+${ONU_COUNT}            ${EMPTY}
+${ADAPTER}              ${EMPTY}
 ${RETRY_TIMEOUT_60}     60s
 ${RETRY_INTERVAL_2}     2s
 
@@ -53,7 +53,7 @@
     ...                 information. It then verifies that all the physical and logical devices are ACTIVE
     ...                 and REACHEABLE
     P Set Log Dirs      ${LOG_DIR}
-    P Configure         ${OLT_IP_ADDR}    ${OLT_PORT_ID}    ${OLT_TYPE}    ${ONU_TYPE}
+    P Configure         ${OLT_HOST_IP}    ${OLT_PORT_ID}    ${OLT_TYPE}    ${ONU_TYPE}  ${ONU_COUNT}
     Preprovision Olt
     Wait Until Keyword Succeeds    ${RETRY_TIMEOUT_60}    ${RETRY_INTERVAL_2}    Query Devices Before Enabling
     Status Should Be Success After Preprovision Command
@@ -63,7 +63,8 @@
     Status Should Be Success After Enable Command
     Check Olt Fields After Enabling
     Check Onu Fields After Enabling
-    
+    Sleep    60
+
 Olt Onu Discovery
     [Documentation]     Olt Onu Discovery
     ...                 This test covers both Onu Discovery and yet to be developped Olt Discovery
@@ -73,7 +74,7 @@
     ...                 Olt or Onu device. Functionality to support multiple ONU accomodated
     ...                 The extent of the flow validation is limited to checking whether number of Flows is > 0
     D Set Log Dirs      ${LOG_DIR}
-    D Configure         ${LOGICAL_TYPE}     ${OLT_TYPE}    ${ONU_TYPE}
+    D Configure         ${OLT_HOST_IP}  ${OLT_TYPE}    ${ONU_TYPE}  ${ONU_COUNT}
     Olt Discovery
     Onu Discovery
     Logical Device
@@ -89,6 +90,7 @@
     ...                 This test attempts to perform a Radius Authentication from the RG
     ...                 It uses the wpa_supplicant app to authenticate using EAPOL.
     ...                 We then verify the generated log file confirming all the authentication steps
+    [Tags]              ponsim
     A Set Log Dirs      ${ROOT_DIR}    ${VOLTHA_DIR}    ${LOG_DIR}
     Discover Freeradius Pod Name
     Discover Freeradius Ip Addr
@@ -108,6 +110,7 @@
     ...                 by calling 'add subscriber access' on onos. We then deassign the default
     ...                 IP address granted to RG upon instantiating the RG pod. Finally we invoke
     ...                 'dhclient' on RG to request a DHCP IP address.
+    [Tags]              ponsim
     H Set Log Dirs      ${ROOT_DIR}     ${VOLTHA_DIR}    ${LOG_DIR}
     Set Firewall Rules
     Discover Authorized Users
@@ -128,32 +131,39 @@
     ...                 network looking for ARP request from RG IP address. These packets should
     ...                 be double tagged with different s and c Tags but matching tag configuration
     ...                 in sadis entry.
+    [Tags]              ponsim
     U Set Log Dirs      ${ROOT_DIR}     ${VOLTHA_DIR}    ${LOG_DIR}
-    Execute Ping Test
-    Should Have Q In Q Vlan Tagging
-    Retrieve Stag And Ctag From Sadis Entries
-    Stag And Ctag Should Match Sadis Entry
+    U Configure         ${ONU_TYPE}     ${ONU_COUNT}
+    Read Sadis Entries From Sadis Json
+    Retrieve Onu Serial Numbers
+    Manage Onu Testing
 
 *** Keywords ***
 Start Voltha
     [Documentation]     Start Voltha infrastructure to run test(s). This includes starting all 
     ...                 Kubernetes Pods and start collection of logs. PonsimV2 has now been
     ...                 containerized and does not need to be managed separately
+    ...                 Initialize working DIRs as well as Adapter specific variables
     ${ROOT_DIR}  ${VOLTHA_DIR}  ${LOG_DIR}  Dir Init    ${LOG_DIR}
     Set Suite Variable  ${ROOT_DIR}
     Set Suite Variable  ${VOLTHA_DIR}
     Set Suite Variable  ${LOG_DIR}   
     V Set Log Dirs      ${ROOT_DIR}    ${VOLTHA_DIR}    ${LOG_DIR}
+    ${OLT_TYPE}  ${ONU_TYPE}    ${OLT_HOST_IP}  ${ONU_COUNT}  Adapter Init  ${ADAPTER}
+    Set Suite Variable  ${OLT_TYPE}
+    Set Suite Variable  ${ONU_TYPE}
+    Set Suite Variable  ${OLT_HOST_IP}
+    Set Suite Variable  ${ONU_COUNT}
     Stop Voltha
-    Start All Pods
+    Start All Pods      ${ADAPTER}
     Sleep    60
-    ${pod_status}    Run    kubectl get pods --all-namespaces
-    Log To Console  \n  ${pod_status}
+    ${pod_status}       Run    kubectl get pods --all-namespaces
+    Log To Console      \n${pod_status}\n
     Alter Onos Net Cfg
     
 Stop Voltha
     [Documentation]     Stop Voltha infrastructure. This includes clearing all installation milestones
     ...                 files and stopping all Kubernetes pods
     Collect Pod Logs
-    Stop All Pods
-    Reset Kube Adm 
+    Stop All Pods       ${ADAPTER}
+    Reset Kube Adm      ${ADAPTER}