[ 4222 ] Minor changes after code review

This is the initial commit for Persistence and Voltha restart.
It consists of the following:
1) Introduction of a store config id that represents the persisted data
   of a Voltha instance.
2) The Coordinator service dynamically allocates a store config id
   to each Voltha instance on startup.  It also reallocates the same id
   to another Voltha instance if the instance previously holding that
   store id goes down (see the id-claim sketch after this list).
3) All Voltha data is stored in Consul as key-value (KV) pairs (see the
   KV sketch after this list).
4) When a Voltha instance starts and is allocated a config id that
   refers to existing data (from an instance that went down), it loads
   all of that data from Consul into its own memory and starts a
   reconciliation process.
5) During the reconciliation process, the necessary agents and
   callbacks are recreated from the loaded data.  A reconcile() API is
   also invoked on the adapters so they can perform their side of the
   reconciliation (see the reconcile sketch after this list).
6) The reconciliation process is implemented in the ponsim OLT and ONU
   adapters.
7) A set of integration tests focused on persistence and Voltha
   restarts.
8) Fixes for a few bugs found along the way.
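
Illustration for item 2 (id-claim sketch): one way a coordinator can
hand out store config ids and reclaim them when an instance dies is to
have each id claimed under a Consul session, since the claim disappears
when the session expires.  This is only a sketch of that pattern with
python-consul; the key prefix and function name are made up and do not
reflect Voltha's actual coordinator logic.

    import consul

    def claim_store_id(consul_host, consul_port, candidate_ids):
        # Claim one of the candidate store config ids by acquiring a KV
        # entry under a session; if the owning instance dies, the session
        # expires, the key is deleted, and the id becomes claimable by a
        # replacement instance.  (The session must be renewed periodically
        # to keep the claim alive.)
        c = consul.Consul(host=consul_host, port=consul_port)
        session_id = c.session.create(ttl=30, behavior='delete')
        for store_id in candidate_ids:
            key = 'service/voltha/assignments/{}'.format(store_id)
            if c.kv.put(key, 'claimed', acquire=session_id):
                return store_id, session_id
        return None, session_id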
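
Illustration for items 3 and 4 (KV sketch): a minimal sketch of writing
a Voltha instance's data to the Consul KV store and reading it back
after a restart, using python-consul.  The key prefix and helper names
are assumptions for illustration only, not Voltha's actual key layout.

    import json
    import consul

    def persist_instance_data(consul_host, consul_port, store_id, data):
        # Write each entry of the instance's data dict under a
        # per-store-id prefix as individual KV pairs.
        c = consul.Consul(host=consul_host, port=consul_port)
        for key, value in data.items():
            c.kv.put('service/voltha/data/{}/{}'.format(store_id, key),
                     json.dumps(value))

    def load_instance_data(consul_host, consul_port, store_id):
        # Read every KV pair back for the given store id so a restarted
        # instance can rebuild its in-memory state before reconciling.
        c = consul.Consul(host=consul_host, port=consul_port)
        prefix = 'service/voltha/data/{}/'.format(store_id)
        _, entries = c.kv.get(prefix, recurse=True)
        return dict((e['Key'][len(prefix):], json.loads(e['Value']))
                    for e in (entries or []))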
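
Illustration for item 5 (reconcile sketch): reconciliation amounts to
rebuilding the per-device agents and callbacks from the loaded data and
then invoking reconcile on the owning adapter for each device.  The
classes below are placeholders to show the flow only; they are not
Voltha's real agent or adapter interfaces.

    class DeviceAgent(object):
        """Placeholder for a per-device agent rebuilt during reconciliation."""
        def __init__(self, device):
            self.device = device

        def register_callbacks(self):
            # Re-attach the callbacks that were active before the restart.
            pass

    class Adapter(object):
        """Placeholder adapter exposing a reconcile entry point."""
        def reconcile_device(self, device):
            # Adapter-side reconciliation against the restored device data.
            pass

    def reconcile(loaded_devices, adapters):
        # Recreate agents and callbacks for every persisted device, then
        # ask the owning adapter to reconcile its side.
        for device in loaded_devices:
            agent = DeviceAgent(device)
            agent.register_callbacks()
            adapters[device['adapter']].reconcile_device(device)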

Change-Id: I8c2bbae3b2fc79d0afd8ce3b7b0be6bde93e492a
diff --git a/common/utils/consulhelpers.py b/common/utils/consulhelpers.py
index 7eeac38..df4dd58 100644
--- a/common/utils/consulhelpers.py
+++ b/common/utils/consulhelpers.py
@@ -59,7 +59,7 @@
     items = services.keys()
 
     if number_of_expected_services is not None and \
-            len(items) != number_of_expected_services:
+                    len(items) != number_of_expected_services:
         return False
 
     for item in items:
@@ -70,7 +70,6 @@
 
 
 def get_all_services(consul_endpoint):
-
     log.debug('getting-service-verify-health')
 
     consul = connect_to_consult(consul_endpoint)
@@ -78,6 +77,23 @@
 
     return services
 
+
+def get_all_instances_of_service(consul_endpoint, service_name):
+    log.debug('getting-all-instances-of-service', service=service_name)
+
+    consul = connect_to_consult(consul_endpoint)
+    _, services = consul.catalog.service(service_name)
+
+    for service in services:
+        log.debug('service',
+                  name=service['ServiceName'],
+                  serviceid=service['ServiceID'],
+                  serviceport=service['ServicePort'],
+                  createindex=service['CreateIndex'])
+
+    return services
+
+
 def get_endpoint_from_consul(consul_endpoint, service_name):
     """
     Get endpoint of service_name from consul.
@@ -108,7 +124,7 @@
         if service['ServiceAddress'] == local_ipv4:
             log.debug("picking address locally")
             endpoint = '{}:{}'.format(service['ServiceAddress'],
-	                              service['ServicePort'])
+                                      service['ServicePort'])
             return endpoint
 
     """ If service is not available locally, picak a random
@@ -121,5 +137,42 @@
     return endpoint
 
 
+def get_healthy_instances(consul_endpoint, service_name=None,
+                          number_of_expected_services=None):
+    """
+    Verify in consul whether a given service, or all services, is healthy
+    :param consul_endpoint: a <host>:<port> string
+    :param service_name: name of service to check, optional
+    :param number_of_expected_services: number of services to check for, optional
+    :return: true if healthy, false otherwise
+    """
+
+    def check_health(service):
+        _, serv_health = consul.health.service(service, passing=True)
+        return serv_health != []
+
+    consul = connect_to_consult(consul_endpoint)
+
+    if service_name is not None:
+        return check_health(service_name)
+
+    services = get_all_services(consul_endpoint)
+
+    items = services.keys()
+
+    if number_of_expected_services is not None and \
+                    len(items) != number_of_expected_services:
+        return False
+
+    for item in items:
+        if not check_health(item):
+            return False
+
+    return True
+
+
 if __name__ == '__main__':
-    print get_endpoint_from_consul('10.100.198.220:8500', 'kafka')
+    # print get_endpoint_from_consul('10.100.198.220:8500', 'kafka')
+    # print get_healthy_instances('10.100.198.220:8500', 'voltha-health')
+    # print get_healthy_instances('10.100.198.220:8500')
+    get_all_instances_of_service('10.100.198.220:8500', 'voltha-grpc')
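
For reference, the two helpers introduced above can be exercised like
this, using the same example Consul address already present in the
__main__ block (replace it with a real <host>:<port> endpoint):

    from common.utils.consulhelpers import (get_all_instances_of_service,
                                            get_healthy_instances)

    # List every registered instance of the voltha-grpc service.
    instances = get_all_instances_of_service('10.100.198.220:8500',
                                             'voltha-grpc')

    # Check one specific service, then all registered services, against
    # their Consul health checks.
    print get_healthy_instances('10.100.198.220:8500', 'voltha-health')
    print get_healthy_instances('10.100.198.220:8500')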