Add graceful ONOS restart test case.
Add a new CLI command to shut down ONOS gracefully.
Change the cluster tests to avoid restarting nodes by container name:
with async restarts, the IP-to-container-name map can no longer be
matched against the nodes status output, so restart by controller IP instead.
Add a robot cluster controller test case that gracefully restarts controllers.

Change-Id: I1b2006e06e876c4456e24aa9237db5b2ea0ae795
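
For reference, a minimal sketch (not part of the patch) of the graceful-restart
flow the new code paths implement, using the onos_shutdown() helper and
cord_test_onos_restart() from the diff below; the helper name and fallback log
message are illustrative:

    # Sketch: gracefully restart one controller by IP, mirroring
    # cluster_controller_restarts(graceful = True) below.
    def graceful_restart(test, controller_ip):
        # Ask the ONOS instance to shut down cleanly before restarting
        # its container; fall through to a plain restart on failure.
        if not test.onos_shutdown(controller = controller_ip):
            log.info('Graceful shutdown failed for %s, restarting anyway' % controller_ip)
        # Restart by IP rather than container name, since with async
        # restarts the name-to-IP map cannot be matched to nodes status.
        cord_test_onos_restart(node = controller_ip, timeout = 0)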
diff --git a/src/test/cli/onosclidriver.py b/src/test/cli/onosclidriver.py
index eb64194..ca43f7a 100644
--- a/src/test/cli/onosclidriver.py
+++ b/src/test/cli/onosclidriver.py
@@ -487,6 +487,31 @@
             main.cleanup()
             main.exit()
 
+    def shutdown( self, timeout = 5):
+        """
+        Gracefully shuts down ONOS via the karaf 'shutdown now' command
+        """
+        try:
+            self.handle.sendline("shutdown now")
+            self.handle.expect("yes/no", timeout = timeout)
+            self.handle.sendline("yes")
+            return main.TRUE
+        except pexpect.TIMEOUT:
+            main.log.error( self.name + ": TIMEOUT exception found" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
     # IMPORTANT NOTE:
     # For all cli commands, naming convention should match
     # the cli command changing 'a:b' with 'aB'.
diff --git a/src/test/cluster/clusterTest.py b/src/test/cluster/clusterTest.py
index 5fa6d36..ea3c529 100644
--- a/src/test/cluster/clusterTest.py
+++ b/src/test/cluster/clusterTest.py
@@ -61,7 +61,8 @@
     acl = cluster_acl()
     dhcprelay = cluster_dhcprelay()
     subscriber = cluster_subscriber()
-    testcaseLoggers = ('test_cluster_controller_restarts', 'test_cluster_single_controller_restarts', 'test_cluster_restarts')
+    testcaseLoggers = ('test_cluster_controller_restarts', 'test_cluster_graceful_controller_restarts',
+                       'test_cluster_single_controller_restarts', 'test_cluster_restarts')
 
     def setUp(self):
         if self._testMethodName not in self.testcaseLoggers:
@@ -108,6 +109,18 @@
         self.cliExit()
         return result
 
+    def onos_shutdown(self, controller = None):
+        status = True
+        self.cliEnter(controller = controller)
+        try:
+            self.cli.shutdown(timeout = 10)
+        except:
+            log.info('Graceful shutdown of ONOS failed for controller: %s' %controller)
+            status = False
+
+        self.cliExit()
+        return status
+
     def log_set(self, level = None, app = 'org.onosproject', controllers = None):
         CordLogger.logSet(level = level, app = app, controllers = controllers, forced = True)
 
@@ -319,8 +332,7 @@
 	log.info('Cluster new master is %s'%new_master_ip)
 	return True
 
-    def test_cluster_controller_restarts(self):
-        '''Test the cluster by repeatedly killing the controllers'''
+    def cluster_controller_restarts(self, graceful = False):
         controllers = self.get_controllers()
         ctlr_len = len(controllers)
         if ctlr_len <= 1:
@@ -381,7 +393,9 @@
                 adjacent_controllers = list( set(controllers) - set([controller]) )
                 self.log_set(controllers = adjacent_controllers)
                 self.log_set(app = 'io.atomix', controllers = adjacent_controllers)
-                cord_test_onos_restart(node = controller_name, timeout = 0)
+                if graceful:
+                    self.onos_shutdown(controller)
+                cord_test_onos_restart(node = controller, timeout = 0)
                 self.log_set(controllers = controller)
                 self.log_set(app = 'io.atomix', controllers = controller)
                 time.sleep(60)
@@ -390,11 +404,19 @@
                 continue
 
             #first archive the test case logs for this run
-            CordLogger.archive_results('test_cluster_controller_restarts',
+            CordLogger.archive_results(self._testMethodName,
                                        controllers = controllers,
                                        iteration = 'iteration_{}'.format(num+1))
             next_controller = check_exception(controller = controller)
 
+    def test_cluster_controller_restarts(self):
+        '''Test the cluster by repeatedly killing the controllers'''
+        self.cluster_controller_restarts()
+
+    def test_cluster_graceful_controller_restarts(self):
+        '''Test the cluster by repeatedly restarting the controllers gracefully'''
+        self.cluster_controller_restarts(graceful = True)
+
     def test_cluster_single_controller_restarts(self):
         '''Test the cluster by repeatedly restarting the same controller'''
         controllers = self.get_controllers()
@@ -453,7 +475,7 @@
         for num in range(tries):
             log.info('ITERATION: %d. Shutting down Controller %s' %(num + 1, controller_name))
             try:
-                cord_test_onos_shutdown(node = controller_name)
+                cord_test_onos_shutdown(node = controller)
                 time.sleep(20)
             except:
                 time.sleep(5)
@@ -462,7 +484,7 @@
             check_exception(controller)
             #Now restart the controller back
             log.info('Restarting back the controller %s' %controller_name)
-            cord_test_onos_restart(node = controller_name)
+            cord_test_onos_restart(node = controller)
             self.log_set(controllers = controller)
             self.log_set(app = 'io.atomix', controllers = controller)
             time.sleep(60)
@@ -558,7 +580,7 @@
         onos_names_ips =  self.get_cluster_container_names_ips()
         master_onos_name = onos_names_ips[master]
         log.info('Removing cluster current master %s'%(master))
-        cord_test_onos_shutdown(node = master_onos_name)
+        cord_test_onos_shutdown(node = master)
         time.sleep(60)
         onos_instances -= 1
         status = self.verify_cluster_status(onos_instances = onos_instances,controller=standbys[0])
@@ -575,7 +597,7 @@
         onos_names_ips =  self.get_cluster_container_names_ips()
         member_onos_name = onos_names_ips[standbys[0]]
 	log.info('Removing cluster member %s'%standbys[0])
-        cord_test_onos_shutdown(node = member_onos_name)
+        cord_test_onos_shutdown(node = standbys[0])
 	time.sleep(60)
 	onos_instances -= 1
         status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
@@ -590,9 +612,9 @@
         member1_onos_name = onos_names_ips[standbys[0]]
         member2_onos_name = onos_names_ips[standbys[1]]
         log.info('Removing cluster member %s'%standbys[0])
-        cord_test_onos_shutdown(node = member1_onos_name)
+        cord_test_onos_shutdown(node = standbys[0])
         log.info('Removing cluster member %s'%standbys[1])
-        cord_test_onos_shutdown(node = member2_onos_name)
+        cord_test_onos_shutdown(node = standbys[1])
         time.sleep(60)
         onos_instances = onos_instances - 2
         status = self.verify_cluster_status(onos_instances = onos_instances,controller=master)
@@ -607,7 +629,7 @@
         for i in range(remove):
 	    member_onos_name = onos_names_ips[standbys[i]]
             log.info('Removing onos container with name %s'%standbys[i])
-            cord_test_onos_shutdown(node = member_onos_name)
+            cord_test_onos_shutdown(node = standbys[i])
         time.sleep(60)
         onos_instances = onos_instances - remove
         status = self.verify_cluster_status(onos_instances = onos_instances, controller=master)
@@ -668,7 +690,7 @@
         onos_names_ips =  self.get_cluster_container_names_ips()
         master_onos_name = onos_names_ips[master]
         log.info('Restarting cluster master %s'%master)
-        cord_test_onos_restart(node = master_onos_name)
+        cord_test_onos_restart(node = master)
         status = self.verify_cluster_status(onos_instances = onos_instances)
         assert_equal(status, True)
 	log.info('Cluster came up after master restart as expected')
@@ -680,8 +702,8 @@
         master1, standbys = self.get_cluster_current_master_standbys()
         onos_names_ips =  self.get_cluster_container_names_ips()
         master_onos_name = onos_names_ips[master1]
-        log.info('Restarting cluster master %s'%master)
-        cord_test_onos_restart(node = master_onos_name)
+        log.info('Restarting cluster master %s'%master1)
+        cord_test_onos_restart(node = master1)
         status = self.verify_cluster_status(onos_instances = onos_instances)
         assert_equal(status, True)
 	master2, standbys = self.get_cluster_current_master_standbys()
@@ -696,7 +718,7 @@
         onos_names_ips =  self.get_cluster_container_names_ips()
 	member_onos_name = onos_names_ips[standbys[0]]
         log.info('Restarting cluster member %s'%standbys[0])
-        cord_test_onos_restart(node = member_onos_name)
+        cord_test_onos_restart(node = standbys[0])
         status = self.verify_cluster_status(onos_instances = onos_instances)
         assert_equal(status, True)
 	log.info('Cluster came up as expected after restarting one member')
@@ -710,8 +732,8 @@
         member1_onos_name = onos_names_ips[standbys[0]]
         member2_onos_name = onos_names_ips[standbys[1]]
         log.info('Restarting cluster members %s and %s'%(standbys[0],standbys[1]))
-        cord_test_onos_restart(node = member1_onos_name)
-        cord_test_onos_restart(node = member2_onos_name)
+        cord_test_onos_restart(node = standbys[0])
+        cord_test_onos_restart(node = standbys[1])
         status = self.verify_cluster_status(onos_instances = onos_instances)
         assert_equal(status, True)
 	log.info('Cluster came up as expected after restarting two members')
@@ -725,7 +747,7 @@
 	for i in range(members):
             member_onos_name = onos_names_ips[standbys[i]]
 	    log.info('Restarting cluster member %s'%standbys[i])
-            cord_test_onos_restart(node = member_onos_name)
+            cord_test_onos_restart(node = standbys[i])
 
         status = self.verify_cluster_status(onos_instances = onos_instances)
         assert_equal(status, True)
@@ -779,7 +801,7 @@
         self.vrouter.setUpClass()
         res = self.vrouter.vrouter_network_verify(networks, peers = 1)
 	assert_equal(res,True)
-        cord_test_onos_shutdown(node = master_onos_name)
+        cord_test_onos_shutdown(node = master)
 	time.sleep(60)
 	log.info('Verifying vrouter traffic after cluster master is down')
 	self.vrouter.vrouter_traffic_verify()
@@ -826,7 +848,7 @@
         time.sleep(15) ## Expecting vrouter should work properly if master of cluster goes down
         self.vrouter.vrouter_traffic_verify(positive_test=False)
 	log.info('Verifying vrouter traffic after master down')
-        cord_test_onos_shutdown(node = master_onos_name)
+        cord_test_onos_shutdown(node = master)
 	time.sleep(60)
 	self.vrouter.vrouter_traffic_verify(positive_test=False)
         self.vrouter.vrouter_activate(deactivate=False)
@@ -843,7 +865,7 @@
         res = self.vrouter.vrouter_network_verify(networks, peers = 1)
         assert_equal(res, True) # Expecting vrouter should work properly
         log.info('Verifying vrouter after cluster member down')
-        cord_test_onos_shutdown(node = member_onos_name)
+        cord_test_onos_shutdown(node = standbys[0])
 	time.sleep(60)
 	self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster goes down
 
@@ -858,7 +880,7 @@
         log.info('Verifying vrouter traffic before cluster member restart')
         res = self.vrouter.vrouter_network_verify(networks, peers = 1)
         assert_equal(res, True) # Expecting vrouter should work properly
-        cord_test_onos_restart(node = member_onos_name)
+        cord_test_onos_restart(node = standbys[1])
 	log.info('Verifying vrouter traffic after cluster member restart')
         self.vrouter.vrouter_traffic_verify()# Expecting vrouter should work properly if member of cluster restarts
 
@@ -908,7 +930,7 @@
 
 	for i in [0,1]:
 	    if i == 1:
-                cord_test_onos_shutdown(node = master_onos_name)
+                cord_test_onos_shutdown(node = master)
                 log.info('Verifying flows traffic after master killed')
                 time.sleep(45)
 	    else:
@@ -1228,7 +1250,7 @@
         for i in [0,1]:
             if i == 1:
                 log.info('Killing cluster current master %s'%master)
-                cord_test_onos_shutdown(node = master_onos_name)
+                cord_test_onos_shutdown(node = master)
 		time.sleep(20)
                 status = self.verify_cluster_status(controller=standbys[0],onos_instances=onos_instances-1,verify=True)
 		assert_equal(status, True)
@@ -1626,7 +1648,7 @@
         num_channels = 10
 	for i in [0,1]:
 	    if i == 1:
-                cord_test_onos_shutdown(node = member_onos_name)
+                cord_test_onos_shutdown(node = standbys[0])
 		time.sleep(30)
 		status = self.verify_cluster_status(onos_instances=onos_instances-1,verify=True,controller=master)
                 assert_equal(status, True)
@@ -1655,8 +1677,8 @@
         num_channels = 10
 	for i in [0,1]:
 	    if i == 1:
-                cord_test_onos_shutdown(node = member1_onos_name)
-                cord_test_onos_shutdown(node = member2_onos_name)
+                cord_test_onos_shutdown(node = standbys[0])
+                cord_test_onos_shutdown(node = standbys[1])
 		time.sleep(60)
 		status = self.verify_cluster_status(onos_instances=onos_instances-2)
                 assert_equal(status, True)
diff --git a/src/test/robot/cluster_controller.robot b/src/test/robot/cluster_controller.robot
index ddc867d..d51e923 100644
--- a/src/test/robot/cluster_controller.robot
+++ b/src/test/robot/cluster_controller.robot
@@ -10,10 +10,16 @@
 
 *** Test Cases ***
 Verify Onos Controllers Restart Functionality
-  [Documentation]  Verify ONOS cluster by restarting controllers iteratively
+  [Documentation]  Verify ONOS cluster by restarting controllers
   ${rc}=  Run Cord Tester  cluster:cluster_exchange.test_cluster_controller_restarts
   Should Be Equal As Integers  ${rc}  0
 
+Verify Onos Controllers Graceful Restart Functionality
+  [Documentation]  Verify ONOS cluster by restarting controllers gracefully
+  Cord Setup
+  ${rc}=  Run Cord Tester  cluster:cluster_exchange.test_cluster_graceful_controller_restarts
+  Should Be Equal As Integers  ${rc}  0
+
 Verify Onos Single Controller Restart Functionality
   [Documentation]  Verify ONOS cluster by restarting the same controller
   Cord Setup
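
The new robot test case can be run on its own with something like the
following (a sketch, assuming the standard robot CLI and default paths):

    robot --test "Verify Onos Controllers Graceful Restart Functionality" src/test/robot/cluster_controller.robot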