Fix failing test case when consecutive device deletes occur

Change-Id: I3b43002587929b6c8840314b58e8e02b4b259efd
diff --git a/VERSION b/VERSION
index 6886b8a..dc3c5b9 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.7.0-dev
+2.7.1-dev
diff --git a/rw_core/core/api/grpc_nbi_handler_test.go b/rw_core/core/api/grpc_nbi_handler_test.go
index 8766c7c..ff6ff0b 100755
--- a/rw_core/core/api/grpc_nbi_handler_test.go
+++ b/rw_core/core/api/grpc_nbi_handler_test.go
@@ -544,7 +544,12 @@
 
 	//Ensure there are devices in the Core as delete was failed - wait until condition satisfied or timeout
 	var vFunction1 isDevicesConditionSatisfied = func(devices *voltha.Devices) bool {
-		return devices != nil && len(devices.Items) == (nb.numONUPerOLT+1)
+		state, err := nbi.GetTransientState(getContext(), oltDevice.Id)
+		if err != nil {
+			return false
+		}
+		return devices != nil && len(devices.Items) == (nb.numONUPerOLT+1) &&
+			state == voltha.DeviceTransientState_DELETE_FAILED
 	}
 	err = waitUntilConditionForDevices(nb.maxTimeout, nbi, vFunction1)
 	assert.Nil(t, err)
diff --git a/rw_core/core/device/manager.go b/rw_core/core/device/manager.go
index cbc6bb9..461a0b8 100755
--- a/rw_core/core/device/manager.go
+++ b/rw_core/core/device/manager.go
@@ -1649,3 +1649,11 @@
 	//TODO Instead of directly sending to the kafka bus, queue the message and send it asynchronously
 	dMgr.RPCEventManager.SendRPCEvent(ctx, id, rpcEvent, category, subCategory, raisedTs)
 }
+
+func (dMgr *Manager) GetTransientState(ctx context.Context, id string) (voltha.DeviceTransientState_Types, error) {
+	agent := dMgr.getDeviceAgent(ctx, id)
+	if agent == nil {
+		return voltha.DeviceTransientState_NONE, status.Errorf(codes.NotFound, "%s", id)
+	}
+	return agent.getTransientState(), nil
+}