[VOL-3069] Pass Context down the execution call hierarchy across voltha codebase

Change-Id: I97a2630d9a4fe5dc3161113539edda476534f186
diff --git a/rw_core/utils/request_queue.go b/rw_core/utils/request_queue.go
index 2c95e23..1e92690 100644
--- a/rw_core/utils/request_queue.go
+++ b/rw_core/utils/request_queue.go
@@ -36,7 +36,7 @@
 }
 
 // NewRequestQueue creates a new request queue
-func NewRequestQueue() *RequestQueue {
+func NewRequestQueue(ctx context.Context) *RequestQueue {
 	ch := make(chan struct{})
 	close(ch) // assume the "current" request is already complete
 	return &RequestQueue{lastCompleteCh: ch}
@@ -72,7 +72,7 @@
 			// chan has been closed, so the lock has been acquired
 			// context is canceled, so just release the lock immediately
 			rq.current = r
-			rq.releaseWithoutLock()
+			rq.releaseWithoutLock(ctx)
 		default:
 			// on abort, skip our position in the queue
 			r.prev.notifyOnComplete = r.notifyOnComplete
@@ -96,14 +96,14 @@
 
 // RequestComplete must be invoked by a process when it completes processing the request.  That process must have
 // invoked WaitForGreenLight() before.
-func (rq *RequestQueue) RequestComplete() {
+func (rq *RequestQueue) RequestComplete(ctx context.Context) {
 	rq.mutex.Lock()
 	defer rq.mutex.Unlock()
 
-	rq.releaseWithoutLock()
+	rq.releaseWithoutLock(ctx)
 }
 
-func (rq *RequestQueue) releaseWithoutLock() {
+func (rq *RequestQueue) releaseWithoutLock(ctx context.Context) {
 	// Notify the next waiting request.  This will panic if the lock is released more than once.
 	close(rq.current.notifyOnComplete)
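
For readers of the diff, a minimal caller-side sketch of the updated RequestQueue API follows. It assumes the signatures shown above (NewRequestQueue(ctx), RequestComplete(ctx)), that WaitForGreenLight(ctx) returns an error on context cancellation as the cancellation branch in the hunk suggests, and an import path of github.com/opencord/voltha-go/rw_core/utils; the processRequest helper and the timeout wiring are illustrative only and are not part of this change.

    package example

    import (
        "context"
        "time"

        // assumed import path for rw_core/utils
        "github.com/opencord/voltha-go/rw_core/utils"
    )

    // processRequest is a hypothetical caller that serializes its work
    // through the queue, propagating the request-scoped context.
    func processRequest(ctx context.Context, rq *utils.RequestQueue) error {
        // Wait for our turn; the context bounds how long we are willing to queue.
        if err := rq.WaitForGreenLight(ctx); err != nil {
            return err
        }
        // Release the queue once processing is done, passing the same context.
        defer rq.RequestComplete(ctx)

        // ... serialized work happens here ...
        return nil
    }

    func example() {
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()

        rq := utils.NewRequestQueue(ctx) // constructor now also receives a context
        _ = processRequest(ctx, rq)
    }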