VOL-3734 adding rpc events to the queue and sending to kafka from queue
Change-Id: I7a220fd3c7af0312ad20d4a15c0162b5c77f2044
diff --git a/pkg/events/events_proxy.go b/pkg/events/events_proxy.go
index ebd48ab..910fec3 100644
--- a/pkg/events/events_proxy.go
+++ b/pkg/events/events_proxy.go
@@ -17,11 +17,13 @@
package events
import (
+ "container/ring"
"context"
"errors"
"fmt"
"strconv"
"strings"
+ "sync"
"time"
"github.com/golang/protobuf/ptypes"
@@ -31,9 +33,17 @@
"github.com/opencord/voltha-protos/v4/go/voltha"
)
+// EVENT_THRESHOLD is the fixed capacity of the event ring buffer; when the
+// ring is full the oldest unsent event is overwritten (see EventQueue.push).
+// TODO: Make configurable through helm chart
+const EVENT_THRESHOLD = 1000
+
+// lastEvent is a sentinel pushed by Stop() to mark that no further events
+// will follow; both queue loops shut down when they see it.
+type lastEvent struct{}
+
+// EventProxy buffers events in an in-memory queue (eventQueue) and ships them
+// to the kafka bus from a dedicated goroutine (see Start). queueCtx and
+// queueCancelCtx control the lifetime of the queue goroutine.
 type EventProxy struct {
-	kafkaClient kafka.Client
-	eventTopic  kafka.Topic
+	kafkaClient    kafka.Client
+	eventTopic     kafka.Topic
+	eventQueue     *EventQueue
+	queueCtx       context.Context
+	queueCancelCtx context.CancelFunc
 }
func NewEventProxy(opts ...EventProxyOption) *EventProxy {
@@ -41,6 +51,8 @@
for _, option := range opts {
option(&proxy)
}
+ proxy.eventQueue = newEventQueue()
+ proxy.queueCtx, proxy.queueCancelCtx = context.WithCancel(context.Background())
return &proxy
}
@@ -112,15 +124,7 @@
return err
}
event.EventType = &voltha.Event_RpcEvent{RpcEvent: rpcEvent}
- if err := ep.sendEvent(ctx, &event); err != nil {
- logger.Errorw(ctx, "Failed to send rpc event to KAFKA bus", log.Fields{"rpc-event": rpcEvent})
- return err
- }
- logger.Debugw(ctx, "Successfully sent RPC event to KAFKA bus", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
- "SubCategory": event.Header.SubCategory, "Type": event.Header.Type, "TypeVersion": event.Header.TypeVersion,
- "ReportedTs": event.Header.ReportedTs, "ResourceId": rpcEvent.ResourceId, "Context": rpcEvent.Context,
- "RPCEventName": id})
-
+ ep.eventQueue.push(&event)
return nil
}
@@ -195,3 +199,155 @@
+// SendLiveness passes a liveness probe straight through to the kafka client.
 func (ep *EventProxy) SendLiveness(ctx context.Context) error {
 	return ep.kafkaClient.SendLiveness(ctx)
 }
+
+
+// Start runs the event-proxy dispatch loop: it launches the queue's internal
+// goroutine and then repeatedly signals readiness, waits for one event, and
+// forwards it to the kafka bus. It blocks until either the event channel is
+// closed or the lastEvent sentinel (queued by Stop) arrives, so callers
+// normally invoke it as `go ep.Start()`.
+func (ep *EventProxy) Start() {
+	eq := ep.eventQueue
+	go eq.start(ep.queueCtx)
+	// Fixed typo in log field key: "events-threashold" -> "events-threshold".
+	logger.Debugw(context.Background(), "event-proxy-starting...", log.Fields{"events-threshold": EVENT_THRESHOLD})
+	for {
+		// Notify the queue I am ready
+		eq.readyToSendToKafkaCh <- struct{}{}
+		// Wait for an event
+		elem, ok := <-eq.eventChannel
+		if !ok {
+			logger.Debug(context.Background(), "event-channel-closed-exiting")
+			break
+		}
+		// Check for last event
+		if _, ok := elem.(*lastEvent); ok {
+			// close the queuing loop
+			logger.Info(context.Background(), "received-last-event")
+			ep.queueCancelCtx()
+			break
+		}
+		ctx := context.Background()
+		event, ok := elem.(*voltha.Event)
+		if !ok {
+			logger.Warnw(ctx, "invalid-event", log.Fields{"element": elem})
+			continue
+		}
+		if err := ep.sendEvent(ctx, event); err != nil {
+			logger.Errorw(ctx, "failed-to-send-event-to-kafka-bus", log.Fields{"event": event})
+		} else {
+			logger.Debugw(ctx, "successfully-sent-rpc-event-to-kafka-bus", log.Fields{"id": event.Header.Id, "category": event.Header.Category,
+				"sub-category": event.Header.SubCategory, "type": event.Header.Type, "type-version": event.Header.TypeVersion,
+				"reported-ts": event.Header.ReportedTs, "event-type": event.EventType})
+		}
+	}
+}
+
+// Stop flushes the queue by enqueuing the lastEvent sentinel and blocks
+// until the queue goroutine has shut down (see EventQueue.stop).
+func (ep *EventProxy) Stop() {
+	ep.eventQueue.stop()
+}
+
+// EventQueue is a fixed-size ring buffer (EVENT_THRESHOLD slots) plus the
+// channels used to hand events, one at a time, to the kafka sender loop.
+type EventQueue struct {
+	// mutex guards insertPosition and popPosition.
+	mutex sync.RWMutex
+	// eventChannel delivers a single dequeued event to EventProxy.Start.
+	eventChannel chan interface{}
+	// insertPosition is the next ring slot to write; nil once the queue is
+	// closed to new pushes (after the lastEvent sentinel is queued).
+	insertPosition *ring.Ring
+	// popPosition is the next ring slot to read.
+	popPosition *ring.Ring
+	// dataToSendAvailable is signaled by push after enqueuing an event.
+	dataToSendAvailable chan struct{}
+	// readyToSendToKafkaCh is signaled by the sender loop when it is ready
+	// to receive the next event on eventChannel.
+	readyToSendToKafkaCh chan struct{}
+	// eventQueueStopped is signaled by start() as it exits.
+	eventQueueStopped chan struct{}
+}
+
+// newEventQueue builds an EventQueue whose ring holds EVENT_THRESHOLD
+// entries; the insert and pop cursors start at the same (empty) slot.
+func newEventQueue() *EventQueue {
+	ev := &EventQueue{
+		eventChannel:         make(chan interface{}),
+		insertPosition:       ring.New(EVENT_THRESHOLD),
+		dataToSendAvailable:  make(chan struct{}),
+		readyToSendToKafkaCh: make(chan struct{}),
+		eventQueueStopped:    make(chan struct{}),
+	}
+	ev.popPosition = ev.insertPosition
+	return ev
+}
+
+// push appends an event at the back of the queue. If the ring is full the
+// oldest unsent event is overwritten. Pushing a *lastEvent closes the queue
+// to further writes (insertPosition becomes nil); later pushes are dropped
+// with a debug log. NOTE(review): the unblocked send on dataToSendAvailable
+// can block the caller until the queue goroutine drains it — confirm
+// producers tolerate this if start() has already exited.
+func (eq *EventQueue) push(event interface{}) {
+	eq.mutex.Lock()
+
+	if eq.insertPosition != nil {
+		// Handle Queue is full.
+		// TODO: Current default is to overwrite old data if queue is full. Is there a need to
+		// block caller if max threshold is reached?
+		if eq.insertPosition.Value != nil && eq.insertPosition == eq.popPosition {
+			eq.popPosition = eq.popPosition.Next()
+		}
+
+		// Insert data and move pointer to next empty position
+		eq.insertPosition.Value = event
+		eq.insertPosition = eq.insertPosition.Next()
+
+		// Check for last event
+		if _, ok := event.(*lastEvent); ok {
+			eq.insertPosition = nil
+		}
+		eq.mutex.Unlock()
+		// Notify waiting thread of data availability
+		eq.dataToSendAvailable <- struct{}{}
+
+	} else {
+		logger.Debug(context.Background(), "event-queue-is-closed-as-insert-position-is-cleared")
+		eq.mutex.Unlock()
+	}
+}
+
+// start is the queue-side goroutine loop: it waits until the kafka sender
+// (EventProxy.Start) signals readiness on readyToSendToKafkaCh, pops the
+// next element off the ring, and hands it over on eventChannel. It exits
+// when ctx is cancelled, or when dataToSendAvailable is closed while it is
+// waiting for data, signalling eventQueueStopped on the way out.
+func (eq *EventQueue) start(ctx context.Context) {
+	logger.Info(ctx, "starting-event-queue")
+loop:
+	for {
+		select {
+		case <-eq.dataToSendAvailable:
+			// Do nothing - use to prevent caller pushing data to block
+		case <-eq.readyToSendToKafkaCh:
+			{
+				// Kafka sending routine is ready to process an event
+				eq.mutex.Lock()
+				element := eq.popPosition.Value
+				if element == nil {
+					// No events to send. Wait
+					eq.mutex.Unlock()
+					select {
+					case _, ok := <-eq.dataToSendAvailable:
+						if !ok {
+							// channel closed
+							eq.eventQueueStopped <- struct{}{}
+							return
+						}
+					case <-ctx.Done():
+						logger.Info(ctx, "event-queue-context-done")
+						eq.eventQueueStopped <- struct{}{}
+						return
+					}
+					// NOTE(review): assumes this wake-up means popPosition now
+					// holds a value — confirm no interleaving can leave it nil.
+					eq.mutex.Lock()
+					element = eq.popPosition.Value
+				}
+				// Consume the slot and advance the read cursor.
+				eq.popPosition.Value = nil
+				eq.popPosition = eq.popPosition.Next()
+				eq.mutex.Unlock()
+				eq.eventChannel <- element
+			}
+		case <-ctx.Done():
+			logger.Info(ctx, "event-queue-context-done")
+			eq.eventQueueStopped <- struct{}{}
+			break loop
+		}
+	}
+	logger.Info(ctx, "event-queue-stopped")
+
+}
+
+// stop requests shutdown by queuing the lastEvent sentinel (events already
+// queued ahead of it are still delivered), waits for the queue goroutine to
+// acknowledge on eventQueueStopped, then closes all of the queue's channels
+// under the lock.
+func (eq *EventQueue) stop() {
+	// Flush all
+	eq.push(&lastEvent{})
+	<-eq.eventQueueStopped
+	eq.mutex.Lock()
+	close(eq.readyToSendToKafkaCh)
+	close(eq.dataToSendAvailable)
+	close(eq.eventChannel)
+	eq.mutex.Unlock()
+
+}
diff --git a/pkg/events/events_proxy_test.go b/pkg/events/events_proxy_test.go
new file mode 100644
index 0000000..119df28
--- /dev/null
+++ b/pkg/events/events_proxy_test.go
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package events
+
+import (
+ "context"
+ "github.com/golang/protobuf/ptypes"
+ "github.com/opencord/voltha-lib-go/v4/pkg/kafka"
+ "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ mock_kafka "github.com/opencord/voltha-lib-go/v4/pkg/mocks/kafka"
+ "github.com/opencord/voltha-protos/v4/go/common"
+ "github.com/opencord/voltha-protos/v4/go/voltha"
+ "github.com/stretchr/testify/assert"
+ "strconv"
+ "testing"
+ "time"
+)
+
+// Upper bounds for the polling helpers below.
+const waitForKafkaEventsTimeout = 20 * time.Second
+const waitForEventProxyTimeout = 10 * time.Second
+
+// waitForEventProxyToStop polls every 2ms until the proxy's queue has been
+// closed to new pushes (insertPosition == nil), sending "ok" on resp, or
+// sends "timer expired" once waitForEventProxyTimeout elapses.
+func waitForEventProxyToStop(ep *EventProxy, resp chan string) {
+	timer := time.NewTimer(waitForEventProxyTimeout)
+	defer timer.Stop()
+	for {
+		select {
+		case <-time.After(2 * time.Millisecond):
+			if ep.eventQueue.insertPosition == nil {
+				resp <- "ok"
+				return
+			}
+		case <-timer.C:
+			resp <- "timer expired"
+			return
+		}
+
+	}
+}
+// waitForKafkaEvents subscribes to topic and reports "ok" on resp once
+// numEvents voltha.Event messages have been unmarshalled from the channel,
+// or "timer expired" after waitForKafkaEventsTimeout. It unsubscribes on
+// exit and forwards any subscribe error on resp.
+func waitForKafkaEvents(kc kafka.Client, topic *kafka.Topic, numEvents int, resp chan string) {
+	kafkaChnl, err := kc.Subscribe(context.Background(), topic)
+	if err != nil {
+		resp <- err.Error()
+		return
+	}
+	defer func() {
+		if kafkaChnl != nil {
+			if err = kc.UnSubscribe(context.Background(), topic, kafkaChnl); err != nil {
+				logger.Errorw(context.Background(), "unsubscribe-failed", log.Fields{"error": err})
+			}
+		}
+	}()
+	timer := time.NewTimer(waitForKafkaEventsTimeout)
+	defer timer.Stop()
+	count := 0
+loop:
+	for {
+		select {
+		case msg := <-kafkaChnl:
+			if msg.Body != nil {
+				event := voltha.Event{}
+				// Count only messages that decode as a voltha Event.
+				if err := ptypes.UnmarshalAny(msg.Body, &event); err == nil {
+					count += 1
+					if count == numEvents {
+						resp <- "ok"
+						break loop
+					}
+				}
+			}
+		case <-timer.C:
+			resp <- "timer expired"
+			break loop
+		}
+	}
+}
+
+func createAndSendEvent(proxy *EventProxy, ID string) error {
+ eventMsg := &voltha.RPCEvent{
+ Rpc: "dummy",
+ OperationId: ID,
+ ResourceId: "dummy",
+ Service: "dummy",
+ StackId: "dummy",
+ Status: &common.OperationResp{
+ Code: common.OperationResp_OPERATION_FAILURE,
+ },
+ Description: "dummy",
+ Context: nil,
+ }
+ var event voltha.Event
+ raisedTS := time.Now().Unix()
+ event.Header, _ = proxy.getEventHeader("RPC_ERROR_RAISE_EVENT", voltha.EventCategory_COMMUNICATION, nil, voltha.EventType_RPC_EVENT, raisedTS)
+ event.EventType = &voltha.Event_RpcEvent{RpcEvent: eventMsg}
+ err := proxy.SendRPCEvent(context.Background(), "RPC_ERROR_RAISE_EVENT", eventMsg, voltha.EventCategory_COMMUNICATION,
+ nil, time.Now().Unix())
+ return err
+}
+
+// TestEventProxyReceiveAndSendMessage verifies the happy path: 10 events
+// pushed concurrently all arrive on the (mock) kafka topic, and the proxy
+// then stops cleanly.
+func TestEventProxyReceiveAndSendMessage(t *testing.T) {
+	// Init Kafka client
+	log.SetAllLogLevel(log.FatalLevel)
+	cTkc := mock_kafka.NewKafkaClient()
+	topic := kafka.Topic{Name: "myTopic"}
+
+	numEvents := 10
+	resp := make(chan string)
+	go waitForKafkaEvents(cTkc, &topic, numEvents, resp)
+
+	// Init Event Proxy
+	ep := NewEventProxy(MsgClient(cTkc), MsgTopic(topic))
+	go ep.Start()
+	time.Sleep(1 * time.Millisecond)
+	for i := 0; i < numEvents; i++ {
+		go func(ID int) {
+			err := createAndSendEvent(ep, strconv.Itoa(ID))
+			assert.Nil(t, err)
+		}(i)
+	}
+	val := <-resp
+	assert.Equal(t, val, "ok")
+	go ep.Stop()
+	go waitForEventProxyToStop(ep, resp)
+	val = <-resp
+	assert.Equal(t, val, "ok")
+}
+
+// TestEventProxyStopWhileSendingEvents stops the proxy while producers are
+// still pushing; it only asserts a clean shutdown, since delivery of events
+// racing with Stop is not guaranteed.
+func TestEventProxyStopWhileSendingEvents(t *testing.T) {
+	// Init Kafka client
+	log.SetAllLogLevel(log.FatalLevel)
+	cTkc := mock_kafka.NewKafkaClient()
+	topic := kafka.Topic{Name: "myTopic"}
+
+	numEvents := 10
+	resp := make(chan string)
+	// Init Event Proxy
+	ep := NewEventProxy(MsgClient(cTkc), MsgTopic(topic))
+	go ep.Start()
+	time.Sleep(1 * time.Millisecond)
+	for i := 0; i < numEvents; i++ {
+		go func(ID int) {
+			err := createAndSendEvent(ep, strconv.Itoa(ID))
+			assert.Nil(t, err)
+		}(i)
+	}
+	// In this case we cannot guarantee how many events are send before
+	// sending the last event(stopping event proxy), any event send before Stop would be received.
+	go ep.Stop()
+	go waitForEventProxyToStop(ep, resp)
+	val := <-resp
+	assert.Equal(t, val, "ok")
+}
+
+// TestEventProxyStopWhenNoEventsSend verifies that Stop terminates the proxy
+// cleanly when nothing was ever queued.
+func TestEventProxyStopWhenNoEventsSend(t *testing.T) {
+	// Init Kafka client
+	log.SetAllLogLevel(log.FatalLevel)
+	cTkc := mock_kafka.NewKafkaClient()
+	topic := kafka.Topic{Name: "myTopic"}
+	resp := make(chan string)
+	// Init Event Proxy
+	ep := NewEventProxy(MsgClient(cTkc), MsgTopic(topic))
+	go ep.Start()
+	time.Sleep(1 * time.Millisecond)
+	go ep.Stop()
+	go waitForEventProxyToStop(ep, resp)
+	val := <-resp
+	assert.Equal(t, val, "ok")
+}