[VOL-1349] EPON OLT adapter (package B)
Change-Id: I634ef62c53813dcf4456f54948f13e06358e263c
diff --git a/cmd/openolt-adapter/common.go b/cmd/openolt-adapter/common.go
new file mode 100644
index 0000000..c12b743
--- /dev/null
+++ b/cmd/openolt-adapter/common.go
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package main - common logger initialization for the openolt-adapter command
+package main
+
+import (
+ "github.com/opencord/voltha-lib-go/v3/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+ // Register this package so that its log level can be modified at run time
+ var err error
+ logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+ if err != nil {
+ panic(err)
+ }
+}
diff --git a/cmd/openolt-adapter/main.go b/cmd/openolt-adapter/main.go
new file mode 100644
index 0000000..19da3d7
--- /dev/null
+++ b/cmd/openolt-adapter/main.go
@@ -0,0 +1,536 @@
+/*
+* Copyright 2018-present Open Networking Foundation
+
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+
+* http://www.apache.org/licenses/LICENSE-2.0
+
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+ */
+
+// Package main invokes the application
+package main
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "os/signal"
+ "syscall"
+ "time"
+
+ "github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif"
+
+ "github.com/opencord/voltha-lib-go/v3/pkg/adapters"
+ com "github.com/opencord/voltha-lib-go/v3/pkg/adapters/common"
+ conf "github.com/opencord/voltha-lib-go/v3/pkg/config"
+ "github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
+ "github.com/opencord/voltha-lib-go/v3/pkg/kafka"
+ "github.com/opencord/voltha-lib-go/v3/pkg/log"
+ "github.com/opencord/voltha-lib-go/v3/pkg/probe"
+ "github.com/opencord/voltha-lib-go/v3/pkg/version"
+ "github.com/opencord/voltha-openolt-adapter/internal/pkg/config"
+ ac "github.com/opencord/voltha-openolt-adapter/internal/pkg/core"
+ ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+ "github.com/opencord/voltha-protos/v3/go/voltha"
+)
+
+type adapter struct {
+ instanceID string
+ config *config.AdapterFlags
+ iAdapter adapters.IAdapter
+ kafkaClient kafka.Client
+ kvClient kvstore.Client
+ kip kafka.InterContainerProxy
+ coreProxy adapterif.CoreProxy
+ adapterProxy adapterif.AdapterProxy
+ eventProxy adapterif.EventProxy
+ halted bool
+ exitChannel chan int
+ receiverChannels []<-chan *ic.InterContainerMessage
+}
+
+func newAdapter(cf *config.AdapterFlags) *adapter {
+ var a adapter
+ a.instanceID = cf.InstanceID
+ a.config = cf
+ a.halted = false
+ a.exitChannel = make(chan int, 1)
+ a.receiverChannels = make([]<-chan *ic.InterContainerMessage, 0)
+ return &a
+}
+
+func (a *adapter) start(ctx context.Context) {
+ logger.Info(ctx, "Starting Core Adapter components")
+ var err error
+
+ var p *probe.Probe
+ if value := ctx.Value(probe.ProbeContextKey); value != nil {
+ if _, ok := value.(*probe.Probe); ok {
+ p = value.(*probe.Probe)
+ p.RegisterService(
+ ctx,
+ "message-bus",
+ "kv-store",
+ "container-proxy",
+ "core-request-handler",
+ "register-with-core",
+ )
+ }
+ }
+
+ // Setup KV Client
+ logger.Debugw(ctx, "create-kv-client", log.Fields{"kvstore": a.config.KVStoreType})
+ if err = a.setKVClient(ctx); err != nil {
+ logger.Fatalw(ctx, "error-setting-kv-client", log.Fields{"error": err})
+ }
+
+ if p != nil {
+ p.UpdateStatus(ctx, "kv-store", probe.ServiceStatusRunning)
+ }
+
+ // Setup Log Config
+ cm := conf.NewConfigManager(ctx, a.kvClient, a.config.KVStoreType, a.config.KVStoreAddress, a.config.KVStoreTimeout)
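+ // These watchers pick up log level and log feature changes from the KV store at run time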
+ go conf.StartLogLevelConfigProcessing(cm, ctx)
+ go conf.StartLogFeaturesConfigProcessing(cm, ctx)
+
+ // Setup Kafka Client
+ if a.kafkaClient, err = newKafkaClient(ctx, "sarama", a.config.KafkaAdapterAddress); err != nil {
+ logger.Fatalw(ctx, "Unsupported-common-client", log.Fields{"error": err})
+ }
+
+ if p != nil {
+ p.UpdateStatus(ctx, "message-bus", probe.ServiceStatusRunning)
+ }
+
+ // Start the common InterContainer Proxy - retries indefinitely
+ if a.kip, err = a.startInterContainerProxy(ctx, -1); err != nil {
+ logger.Fatal(ctx, "error-starting-inter-container-proxy")
+ }
+
+ // Create the core proxy to handle requests to the Core
+ a.coreProxy = com.NewCoreProxy(ctx, a.kip, a.config.Topic, a.config.CoreTopic)
+
+ // Create the adapter proxy to handle requests between the OLT and ONU adapters
+ a.adapterProxy = com.NewAdapterProxy(ctx, a.kip, a.config.CoreTopic, cm.Backend)
+
+ // Create the event proxy to post events to KAFKA
+ a.eventProxy = com.NewEventProxy(com.MsgClient(a.kafkaClient), com.MsgTopic(kafka.Topic{Name: a.config.EventTopic}))
+
+ // Create the open OLT adapter
+ if a.iAdapter, err = a.startOpenOLT(ctx, a.kip, a.coreProxy, a.adapterProxy, a.eventProxy, a.config); err != nil {
+ logger.Fatalw(ctx, "error-starting-openolt", log.Fields{"error": err})
+ }
+
+ // Register the core request handler
+ if err = a.setupRequestHandler(ctx, a.instanceID, a.iAdapter); err != nil {
+ logger.Fatalw(ctx, "error-setting-core-request-handler", log.Fields{"error": err})
+ }
+
+ // Register this adapter to the Core - retries indefinitely
+ if err = a.registerWithCore(ctx, -1); err != nil {
+ logger.Fatal(ctx, "error-registering-with-core")
+ }
+
+ // Check the readiness and liveness of the services and update the probe status
+ a.checkServicesReadiness(ctx)
+}
+
+// checkServicesReadiness checks the liveness and readiness of the Kafka and
+// kv-store services and updates their status in the probe.
+func (a *adapter) checkServicesReadiness(ctx context.Context) {
+ // checks the kafka readiness
+ go a.checkKafkaReadiness(ctx)
+
+ // checks the kv-store readiness
+ go a.checkKvStoreReadiness(ctx)
+}
+
+// checkKvStoreReadiness checks the liveness and readiness of the kv-store service
+// and updates its status in the probe.
+func (a *adapter) checkKvStoreReadiness(ctx context.Context) {
+ // Divide the live probe interval by 2 so the kv-store status is refreshed at twice the probe rate
+ timeout := a.config.LiveProbeInterval / 2
+ kvStoreChannel := make(chan bool, 1)
+
+ // Seed the channel with false so the first iteration reports not-ready until the first real check completes
+ kvStoreChannel <- false
+ for {
+ timeoutTimer := time.NewTimer(timeout)
+ select {
+ case liveliness := <-kvStoreChannel:
+ if !liveliness {
+ // kv-store not reachable or down, updating the status to not ready state
+ probe.UpdateStatusFromContext(ctx, "kv-store", probe.ServiceStatusNotReady)
+ timeout = a.config.NotLiveProbeInterval
+ } else {
+ // kv-store is reachable, updating the status to running state
+ probe.UpdateStatusFromContext(ctx, "kv-store", probe.ServiceStatusRunning)
+ timeout = a.config.LiveProbeInterval / 2
+ }
+ // Stop the timer; if it already fired, drain its channel so a stale tick is not consumed on the next iteration
+ if !timeoutTimer.Stop() {
+ <-timeoutTimer.C
+ }
+ case <-timeoutTimer.C:
+ // Check the status of the kv-store. Use a 2-second timeout to avoid blocking forever
+ logger.Info(ctx, "kv-store liveliness-recheck")
+ timeoutCtx, cancelFunc := context.WithTimeout(ctx, 2*time.Second)
+
+ kvStoreChannel <- a.kvClient.IsConnectionUp(timeoutCtx)
+ // Cleanup cancel func resources
+ cancelFunc()
+ }
+ }
+}
+
+// checkKafkaReadiness checks the liveness and readiness of the Kafka service
+// and updates its status in the probe.
+func (a *adapter) checkKafkaReadiness(ctx context.Context) {
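+ // The Kafka client reports liveness and healthiness transitions on the channels below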
+ livelinessChannel := a.kafkaClient.EnableLivenessChannel(ctx, true)
+ healthinessChannel := a.kafkaClient.EnableHealthinessChannel(ctx, true)
+ timeout := a.config.LiveProbeInterval
+ failed := false
+ for {
+ timeoutTimer := time.NewTimer(timeout)
+
+ select {
+ case healthiness := <-healthinessChannel:
+ if !healthiness {
+ // This will eventually cause K8s to restart the container, and will do
+ // so in a way that allows cleanup to continue, rather than an immediate
+ // panic and exit here.
+ probe.UpdateStatusFromContext(ctx, "message-bus", probe.ServiceStatusFailed)
+ failed = true
+ }
+ // Stop the timer; if it already fired, drain its channel so a stale tick is not consumed on the next iteration
+ if !timeoutTimer.Stop() {
+ <-timeoutTimer.C
+ }
+ case liveliness := <-livelinessChannel:
+ if failed {
+ // Failures of the message bus are treated as permanent and cannot be recovered
+ // from, so never overwrite the failed state with not-ready or running.
+ } else if !liveliness {
+ // kafka not reachable or down, updating the status to not ready state
+ probe.UpdateStatusFromContext(ctx, "message-bus", probe.ServiceStatusNotReady)
+ timeout = a.config.NotLiveProbeInterval
+ } else {
+ // kafka is reachable, updating the status to running state
+ probe.UpdateStatusFromContext(ctx, "message-bus", probe.ServiceStatusRunning)
+ timeout = a.config.LiveProbeInterval
+ }
+ // Stop the timer; if it already fired, drain its channel so a stale tick is not consumed on the next iteration
+ if !timeoutTimer.Stop() {
+ <-timeoutTimer.C
+ }
+ case <-timeoutTimer.C:
+ logger.Info(ctx, "kafka-proxy-liveness-recheck")
+ // Trigger a liveness check on the Kafka client; the result is reported back
+ // asynchronously on the liveness channel handled above.
+ err := a.kafkaClient.SendLiveness(ctx)
+ if err != nil {
+ // Catch possible error case if sending liveness after Sarama has been stopped.
+ logger.Warnw(ctx, "error-kafka-send-liveness", log.Fields{"error": err})
+ }
+ }
+ }
+}
+
+func (a *adapter) stop(ctx context.Context) {
+ // Mark the adapter as halted
+ a.halted = true
+
+ // send exit signal
+ a.exitChannel <- 0
+
+ // Cleanup - applies only if we had a kvClient
+ if a.kvClient != nil {
+ // Release all reservations
+ if err := a.kvClient.ReleaseAllReservations(ctx); err != nil {
+ logger.Infow(ctx, "fail-to-release-all-reservations", log.Fields{"error": err})
+ }
+ // Close the DB connection
+ a.kvClient.Close(ctx)
+ }
+
+ if a.kip != nil {
+ a.kip.Stop(ctx)
+ }
+
+ // TODO: More cleanup
+}
+
+func newKVClient(ctx context.Context, storeType, address string, timeout time.Duration) (kvstore.Client, error) {
+
+ logger.Infow(ctx, "kv-store-type", log.Fields{"store": storeType})
+ switch storeType {
+ case "consul":
+ return kvstore.NewConsulClient(ctx, address, timeout)
+ case "etcd":
+ return kvstore.NewEtcdClient(ctx, address, timeout, log.FatalLevel)
+ }
+ return nil, errors.New("unsupported-kv-store")
+}
+
+func newKafkaClient(ctx context.Context, clientType, address string) (kafka.Client, error) {
+
+ logger.Infow(ctx, "common-client-type", log.Fields{"client": clientType})
+ switch clientType {
+ case "sarama":
+ return kafka.NewSaramaClient(
+ kafka.Address(address),
+ kafka.ProducerReturnOnErrors(true),
+ kafka.ProducerReturnOnSuccess(true),
+ kafka.ProducerMaxRetries(6),
+ kafka.ProducerRetryBackoff(time.Millisecond*30),
+ kafka.MetadatMaxRetries(15)), nil
+ }
+
+ return nil, errors.New("unsupported-client-type")
+}
+
+func (a *adapter) setKVClient(ctx context.Context) error {
+ client, err := newKVClient(ctx, a.config.KVStoreType, a.config.KVStoreAddress, a.config.KVStoreTimeout)
+ if err != nil {
+ a.kvClient = nil
+ return err
+ }
+ a.kvClient = client
+
+ return nil
+}
+
+func (a *adapter) startInterContainerProxy(ctx context.Context, retries int) (kafka.InterContainerProxy, error) {
+ logger.Infow(ctx, "starting-intercontainer-messaging-proxy", log.Fields{"address": a.config.KafkaAdapterAddress,
+ "topic": a.config.Topic})
+ var err error
+ kip := kafka.NewInterContainerProxy(
+ kafka.InterContainerAddress(a.config.KafkaAdapterAddress),
+ kafka.MsgClient(a.kafkaClient),
+ kafka.DefaultTopic(&kafka.Topic{Name: a.config.Topic}))
+ count := 0
+ for {
+ if err = kip.Start(ctx); err != nil {
+ logger.Warnw(ctx, "error-starting-messaging-proxy", log.Fields{"error": err})
+ if retries == count {
+ return nil, err
+ }
+ count++
+ // Take a nap before retrying
+ time.Sleep(2 * time.Second)
+ } else {
+ break
+ }
+ }
+ probe.UpdateStatusFromContext(ctx, "container-proxy", probe.ServiceStatusRunning)
+ logger.Info(ctx, "common-messaging-proxy-created")
+ return kip, nil
+}
+
+func (a *adapter) startOpenOLT(ctx context.Context, kip kafka.InterContainerProxy,
+ cp adapterif.CoreProxy, ap adapterif.AdapterProxy, ep adapterif.EventProxy,
+ cfg *config.AdapterFlags) (*ac.OpenOLT, error) {
+ logger.Info(ctx, "starting-open-olt")
+ var err error
+ sOLT := ac.NewOpenOLT(ctx, a.kip, cp, ap, ep, cfg)
+
+ if err = sOLT.Start(ctx); err != nil {
+ return nil, err
+ }
+
+ logger.Info(ctx, "open-olt-started")
+ return sOLT, nil
+}
+
+func (a *adapter) setupRequestHandler(ctx context.Context, coreInstanceID string, iadapter adapters.IAdapter) error {
+ logger.Info(ctx, "setting-request-handler")
+ requestProxy := com.NewRequestHandlerProxy(coreInstanceID, iadapter, a.coreProxy)
+ if err := a.kip.SubscribeWithRequestHandlerInterface(ctx, kafka.Topic{Name: a.config.Topic}, requestProxy); err != nil {
+ return err
+
+ }
+ probe.UpdateStatusFromContext(ctx, "core-request-handler", probe.ServiceStatusRunning)
+ logger.Info(ctx, "request-handler-setup-done")
+ return nil
+}
+
+func (a *adapter) registerWithCore(ctx context.Context, retries int) error {
+ adapterID := fmt.Sprintf("eponolt_%d", a.config.CurrentReplica)
+ logger.Infow(ctx, "registering-with-core", log.Fields{
+ "adapterID": adapterID,
+ "currentReplica": a.config.CurrentReplica,
+ "totalReplicas": a.config.TotalReplicas,
+ })
+ adapterDescription := &voltha.Adapter{
+ Id: adapterID, // Unique name of the adapter instance
+ Vendor: "VOLTHA EPON OLT",
+ Version: version.VersionInfo.Version,
+ // TODO: once we are ready to support multiple versions of this adapter,
+ // the Endpoint will have to change to a per-replica value such as `eponolt_<currentReplica>`
+ Endpoint: a.config.Topic,
+ Type: "eponolt",
+ CurrentReplica: int32(a.config.CurrentReplica),
+ TotalReplicas: int32(a.config.TotalReplicas),
+ }
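+ // Device types served by this adapter; the Core uses them to dispatch device requests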
+ types := []*voltha.DeviceType{{
+ Id: "eponolt",
+ Adapter: "eponolt", // Name of the adapter that handles this device type
+ AcceptsBulkFlowUpdate: false, // Bulk flow handling is currently not supported
+ AcceptsAddRemoveFlowUpdates: true}}
+ deviceTypes := &voltha.DeviceTypes{Items: types}
+ count := 0
+ for {
+ if err := a.coreProxy.RegisterAdapter(log.WithSpanFromContext(context.TODO(), ctx), adapterDescription, deviceTypes); err != nil {
+ logger.Warnw(ctx, "registering-with-core-failed", log.Fields{"error": err})
+ if retries == count {
+ return err
+ }
+ count++
+ // Take a nap before retrying
+ time.Sleep(2 * time.Second)
+ } else {
+ break
+ }
+ }
+ probe.UpdateStatusFromContext(ctx, "register-with-core", probe.ServiceStatusRunning)
+ logger.Info(ctx, "registered-with-core")
+ return nil
+}
+
+func waitForExit(ctx context.Context) int {
+ signalChannel := make(chan os.Signal, 1)
+ signal.Notify(signalChannel,
+ syscall.SIGHUP,
+ syscall.SIGINT,
+ syscall.SIGTERM,
+ syscall.SIGQUIT)
+
+ exitChannel := make(chan int)
+
+ go func() {
+ s := <-signalChannel
+ switch s {
+ case syscall.SIGHUP,
+ syscall.SIGINT,
+ syscall.SIGTERM,
+ syscall.SIGQUIT:
+ logger.Infow(ctx, "closing-signal-received", log.Fields{"signal": s})
+ exitChannel <- 0
+ default:
+ logger.Infow(ctx, "unexpected-signal-received", log.Fields{"signal": s})
+ exitChannel <- 1
+ }
+ }()
+
+ code := <-exitChannel
+ return code
+}
+
+func printBanner() {
+ fmt.Println(` _____ ____ ____ __ _ ____ _ _______ `)
+ fmt.Println(` | ____| _ \ / __ \| \ | |/ __ \| | |__ __| `)
+ fmt.Println(` | |___| |_| | | | | \ | | | | | | | | `)
+ fmt.Println(` | ___| __/| | | | |\ \| | | | | | | | `)
+ fmt.Println(` | |___| | | |__| | | \ | |__| | |____| | `)
+ fmt.Println(` |_____|_| \____/|_| \__|\____/|______|_| `)
+}
+
+func printVersion() {
+ fmt.Println("VOLTHA OpenOLT Adapter")
+ fmt.Println(version.VersionInfo.String(" "))
+}
+
+func main() {
+ ctx := context.Background()
+ start := time.Now()
+
+ cf := config.NewAdapterFlags()
+ cf.ParseCommandArguments()
+
+ // Setup logging
+
+ logLevel, err := log.StringToLogLevel(cf.LogLevel)
+ if err != nil {
+ logger.Fatalf(ctx, "Cannot setup logging, %s", err)
+ }
+
+ // Set up the default logger - it applies to packages that do not have a specific logger set
+ if _, err := log.SetDefaultLogger(log.JSON, logLevel, log.Fields{"instanceId": cf.InstanceID}); err != nil {
+ logger.With(log.Fields{"error": err}).Fatal(ctx, "Cannot setup logging")
+ }
+
+ // Update all loggers (provisioned via init) with a common field
+ if err := log.UpdateAllLoggers(log.Fields{"instanceId": cf.InstanceID}); err != nil {
+ logger.With(log.Fields{"error": err}).Fatal(ctx, "Cannot setup logging")
+ }
+
+ log.SetAllLogLevel(logLevel)
+
+ realMain()
+
+ defer func() {
+ err := log.CleanUp()
+ if err != nil {
+ logger.Errorw(context.Background(), "unable-to-flush-any-buffered-log-entries", log.Fields{"error": err})
+ }
+ }()
+
+ // Print version / build information and exit
+ if cf.DisplayVersionOnly {
+ printVersion()
+ return
+ }
+
+ // Print banner if specified
+ if cf.Banner {
+ printBanner()
+ }
+
+ logger.Infow(ctx, "config", log.Fields{"config": *cf})
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ ad := newAdapter(cf)
+
+ p := &probe.Probe{}
+ go p.ListenAndServe(ctx, ad.config.ProbeAddress)
+
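+ // The probe is carried in the context so that adapter.start() can update per-service readiness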
+ probeCtx := context.WithValue(ctx, probe.ProbeContextKey, p)
+
+ closer, err := log.GetGlobalLFM().InitTracingAndLogCorrelation(cf.TraceEnabled, cf.TraceAgentAddress, cf.LogCorrelationEnabled)
+ if err != nil {
+ logger.Warnw(ctx, "unable-to-initialize-tracing-and-log-correlation-module", log.Fields{"error": err})
+ } else {
+ defer log.TerminateTracing(closer)
+ }
+
+ go ad.start(probeCtx)
+
+ code := waitForExit(ctx)
+ logger.Infow(ctx, "received-a-closing-signal", log.Fields{"code": code})
+
+ // Cleanup before leaving
+ ad.stop(ctx)
+
+ elapsed := time.Since(start)
+ logger.Infow(ctx, "run-time", log.Fields{"instanceId": ad.config.InstanceID, "time": elapsed / time.Second})
+}
diff --git a/cmd/openolt-adapter/main_test.go b/cmd/openolt-adapter/main_test.go
new file mode 100644
index 0000000..19e8d5b
--- /dev/null
+++ b/cmd/openolt-adapter/main_test.go
@@ -0,0 +1,228 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package main
+
+import (
+ "context"
+ "errors"
+ "testing"
+ "time"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ "github.com/opencord/voltha-lib-go/v3/pkg/kafka"
+ "github.com/opencord/voltha-openolt-adapter/internal/pkg/config"
+ "github.com/opencord/voltha-openolt-adapter/pkg/mocks"
+ ca "github.com/opencord/voltha-protos/v3/go/inter_container"
+ "go.etcd.io/etcd/pkg/mock/mockserver"
+)
+
+func newMockAdapter() *adapter {
+ conf := config.NewAdapterFlags()
+ conf.KVStoreType = "etcd"
+ cp := mocks.MockCoreProxy{}
+ ap := mocks.MockAdapterProxy{}
+ ad := newAdapter(conf)
+ ad.coreProxy = &cp
+ ad.adapterProxy = &ap
+ return ad
+}
+func Test_adapter_setKVClient(t *testing.T) {
+ adapt := newMockAdapter()
+ adapt1 := newMockAdapter()
+ adapt1.config.KVStoreType = "consul"
+ adapt2 := newMockAdapter()
+ adapt2.config.KVStoreType = ""
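+ // Bring up mock gRPC servers to stand in for the etcd endpoint during the kv-client tests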
+ a, _ := mockserver.StartMockServers(1)
+ _ = a.StartAt(0)
+ defer a.StopAt(0)
+ tests := []struct {
+ name string
+ clienttype string
+ adapter *adapter
+ wantErr bool
+ }{
+ {"setKVClient", adapt.config.KVStoreType, adapt, false},
+ {"setKVClient", adapt1.config.KVStoreType, adapt1, false},
+ {"setKVClient", adapt2.config.KVStoreType, adapt2, true},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := tt.adapter.setKVClient(context.Background()); (err != nil) != tt.wantErr {
+ t.Errorf("adapter.setKVClient() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
+
+func Test_adapter_KVClient(t *testing.T) {
+ adapt := newMockAdapter()
+ a, _ := mockserver.StartMockServers(1)
+ _ = a.StartAt(0)
+ defer a.StopAt(0)
+
+ if err := adapt.setKVClient(context.Background()); err != nil {
+ t.Errorf("adapter.setKVClient() error = %v", err)
+ }
+}
+
+func Test_registerWithCore(t *testing.T) {
+ ad := newMockAdapter()
+ ctx := context.TODO()
+ err := ad.registerWithCore(ctx, 1)
+ if err != nil {
+ t.Errorf("Expected error:nil, got error: %v", err)
+ }
+}
+func Test_startInterContainerProxy(t *testing.T) {
+ ad := newMockAdapter()
+ kc := &mockKafkaClient{}
+ ad.kafkaClient = kc
+ ctx := context.TODO()
+ icp, err := ad.startInterContainerProxy(ctx, 1)
+ if icp != nil {
+ t.Log("Intercontainer proxy ", icp)
+ }
+ if err != nil {
+ t.Errorf("err %v", err)
+ }
+}
+
+func Test_startOpenOLT(t *testing.T) {
+ a, _ := mockserver.StartMockServers(1)
+ _ = a.StartAt(0)
+ defer a.StopAt(0)
+
+ ad := newMockAdapter()
+ oolt, err := ad.startOpenOLT(context.TODO(), nil,
+ ad.coreProxy, ad.adapterProxy, ad.eventProxy, ad.config)
+ if oolt != nil {
+ t.Log("Open OLT ", oolt)
+ }
+ if err != nil {
+ t.Errorf("err %v", err)
+ }
+}
+
+func Test_newKafkaClient(t *testing.T) {
+ a, _ := mockserver.StartMockServers(1)
+ _ = a.StartAt(0)
+ defer a.StopAt(0)
+ adapter := newMockAdapter()
+ type args struct {
+ clientType string
+ address string
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ // TODO: Add test cases.
+ {"newKafkaClient", args{clientType: "sarama", address: adapter.config.KafkaAdapterAddress}, false},
+ {"newKafkaClient", args{clientType: "sarama", address: adapter.config.KafkaAdapterAddress}, false},
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ _, err := newKafkaClient(context.Background(), tt.args.clientType, tt.args.address)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("newKafkaClient() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+
+ })
+ }
+}
+
+func Test_adapter_setupRequestHandler(t *testing.T) {
+
+ ad := newMockAdapter()
+
+ kip := kafka.NewInterContainerProxy(
+ kafka.InterContainerAddress(ad.config.KafkaAdapterAddress),
+ kafka.MsgClient(&mockKafkaClient{}),
+ kafka.DefaultTopic(&kafka.Topic{Name: ad.config.Topic}))
+
+ ad.kip = kip
+ _ = ad.kip.Start(context.Background())
+
+ oolt, _ := ad.startOpenOLT(context.TODO(), nil,
+ ad.coreProxy, ad.adapterProxy, ad.eventProxy, ad.config)
+ printBanner()
+ printVersion()
+ ctx := context.TODO()
+ if err := ad.setupRequestHandler(ctx, ad.config.InstanceID, oolt); err != nil {
+ t.Logf("adapter.setupRequestHandler() error = %v", err)
+ }
+
+}
+
+// Kafka client mocker
+type mockKafkaClient struct {
+}
+
+func (kc *mockKafkaClient) Start(ctx context.Context) error {
+ return nil
+}
+func (kc *mockKafkaClient) Stop(ctx context.Context) {
+}
+func (kc *mockKafkaClient) CreateTopic(ctx context.Context, topic *kafka.Topic, numPartition int, repFactor int) error {
+ if topic != nil {
+ return nil
+ }
+ return errors.New("invalid Topic")
+}
+func (kc *mockKafkaClient) DeleteTopic(ctx context.Context, topic *kafka.Topic) error {
+ if topic != nil {
+ return nil
+ }
+ return errors.New("invalid Topic")
+}
+func (kc *mockKafkaClient) Subscribe(ctx context.Context, topic *kafka.Topic, kvArgs ...*kafka.KVArg) (<-chan *ca.InterContainerMessage, error) {
+ if topic != nil {
+ ch := make(chan *ca.InterContainerMessage)
+ return ch, nil
+ }
+ return nil, errors.New("invalid Topic")
+}
+func (kc *mockKafkaClient) UnSubscribe(ctx context.Context, topic *kafka.Topic, ch <-chan *ca.InterContainerMessage) error {
+ if topic == nil {
+ return nil
+ }
+ return errors.New("invalid Topic")
+}
+func (kc *mockKafkaClient) Send(ctx context.Context, msg interface{}, topic *kafka.Topic, keys ...string) error {
+ if topic != nil {
+ return nil
+ }
+ return errors.New("invalid topic")
+}
+
+func (kc *mockKafkaClient) SendLiveness(ctx context.Context) error {
+ return status.Error(codes.Unimplemented, "SendLiveness")
+}
+
+func (kc *mockKafkaClient) EnableLivenessChannel(ctx context.Context, enable bool) chan bool {
+ return nil
+}
+
+func (kc *mockKafkaClient) EnableHealthinessChannel(ctx context.Context, enable bool) chan bool {
+ return nil
+}
+
+func (kc *mockKafkaClient) SubscribeForMetadata(context.Context, func(fromTopic string, timestamp time.Time)) {
+}
diff --git a/cmd/openolt-adapter/profile.go b/cmd/openolt-adapter/profile.go
new file mode 100644
index 0000000..f0b8e1f
--- /dev/null
+++ b/cmd/openolt-adapter/profile.go
@@ -0,0 +1,32 @@
+// +build profile
+
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "context"
+ "net/http"
+ _ "net/http/pprof"
+)
+
+func realMain() {
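+ // The blank net/http/pprof import registers the profiling handlers on
+ // http.DefaultServeMux; serve them on port 6060 in this "profile" build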
+ go func() {
+ logger.Fatal(context.Background(), http.ListenAndServe("0.0.0.0:6060", nil))
+ }()
+
+}
diff --git a/cmd/openolt-adapter/release.go b/cmd/openolt-adapter/release.go
new file mode 100644
index 0000000..0f18e72
--- /dev/null
+++ b/cmd/openolt-adapter/release.go
@@ -0,0 +1,26 @@
+// +build !profile
+
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package main invokes the application
+package main
+
+import "context"
+
+func realMain() {
+ logger.Infoln(context.Background(), "NOT PROFILING")
+}