VOL-4077: Improve storage usage on etcd
- Stop storing data on etcd when it can be reconciled on
adapter restart
- For data that must be stored, use a smaller footprint where
possible
- Use a write-through cache (see the sketch below) for all data
stored on etcd via the resource manager module
- Use one ResourceManager module per PON interface to localize
lock contention to each PON port
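
A minimal, illustrative sketch of that pattern (not part of the diff below;
the type and function names here are hypothetical stand-ins for the adapter's
OpenOltResourceMgr in internal/pkg/resourcemanager): one manager instance per
PON interface, with a write-through cache in front of etcd so reads are served
locally and each update touches etcd once.

    // Sketch only: hypothetical names, assuming a db.Backend-like KV client.
    package sketch

    import (
    	"context"
    	"sync"
    )

    // kvBackend stands in for the etcd-backed db.Backend used by the adapter.
    type kvBackend interface {
    	Put(ctx context.Context, key string, value interface{}) error
    	Get(ctx context.Context, key string) (interface{}, error)
    }

    // ponResourceMgr is a per-PON-interface resource manager, so lock
    // contention stays local to that port.
    type ponResourceMgr struct {
    	intfID    uint32
    	kv        kvBackend
    	cacheLock sync.RWMutex
    	cache     map[string]interface{} // write-through cache of KV data
    }

    func newPonResourceMgr(intfID uint32, kv kvBackend) *ponResourceMgr {
    	return &ponResourceMgr{intfID: intfID, kv: kv, cache: make(map[string]interface{})}
    }

    // update writes through: local cache first, then the KV store.
    func (r *ponResourceMgr) update(ctx context.Context, key string, value interface{}) error {
    	r.cacheLock.Lock()
    	r.cache[key] = value
    	r.cacheLock.Unlock()
    	return r.kv.Put(ctx, key, value)
    }

    // lookup serves reads from the cache and falls back to the KV store only
    // on a miss (for example, right after an adapter restart).
    func (r *ponResourceMgr) lookup(ctx context.Context, key string) (interface{}, error) {
    	r.cacheLock.RLock()
    	if v, ok := r.cache[key]; ok {
    		r.cacheLock.RUnlock()
    		return v, nil
    	}
    	r.cacheLock.RUnlock()
    	v, err := r.kv.Get(ctx, key)
    	if err != nil {
    		return nil, err
    	}
    	r.cacheLock.Lock()
    	r.cache[key] = v
    	r.cacheLock.Unlock()
    	return v, nil
    }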
Change-Id: I21d38216fab195d738a446b3f96a00251569e38b
diff --git a/.golangci.yml b/.golangci.yml
index 68c70c2..b52edd7 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -90,4 +90,5 @@
- "don't use underscores in Go names; method Start_omci_test"
- "don't use underscores in Go names; method Get_ext_value"
- "don't use underscores in Go names; method Single_get_value_request"
+ - "don't use underscores in Go names; method Process_tech_profile_instance_request"
exclude-use-default: false
diff --git a/Makefile b/Makefile
index d8d09d1..1f1d7d3 100644
--- a/Makefile
+++ b/Makefile
@@ -71,8 +71,8 @@
local-lib-go: ## Copies a local version of the voltha-lib-go dependency into the vendor directory
ifdef LOCAL_LIB_GO
- mkdir -p vendor/github.com/opencord/voltha-lib-go/v4/pkg
- cp -r ${LOCAL_LIB_GO}/pkg/* vendor/github.com/opencord/voltha-lib-go/v4/pkg/
+ mkdir -p vendor/github.com/opencord/voltha-lib-go/v5/pkg
+ cp -r ${LOCAL_LIB_GO}/pkg/* vendor/github.com/opencord/voltha-lib-go/v5/pkg/
endif
## Docker targets
diff --git a/VERSION b/VERSION
index 6cb9d3d..1545d96 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-3.4.3
+3.5.0
diff --git a/cmd/openolt-adapter/common.go b/cmd/openolt-adapter/common.go
index 5093658..eae795d 100644
--- a/cmd/openolt-adapter/common.go
+++ b/cmd/openolt-adapter/common.go
@@ -18,7 +18,7 @@
package main
import (
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
)
var logger log.CLogger
diff --git a/cmd/openolt-adapter/main.go b/cmd/openolt-adapter/main.go
index 74ef37a..dadb2a1 100644
--- a/cmd/openolt-adapter/main.go
+++ b/cmd/openolt-adapter/main.go
@@ -26,17 +26,17 @@
"syscall"
"time"
- "github.com/opencord/voltha-lib-go/v4/pkg/adapters"
- "github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif"
- com "github.com/opencord/voltha-lib-go/v4/pkg/adapters/common"
- conf "github.com/opencord/voltha-lib-go/v4/pkg/config"
- "github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
- "github.com/opencord/voltha-lib-go/v4/pkg/events"
- "github.com/opencord/voltha-lib-go/v4/pkg/events/eventif"
- "github.com/opencord/voltha-lib-go/v4/pkg/kafka"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
- "github.com/opencord/voltha-lib-go/v4/pkg/probe"
- "github.com/opencord/voltha-lib-go/v4/pkg/version"
+ "github.com/opencord/voltha-lib-go/v5/pkg/adapters"
+ "github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif"
+ com "github.com/opencord/voltha-lib-go/v5/pkg/adapters/common"
+ conf "github.com/opencord/voltha-lib-go/v5/pkg/config"
+ "github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
+ "github.com/opencord/voltha-lib-go/v5/pkg/events"
+ "github.com/opencord/voltha-lib-go/v5/pkg/events/eventif"
+ "github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/probe"
+ "github.com/opencord/voltha-lib-go/v5/pkg/version"
"github.com/opencord/voltha-openolt-adapter/internal/pkg/config"
ac "github.com/opencord/voltha-openolt-adapter/internal/pkg/core"
ic "github.com/opencord/voltha-protos/v4/go/inter_container"
diff --git a/cmd/openolt-adapter/main_test.go b/cmd/openolt-adapter/main_test.go
index 946abd8..4da39b7 100644
--- a/cmd/openolt-adapter/main_test.go
+++ b/cmd/openolt-adapter/main_test.go
@@ -21,12 +21,12 @@
"testing"
"time"
- conf "github.com/opencord/voltha-lib-go/v4/pkg/config"
+ conf "github.com/opencord/voltha-lib-go/v5/pkg/config"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
- "github.com/opencord/voltha-lib-go/v4/pkg/kafka"
+ "github.com/opencord/voltha-lib-go/v5/pkg/kafka"
"github.com/opencord/voltha-openolt-adapter/internal/pkg/config"
"github.com/opencord/voltha-openolt-adapter/pkg/mocks"
ca "github.com/opencord/voltha-protos/v4/go/inter_container"
diff --git a/docker/Dockerfile.openolt b/docker/Dockerfile.openolt
index 18822f2..27cfab3 100644
--- a/docker/Dockerfile.openolt
+++ b/docker/Dockerfile.openolt
@@ -40,13 +40,13 @@
RUN \
CGO_ENABLED=$CGO_PARAMETER go build $EXTRA_GO_BUILD_TAGS -mod=vendor -o /app/openolt \
-ldflags \
-"-X github.com/opencord/voltha-lib-go/v4/pkg/version.version=$org_label_schema_version \
--X github.com/opencord/voltha-lib-go/v4/pkg/version.vcsRef=$org_label_schema_vcs_ref \
--X github.com/opencord/voltha-lib-go/v4/pkg/version.vcsDirty=$org_opencord_vcs_dirty \
--X github.com/opencord/voltha-lib-go/v4/pkg/version.goVersion=$(go version 2>&1 | sed -E 's/.*go([0-9]+\.[0-9]+\.[0-9]+).*/\1/g') \
--X github.com/opencord/voltha-lib-go/v4/pkg/version.os=$(go env GOHOSTOS) \
--X github.com/opencord/voltha-lib-go/v4/pkg/version.arch=$(go env GOHOSTARCH) \
--X github.com/opencord/voltha-lib-go/v4/pkg/version.buildTime=$org_label_schema_build_date" \
+"-X github.com/opencord/voltha-lib-go/v5/pkg/version.version=$org_label_schema_version \
+-X github.com/opencord/voltha-lib-go/v5/pkg/version.vcsRef=$org_label_schema_vcs_ref \
+-X github.com/opencord/voltha-lib-go/v5/pkg/version.vcsDirty=$org_opencord_vcs_dirty \
+-X github.com/opencord/voltha-lib-go/v5/pkg/version.goVersion=$(go version 2>&1 | sed -E 's/.*go([0-9]+\.[0-9]+\.[0-9]+).*/\1/g') \
+-X github.com/opencord/voltha-lib-go/v5/pkg/version.os=$(go env GOHOSTOS) \
+-X github.com/opencord/voltha-lib-go/v5/pkg/version.arch=$(go env GOHOSTARCH) \
+-X github.com/opencord/voltha-lib-go/v5/pkg/version.buildTime=$org_label_schema_build_date" \
./cmd/openolt-adapter/
WORKDIR /app
diff --git a/go.mod b/go.mod
index 575b4e0..690798a 100644
--- a/go.mod
+++ b/go.mod
@@ -7,8 +7,8 @@
github.com/gogo/protobuf v1.3.1
github.com/golang/protobuf v1.3.2
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4
- github.com/opencord/voltha-lib-go/v4 v4.3.5
- github.com/opencord/voltha-protos/v4 v4.1.9
+ github.com/opencord/voltha-lib-go/v5 v5.0.2
+ github.com/opencord/voltha-protos/v4 v4.2.0
go.etcd.io/etcd v0.0.0-20190930204107-236ac2a90522
google.golang.org/grpc v1.25.1
)
diff --git a/go.sum b/go.sum
index db694c4..557341b 100644
--- a/go.sum
+++ b/go.sum
@@ -141,11 +141,10 @@
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I=
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/opencord/voltha-lib-go/v4 v4.3.5 h1:7OcAW2B5qpR6yh7c5GqcwlLkSbTfXYhZSsHSFkDBYNw=
-github.com/opencord/voltha-lib-go/v4 v4.3.5/go.mod h1:x0a7TxyzxPFaiewkbFiuy0+ftX5w4zeCRlFyyGZ4hhw=
-github.com/opencord/voltha-protos/v4 v4.1.2/go.mod h1:W/OIFIyvFh/C0vchRUuarIsMylEhzCRM9pNxLvkPtKc=
-github.com/opencord/voltha-protos/v4 v4.1.9 h1:tyXaYSpfPyWWa8szFSJxRwlaTK7U/5+MWpcTqGfDdek=
-github.com/opencord/voltha-protos/v4 v4.1.9/go.mod h1:W/OIFIyvFh/C0vchRUuarIsMylEhzCRM9pNxLvkPtKc=
+github.com/opencord/voltha-lib-go/v5 v5.0.2 h1:nLs42QM75BhKt4eXLdHhQwRPLrI2V2BjWJJlzGMUixg=
+github.com/opencord/voltha-lib-go/v5 v5.0.2/go.mod h1:i1fwPMicFccG38L200+IQAlfHSbszWg//jF1pDQxTPQ=
+github.com/opencord/voltha-protos/v4 v4.2.0 h1:QJZqHPRKa1E1xh40F3UA4xSjBI+6EmW7OfIcJqPNc4A=
+github.com/opencord/voltha-protos/v4 v4.2.0/go.mod h1:wNzWqmTwe7+DbYbpmOX6eMlglREtMkNxIDv3lyI2bco=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc=
diff --git a/internal/pkg/core/common.go b/internal/pkg/core/common.go
index f959b7f..43f0047 100644
--- a/internal/pkg/core/common.go
+++ b/internal/pkg/core/common.go
@@ -18,7 +18,7 @@
package core
import (
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
)
var logger log.CLogger
diff --git a/internal/pkg/core/device_handler.go b/internal/pkg/core/device_handler.go
index 7f2b53e..4243a26 100644
--- a/internal/pkg/core/device_handler.go
+++ b/internal/pkg/core/device_handler.go
@@ -35,12 +35,12 @@
"github.com/golang/protobuf/ptypes"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
- "github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif"
- "github.com/opencord/voltha-lib-go/v4/pkg/config"
- "github.com/opencord/voltha-lib-go/v4/pkg/events/eventif"
- flow_utils "github.com/opencord/voltha-lib-go/v4/pkg/flows"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
- "github.com/opencord/voltha-lib-go/v4/pkg/pmmetrics"
+ "github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif"
+ "github.com/opencord/voltha-lib-go/v5/pkg/config"
+ "github.com/opencord/voltha-lib-go/v5/pkg/events/eventif"
+ flow_utils "github.com/opencord/voltha-lib-go/v5/pkg/flows"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/pmmetrics"
"github.com/opencord/voltha-openolt-adapter/internal/pkg/olterrors"
rsrcMgr "github.com/opencord/voltha-openolt-adapter/internal/pkg/resourcemanager"
@@ -82,7 +82,9 @@
flowMgr []*OpenOltFlowMgr
groupMgr *OpenOltGroupMgr
eventMgr *OpenOltEventMgr
- resourceMgr *rsrcMgr.OpenOltResourceMgr
+ resourceMgr []*rsrcMgr.OpenOltResourceMgr
+
+ deviceInfo *oop.DeviceInfo
discOnus sync.Map
onus sync.Map
@@ -528,9 +530,6 @@
_ = olterrors.NewErrAdapter("handle-indication-error", log.Fields{"type": "interface-oper-nni", "device-id": dh.device.Id}, err).Log()
}
}()
- if err := dh.resourceMgr.AddNNIToKVStore(ctx, intfOperInd.GetIntfId()); err != nil {
- logger.Warn(ctx, err)
- }
} else if intfOperInd.GetType() == "pon" {
// TODO: Check what needs to be handled here for When PON PORT down, ONU will be down
// Handle pon port update
@@ -826,29 +825,35 @@
}
func (dh *DeviceHandler) initializeDeviceHandlerModules(ctx context.Context) error {
- deviceInfo, err := dh.populateDeviceInfo(ctx)
+ var err error
+ dh.deviceInfo, err = dh.populateDeviceInfo(ctx)
if err != nil {
return olterrors.NewErrAdapter("populate-device-info-failed", log.Fields{"device-id": dh.device.Id}, err)
}
- dh.totalPonPorts = deviceInfo.GetPonPorts()
- dh.agentPreviouslyConnected = deviceInfo.PreviouslyConnected
+ dh.totalPonPorts = dh.deviceInfo.GetPonPorts()
+ dh.agentPreviouslyConnected = dh.deviceInfo.PreviouslyConnected
- // Instantiate resource manager
- if dh.resourceMgr = rsrcMgr.NewResourceMgr(ctx, dh.device.Id, dh.openOLT.KVStoreAddress, dh.openOLT.KVStoreType, dh.device.Type, deviceInfo, dh.cm.Backend.PathPrefix); dh.resourceMgr == nil {
- return olterrors.ErrResourceManagerInstantiating
- }
-
- dh.groupMgr = NewGroupManager(ctx, dh, dh.resourceMgr)
-
+ dh.resourceMgr = make([]*rsrcMgr.OpenOltResourceMgr, dh.totalPonPorts)
dh.flowMgr = make([]*OpenOltFlowMgr, dh.totalPonPorts)
- for i := range dh.flowMgr {
- // Instantiate flow manager
- if dh.flowMgr[i] = NewFlowManager(ctx, dh, dh.resourceMgr, dh.groupMgr, uint32(i)); dh.flowMgr[i] == nil {
+ var i uint32
+ for i = 0; i < dh.totalPonPorts; i++ {
+ // Instantiate resource manager
+ if dh.resourceMgr[i] = rsrcMgr.NewResourceMgr(ctx, i, dh.device.Id, dh.openOLT.KVStoreAddress, dh.openOLT.KVStoreType, dh.device.Type, dh.deviceInfo, dh.cm.Backend.PathPrefix); dh.resourceMgr[i] == nil {
return olterrors.ErrResourceManagerInstantiating
}
}
-
+ // The GroupManager instance is per OLT, but it needs a reference to some resourceMgr instance to
+ // interface with the KV store for mcast group data. Provide the first instance (index 0).
+ if dh.groupMgr = NewGroupManager(ctx, dh, dh.resourceMgr[0]); dh.groupMgr == nil {
+ return olterrors.ErrGroupManagerInstantiating
+ }
+ for i = 0; i < dh.totalPonPorts; i++ {
+ // Instantiate flow manager
+ if dh.flowMgr[i] = NewFlowManager(ctx, dh, dh.resourceMgr[i], dh.groupMgr, i); dh.flowMgr[i] == nil {
+ return olterrors.ErrFlowManagerInstantiating
+ }
+ }
/* TODO: Instantiate Alarm , stats , BW managers */
/* Instantiating Event Manager to handle Alarms and KPIs */
dh.eventMgr = NewEventMgr(dh.EventProxy, dh)
@@ -913,7 +918,7 @@
ports, err := dh.coreProxy.ListDevicePorts(log.WithSpanFromContext(context.Background(), ctx), dh.device.Id)
if err != nil {
- logger.Warnw(ctx, "failed-to-list-ports", log.Fields{"device-id": dh.device.Id, "error": err})
+ logger.Warnw(ctx, "failed-to-list-ports", log.Fields{"device-id": dh.device.Id, "err": err})
continue
}
for _, port := range ports {
@@ -935,10 +940,9 @@
}
logger.Debugw(ctx, "publish-pon-metrics", log.Fields{"pon-port": port.Label})
- //ONU & Gem Stats
- onuGemInfo := dh.flowMgr[intfID].onuGemInfo
- if len(onuGemInfo) != 0 {
- go dh.portStats.collectOnuAndGemStats(ctx, onuGemInfo)
+ onuGemInfoLst := dh.flowMgr[intfID].getOnuGemInfoList()
+ if len(onuGemInfoLst) > 0 {
+ go dh.portStats.collectOnuAndGemStats(ctx, onuGemInfoLst)
}
}
}
@@ -978,6 +982,15 @@
}, nil
}
+// GetInterAdapterTechProfileDownloadMessage fetches the TechProfileDownloadMessage for the caller.
+func (dh *DeviceHandler) GetInterAdapterTechProfileDownloadMessage(ctx context.Context, tpPath string, ponPortNum uint32, onuID uint32, uniID uint32) *ic.InterAdapterTechProfileDownloadMessage {
+ ifID, err := IntfIDFromPonPortNum(ctx, ponPortNum)
+ if err != nil {
+ return nil
+ }
+ return dh.flowMgr[ifID].getTechProfileDownloadMessage(ctx, tpPath, ifID, onuID, uniID)
+}
+
func (dh *DeviceHandler) omciIndication(ctx context.Context, omciInd *oop.OmciIndication) error {
logger.Debugw(ctx, "omci-indication", log.Fields{"intf-id": omciInd.IntfId, "onu-id": omciInd.OnuId, "device-id": dh.device.Id})
var deviceType string
@@ -1038,44 +1051,47 @@
func (dh *DeviceHandler) ProcessInterAdapterMessage(ctx context.Context, msg *ic.InterAdapterMessage) error {
logger.Debugw(ctx, "process-inter-adapter-message", log.Fields{"msgID": msg.Header.Id})
if msg.Header.Type == ic.InterAdapterMessageType_OMCI_REQUEST {
- msgID := msg.Header.Id
- fromTopic := msg.Header.FromTopic
- toTopic := msg.Header.ToTopic
- toDeviceID := msg.Header.ToDeviceId
- proxyDeviceID := msg.Header.ProxyDeviceId
+ return dh.handleInterAdapterOmciMsg(ctx, msg)
+ }
+ return olterrors.NewErrInvalidValue(log.Fields{"inter-adapter-message-type": msg.Header.Type}, nil)
+}
- logger.Debugw(ctx, "omci-request-message-header", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
+func (dh *DeviceHandler) handleInterAdapterOmciMsg(ctx context.Context, msg *ic.InterAdapterMessage) error {
+ msgID := msg.Header.Id
+ fromTopic := msg.Header.FromTopic
+ toTopic := msg.Header.ToTopic
+ toDeviceID := msg.Header.ToDeviceId
+ proxyDeviceID := msg.Header.ProxyDeviceId
- msgBody := msg.GetBody()
+ logger.Debugw(ctx, "omci-request-message-header", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
- omciMsg := &ic.InterAdapterOmciMessage{}
- if err := ptypes.UnmarshalAny(msgBody, omciMsg); err != nil {
- return olterrors.NewErrAdapter("cannot-unmarshal-omci-msg-body", log.Fields{"msgbody": msgBody}, err)
+ msgBody := msg.GetBody()
+
+ omciMsg := &ic.InterAdapterOmciMessage{}
+ if err := ptypes.UnmarshalAny(msgBody, omciMsg); err != nil {
+ return olterrors.NewErrAdapter("cannot-unmarshal-omci-msg-body", log.Fields{"msgbody": msgBody}, err)
+ }
+
+ if omciMsg.GetProxyAddress() == nil {
+ onuDevice, err := dh.coreProxy.GetDevice(log.WithSpanFromContext(context.TODO(), ctx), dh.device.Id, toDeviceID)
+ if err != nil {
+ return olterrors.NewErrNotFound("onu", log.Fields{
+ "device-id": dh.device.Id,
+ "onu-device-id": toDeviceID}, err)
}
-
- if omciMsg.GetProxyAddress() == nil {
- onuDevice, err := dh.coreProxy.GetDevice(log.WithSpanFromContext(context.TODO(), ctx), dh.device.Id, toDeviceID)
- if err != nil {
- return olterrors.NewErrNotFound("onu", log.Fields{
- "device-id": dh.device.Id,
- "onu-device-id": toDeviceID}, err)
- }
- logger.Debugw(ctx, "device-retrieved-from-core", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
- if err := dh.sendProxiedMessage(ctx, onuDevice, omciMsg); err != nil {
- return olterrors.NewErrCommunication("send-failed", log.Fields{
- "device-id": dh.device.Id,
- "onu-device-id": toDeviceID}, err)
- }
- } else {
- logger.Debugw(ctx, "proxy-address-found-in-omci-message", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
- if err := dh.sendProxiedMessage(ctx, nil, omciMsg); err != nil {
- return olterrors.NewErrCommunication("send-failed", log.Fields{
- "device-id": dh.device.Id,
- "onu-device-id": toDeviceID}, err)
- }
+ logger.Debugw(ctx, "device-retrieved-from-core", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
+ if err := dh.sendProxiedMessage(ctx, onuDevice, omciMsg); err != nil {
+ return olterrors.NewErrCommunication("send-failed", log.Fields{
+ "device-id": dh.device.Id,
+ "onu-device-id": toDeviceID}, err)
}
} else {
- return olterrors.NewErrInvalidValue(log.Fields{"inter-adapter-message-type": msg.Header.Type}, nil)
+ logger.Debugw(ctx, "proxy-address-found-in-omci-message", log.Fields{"msgID": msgID, "fromTopic": fromTopic, "toTopic": toTopic, "toDeviceID": toDeviceID, "proxyDeviceID": proxyDeviceID})
+ if err := dh.sendProxiedMessage(ctx, nil, omciMsg); err != nil {
+ return olterrors.NewErrCommunication("send-failed", log.Fields{
+ "device-id": dh.device.Id,
+ "onu-device-id": toDeviceID}, err)
+ }
}
return nil
}
@@ -1129,7 +1145,6 @@
if err := dh.flowMgr[intfID].UpdateOnuInfo(ctx, intfID, uint32(onuID), serialNumber); err != nil {
return olterrors.NewErrAdapter("onu-activate-failed", log.Fields{"onu": onuID, "intf-id": intfID}, err)
}
- // TODO: need resource manager
var pir uint32 = 1000000
Onu := oop.Onu{IntfId: intfID, OnuId: uint32(onuID), SerialNumber: serialNum, Pir: pir, OmccEncryption: dh.openOLT.config.OmccEncryption}
if _, err := dh.Client.ActivateOnu(ctx, &Onu); err != nil {
@@ -1182,7 +1197,7 @@
alarmInd.LosStatus = statusCheckOff
go func() {
if err := dh.eventMgr.onuAlarmIndication(ctx, &alarmInd, onuInCache.(*OnuDevice).deviceID, raisedTs); err != nil {
- logger.Debugw(ctx, "indication-failed", log.Fields{"error": err})
+ logger.Debugw(ctx, "indication-failed", log.Fields{"err": err})
}
}()
}
@@ -1220,7 +1235,7 @@
logger.Debugw(ctx, "creating-new-onu", log.Fields{"sn": sn})
// we need to create a new ChildDevice
ponintfid := onuDiscInd.GetIntfId()
- onuID, err = dh.resourceMgr.GetONUID(ctx, ponintfid)
+ onuID, err = dh.resourceMgr[ponintfid].GetONUID(ctx, ponintfid)
logger.Infow(ctx, "creating-new-onu-got-onu-id", log.Fields{"sn": sn, "onuId": onuID})
@@ -1236,13 +1251,13 @@
if onuDevice, err = dh.coreProxy.ChildDeviceDetected(log.WithSpanFromContext(context.TODO(), ctx), dh.device.Id, int(parentPortNo),
"", int(channelID), string(onuDiscInd.SerialNumber.GetVendorId()), sn, int64(onuID)); err != nil {
dh.discOnus.Delete(sn)
- dh.resourceMgr.FreeonuID(ctx, ponintfid, []uint32{onuID}) // NOTE I'm not sure this method is actually cleaning up the right thing
+ dh.resourceMgr[ponintfid].FreeonuID(ctx, ponintfid, []uint32{onuID}) // NOTE I'm not sure this method is actually cleaning up the right thing
return olterrors.NewErrAdapter("core-proxy-child-device-detected-failed", log.Fields{
"pon-intf-id": ponintfid,
"serial-number": sn}, err)
}
if err := dh.eventMgr.OnuDiscoveryIndication(ctx, onuDiscInd, dh.device.Id, onuDevice.Id, onuID, sn, time.Now().Unix()); err != nil {
- logger.Warnw(ctx, "discovery-indication-failed", log.Fields{"error": err})
+ logger.Warnw(ctx, "discovery-indication-failed", log.Fields{"err": err})
}
logger.Infow(ctx, "onu-child-device-added",
log.Fields{"onuDevice": onuDevice,
@@ -1341,7 +1356,7 @@
}
if onuInd.OperState == "down" && onuInd.FailReason != oop.OnuIndication_ONU_ACTIVATION_FAIL_REASON_NONE {
if err := dh.eventMgr.onuActivationIndication(ctx, onuActivationFailEvent, onuInd, dh.device.Id, time.Now().Unix()); err != nil {
- logger.Warnw(ctx, "onu-activation-indication-reporting-failed", log.Fields{"error": err})
+ logger.Warnw(ctx, "onu-activation-indication-reporting-failed", log.Fields{"err": err})
}
}
if err := dh.updateOnuStates(ctx, onuDevice, onuInd); err != nil {
@@ -1614,7 +1629,7 @@
//get the child device for the parent device
onuDevices, err := dh.coreProxy.GetChildDevices(log.WithSpanFromContext(context.TODO(), ctx), dh.device.Id)
if err != nil {
- logger.Errorw(ctx, "failed-to-get-child-devices-information", log.Fields{"device-id": dh.device.Id, "error": err})
+ logger.Errorw(ctx, "failed-to-get-child-devices-information", log.Fields{"device-id": dh.device.Id, "err": err})
}
if onuDevices != nil {
for _, onuDevice := range onuDevices.Items {
@@ -1679,57 +1694,29 @@
logger.Debugw(ctx, "failed-to-remove-tech-profile-instance-for-onu", log.Fields{"onu-id": onu.OnuID})
}
logger.Debugw(ctx, "deleted-tech-profile-instance-for-onu", log.Fields{"onu-id": onu.OnuID})
- tpIDList := dh.resourceMgr.GetTechProfileIDForOnu(ctx, onu.IntfID, onu.OnuID, uniID)
+ tpIDList := dh.resourceMgr[onu.IntfID].GetTechProfileIDForOnu(ctx, onu.IntfID, onu.OnuID, uniID)
for _, tpID := range tpIDList {
- if err = dh.resourceMgr.RemoveMeterInfoForOnu(ctx, "upstream", onu.IntfID, onu.OnuID, uniID, tpID); err != nil {
+ if err = dh.resourceMgr[onu.IntfID].RemoveMeterInfoForOnu(ctx, "upstream", onu.IntfID, onu.OnuID, uniID, tpID); err != nil {
logger.Debugw(ctx, "failed-to-remove-meter-id-for-onu-upstream", log.Fields{"onu-id": onu.OnuID})
}
logger.Debugw(ctx, "removed-meter-id-for-onu-upstream", log.Fields{"onu-id": onu.OnuID})
- if err = dh.resourceMgr.RemoveMeterInfoForOnu(ctx, "downstream", onu.IntfID, onu.OnuID, uniID, tpID); err != nil {
+ if err = dh.resourceMgr[onu.IntfID].RemoveMeterInfoForOnu(ctx, "downstream", onu.IntfID, onu.OnuID, uniID, tpID); err != nil {
logger.Debugw(ctx, "failed-to-remove-meter-id-for-onu-downstream", log.Fields{"onu-id": onu.OnuID})
}
logger.Debugw(ctx, "removed-meter-id-for-onu-downstream", log.Fields{"onu-id": onu.OnuID})
}
- dh.resourceMgr.FreePONResourcesForONU(ctx, onu.IntfID, onu.OnuID, uniID)
- if err = dh.resourceMgr.RemoveTechProfileIDsForOnu(ctx, onu.IntfID, onu.OnuID, uniID); err != nil {
+ dh.resourceMgr[onu.IntfID].FreePONResourcesForONU(ctx, onu.IntfID, onu.OnuID, uniID)
+ if err = dh.resourceMgr[onu.IntfID].RemoveTechProfileIDsForOnu(ctx, onu.IntfID, onu.OnuID, uniID); err != nil {
logger.Debugw(ctx, "failed-to-remove-tech-profile-id-for-onu", log.Fields{"onu-id": onu.OnuID})
}
logger.Debugw(ctx, "removed-tech-profile-id-for-onu", log.Fields{"onu-id": onu.OnuID})
- if err = dh.resourceMgr.DeletePacketInGemPortForOnu(ctx, onu.IntfID, onu.OnuID, port); err != nil {
+ if err = dh.resourceMgr[onu.IntfID].DeletePacketInGemPortForOnu(ctx, onu.IntfID, onu.OnuID, port); err != nil {
logger.Debugw(ctx, "failed-to-remove-gemport-pkt-in", log.Fields{"intfid": onu.IntfID, "onuid": onu.OnuID, "uniId": uniID})
}
- if err = dh.resourceMgr.RemoveAllFlowsForIntfOnuUniKey(ctx, onu.IntfID, int32(onu.OnuID), int32(uniID)); err != nil {
- logger.Debugw(ctx, "failed-to-remove-flow-for", log.Fields{"intfid": onu.IntfID, "onuid": onu.OnuID, "uniId": uniID})
- }
}
return nil
}
-func (dh *DeviceHandler) clearNNIData(ctx context.Context) error {
- nniUniID := -1
- nniOnuID := -1
-
- if dh.resourceMgr == nil {
- return olterrors.NewErrNotFound("resource-manager", log.Fields{"device-id": dh.device.Id}, nil)
- }
- //Free the flow-ids for the NNI port
- nni, err := dh.resourceMgr.GetNNIFromKVStore(ctx)
- if err != nil {
- return olterrors.NewErrPersistence("get", "nni", 0, nil, err)
- }
- logger.Debugw(ctx, "nni-", log.Fields{"nni": nni})
- for _, nniIntfID := range nni {
- dh.resourceMgr.RemoveResourceMap(ctx, nniIntfID, int32(nniOnuID), int32(nniUniID))
- _ = dh.resourceMgr.RemoveAllFlowsForIntfOnuUniKey(ctx, nniIntfID, -1, -1)
-
- }
- if err = dh.resourceMgr.DelNNiFromKVStore(ctx); err != nil {
- return olterrors.NewErrPersistence("clear", "nni", 0, nil, err)
- }
-
- return nil
-}
-
// DeleteDevice deletes the device instance from openolt handler array. Also clears allocated resource manager resources. Also reboots the OLT hardware!
func (dh *DeviceHandler) DeleteDevice(ctx context.Context, device *voltha.Device) error {
logger.Debug(ctx, "function-entry-delete-device")
@@ -1767,13 +1754,8 @@
if dh.resourceMgr != nil {
var ponPort uint32
for ponPort = 0; ponPort < dh.totalPonPorts; ponPort++ {
- var onuGemData []rsrcMgr.OnuGemInfo
- err := dh.resourceMgr.ResourceMgrs[ponPort].GetOnuGemInfo(ctx, ponPort, &onuGemData)
- if err != nil {
- _ = olterrors.NewErrNotFound("onu", log.Fields{
- "device-id": dh.device.Id,
- "pon-port": ponPort}, err).Log()
- }
+ var err error
+ onuGemData := dh.flowMgr[ponPort].getOnuGemInfoList()
for i, onu := range onuGemData {
onuID := make([]uint32, 1)
logger.Debugw(ctx, "onu-data", log.Fields{"onu": onu})
@@ -1782,31 +1764,22 @@
}
// Clear flowids for gem cache.
for _, gem := range onu.GemPorts {
- dh.resourceMgr.DeleteFlowIDsForGem(ctx, ponPort, gem)
+ dh.resourceMgr[ponPort].DeleteFlowIDsForGem(ctx, ponPort, gem)
}
onuID[0] = onu.OnuID
- dh.resourceMgr.FreeonuID(ctx, ponPort, onuID)
+ dh.resourceMgr[ponPort].FreeonuID(ctx, ponPort, onuID)
+ err = dh.resourceMgr[ponPort].DelOnuGemInfo(ctx, ponPort, onu.OnuID)
+ if err != nil {
+ logger.Errorw(ctx, "failed-to-update-onugem-info", log.Fields{"intfid": ponPort, "onugeminfo": onuGemData})
+ }
}
- dh.resourceMgr.DeleteIntfIDGempMapPath(ctx, ponPort)
- onuGemData = nil
- err = dh.resourceMgr.DelOnuGemInfoForIntf(ctx, ponPort)
- if err != nil {
- logger.Errorw(ctx, "failed-to-update-onugem-info", log.Fields{"intfid": ponPort, "onugeminfo": onuGemData})
- }
+ /* Clear the resource pool for each PON port in the background */
+ go func(ponPort uint32) {
+ if err := dh.resourceMgr[ponPort].Delete(ctx, ponPort); err != nil {
+ logger.Debug(ctx, err)
+ }
+ }(ponPort)
}
- /* Clear the flows from KV store associated with NNI port.
- There are mostly trap rules from NNI port (like LLDP)
- */
- if err := dh.clearNNIData(ctx); err != nil {
- logger.Errorw(ctx, "failed-to-clear-data-for-NNI-port", log.Fields{"device-id": dh.device.Id})
- }
-
- /* Clear the resource pool for each PON port in the background */
- go func() {
- if err := dh.resourceMgr.Delete(ctx); err != nil {
- logger.Debug(ctx, err)
- }
- }()
}
/*Delete ONU map for the device*/
@@ -2190,56 +2163,48 @@
}
onu := &oop.Onu{IntfId: intfID, OnuId: onuID, SerialNumber: sn}
+ //clear PON resources associated with ONU
+ onuGem, err := dh.resourceMgr[intfID].GetOnuGemInfo(ctx, intfID, onuID)
+ if err != nil || onuGem == nil || onuGem.OnuID != onuID {
+ logger.Warnw(ctx, "failed-to-get-onu-info-for-pon-port", log.Fields{
+ "device-id": dh.device.Id,
+ "intf-id": intfID,
+ "onuID": onuID,
+ "err": err})
+ } else {
+ logger.Debugw(ctx, "onu-data", log.Fields{"onu": onu})
+ if err := dh.clearUNIData(ctx, onuGem); err != nil {
+ logger.Warnw(ctx, "failed-to-clear-uni-data-for-onu", log.Fields{
+ "device-id": dh.device.Id,
+ "onu-device": onu,
+ "err": err})
+ }
+ // Clear flowids for gem cache.
+ for _, gem := range onuGem.GemPorts {
+ dh.resourceMgr[intfID].DeleteFlowIDsForGem(ctx, intfID, gem)
+ }
+ err := dh.resourceMgr[intfID].DelOnuGemInfo(ctx, intfID, onuID)
+ if err != nil {
+ logger.Warnw(ctx, "persistence-update-onu-gem-info-failed", log.Fields{
+ "intf-id": intfID,
+ "onu-device": onu,
+ "onu-gem": onuGem,
+ "err": err})
+ //Not returning error on cleanup.
+ }
+ logger.Debugw(ctx, "removed-onu-gem-info", log.Fields{"intf": intfID, "onu-device": onu, "onugem": onuGem})
+ dh.resourceMgr[intfID].FreeonuID(ctx, intfID, []uint32{onuGem.OnuID})
+ }
+ dh.onus.Delete(onuKey)
+ dh.discOnus.Delete(onuSn)
+
+ // Now clear the ONU on the OLT
if _, err := dh.Client.DeleteOnu(log.WithSpanFromContext(context.Background(), ctx), onu); err != nil {
return olterrors.NewErrAdapter("failed-to-delete-onu", log.Fields{
"device-id": dh.device.Id,
"onu-id": onuID}, err).Log()
}
- //clear PON resources associated with ONU
- var onuGemData []rsrcMgr.OnuGemInfo
- if onuMgr, ok := dh.resourceMgr.ResourceMgrs[intfID]; !ok {
- logger.Warnw(ctx, "failed-to-get-resource-manager-for-interface-Id", log.Fields{
- "device-id": dh.device.Id,
- "intf-id": intfID})
- } else {
- if err := onuMgr.GetOnuGemInfo(ctx, intfID, &onuGemData); err != nil {
- logger.Warnw(ctx, "failed-to-get-onu-info-for-pon-port", log.Fields{
- "device-id": dh.device.Id,
- "intf-id": intfID,
- "error": err})
- } else {
- for i, onu := range onuGemData {
- if onu.OnuID == onuID && onu.SerialNumber == onuSn {
- logger.Debugw(ctx, "onu-data", log.Fields{"onu": onu})
- if err := dh.clearUNIData(ctx, &onuGemData[i]); err != nil {
- logger.Warnw(ctx, "failed-to-clear-uni-data-for-onu", log.Fields{
- "device-id": dh.device.Id,
- "onu-device": onu,
- "error": err})
- }
- // Clear flowids for gem cache.
- for _, gem := range onu.GemPorts {
- dh.resourceMgr.DeleteFlowIDsForGem(ctx, intfID, gem)
- }
- onuGemData = append(onuGemData[:i], onuGemData[i+1:]...)
- err := onuMgr.AddOnuGemInfo(ctx, intfID, onuGemData)
- if err != nil {
- logger.Warnw(ctx, "persistence-update-onu-gem-info-failed", log.Fields{
- "intf-id": intfID,
- "onu-device": onu,
- "onu-gem": onuGemData,
- "error": err})
- //Not returning error on cleanup.
- }
- logger.Debugw(ctx, "removed-onu-gem-info", log.Fields{"intf": intfID, "onu-device": onu, "onugem": onuGemData})
- dh.resourceMgr.FreeonuID(ctx, intfID, []uint32{onu.OnuID})
- break
- }
- }
- }
- }
- dh.onus.Delete(onuKey)
- dh.discOnus.Delete(onuSn)
+
return nil
}
@@ -2334,7 +2299,7 @@
/*
resp, err = dh.Client.GetValue(ctx, valueparam)
if err != nil {
- logger.Errorw("error-while-getValue", log.Fields{"DeviceID": dh.device, "onu-id": onuid, "error": err})
+ logger.Errorw("error-while-getValue", log.Fields{"DeviceID": dh.device, "onu-id": onuid, "err": err})
return nil, err
}
*/
@@ -2429,6 +2394,7 @@
// Step2 : Push the McastFlowOrGroupControlBlock to appropriate channel
// Step3 : Wait on response channel for response
// Step4 : Return error value
+ startTime := time.Now()
logger.Debugw(ctx, "process-flow-or-group", log.Fields{"flow": flow, "group": group, "action": action})
errChan := make(chan error)
var groupID uint32
@@ -2451,7 +2417,7 @@
dh.incomingMcastFlowOrGroup[groupID%MaxNumOfGroupHandlerChannels] <- mcastFlowOrGroupCb
// Wait for handler to return error value
err := <-errChan
- logger.Debugw(ctx, "process-flow-or-group--received-resp", log.Fields{"flow": flow, "group": group, "action": action, "err": err})
+ logger.Debugw(ctx, "process-flow-or-group--received-resp", log.Fields{"err": err, "totalTimeInSeconds": time.Since(startTime).Milliseconds()})
return err
}
diff --git a/internal/pkg/core/device_handler_test.go b/internal/pkg/core/device_handler_test.go
index 1cae8b6..d69be1a 100644
--- a/internal/pkg/core/device_handler_test.go
+++ b/internal/pkg/core/device_handler_test.go
@@ -19,8 +19,7 @@
import (
"context"
- conf "github.com/opencord/voltha-lib-go/v4/pkg/config"
- tp "github.com/opencord/voltha-lib-go/v4/pkg/techprofile"
+ conf "github.com/opencord/voltha-lib-go/v5/pkg/config"
"net"
"reflect"
"sync"
@@ -29,11 +28,11 @@
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
- "github.com/opencord/voltha-lib-go/v4/pkg/db"
- fu "github.com/opencord/voltha-lib-go/v4/pkg/flows"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
- "github.com/opencord/voltha-lib-go/v4/pkg/pmmetrics"
- ponrmgr "github.com/opencord/voltha-lib-go/v4/pkg/ponresourcemanager"
+ "github.com/opencord/voltha-lib-go/v5/pkg/db"
+ fu "github.com/opencord/voltha-lib-go/v5/pkg/flows"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/pmmetrics"
+ ponrmgr "github.com/opencord/voltha-lib-go/v5/pkg/ponresourcemanager"
"github.com/opencord/voltha-openolt-adapter/internal/pkg/config"
"github.com/opencord/voltha-openolt-adapter/internal/pkg/olterrors"
"github.com/opencord/voltha-openolt-adapter/internal/pkg/resourcemanager"
@@ -169,16 +168,17 @@
}}
deviceInf := &oop.DeviceInfo{Vendor: "openolt", Ranges: oopRanges, Model: "openolt", DeviceId: dh.device.Id, PonPorts: NumPonPorts}
- rsrMgr := resourcemanager.OpenOltResourceMgr{DeviceID: dh.device.Id, DeviceType: dh.device.Type, DevInfo: deviceInf,
- KVStore: &db.Backend{
- Client: &mocks.MockKVClient{},
- }}
- rsrMgr.AllocIDMgmtLock = make([]sync.RWMutex, deviceInf.PonPorts)
- rsrMgr.GemPortIDMgmtLock = make([]sync.RWMutex, deviceInf.PonPorts)
- rsrMgr.OnuIDMgmtLock = make([]sync.RWMutex, deviceInf.PonPorts)
+ dh.deviceInfo = deviceInf
+ dh.resourceMgr = make([]*resourcemanager.OpenOltResourceMgr, deviceInf.PonPorts)
+ var i uint32
+ for i = 0; i < deviceInf.PonPorts; i++ {
+ dh.resourceMgr[i] = &resourcemanager.OpenOltResourceMgr{DeviceID: dh.device.Id, DeviceType: dh.device.Type, DevInfo: deviceInf,
+ KVStore: &db.Backend{
+ Client: &mocks.MockKVClient{},
+ }}
+ dh.resourceMgr[i].InitLocalCache()
+ }
- dh.resourceMgr = &rsrMgr
- dh.resourceMgr.ResourceMgrs = make(map[uint32]*ponrmgr.PONResourceManager)
ranges := make(map[string]interface{})
sharedIdxByType := make(map[string]string)
sharedIdxByType["ALLOC_ID"] = "ALLOC_ID"
@@ -195,13 +195,6 @@
ranges["flow_id_shared"] = uint32(0)
ponmgr := &ponrmgr.PONResourceManager{}
-
- ctx := context.TODO()
- tpMgr, err := tp.NewTechProfile(ctx, ponmgr, "etcd", "127.0.0.1", "/")
- if err != nil {
- logger.Fatal(ctx, err.Error())
- }
-
ponmgr.DeviceID = "onu-1"
ponmgr.IntfIDs = []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
ponmgr.KVStore = &db.Backend{
@@ -209,19 +202,28 @@
}
ponmgr.PonResourceRanges = ranges
ponmgr.SharedIdxByType = sharedIdxByType
+ ponmgr.Technology = "XGS-PON"
+ for i = 0; i < deviceInf.PonPorts; i++ {
+ dh.resourceMgr[i].PonRsrMgr = ponmgr
+ }
+
+ /*
+ tpMgr, err := tp.NewTechProfile(ctx, ponmgr, "etcd", "127.0.0.1", "/")
+ if err != nil {
+ logger.Fatal(ctx, err.Error())
+ }
+ */
+ tpMgr := &mocks.MockTechProfile{TpID: 64}
ponmgr.TechProfileMgr = tpMgr
- for i := 0; i < NumPonPorts; i++ {
- dh.resourceMgr.ResourceMgrs[uint32(i)] = ponmgr
- }
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
- dh.groupMgr = NewGroupManager(ctx, dh, dh.resourceMgr)
+ dh.groupMgr = NewGroupManager(ctx, dh, dh.resourceMgr[0])
dh.totalPonPorts = NumPonPorts
dh.flowMgr = make([]*OpenOltFlowMgr, dh.totalPonPorts)
- for i := 0; i < int(dh.totalPonPorts); i++ {
+ for i = 0; i < dh.totalPonPorts; i++ {
// Instantiate flow manager
- if dh.flowMgr[i] = NewFlowManager(ctx, dh, dh.resourceMgr, dh.groupMgr, uint32(i)); dh.flowMgr[i] == nil {
+ if dh.flowMgr[i] = NewFlowManager(ctx, dh, dh.resourceMgr[i], dh.groupMgr, uint32(i)); dh.flowMgr[i] == nil {
return nil
}
}
@@ -443,18 +445,18 @@
var err error
if marshalledData, err = ptypes.MarshalAny(body); err != nil {
- logger.Errorw(ctx, "cannot-marshal-request", log.Fields{"error": err})
+ logger.Errorw(ctx, "cannot-marshal-request", log.Fields{"err": err})
}
var marshalledData1 *any.Any
if marshalledData1, err = ptypes.MarshalAny(body2); err != nil {
- logger.Errorw(ctx, "cannot-marshal-request", log.Fields{"error": err})
+ logger.Errorw(ctx, "cannot-marshal-request", log.Fields{"err": err})
}
var marshalledData2 *any.Any
if marshalledData2, err = ptypes.MarshalAny(body3); err != nil {
- logger.Errorw(ctx, "cannot-marshal-request", log.Fields{"error": err})
+ logger.Errorw(ctx, "cannot-marshal-request", log.Fields{"err": err})
}
type args struct {
msg *ic.InterAdapterMessage
diff --git a/internal/pkg/core/olt_platform.go b/internal/pkg/core/olt_platform.go
index b85dd4b..9526ee7 100644
--- a/internal/pkg/core/olt_platform.go
+++ b/internal/pkg/core/olt_platform.go
@@ -20,8 +20,8 @@
import (
"context"
- "github.com/opencord/voltha-lib-go/v4/pkg/flows"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/flows"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
"github.com/opencord/voltha-openolt-adapter/internal/pkg/olterrors"
ofp "github.com/opencord/voltha-protos/v4/go/openflow_13"
"github.com/opencord/voltha-protos/v4/go/voltha"
@@ -112,6 +112,10 @@
minNniIntPortNum = (1 << bitsforNNIID)
// maxNniPortNum is used to store the maximum range of nni port number ((1 << 21)-1) 2097151
maxNniPortNum = ((1 << (bitsforNNIID + 1)) - 1)
+ // minPonIntfPortNum stores the minimum pon port number
+ minPonIntfPortNum = ponIntfMarkerValue << ponIntfMarkerPos
+ // maxPonIntfPortNum stores the maximum pon port number
+ maxPonIntfPortNum = (ponIntfMarkerValue << ponIntfMarkerPos) | ((1 << bitsForPONID) - 1)
)
//MinUpstreamPortID value
@@ -177,6 +181,15 @@
return (portNum & 0xFFFF), nil
}
+//IntfIDFromPonPortNum returns Intf ID derived from portNum
+func IntfIDFromPonPortNum(ctx context.Context, portNum uint32) (uint32, error) {
+ if portNum < minPonIntfPortNum || portNum > maxPonIntfPortNum {
+ logger.Errorw(ctx, "ponportnumber-is-not-in-valid-range", log.Fields{"portnum": portNum})
+ return uint32(0), olterrors.ErrInvalidPortRange
+ }
+ return (portNum & 0x7FFF), nil
+}
+
//IntfIDToPortTypeName returns port type derived from the intfId
func IntfIDToPortTypeName(intfID uint32) voltha.Port_PortType {
if ((ponIntfMarkerValue << ponIntfMarkerPos) ^ intfID) < MaxPonsPerOlt {
diff --git a/internal/pkg/core/olt_platform_test.go b/internal/pkg/core/olt_platform_test.go
index 4c89eaa..6ff32db 100644
--- a/internal/pkg/core/olt_platform_test.go
+++ b/internal/pkg/core/olt_platform_test.go
@@ -19,7 +19,7 @@
import (
"context"
- fu "github.com/opencord/voltha-lib-go/v4/pkg/flows"
+ fu "github.com/opencord/voltha-lib-go/v5/pkg/flows"
"github.com/opencord/voltha-openolt-adapter/internal/pkg/olterrors"
ofp "github.com/opencord/voltha-protos/v4/go/openflow_13"
"github.com/opencord/voltha-protos/v4/go/voltha"
diff --git a/internal/pkg/core/olt_state_transitions.go b/internal/pkg/core/olt_state_transitions.go
index e67bd43..5fab1d5 100644
--- a/internal/pkg/core/olt_state_transitions.go
+++ b/internal/pkg/core/olt_state_transitions.go
@@ -22,7 +22,7 @@
"reflect"
"runtime"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
)
// DeviceState OLT Device state
diff --git a/internal/pkg/core/openolt.go b/internal/pkg/core/openolt.go
index 436d67a..33b3f1b 100644
--- a/internal/pkg/core/openolt.go
+++ b/internal/pkg/core/openolt.go
@@ -22,11 +22,11 @@
"sync"
"time"
- "github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif"
- conf "github.com/opencord/voltha-lib-go/v4/pkg/config"
- "github.com/opencord/voltha-lib-go/v4/pkg/events/eventif"
- "github.com/opencord/voltha-lib-go/v4/pkg/kafka"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif"
+ conf "github.com/opencord/voltha-lib-go/v5/pkg/config"
+ "github.com/opencord/voltha-lib-go/v5/pkg/events/eventif"
+ "github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
"github.com/opencord/voltha-openolt-adapter/internal/pkg/config"
"github.com/opencord/voltha-openolt-adapter/internal/pkg/olterrors"
"github.com/opencord/voltha-protos/v4/go/extension"
@@ -159,6 +159,20 @@
return olterrors.NewErrNotFound("device-handler", log.Fields{"device-id": targetDevice}, nil)
}
+//Process_tech_profile_instance_request processes tech profile request message from onu adapter
+func (oo *OpenOLT) Process_tech_profile_instance_request(ctx context.Context, msg *ic.InterAdapterTechProfileInstanceRequestMessage) *ic.InterAdapterTechProfileDownloadMessage {
+ logger.Debugw(ctx, "Process_tech_profile_instance_request", log.Fields{"tpPath": msg.TpInstancePath})
+ targetDeviceID := msg.ParentDeviceId // Request?
+ if targetDeviceID == "" {
+ logger.Error(ctx, "device-id-nil")
+ return nil
+ }
+ if handler := oo.getDeviceHandler(targetDeviceID); handler != nil {
+ return handler.GetInterAdapterTechProfileDownloadMessage(ctx, msg.TpInstancePath, msg.ParentPonPort, msg.OnuId, msg.UniId)
+ }
+ return nil
+}
+
//Adapter_descriptor not implemented
func (oo *OpenOLT) Adapter_descriptor(ctx context.Context) error {
return olterrors.ErrNotImplemented
@@ -407,7 +421,7 @@
if handler := oo.getDeviceHandler(deviceID); handler != nil {
if resp, err = handler.getExtValue(ctx, device, valueparam); err != nil {
logger.Errorw(ctx, "error-occurred-during-get-ext-value", log.Fields{"device-id": deviceID, "onu-id": device.Id,
- "error": err})
+ "err": err})
return nil, err
}
}
diff --git a/internal/pkg/core/openolt_eventmgr.go b/internal/pkg/core/openolt_eventmgr.go
index 336592c..1b5abd1 100644
--- a/internal/pkg/core/openolt_eventmgr.go
+++ b/internal/pkg/core/openolt_eventmgr.go
@@ -23,8 +23,8 @@
"fmt"
"strconv"
- "github.com/opencord/voltha-lib-go/v4/pkg/events/eventif"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/events/eventif"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
"github.com/opencord/voltha-openolt-adapter/internal/pkg/olterrors"
"github.com/opencord/voltha-protos/v4/go/common"
oop "github.com/opencord/voltha-protos/v4/go/openolt"
@@ -660,7 +660,7 @@
func (em *OpenOltEventMgr) oltIntfOperIndication(ctx context.Context, ifindication *oop.IntfOperIndication, deviceID string, raisedTs int64) {
portNo := IntfIDToPortNo(ifindication.IntfId, voltha.Port_PON_OLT)
if port, err := em.handler.coreProxy.GetDevicePort(ctx, deviceID, portNo); err != nil {
- logger.Warnw(ctx, "Error while fetching port object", log.Fields{"device-id": deviceID, "error": err})
+ logger.Warnw(ctx, "Error while fetching port object", log.Fields{"device-id": deviceID, "err": err})
} else if port.AdminState != common.AdminState_ENABLED {
logger.Debugw(ctx, "port-disable/enable-event-not-generated--the-port-is-not-enabled-by-operator", log.Fields{"device-id": deviceID, "port": port})
return
diff --git a/internal/pkg/core/openolt_flowmgr.go b/internal/pkg/core/openolt_flowmgr.go
index 4ae86ac..9c1cb09 100644
--- a/internal/pkg/core/openolt_flowmgr.go
+++ b/internal/pkg/core/openolt_flowmgr.go
@@ -22,14 +22,15 @@
"encoding/hex"
"errors"
"fmt"
- "github.com/opencord/voltha-lib-go/v4/pkg/meters"
+ "github.com/opencord/voltha-lib-go/v5/pkg/meters"
"strconv"
"strings"
"sync"
+ "time"
- "github.com/opencord/voltha-lib-go/v4/pkg/flows"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
- tp "github.com/opencord/voltha-lib-go/v4/pkg/techprofile"
+ "github.com/opencord/voltha-lib-go/v5/pkg/flows"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
+ tp "github.com/opencord/voltha-lib-go/v5/pkg/techprofile"
rsrcMgr "github.com/opencord/voltha-openolt-adapter/internal/pkg/resourcemanager"
"github.com/opencord/voltha-protos/v4/go/common"
ic "github.com/opencord/voltha-protos/v4/go/inter_container"
@@ -154,11 +155,6 @@
pbit1 = '1'
)
-type gemPortKey struct {
- intfID uint32
- gemPort uint32
-}
-
type schedQueue struct {
direction tp_pb.Direction
intfID uint32
@@ -186,15 +182,6 @@
gemToAes map[uint32]bool
}
-// subscriberDataPathFlowIDKey is key to subscriberDataPathFlowIDMap map
-type subscriberDataPathFlowIDKey struct {
- intfID uint32
- onuID uint32
- uniID uint32
- direction string
- tpID uint32
-}
-
// This control block is created per flow add/remove and pushed on the incomingFlows channel slice
// The flowControlBlock is then picked by the perOnuFlowHandlerRoutine for further processing.
// There is on perOnuFlowHandlerRoutine routine per ONU that constantly monitors for any incoming
@@ -210,37 +197,28 @@
//OpenOltFlowMgr creates the Structure of OpenOltFlowMgr obj
type OpenOltFlowMgr struct {
ponPortIdx uint32 // Pon Port this FlowManager is responsible for
- techprofile map[uint32]tp.TechProfileIf
+ techprofile tp.TechProfileIf
deviceHandler *DeviceHandler
grpMgr *OpenOltGroupMgr
resourceMgr *rsrcMgr.OpenOltResourceMgr
- onuIdsLock sync.RWMutex // TODO: Do we need this?
-
- flowsUsedByGemPort map[uint32][]uint64 // gem port id to flow ids
- flowsUsedByGemPortKey sync.RWMutex // lock to be used to access the flowsUsedByGemPort map
+ gemToFlowIDs map[uint32][]uint64 // gem port id to flow ids
+ gemToFlowIDsKey sync.RWMutex // lock to be used to access the gemToFlowIDs map
packetInGemPort map[rsrcMgr.PacketInInfoKey]uint32 //packet in gem port local cache
packetInGemPortLock sync.RWMutex
// TODO create a type rsrcMgr.OnuGemInfos to be used instead of []rsrcMgr.OnuGemInfo
- onuGemInfo []rsrcMgr.OnuGemInfo //onu, gem and uni info local cache
+ onuGemInfoMap map[uint32]*rsrcMgr.OnuGemInfo //onu, gem and uni info local cache -> map of onuID to OnuGemInfo
// We need to have a global lock on the onuGemInfo map
onuGemInfoLock sync.RWMutex
- // Map of voltha flowID associated with subscriberDataPathFlowIDKey
- // This information is not persisted on Kv store and hence should be reconciled on adapter restart
- subscriberDataPathFlowIDMap map[subscriberDataPathFlowIDKey]uint64
- subscriberDataPathFlowIDMapLock sync.RWMutex
+ flowIDToGems map[uint64][]uint32
+ flowIDToGemsLock sync.RWMutex
// Slice of channels. Each channel in slice, index by ONU ID, queues flows per ONU.
// A go routine per ONU, waits on the unique channel (indexed by ONU ID) for incoming flows (add/remove)
incomingFlows []chan flowControlBlock
-
- //this map keeps uni port info by gem and pon port. This relation shall be used for packet-out operations
- gemToUniMap map[gemPortKey][]uint32
- //We need to have a global lock on the gemToUniLock map
- gemToUniLock sync.RWMutex
}
//NewFlowManager creates OpenOltFlowMgr object and initializes the parameters
@@ -248,23 +226,18 @@
logger.Infow(ctx, "initializing-flow-manager", log.Fields{"device-id": dh.device.Id})
var flowMgr OpenOltFlowMgr
var err error
- var idx uint32
flowMgr.deviceHandler = dh
+ flowMgr.ponPortIdx = ponPortIdx
flowMgr.grpMgr = grpMgr
flowMgr.resourceMgr = rMgr
- flowMgr.techprofile = make(map[uint32]tp.TechProfileIf)
if err = flowMgr.populateTechProfilePerPonPort(ctx); err != nil {
- logger.Errorw(ctx, "error-while-populating-tech-profile-mgr", log.Fields{"error": err})
+ logger.Errorw(ctx, "error-while-populating-tech-profile-mgr", log.Fields{"err": err})
return nil
}
- flowMgr.onuIdsLock = sync.RWMutex{}
- flowMgr.flowsUsedByGemPort = make(map[uint32][]uint64)
+ flowMgr.gemToFlowIDs = make(map[uint32][]uint64)
flowMgr.packetInGemPort = make(map[rsrcMgr.PacketInInfoKey]uint32)
- flowMgr.packetInGemPortLock = sync.RWMutex{}
- flowMgr.onuGemInfoLock = sync.RWMutex{}
- flowMgr.subscriberDataPathFlowIDMap = make(map[subscriberDataPathFlowIDKey]uint64)
- flowMgr.subscriberDataPathFlowIDMapLock = sync.RWMutex{}
+ flowMgr.flowIDToGems = make(map[uint64][]uint32)
// Create a slice of buffered channels for handling concurrent flows per ONU.
// The additional entry (+1) is to handle the NNI trap flows on a separate channel from individual ONUs channel
@@ -276,54 +249,35 @@
// This routine will be blocked on the flowMgr.incomingFlows[onu-id] channel for incoming flows.
go flowMgr.perOnuFlowHandlerRoutine(flowMgr.incomingFlows[i])
}
-
+ flowMgr.onuGemInfoMap = make(map[uint32]*rsrcMgr.OnuGemInfo)
//Load the onugem info cache from kv store on flowmanager start
- if flowMgr.onuGemInfo, err = rMgr.GetOnuGemInfo(ctx, ponPortIdx); err != nil {
- logger.Error(ctx, "failed-to-load-onu-gem-info-cache")
+ onuIDStart := flowMgr.deviceHandler.deviceInfo.OnuIdStart
+ onuIDEnd := flowMgr.deviceHandler.deviceInfo.OnuIdEnd
+ for onuID := onuIDStart; onuID <= onuIDEnd; onuID++ {
+ // check for a valid serial number in onuGem, as GetOnuGemInfo can return a nil error when nothing is found at the path.
+ onugem, err := rMgr.GetOnuGemInfo(ctx, onuID, ponPortIdx)
+ if err == nil && onugem != nil && onugem.SerialNumber != "" {
+ flowMgr.onuGemInfoMap[onuID] = onugem
+ }
}
- //Load flowID list per gem map per interface from the kvstore.
- flowMgr.loadFlowIDlistForGem(ctx, idx)
+
+ //Load flowID list per gem map And gemIDs per flow per interface from the kvstore.
+ flowMgr.loadFlowIDsForGemAndGemIDsForFlow(ctx)
+
//load interface to multicast queue map from kv store
-
- flowMgr.gemToUniMap = make(map[gemPortKey][]uint32)
- flowMgr.gemToUniLock = sync.RWMutex{}
-
flowMgr.grpMgr.LoadInterfaceToMulticastQueueMap(ctx)
- flowMgr.reconcileSubscriberDataPathFlowIDMap(ctx)
logger.Info(ctx, "initialization-of-flow-manager-success")
return &flowMgr
}
-// toGemToUniMap adds uni info consisting of onu and uni ID to the map and associates it with a gem port
-func (f *OpenOltFlowMgr) toGemToUniMap(ctx context.Context, gemPK gemPortKey, onuID uint32, uniID uint32) {
- f.gemToUniLock.Lock()
- f.gemToUniMap[gemPK] = []uint32{onuID, uniID}
- f.gemToUniLock.Unlock()
-}
-
-// fromGemToUniMap returns onu and uni ID associated with the given key
-func (f *OpenOltFlowMgr) fromGemToUniMap(key gemPortKey) ([]uint32, bool) {
- f.gemToUniLock.RLock()
- defer f.gemToUniLock.RUnlock()
- val, ok := f.gemToUniMap[key]
- return val, ok
-}
-
-// removeFromGemToUniMap removes an entry associated with the given key from gemToUniMap
-func (f *OpenOltFlowMgr) removeFromGemToUniMap(key gemPortKey) {
- f.gemToUniLock.Lock()
- defer f.gemToUniLock.Unlock()
- delete(f.gemToUniMap, key)
-}
-
func (f *OpenOltFlowMgr) registerFlow(ctx context.Context, flowFromCore *ofp.OfpFlowStats, deviceFlow *openoltpb2.Flow) error {
if !deviceFlow.ReplicateFlow && deviceFlow.GemportId > 0 {
// Flow is not replicated in this case, we need to register the flow for a single gem-port
- return f.registerFlowIDForGem(ctx, uint32(deviceFlow.AccessIntfId), uint32(deviceFlow.GemportId), flowFromCore)
+ return f.registerFlowIDForGemAndGemIDForFlow(ctx, uint32(deviceFlow.AccessIntfId), uint32(deviceFlow.GemportId), flowFromCore)
} else if deviceFlow.ReplicateFlow && len(deviceFlow.PbitToGemport) > 0 {
// Flow is replicated in this case. We need to register the flow for all the gem-ports it is replicated to.
for _, gemPort := range deviceFlow.PbitToGemport {
- if err := f.registerFlowIDForGem(ctx, uint32(deviceFlow.AccessIntfId), gemPort, flowFromCore); err != nil {
+ if err := f.registerFlowIDForGemAndGemIDForFlow(ctx, uint32(deviceFlow.AccessIntfId), gemPort, flowFromCore); err != nil {
return err
}
}
@@ -331,15 +285,26 @@
return nil
}
-func (f *OpenOltFlowMgr) registerFlowIDForGem(ctx context.Context, accessIntfID uint32, gemPortID uint32, flowFromCore *ofp.OfpFlowStats) error {
- f.flowsUsedByGemPortKey.Lock()
- flowIDList, ok := f.flowsUsedByGemPort[gemPortID]
+func (f *OpenOltFlowMgr) registerFlowIDForGemAndGemIDForFlow(ctx context.Context, accessIntfID uint32, gemPortID uint32, flowFromCore *ofp.OfpFlowStats) error {
+ // update gem->flows map
+ f.gemToFlowIDsKey.Lock()
+ flowIDList, ok := f.gemToFlowIDs[gemPortID]
if !ok {
flowIDList = []uint64{flowFromCore.Id}
+ } else {
+ flowIDList = appendUnique64bit(flowIDList, flowFromCore.Id)
}
- flowIDList = appendUnique64bit(flowIDList, flowFromCore.Id)
- f.flowsUsedByGemPort[gemPortID] = flowIDList
- f.flowsUsedByGemPortKey.Unlock()
+ f.gemToFlowIDs[gemPortID] = flowIDList
+ f.gemToFlowIDsKey.Unlock()
+
+ // update flow->gems map
+ f.flowIDToGemsLock.Lock()
+ if _, ok := f.flowIDToGems[flowFromCore.Id]; !ok {
+ f.flowIDToGems[flowFromCore.Id] = []uint32{gemPortID}
+ } else {
+ f.flowIDToGems[flowFromCore.Id] = appendUnique32bit(f.flowIDToGems[flowFromCore.Id], gemPortID)
+ }
+ f.flowIDToGemsLock.Unlock()
// update the flowids for a gem to the KVstore
return f.resourceMgr.UpdateFlowIDsForGem(ctx, accessIntfID, gemPortID, flowIDList)
@@ -452,7 +417,7 @@
if meterInfo != nil {
logger.Debugw(ctx, "scheduler-already-created-for-upstream", log.Fields{"device-id": f.deviceHandler.device.Id, "meter-id": sq.meterID})
- if meterInfo.MeterConfig.MeterId == sq.meterID {
+ if meterInfo.MeterID == sq.meterID {
if err := f.resourceMgr.HandleMeterInfoRefCntUpdate(ctx, Direction, sq.intfID, sq.onuID, sq.uniID, sq.tpID, true); err != nil {
return err
}
@@ -460,7 +425,7 @@
}
return olterrors.NewErrInvalidValue(log.Fields{
"unsupported": "meter-id",
- "kv-store-meter-id": meterInfo.MeterConfig.MeterId,
+ "kv-store-meter-id": meterInfo.MeterID,
"meter-id-in-flow": sq.meterID,
"device-id": f.deviceHandler.device.Id}, nil)
}
@@ -472,18 +437,9 @@
"device-id": f.deviceHandler.device.Id})
if sq.direction == tp_pb.Direction_UPSTREAM {
- SchedCfg, err = f.techprofile[sq.intfID].GetUsScheduler(ctx, sq.tpInst.(*tp.TechProfile))
+ SchedCfg = f.techprofile.GetUsScheduler(sq.tpInst.(*tp_pb.TechProfileInstance))
} else if sq.direction == tp_pb.Direction_DOWNSTREAM {
- SchedCfg, err = f.techprofile[sq.intfID].GetDsScheduler(ctx, sq.tpInst.(*tp.TechProfile))
- }
-
- if err != nil {
- return olterrors.NewErrNotFound("scheduler-config",
- log.Fields{
- "intf-id": sq.intfID,
- "direction": sq.direction,
- "tp-inst": sq.tpInst,
- "device-id": f.deviceHandler.device.Id}, err)
+ SchedCfg = f.techprofile.GetDsScheduler(sq.tpInst.(*tp_pb.TechProfileInstance))
}
found := false
@@ -491,13 +447,10 @@
if sq.flowMetadata != nil {
for _, meter := range sq.flowMetadata.Meters {
if sq.meterID == meter.MeterId {
- meterInfo.MeterConfig = ofp.OfpMeterConfig{}
- meterInfo.MeterConfig.MeterId = meter.MeterId
- meterInfo.MeterConfig.Flags = meter.Flags
+ meterInfo.MeterID = meter.MeterId
meterInfo.RefCnt = 1 // initialize it to 1, since this is the first flow that referenced the meter id.
- meterInfo.MeterConfig.Bands = append(meterInfo.MeterConfig.Bands, meter.Bands...)
logger.Debugw(ctx, "found-meter-config-from-flowmetadata",
- log.Fields{"meterConfig": meterInfo.MeterConfig,
+ log.Fields{"meter": meter,
"device-id": f.deviceHandler.device.Id})
found = true
break
@@ -515,14 +468,14 @@
}
var TrafficShaping *tp_pb.TrafficShapingInfo
- if TrafficShaping, err = meters.GetTrafficShapingInfo(ctx, &meterInfo.MeterConfig); err != nil {
+ if TrafficShaping, err = meters.GetTrafficShapingInfo(ctx, sq.flowMetadata.Meters[0]); err != nil {
return olterrors.NewErrInvalidValue(log.Fields{
"reason": "invalid-meter-config",
"meter-id": sq.meterID,
"device-id": f.deviceHandler.device.Id}, nil)
}
- TrafficSched := []*tp_pb.TrafficScheduler{f.techprofile[sq.intfID].GetTrafficScheduler(sq.tpInst.(*tp.TechProfile), SchedCfg, TrafficShaping)}
+ TrafficSched := []*tp_pb.TrafficScheduler{f.techprofile.GetTrafficScheduler(sq.tpInst.(*tp_pb.TechProfileInstance), SchedCfg, TrafficShaping)}
TrafficSched[0].TechProfileId = sq.tpID
if err := f.pushSchedulerQueuesToDevice(ctx, sq, TrafficSched); err != nil {
@@ -549,7 +502,7 @@
}
func (f *OpenOltFlowMgr) pushSchedulerQueuesToDevice(ctx context.Context, sq schedQueue, TrafficSched []*tp_pb.TrafficScheduler) error {
- trafficQueues, err := f.techprofile[sq.intfID].GetTrafficQueues(ctx, sq.tpInst.(*tp.TechProfile), sq.direction)
+ trafficQueues, err := f.techprofile.GetTrafficQueues(ctx, sq.tpInst.(*tp_pb.TechProfileInstance), sq.direction)
if err != nil {
return olterrors.NewErrAdapter("unable-to-construct-traffic-queue-configuration",
@@ -598,10 +551,9 @@
"device-id": f.deviceHandler.device.Id})
if sq.direction == tp_pb.Direction_DOWNSTREAM {
- multicastTrafficQueues := f.techprofile[sq.intfID].GetMulticastTrafficQueues(ctx, sq.tpInst.(*tp.TechProfile))
+ multicastTrafficQueues := f.techprofile.GetMulticastTrafficQueues(ctx, sq.tpInst.(*tp_pb.TechProfileInstance))
if len(multicastTrafficQueues) > 0 {
- if _, present := f.grpMgr.GetInterfaceToMcastQueueMap(sq.intfID); !present {
- //assumed that there is only one queue per PON for the multicast service
+ if _, present := f.grpMgr.GetInterfaceToMcastQueueMap(sq.intfID); !present { //assumed that there is only one queue per PON for the multicast service
//the default queue with multicastQueuePerPonPort.Priority per a pon interface is used for multicast service
//just put it in interfaceToMcastQueueMap to use for building group members
logger.Debugw(ctx, "multicast-traffic-queues", log.Fields{"device-id": f.deviceHandler.device.Id})
@@ -613,7 +565,7 @@
f.grpMgr.UpdateInterfaceToMcastQueueMap(sq.intfID, val)
//also store the queue info in kv store
if err := f.resourceMgr.AddMcastQueueForIntf(ctx, sq.intfID, multicastQueuePerPonPort.GemportId, multicastQueuePerPonPort.Priority); err != nil {
- logger.Errorw(ctx, "failed-to-add-mcast-queue", log.Fields{"error": err})
+ logger.Errorw(ctx, "failed-to-add-mcast-queue", log.Fields{"err": err})
return err
}
@@ -639,27 +591,19 @@
"uni-port": sq.uniPort,
"device-id": f.deviceHandler.device.Id})
if sq.direction == tp_pb.Direction_UPSTREAM {
- SchedCfg, err = f.techprofile[sq.intfID].GetUsScheduler(ctx, sq.tpInst.(*tp.TechProfile))
+ SchedCfg = f.techprofile.GetUsScheduler(sq.tpInst.(*tp_pb.TechProfileInstance))
Direction = "upstream"
} else if sq.direction == tp_pb.Direction_DOWNSTREAM {
- SchedCfg, err = f.techprofile[sq.intfID].GetDsScheduler(ctx, sq.tpInst.(*tp.TechProfile))
+ SchedCfg = f.techprofile.GetDsScheduler(sq.tpInst.(*tp_pb.TechProfileInstance))
Direction = "downstream"
}
- if err != nil {
- return olterrors.NewErrNotFound("scheduler-config",
- log.Fields{
- "int-id": sq.intfID,
- "direction": sq.direction,
- "device-id": f.deviceHandler.device.Id}, err)
- }
-
TrafficShaping := &tp_pb.TrafficShapingInfo{} // this info is not really useful for the agent during flow removal. Just use default values.
- TrafficSched := []*tp_pb.TrafficScheduler{f.techprofile[sq.intfID].GetTrafficScheduler(sq.tpInst.(*tp.TechProfile), SchedCfg, TrafficShaping)}
+ TrafficSched := []*tp_pb.TrafficScheduler{f.techprofile.GetTrafficScheduler(sq.tpInst.(*tp_pb.TechProfileInstance), SchedCfg, TrafficShaping)}
TrafficSched[0].TechProfileId = sq.tpID
- TrafficQueues, err := f.techprofile[sq.intfID].GetTrafficQueues(ctx, sq.tpInst.(*tp.TechProfile), sq.direction)
+ TrafficQueues, err := f.techprofile.GetTrafficQueues(ctx, sq.tpInst.(*tp_pb.TechProfileInstance), sq.direction)
if err != nil {
return olterrors.NewErrAdapter("unable-to-construct-traffic-queue-configuration",
log.Fields{
@@ -703,7 +647,7 @@
"uni-port": sq.uniPort})
if sq.direction == tp_pb.Direction_UPSTREAM {
- allocID := sq.tpInst.(*tp.TechProfile).UsScheduler.AllocID
+ allocID := sq.tpInst.(*tp_pb.TechProfileInstance).UsScheduler.AllocId
f.resourceMgr.FreeAllocID(ctx, sq.intfID, sq.onuID, sq.uniID, allocID)
// Delete the TCONT on the ONU.
uni := getUniPortPath(f.deviceHandler.device.Id, sq.intfID, int32(sq.onuID), int32(sq.uniID))
@@ -752,7 +696,6 @@
var gemPortIDs []uint32
tpInstanceExists := false
var err error
-
allocIDs = f.resourceMgr.GetCurrentAllocIDsForOnu(ctx, intfID, onuID, uniID)
allgemPortIDs = f.resourceMgr.GetCurrentGEMPortIDsForOnu(ctx, intfID, onuID, uniID)
tpPath := f.getTPpath(ctx, intfID, uni, TpID)
@@ -765,24 +708,24 @@
"tp-id": TpID})
// Check tech profile instance already exists for derived port name
- techProfileInstance, _ := f.techprofile[intfID].GetTPInstanceFromKVStore(ctx, TpID, tpPath)
+ techProfileInstance, _ := f.techprofile.GetTPInstance(ctx, tpPath)
if techProfileInstance == nil {
logger.Infow(ctx, "tp-instance-not-found--creating-new",
log.Fields{
"path": tpPath,
"device-id": f.deviceHandler.device.Id})
- techProfileInstance, err = f.techprofile[intfID].CreateTechProfInstance(ctx, TpID, uni, intfID)
+ techProfileInstance, err = f.techprofile.CreateTechProfileInstance(ctx, TpID, uni, intfID)
if err != nil {
// This should not happen, something wrong in KV backend transaction
logger.Errorw(ctx, "tp-instance-create-failed",
log.Fields{
- "error": err,
+ "err": err,
"tp-id": TpID,
"device-id": f.deviceHandler.device.Id})
return 0, nil, nil
}
if err := f.resourceMgr.UpdateTechProfileIDForOnu(ctx, intfID, onuID, uniID, TpID); err != nil {
- logger.Warnw(ctx, "failed-to-update-tech-profile-id", log.Fields{"error": err})
+ logger.Warnw(ctx, "failed-to-update-tech-profile-id", log.Fields{"err": err})
}
} else {
logger.Debugw(ctx, "tech-profile-instance-already-exist-for-given port-name",
@@ -793,14 +736,14 @@
}
switch tpInst := techProfileInstance.(type) {
- case *tp.TechProfile:
+ case *tp_pb.TechProfileInstance:
if UsMeterID != 0 {
sq := schedQueue{direction: tp_pb.Direction_UPSTREAM, intfID: intfID, onuID: onuID, uniID: uniID, tpID: TpID,
uniPort: uniPort, tpInst: techProfileInstance, meterID: UsMeterID, flowMetadata: flowMetadata}
if err := f.CreateSchedulerQueues(ctx, sq); err != nil {
logger.Errorw(ctx, "CreateSchedulerQueues-failed-upstream",
log.Fields{
- "error": err,
+ "err": err,
"onu-id": onuID,
"uni-id": uniID,
"intf-id": intfID,
@@ -815,7 +758,7 @@
if err := f.CreateSchedulerQueues(ctx, sq); err != nil {
logger.Errorw(ctx, "CreateSchedulerQueues-failed-downstream",
log.Fields{
- "error": err,
+ "err": err,
"onu-id": onuID,
"uni-id": uniID,
"intf-id": intfID,
@@ -824,9 +767,9 @@
return 0, nil, nil
}
}
- allocID := tpInst.UsScheduler.AllocID
+ allocID := tpInst.UsScheduler.AllocId
for _, gem := range tpInst.UpstreamGemPortAttributeList {
- gemPortIDs = append(gemPortIDs, gem.GemportID)
+ gemPortIDs = append(gemPortIDs, gem.GemportId)
}
allocIDs = appendUnique32bit(allocIDs, allocID)
@@ -848,12 +791,12 @@
// Send Tconts and GEM ports to KV store
f.storeTcontsGEMPortsIntoKVStore(ctx, intfID, onuID, uniID, allocIDs, allgemPortIDs)
return allocID, gemPortIDs, techProfileInstance
- case *tp.EponProfile:
+ case *openoltpb2.EponTechProfileInstance:
// CreateSchedulerQueues for EPON needs to be implemented here
// when voltha-protos for EPON is completed.
- allocID := tpInst.AllocID
+ allocID := tpInst.AllocId
for _, gem := range tpInst.UpstreamQueueAttributeList {
- gemPortIDs = append(gemPortIDs, gem.GemportID)
+ gemPortIDs = append(gemPortIDs, gem.GemportId)
}
allocIDs = appendUnique32bit(allocIDs, allocID)
@@ -897,36 +840,18 @@
if err := f.resourceMgr.UpdateGEMPortIDsForOnu(ctx, intfID, onuID, uniID, gemPortIDs); err != nil {
logger.Errorw(ctx, "error-while-uploading-gemports-to-kv-store", log.Fields{"device-id": f.deviceHandler.device.Id})
}
- if err := f.resourceMgr.UpdateGEMportsPonportToOnuMapOnKVStore(ctx, gemPortIDs, intfID, onuID, uniID); err != nil {
- logger.Error(ctx, "error-while-uploading-gemtopon-map-to-kv-store", log.Fields{"device-id": f.deviceHandler.device.Id})
- } else {
- //add to gem to uni cache
- f.addGemPortUniAssociationsToCache(ctx, intfID, onuID, uniID, gemPortIDs)
- }
+
logger.Infow(ctx, "stored-tconts-and-gem-into-kv-store-successfully", log.Fields{"device-id": f.deviceHandler.device.Id})
for _, gemPort := range gemPortIDs {
f.addGemPortToOnuInfoMap(ctx, intfID, onuID, gemPort)
}
}
-//addGemPortUniAssociationsToCache
-func (f *OpenOltFlowMgr) addGemPortUniAssociationsToCache(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, gemPortIDs []uint32) {
- for _, gemPortID := range gemPortIDs {
- key := gemPortKey{
- intfID: intfID,
- gemPort: gemPortID,
- }
- f.toGemToUniMap(ctx, key, onuID, uniID)
- }
- logger.Debugw(ctx, "gem-to-uni-info-added-to-cache", log.Fields{"device-id": f.deviceHandler.device.Id, "intfID": intfID,
- "gemPortIDs": gemPortIDs, "onuID": onuID, "uniID": uniID})
-}
-
func (f *OpenOltFlowMgr) populateTechProfilePerPonPort(ctx context.Context) error {
var tpCount int
for _, techRange := range f.resourceMgr.DevInfo.Ranges {
for _, intfID := range techRange.IntfIds {
- f.techprofile[intfID] = f.resourceMgr.ResourceMgrs[intfID].TechProfileMgr
+ f.techprofile = f.resourceMgr.PonRsrMgr.TechProfileMgr
tpCount++
logger.Debugw(ctx, "init-tech-profile-done",
log.Fields{
@@ -1004,13 +929,6 @@
func (f *OpenOltFlowMgr) addSymmetricDataPathFlow(ctx context.Context, flowContext *flowContext, direction string) error {
- var inverseDirection string
- if direction == Upstream {
- inverseDirection = Downstream
- } else {
- inverseDirection = Upstream
- }
-
intfID := flowContext.intfID
onuID := flowContext.onuID
uniID := flowContext.uniID
@@ -1067,33 +985,23 @@
}, err).Log()
}
- // Get symmetric flowID if it exists
- // This symmetric flowID will be needed by agent software to use the same device flow-id that was used for the
- // symmetric flow earlier
- // symmetric flowID 0 is considered by agent as non-existent symmetric flow
- keySymm := subscriberDataPathFlowIDKey{intfID: intfID, onuID: onuID, uniID: uniID, direction: inverseDirection, tpID: tpID}
- f.subscriberDataPathFlowIDMapLock.RLock()
- symmFlowID := f.subscriberDataPathFlowIDMap[keySymm]
- f.subscriberDataPathFlowIDMapLock.RUnlock()
-
flow := openoltpb2.Flow{AccessIntfId: int32(intfID),
- OnuId: int32(onuID),
- UniId: int32(uniID),
- FlowId: logicalFlow.Id,
- FlowType: direction,
- AllocId: int32(allocID),
- NetworkIntfId: int32(networkIntfID),
- GemportId: int32(gemPortID),
- Classifier: classifierProto,
- Action: actionProto,
- Priority: int32(logicalFlow.Priority),
- Cookie: logicalFlow.Cookie,
- PortNo: flowContext.portNo,
- TechProfileId: tpID,
- ReplicateFlow: len(flowContext.pbitToGem) > 0,
- PbitToGemport: flowContext.pbitToGem,
- SymmetricFlowId: symmFlowID,
- GemportToAes: flowContext.gemToAes,
+ OnuId: int32(onuID),
+ UniId: int32(uniID),
+ FlowId: logicalFlow.Id,
+ FlowType: direction,
+ AllocId: int32(allocID),
+ NetworkIntfId: int32(networkIntfID),
+ GemportId: int32(gemPortID),
+ Classifier: classifierProto,
+ Action: actionProto,
+ Priority: int32(logicalFlow.Priority),
+ Cookie: logicalFlow.Cookie,
+ PortNo: flowContext.portNo,
+ TechProfileId: tpID,
+ ReplicateFlow: len(flowContext.pbitToGem) > 0,
+ PbitToGemport: flowContext.pbitToGem,
+ GemportToAes: flowContext.gemToAes,
}
if err := f.addFlowToDevice(ctx, logicalFlow, &flow); err != nil {
return olterrors.NewErrFlowOp("add", logicalFlow.Id, nil, err).Log()
@@ -1104,21 +1012,6 @@
"flow": flow,
"intf-id": intfID,
"onu-id": onuID})
- flowInfo := rsrcMgr.FlowInfo{Flow: &flow, IsSymmtricFlow: true}
- if err := f.resourceMgr.UpdateFlowIDInfo(ctx, uint32(flow.AccessIntfId), flow.OnuId, flow.UniId, flow.FlowId, flowInfo); err != nil {
- return olterrors.NewErrPersistence("update", "flow", logicalFlow.Id,
- log.Fields{
- "flow": flow,
- "device-id": f.deviceHandler.device.Id,
- "intf-id": intfID,
- "onu-id": onuID}, err).Log()
- }
-
- // Update the current flowID to the map
- keyCurr := subscriberDataPathFlowIDKey{intfID: intfID, onuID: onuID, uniID: uniID, direction: direction, tpID: tpID}
- f.subscriberDataPathFlowIDMapLock.Lock()
- f.subscriberDataPathFlowIDMap[keyCurr] = logicalFlow.Id
- f.subscriberDataPathFlowIDMapLock.Unlock()
return nil
}
@@ -1206,13 +1099,6 @@
"flow-id": logicalFlow.Id,
"intf-id": intfID,
"onu-id": onuID})
- flowInfo := rsrcMgr.FlowInfo{Flow: &dhcpFlow}
- if err := f.resourceMgr.UpdateFlowIDInfo(ctx, uint32(dhcpFlow.AccessIntfId), dhcpFlow.OnuId, dhcpFlow.UniId, dhcpFlow.FlowId, flowInfo); err != nil {
- return olterrors.NewErrPersistence("update", "flow", dhcpFlow.FlowId,
- log.Fields{
- "flow": dhcpFlow,
- "device-id": f.deviceHandler.device.Id}, err).Log()
- }
return nil
}
@@ -1301,11 +1187,6 @@
return olterrors.NewErrFlowOp("add", logicalFlow.Id, log.Fields{"flow": flow, "device-id": f.deviceHandler.device.Id}, err).Log()
}
- flowInfo := rsrcMgr.FlowInfo{Flow: &flow}
- if err := f.resourceMgr.UpdateFlowIDInfo(ctx, uint32(flow.AccessIntfId), flow.OnuId, flow.UniId, flow.FlowId, flowInfo); err != nil {
- return olterrors.NewErrPersistence("update", "flow", flow.FlowId, log.Fields{"flow": flow, "device-id": f.deviceHandler.device.Id}, err).Log()
- }
-
return nil
}
@@ -1336,7 +1217,7 @@
uplinkAction := make(map[string]interface{})
// Fill Classfier
- uplinkClassifier[EthType] = uint32(ethType)
+ uplinkClassifier[EthType] = ethType
uplinkClassifier[PacketTagType] = SingleTag
uplinkClassifier[VlanVid] = vlanID
uplinkClassifier[VlanPcp] = classifier[VlanPcp]
@@ -1415,13 +1296,7 @@
"intf-id": intfID,
"ethType": ethType,
})
- flowInfo := rsrcMgr.FlowInfo{Flow: &upstreamFlow}
- if err := f.resourceMgr.UpdateFlowIDInfo(ctx, uint32(upstreamFlow.AccessIntfId), upstreamFlow.OnuId, upstreamFlow.UniId, upstreamFlow.FlowId, flowInfo); err != nil {
- return olterrors.NewErrPersistence("update", "flow", upstreamFlow.FlowId,
- log.Fields{
- "flow": upstreamFlow,
- "device-id": f.deviceHandler.device.Id}, err).Log()
- }
+
return nil
}
@@ -1502,7 +1377,7 @@
// getTPpath return the ETCD path for a given UNI port
func (f *OpenOltFlowMgr) getTPpath(ctx context.Context, intfID uint32, uniPath string, TpID uint32) string {
- return f.techprofile[intfID].GetTechProfileInstanceKVPath(ctx, TpID, uniPath)
+ return f.techprofile.GetTechProfileInstanceKey(ctx, TpID, uniPath)
}
// DeleteTechProfileInstances removes the tech profile instances from persistent storage
@@ -1512,11 +1387,11 @@
for _, tpID := range tpIDList {
if err := f.DeleteTechProfileInstance(ctx, intfID, onuID, uniID, uniPortName, tpID); err != nil {
- _ = olterrors.NewErrAdapter("delete-tech-profile-failed", log.Fields{"device-id": f.deviceHandler.device.Id}, err).Log()
+ logger.Errorw(ctx, "delete-tech-profile-failed", log.Fields{"err": err, "device-id": f.deviceHandler.device.Id})
// return err
// We should continue to delete tech-profile instances for other TP IDs
}
- logger.Debugw(ctx, "tech-profile-deleted", log.Fields{"device-id": f.deviceHandler.device.Id, "tp-id": tpID})
+ logger.Debugw(ctx, "tech-profile-instance-deleted", log.Fields{"device-id": f.deviceHandler.device.Id, "uniPortName": uniPortName, "tp-id": tpID})
}
return nil
}
@@ -1526,7 +1401,7 @@
if uniPortName == "" {
uniPortName = getUniPortPath(f.deviceHandler.device.Id, intfID, int32(onuID), int32(uniID))
}
- if err := f.techprofile[intfID].DeleteTechProfileInstance(ctx, tpID, uniPortName); err != nil {
+ if err := f.techprofile.DeleteTechProfileInstance(ctx, tpID, uniPortName); err != nil {
return olterrors.NewErrAdapter("failed-to-delete-tp-instance-from-kv-store",
log.Fields{
"tp-id": tpID,
@@ -1698,13 +1573,7 @@
"device-id": f.deviceHandler.device.Id,
"onu-id": onuID,
"flow-id": flow.Id})
- flowInfo := rsrcMgr.FlowInfo{Flow: &downstreamflow}
- if err := f.resourceMgr.UpdateFlowIDInfo(ctx, networkInterfaceID, int32(onuID), int32(uniID), flow.Id, flowInfo); err != nil {
- return olterrors.NewErrPersistence("update", "flow", flow.Id,
- log.Fields{
- "flow": downstreamflow,
- "device-id": f.deviceHandler.device.Id}, err)
- }
+
return nil
}
@@ -1781,8 +1650,8 @@
return err
}
- delGemPortMsg := &ic.InterAdapterDeleteGemPortMessage{UniId: uniID, TpPath: tpPath, GemPortId: gemPortID}
- logger.Debugw(ctx, "sending-gem-port-delete-to-openonu-adapter",
+ delGemPortMsg := &ic.InterAdapterDeleteGemPortMessage{UniId: uniID, TpInstancePath: tpPath, GemPortId: gemPortID}
+ logger.Infow(ctx, "sending-gem-port-delete-to-openonu-adapter",
log.Fields{
"msg": *delGemPortMsg,
"device-id": f.deviceHandler.device.Id})
@@ -1822,7 +1691,7 @@
return err
}
- delTcontMsg := &ic.InterAdapterDeleteTcontMessage{UniId: uniID, TpPath: tpPath, AllocId: allocID}
+ delTcontMsg := &ic.InterAdapterDeleteTcontMessage{UniId: uniID, TpInstancePath: tpPath, AllocId: allocID}
logger.Debugw(ctx, "sending-tcont-delete-to-openonu-adapter",
log.Fields{
"msg": *delTcontMsg,
@@ -1853,37 +1722,38 @@
// Otherwise stale info continues to exist after gemport is freed and wrong logicalPortNo
// is conveyed to ONOS during packet-in OF message.
func (f *OpenOltFlowMgr) deleteGemPortFromLocalCache(ctx context.Context, intfID uint32, onuID uint32, gemPortID uint32) {
-
- f.onuGemInfoLock.Lock()
- defer f.onuGemInfoLock.Unlock()
-
logger.Infow(ctx, "deleting-gem-from-local-cache",
log.Fields{
"gem-port-id": gemPortID,
"intf-id": intfID,
"onu-id": onuID,
- "device-id": f.deviceHandler.device.Id,
- "onu-gem": f.onuGemInfo})
-
- onugem := f.onuGemInfo
+ "device-id": f.deviceHandler.device.Id})
+ f.onuGemInfoLock.RLock()
+ onugem, ok := f.onuGemInfoMap[onuID]
+ f.onuGemInfoLock.RUnlock()
+ if !ok {
+ logger.Warnw(ctx, "onu gem info already cleared from cache", log.Fields{
+ "gem-port-id": gemPortID,
+ "intf-id": intfID,
+ "onu-id": onuID,
+ "device-id": f.deviceHandler.device.Id})
+ return
+ }
deleteLoop:
- for i, onu := range onugem {
- if onu.OnuID == onuID {
- for j, gem := range onu.GemPorts {
- // If the gemport is found, delete it from local cache.
- if gem == gemPortID {
- onu.GemPorts = append(onu.GemPorts[:j], onu.GemPorts[j+1:]...)
- onugem[i] = onu
- logger.Infow(ctx, "removed-gemport-from-local-cache",
- log.Fields{
- "intf-id": intfID,
- "onu-id": onuID,
- "deletedgemport-id": gemPortID,
- "gemports": onu.GemPorts,
- "device-id": f.deviceHandler.device.Id})
- break deleteLoop
- }
- }
+ for j, gem := range onugem.GemPorts {
+ // If the gemport is found, delete it from local cache.
+ if gem == gemPortID {
+ onugem.GemPorts = append(onugem.GemPorts[:j], onugem.GemPorts[j+1:]...)
+ f.onuGemInfoLock.Lock()
+ f.onuGemInfoMap[onuID] = onugem
+ f.onuGemInfoLock.Unlock()
+ logger.Infow(ctx, "removed-gemport-from-local-cache",
+ log.Fields{
+ "intf-id": intfID,
+ "onu-id": onuID,
+ "deletedgemport-id": gemPortID,
+ "gemports": onugem.GemPorts,
+ "device-id": f.deviceHandler.device.Id})
break deleteLoop
}
}
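
Note: the hunk above replaces the flat onuGemInfo slice with a per-ONU map guarded by a read-write lock. A minimal, self-contained sketch of that pattern follows, assuming a simplified OnuGemInfo value type and a gemCache struct as stand-ins for the flow manager's onuGemInfoMap and onuGemInfoLock:

package main

import (
	"fmt"
	"sync"
)

// OnuGemInfo is a simplified stand-in for the resource-manager type of the same name.
type OnuGemInfo struct {
	OnuID    uint32
	GemPorts []uint32
}

type gemCache struct {
	mu   sync.RWMutex
	onus map[uint32]OnuGemInfo // keyed by ONU ID, as in onuGemInfoMap
}

// deleteGem removes gemPortID from the ONU's gem list, if the entry still exists.
func (c *gemCache) deleteGem(onuID, gemPortID uint32) {
	c.mu.RLock()
	onu, ok := c.onus[onuID]
	c.mu.RUnlock()
	if !ok {
		return // entry already cleared from the cache
	}
	for i, gem := range onu.GemPorts {
		if gem == gemPortID {
			onu.GemPorts = append(onu.GemPorts[:i], onu.GemPorts[i+1:]...)
			c.mu.Lock()
			c.onus[onuID] = onu // write the updated copy back under the write lock
			c.mu.Unlock()
			break
		}
	}
}

func main() {
	c := &gemCache{onus: map[uint32]OnuGemInfo{1: {OnuID: 1, GemPorts: []uint32{1024, 1025}}}}
	c.deleteGem(1, 1024)
	fmt.Println(c.onus[1].GemPorts) // [1025]
}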
@@ -1891,7 +1761,7 @@
//clearResources clears pon resources in kv store and the device
// nolint: gocyclo
-func (f *OpenOltFlowMgr) clearResources(ctx context.Context, flow *ofp.OfpFlowStats, intfID uint32, onuID int32, uniID int32,
+func (f *OpenOltFlowMgr) clearResources(ctx context.Context, intfID uint32, onuID int32, uniID int32,
gemPortID int32, flowID uint64, portNum uint32, tpID uint32) error {
uni := getUniPortPath(f.deviceHandler.device.Id, intfID, onuID, uniID)
@@ -1900,27 +1770,22 @@
log.Fields{
"tpPath": tpPath,
"device-id": f.deviceHandler.device.Id})
- techprofileInst, err := f.techprofile[intfID].GetTPInstanceFromKVStore(ctx, tpID, tpPath)
- if err != nil || techprofileInst == nil { // This should not happen, something wrong in KV backend transaction
- return olterrors.NewErrNotFound("tech-profile-in-kv-store",
- log.Fields{
- "tp-id": tpID,
- "path": tpPath}, err)
- }
used := f.isGemPortUsedByAnotherFlow(uint32(gemPortID))
if used {
- f.flowsUsedByGemPortKey.Lock()
- defer f.flowsUsedByGemPortKey.Unlock()
+ f.gemToFlowIDsKey.RLock()
+ flowIDs := f.gemToFlowIDs[uint32(gemPortID)]
+ f.gemToFlowIDsKey.RUnlock()
- flowIDs := f.flowsUsedByGemPort[uint32(gemPortID)]
for i, flowIDinMap := range flowIDs {
if flowIDinMap == flowID {
flowIDs = append(flowIDs[:i], flowIDs[i+1:]...)
- // everytime flowsUsedByGemPort cache is updated the same should be updated
+ f.gemToFlowIDsKey.Lock()
+ f.gemToFlowIDs[uint32(gemPortID)] = flowIDs
+ f.gemToFlowIDsKey.Unlock()
+ // everytime gemToFlowIDs cache is updated the same should be updated
// in kv store by calling UpdateFlowIDsForGem
- f.flowsUsedByGemPort[uint32(gemPortID)] = flowIDs
if err := f.resourceMgr.UpdateFlowIDsForGem(ctx, intfID, uint32(gemPortID), flowIDs); err != nil {
return err
}
@@ -1936,28 +1801,17 @@
return nil
}
logger.Debugf(ctx, "gem-port-id %d is-not-used-by-another-flow--releasing-the-gem-port", gemPortID)
- f.resourceMgr.RemoveGemPortIDForOnu(ctx, intfID, uint32(onuID), uint32(uniID), uint32(gemPortID))
- // TODO: The TrafficQueue corresponding to this gem-port also should be removed immediately.
- // But it is anyway eventually removed later when the TechProfile is freed, so not a big issue for now.
- f.resourceMgr.RemoveGEMportPonportToOnuMapOnKVStore(ctx, uint32(gemPortID), intfID)
- // also clear gem to uni cache
- f.removeFromGemToUniMap(gemPortKey{
- intfID: intfID,
- gemPort: uint32(gemPortID),
- })
f.deleteGemPortFromLocalCache(ctx, intfID, uint32(onuID), uint32(gemPortID))
-
- f.onuIdsLock.Lock() // TODO: What is this lock?
-
- //everytime an entry is deleted from flowsUsedByGemPort cache, the same should be updated in kv as well
+ _ = f.resourceMgr.RemoveGemFromOnuGemInfo(ctx, intfID, uint32(onuID), uint32(gemPortID)) // ignore error and proceed.
+ //everytime an entry is deleted from gemToFlowIDs cache, the same should be updated in kv as well
// by calling DeleteFlowIDsForGem
- f.flowsUsedByGemPortKey.Lock()
- delete(f.flowsUsedByGemPort, uint32(gemPortID))
- f.flowsUsedByGemPortKey.Unlock()
- f.resourceMgr.DeleteFlowIDsForGem(ctx, intfID, uint32(gemPortID))
- f.resourceMgr.FreeGemPortID(ctx, intfID, uint32(onuID), uint32(uniID), uint32(gemPortID))
+ f.gemToFlowIDsKey.Lock()
+ delete(f.gemToFlowIDs, uint32(gemPortID))
+ f.gemToFlowIDsKey.Unlock()
- f.onuIdsLock.Unlock()
+ f.resourceMgr.DeleteFlowIDsForGem(ctx, intfID, uint32(gemPortID))
+
+ f.resourceMgr.FreeGemPortID(ctx, intfID, uint32(onuID), uint32(uniID), uint32(gemPortID))
// Delete the gem port on the ONU.
if err := f.sendDeleteGemPortToChild(ctx, intfID, uint32(onuID), uint32(uniID), uint32(gemPortID), tpPath); err != nil {
@@ -1970,8 +1824,15 @@
"device-id": f.deviceHandler.device.Id,
"gemport-id": gemPortID})
}
+ techprofileInst, err := f.techprofile.GetTPInstance(ctx, tpPath)
+ if err != nil || techprofileInst == nil { // This should not happen, something wrong in KV backend transaction
+ return olterrors.NewErrNotFound("tech-profile-in-kv-store",
+ log.Fields{
+ "tp-id": tpID,
+ "path": tpPath}, err)
+ }
switch techprofileInst := techprofileInst.(type) {
- case *tp.TechProfile:
+ case *tp_pb.TechProfileInstance:
ok, _ := f.isTechProfileUsedByAnotherGem(ctx, intfID, uint32(onuID), uint32(uniID), techprofileInst, uint32(gemPortID))
if !ok {
if err := f.resourceMgr.RemoveTechProfileIDForOnu(ctx, intfID, uint32(onuID), uint32(uniID), tpID); err != nil {
@@ -1987,23 +1848,23 @@
logger.Warn(ctx, err)
}
}
- case *tp.EponProfile:
+ case *tp_pb.EponTechProfileInstance:
if err := f.resourceMgr.RemoveTechProfileIDForOnu(ctx, intfID, uint32(onuID), uint32(uniID), tpID); err != nil {
logger.Warn(ctx, err)
}
if err := f.DeleteTechProfileInstance(ctx, intfID, uint32(onuID), uint32(uniID), "", tpID); err != nil {
logger.Warn(ctx, err)
}
- f.resourceMgr.FreeAllocID(ctx, intfID, uint32(onuID), uint32(uniID), techprofileInst.AllocID)
+ f.resourceMgr.FreeAllocID(ctx, intfID, uint32(onuID), uint32(uniID), techprofileInst.AllocId)
// Delete the TCONT on the ONU.
- if err := f.sendDeleteTcontToChild(ctx, intfID, uint32(onuID), uint32(uniID), techprofileInst.AllocID, tpPath); err != nil {
+ if err := f.sendDeleteTcontToChild(ctx, intfID, uint32(onuID), uint32(uniID), techprofileInst.AllocId, tpPath); err != nil {
logger.Errorw(ctx, "error-processing-delete-tcont-towards-onu",
log.Fields{
"intf": intfID,
"onu-id": onuID,
"uni-id": uniID,
"device-id": f.deviceHandler.device.Id,
- "alloc-id": techprofileInst.AllocID})
+ "alloc-id": techprofileInst.AllocId})
}
default:
logger.Errorw(ctx, "error-unknown-tech",
@@ -2016,7 +1877,6 @@
// nolint: gocyclo
func (f *OpenOltFlowMgr) clearFlowFromDeviceAndResourceManager(ctx context.Context, flow *ofp.OfpFlowStats, flowDirection string) error {
- var flowInfo *rsrcMgr.FlowInfo
logger.Infow(ctx, "clear-flow-from-resource-manager",
log.Fields{
"flowDirection": flowDirection,
@@ -2037,6 +1897,16 @@
onuID := int32(onu)
uniID := int32(uni)
+ tpID, err := getTpIDFromFlow(ctx, flow)
+ if err != nil {
+ return olterrors.NewErrNotFound("tp-id",
+ log.Fields{
+ "flow": flow,
+ "intf-id": Intf,
+ "onu-id": onuID,
+ "uni-id": uniID,
+ "device-id": f.deviceHandler.device.Id}, err)
+ }
for _, field := range flows.GetOfbFields(flow) {
if field.Type == flows.IP_PROTO {
@@ -2060,86 +1930,45 @@
logger.Errorw(ctx, "invalid-in-port-number",
log.Fields{
"port-number": inPort,
- "error": err})
+ "err": err})
return err
}
}
- if flowInfo = f.resourceMgr.GetFlowIDInfo(ctx, Intf, onuID, uniID, flow.Id); flowInfo == nil {
- logger.Errorw(ctx, "flow-info-not-found-for-flow-to-be-removed", log.Fields{"flow-id": flow.Id, "intf-id": Intf, "onu-id": onuID, "uni-id": uniID})
- return olterrors.NewErrPersistence("remove", "flow", flow.Id, log.Fields{"flow": flow}, err)
- }
- removeFlowMessage := openoltpb2.Flow{FlowId: flowInfo.Flow.FlowId, FlowType: flowInfo.Flow.FlowType}
- logger.Debugw(ctx, "flow-to-be-deleted", log.Fields{"flow": flowInfo.Flow})
+
+ removeFlowMessage := openoltpb2.Flow{FlowId: flow.Id, AccessIntfId: int32(Intf), OnuId: onuID, UniId: uniID, TechProfileId: tpID, FlowType: flowDirection}
+ logger.Debugw(ctx, "flow-to-be-deleted", log.Fields{"flow": flow})
if err = f.removeFlowFromDevice(ctx, &removeFlowMessage, flow.Id); err != nil {
return err
}
- if err = f.resourceMgr.RemoveFlowIDInfo(ctx, Intf, onuID, uniID, flow.Id); err != nil {
- logger.Errorw(ctx, "failed-to-remove-flow-on-kv-store", log.Fields{"error": err})
- return err
- }
- tpID, err := getTpIDFromFlow(ctx, flow)
- if err != nil {
- return olterrors.NewErrNotFound("tp-id",
- log.Fields{
- "flow": flow,
- "intf-id": Intf,
- "onu-id": onuID,
- "uni-id": uniID,
- "device-id": f.deviceHandler.device.Id}, err)
- }
- if !flowInfo.Flow.ReplicateFlow {
- if err = f.clearResources(ctx, flow, Intf, onuID, uniID, flowInfo.Flow.GemportId, flowInfo.Flow.FlowId, portNum, tpID); err != nil {
+ f.flowIDToGemsLock.Lock()
+ gems, ok := f.flowIDToGems[flow.Id]
+ if !ok {
+ logger.Errorw(ctx, "flow-id-to-gem-map-not-found", log.Fields{"flowID": flow.Id})
+ f.flowIDToGemsLock.Unlock()
+ return olterrors.NewErrNotFound("flow-id-to-gem-map-not-found", log.Fields{"flowID": flow.Id}, nil)
+ }
+ copyOfGems := make([]uint32, len(gems))
+ _ = copy(copyOfGems, gems)
+ // Delete the flow-id to gemport list entry from the map now the flow is deleted.
+ delete(f.flowIDToGems, flow.Id)
+ f.flowIDToGemsLock.Unlock()
+
+ logger.Debugw(ctx, "gems-to-be-cleared", log.Fields{"gems": copyOfGems})
+ for _, gem := range copyOfGems {
+ if err = f.clearResources(ctx, Intf, onuID, uniID, int32(gem), flow.Id, portNum, tpID); err != nil {
logger.Errorw(ctx, "failed-to-clear-resources-for-flow", log.Fields{
- "flow-id": flow.Id,
- "stored-flow": flowInfo.Flow,
- "device-id": f.deviceHandler.device.Id,
- "stored-flow-id": flowInfo.Flow.FlowId,
- "onu-id": onuID,
- "intf": Intf,
- "err": err,
+ "flow-id": flow.Id,
+ "device-id": f.deviceHandler.device.Id,
+ "onu-id": onuID,
+ "intf": Intf,
+ "gem": gem,
+ "err": err,
})
return err
}
- } else {
- gems := make([]uint32, 0)
- for _, gem := range flowInfo.Flow.PbitToGemport {
- gems = appendUnique32bit(gems, gem)
- }
- logger.Debugw(ctx, "gems-to-be-cleared", log.Fields{"gems": gems})
- for _, gem := range gems {
- if err = f.clearResources(ctx, flow, Intf, onuID, uniID, int32(gem), flowInfo.Flow.FlowId, portNum, tpID); err != nil {
- logger.Errorw(ctx, "failed-to-clear-resources-for-flow", log.Fields{
- "flow-id": flow.Id,
- "stored-flow": flowInfo.Flow,
- "device-id": f.deviceHandler.device.Id,
- "stored-flow-id": flowInfo.Flow.FlowId,
- "onu-id": onuID,
- "intf": Intf,
- "gem": gem,
- "err": err,
- })
- return err
- }
- }
}
- // If datapath flow, clear the symmetric flow data from the subscriberDataPathFlowIDMap map
- if isDatapathFlow(flow) {
- if tpID, err := getTpIDFromFlow(ctx, flow); err != nil {
- var inverseDirection string
- if flowDirection == Upstream {
- inverseDirection = Downstream
- } else {
- inverseDirection = Upstream
- }
-
- keySymm := subscriberDataPathFlowIDKey{intfID: Intf, onuID: uint32(onuID), uniID: uint32(uniID), direction: inverseDirection, tpID: tpID}
- f.subscriberDataPathFlowIDMapLock.Lock()
- delete(f.subscriberDataPathFlowIDMap, keySymm)
- f.subscriberDataPathFlowIDMapLock.Unlock()
- }
- }
// Decrement reference count for the meter associated with the given <(pon_id, onu_id, uni_id)>/<tp_id>/meter_id/<direction>
if err := f.resourceMgr.HandleMeterInfoRefCntUpdate(ctx, flowDirection, Intf, uint32(onuID), uint32(uniID), tpID, false); err != nil {
return err
@@ -2203,7 +2032,8 @@
// Step2 : Push the flowControlBlock to ONU channel
// Step3 : Wait on response channel for response
// Step4 : Return error value
- logger.Debugw(ctx, "process-flow", log.Fields{"flow": flow, "addFlow": addFlow})
+ startTime := time.Now()
+ logger.Infow(ctx, "process-flow", log.Fields{"flow": flow, "addFlow": addFlow})
errChan := make(chan error)
flowCb := flowControlBlock{
ctx: ctx,
@@ -2223,7 +2053,7 @@
f.incomingFlows[onuID] <- flowCb
// Wait on the channel for flow handlers return value
err := <-errChan
- logger.Debugw(ctx, "process-flow--received-resp", log.Fields{"flow": flow, "addFlow": addFlow, "err": err})
+ logger.Infow(ctx, "process-flow--received-resp", log.Fields{"err": err, "totalTimeSeconds": time.Since(startTime).Seconds()})
return err
}
@@ -2235,17 +2065,17 @@
// process the flow completely before proceeding to handle the next flow
flowCb := <-subscriberFlowChannel
if flowCb.addFlow {
- logger.Debugw(flowCb.ctx, "adding-flow",
- log.Fields{"device-id": f.deviceHandler.device.Id,
- "flowToAdd": flowCb.flow})
+ logger.Info(flowCb.ctx, "adding-flow-start")
+ startTime := time.Now()
err := f.AddFlow(flowCb.ctx, flowCb.flow, flowCb.flowMetadata)
+ logger.Infow(flowCb.ctx, "adding-flow-complete", log.Fields{"processTimeSecs": time.Since(startTime).Seconds()})
// Pass the return value over the return channel
*flowCb.errChan <- err
} else {
- logger.Debugw(flowCb.ctx, "removing-flow",
- log.Fields{"device-id": f.deviceHandler.device.Id,
- "flowToRemove": flowCb.flow})
+ logger.Info(flowCb.ctx, "removing-flow-start")
+ startTime := time.Now()
err := f.RemoveFlow(flowCb.ctx, flowCb.flow)
+ logger.Infow(flowCb.ctx, "removing-flow-complete", log.Fields{"processTimeSecs": time.Since(startTime).Seconds()})
// Pass the return value over the return channel
*flowCb.errChan <- err
}
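
For context, the timing logs above wrap the per-ONU flow pipeline: each add/remove request is boxed into a flowControlBlock carrying a response channel, queued on the ONU's channel, and drained by a single handler goroutine so flows for one ONU are processed strictly in order. A hedged sketch of that mechanism (names and signatures here are illustrative, not the adapter's exact ones):

package main

import (
	"fmt"
	"time"
)

type flowControlBlock struct {
	addFlow bool
	flow    string
	errChan chan error
}

// perOnuFlowHandler drains one ONU's channel so that flow requests are serialized per ONU.
func perOnuFlowHandler(ch chan flowControlBlock) {
	for fcb := range ch {
		start := time.Now()
		var err error
		if fcb.addFlow {
			err = addFlow(fcb.flow) // serialized add
		} else {
			err = removeFlow(fcb.flow) // serialized remove
		}
		fmt.Printf("processed flow %q in %.3fs\n", fcb.flow, time.Since(start).Seconds())
		fcb.errChan <- err // report the result back to the caller
	}
}

func addFlow(f string) error    { return nil }
func removeFlow(f string) error { return nil }

func main() {
	ch := make(chan flowControlBlock)
	go perOnuFlowHandler(ch)
	errChan := make(chan error)
	ch <- flowControlBlock{addFlow: true, flow: "eapol-us", errChan: errChan}
	if err := <-errChan; err != nil {
		fmt.Println("flow add failed:", err)
	}
}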
@@ -2393,14 +2223,10 @@
}
//cached group can be removed now
if err := f.resourceMgr.RemoveFlowGroupFromKVStore(ctx, groupID, true); err != nil {
- logger.Warnw(ctx, "failed-to-remove-flow-group", log.Fields{"group-id": groupID, "error": err})
+ logger.Warnw(ctx, "failed-to-remove-flow-group", log.Fields{"group-id": groupID, "err": err})
}
}
- flowInfo := rsrcMgr.FlowInfo{Flow: &multicastFlow}
- if err = f.resourceMgr.UpdateFlowIDInfo(ctx, networkInterfaceID, int32(onuID), int32(uniID), flow.Id, flowInfo); err != nil {
- return olterrors.NewErrPersistence("update", "flow", flow.Id, log.Fields{"flow": multicastFlow}, err)
- }
return nil
}
@@ -2413,16 +2239,13 @@
}
return nniInterfaceID, nil
}
- // find the first NNI interface id of the device
- nniPorts, e := f.resourceMgr.GetNNIFromKVStore(ctx)
- if e == nil && len(nniPorts) > 0 {
- return nniPorts[0], nil
- }
- return 0, olterrors.NewErrNotFound("nni-port", nil, e).Log()
+
+ // TODO: For now we support only one NNI port in VOLTHA. We shall use only the first NNI port, i.e., interface-id 0.
+ return 0, nil
}
//sendTPDownloadMsgToChild send payload
-func (f *OpenOltFlowMgr) sendTPDownloadMsgToChild(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, uni string, TpID uint32) error {
+func (f *OpenOltFlowMgr) sendTPDownloadMsgToChild(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, uni string, TpID uint32, tpInst tp_pb.TechProfileInstance) error {
onuDev, err := f.getOnuDevice(ctx, intfID, onuID)
if err != nil {
@@ -2436,7 +2259,11 @@
logger.Debugw(ctx, "got-child-device-from-olt-device-handler", log.Fields{"onu-id": onuDev.deviceID})
tpPath := f.getTPpath(ctx, intfID, uni, TpID)
- tpDownloadMsg := &ic.InterAdapterTechProfileDownloadMessage{UniId: uniID, Path: tpPath}
+ tpDownloadMsg := &ic.InterAdapterTechProfileDownloadMessage{
+ UniId: uniID,
+ TpInstancePath: tpPath,
+ TechTpInstance: &ic.InterAdapterTechProfileDownloadMessage_TpInstance{TpInstance: &tpInst},
+ }
logger.Debugw(ctx, "sending-load-tech-profile-request-to-brcm-onu-adapter", log.Fields{"tpDownloadMsg": *tpDownloadMsg})
sendErr := f.deviceHandler.AdapterProxy.SendInterAdapterMessage(log.WithSpanFromContext(context.Background(), ctx),
tpDownloadMsg,
@@ -2458,24 +2285,25 @@
}
//UpdateOnuInfo function adds onu info to cache and kvstore
func (f *OpenOltFlowMgr) UpdateOnuInfo(ctx context.Context, intfID uint32, onuID uint32, serialNum string) error {
- f.onuGemInfoLock.Lock()
- defer f.onuGemInfoLock.Unlock()
- onugem := f.onuGemInfo
+ f.onuGemInfoLock.RLock()
+ _, ok := f.onuGemInfoMap[onuID]
+ f.onuGemInfoLock.RUnlock()
// If the ONU already exists in onuGemInfo list, nothing to do
- for _, onu := range onugem {
- if onu.OnuID == onuID && onu.SerialNumber == serialNum {
- logger.Debugw(ctx, "onu-id-already-exists-in-cache",
- log.Fields{"onuID": onuID,
- "serialNum": serialNum})
- return nil
- }
+ if ok {
+ logger.Debugw(ctx, "onu-id-already-exists-in-cache",
+ log.Fields{"onuID": onuID,
+ "serialNum": serialNum})
+ return nil
}
- onu := rsrcMgr.OnuGemInfo{OnuID: onuID, SerialNumber: serialNum, IntfID: intfID}
- f.onuGemInfo = append(f.onuGemInfo, onu)
- if err := f.resourceMgr.AddOnuGemInfo(ctx, intfID, onu); err != nil {
+ onuGemInfo := rsrcMgr.OnuGemInfo{OnuID: onuID, SerialNumber: serialNum, IntfID: intfID}
+ f.onuGemInfoLock.Lock()
+ f.onuGemInfoMap[onuID] = &onuGemInfo
+ f.onuGemInfoLock.Unlock()
+ if err := f.resourceMgr.AddOnuGemInfo(ctx, intfID, onuID, onuGemInfo); err != nil {
return err
}
logger.Infow(ctx, "updated-onuinfo",
@@ -2483,7 +2311,7 @@
"intf-id": intfID,
"onu-id": onuID,
"serial-num": serialNum,
- "onu": onu,
+ "onu": onuGemInfo,
"device-id": f.deviceHandler.device.Id})
return nil
}
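
UpdateOnuInfo above now follows a write-through pattern: check the cache under a read lock, insert under a write lock, then persist the same record to the KV store via the resource manager. A minimal sketch under those assumptions (the kvStore interface and the field set are simplified stand-ins):

package main

import (
	"context"
	"fmt"
	"sync"
)

type OnuGemInfo struct {
	OnuID        uint32
	SerialNumber string
	IntfID       uint32
}

type kvStore interface {
	AddOnuGemInfo(ctx context.Context, intfID, onuID uint32, onu OnuGemInfo) error
}

type flowMgr struct {
	mu    sync.RWMutex
	cache map[uint32]*OnuGemInfo
	kv    kvStore
}

func (f *flowMgr) UpdateOnuInfo(ctx context.Context, intfID, onuID uint32, serial string) error {
	f.mu.RLock()
	_, ok := f.cache[onuID]
	f.mu.RUnlock()
	if ok {
		return nil // already cached, nothing to do
	}
	onu := OnuGemInfo{OnuID: onuID, SerialNumber: serial, IntfID: intfID}
	f.mu.Lock()
	f.cache[onuID] = &onu
	f.mu.Unlock()
	// write through to persistent storage
	return f.kv.AddOnuGemInfo(ctx, intfID, onuID, onu)
}

type fakeKV struct{}

func (fakeKV) AddOnuGemInfo(ctx context.Context, intfID, onuID uint32, onu OnuGemInfo) error {
	return nil
}

func main() {
	f := &flowMgr{cache: map[uint32]*OnuGemInfo{}, kv: fakeKV{}}
	fmt.Println(f.UpdateOnuInfo(context.Background(), 0, 1, "ALPHe3d1cfde"))
}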
@@ -2491,34 +2319,46 @@
//addGemPortToOnuInfoMap function adds GEMport to ONU map
func (f *OpenOltFlowMgr) addGemPortToOnuInfoMap(ctx context.Context, intfID uint32, onuID uint32, gemPort uint32) {
- f.onuGemInfoLock.Lock()
- defer f.onuGemInfoLock.Unlock()
-
logger.Infow(ctx, "adding-gem-to-onu-info-map",
log.Fields{
"gem-port-id": gemPort,
"intf-id": intfID,
"onu-id": onuID,
- "device-id": f.deviceHandler.device.Id,
- "onu-gem": f.onuGemInfo})
- onugem := f.onuGemInfo
- // update the gem to the local cache as well as to kv strore
- for idx, onu := range onugem {
- if onu.OnuID == onuID {
- // check if gem already exists , else update the cache and kvstore
- for _, gem := range onu.GemPorts {
- if gem == gemPort {
- logger.Debugw(ctx, "gem-already-in-cache-no-need-to-update-cache-and-kv-store",
- log.Fields{
- "gem": gemPort,
- "device-id": f.deviceHandler.device.Id})
- return
- }
+ "device-id": f.deviceHandler.device.Id})
+ f.onuGemInfoLock.RLock()
+ onugem, ok := f.onuGemInfoMap[onuID]
+ f.onuGemInfoLock.RUnlock()
+ if !ok {
+ logger.Warnw(ctx, "onu gem info is missing", log.Fields{
+ "gem-port-id": gemPort,
+ "intf-id": intfID,
+ "onu-id": onuID,
+ "device-id": f.deviceHandler.device.Id})
+ return
+ }
+
+ if onugem.OnuID == onuID {
+ // check if gem already exists , else update the cache and kvstore
+ for _, gem := range onugem.GemPorts {
+ if gem == gemPort {
+ logger.Debugw(ctx, "gem-already-in-cache-no-need-to-update-cache-and-kv-store",
+ log.Fields{
+ "gem": gemPort,
+ "device-id": f.deviceHandler.device.Id})
+ return
}
- onugem[idx].GemPorts = append(onugem[idx].GemPorts, gemPort)
- f.onuGemInfo = onugem
- break
}
+ onugem.GemPorts = append(onugem.GemPorts, gemPort)
+ f.onuGemInfoLock.Lock()
+ f.onuGemInfoMap[onuID] = onugem
+ f.onuGemInfoLock.Unlock()
+ } else {
+ logger.Warnw(ctx, "mismatched onu id", log.Fields{
+ "gem-port-id": gemPort,
+ "intf-id": intfID,
+ "onu-id": onuID,
+ "device-id": f.deviceHandler.device.Id})
+ return
}
err := f.resourceMgr.AddGemToOnuGemInfo(ctx, intfID, onuID, gemPort)
if err != nil {
@@ -2535,24 +2375,18 @@
"gem-port-id": gemPort,
"intf-id": intfID,
"onu-id": onuID,
- "device-id": f.deviceHandler.device.Id,
- "onu-gem": f.onuGemInfo})
+ "device-id": f.deviceHandler.device.Id})
}
//GetLogicalPortFromPacketIn function computes logical port UNI/NNI port from packet-in indication and returns the same
func (f *OpenOltFlowMgr) GetLogicalPortFromPacketIn(ctx context.Context, packetIn *openoltpb2.PacketIndication) (uint32, error) {
var logicalPortNum uint32
- var onuID, uniID uint32
- var err error
if packetIn.IntfType == "pon" {
// packet indication does not have serial number , so sending as nil
// get onu and uni ids associated with the given pon and gem ports
- if onuID, uniID, err = f.GetUniPortByPonPortGemPort(ctx, packetIn.IntfId, packetIn.GemportId); err != nil {
- // Called method is returning error with all data populated; just return the same
- return logicalPortNum, err
- }
- logger.Debugf(ctx, "retrieved ONU and UNI IDs [%d, %d] by interface:%d, gem:%d")
+ onuID, uniID := packetIn.OnuId, packetIn.UniId
+ logger.Debugf(ctx, "retrieved ONU and UNI IDs [%d, %d] by interface:%d, gem:%d", packetIn.OnuId, packetIn.UniId, packetIn.GemportId)
if packetIn.PortNo != 0 {
logicalPortNum = packetIn.PortNo
@@ -2576,40 +2410,6 @@
return logicalPortNum, nil
}
-//GetUniPortByPonPortGemPort return onu and uni IDs associated with given pon and gem ports
-func (f *OpenOltFlowMgr) GetUniPortByPonPortGemPort(ctx context.Context, intfID uint32, gemPortID uint32) (uint32, uint32, error) {
- key := gemPortKey{
- intfID: intfID,
- gemPort: gemPortID,
- }
- uniPortInfo, ok := f.fromGemToUniMap(key) //try to get from the cache first
- if ok {
- if len(uniPortInfo) > 1 {
- //return onu ID and uni port from the cache
- logger.Debugw(ctx, "found-uni-port-by-pon-and-gem-ports",
- log.Fields{
- "intfID": intfID,
- "gemPortID": gemPortID,
- "onuID, uniID": uniPortInfo})
- return uniPortInfo[0], uniPortInfo[1], nil
- }
- }
- //If uni port is not found in cache try to get it from kv store. if it is found in kv store, update the cache and return.
- onuID, uniID, err := f.resourceMgr.GetUniPortByPonPortGemPortFromKVStore(ctx, intfID, gemPortID)
- if err == nil {
- f.toGemToUniMap(ctx, key, onuID, uniID)
- logger.Infow(ctx, "found-uni-port-by-pon-and-gem-port-from-kv-store-and-updating-cache-with-uni-port",
- log.Fields{
- "gemPortKey": key,
- "onuID": onuID,
- "uniID": uniID})
- return onuID, uniID, nil
- }
- return uint32(0), uint32(0), olterrors.NewErrNotFound("uni-id",
- log.Fields{"interfaceID": intfID, "gemPortID": gemPortID},
- errors.New("no uni port found"))
-}
-
//GetPacketOutGemPortID returns gemPortId
func (f *OpenOltFlowMgr) GetPacketOutGemPortID(ctx context.Context, intfID uint32, onuID uint32, portNum uint32, packet []byte) (uint32, error) {
var gemPortID uint32
@@ -2721,10 +2521,6 @@
return olterrors.NewErrFlowOp("add", logicalFlow.Id, log.Fields{"flow": downstreamflow}, err)
}
logger.Info(ctx, "trap-on-nni-flow-added–to-device-successfully")
- flowInfo := rsrcMgr.FlowInfo{Flow: &downstreamflow}
- if err := f.resourceMgr.UpdateFlowIDInfo(ctx, networkInterfaceID, int32(onuID), int32(uniID), logicalFlow.Id, flowInfo); err != nil {
- return olterrors.NewErrPersistence("update", "flow", logicalFlow.Id, log.Fields{"flow": downstreamflow}, err)
- }
return nil
}
@@ -2814,10 +2610,7 @@
return olterrors.NewErrFlowOp("add", logicalFlow.Id, log.Fields{"flow": downstreamflow}, err)
}
logger.Info(ctx, "igmp-trap-on-nni-flow-added-to-device-successfully")
- flowInfo := rsrcMgr.FlowInfo{Flow: &downstreamflow}
- if err := f.resourceMgr.UpdateFlowIDInfo(ctx, networkInterfaceID, int32(onuID), int32(uniID), logicalFlow.Id, flowInfo); err != nil {
- return olterrors.NewErrPersistence("update", "flow", logicalFlow.Id, log.Fields{"flow": downstreamflow}, err)
- }
+
return nil
}
@@ -2846,10 +2639,10 @@
pbitToGem := make(map[uint32]uint32)
gemToAes := make(map[uint32]bool)
- var attributes []tp.IGemPortAttribute
+ var attributes []*tp_pb.GemPortAttributes
var direction = tp_pb.Direction_UPSTREAM
switch TpInst := TpInst.(type) {
- case *tp.TechProfile:
+ case *tp_pb.TechProfileInstance:
if IsUpstream(actionInfo[Output].(uint32)) {
attributes = TpInst.UpstreamGemPortAttributeList
} else {
@@ -2881,9 +2674,9 @@
}
}
} else { // Extract the exact gemport which maps to the PCP classifier in the flow
- if gem := f.techprofile[intfID].GetGemportForPbit(ctx, TpInst, direction, pcp.(uint32)); gem != nil {
- gemPortID = gem.(tp.IGemPortAttribute).GemportID
- gemToAes[gemPortID], _ = strconv.ParseBool(gem.(tp.IGemPortAttribute).AesEncryption)
+ if gem := f.techprofile.GetGemportForPbit(ctx, TpInst, direction, pcp.(uint32)); gem != nil {
+ gemPortID = gem.(*tp_pb.GemPortAttributes).GemportId
+ gemToAes[gemPortID], _ = strconv.ParseBool(gem.(*tp_pb.GemPortAttributes).AesEncryption)
}
}
@@ -2981,26 +2774,26 @@
}
// Send Techprofile download event to child device in go routine as it takes time
go func() {
- if err := f.sendTPDownloadMsgToChild(ctx, intfID, onuID, uniID, uni, tpID); err != nil {
+ if err := f.sendTPDownloadMsgToChild(ctx, intfID, onuID, uniID, uni, tpID, *(TpInst.(*tp_pb.TechProfileInstance))); err != nil {
logger.Warn(ctx, err)
}
}()
}
func (f *OpenOltFlowMgr) isGemPortUsedByAnotherFlow(gemPortID uint32) bool {
- f.flowsUsedByGemPortKey.RLock()
- flowIDList := f.flowsUsedByGemPort[gemPortID]
- f.flowsUsedByGemPortKey.RUnlock()
+ f.gemToFlowIDsKey.RLock()
+ flowIDList := f.gemToFlowIDs[gemPortID]
+ f.gemToFlowIDsKey.RUnlock()
return len(flowIDList) > 1
}
-func (f *OpenOltFlowMgr) isTechProfileUsedByAnotherGem(ctx context.Context, ponIntf uint32, onuID uint32, uniID uint32, tpInst *tp.TechProfile, gemPortID uint32) (bool, uint32) {
+func (f *OpenOltFlowMgr) isTechProfileUsedByAnotherGem(ctx context.Context, ponIntf uint32, onuID uint32, uniID uint32, tpInst *tp_pb.TechProfileInstance, gemPortID uint32) (bool, uint32) {
currentGemPorts := f.resourceMgr.GetCurrentGEMPortIDsForOnu(ctx, ponIntf, onuID, uniID)
tpGemPorts := tpInst.UpstreamGemPortAttributeList
for _, currentGemPort := range currentGemPorts {
for _, tpGemPort := range tpGemPorts {
- if (currentGemPort == tpGemPort.GemportID) && (currentGemPort != gemPortID) {
+ if (currentGemPort == tpGemPort.GemportId) && (currentGemPort != gemPortID) {
return true, currentGemPort
}
}
@@ -3010,21 +2803,21 @@
}
func (f *OpenOltFlowMgr) isAllocUsedByAnotherUNI(ctx context.Context, sq schedQueue) bool {
- tpInst := sq.tpInst.(*tp.TechProfile)
- if tpInst.InstanceCtrl.Onu == "single-instance" && sq.direction == tp_pb.Direction_UPSTREAM {
- tpInstances := f.techprofile[sq.intfID].FindAllTpInstances(ctx, f.deviceHandler.device.Id, sq.tpID, sq.intfID, sq.onuID).([]tp.TechProfile)
+ tpInst := sq.tpInst.(*tp_pb.TechProfileInstance)
+ if tpInst.InstanceControl.Onu == "single-instance" && sq.direction == tp_pb.Direction_UPSTREAM {
+ tpInstances := f.techprofile.FindAllTpInstances(ctx, f.deviceHandler.device.Id, sq.tpID, sq.intfID, sq.onuID).([]tp_pb.TechProfileInstance)
logger.Debugw(ctx, "got-single-instance-tp-instances", log.Fields{"tp-instances": tpInstances})
for i := 0; i < len(tpInstances); i++ {
tpI := tpInstances[i]
if tpI.SubscriberIdentifier != tpInst.SubscriberIdentifier &&
- tpI.UsScheduler.AllocID == tpInst.UsScheduler.AllocID {
+ tpI.UsScheduler.AllocId == tpInst.UsScheduler.AllocId {
logger.Debugw(ctx, "alloc-is-in-use",
log.Fields{
"device-id": f.deviceHandler.device.Id,
"intfID": sq.intfID,
"onuID": sq.onuID,
"uniID": sq.uniID,
- "allocID": tpI.UsScheduler.AllocID,
+ "allocID": tpI.UsScheduler.AllocId,
})
return true
}
@@ -3250,7 +3043,7 @@
logger.Debugw(ctx, "invalid-action-port-number",
log.Fields{
"port-number": action[Output].(uint32),
- "error": err})
+ "err": err})
return uint32(0), err
}
logger.Infow(ctx, "output-nni-intfId-is", log.Fields{"intf-id": intfID})
@@ -3261,7 +3054,7 @@
logger.Debugw(ctx, "invalid-classifier-port-number",
log.Fields{
"port-number": action[Output].(uint32),
- "error": err})
+ "err": err})
return uint32(0), err
}
logger.Infow(ctx, "input-nni-intfId-is", log.Fields{"intf-id": intfID})
@@ -3331,69 +3124,39 @@
return 0, 0, nil
}
-// AddUniPortToOnuInfo adds uni port to the onugem info both in cache and kvstore.
-func (f *OpenOltFlowMgr) AddUniPortToOnuInfo(ctx context.Context, intfID uint32, onuID uint32, portNum uint32) {
-
- f.onuGemInfoLock.Lock()
- defer f.onuGemInfoLock.Unlock()
-
- onugem := f.onuGemInfo
- for idx, onu := range onugem {
- if onu.OnuID == onuID {
- for _, uni := range onu.UniPorts {
- if uni == portNum {
- logger.Infow(ctx, "uni-already-in-cache--no-need-to-update-cache-and-kv-store", log.Fields{"uni": portNum})
- return
+func (f *OpenOltFlowMgr) loadFlowIDsForGemAndGemIDsForFlow(ctx context.Context) {
+ logger.Debug(ctx, "loadFlowIDsForGemAndGemIDsForFlow - start")
+ f.onuGemInfoLock.RLock()
+ f.gemToFlowIDsKey.Lock()
+ f.flowIDToGemsLock.Lock()
+ for _, og := range f.onuGemInfoMap {
+ for _, gem := range og.GemPorts {
+ flowIDs, err := f.resourceMgr.GetFlowIDsForGem(ctx, f.ponPortIdx, gem)
+ if err == nil {
+ f.gemToFlowIDs[gem] = flowIDs
+ for _, flowID := range flowIDs {
+ if _, ok := f.flowIDToGems[flowID]; !ok {
+ f.flowIDToGems[flowID] = []uint32{gem}
+ } else {
+ f.flowIDToGems[flowID] = appendUnique32bit(f.flowIDToGems[flowID], gem)
+ }
}
}
- onugem[idx].UniPorts = append(onugem[idx].UniPorts, portNum)
- f.onuGemInfo = onugem
}
}
- f.resourceMgr.AddUniPortToOnuInfo(ctx, intfID, onuID, portNum)
-
-}
-
-func (f *OpenOltFlowMgr) loadFlowIDlistForGem(ctx context.Context, intf uint32) {
- flowIDsList, err := f.resourceMgr.GetFlowIDsGemMapForInterface(ctx, intf)
- if err != nil {
- logger.Error(ctx, "failed-to-get-flowid-list-per-gem", log.Fields{"intf": intf})
- return
- }
- f.flowsUsedByGemPortKey.Lock()
- for gem, FlowIDs := range flowIDsList {
- f.flowsUsedByGemPort[gem] = FlowIDs
- }
- f.flowsUsedByGemPortKey.Unlock()
+ f.flowIDToGemsLock.Unlock()
+ f.gemToFlowIDsKey.Unlock()
+ f.onuGemInfoLock.RUnlock()
+ logger.Debug(ctx, "loadFlowIDsForGemAndGemIDsForFlow - end")
}
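
loadFlowIDsForGemAndGemIDsForFlow rebuilds two caches from persisted data: a forward gem-to-flow-IDs map and its inverse flow-ID-to-gems map. The sketch below shows that reconstruction in isolation, with a stub in place of the resource-manager lookup and appendUnique mirroring appendUnique32bit:

package main

import "fmt"

func appendUnique(list []uint32, v uint32) []uint32 {
	for _, x := range list {
		if x == v {
			return list
		}
	}
	return append(list, v)
}

// loadMaps builds the forward and inverse maps for the given gem ports.
func loadMaps(gemPorts []uint32, flowsForGem func(uint32) []uint32) (map[uint32][]uint32, map[uint32][]uint32) {
	gemToFlowIDs := make(map[uint32][]uint32)
	flowIDToGems := make(map[uint32][]uint32)
	for _, gem := range gemPorts {
		flowIDs := flowsForGem(gem) // e.g. read back from the KV store
		gemToFlowIDs[gem] = flowIDs
		for _, flowID := range flowIDs {
			flowIDToGems[flowID] = appendUnique(flowIDToGems[flowID], gem)
		}
	}
	return gemToFlowIDs, flowIDToGems
}

func main() {
	g2f, f2g := loadMaps([]uint32{1024, 1025}, func(gem uint32) []uint32 { return []uint32{10, 11} })
	fmt.Println(g2f[1024], f2g[10]) // [10 11] [1024 1025]
}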
//clearMulticastFlowFromResourceManager removes a multicast flow from the KV store and
// clears resources reserved for this multicast flow
func (f *OpenOltFlowMgr) clearMulticastFlowFromResourceManager(ctx context.Context, flow *ofp.OfpFlowStats) error {
- classifierInfo := make(map[string]interface{})
- var flowInfo *rsrcMgr.FlowInfo
- formulateClassifierInfoFromFlow(ctx, classifierInfo, flow)
- networkInterfaceID, err := f.getNNIInterfaceIDOfMulticastFlow(ctx, classifierInfo)
-
- if err != nil {
- logger.Warnw(ctx, "no-inport-found--cannot-release-resources-of-the-multicast-flow", log.Fields{"flowId:": flow.Id})
- return err
- }
-
- var onuID = int32(NoneOnuID)
- var uniID = int32(NoneUniID)
- if flowInfo = f.resourceMgr.GetFlowIDInfo(ctx, networkInterfaceID, onuID, uniID, flow.Id); flowInfo == nil {
- return olterrors.NewErrPersistence("remove", "flow", flow.Id,
- log.Fields{
- "flow": flow,
- "device-id": f.deviceHandler.device.Id,
- "intf-id": networkInterfaceID,
- "onu-id": onuID}, err).Log()
- }
- removeFlowMessage := openoltpb2.Flow{FlowId: flow.Id, FlowType: flowInfo.Flow.FlowType}
+ removeFlowMessage := openoltpb2.Flow{FlowId: flow.Id, FlowType: Multicast}
logger.Debugw(ctx, "multicast-flow-to-be-deleted",
log.Fields{
- "flow": flowInfo.Flow,
+ "flow": flow,
"flow-id": flow.Id,
"device-id": f.deviceHandler.device.Id})
// Remove from device
@@ -3402,60 +3165,44 @@
logger.Errorw(ctx, "failed-to-remove-multicast-flow",
log.Fields{
"flow-id": flow.Id,
- "error": err})
+ "err": err})
return err
}
- // Remove flow from KV store
- return f.resourceMgr.RemoveFlowIDInfo(ctx, networkInterfaceID, onuID, uniID, flow.Id)
+
+ return nil
}
-// reconcileSubscriberDataPathFlowIDMap reconciles subscriberDataPathFlowIDMap from KV store
-func (f *OpenOltFlowMgr) reconcileSubscriberDataPathFlowIDMap(ctx context.Context) {
- onuGemInfo, err := f.resourceMgr.GetOnuGemInfo(ctx, f.ponPortIdx)
+func (f *OpenOltFlowMgr) getTechProfileDownloadMessage(ctx context.Context, tpPath string, ponID uint32, onuID uint32, uniID uint32) *ic.InterAdapterTechProfileDownloadMessage {
+ tpInst, err := f.techprofile.GetTPInstance(ctx, tpPath)
if err != nil {
- _ = olterrors.NewErrNotFound("onu", log.Fields{
- "pon-port": f.ponPortIdx}, err).Log()
- return
+ logger.Errorw(ctx, "error-fetching-tp-instance", log.Fields{"tpPath": tpPath})
+ return nil
}
- f.subscriberDataPathFlowIDMapLock.Lock()
- defer f.subscriberDataPathFlowIDMapLock.Unlock()
-
- for _, onu := range onuGemInfo {
- for _, uniID := range onu.UniPorts {
- flowIDs, err := f.resourceMgr.GetCurrentFlowIDsForOnu(ctx, onu.IntfID, int32(onu.OnuID), int32(uniID))
- if err != nil {
- logger.Fatalf(ctx, "failed-to-read-flow-ids-of-onu-during-reconciliation")
- }
- for _, flowID := range flowIDs {
- flowInfo := f.resourceMgr.GetFlowIDInfo(ctx, onu.IntfID, int32(onu.OnuID), int32(uniID), flowID)
- if flowInfo == nil {
- // Error is already logged in the called function
- continue
- }
- if flowInfo.Flow.Classifier.PktTagType == DoubleTag &&
- flowInfo.Flow.FlowType == Downstream &&
- flowInfo.Flow.Classifier.OVid > 0 &&
- flowInfo.Flow.TechProfileId > 0 {
- key := subscriberDataPathFlowIDKey{intfID: onu.IntfID, onuID: onu.OnuID, uniID: uniID, direction: flowInfo.Flow.FlowType, tpID: flowInfo.Flow.TechProfileId}
- if _, ok := f.subscriberDataPathFlowIDMap[key]; !ok {
- f.subscriberDataPathFlowIDMap[key] = flowInfo.Flow.FlowId
- }
- } else if flowInfo.Flow.Classifier.PktTagType == SingleTag &&
- flowInfo.Flow.FlowType == Upstream &&
- flowInfo.Flow.Action.OVid > 0 &&
- flowInfo.Flow.TechProfileId > 0 {
- key := subscriberDataPathFlowIDKey{intfID: onu.IntfID, onuID: onu.OnuID, uniID: uniID, direction: flowInfo.Flow.FlowType, tpID: flowInfo.Flow.TechProfileId}
- if _, ok := f.subscriberDataPathFlowIDMap[key]; !ok {
- f.subscriberDataPathFlowIDMap[key] = flowInfo.Flow.FlowId
- }
- }
- }
+ switch tpInst := tpInst.(type) {
+ case *tp_pb.TechProfileInstance:
+ logger.Debugw(ctx, "fetched-tp-instance-successfully--formulating-tp-download-msg", log.Fields{"tpPath": tpPath})
+ return &ic.InterAdapterTechProfileDownloadMessage{UniId: uniID,
+ TpInstancePath: tpPath,
+ TechTpInstance: &ic.InterAdapterTechProfileDownloadMessage_TpInstance{TpInstance: tpInst},
}
+ case *openoltpb2.EponTechProfileInstance:
+ return &ic.InterAdapterTechProfileDownloadMessage{UniId: uniID,
+ TpInstancePath: tpPath,
+ TechTpInstance: &ic.InterAdapterTechProfileDownloadMessage_EponTpInstance{EponTpInstance: tpInst},
+ }
+ default:
+ logger.Errorw(ctx, "unknown-tech", log.Fields{"tpPath": tpPath})
}
+ return nil
}
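
getTechProfileDownloadMessage dispatches on the concrete TP-instance type fetched from the KV store, since GPON and EPON instances populate different oneof fields of the download message. A reduced illustration of that type switch, with stand-in types and an assumed path string:

package main

import "fmt"

type TechProfileInstance struct{ Name string }
type EponTechProfileInstance struct{ Name string }

type downloadMsg struct {
	path string
	body interface{}
}

// buildDownloadMsg picks the message shape based on the concrete TP-instance type.
func buildDownloadMsg(path string, tpInst interface{}) *downloadMsg {
	switch tp := tpInst.(type) {
	case *TechProfileInstance:
		return &downloadMsg{path: path, body: tp}
	case *EponTechProfileInstance:
		return &downloadMsg{path: path, body: tp}
	default:
		fmt.Println("unknown tech profile type")
		return nil
	}
}

func main() {
	msg := buildDownloadMsg("XGS-PON/64/olt-1/pon-0/onu-1/uni-0", &TechProfileInstance{Name: "4QueueHybridProfileMap1"})
	fmt.Printf("%+v\n", msg)
}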
-// isDatapathFlow declares a flow as datapath flow if it is not a controller bound flow and the flow does not have group
-func isDatapathFlow(flow *ofp.OfpFlowStats) bool {
- return !IsControllerBoundFlow(flows.GetOutPort(flow)) && !flows.HasGroup(flow)
+func (f *OpenOltFlowMgr) getOnuGemInfoList() []rsrcMgr.OnuGemInfo {
+ var onuGemInfoLst []rsrcMgr.OnuGemInfo
+ f.onuGemInfoLock.RLock()
+ defer f.onuGemInfoLock.RUnlock()
+ for _, v := range f.onuGemInfoMap {
+ onuGemInfoLst = append(onuGemInfoLst, *v)
+ }
+ return onuGemInfoLst
}
diff --git a/internal/pkg/core/openolt_flowmgr_test.go b/internal/pkg/core/openolt_flowmgr_test.go
index 44a02fb..214fa61 100644
--- a/internal/pkg/core/openolt_flowmgr_test.go
+++ b/internal/pkg/core/openolt_flowmgr_test.go
@@ -29,15 +29,10 @@
"github.com/opencord/voltha-protos/v4/go/voltha"
- "github.com/opencord/voltha-lib-go/v4/pkg/db"
- fu "github.com/opencord/voltha-lib-go/v4/pkg/flows"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
- tp "github.com/opencord/voltha-lib-go/v4/pkg/techprofile"
- "github.com/opencord/voltha-openolt-adapter/internal/pkg/resourcemanager"
+ fu "github.com/opencord/voltha-lib-go/v5/pkg/flows"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
rsrcMgr "github.com/opencord/voltha-openolt-adapter/internal/pkg/resourcemanager"
- "github.com/opencord/voltha-openolt-adapter/pkg/mocks"
ofp "github.com/opencord/voltha-protos/v4/go/openflow_13"
- "github.com/opencord/voltha-protos/v4/go/openolt"
openoltpb2 "github.com/opencord/voltha-protos/v4/go/openolt"
tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
)
@@ -48,41 +43,11 @@
_, _ = log.SetDefaultLogger(log.JSON, log.DebugLevel, nil)
flowMgr = newMockFlowmgr()
}
-func newMockResourceMgr() *resourcemanager.OpenOltResourceMgr {
- ranges := []*openolt.DeviceInfo_DeviceResourceRanges{
- {
- IntfIds: []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
- Technology: "Default",
- },
- }
-
- deviceinfo := &openolt.DeviceInfo{Vendor: "openolt", Model: "openolt", HardwareVersion: "1.0", FirmwareVersion: "1.0",
- DeviceId: "olt", DeviceSerialNumber: "openolt", PonPorts: 16, Technology: "Default",
- OnuIdStart: OnuIDStart, OnuIdEnd: OnuIDEnd, AllocIdStart: AllocIDStart, AllocIdEnd: AllocIDEnd,
- GemportIdStart: GemIDStart, GemportIdEnd: GemIDEnd, FlowIdStart: FlowIDStart, FlowIdEnd: FlowIDEnd,
- Ranges: ranges,
- }
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
- rsrMgr := resourcemanager.NewResourceMgr(ctx, "olt", "127.0.0.1:2379", "etcd", "olt", deviceinfo, "service/voltha")
- for key := range rsrMgr.ResourceMgrs {
- rsrMgr.ResourceMgrs[key].KVStore = &db.Backend{}
- rsrMgr.ResourceMgrs[key].KVStore.Client = &mocks.MockKVClient{}
- rsrMgr.ResourceMgrs[key].TechProfileMgr = mocks.MockTechProfile{TpID: key}
- }
- return rsrMgr
-}
func newMockFlowmgr() []*OpenOltFlowMgr {
- rMgr := newMockResourceMgr()
dh := newMockDeviceHandler()
- rMgr.KVStore = &db.Backend{}
- rMgr.KVStore.Client = &mocks.MockKVClient{}
-
- dh.resourceMgr = rMgr
-
- // onuGemInfo := make([]rsrcMgr.OnuGemInfo, NumPonPorts)
+ // onuGemInfoMap := make([]rsrcMgr.onuGemInfoMap, NumPonPorts)
var i uint32
for i = 0; i < NumPonPorts; i++ {
@@ -90,11 +55,7 @@
packetInGemPort[rsrcMgr.PacketInInfoKey{IntfID: i, OnuID: i + 1, LogicalPort: i + 1, VlanID: uint16(i), Priority: uint8(i)}] = i + 1
dh.flowMgr[i].packetInGemPort = packetInGemPort
- tps := make(map[uint32]tp.TechProfileIf)
- for key := range rMgr.ResourceMgrs {
- tps[key] = mocks.MockTechProfile{TpID: key}
- }
- dh.flowMgr[i].techprofile = tps
+ dh.flowMgr[i].techprofile = dh.resourceMgr[i].PonRsrMgr.TechProfileMgr
interface2mcastQeueuMap := make(map[uint32]*QueueInfoBrief)
interface2mcastQeueuMap[0] = &QueueInfoBrief{
gemPortID: 4000,
@@ -102,21 +63,22 @@
}
dh.flowMgr[i].grpMgr.interfaceToMcastQueueMap = interface2mcastQeueuMap
}
-
return dh.flowMgr
}
func TestOpenOltFlowMgr_CreateSchedulerQueues(t *testing.T) {
- tprofile := &tp.TechProfile{Name: "tp1", SubscriberIdentifier: "subscriber1",
+ tprofile := &tp_pb.TechProfileInstance{Name: "tp1", SubscriberIdentifier: "subscriber1",
ProfileType: "pt1", NumGemPorts: 1, Version: 1,
- InstanceCtrl: tp.InstanceControl{Onu: "1", Uni: "1", MaxGemPayloadSize: "1"},
+ InstanceControl: &tp_pb.InstanceControl{Onu: "1", Uni: "1", MaxGemPayloadSize: "1"},
}
- tprofile.UsScheduler.Direction = "UPSTREAM"
- tprofile.UsScheduler.QSchedPolicy = "WRR"
+ tprofile.UsScheduler = &openoltpb2.SchedulerAttributes{}
+ tprofile.UsScheduler.Direction = tp_pb.Direction_UPSTREAM
+ tprofile.UsScheduler.QSchedPolicy = tp_pb.SchedulingPolicy_WRR
tprofile2 := tprofile
- tprofile2.DsScheduler.Direction = "DOWNSTREAM"
- tprofile2.DsScheduler.QSchedPolicy = "WRR"
+ tprofile2.DsScheduler = &openoltpb2.SchedulerAttributes{}
+ tprofile2.DsScheduler.Direction = tp_pb.Direction_DOWNSTREAM
+ tprofile2.DsScheduler.QSchedPolicy = tp_pb.SchedulingPolicy_WRR
tests := []struct {
name string
@@ -135,17 +97,17 @@
{"CreateSchedulerQueues-19", schedQueue{tp_pb.Direction_UPSTREAM, 0, 1, 1, 64, 1, tprofile, 1, createFlowMetadata(tprofile, 5, Upstream)}, false},
{"CreateSchedulerQueues-20", schedQueue{tp_pb.Direction_DOWNSTREAM, 0, 1, 1, 65, 1, tprofile2, 1, createFlowMetadata(tprofile2, 5, Downstream)}, false},
- {"CreateSchedulerQueues-1", schedQueue{tp_pb.Direction_UPSTREAM, 0, 1, 1, 64, 1, tprofile, 1, createFlowMetadata(tprofile, 0, Upstream)}, true},
- {"CreateSchedulerQueues-2", schedQueue{tp_pb.Direction_DOWNSTREAM, 0, 1, 1, 65, 1, tprofile2, 1, createFlowMetadata(tprofile2, 0, Downstream)}, true},
+ {"CreateSchedulerQueues-1", schedQueue{tp_pb.Direction_UPSTREAM, 0, 1, 1, 64, 1, tprofile, 1, createFlowMetadata(tprofile, 0, Upstream)}, false},
+ {"CreateSchedulerQueues-2", schedQueue{tp_pb.Direction_DOWNSTREAM, 0, 1, 1, 65, 1, tprofile2, 1, createFlowMetadata(tprofile2, 0, Downstream)}, false},
{"CreateSchedulerQueues-3", schedQueue{tp_pb.Direction_UPSTREAM, 0, 1, 1, 64, 1, tprofile, 2, createFlowMetadata(tprofile, 2, Upstream)}, true},
{"CreateSchedulerQueues-4", schedQueue{tp_pb.Direction_DOWNSTREAM, 0, 1, 1, 65, 1, tprofile2, 2, createFlowMetadata(tprofile2, 2, Downstream)}, true},
{"CreateSchedulerQueues-5", schedQueue{tp_pb.Direction_UPSTREAM, 1, 2, 2, 64, 2, tprofile, 2, createFlowMetadata(tprofile, 3, Upstream)}, true},
{"CreateSchedulerQueues-6", schedQueue{tp_pb.Direction_DOWNSTREAM, 1, 2, 2, 65, 2, tprofile2, 2, createFlowMetadata(tprofile2, 3, Downstream)}, true},
//Negative testcases
- {"CreateSchedulerQueues-7", schedQueue{tp_pb.Direction_UPSTREAM, 0, 1, 1, 64, 1, tprofile, 1, &voltha.FlowMetadata{}}, true},
+ {"CreateSchedulerQueues-7", schedQueue{tp_pb.Direction_UPSTREAM, 0, 1, 1, 64, 1, tprofile, 1, &voltha.FlowMetadata{}}, false},
{"CreateSchedulerQueues-8", schedQueue{tp_pb.Direction_UPSTREAM, 0, 1, 1, 64, 1, tprofile, 0, &voltha.FlowMetadata{}}, true},
- {"CreateSchedulerQueues-9", schedQueue{tp_pb.Direction_DOWNSTREAM, 0, 1, 1, 65, 1, tprofile2, 1, &voltha.FlowMetadata{}}, true},
+ {"CreateSchedulerQueues-9", schedQueue{tp_pb.Direction_DOWNSTREAM, 0, 1, 1, 65, 1, tprofile2, 1, &voltha.FlowMetadata{}}, false},
{"CreateSchedulerQueues-10", schedQueue{tp_pb.Direction_UPSTREAM, 0, 1, 1, 64, 1, tprofile, 2, &voltha.FlowMetadata{}}, true},
{"CreateSchedulerQueues-11", schedQueue{tp_pb.Direction_DOWNSTREAM, 0, 1, 1, 65, 1, tprofile2, 2, &voltha.FlowMetadata{}}, true},
{"CreateSchedulerQueues-12", schedQueue{tp_pb.Direction_DOWNSTREAM, 0, 1, 1, 65, 1, tprofile2, 2, nil}, true},
@@ -161,35 +123,35 @@
}
}
-func createFlowMetadata(techProfile *tp.TechProfile, tcontType int, direction string) *voltha.FlowMetadata {
- var additionalBw string
+func createFlowMetadata(techProfile *tp_pb.TechProfileInstance, tcontType int, direction string) *voltha.FlowMetadata {
+ var additionalBw openoltpb2.AdditionalBW
bands := make([]*ofp.OfpMeterBandHeader, 0)
switch tcontType {
case 1:
//tcont-type-1
bands = append(bands, &ofp.OfpMeterBandHeader{Type: ofp.OfpMeterBandType_OFPMBT_DROP, Rate: 10000, BurstSize: 0, Data: &ofp.OfpMeterBandHeader_Drop{}})
bands = append(bands, &ofp.OfpMeterBandHeader{Type: ofp.OfpMeterBandType_OFPMBT_DROP, Rate: 10000, BurstSize: 0, Data: &ofp.OfpMeterBandHeader_Drop{}})
- additionalBw = "AdditionalBW_None"
+ additionalBw = tp_pb.AdditionalBW_AdditionalBW_None
case 2:
//tcont-type-2
bands = append(bands, &ofp.OfpMeterBandHeader{Type: ofp.OfpMeterBandType_OFPMBT_DROP, Rate: 60000, BurstSize: 10000, Data: &ofp.OfpMeterBandHeader_Drop{}})
bands = append(bands, &ofp.OfpMeterBandHeader{Type: ofp.OfpMeterBandType_OFPMBT_DROP, Rate: 50000, BurstSize: 10000, Data: &ofp.OfpMeterBandHeader_Drop{}})
- additionalBw = "AdditionalBW_None"
+ additionalBw = tp_pb.AdditionalBW_AdditionalBW_None
case 3:
//tcont-type-3
bands = append(bands, &ofp.OfpMeterBandHeader{Type: ofp.OfpMeterBandType_OFPMBT_DROP, Rate: 100000, BurstSize: 10000, Data: &ofp.OfpMeterBandHeader_Drop{}})
bands = append(bands, &ofp.OfpMeterBandHeader{Type: ofp.OfpMeterBandType_OFPMBT_DROP, Rate: 50000, BurstSize: 20000, Data: &ofp.OfpMeterBandHeader_Drop{}})
- additionalBw = "AdditionalBW_NA"
+ additionalBw = tp_pb.AdditionalBW_AdditionalBW_NA
case 4:
//tcont-type-4
bands = append(bands, &ofp.OfpMeterBandHeader{Type: ofp.OfpMeterBandType_OFPMBT_DROP, Rate: 200000, BurstSize: 10000, Data: &ofp.OfpMeterBandHeader_Drop{}})
- additionalBw = "AdditionalBW_BestEffort"
+ additionalBw = tp_pb.AdditionalBW_AdditionalBW_BestEffort
case 5:
//tcont-type-5
bands = append(bands, &ofp.OfpMeterBandHeader{Type: ofp.OfpMeterBandType_OFPMBT_DROP, Rate: 50000, BurstSize: 10000, Data: &ofp.OfpMeterBandHeader_Drop{}})
bands = append(bands, &ofp.OfpMeterBandHeader{Type: ofp.OfpMeterBandType_OFPMBT_DROP, Rate: 100000, BurstSize: 10000, Data: &ofp.OfpMeterBandHeader_Drop{}})
bands = append(bands, &ofp.OfpMeterBandHeader{Type: ofp.OfpMeterBandType_OFPMBT_DROP, Rate: 10000, BurstSize: 0, Data: &ofp.OfpMeterBandHeader_Drop{}})
- additionalBw = "AdditionalBW_BestEffort"
+ additionalBw = tp_pb.AdditionalBW_AdditionalBW_BestEffort
default:
// do nothing, we will return meter config with no meter bands
}
@@ -206,18 +168,20 @@
}
func TestOpenOltFlowMgr_RemoveSchedulerQueues(t *testing.T) {
- tprofile := &tp.TechProfile{Name: "tp1", SubscriberIdentifier: "subscriber1",
+ tprofile := &tp_pb.TechProfileInstance{Name: "tp1", SubscriberIdentifier: "subscriber1",
ProfileType: "pt1", NumGemPorts: 1, Version: 1,
- InstanceCtrl: tp.InstanceControl{Onu: "1", Uni: "1", MaxGemPayloadSize: "1"},
+ InstanceControl: &tp_pb.InstanceControl{Onu: "1", Uni: "1", MaxGemPayloadSize: "1"},
}
- tprofile.UsScheduler.Direction = "UPSTREAM"
- tprofile.UsScheduler.AdditionalBw = "AdditionalBW_None"
- tprofile.UsScheduler.QSchedPolicy = "WRR"
+ tprofile.UsScheduler = &openoltpb2.SchedulerAttributes{}
+ tprofile.UsScheduler.Direction = tp_pb.Direction_UPSTREAM
+ tprofile.UsScheduler.AdditionalBw = tp_pb.AdditionalBW_AdditionalBW_None
+ tprofile.UsScheduler.QSchedPolicy = tp_pb.SchedulingPolicy_WRR
tprofile2 := tprofile
- tprofile2.DsScheduler.Direction = "DOWNSTREAM"
- tprofile2.DsScheduler.AdditionalBw = "AdditionalBW_None"
- tprofile2.DsScheduler.QSchedPolicy = "WRR"
+ tprofile2.DsScheduler = &openoltpb2.SchedulerAttributes{}
+ tprofile2.DsScheduler.Direction = tp_pb.Direction_DOWNSTREAM
+ tprofile2.DsScheduler.AdditionalBw = tp_pb.AdditionalBW_AdditionalBW_None
+ tprofile2.DsScheduler.QSchedPolicy = tp_pb.SchedulingPolicy_WRR
//defTprofile := &tp.DefaultTechProfile{}
tests := []struct {
name string
@@ -267,7 +231,6 @@
args args
}{
{"createTcontGemports-1", args{intfID: 0, onuID: 1, uniID: 1, uni: "16", uniPort: 1, TpID: 64, UsMeterID: 1, DsMeterID: 1, flowMetadata: flowmetadata}},
- {"createTcontGemports-1", args{intfID: 0, onuID: 1, uniID: 1, uni: "16", uniPort: 1, TpID: 65, UsMeterID: 1, DsMeterID: 1, flowMetadata: flowmetadata}},
}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
@@ -275,11 +238,11 @@
t.Run(tt.name, func(t *testing.T) {
_, _, tpInst := flowMgr[tt.args.intfID].createTcontGemports(ctx, tt.args.intfID, tt.args.onuID, tt.args.uniID, tt.args.uni, tt.args.uniPort, tt.args.TpID, tt.args.UsMeterID, tt.args.DsMeterID, tt.args.flowMetadata)
switch tpInst := tpInst.(type) {
- case *tp.TechProfile:
+ case *tp_pb.TechProfileInstance:
if tt.args.TpID != 64 {
t.Errorf("OpenOltFlowMgr.createTcontGemports() error = different tech, tech %v", tpInst)
}
- case *tp.EponProfile:
+ case *tp_pb.EponTechProfileInstance:
if tt.args.TpID != 65 {
t.Errorf("OpenOltFlowMgr.createTcontGemports() error = different tech, tech %v", tpInst)
}
@@ -680,7 +643,7 @@
// clean the flowMgr
for i := 0; i < intfNum; i++ {
- flowMgr[i].onuGemInfo = make([]rsrcMgr.OnuGemInfo, 0)
+ flowMgr[i].onuGemInfoMap = make(map[uint32]*rsrcMgr.OnuGemInfo)
}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
@@ -696,7 +659,6 @@
// Add gemPorts to OnuInfo in parallel threads
wg := sync.WaitGroup{}
-
for o := 1; o <= onuNum; o++ {
for i := 0; i < intfNum; i++ {
wg.Add(1)
@@ -711,15 +673,15 @@
wg.Wait()
- // check that each entry of onuGemInfo has the correct number of ONUs
+ // check that each entry of onuGemInfoMap has the correct number of ONUs
for i := 0; i < intfNum; i++ {
- lenofOnu := len(flowMgr[i].onuGemInfo)
+ lenofOnu := len(flowMgr[i].onuGemInfoMap)
if onuNum != lenofOnu {
- t.Errorf("OnuGemInfo length is not as expected len = %d, want %d", lenofOnu, onuNum)
+ t.Errorf("onuGemInfoMap length is not as expected len = %d, want %d", lenofOnu, onuNum)
}
for o := 1; o <= onuNum; o++ {
- lenOfGemPorts := len(flowMgr[i].onuGemInfo[o-1].GemPorts)
+ lenOfGemPorts := len(flowMgr[i].onuGemInfoMap[uint32(o)].GemPorts)
// check that each onuEntry has 1 gemPort
if lenOfGemPorts != 1 {
t.Errorf("Expected 1 GemPort per ONU, found %d", lenOfGemPorts)
@@ -727,7 +689,7 @@
// check that the value of the gemport is correct
gemID, _ := strconv.Atoi(fmt.Sprintf("90%d%d", i, o-1))
- currentValue := flowMgr[i].onuGemInfo[o-1].GemPorts[0]
+ currentValue := flowMgr[i].onuGemInfoMap[uint32(o)].GemPorts[0]
if uint32(gemID) != currentValue {
t.Errorf("Expected GemPort value to be %d, found %d", gemID, currentValue)
}
@@ -774,11 +736,11 @@
for _, gemPortDeleted := range tt.args.gemPortIDsToBeDeleted {
flowMgr[tt.args.intfID].deleteGemPortFromLocalCache(ctx, tt.args.intfID, tt.args.onuID, gemPortDeleted)
}
- lenofGemPorts := len(flowMgr[tt.args.intfID].onuGemInfo[0].GemPorts)
+ lenofGemPorts := len(flowMgr[tt.args.intfID].onuGemInfoMap[1].GemPorts)
if lenofGemPorts != tt.args.finalLength {
t.Errorf("GemPorts length is not as expected len = %d, want %d", lenofGemPorts, tt.args.finalLength)
}
- gemPorts := flowMgr[tt.args.intfID].onuGemInfo[0].GemPorts
+ gemPorts := flowMgr[tt.args.intfID].onuGemInfoMap[1].GemPorts
if !reflect.DeepEqual(tt.args.gemPortIDsRemaining, gemPorts) {
t.Errorf("GemPorts are not as expected = %v, want %v", gemPorts, tt.args.gemPortIDsRemaining)
}
@@ -798,11 +760,11 @@
wantErr bool
}{
// TODO: Add test cases.
- {"GetLogicalPortFromPacketIn", args{packetIn: &openoltpb2.PacketIndication{IntfType: "pon", IntfId: 0, GemportId: 255, FlowId: 100, PortNo: 1, Cookie: 100, Pkt: []byte("GetLogicalPortFromPacketIn")}}, 1, false},
- {"GetLogicalPortFromPacketIn", args{packetIn: &openoltpb2.PacketIndication{IntfType: "nni", IntfId: 0, GemportId: 1, FlowId: 100, PortNo: 1, Cookie: 100, Pkt: []byte("GetLogicalPortFromPacketIn")}}, 1048576, false},
+ {"GetLogicalPortFromPacketIn", args{packetIn: &openoltpb2.PacketIndication{IntfType: "pon", IntfId: 0, GemportId: 255, OnuId: 1, UniId: 0, FlowId: 100, PortNo: 1, Cookie: 100, Pkt: []byte("GetLogicalPortFromPacketIn")}}, 1, false},
+ {"GetLogicalPortFromPacketIn", args{packetIn: &openoltpb2.PacketIndication{IntfType: "nni", IntfId: 0, GemportId: 1, OnuId: 1, UniId: 0, FlowId: 100, PortNo: 1, Cookie: 100, Pkt: []byte("GetLogicalPortFromPacketIn")}}, 1048576, false},
// Negative Test cases.
- {"GetLogicalPortFromPacketIn", args{packetIn: &openoltpb2.PacketIndication{IntfType: "pon", IntfId: 1, GemportId: 1, FlowId: 100, PortNo: 1, Cookie: 100, Pkt: []byte("GetLogicalPortFromPacketIn")}}, 0, true},
- {"GetLogicalPortFromPacketIn", args{packetIn: &openoltpb2.PacketIndication{IntfType: "pon", IntfId: 0, GemportId: 257, FlowId: 100, PortNo: 0, Cookie: 100, Pkt: []byte("GetLogicalPortFromPacketIn")}}, 16, false},
+ {"GetLogicalPortFromPacketIn", args{packetIn: &openoltpb2.PacketIndication{IntfType: "pon", IntfId: 1, GemportId: 1, OnuId: 1, UniId: 0, FlowId: 100, PortNo: 1, Cookie: 100, Pkt: []byte("GetLogicalPortFromPacketIn")}}, 1, false},
+ {"GetLogicalPortFromPacketIn", args{packetIn: &openoltpb2.PacketIndication{IntfType: "pon", IntfId: 0, GemportId: 257, OnuId: 1, UniId: 0, FlowId: 100, PortNo: 0, Cookie: 100, Pkt: []byte("GetLogicalPortFromPacketIn")}}, 16, false},
}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
@@ -1067,51 +1029,55 @@
// So just return in case of error
return
}
-
- TpInst := &tp.TechProfile{
+	/*
+		usGemList := make([]*tp_pb.GemPortAttributes, 0, 4)
+		usGemList = append(usGemList, &tp_pb.GemPortAttributes{})
+		usGemList = append(usGemList, &tp_pb.GemPortAttributes{})
+		usGemList = append(usGemList, &tp_pb.GemPortAttributes{})
+		usGemList = append(usGemList, &tp_pb.GemPortAttributes{})
+		dsGemList := make([]*tp_pb.GemPortAttributes, 0, 4)
+		dsGemList = append(dsGemList, &tp_pb.GemPortAttributes{})
+		dsGemList = append(dsGemList, &tp_pb.GemPortAttributes{})
+		dsGemList = append(dsGemList, &tp_pb.GemPortAttributes{})
+		dsGemList = append(dsGemList, &tp_pb.GemPortAttributes{})
+	*/
+ TpInst := &tp_pb.TechProfileInstance{
Name: "Test-Tech-Profile",
SubscriberIdentifier: "257",
ProfileType: "Mock",
Version: 1,
NumGemPorts: 4,
- InstanceCtrl: tp.InstanceControl{
+ InstanceControl: &tp_pb.InstanceControl{
Onu: "1",
Uni: "16",
},
+ UsScheduler: &openoltpb2.SchedulerAttributes{},
+ DsScheduler: &openoltpb2.SchedulerAttributes{},
}
TpInst.UsScheduler.Priority = 1
- TpInst.UsScheduler.Direction = "upstream"
- TpInst.UsScheduler.AllocID = 1
- TpInst.UsScheduler.AdditionalBw = "None"
- TpInst.UsScheduler.QSchedPolicy = "PQ"
+ TpInst.UsScheduler.Direction = tp_pb.Direction_UPSTREAM
+ TpInst.UsScheduler.AllocId = 1
+ TpInst.UsScheduler.AdditionalBw = tp_pb.AdditionalBW_AdditionalBW_None
+ TpInst.UsScheduler.QSchedPolicy = tp_pb.SchedulingPolicy_WRR
TpInst.UsScheduler.Weight = 4
TpInst.DsScheduler.Priority = 1
- TpInst.DsScheduler.Direction = "upstream"
- TpInst.DsScheduler.AllocID = 1
- TpInst.DsScheduler.AdditionalBw = "None"
- TpInst.DsScheduler.QSchedPolicy = "PQ"
+ TpInst.DsScheduler.Direction = tp_pb.Direction_DOWNSTREAM
+ TpInst.DsScheduler.AllocId = 1
+ TpInst.DsScheduler.AdditionalBw = tp_pb.AdditionalBW_AdditionalBW_None
+ TpInst.DsScheduler.QSchedPolicy = tp_pb.SchedulingPolicy_WRR
TpInst.DsScheduler.Weight = 4
+ TpInst.UpstreamGemPortAttributeList = make([]*tp_pb.GemPortAttributes, 0)
+ TpInst.UpstreamGemPortAttributeList = append(TpInst.UpstreamGemPortAttributeList, &tp_pb.GemPortAttributes{GemportId: 1, PbitMap: "0b00000011"})
+ TpInst.UpstreamGemPortAttributeList = append(TpInst.UpstreamGemPortAttributeList, &tp_pb.GemPortAttributes{GemportId: 2, PbitMap: "0b00001100"})
+ TpInst.UpstreamGemPortAttributeList = append(TpInst.UpstreamGemPortAttributeList, &tp_pb.GemPortAttributes{GemportId: 3, PbitMap: "0b00110000"})
+ TpInst.UpstreamGemPortAttributeList = append(TpInst.UpstreamGemPortAttributeList, &tp_pb.GemPortAttributes{GemportId: 4, PbitMap: "0b11000000"})
- TpInst.UpstreamGemPortAttributeList = make([]tp.IGemPortAttribute, 4)
- TpInst.UpstreamGemPortAttributeList[0].GemportID = 1
- TpInst.UpstreamGemPortAttributeList[0].PbitMap = "0b00000011"
- TpInst.UpstreamGemPortAttributeList[0].GemportID = 2
- TpInst.UpstreamGemPortAttributeList[0].PbitMap = "0b00001100"
- TpInst.UpstreamGemPortAttributeList[0].GemportID = 3
- TpInst.UpstreamGemPortAttributeList[0].PbitMap = "0b00110000"
- TpInst.UpstreamGemPortAttributeList[0].GemportID = 4
- TpInst.UpstreamGemPortAttributeList[0].PbitMap = "0b11000000"
-
- TpInst.DownstreamGemPortAttributeList = make([]tp.IGemPortAttribute, 4)
- TpInst.DownstreamGemPortAttributeList[0].GemportID = 1
- TpInst.DownstreamGemPortAttributeList[0].PbitMap = "0b00000011"
- TpInst.DownstreamGemPortAttributeList[0].GemportID = 2
- TpInst.DownstreamGemPortAttributeList[0].PbitMap = "0b00001100"
- TpInst.DownstreamGemPortAttributeList[0].GemportID = 3
- TpInst.DownstreamGemPortAttributeList[0].PbitMap = "0b00110000"
- TpInst.DownstreamGemPortAttributeList[0].GemportID = 4
- TpInst.DownstreamGemPortAttributeList[0].PbitMap = "0b11000000"
+ TpInst.DownstreamGemPortAttributeList = make([]*tp_pb.GemPortAttributes, 0)
+ TpInst.DownstreamGemPortAttributeList = append(TpInst.DownstreamGemPortAttributeList, &tp_pb.GemPortAttributes{GemportId: 1, PbitMap: "0b00000011"})
+ TpInst.DownstreamGemPortAttributeList = append(TpInst.DownstreamGemPortAttributeList, &tp_pb.GemPortAttributes{GemportId: 2, PbitMap: "0b00001100"})
+ TpInst.DownstreamGemPortAttributeList = append(TpInst.DownstreamGemPortAttributeList, &tp_pb.GemPortAttributes{GemportId: 3, PbitMap: "0b00110000"})
+ TpInst.DownstreamGemPortAttributeList = append(TpInst.DownstreamGemPortAttributeList, &tp_pb.GemPortAttributes{GemportId: 4, PbitMap: "0b11000000"})
type args struct {
args map[string]uint32
@@ -1123,7 +1089,7 @@
onuID uint32
uniID uint32
portNo uint32
- TpInst *tp.TechProfile
+ TpInst *tp_pb.TechProfileInstance
allocID []uint32
gemPorts []uint32
TpID uint32
diff --git a/internal/pkg/core/openolt_groupmgr.go b/internal/pkg/core/openolt_groupmgr.go
index a87073b..4f633a7 100644
--- a/internal/pkg/core/openolt_groupmgr.go
+++ b/internal/pkg/core/openolt_groupmgr.go
@@ -16,8 +16,8 @@
import (
"context"
- "github.com/opencord/voltha-lib-go/v4/pkg/flows"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/flows"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
"github.com/opencord/voltha-openolt-adapter/internal/pkg/olterrors"
rsrcMgr "github.com/opencord/voltha-openolt-adapter/internal/pkg/resourcemanager"
ofp "github.com/opencord/voltha-protos/v4/go/openflow_13"
diff --git a/internal/pkg/core/openolt_test.go b/internal/pkg/core/openolt_test.go
index 3933475..8665028 100644
--- a/internal/pkg/core/openolt_test.go
+++ b/internal/pkg/core/openolt_test.go
@@ -28,13 +28,13 @@
"reflect"
"testing"
- conf "github.com/opencord/voltha-lib-go/v4/pkg/config"
+ conf "github.com/opencord/voltha-lib-go/v5/pkg/config"
- com "github.com/opencord/voltha-lib-go/v4/pkg/adapters/common"
- "github.com/opencord/voltha-lib-go/v4/pkg/events"
- fu "github.com/opencord/voltha-lib-go/v4/pkg/flows"
- "github.com/opencord/voltha-lib-go/v4/pkg/kafka"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ com "github.com/opencord/voltha-lib-go/v5/pkg/adapters/common"
+ "github.com/opencord/voltha-lib-go/v5/pkg/events"
+ fu "github.com/opencord/voltha-lib-go/v5/pkg/flows"
+ "github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
"github.com/opencord/voltha-openolt-adapter/internal/pkg/config"
"github.com/opencord/voltha-openolt-adapter/internal/pkg/olterrors"
ic "github.com/opencord/voltha-protos/v4/go/inter_container"
diff --git a/internal/pkg/core/statsmanager.go b/internal/pkg/core/statsmanager.go
index 42c60d7..4d7d52d 100755
--- a/internal/pkg/core/statsmanager.go
+++ b/internal/pkg/core/statsmanager.go
@@ -25,7 +25,7 @@
"sync"
"time"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
"github.com/opencord/voltha-openolt-adapter/internal/pkg/olterrors"
"github.com/opencord/voltha-protos/v4/go/extension"
"github.com/opencord/voltha-protos/v4/go/openolt"
@@ -285,7 +285,7 @@
var Ports interface{}
Ports, _ = InitPorts(ctx, "nni", Dev.device.Id, 1)
StatMgr.NorthBoundPort, _ = Ports.(map[uint32]*NniPort)
- NumPonPorts := Dev.resourceMgr.DevInfo.GetPonPorts()
+ NumPonPorts := Dev.resourceMgr[0].DevInfo.GetPonPorts()
Ports, _ = InitPorts(ctx, "pon", Dev.device.Id, NumPonPorts)
StatMgr.SouthBoundPort, _ = Ports.(map[uint32]*PonPort)
if StatMgr.Device.openOLT.enableONUStats {
diff --git a/internal/pkg/olterrors/common.go b/internal/pkg/olterrors/common.go
index fa427a7..7a1d00d 100644
--- a/internal/pkg/olterrors/common.go
+++ b/internal/pkg/olterrors/common.go
@@ -18,7 +18,7 @@
package olterrors
import (
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
)
var logger log.CLogger
diff --git a/internal/pkg/olterrors/olterrors.go b/internal/pkg/olterrors/olterrors.go
index c5790ac..43bcd07 100644
--- a/internal/pkg/olterrors/olterrors.go
+++ b/internal/pkg/olterrors/olterrors.go
@@ -21,7 +21,7 @@
"context"
"encoding/json"
"fmt"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
"strings"
)
@@ -364,4 +364,12 @@
// ErrResourceManagerInstantiating error returned when an unexpected
// condition occcurs while instantiating the resource manager
ErrResourceManagerInstantiating = NewErrAdapter("resoure-manager-instantiating", nil, nil)
+
+ // ErrFlowManagerInstantiating error returned when an unexpected
+ // condition occurs while instantiating the flow manager
+ ErrFlowManagerInstantiating = NewErrAdapter("flow-manager-instantiating", nil, nil)
+
+ // ErrGroupManagerInstantiating error returned when an unexpected
+ // condition occurs while instantiating the group manager
+ ErrGroupManagerInstantiating = NewErrAdapter("group-manager-instantiating", nil, nil)
)
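The two new sentinels above follow the existing NewErrAdapter pattern, so callers can return them directly when the corresponding manager fails to come up. A minimal, hypothetical caller-side sketch (the real call sites live in the device handler and are not part of this hunk):

    // Illustrative only: surfacing the new sentinels from a readiness check.
    func checkManagers(flowMgrReady, groupMgrReady bool) error {
        if !flowMgrReady {
            return ErrFlowManagerInstantiating
        }
        if !groupMgrReady {
            return ErrGroupManagerInstantiating
        }
        return nil
    }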
diff --git a/internal/pkg/resourcemanager/common.go b/internal/pkg/resourcemanager/common.go
index 5b6eedf..256e657 100644
--- a/internal/pkg/resourcemanager/common.go
+++ b/internal/pkg/resourcemanager/common.go
@@ -18,7 +18,7 @@
package resourcemanager
import (
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
)
var logger log.CLogger
diff --git a/internal/pkg/resourcemanager/resourcemanager.go b/internal/pkg/resourcemanager/resourcemanager.go
index a501310..737f694 100755
--- a/internal/pkg/resourcemanager/resourcemanager.go
+++ b/internal/pkg/resourcemanager/resourcemanager.go
@@ -22,17 +22,14 @@
"encoding/json"
"errors"
"fmt"
- "strconv"
"strings"
"sync"
"time"
- "github.com/opencord/voltha-openolt-adapter/internal/pkg/olterrors"
-
- "github.com/opencord/voltha-lib-go/v4/pkg/db"
- "github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
- ponrmgr "github.com/opencord/voltha-lib-go/v4/pkg/ponresourcemanager"
+ "github.com/opencord/voltha-lib-go/v5/pkg/db"
+ "github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
+ ponrmgr "github.com/opencord/voltha-lib-go/v5/pkg/ponresourcemanager"
ofp "github.com/opencord/voltha-protos/v4/go/openflow_13"
"github.com/opencord/voltha-protos/v4/go/openolt"
)
@@ -42,20 +39,18 @@
KvstoreTimeout = 5 * time.Second
// BasePathKvStore - <pathPrefix>/openolt/<device_id>
BasePathKvStore = "%s/openolt/{%s}"
- // TpIDPathSuffix - <(pon_id, onu_id, uni_id)>/tp_id
- TpIDPathSuffix = "{%d,%d,%d}/tp_id"
+ // tpIDPathSuffix - <(pon_id, onu_id, uni_id)>/tp_id
+ tpIDPathSuffix = "{%d,%d,%d}/tp_id"
//MeterIDPathSuffix - <(pon_id, onu_id, uni_id)>/<tp_id>/meter_id/<direction>
MeterIDPathSuffix = "{%d,%d,%d}/{%d}/meter_id/{%s}"
- //NnniIntfID - nniintfids
- NnniIntfID = "nniintfids"
// OnuPacketINPathPrefix - path prefix where ONU packet-in vlanID/PCP is stored
//format: onu_packetin/{<intfid>,<onuid>,<logicalport>}
OnuPacketINPathPrefix = "onu_packetin/{%d,%d,%d}"
// OnuPacketINPath path on the kvstore to store packetin gemport,which will be used for packetin, packetout
//format: onu_packetin/{<intfid>,<onuid>,<logicalport>}/{<vlanId>,<priority>}
OnuPacketINPath = OnuPacketINPathPrefix + "/{%d,%d}"
- //FlowIDsForGem flowids_per_gem/<intfid>
- FlowIDsForGem = "flowids_per_gem/{%d}"
+ //FlowIDsForGem flowids_per_gem/<intfid>/<gemport-id>
+ FlowIDsForGem = "flowids_per_gem/{%d}/{%d}"
//McastQueuesForIntf multicast queues for pon interfaces
McastQueuesForIntf = "mcast_qs_for_int"
//FlowGroup flow_groups/<flow_group_id>
@@ -74,9 +69,10 @@
//FlowIDPath - Path on the KV store for storing list of Flow IDs for a given subscriber
//Format: BasePathKvStore/<(pon_intf_id, onu_id, uni_id)>/flow_ids
FlowIDPath = "{%s}/flow_ids"
- //FlowIDInfoPath - Used to store more metadata associated with the flow_id
- //Format: BasePathKvStore/<(pon_intf_id, onu_id, uni_id)>/flow_id_info/<flow_id>
- FlowIDInfoPath = "{%s}/flow_id_info/{%d}"
+
+ //OnuGemInfoPath is the path on the kvstore to store the onu gem info map
+ //format: onu_gem_info/<intfid>/<onu_id> (the device-scoped prefix comes from the KV backend base path)
+ OnuGemInfoPath = "onu_gem_info/{%d}/{%d}"
)
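These constants are only suffixes; the device-scoped prefix defined by BasePathKvStore is applied by the db.Backend when the KV client is set up, so callers expand just the suffix with fmt.Sprintf. A minimal sketch of how the new per-gem and per-ONU keys expand (values and the helper name are illustrative):

    // Illustrative only: how the templates above expand into KV keys within this package.
    func exampleKeys() (string, string) {
        intfID, onuID, gemPortID := uint32(0), uint32(1), uint32(1024)
        return fmt.Sprintf(FlowIDsForGem, intfID, gemPortID), // "flowids_per_gem/{0}/{1024}"
            fmt.Sprintf(OnuGemInfoPath, intfID, onuID) // "onu_gem_info/{0}/{1}"
    }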
// FlowInfo holds the flow information
@@ -111,12 +107,13 @@
// MeterInfo store meter information at path <(pon_id, onu_id, uni_id)>/<tp_id>/meter_id/<direction>
type MeterInfo struct {
- RefCnt uint8 // number of flow references for this meter. When RefCnt is 0, the MeterInfo should be deleted.
- MeterConfig ofp.OfpMeterConfig
+ RefCnt uint8 // number of flow references for this meter. When RefCnt is 0, the MeterInfo should be deleted.
+ MeterID uint32
}
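MeterInfo now stores just the meter ID plus a reference count instead of the full OpenFlow meter config, which is part of the reduced etcd footprint. A sketch of the RefCnt contract described in the field comment (helper names are illustrative, not adapter API):

    // Sketch only: the reference-count lifecycle implied by the RefCnt comment above.
    func addMeterRef(m *MeterInfo) { m.RefCnt++ }

    // releaseMeterRef reports whether the MeterInfo should now be deleted.
    func releaseMeterRef(m *MeterInfo) bool {
        if m.RefCnt > 0 {
            m.RefCnt--
        }
        return m.RefCnt == 0
    }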
// OpenOltResourceMgr holds resource related information as provided below for each field
type OpenOltResourceMgr struct {
+ PonIntfID uint32
DeviceID string // OLT device id
Address string // Host and port of the kv store to connect to
Args string // args
@@ -124,14 +121,40 @@
DeviceType string
DevInfo *openolt.DeviceInfo // device information
// array of pon resource managers per interface technology
- ResourceMgrs map[uint32]*ponrmgr.PONResourceManager
+ PonRsrMgr *ponrmgr.PONResourceManager
- // This protects concurrent gemport_id allocate/delete calls on a per PON port basis
- GemPortIDMgmtLock []sync.RWMutex
- // This protects concurrent alloc_id allocate/delete calls on a per PON port basis
- AllocIDMgmtLock []sync.RWMutex
- // This protects concurrent onu_id allocate/delete calls on a per PON port basis
- OnuIDMgmtLock []sync.RWMutex
+ // Local maps used for write-through-cache - start
+ flowIDsForOnu map[string][]uint64
+ flowIDsForOnuLock sync.RWMutex
+
+ allocIDsForOnu map[string][]uint32
+ allocIDsForOnuLock sync.RWMutex
+
+ gemPortIDsForOnu map[string][]uint32
+ gemPortIDsForOnuLock sync.RWMutex
+
+ techProfileIDsForOnu map[string][]uint32
+ techProfileIDsForOnuLock sync.RWMutex
+
+ meterInfoForOnu map[string]*MeterInfo
+ meterInfoForOnuLock sync.RWMutex
+
+ onuGemInfo map[string]*OnuGemInfo
+ onuGemInfoLock sync.RWMutex
+
+ gemPortForPacketInInfo map[string]uint32
+ gemPortForPacketInInfoLock sync.RWMutex
+
+ flowIDsForGem map[uint32][]uint64
+ flowIDsForGemLock sync.RWMutex
+
+ mcastQueueForIntf map[uint32][]uint32
+ mcastQueueForIntfLock sync.RWMutex
+ mcastQueueForIntfLoadedFromKvStore bool
+
+ groupInfo map[string]*GroupInfo
+ groupInfoLock sync.RWMutex
+ // Local maps used for write-through-cache - end
}
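Each cached map above is paired with its own sync.RWMutex, so contention is localized to one kind of resource on one PON port. The accessors later in this file (for example GetCurrentFlowIDsForOnu) all follow the same read-through/write-through shape; a condensed sketch, with the KV-load step left abstract:

    // Condensed sketch of the cache-first read used by the accessors below;
    // loadFromKV is an illustrative stand-in for the KVStore.Get + unmarshal step.
    func (rsrcMgr *OpenOltResourceMgr) cachedFlowIDs(ctx context.Context, key string) []uint64 {
        rsrcMgr.flowIDsForOnuLock.RLock()
        ids, ok := rsrcMgr.flowIDsForOnu[key]
        rsrcMgr.flowIDsForOnuLock.RUnlock()
        if ok {
            return ids // cache hit: no etcd round trip
        }
        ids = loadFromKV(ctx, key) // cache miss: fall back to the KV store
        rsrcMgr.flowIDsForOnuLock.Lock()
        rsrcMgr.flowIDsForOnu[key] = ids
        rsrcMgr.flowIDsForOnuLock.Unlock()
        return ids
    }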
func newKVClient(ctx context.Context, storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
@@ -152,6 +175,7 @@
logger.Fatalw(ctx, "Failed to init KV client\n", log.Fields{"err": err})
return nil
}
+ // return db.NewBackend(ctx, backend, addr, KvstoreTimeout, fmt.Sprintf(BasePathKvStore, basePathKvStore, DeviceID))
kvbackend := &db.Backend{
Client: kvClient,
@@ -166,346 +190,163 @@
// NewResourceMgr init a New resource manager instance which in turn instantiates pon resource manager
// instances according to technology. Initializes the default resource ranges for all
// the resources.
-func NewResourceMgr(ctx context.Context, deviceID string, KVStoreAddress string, kvStoreType string, deviceType string, devInfo *openolt.DeviceInfo, basePathKvStore string) *OpenOltResourceMgr {
+func NewResourceMgr(ctx context.Context, PonIntfID uint32, deviceID string, KVStoreAddress string, kvStoreType string, deviceType string, devInfo *openolt.DeviceInfo, basePathKvStore string) *OpenOltResourceMgr {
var ResourceMgr OpenOltResourceMgr
- logger.Debugf(ctx, "Init new resource manager , address: %s, device-id: %s", KVStoreAddress, deviceID)
+ logger.Debugf(ctx, "Init new resource manager, ponIf: %v, address: %s, device-id: %s", PonIntfID, KVStoreAddress, deviceID)
+ ResourceMgr.PonIntfID = PonIntfID
ResourceMgr.DeviceID = deviceID
ResourceMgr.Address = KVStoreAddress
ResourceMgr.DeviceType = deviceType
ResourceMgr.DevInfo = devInfo
- NumPONPorts := devInfo.GetPonPorts()
Backend := kvStoreType
ResourceMgr.KVStore = SetKVClient(ctx, Backend, ResourceMgr.Address, deviceID, basePathKvStore)
if ResourceMgr.KVStore == nil {
logger.Error(ctx, "Failed to setup KV store")
}
- Ranges := make(map[string]*openolt.DeviceInfo_DeviceResourceRanges)
- RsrcMgrsByTech := make(map[string]*ponrmgr.PONResourceManager)
- ResourceMgr.ResourceMgrs = make(map[uint32]*ponrmgr.PONResourceManager)
-
- ResourceMgr.AllocIDMgmtLock = make([]sync.RWMutex, NumPONPorts)
- ResourceMgr.GemPortIDMgmtLock = make([]sync.RWMutex, NumPONPorts)
- ResourceMgr.OnuIDMgmtLock = make([]sync.RWMutex, NumPONPorts)
// TODO self.args = registry('main').get_args()
- /*
- If a legacy driver returns protobuf without any ranges,s synthesize one from
- the legacy global per-device information. This, in theory, is temporary until
- the legacy drivers are upgrade to support pool ranges.
- */
- if devInfo.Ranges == nil {
- var ranges openolt.DeviceInfo_DeviceResourceRanges
- ranges.Technology = devInfo.GetTechnology()
-
- var index uint32
- for index = 0; index < NumPONPorts; index++ {
- ranges.IntfIds = append(ranges.IntfIds, index)
- }
-
- var Pool openolt.DeviceInfo_DeviceResourceRanges_Pool
- Pool.Type = openolt.DeviceInfo_DeviceResourceRanges_Pool_ONU_ID
- Pool.Start = devInfo.OnuIdStart
- Pool.End = devInfo.OnuIdEnd
- Pool.Sharing = openolt.DeviceInfo_DeviceResourceRanges_Pool_DEDICATED_PER_INTF
- onuPool := Pool
- ranges.Pools = append(ranges.Pools, &onuPool)
-
- Pool.Type = openolt.DeviceInfo_DeviceResourceRanges_Pool_ALLOC_ID
- Pool.Start = devInfo.AllocIdStart
- Pool.End = devInfo.AllocIdEnd
- Pool.Sharing = openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH
- allocPool := Pool
- ranges.Pools = append(ranges.Pools, &allocPool)
-
- Pool.Type = openolt.DeviceInfo_DeviceResourceRanges_Pool_GEMPORT_ID
- Pool.Start = devInfo.GemportIdStart
- Pool.End = devInfo.GemportIdEnd
- Pool.Sharing = openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH
- gemPool := Pool
- ranges.Pools = append(ranges.Pools, &gemPool)
-
- Pool.Type = openolt.DeviceInfo_DeviceResourceRanges_Pool_FLOW_ID
- Pool.Start = devInfo.FlowIdStart
- Pool.End = devInfo.FlowIdEnd
- Pool.Sharing = openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH
- ranges.Pools = append(ranges.Pools, &Pool)
- // Add to device info
- devInfo.Ranges = append(devInfo.Ranges, &ranges)
- }
-
// Create a separate Resource Manager instance for each range. This assumes that
// each technology is represented by only a single range
- var GlobalPONRsrcMgr *ponrmgr.PONResourceManager
- var err error
for _, TechRange := range devInfo.Ranges {
- technology := TechRange.Technology
- logger.Debugf(ctx, "Device info technology %s", technology)
- Ranges[technology] = TechRange
+ for _, intfID := range TechRange.IntfIds {
+ if intfID == PonIntfID {
+ technology := TechRange.Technology
+ logger.Debugf(ctx, "Device info technology %s, intf-id %v", technology, PonIntfID)
- RsrcMgrsByTech[technology], err = ponrmgr.NewPONResourceManager(ctx, technology, deviceType, deviceID,
- Backend, ResourceMgr.Address, basePathKvStore)
- if err != nil {
- logger.Errorf(ctx, "Failed to create pon resource manager instance for technology %s", technology)
- return nil
+ rsrMgr, err := ponrmgr.NewPONResourceManager(ctx, technology, deviceType, deviceID,
+ Backend, ResourceMgr.Address, basePathKvStore)
+ if err != nil {
+ logger.Errorf(ctx, "Failed to create pon resource manager instance for technology %s", technology)
+ return nil
+ }
+ ResourceMgr.PonRsrMgr = rsrMgr
+ // self.initialize_device_resource_range_and_pool(resource_mgr, global_resource_mgr, arange)
+ InitializeDeviceResourceRangeAndPool(ctx, rsrMgr, TechRange, devInfo)
+ if err := ResourceMgr.PonRsrMgr.InitDeviceResourcePoolForIntf(ctx, intfID); err != nil {
+ logger.Fatalf(ctx, "failed-to-initialize-device-resource-pool-intf-id-%v-device-id-%v", ResourceMgr.PonIntfID, ResourceMgr.DeviceID)
+ return nil
+ }
+ }
}
- // resource_mgrs_by_tech[technology] = resource_mgr
- if GlobalPONRsrcMgr == nil {
- GlobalPONRsrcMgr = RsrcMgrsByTech[technology]
- }
- for _, IntfID := range TechRange.IntfIds {
- ResourceMgr.ResourceMgrs[IntfID] = RsrcMgrsByTech[technology]
- }
- // self.initialize_device_resource_range_and_pool(resource_mgr, global_resource_mgr, arange)
- InitializeDeviceResourceRangeAndPool(ctx, RsrcMgrsByTech[technology], GlobalPONRsrcMgr,
- TechRange, devInfo)
}
- // After we have initialized resource ranges, initialize the
- // resource pools accordingly.
- for _, PONRMgr := range RsrcMgrsByTech {
- _ = PONRMgr.InitDeviceResourcePool(ctx)
- }
+
+ ResourceMgr.InitLocalCache()
+
logger.Info(ctx, "Initialization of resource manager success!")
return &ResourceMgr
}
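With the new PonIntfID argument, the adapter is expected to hold one OpenOltResourceMgr per PON port (statsmanager.go below already indexes Dev.resourceMgr[0]). A sketch of how a device handler might build that per-port array, assuming the signature above; the actual handler code is outside this file and names here are illustrative:

    // Sketch only: per-PON-port construction to localize lock contention per interface.
    func buildResourceMgrs(ctx context.Context, deviceID, kvAddr, kvType, devType, basePath string, devInfo *openolt.DeviceInfo) []*OpenOltResourceMgr {
        mgrs := make([]*OpenOltResourceMgr, devInfo.GetPonPorts())
        for intfID := uint32(0); intfID < devInfo.GetPonPorts(); intfID++ {
            mgrs[intfID] = NewResourceMgr(ctx, intfID, deviceID, kvAddr, kvType, devType, devInfo, basePath)
        }
        return mgrs
    }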
+//InitLocalCache initializes local maps used for write-through-cache
+func (rsrcMgr *OpenOltResourceMgr) InitLocalCache() {
+ rsrcMgr.flowIDsForOnu = make(map[string][]uint64)
+ rsrcMgr.allocIDsForOnu = make(map[string][]uint32)
+ rsrcMgr.gemPortIDsForOnu = make(map[string][]uint32)
+ rsrcMgr.techProfileIDsForOnu = make(map[string][]uint32)
+ rsrcMgr.meterInfoForOnu = make(map[string]*MeterInfo)
+ rsrcMgr.onuGemInfo = make(map[string]*OnuGemInfo)
+ rsrcMgr.gemPortForPacketInInfo = make(map[string]uint32)
+ rsrcMgr.flowIDsForGem = make(map[uint32][]uint64)
+ rsrcMgr.mcastQueueForIntf = make(map[uint32][]uint32)
+ rsrcMgr.groupInfo = make(map[string]*GroupInfo)
+}
+
// InitializeDeviceResourceRangeAndPool initializes the resource range pool according to the sharing type, then apply
// device specific information. If KV doesn't exist
// or is broader than the device, the device's information will
// dictate the range limits
-func InitializeDeviceResourceRangeAndPool(ctx context.Context, ponRMgr *ponrmgr.PONResourceManager, globalPONRMgr *ponrmgr.PONResourceManager,
+func InitializeDeviceResourceRangeAndPool(ctx context.Context, ponRMgr *ponrmgr.PONResourceManager,
techRange *openolt.DeviceInfo_DeviceResourceRanges, devInfo *openolt.DeviceInfo) {
+ // var ONUIDShared, AllocIDShared, GEMPortIDShared openolt.DeviceInfo_DeviceResourceRanges_Pool_SharingType
+ var ONUIDStart, ONUIDEnd, AllocIDStart, AllocIDEnd, GEMPortIDStart, GEMPortIDEnd uint32
+ var ONUIDShared, AllocIDShared, GEMPortIDShared, FlowIDShared uint32
+
+ // The variables below are dummies, needed only as arguments to the InitDefaultPONResourceRanges function.
+ // The openolt adapter does not need flow IDs to be managed, as they are managed on the OLT device itself.
+ // The UNI IDs are dynamically generated by the openonu adapter for every discovered UNI.
+ var flowIDDummyStart, flowIDDummyEnd uint32 = 1, 2
+ var uniIDDummyStart, uniIDDummyEnd uint32 = 0, 1
// init the resource range pool according to the sharing type
-
- logger.Debugf(ctx, "Resource range pool init for technology %s", ponRMgr.Technology)
- // first load from KV profiles
- status := ponRMgr.InitResourceRangesFromKVStore(ctx)
- if !status {
- logger.Debugf(ctx, "Failed to load resource ranges from KV store for tech %s", ponRMgr.Technology)
- }
-
- /*
- Then apply device specific information. If KV doesn't exist
- or is broader than the device, the device's information will
- dictate the range limits
- */
- logger.Debugw(ctx, "Using device info to init pon resource ranges", log.Fields{"Tech": ponRMgr.Technology})
-
- ONUIDStart := devInfo.OnuIdStart
- ONUIDEnd := devInfo.OnuIdEnd
- ONUIDShared := openolt.DeviceInfo_DeviceResourceRanges_Pool_DEDICATED_PER_INTF
- ONUIDSharedPoolID := uint32(0)
- AllocIDStart := devInfo.AllocIdStart
- AllocIDEnd := devInfo.AllocIdEnd
- AllocIDShared := openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH // TODO EdgeCore/BAL limitation
- AllocIDSharedPoolID := uint32(0)
- GEMPortIDStart := devInfo.GemportIdStart
- GEMPortIDEnd := devInfo.GemportIdEnd
- GEMPortIDShared := openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH // TODO EdgeCore/BAL limitation
- GEMPortIDSharedPoolID := uint32(0)
- FlowIDStart := devInfo.FlowIdStart
- FlowIDEnd := devInfo.FlowIdEnd
- FlowIDShared := openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH // TODO EdgeCore/BAL limitation
- FlowIDSharedPoolID := uint32(0)
-
- var FirstIntfPoolID uint32
- var SharedPoolID uint32
-
- /*
- * As a zero check is made against SharedPoolID to check whether the resources are shared across all intfs
- * if resources are shared across interfaces then SharedPoolID is given a positive number.
- */
- for _, FirstIntfPoolID = range techRange.IntfIds {
- // skip the intf id 0
- if FirstIntfPoolID == 0 {
- continue
- }
- break
- }
-
+ logger.Debugw(ctx, "Device info init", log.Fields{"technology": techRange.Technology,
+ "onu_id_start": ONUIDStart, "onu_id_end": ONUIDEnd,
+ "alloc_id_start": AllocIDStart, "alloc_id_end": AllocIDEnd,
+ "gemport_id_start": GEMPortIDStart, "gemport_id_end": GEMPortIDEnd,
+ "intf_ids": techRange.IntfIds,
+ })
for _, RangePool := range techRange.Pools {
- if RangePool.Sharing == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH {
- SharedPoolID = FirstIntfPoolID
- } else if RangePool.Sharing == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_SAME_TECH {
- SharedPoolID = FirstIntfPoolID
- } else {
- SharedPoolID = 0
- }
+ // FIXME: Remove hardcoding
if RangePool.Type == openolt.DeviceInfo_DeviceResourceRanges_Pool_ONU_ID {
ONUIDStart = RangePool.Start
ONUIDEnd = RangePool.End
- ONUIDShared = RangePool.Sharing
- ONUIDSharedPoolID = SharedPoolID
+ ONUIDShared = uint32(RangePool.Sharing)
} else if RangePool.Type == openolt.DeviceInfo_DeviceResourceRanges_Pool_ALLOC_ID {
AllocIDStart = RangePool.Start
AllocIDEnd = RangePool.End
- AllocIDShared = RangePool.Sharing
- AllocIDSharedPoolID = SharedPoolID
+ AllocIDShared = uint32(RangePool.Sharing)
} else if RangePool.Type == openolt.DeviceInfo_DeviceResourceRanges_Pool_GEMPORT_ID {
GEMPortIDStart = RangePool.Start
GEMPortIDEnd = RangePool.End
- GEMPortIDShared = RangePool.Sharing
- GEMPortIDSharedPoolID = SharedPoolID
- } else if RangePool.Type == openolt.DeviceInfo_DeviceResourceRanges_Pool_FLOW_ID {
- FlowIDStart = RangePool.Start
- FlowIDEnd = RangePool.End
- FlowIDShared = RangePool.Sharing
- FlowIDSharedPoolID = SharedPoolID
+ GEMPortIDShared = uint32(RangePool.Sharing)
}
}
- logger.Debugw(ctx, "Device info init", log.Fields{"technology": techRange.Technology,
- "onu_id_start": ONUIDStart, "onu_id_end": ONUIDEnd, "onu_id_shared_pool_id": ONUIDSharedPoolID,
- "alloc_id_start": AllocIDStart, "alloc_id_end": AllocIDEnd,
- "alloc_id_shared_pool_id": AllocIDSharedPoolID,
- "gemport_id_start": GEMPortIDStart, "gemport_id_end": GEMPortIDEnd,
- "gemport_id_shared_pool_id": GEMPortIDSharedPoolID,
- "flow_id_start": FlowIDStart,
- "flow_id_end_idx": FlowIDEnd,
- "flow_id_shared_pool_id": FlowIDSharedPoolID,
- "intf_ids": techRange.IntfIds,
- "uni_id_start": 0,
- "uni_id_end_idx": 1, /*MaxUNIIDperONU()*/
- })
-
- ponRMgr.InitDefaultPONResourceRanges(ctx, ONUIDStart, ONUIDEnd, ONUIDSharedPoolID,
- AllocIDStart, AllocIDEnd, AllocIDSharedPoolID,
- GEMPortIDStart, GEMPortIDEnd, GEMPortIDSharedPoolID,
- FlowIDStart, FlowIDEnd, FlowIDSharedPoolID, 0, 1,
+ ponRMgr.InitDefaultPONResourceRanges(ctx, ONUIDStart, ONUIDEnd, ONUIDShared,
+ AllocIDStart, AllocIDEnd, AllocIDShared,
+ GEMPortIDStart, GEMPortIDEnd, GEMPortIDShared,
+ flowIDDummyStart, flowIDDummyEnd, FlowIDShared, uniIDDummyStart, uniIDDummyEnd,
devInfo.PonPorts, techRange.IntfIds)
- // For global sharing, make sure to refresh both local and global resource manager instances' range
-
- if ONUIDShared == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH {
- globalPONRMgr.UpdateRanges(ctx, ponrmgr.ONU_ID_START_IDX, ONUIDStart, ponrmgr.ONU_ID_END_IDX, ONUIDEnd,
- "", 0, nil)
- ponRMgr.UpdateRanges(ctx, ponrmgr.ONU_ID_START_IDX, ONUIDStart, ponrmgr.ONU_ID_END_IDX, ONUIDEnd,
- "", 0, globalPONRMgr)
- }
- if AllocIDShared == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH {
- globalPONRMgr.UpdateRanges(ctx, ponrmgr.ALLOC_ID_START_IDX, AllocIDStart, ponrmgr.ALLOC_ID_END_IDX, AllocIDEnd,
- "", 0, nil)
-
- ponRMgr.UpdateRanges(ctx, ponrmgr.ALLOC_ID_START_IDX, AllocIDStart, ponrmgr.ALLOC_ID_END_IDX, AllocIDEnd,
- "", 0, globalPONRMgr)
- }
- if GEMPortIDShared == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH {
- globalPONRMgr.UpdateRanges(ctx, ponrmgr.GEMPORT_ID_START_IDX, GEMPortIDStart, ponrmgr.GEMPORT_ID_END_IDX, GEMPortIDEnd,
- "", 0, nil)
- ponRMgr.UpdateRanges(ctx, ponrmgr.GEMPORT_ID_START_IDX, GEMPortIDStart, ponrmgr.GEMPORT_ID_END_IDX, GEMPortIDEnd,
- "", 0, globalPONRMgr)
- }
- if FlowIDShared == openolt.DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH {
- globalPONRMgr.UpdateRanges(ctx, ponrmgr.FLOW_ID_START_IDX, FlowIDStart, ponrmgr.FLOW_ID_END_IDX, FlowIDEnd,
- "", 0, nil)
- ponRMgr.UpdateRanges(ctx, ponrmgr.FLOW_ID_START_IDX, FlowIDStart, ponrmgr.FLOW_ID_END_IDX, FlowIDEnd,
- "", 0, globalPONRMgr)
- }
-
- // Make sure loaded range fits the platform bit encoding ranges
- ponRMgr.UpdateRanges(ctx, ponrmgr.UNI_ID_START_IDX, 0, ponrmgr.UNI_ID_END_IDX /* TODO =OpenOltPlatform.MAX_UNIS_PER_ONU-1*/, 1, "", 0, nil)
}
// Delete clears used resources for the particular olt device being deleted
-func (RsrcMgr *OpenOltResourceMgr) Delete(ctx context.Context) error {
- /* TODO
- def __del__(self):
- self.log.info("clearing-device-resource-pool")
- for key, resource_mgr in self.resource_mgrs.iteritems():
- resource_mgr.clear_device_resource_pool()
-
- def assert_pon_id_limit(self, pon_intf_id):
- assert pon_intf_id in self.resource_mgrs
-
- def assert_onu_id_limit(self, pon_intf_id, onu_id):
- self.assert_pon_id_limit(pon_intf_id)
- self.resource_mgrs[pon_intf_id].assert_resource_limits(onu_id, PONResourceManager.ONU_ID)
-
- @property
- def max_uni_id_per_onu(self):
- return 0 #OpenOltPlatform.MAX_UNIS_PER_ONU-1, zero-based indexing Uncomment or override to make default multi-uni
-
- def assert_uni_id_limit(self, pon_intf_id, onu_id, uni_id):
- self.assert_onu_id_limit(pon_intf_id, onu_id)
- self.resource_mgrs[pon_intf_id].assert_resource_limits(uni_id, PONResourceManager.UNI_ID)
- */
- for _, rsrcMgr := range RsrcMgr.ResourceMgrs {
- if err := rsrcMgr.ClearDeviceResourcePool(ctx); err != nil {
- logger.Debug(ctx, "Failed to clear device resource pool")
- return err
- }
+func (rsrcMgr *OpenOltResourceMgr) Delete(ctx context.Context, intfID uint32) error {
+ if err := rsrcMgr.PonRsrMgr.ClearDeviceResourcePoolForIntf(ctx, intfID); err != nil {
+ logger.Debug(ctx, "Failed to clear device resource pool")
+ return err
}
logger.Debug(ctx, "Cleared device resource pool")
return nil
}
-// GetONUID returns the available OnuID for the given pon-port
-func (RsrcMgr *OpenOltResourceMgr) GetONUID(ctx context.Context, ponIntfID uint32) (uint32, error) {
- // Check if Pon Interface ID is present in Resource-manager-map
- RsrcMgr.OnuIDMgmtLock[ponIntfID].Lock()
- defer RsrcMgr.OnuIDMgmtLock[ponIntfID].Unlock()
-
- if _, ok := RsrcMgr.ResourceMgrs[ponIntfID]; !ok {
- err := errors.New("invalid-pon-interface-" + strconv.Itoa(int(ponIntfID)))
- return 0, err
- }
+// GetONUID returns the available onuID for the given pon-port
+func (rsrcMgr *OpenOltResourceMgr) GetONUID(ctx context.Context, PonIntfID uint32) (uint32, error) {
// Get ONU id for a provided pon interface ID.
- onuID, err := RsrcMgr.ResourceMgrs[ponIntfID].TechProfileMgr.GetResourceID(ctx, ponIntfID,
+ onuID, err := rsrcMgr.PonRsrMgr.TechProfileMgr.GetResourceID(ctx, PonIntfID,
ponrmgr.ONU_ID, 1)
if err != nil {
logger.Errorf(ctx, "Failed to get resource for interface %d for type %s",
- ponIntfID, ponrmgr.ONU_ID)
+ PonIntfID, ponrmgr.ONU_ID)
return 0, err
}
- if onuID != nil {
- RsrcMgr.ResourceMgrs[ponIntfID].InitResourceMap(ctx, fmt.Sprintf("%d,%d", ponIntfID, onuID[0]))
+ if len(onuID) > 0 {
+ rsrcMgr.PonRsrMgr.InitResourceMap(ctx, fmt.Sprintf("%d,%d", PonIntfID, onuID[0]))
return onuID[0], err
}
- return 0, err // return OnuID 0 on error
-}
-
-// GetFlowIDInfo returns the slice of flow info of the given pon-port
-// Note: For flows which trap from the NNI and not really associated with any particular
-// ONU (like LLDP), the onu_id and uni_id is set as -1. The intf_id is the NNI intf_id.
-func (RsrcMgr *OpenOltResourceMgr) GetFlowIDInfo(ctx context.Context, ponIntfID uint32, onuID int32, uniID int32, flowID uint64) *FlowInfo {
- var flowInfo FlowInfo
-
- subs := fmt.Sprintf("%d,%d,%d", ponIntfID, onuID, uniID)
- Path := fmt.Sprintf(FlowIDInfoPath, subs, flowID)
- value, err := RsrcMgr.KVStore.Get(ctx, Path)
- if err == nil {
- if value != nil {
- Val, err := toByte(value.Value)
- if err != nil {
- logger.Errorw(ctx, "Failed to convert flowinfo into byte array", log.Fields{"error": err, "subs": subs})
- return nil
- }
- if err = json.Unmarshal(Val, &flowInfo); err != nil {
- logger.Errorw(ctx, "Failed to unmarshal", log.Fields{"error": err, "subs": subs})
- return nil
- }
- }
- }
- if flowInfo.Flow == nil {
- logger.Debugw(ctx, "No flowInfo found in KV store", log.Fields{"subs": subs})
- return nil
- }
- return &flowInfo
+ return 0, err // return onuID 0 on error
}
// GetCurrentFlowIDsForOnu fetches flow ID from the resource manager
// Note: For flows which trap from the NNI and not really associated with any particular
// ONU (like LLDP), the onu_id and uni_id is set as -1. The intf_id is the NNI intf_id.
-func (RsrcMgr *OpenOltResourceMgr) GetCurrentFlowIDsForOnu(ctx context.Context, ponIntfID uint32, onuID int32, uniID int32) ([]uint64, error) {
+func (rsrcMgr *OpenOltResourceMgr) GetCurrentFlowIDsForOnu(ctx context.Context, PonIntfID uint32, onuID int32, uniID int32) ([]uint64, error) {
- subs := fmt.Sprintf("%d,%d,%d", ponIntfID, onuID, uniID)
+ subs := fmt.Sprintf("%d,%d,%d", PonIntfID, onuID, uniID)
path := fmt.Sprintf(FlowIDPath, subs)
+ // fetch from cache
+ rsrcMgr.flowIDsForOnuLock.RLock()
+ flowIDsForOnu, ok := rsrcMgr.flowIDsForOnu[path]
+ rsrcMgr.flowIDsForOnuLock.RUnlock()
+
+ if ok {
+ return flowIDsForOnu, nil
+ }
+
var data []uint64
- value, err := RsrcMgr.KVStore.Get(ctx, path)
+ value, err := rsrcMgr.KVStore.Get(ctx, path)
if err == nil {
if value != nil {
Val, _ := toByte(value.Value)
@@ -515,339 +356,126 @@
}
}
}
+ // update cache
+ rsrcMgr.flowIDsForOnuLock.Lock()
+ rsrcMgr.flowIDsForOnu[path] = data
+ rsrcMgr.flowIDsForOnuLock.Unlock()
+
return data, nil
}
-// UpdateFlowIDInfo updates flow info for the given pon interface, onu id, and uni id
-// Note: For flows which trap from the NNI and not really associated with any particular
-// ONU (like LLDP), the onu_id and uni_id is set as -1. The intf_id is the NNI intf_id.
-func (RsrcMgr *OpenOltResourceMgr) UpdateFlowIDInfo(ctx context.Context, ponIntfID uint32, onuID int32, uniID int32,
- flowID uint64, flowData FlowInfo) error {
-
- subs := fmt.Sprintf("%d,%d,%d", ponIntfID, onuID, uniID)
- path := fmt.Sprintf(FlowIDInfoPath, subs, flowID)
-
- var value []byte
- var err error
- value, err = json.Marshal(flowData)
- if err != nil {
- logger.Errorf(ctx, "failed to Marshal, resource path %s", path)
- return err
- }
-
- if err = RsrcMgr.KVStore.Put(ctx, path, value); err != nil {
- logger.Errorf(ctx, "Failed to update resource %s", path)
- }
-
- // Update the flowID list for the ONU
- if err = RsrcMgr.UpdateFlowIDForOnu(ctx, ponIntfID, onuID, uniID, flowID, true); err != nil {
- // If the operation fails, try to remove FlowInfo from the KV store
- _ = RsrcMgr.KVStore.Delete(ctx, path)
- return err
- }
- return err
-}
-
-// UpdateFlowIDForOnu updates the flow_id list of the ONU (add or remove flow_id from the list)
-func (RsrcMgr *OpenOltResourceMgr) UpdateFlowIDForOnu(ctx context.Context, ponIntfID uint32, onuID int32, uniID int32, flowID uint64, add bool) error {
- /*
- Update the flow_id list of the ONU (add or remove flow_id from the list)
- :param pon_intf_onu_id: reference of PON interface id and onu id
- :param flow_id: flow ID
- :param add: Boolean flag to indicate whether the flow_id should be
- added or removed from the list. Defaults to adding the flow.
- */
- var Value []byte
- var err error
- var retVal bool
- var idx uint64
- subs := fmt.Sprintf("%d,%d,%d", ponIntfID, onuID, uniID)
- path := fmt.Sprintf(FlowIDPath, subs)
- flowIDs, err := RsrcMgr.GetCurrentFlowIDsForOnu(ctx, ponIntfID, onuID, uniID)
- if err != nil {
- // Error logged in the called function
- return err
- }
-
- if add {
- if retVal, _ = checkForFlowIDInList(flowIDs, flowID); retVal {
- return nil
- }
- flowIDs = append(flowIDs, flowID)
- } else {
- if retVal, idx = checkForFlowIDInList(flowIDs, flowID); !retVal {
- return nil
- }
- // delete the index and shift
- flowIDs = append(flowIDs[:idx], flowIDs[idx+1:]...)
- }
- Value, err = json.Marshal(flowIDs)
- if err != nil {
- logger.Error(ctx, "Failed to Marshal")
- return err
- }
-
- if err = RsrcMgr.KVStore.Put(ctx, path, Value); err != nil {
- logger.Errorf(ctx, "Failed to update resource %s", path)
- return err
- }
- return err
-}
-
-// RemoveFlowIDInfo remove flow info for the given pon interface, onu id, and uni id
-// Note: For flows which trap from the NNI and not really associated with any particular
-// ONU (like LLDP), the onu_id and uni_id is set as -1. The intf_id is the NNI intf_id.
-func (RsrcMgr *OpenOltResourceMgr) RemoveFlowIDInfo(ctx context.Context, ponIntfID uint32, onuID int32, uniID int32,
- flowID uint64) error {
-
- subs := fmt.Sprintf("%d,%d,%d", ponIntfID, onuID, uniID)
- path := fmt.Sprintf(FlowIDInfoPath, subs, flowID)
-
- var err error
- if err = RsrcMgr.KVStore.Delete(ctx, path); err != nil {
- logger.Errorf(ctx, "Failed to delete resource %s", path)
- return err
- }
-
- // Update the flowID list for the ONU
- err = RsrcMgr.UpdateFlowIDForOnu(ctx, ponIntfID, onuID, uniID, flowID, false)
-
- return err
-}
-
-// RemoveAllFlowsForIntfOnuUniKey removes flow info for the given interface, onu id, and uni id
-func (RsrcMgr *OpenOltResourceMgr) RemoveAllFlowsForIntfOnuUniKey(ctx context.Context, intf uint32, onuID int32, uniID int32) error {
- flowIDs, err := RsrcMgr.GetCurrentFlowIDsForOnu(ctx, intf, onuID, uniID)
- if err != nil {
- // error logged in the called function
- return err
- }
- for _, flID := range flowIDs {
- if err := RsrcMgr.RemoveFlowIDInfo(ctx, intf, onuID, uniID, flID); err != nil {
- logger.Errorw(ctx, "failed-to-delete-flow-id-info", log.Fields{"intf": intf, "onuID": onuID, "uniID": uniID, "flowID": flID})
- }
- }
- subs := fmt.Sprintf("%d,%d,%d", intf, onuID, uniID)
- path := fmt.Sprintf(FlowIDPath, subs)
- if err := RsrcMgr.KVStore.Delete(ctx, path); err != nil {
- logger.Errorf(ctx, "Failed to delete resource %s", path)
- return err
- }
- return nil
-}
-
-// GetAllocID return the first Alloc ID for a given pon interface id and onu id and then update the resource map on
-// the KV store with the list of alloc_ids allocated for the pon_intf_onu_id tuple
-// Currently of all the alloc_ids available, it returns the first alloc_id in the list for tha given ONU
-func (RsrcMgr *OpenOltResourceMgr) GetAllocID(ctx context.Context, intfID uint32, onuID uint32, uniID uint32) uint32 {
-
- var err error
- IntfOnuIDUniID := fmt.Sprintf("%d,%d,%d", intfID, onuID, uniID)
-
- RsrcMgr.AllocIDMgmtLock[intfID].Lock()
- defer RsrcMgr.AllocIDMgmtLock[intfID].Unlock()
-
- AllocID := RsrcMgr.ResourceMgrs[intfID].GetCurrentAllocIDForOnu(ctx, IntfOnuIDUniID)
- if AllocID != nil {
- // Since we support only one alloc_id for the ONU at the moment,
- // return the first alloc_id in the list, if available, for that
- // ONU.
- logger.Debugw(ctx, "Retrieved alloc ID from pon resource mgr", log.Fields{"AllocID": AllocID})
- return AllocID[0]
- }
- AllocID, err = RsrcMgr.ResourceMgrs[intfID].GetResourceID(ctx, intfID,
- ponrmgr.ALLOC_ID, 1)
-
- if AllocID == nil || err != nil {
- logger.Error(ctx, "Failed to allocate alloc id")
- return 0
- }
- // update the resource map on KV store with the list of alloc_id
- // allocated for the pon_intf_onu_id tuple
- err = RsrcMgr.ResourceMgrs[intfID].UpdateAllocIdsForOnu(ctx, IntfOnuIDUniID, AllocID)
- if err != nil {
- logger.Error(ctx, "Failed to update Alloc ID")
- return 0
- }
- logger.Debugw(ctx, "Allocated new Tcont from pon resource mgr", log.Fields{"AllocID": AllocID})
- return AllocID[0]
-}
-
// UpdateAllocIdsForOnu updates alloc ids in kv store for a given pon interface id, onu id and uni id
-func (RsrcMgr *OpenOltResourceMgr) UpdateAllocIdsForOnu(ctx context.Context, ponPort uint32, onuID uint32, uniID uint32, allocID []uint32) error {
+func (rsrcMgr *OpenOltResourceMgr) UpdateAllocIdsForOnu(ctx context.Context, ponPort uint32, onuID uint32, uniID uint32, allocIDs []uint32) error {
- IntfOnuIDUniID := fmt.Sprintf("%d,%d,%d", ponPort, onuID, uniID)
- return RsrcMgr.ResourceMgrs[ponPort].UpdateAllocIdsForOnu(ctx, IntfOnuIDUniID,
- allocID)
+ intfOnuIDuniID := fmt.Sprintf("%d,%d,%d", ponPort, onuID, uniID)
+ // update cache
+ rsrcMgr.allocIDsForOnuLock.Lock()
+ rsrcMgr.allocIDsForOnu[intfOnuIDuniID] = allocIDs
+ rsrcMgr.allocIDsForOnuLock.Unlock()
+
+ // Note: if the write to the DB fails, the cache and the DB can become inconsistent.
+ // Although this is highly unlikely with DB retries in place, it is something to be addressed in the next release.
+ return rsrcMgr.PonRsrMgr.UpdateAllocIdsForOnu(ctx, intfOnuIDuniID,
+ allocIDs)
}
// GetCurrentGEMPortIDsForOnu returns gem ports for given pon interface , onu id and uni id
-func (RsrcMgr *OpenOltResourceMgr) GetCurrentGEMPortIDsForOnu(ctx context.Context, intfID uint32, onuID uint32,
+func (rsrcMgr *OpenOltResourceMgr) GetCurrentGEMPortIDsForOnu(ctx context.Context, intfID uint32, onuID uint32,
uniID uint32) []uint32 {
- /* Get gem ports for given pon interface , onu id and uni id. */
+ intfOnuIDuniID := fmt.Sprintf("%d,%d,%d", intfID, onuID, uniID)
- IntfOnuIDUniID := fmt.Sprintf("%d,%d,%d", intfID, onuID, uniID)
- return RsrcMgr.ResourceMgrs[intfID].GetCurrentGEMPortIDsForOnu(ctx, IntfOnuIDUniID)
+ // fetch from cache
+ rsrcMgr.gemPortIDsForOnuLock.RLock()
+ gemIDs, ok := rsrcMgr.gemPortIDsForOnu[intfOnuIDuniID]
+ rsrcMgr.gemPortIDsForOnuLock.RUnlock()
+ if ok {
+ return gemIDs
+ }
+ /* Get gem ports for given pon interface , onu id and uni id. */
+ gemIDs = rsrcMgr.PonRsrMgr.GetCurrentGEMPortIDsForOnu(ctx, intfOnuIDuniID)
+
+ // update cache
+ rsrcMgr.gemPortIDsForOnuLock.Lock()
+ rsrcMgr.gemPortIDsForOnu[intfOnuIDuniID] = gemIDs
+ rsrcMgr.gemPortIDsForOnuLock.Unlock()
+
+ return gemIDs
}
// GetCurrentAllocIDsForOnu returns alloc ids for given pon interface and onu id
-func (RsrcMgr *OpenOltResourceMgr) GetCurrentAllocIDsForOnu(ctx context.Context, intfID uint32, onuID uint32, uniID uint32) []uint32 {
+func (rsrcMgr *OpenOltResourceMgr) GetCurrentAllocIDsForOnu(ctx context.Context, intfID uint32, onuID uint32, uniID uint32) []uint32 {
- IntfOnuIDUniID := fmt.Sprintf("%d,%d,%d", intfID, onuID, uniID)
- AllocID := RsrcMgr.ResourceMgrs[intfID].GetCurrentAllocIDForOnu(ctx, IntfOnuIDUniID)
- if AllocID != nil {
- return AllocID
+ intfOnuIDuniID := fmt.Sprintf("%d,%d,%d", intfID, onuID, uniID)
+ // fetch from cache
+ rsrcMgr.allocIDsForOnuLock.RLock()
+ allocIDs, ok := rsrcMgr.allocIDsForOnu[intfOnuIDuniID]
+ rsrcMgr.allocIDsForOnuLock.RUnlock()
+ if ok {
+ return allocIDs
}
- return []uint32{}
+ allocIDs = rsrcMgr.PonRsrMgr.GetCurrentAllocIDForOnu(ctx, intfOnuIDuniID)
+
+ // update cache
+ rsrcMgr.allocIDsForOnuLock.Lock()
+ rsrcMgr.allocIDsForOnu[intfOnuIDuniID] = allocIDs
+ rsrcMgr.allocIDsForOnuLock.Unlock()
+
+ return allocIDs
}
// RemoveAllocIDForOnu removes the alloc id for given pon interface, onu id, uni id and alloc id
-func (RsrcMgr *OpenOltResourceMgr) RemoveAllocIDForOnu(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, allocID uint32) {
- allocIDs := RsrcMgr.GetCurrentAllocIDsForOnu(ctx, intfID, onuID, uniID)
+func (rsrcMgr *OpenOltResourceMgr) RemoveAllocIDForOnu(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, allocID uint32) {
+ allocIDs := rsrcMgr.GetCurrentAllocIDsForOnu(ctx, intfID, onuID, uniID)
for i := 0; i < len(allocIDs); i++ {
if allocIDs[i] == allocID {
allocIDs = append(allocIDs[:i], allocIDs[i+1:]...)
break
}
}
- err := RsrcMgr.UpdateAllocIdsForOnu(ctx, intfID, onuID, uniID, allocIDs)
+ err := rsrcMgr.UpdateAllocIdsForOnu(ctx, intfID, onuID, uniID, allocIDs)
if err != nil {
- logger.Errorf(ctx, "Failed to Remove Alloc Id For Onu. IntfID %d onuID %d uniID %d allocID %d",
+ logger.Errorf(ctx, "Failed to Remove Alloc Id For Onu. intfID %d onuID %d uniID %d allocID %d",
intfID, onuID, uniID, allocID)
}
}
// RemoveGemPortIDForOnu removes the gem port id for given pon interface, onu id, uni id and gem port id
-func (RsrcMgr *OpenOltResourceMgr) RemoveGemPortIDForOnu(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, gemPortID uint32) {
- gemPortIDs := RsrcMgr.GetCurrentGEMPortIDsForOnu(ctx, intfID, onuID, uniID)
+func (rsrcMgr *OpenOltResourceMgr) RemoveGemPortIDForOnu(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, gemPortID uint32) {
+ gemPortIDs := rsrcMgr.GetCurrentGEMPortIDsForOnu(ctx, intfID, onuID, uniID)
for i := 0; i < len(gemPortIDs); i++ {
if gemPortIDs[i] == gemPortID {
gemPortIDs = append(gemPortIDs[:i], gemPortIDs[i+1:]...)
break
}
}
- err := RsrcMgr.UpdateGEMPortIDsForOnu(ctx, intfID, onuID, uniID, gemPortIDs)
+ err := rsrcMgr.UpdateGEMPortIDsForOnu(ctx, intfID, onuID, uniID, gemPortIDs)
if err != nil {
- logger.Errorf(ctx, "Failed to Remove Gem Id For Onu. IntfID %d onuID %d uniID %d gemPortId %d",
+ logger.Errorf(ctx, "Failed to Remove Gem Id For Onu. intfID %d onuID %d uniID %d gemPortId %d",
intfID, onuID, uniID, gemPortID)
}
}
-//GetUniPortByPonPortGemPortFromKVStore retrieves onu and uni ID associated with the pon and gem ports.
-func (RsrcMgr *OpenOltResourceMgr) GetUniPortByPonPortGemPortFromKVStore(ctx context.Context, PonPort uint32, GemPort uint32) (uint32, uint32, error) {
- IntfGEMPortPath := fmt.Sprintf("%d,%d", PonPort, GemPort)
- logger.Debugf(ctx, "Getting ONU and UNI IDs from the path %s", IntfGEMPortPath)
- var Data []uint32
- Value, err := RsrcMgr.KVStore.Get(ctx, IntfGEMPortPath)
- if err == nil {
- if Value != nil {
- Val, _ := ponrmgr.ToByte(Value.Value)
- if err = json.Unmarshal(Val, &Data); err != nil {
- logger.Errorw(ctx, "Failed to unmarshal", log.Fields{"error": err})
- return 0, 0, errors.New("failed to unmarshal the data retrieved")
- }
- }
- } else {
- logger.Errorf(ctx, "Failed to get data from kvstore for %s", IntfGEMPortPath, err)
- return 0, 0, errors.New("could not get data")
- }
- if len(Data) < 2 {
- return 0, 0, errors.New("invalid data format")
- }
- return Data[0], Data[1], nil
-}
-
-// UpdateGEMportsPonportToOnuMapOnKVStore updates onu and uni id associated with the gem port to the kv store
-// This stored information is used when packet_indication is received and we need to derive the ONU Id for which
-// the packet arrived based on the pon_intf and gemport available in the packet_indication
-func (RsrcMgr *OpenOltResourceMgr) UpdateGEMportsPonportToOnuMapOnKVStore(ctx context.Context, gemPorts []uint32, PonPort uint32,
- onuID uint32, uniID uint32) error {
-
- /* Update onu and uni id associated with the gem port to the kv store. */
- var IntfGEMPortPath string
- Data := []uint32{onuID, uniID}
- for _, GEM := range gemPorts {
- IntfGEMPortPath = fmt.Sprintf("%d,%d", PonPort, GEM)
- Val, err := json.Marshal(Data)
- if err != nil {
- logger.Error(ctx, "failed to Marshal")
- return err
- }
-
- if err = RsrcMgr.KVStore.Put(ctx, IntfGEMPortPath, Val); err != nil {
- logger.Errorf(ctx, "Failed to update resource %s", IntfGEMPortPath)
- return err
- }
- }
- return nil
-}
-
-// RemoveGEMportPonportToOnuMapOnKVStore removes the relationship between the gem port and pon port
-func (RsrcMgr *OpenOltResourceMgr) RemoveGEMportPonportToOnuMapOnKVStore(ctx context.Context, GemPort uint32, PonPort uint32) {
- IntfGEMPortPath := fmt.Sprintf("%d,%d", PonPort, GemPort)
- err := RsrcMgr.KVStore.Delete(ctx, IntfGEMPortPath)
- if err != nil {
- logger.Errorf(ctx, "Failed to Remove Gem port-Pon port to onu map on kv store. Gem %d PonPort %d", GemPort, PonPort)
- }
-}
-
-// GetGEMPortID gets gem port id for a particular pon port, onu id and uni id and then update the resource map on
-// the KV store with the list of gemport_id allocated for the pon_intf_onu_id tuple
-func (RsrcMgr *OpenOltResourceMgr) GetGEMPortID(ctx context.Context, ponPort uint32, onuID uint32,
- uniID uint32, NumOfPorts uint32) ([]uint32, error) {
-
- /* Get gem port id for a particular pon port, onu id
- and uni id.
- */
-
- var err error
- IntfOnuIDUniID := fmt.Sprintf("%d,%d,%d", ponPort, onuID, uniID)
-
- RsrcMgr.GemPortIDMgmtLock[ponPort].Lock()
- defer RsrcMgr.GemPortIDMgmtLock[ponPort].Unlock()
-
- GEMPortList := RsrcMgr.ResourceMgrs[ponPort].GetCurrentGEMPortIDsForOnu(ctx, IntfOnuIDUniID)
- if GEMPortList != nil {
- return GEMPortList, nil
- }
-
- GEMPortList, err = RsrcMgr.ResourceMgrs[ponPort].GetResourceID(ctx, ponPort,
- ponrmgr.GEMPORT_ID, NumOfPorts)
- if err != nil && GEMPortList == nil {
- logger.Errorf(ctx, "Failed to get gem port id for %s", IntfOnuIDUniID)
- return nil, err
- }
-
- // update the resource map on KV store with the list of gemport_id
- // allocated for the pon_intf_onu_id tuple
- err = RsrcMgr.ResourceMgrs[ponPort].UpdateGEMPortIDsForOnu(ctx, IntfOnuIDUniID,
- GEMPortList)
- if err != nil {
- logger.Errorf(ctx, "Failed to update GEM ports to kv store for %s", IntfOnuIDUniID)
- return nil, err
- }
- _ = RsrcMgr.UpdateGEMportsPonportToOnuMapOnKVStore(ctx, GEMPortList, ponPort,
- onuID, uniID)
- return GEMPortList, err
-}
-
// UpdateGEMPortIDsForOnu updates gemport ids on to the kv store for a given pon port, onu id and uni id
-func (RsrcMgr *OpenOltResourceMgr) UpdateGEMPortIDsForOnu(ctx context.Context, ponPort uint32, onuID uint32,
- uniID uint32, GEMPortList []uint32) error {
- IntfOnuIDUniID := fmt.Sprintf("%d,%d,%d", ponPort, onuID, uniID)
- return RsrcMgr.ResourceMgrs[ponPort].UpdateGEMPortIDsForOnu(ctx, IntfOnuIDUniID,
- GEMPortList)
+func (rsrcMgr *OpenOltResourceMgr) UpdateGEMPortIDsForOnu(ctx context.Context, ponPort uint32, onuID uint32,
+ uniID uint32, gemIDs []uint32) error {
+ intfOnuIDuniID := fmt.Sprintf("%d,%d,%d", ponPort, onuID, uniID)
+ // update cache
+ rsrcMgr.gemPortIDsForOnuLock.Lock()
+ rsrcMgr.gemPortIDsForOnu[intfOnuIDuniID] = gemIDs
+ rsrcMgr.gemPortIDsForOnuLock.Unlock()
+
+ // Note: if the write to the DB fails, the cache and the DB could become inconsistent.
+ // Although this is highly unlikely with DB retries in place, it is something we have to address in the next release.
+ return rsrcMgr.PonRsrMgr.UpdateGEMPortIDsForOnu(ctx, intfOnuIDuniID,
+ gemIDs)
}
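This write path is the write-through pattern used throughout the rewritten resource manager: mutate the in-memory map under its lock, then persist to etcd. A minimal, self-contained sketch of the idea (the store interface and map types here are illustrative stand-ins, not the adapter's actual types):

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"sync"
)

// kv is a stand-in for the adapter's db.Backend; only Put is needed for this sketch.
type kv interface {
	Put(ctx context.Context, key string, value []byte) error
}

type memKV struct{ data map[string][]byte }

func (m *memKV) Put(_ context.Context, key string, value []byte) error {
	m.data[key] = value
	return nil
}

type gemCache struct {
	mu    sync.RWMutex
	cache map[string][]uint32
	store kv
}

// update mirrors UpdateGEMPortIDsForOnu above: cache first, then the KV store.
// If the Put fails the cache is ahead of the store until the next reconcile.
func (c *gemCache) update(ctx context.Context, key string, gemIDs []uint32) error {
	c.mu.Lock()
	c.cache[key] = gemIDs
	c.mu.Unlock()

	val, err := json.Marshal(gemIDs)
	if err != nil {
		return err
	}
	return c.store.Put(ctx, key, val)
}

func main() {
	c := &gemCache{cache: map[string][]uint32{}, store: &memKV{data: map[string][]byte{}}}
	key := fmt.Sprintf("%d,%d,%d", 0, 1, 0) // pon,onu,uni
	_ = c.update(context.Background(), key, []uint32{1024, 1025})
	fmt.Println(c.cache[key])
}

Reads then hit the map first and only fall back to etcd on a miss, which is what removes most of the KV round trips and lock contention this change targets.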
// FreeonuID releases(make free) onu id for a particular pon-port
-func (RsrcMgr *OpenOltResourceMgr) FreeonuID(ctx context.Context, intfID uint32, onuID []uint32) {
+func (rsrcMgr *OpenOltResourceMgr) FreeonuID(ctx context.Context, intfID uint32, onuID []uint32) {
- RsrcMgr.OnuIDMgmtLock[intfID].Lock()
- defer RsrcMgr.OnuIDMgmtLock[intfID].Unlock()
-
- if err := RsrcMgr.ResourceMgrs[intfID].TechProfileMgr.FreeResourceID(ctx, intfID, ponrmgr.ONU_ID, onuID); err != nil {
+ if err := rsrcMgr.PonRsrMgr.TechProfileMgr.FreeResourceID(ctx, intfID, ponrmgr.ONU_ID, onuID); err != nil {
logger.Errorw(ctx, "error-while-freeing-onu-id", log.Fields{
"intf-id": intfID,
"onu-id": onuID,
@@ -859,23 +487,23 @@
var IntfonuID string
for _, onu := range onuID {
IntfonuID = fmt.Sprintf("%d,%d", intfID, onu)
- RsrcMgr.ResourceMgrs[intfID].RemoveResourceMap(ctx, IntfonuID)
+ rsrcMgr.PonRsrMgr.RemoveResourceMap(ctx, IntfonuID)
}
}
// FreeAllocID frees AllocID on the PON resource pool and also frees the allocID association
// for the given OLT device.
-func (RsrcMgr *OpenOltResourceMgr) FreeAllocID(ctx context.Context, IntfID uint32, onuID uint32,
+// The caller should ensure that this call blocks and that the operation is serialized per
+// ONU so as not to cause resource corruption, since no mutexes are used here.
+func (rsrcMgr *OpenOltResourceMgr) FreeAllocID(ctx context.Context, intfID uint32, onuID uint32,
uniID uint32, allocID uint32) {
- RsrcMgr.AllocIDMgmtLock[IntfID].Lock()
- defer RsrcMgr.AllocIDMgmtLock[IntfID].Unlock()
- RsrcMgr.RemoveAllocIDForOnu(ctx, IntfID, onuID, uniID, allocID)
+ rsrcMgr.RemoveAllocIDForOnu(ctx, intfID, onuID, uniID, allocID)
allocIDs := make([]uint32, 0)
allocIDs = append(allocIDs, allocID)
- if err := RsrcMgr.ResourceMgrs[IntfID].TechProfileMgr.FreeResourceID(ctx, IntfID, ponrmgr.ALLOC_ID, allocIDs); err != nil {
+ if err := rsrcMgr.PonRsrMgr.TechProfileMgr.FreeResourceID(ctx, intfID, ponrmgr.ALLOC_ID, allocIDs); err != nil {
logger.Errorw(ctx, "error-while-freeing-alloc-id", log.Fields{
- "intf-id": IntfID,
+ "intf-id": intfID,
"onu-id": onuID,
"err": err.Error(),
})
@@ -884,33 +512,35 @@
// FreeGemPortID frees GemPortID on the PON resource pool and also frees the gemPortID association
// for the given OLT device.
-func (RsrcMgr *OpenOltResourceMgr) FreeGemPortID(ctx context.Context, IntfID uint32, onuID uint32,
+// The caller should ensure that this call blocks and that the operation is serialized per
+// ONU so as not to cause resource corruption, since no mutexes are used here.
+func (rsrcMgr *OpenOltResourceMgr) FreeGemPortID(ctx context.Context, intfID uint32, onuID uint32,
uniID uint32, gemPortID uint32) {
- RsrcMgr.GemPortIDMgmtLock[IntfID].Lock()
- defer RsrcMgr.GemPortIDMgmtLock[IntfID].Unlock()
+ rsrcMgr.RemoveGemPortIDForOnu(ctx, intfID, onuID, uniID, gemPortID)
- RsrcMgr.RemoveGemPortIDForOnu(ctx, IntfID, onuID, uniID, gemPortID)
gemPortIDs := make([]uint32, 0)
gemPortIDs = append(gemPortIDs, gemPortID)
- if err := RsrcMgr.ResourceMgrs[IntfID].TechProfileMgr.FreeResourceID(ctx, IntfID, ponrmgr.GEMPORT_ID, gemPortIDs); err != nil {
+ if err := rsrcMgr.PonRsrMgr.TechProfileMgr.FreeResourceID(ctx, intfID, ponrmgr.GEMPORT_ID, gemPortIDs); err != nil {
logger.Errorw(ctx, "error-while-freeing-gem-port-id", log.Fields{
- "intf-id": IntfID,
+ "intf-id": intfID,
"onu-id": onuID,
"err": err.Error(),
})
}
}
-// FreePONResourcesForONU make the pon resources free for a given pon interface and onu id, and the clears the
-// resource map and the onuID associated with (pon_intf_id, gemport_id) tuple,
-func (RsrcMgr *OpenOltResourceMgr) FreePONResourcesForONU(ctx context.Context, intfID uint32, onuID uint32, uniID uint32) {
+// FreePONResourcesForONU frees the pon resources for a given pon interface, onu id and uni id
+func (rsrcMgr *OpenOltResourceMgr) FreePONResourcesForONU(ctx context.Context, intfID uint32, onuID uint32, uniID uint32) {
- IntfOnuIDUniID := fmt.Sprintf("%d,%d,%d", intfID, onuID, uniID)
+ intfOnuIDuniID := fmt.Sprintf("%d,%d,%d", intfID, onuID, uniID)
- RsrcMgr.AllocIDMgmtLock[intfID].Lock()
- AllocIDs := RsrcMgr.ResourceMgrs[intfID].GetCurrentAllocIDForOnu(ctx, IntfOnuIDUniID)
+ AllocIDs := rsrcMgr.PonRsrMgr.GetCurrentAllocIDForOnu(ctx, intfOnuIDuniID)
- if err := RsrcMgr.ResourceMgrs[intfID].TechProfileMgr.FreeResourceID(ctx, intfID,
+ rsrcMgr.allocIDsForOnuLock.Lock()
+ delete(rsrcMgr.allocIDsForOnu, intfOnuIDuniID)
+ rsrcMgr.allocIDsForOnuLock.Unlock()
+
+ if err := rsrcMgr.PonRsrMgr.TechProfileMgr.FreeResourceID(ctx, intfID,
ponrmgr.ALLOC_ID,
AllocIDs); err != nil {
logger.Errorw(ctx, "error-while-freeing-all-alloc-ids-for-onu", log.Fields{
@@ -919,11 +549,14 @@
"err": err.Error(),
})
}
- RsrcMgr.AllocIDMgmtLock[intfID].Unlock()
- RsrcMgr.GemPortIDMgmtLock[intfID].Lock()
- GEMPortIDs := RsrcMgr.ResourceMgrs[intfID].GetCurrentGEMPortIDsForOnu(ctx, IntfOnuIDUniID)
- if err := RsrcMgr.ResourceMgrs[intfID].TechProfileMgr.FreeResourceID(ctx, intfID,
+ GEMPortIDs := rsrcMgr.PonRsrMgr.GetCurrentGEMPortIDsForOnu(ctx, intfOnuIDuniID)
+
+ rsrcMgr.gemPortIDsForOnuLock.Lock()
+ delete(rsrcMgr.gemPortIDsForOnu, intfOnuIDuniID)
+ rsrcMgr.gemPortIDsForOnuLock.Unlock()
+
+ if err := rsrcMgr.PonRsrMgr.TechProfileMgr.FreeResourceID(ctx, intfID,
ponrmgr.GEMPORT_ID,
GEMPortIDs); err != nil {
logger.Errorw(ctx, "error-while-freeing-all-gem-port-ids-for-onu", log.Fields{
@@ -932,28 +565,23 @@
"err": err.Error(),
})
}
- RsrcMgr.GemPortIDMgmtLock[intfID].Unlock()
// Clear resource map associated with (pon_intf_id, gemport_id) tuple.
- RsrcMgr.ResourceMgrs[intfID].RemoveResourceMap(ctx, IntfOnuIDUniID)
- // Clear the ONU Id associated with the (pon_intf_id, gemport_id) tuple.
- for _, GEM := range GEMPortIDs {
- _ = RsrcMgr.KVStore.Delete(ctx, fmt.Sprintf("%d,%d", intfID, GEM))
- }
+ rsrcMgr.PonRsrMgr.RemoveResourceMap(ctx, intfOnuIDuniID)
}
// IsFlowOnKvStore checks if the given flowID is present on the kv store
// Returns true if the flowID is found, otherwise it returns false
-func (RsrcMgr *OpenOltResourceMgr) IsFlowOnKvStore(ctx context.Context, ponIntfID uint32, onuID int32, uniID int32,
+func (rsrcMgr *OpenOltResourceMgr) IsFlowOnKvStore(ctx context.Context, intfID uint32, onuID int32, uniID int32,
flowID uint64) bool {
- FlowIDs, err := RsrcMgr.GetCurrentFlowIDsForOnu(ctx, ponIntfID, onuID, uniID)
+ FlowIDs, err := rsrcMgr.GetCurrentFlowIDsForOnu(ctx, intfID, onuID, uniID)
if err != nil {
// error logged in the called function
return false
}
if FlowIDs != nil {
- logger.Debugw(ctx, "Found flowId(s) for this ONU", log.Fields{"pon": ponIntfID, "onuID": onuID, "uniID": uniID})
+ logger.Debugw(ctx, "Found flowId(s) for this ONU", log.Fields{"pon": intfID, "onuID": onuID, "uniID": uniID})
for _, id := range FlowIDs {
if flowID == id {
return true
@@ -964,89 +592,117 @@
}
// GetTechProfileIDForOnu fetches Tech-Profile-ID from the KV-Store for the given onu based on the path
-// This path is formed as the following: {IntfID, OnuID, UniID}/tp_id
-func (RsrcMgr *OpenOltResourceMgr) GetTechProfileIDForOnu(ctx context.Context, IntfID uint32, OnuID uint32, UniID uint32) []uint32 {
- Path := fmt.Sprintf(TpIDPathSuffix, IntfID, OnuID, UniID)
- var Data []uint32
- Value, err := RsrcMgr.KVStore.Get(ctx, Path)
+// This path is formed as the following: {intfID, onuID, uniID}/tp_id
+func (rsrcMgr *OpenOltResourceMgr) GetTechProfileIDForOnu(ctx context.Context, intfID uint32, onuID uint32, uniID uint32) []uint32 {
+ Path := fmt.Sprintf(tpIDPathSuffix, intfID, onuID, uniID)
+ // fetch from cache
+ rsrcMgr.techProfileIDsForOnuLock.RLock()
+ tpIDs, ok := rsrcMgr.techProfileIDsForOnu[Path]
+ rsrcMgr.techProfileIDsForOnuLock.RUnlock()
+ if ok {
+ return tpIDs
+ }
+ Value, err := rsrcMgr.KVStore.Get(ctx, Path)
if err == nil {
if Value != nil {
Val, err := kvstore.ToByte(Value.Value)
if err != nil {
- logger.Errorw(ctx, "Failed to convert into byte array", log.Fields{"error": err})
- return Data
+ logger.Errorw(ctx, "Failed to convert into byte array", log.Fields{"err": err})
+ return tpIDs
}
- if err = json.Unmarshal(Val, &Data); err != nil {
- logger.Error(ctx, "Failed to unmarshal", log.Fields{"error": err})
- return Data
+ if err = json.Unmarshal(Val, &tpIDs); err != nil {
+ logger.Error(ctx, "Failed to unmarshal", log.Fields{"err": err})
+ return tpIDs
}
}
} else {
logger.Errorf(ctx, "Failed to get TP id from kvstore for path %s", Path)
}
- logger.Debugf(ctx, "Getting TP id %d from path %s", Data, Path)
- return Data
+ logger.Debugf(ctx, "Getting TP id %d from path %s", tpIDs, Path)
+
+ // update cache
+ rsrcMgr.techProfileIDsForOnuLock.Lock()
+ rsrcMgr.techProfileIDsForOnu[Path] = tpIDs
+ rsrcMgr.techProfileIDsForOnuLock.Unlock()
+
+ return tpIDs
}
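The read side of the same cache: serve from the map under a read lock, fall back to the KV store only on a miss, then repopulate. A short sketch of that miss-then-fill sequence (hypothetical names; error handling elided):

package main

import (
	"fmt"
	"sync"
)

// readThrough returns the cached value when present; on a miss it invokes
// fetch (standing in for a KV store Get) and stores the result for next time.
// Two concurrent misses may both call fetch; both writes store the same data,
// so the race is benign — the same trade-off GetTechProfileIDForOnu makes.
func readThrough(mu *sync.RWMutex, cache map[string][]uint32, key string, fetch func(string) []uint32) []uint32 {
	mu.RLock()
	v, ok := cache[key]
	mu.RUnlock()
	if ok {
		return v // cache hit, no KV round trip
	}
	v = fetch(key)
	mu.Lock()
	cache[key] = v
	mu.Unlock()
	return v
}

func main() {
	var mu sync.RWMutex
	cache := map[string][]uint32{}
	fromKV := func(string) []uint32 { return []uint32{64} } // pretend KV lookup
	fmt.Println(readThrough(&mu, cache, "0,1,0/tp_id", fromKV))
}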
// RemoveTechProfileIDsForOnu deletes all tech profile ids from the KV-Store for the given onu based on the path
-// This path is formed as the following: {IntfID, OnuID, UniID}/tp_id
-func (RsrcMgr *OpenOltResourceMgr) RemoveTechProfileIDsForOnu(ctx context.Context, IntfID uint32, OnuID uint32, UniID uint32) error {
- IntfOnuUniID := fmt.Sprintf(TpIDPathSuffix, IntfID, OnuID, UniID)
- if err := RsrcMgr.KVStore.Delete(ctx, IntfOnuUniID); err != nil {
- logger.Errorw(ctx, "Failed to delete techprofile id resource in KV store", log.Fields{"path": IntfOnuUniID})
+// This path is formed as the following: {intfID, onuID, uniID}/tp_id
+func (rsrcMgr *OpenOltResourceMgr) RemoveTechProfileIDsForOnu(ctx context.Context, intfID uint32, onuID uint32, uniID uint32) error {
+ intfOnuUniID := fmt.Sprintf(tpIDPathSuffix, intfID, onuID, uniID)
+ // update cache
+ rsrcMgr.techProfileIDsForOnuLock.Lock()
+ delete(rsrcMgr.techProfileIDsForOnu, intfOnuUniID)
+ rsrcMgr.techProfileIDsForOnuLock.Unlock()
+
+ if err := rsrcMgr.KVStore.Delete(ctx, intfOnuUniID); err != nil {
+ logger.Errorw(ctx, "Failed to delete techprofile id resource in KV store", log.Fields{"path": intfOnuUniID})
return err
}
return nil
}
// RemoveTechProfileIDForOnu deletes a specific tech profile id from the KV-Store for the given onu based on the path
-// This path is formed as the following: {IntfID, OnuID, UniID}/tp_id
-func (RsrcMgr *OpenOltResourceMgr) RemoveTechProfileIDForOnu(ctx context.Context, IntfID uint32, OnuID uint32, UniID uint32, TpID uint32) error {
- tpIDList := RsrcMgr.GetTechProfileIDForOnu(ctx, IntfID, OnuID, UniID)
+// This path is formed as the following: {intfID, onuID, uniID}/tp_id
+func (rsrcMgr *OpenOltResourceMgr) RemoveTechProfileIDForOnu(ctx context.Context, intfID uint32, onuID uint32, uniID uint32, tpID uint32) error {
+ tpIDList := rsrcMgr.GetTechProfileIDForOnu(ctx, intfID, onuID, uniID)
for i, tpIDInList := range tpIDList {
- if tpIDInList == TpID {
+ if tpIDInList == tpID {
tpIDList = append(tpIDList[:i], tpIDList[i+1:]...)
}
}
- IntfOnuUniID := fmt.Sprintf(TpIDPathSuffix, IntfID, OnuID, UniID)
+ intfOnuUniID := fmt.Sprintf(tpIDPathSuffix, intfID, onuID, uniID)
+ // update cache
+ rsrcMgr.techProfileIDsForOnuLock.Lock()
+ rsrcMgr.techProfileIDsForOnu[intfOnuUniID] = tpIDList
+ rsrcMgr.techProfileIDsForOnuLock.Unlock()
+
Value, err := json.Marshal(tpIDList)
if err != nil {
logger.Error(ctx, "failed to Marshal")
return err
}
- if err = RsrcMgr.KVStore.Put(ctx, IntfOnuUniID, Value); err != nil {
- logger.Errorf(ctx, "Failed to update resource %s", IntfOnuUniID)
+ if err = rsrcMgr.KVStore.Put(ctx, intfOnuUniID, Value); err != nil {
+ logger.Errorf(ctx, "Failed to update resource %s", intfOnuUniID)
return err
}
return err
}
// UpdateTechProfileIDForOnu updates (put) already present tech-profile-id for the given onu based on the path
-// This path is formed as the following: {IntfID, OnuID, UniID}/tp_id
-func (RsrcMgr *OpenOltResourceMgr) UpdateTechProfileIDForOnu(ctx context.Context, IntfID uint32, OnuID uint32,
- UniID uint32, TpID uint32) error {
+// This path is formed as the following: {intfID, onuID, uniID}/tp_id
+func (rsrcMgr *OpenOltResourceMgr) UpdateTechProfileIDForOnu(ctx context.Context, intfID uint32, onuID uint32,
+ uniID uint32, tpID uint32) error {
var Value []byte
var err error
- IntfOnuUniID := fmt.Sprintf(TpIDPathSuffix, IntfID, OnuID, UniID)
+ intfOnuUniID := fmt.Sprintf(tpIDPathSuffix, intfID, onuID, uniID)
- tpIDList := RsrcMgr.GetTechProfileIDForOnu(ctx, IntfID, OnuID, UniID)
+ tpIDList := rsrcMgr.GetTechProfileIDForOnu(ctx, intfID, onuID, uniID)
for _, value := range tpIDList {
- if value == TpID {
- logger.Debugf(ctx, "TpID %d is already in tpIdList for the path %s", TpID, IntfOnuUniID)
+ if value == tpID {
+ logger.Debugf(ctx, "tpID %d is already in tpIdList for the path %s", tpID, intfOnuUniID)
return err
}
}
- logger.Debugf(ctx, "updating tp id %d on path %s", TpID, IntfOnuUniID)
- tpIDList = append(tpIDList, TpID)
+ logger.Debugf(ctx, "updating tp id %d on path %s", tpID, intfOnuUniID)
+ tpIDList = append(tpIDList, tpID)
+
+ // update cache
+ rsrcMgr.techProfileIDsForOnuLock.Lock()
+ rsrcMgr.techProfileIDsForOnu[intfOnuUniID] = tpIDList
+ rsrcMgr.techProfileIDsForOnuLock.Unlock()
+
Value, err = json.Marshal(tpIDList)
if err != nil {
logger.Error(ctx, "failed to Marshal")
return err
}
- if err = RsrcMgr.KVStore.Put(ctx, IntfOnuUniID, Value); err != nil {
- logger.Errorf(ctx, "Failed to update resource %s", IntfOnuUniID)
+ if err = rsrcMgr.KVStore.Put(ctx, intfOnuUniID, Value); err != nil {
+ logger.Errorf(ctx, "Failed to update resource %s", intfOnuUniID)
return err
}
return err
@@ -1054,41 +710,56 @@
// StoreMeterInfoForOnu updates the meter id in the KV-Store for the given onu based on the path
// This path is formed as the following: <(pon_id, onu_id, uni_id)>/<tp_id>/meter_id/<direction>
-func (RsrcMgr *OpenOltResourceMgr) StoreMeterInfoForOnu(ctx context.Context, Direction string, IntfID uint32, OnuID uint32,
- UniID uint32, TpID uint32, meterInfo *MeterInfo) error {
+func (rsrcMgr *OpenOltResourceMgr) StoreMeterInfoForOnu(ctx context.Context, Direction string, intfID uint32, onuID uint32,
+ uniID uint32, tpID uint32, meterInfo *MeterInfo) error {
var Value []byte
var err error
- IntfOnuUniID := fmt.Sprintf(MeterIDPathSuffix, IntfID, OnuID, UniID, TpID, Direction)
+ intfOnuUniID := fmt.Sprintf(MeterIDPathSuffix, intfID, onuID, uniID, tpID, Direction)
+
+ // update cache
+ rsrcMgr.meterInfoForOnuLock.Lock()
+ rsrcMgr.meterInfoForOnu[intfOnuUniID] = meterInfo
+ rsrcMgr.meterInfoForOnuLock.Unlock()
+
Value, err = json.Marshal(*meterInfo)
if err != nil {
logger.Error(ctx, "failed to Marshal meter config")
return err
}
- if err = RsrcMgr.KVStore.Put(ctx, IntfOnuUniID, Value); err != nil {
- logger.Errorf(ctx, "Failed to store meter into KV store %s", IntfOnuUniID)
+ if err = rsrcMgr.KVStore.Put(ctx, intfOnuUniID, Value); err != nil {
+ logger.Errorf(ctx, "Failed to store meter into KV store %s", intfOnuUniID)
return err
}
- logger.Debugw(ctx, "meter info updated successfully", log.Fields{"path": IntfOnuUniID, "meter-info": meterInfo})
+ logger.Debugw(ctx, "meter info updated successfully", log.Fields{"path": intfOnuUniID, "meter-info": meterInfo})
return err
}
// GetMeterInfoForOnu fetches the meter id from the kv store for the given onu based on the path
// This path is formed as the following: <(pon_id, onu_id, uni_id)>/<tp_id>/meter_id/<direction>
-func (RsrcMgr *OpenOltResourceMgr) GetMeterInfoForOnu(ctx context.Context, Direction string, IntfID uint32, OnuID uint32,
- UniID uint32, TpID uint32) (*MeterInfo, error) {
- Path := fmt.Sprintf(MeterIDPathSuffix, IntfID, OnuID, UniID, TpID, Direction)
+func (rsrcMgr *OpenOltResourceMgr) GetMeterInfoForOnu(ctx context.Context, Direction string, intfID uint32, onuID uint32,
+ uniID uint32, tpID uint32) (*MeterInfo, error) {
+ Path := fmt.Sprintf(MeterIDPathSuffix, intfID, onuID, uniID, tpID, Direction)
+
+ // get from cache
+ rsrcMgr.meterInfoForOnuLock.RLock()
+ val, ok := rsrcMgr.meterInfoForOnu[Path]
+ rsrcMgr.meterInfoForOnuLock.RUnlock()
+ if ok {
+ return val, nil
+ }
+
var meterInfo MeterInfo
- Value, err := RsrcMgr.KVStore.Get(ctx, Path)
+ Value, err := rsrcMgr.KVStore.Get(ctx, Path)
if err == nil {
if Value != nil {
logger.Debug(ctx, "Found meter info in KV store", log.Fields{"Direction": Direction})
Val, er := kvstore.ToByte(Value.Value)
if er != nil {
- logger.Errorw(ctx, "Failed to convert into byte array", log.Fields{"error": er})
+ logger.Errorw(ctx, "Failed to convert into byte array", log.Fields{"err": er})
return nil, er
}
if er = json.Unmarshal(Val, &meterInfo); er != nil {
- logger.Error(ctx, "Failed to unmarshal meter info", log.Fields{"error": er})
+ logger.Error(ctx, "Failed to unmarshal meter info", log.Fields{"err": er})
return nil, er
}
} else {
@@ -1099,25 +770,30 @@
logger.Errorf(ctx, "Failed to get Meter config from kvstore for path %s", Path)
}
+ // update cache
+ rsrcMgr.meterInfoForOnuLock.Lock()
+ rsrcMgr.meterInfoForOnu[Path] = &meterInfo
+ rsrcMgr.meterInfoForOnuLock.Unlock()
+
return &meterInfo, err
}
// HandleMeterInfoRefCntUpdate increments or decrements the reference counter for a given meter.
// When reference count becomes 0, it clears the meter information from the kv store
-func (RsrcMgr *OpenOltResourceMgr) HandleMeterInfoRefCntUpdate(ctx context.Context, Direction string,
- IntfID uint32, OnuID uint32, UniID uint32, TpID uint32, increment bool) error {
- meterInfo, err := RsrcMgr.GetMeterInfoForOnu(ctx, Direction, IntfID, OnuID, UniID, TpID)
+func (rsrcMgr *OpenOltResourceMgr) HandleMeterInfoRefCntUpdate(ctx context.Context, Direction string,
+ intfID uint32, onuID uint32, uniID uint32, tpID uint32, increment bool) error {
+ meterInfo, err := rsrcMgr.GetMeterInfoForOnu(ctx, Direction, intfID, onuID, uniID, tpID)
if err != nil {
return err
} else if meterInfo == nil {
// If we are increasing the reference count, we expect the meter information to be present on KV store.
// But if decrementing the reference count, the meter is possibly already cleared from KV store. Just log warn but do not return error.
if increment {
- logger.Errorf(ctx, "error-fetching-meter-info-for-intf-%d-onu-%d-uni-%d-tp-id-%d-direction-%s", IntfID, OnuID, UniID, TpID, Direction)
- return fmt.Errorf("error-fetching-meter-info-for-intf-%d-onu-%d-uni-%d-tp-id-%d-direction-%s", IntfID, OnuID, UniID, TpID, Direction)
+ logger.Errorf(ctx, "error-fetching-meter-info-for-intf-%d-onu-%d-uni-%d-tp-id-%d-direction-%s", intfID, onuID, uniID, tpID, Direction)
+ return fmt.Errorf("error-fetching-meter-info-for-intf-%d-onu-%d-uni-%d-tp-id-%d-direction-%s", intfID, onuID, uniID, tpID, Direction)
}
logger.Warnw(ctx, "meter is already cleared",
- log.Fields{"intfID": IntfID, "onuID": OnuID, "uniID": UniID, "direction": Direction, "increment": increment})
+ log.Fields{"intfID": intfID, "onuID": onuID, "uniID": uniID, "direction": Direction, "increment": increment})
return nil
}
@@ -1127,13 +803,13 @@
meterInfo.RefCnt--
// If RefCnt become 0 clear the meter information from the DB.
if meterInfo.RefCnt == 0 {
- if err := RsrcMgr.RemoveMeterInfoForOnu(ctx, Direction, IntfID, OnuID, UniID, TpID); err != nil {
+ if err := rsrcMgr.RemoveMeterInfoForOnu(ctx, Direction, intfID, onuID, uniID, tpID); err != nil {
return err
}
return nil
}
}
- if err := RsrcMgr.StoreMeterInfoForOnu(ctx, Direction, IntfID, OnuID, UniID, TpID, meterInfo); err != nil {
+ if err := rsrcMgr.StoreMeterInfoForOnu(ctx, Direction, intfID, onuID, uniID, tpID, meterInfo); err != nil {
return err
}
return nil
@@ -1141,44 +817,44 @@
// RemoveMeterInfoForOnu deletes the meter id from the kV-Store for the given onu based on the path
// This path is formed as the following: <(pon_id, onu_id, uni_id)>/<tp_id>/meter_id/<direction>
-func (RsrcMgr *OpenOltResourceMgr) RemoveMeterInfoForOnu(ctx context.Context, Direction string, IntfID uint32, OnuID uint32,
- UniID uint32, TpID uint32) error {
- Path := fmt.Sprintf(MeterIDPathSuffix, IntfID, OnuID, UniID, TpID, Direction)
- if err := RsrcMgr.KVStore.Delete(ctx, Path); err != nil {
+func (rsrcMgr *OpenOltResourceMgr) RemoveMeterInfoForOnu(ctx context.Context, Direction string, intfID uint32, onuID uint32,
+ uniID uint32, tpID uint32) error {
+ Path := fmt.Sprintf(MeterIDPathSuffix, intfID, onuID, uniID, tpID, Direction)
+
+ // update cache
+ rsrcMgr.meterInfoForOnuLock.Lock()
+ delete(rsrcMgr.meterInfoForOnu, Path)
+ rsrcMgr.meterInfoForOnuLock.Unlock()
+
+ if err := rsrcMgr.KVStore.Delete(ctx, Path); err != nil {
logger.Errorf(ctx, "Failed to delete meter id %s from kvstore ", Path)
return err
}
return nil
}
-//AddGemToOnuGemInfo adds gemport to onugem info kvstore
-func (RsrcMgr *OpenOltResourceMgr) AddGemToOnuGemInfo(ctx context.Context, intfID uint32, onuID uint32, gemPort uint32) error {
- var onuGemData []OnuGemInfo
- var err error
-
- if err = RsrcMgr.ResourceMgrs[intfID].GetOnuGemInfo(ctx, intfID, &onuGemData); err != nil {
+//AddGemToOnuGemInfo adds gemport to onugem info kvstore and also local cache
+func (rsrcMgr *OpenOltResourceMgr) AddGemToOnuGemInfo(ctx context.Context, intfID uint32, onuID uint32, gemPort uint32) error {
+ onugem, err := rsrcMgr.GetOnuGemInfo(ctx, intfID, onuID)
+ if err != nil || onugem == nil || onugem.SerialNumber == "" {
logger.Errorf(ctx, "failed to get onuifo for intfid %d", intfID)
return err
}
- if len(onuGemData) == 0 {
- logger.Errorw(ctx, "failed to ger Onuid info ", log.Fields{"intfid": intfID, "onuid": onuID})
- return err
+ if onugem.OnuID == onuID {
+ for _, gem := range onugem.GemPorts {
+ if gem == gemPort {
+ logger.Debugw(ctx, "Gem already present in onugem info, skpping addition", log.Fields{"gem": gem})
+ return nil
+ }
+ }
+ logger.Debugw(ctx, "Added gem to onugem info", log.Fields{"gem": gemPort})
+ onugem.GemPorts = append(onugem.GemPorts, gemPort)
+ } else {
+ logger.Errorw(ctx, "onu id in OnuGemInfo does not match", log.Fields{"onuID": onuID, "ponIf": intfID, "onuGemInfoOnuID": onugem.OnuID})
+ return fmt.Errorf("onu-id-in-OnuGemInfo-does-not-match-%v", onuID)
}
- for idx, onugem := range onuGemData {
- if onugem.OnuID == onuID {
- for _, gem := range onuGemData[idx].GemPorts {
- if gem == gemPort {
- logger.Debugw(ctx, "Gem already present in onugem info, skpping addition", log.Fields{"gem": gem})
- return nil
- }
- }
- logger.Debugw(ctx, "Added gem to onugem info", log.Fields{"gem": gemPort})
- onuGemData[idx].GemPorts = append(onuGemData[idx].GemPorts, gemPort)
- break
- }
- }
- err = RsrcMgr.ResourceMgrs[intfID].AddOnuGemInfo(ctx, intfID, onuGemData)
+ err = rsrcMgr.AddOnuGemInfo(ctx, intfID, onuID, *onugem)
if err != nil {
logger.Error(ctx, "Failed to add onugem to kv store")
return err
@@ -1186,78 +862,161 @@
return err
}
-//GetOnuGemInfo gets onu gem info from the kvstore per interface
-func (RsrcMgr *OpenOltResourceMgr) GetOnuGemInfo(ctx context.Context, IntfID uint32) ([]OnuGemInfo, error) {
- var onuGemData []OnuGemInfo
+//RemoveGemFromOnuGemInfo removes gemport from onugem info on kvstore and also local cache
+func (rsrcMgr *OpenOltResourceMgr) RemoveGemFromOnuGemInfo(ctx context.Context, intfID uint32, onuID uint32, gemPort uint32) error {
+ onugem, err := rsrcMgr.GetOnuGemInfo(ctx, intfID, onuID)
+ if err != nil || onugem == nil || onugem.SerialNumber == "" {
+ logger.Errorf(ctx, "failed to get onuifo for intfid %d", intfID)
+ return err
+ }
+ updated := false
+ if onugem.OnuID == onuID {
+ for i, gem := range onugem.GemPorts {
+ if gem == gemPort {
+ logger.Debugw(ctx, "Gem found, removing from onu gem info", log.Fields{"gem": gem})
+ onugem.GemPorts = append(onugem.GemPorts[:i], onugem.GemPorts[i+1:]...)
+ updated = true
+ break
+ }
+ }
+ } else {
+ logger.Errorw(ctx, "onu id in OnuGemInfo does not match", log.Fields{"onuID": onuID, "ponIf": intfID, "onuGemInfoOnuID": onugem.OnuID})
+ return fmt.Errorf("onu-id-in-OnuGemInfo-does-not-match-%v", onuID)
+ }
+ if updated {
+ err = rsrcMgr.AddOnuGemInfo(ctx, intfID, onuID, *onugem)
+ if err != nil {
+ logger.Error(ctx, "Failed to add onugem to kv store")
+ return err
+ }
+ } else {
+ logger.Debugw(ctx, "Gem port not found in onu gem info", log.Fields{"gem": gemPort})
+ }
+ return nil
+}
- if err := RsrcMgr.ResourceMgrs[IntfID].GetOnuGemInfo(ctx, IntfID, &onuGemData); err != nil {
- logger.Errorf(ctx, "failed to get onuifo for intfid %d", IntfID)
+//GetOnuGemInfo gets onu gem info from the kvstore per interface
+func (rsrcMgr *OpenOltResourceMgr) GetOnuGemInfo(ctx context.Context, intfID uint32, onuID uint32) (*OnuGemInfo, error) {
+ var err error
+ var Val []byte
+ var onugem OnuGemInfo
+
+ path := fmt.Sprintf(OnuGemInfoPath, intfID, onuID)
+
+ rsrcMgr.onuGemInfoLock.RLock()
+ val, ok := rsrcMgr.onuGemInfo[path]
+ rsrcMgr.onuGemInfoLock.RUnlock()
+ if ok {
+ return val, nil
+ }
+ value, err := rsrcMgr.KVStore.Get(ctx, path)
+ if err != nil {
+ logger.Errorw(ctx, "Failed to get from kv store", log.Fields{"path": path})
+ return nil, err
+ } else if value == nil {
+ logger.Debug(ctx, "No onuinfo for path", log.Fields{"path": path})
+ return nil, nil // returning nil as this could happen if there are no onus for the interface yet
+ }
+ if Val, err = kvstore.ToByte(value.Value); err != nil {
+ logger.Error(ctx, "Failed to convert to byte array")
return nil, err
}
- return onuGemData, nil
+ if err = json.Unmarshal(Val, &onugem); err != nil {
+ logger.Error(ctx, "Failed to unmarshall")
+ return nil, err
+ }
+ logger.Debugw(ctx, "found onugem info from path", log.Fields{"path": path, "onuGemInfo": onugem})
+ rsrcMgr.onuGemInfoLock.Lock()
+ rsrcMgr.onuGemInfo[path] = &onugem
+ rsrcMgr.onuGemInfoLock.Unlock()
+
+ return &onugem, nil
}
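OnuGemInfo is now stored and cached under one key per (PON interface, ONU) instead of one list per interface, so a single ONU's record can be read, rewritten or deleted without touching its neighbours. A rough sketch of that keying; the path template and the cut-down struct below are illustrative only (the real ones are OnuGemInfoPath and OnuGemInfo elsewhere in this package):

package main

import (
	"encoding/json"
	"fmt"
)

// onuGemInfo is a cut-down, hypothetical stand-in for the adapter's OnuGemInfo type.
type onuGemInfo struct {
	OnuID        uint32   `json:"onu_id"`
	SerialNumber string   `json:"serial_number"`
	GemPorts     []uint32 `json:"gemports"`
}

func main() {
	// Hypothetical per-ONU key: one record per (intf, onu) pair.
	key := fmt.Sprintf("%d/onu_gem_info/%d", 0, 1)

	rec := onuGemInfo{OnuID: 1, SerialNumber: "BBSM00000001", GemPorts: []uint32{1024}}
	val, _ := json.Marshal(rec)
	fmt.Println(key, string(val)) // what would be written to etcd and mirrored in the cache
}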
// AddOnuGemInfo adds onu info on to the kvstore per interface
-func (RsrcMgr *OpenOltResourceMgr) AddOnuGemInfo(ctx context.Context, IntfID uint32, onuGem OnuGemInfo) error {
- var onuGemData []OnuGemInfo
+func (rsrcMgr *OpenOltResourceMgr) AddOnuGemInfo(ctx context.Context, intfID uint32, onuID uint32, onuGem OnuGemInfo) error {
+
+ var Value []byte
var err error
+ Path := fmt.Sprintf(OnuGemInfoPath, intfID, onuID)
- if err = RsrcMgr.ResourceMgrs[IntfID].GetOnuGemInfo(ctx, IntfID, &onuGemData); err != nil {
- logger.Errorf(ctx, "failed to get onuifo for intfid %d", IntfID)
- return olterrors.NewErrPersistence("get", "OnuGemInfo", uint64(IntfID),
- log.Fields{"onuGem": onuGem, "intfID": IntfID}, err)
- }
- onuGemData = append(onuGemData, onuGem)
- err = RsrcMgr.ResourceMgrs[IntfID].AddOnuGemInfo(ctx, IntfID, onuGemData)
+ rsrcMgr.onuGemInfoLock.Lock()
+ rsrcMgr.onuGemInfo[Path] = &onuGem
+ rsrcMgr.onuGemInfoLock.Unlock()
+
+ Value, err = json.Marshal(onuGem)
if err != nil {
- logger.Error(ctx, "Failed to add onugem to kv store")
- return olterrors.NewErrPersistence("set", "OnuGemInfo", uint64(IntfID),
- log.Fields{"onuGemData": onuGemData, "intfID": IntfID}, err)
+ logger.Error(ctx, "failed to Marshal")
+ return err
}
- logger.Debugw(ctx, "added onu to onugeminfo", log.Fields{"intf": IntfID, "onugem": onuGem})
+ if err = rsrcMgr.KVStore.Put(ctx, Path, Value); err != nil {
+ logger.Errorf(ctx, "Failed to update resource %s", Path)
+ return err
+ }
+ logger.Debugw(ctx, "added onu gem info", log.Fields{"onuGemInfo": onuGem})
+ return err
+}
+
+// DelOnuGemInfo deletes the onugem info from kvstore per ONU
+func (rsrcMgr *OpenOltResourceMgr) DelOnuGemInfo(ctx context.Context, intfID uint32, onuID uint32) error {
+ path := fmt.Sprintf(OnuGemInfoPath, intfID, onuID)
+ rsrcMgr.onuGemInfoLock.Lock()
+ logger.Debugw(ctx, "removing onu gem info", log.Fields{"onuGemInfo": rsrcMgr.onuGemInfo[path]})
+ delete(rsrcMgr.onuGemInfo, path)
+ rsrcMgr.onuGemInfoLock.Unlock()
+
+ if err := rsrcMgr.KVStore.Delete(ctx, path); err != nil {
+ logger.Errorf(ctx, "failed to remove resource %s", path)
+ return err
+ }
return nil
}
// AddUniPortToOnuInfo adds uni port to the onuinfo kvstore. check if the uni is already present if not update the kv store.
-func (RsrcMgr *OpenOltResourceMgr) AddUniPortToOnuInfo(ctx context.Context, intfID uint32, onuID uint32, portNo uint32) {
- var onuGemData []OnuGemInfo
- var err error
+func (rsrcMgr *OpenOltResourceMgr) AddUniPortToOnuInfo(ctx context.Context, intfID uint32, onuID uint32, portNo uint32) {
- if err = RsrcMgr.ResourceMgrs[intfID].GetOnuGemInfo(ctx, intfID, &onuGemData); err != nil {
- logger.Errorf(ctx, "failed to get onuifo for intfid %d", intfID)
+ onugem, err := rsrcMgr.GetOnuGemInfo(ctx, intfID, onuID)
+ if err != nil || onugem == nil || onugem.SerialNumber == "" {
+ logger.Warnf(ctx, "failed to get onuifo for intfid %d", intfID)
return
}
- for idx, onu := range onuGemData {
- if onu.OnuID == onuID {
- for _, uni := range onu.UniPorts {
- if uni == portNo {
- logger.Debugw(ctx, "uni already present in onugem info", log.Fields{"uni": portNo})
- return
- }
+
+ if onugem.OnuID == onuID {
+ for _, uni := range onugem.UniPorts {
+ if uni == portNo {
+ logger.Debugw(ctx, "uni already present in onugem info", log.Fields{"uni": portNo})
+ return
}
- onuGemData[idx].UniPorts = append(onuGemData[idx].UniPorts, portNo)
- break
}
+ onugem.UniPorts = append(onugem.UniPorts, portNo)
+ } else {
+ logger.Warnw(ctx, "onu id mismatch in onu gem info", log.Fields{"intfID": intfID, "onuID": onuID})
+ return
}
- err = RsrcMgr.ResourceMgrs[intfID].AddOnuGemInfo(ctx, intfID, onuGemData)
+ err = rsrcMgr.AddOnuGemInfo(ctx, intfID, onuID, *onugem)
if err != nil {
- logger.Errorw(ctx, "Failed to add uin port in onugem to kv store", log.Fields{"uni": portNo})
+ logger.Errorw(ctx, "Failed to add uni port in onugem to kv store", log.Fields{"uni": portNo})
return
}
}
//UpdateGemPortForPktIn updates gemport for pkt in path to kvstore, path being intfid, onuid, portno, vlan id, priority bit
-func (RsrcMgr *OpenOltResourceMgr) UpdateGemPortForPktIn(ctx context.Context, pktIn PacketInInfoKey, gemPort uint32) {
+func (rsrcMgr *OpenOltResourceMgr) UpdateGemPortForPktIn(ctx context.Context, pktIn PacketInInfoKey, gemPort uint32) {
path := fmt.Sprintf(OnuPacketINPath, pktIn.IntfID, pktIn.OnuID, pktIn.LogicalPort, pktIn.VlanID, pktIn.Priority)
+ // update cache
+ rsrcMgr.gemPortForPacketInInfoLock.Lock()
+ rsrcMgr.gemPortForPacketInInfo[path] = gemPort
+ rsrcMgr.gemPortForPacketInInfoLock.Unlock()
+
Value, err := json.Marshal(gemPort)
if err != nil {
logger.Error(ctx, "Failed to marshal data")
return
}
- if err = RsrcMgr.KVStore.Put(ctx, path, Value); err != nil {
+ if err = rsrcMgr.KVStore.Put(ctx, path, Value); err != nil {
logger.Errorw(ctx, "Failed to put to kvstore", log.Fields{"path": path, "value": gemPort})
return
}
@@ -1265,15 +1024,22 @@
}
// GetGemPortFromOnuPktIn gets the gem port from onu pkt in path, path being intfid, onuid, portno, vlan id, priority bit
-func (RsrcMgr *OpenOltResourceMgr) GetGemPortFromOnuPktIn(ctx context.Context, packetInInfoKey PacketInInfoKey) (uint32, error) {
+func (rsrcMgr *OpenOltResourceMgr) GetGemPortFromOnuPktIn(ctx context.Context, packetInInfoKey PacketInInfoKey) (uint32, error) {
var Val []byte
- var gemPort uint32
path := fmt.Sprintf(OnuPacketINPath, packetInInfoKey.IntfID, packetInInfoKey.OnuID, packetInInfoKey.LogicalPort,
packetInInfoKey.VlanID, packetInInfoKey.Priority)
+ // get from cache
+ rsrcMgr.gemPortForPacketInInfoLock.RLock()
+ gemPort, ok := rsrcMgr.gemPortForPacketInInfo[path]
+ rsrcMgr.gemPortForPacketInInfoLock.RUnlock()
+ if ok {
+ logger.Debugw(ctx, "found packein gemport from path", log.Fields{"path": path, "gem": gemPort})
+ return gemPort, nil
+ }
- value, err := RsrcMgr.KVStore.Get(ctx, path)
+ value, err := rsrcMgr.KVStore.Get(ctx, path)
if err != nil {
logger.Errorw(ctx, "Failed to get from kv store", log.Fields{"path": path})
return uint32(0), err
@@ -1291,15 +1057,20 @@
return uint32(0), err
}
logger.Debugw(ctx, "found packein gemport from path", log.Fields{"path": path, "gem": gemPort})
+ // update cache
+ rsrcMgr.gemPortForPacketInInfoLock.Lock()
+ rsrcMgr.gemPortForPacketInInfo[path] = gemPort
+ rsrcMgr.gemPortForPacketInInfoLock.Unlock()
return gemPort, nil
}
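Packet-in gemport lookups are keyed on the fully formatted path (intf, onu, logical port, VLAN, priority), so the cache is simply a string-keyed map in front of the KV store. A small sketch of building such a key from a PacketInInfoKey-style struct; the field types and path template here are illustrative, not the package's OnuPacketINPath constant:

package main

import "fmt"

// pktInKey mirrors the shape of PacketInInfoKey for this sketch (all fields as uint32 for simplicity).
type pktInKey struct {
	IntfID, OnuID, LogicalPort, VlanID, Priority uint32
}

func main() {
	cache := map[string]uint32{} // path -> gem port, as in gemPortForPacketInInfo

	k := pktInKey{IntfID: 0, OnuID: 1, LogicalPort: 16, VlanID: 20, Priority: 0}
	path := fmt.Sprintf("%d/%d/%d/%d/%d/pkt_in_gem", k.IntfID, k.OnuID, k.LogicalPort, k.VlanID, k.Priority)

	cache[path] = 1024
	fmt.Println(path, cache[path])
}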
//DeletePacketInGemPortForOnu deletes the packet-in gemport for ONU
-func (RsrcMgr *OpenOltResourceMgr) DeletePacketInGemPortForOnu(ctx context.Context, intfID uint32, onuID uint32, logicalPort uint32) error {
+func (rsrcMgr *OpenOltResourceMgr) DeletePacketInGemPortForOnu(ctx context.Context, intfID uint32, onuID uint32, logicalPort uint32) error {
path := fmt.Sprintf(OnuPacketINPathPrefix, intfID, onuID, logicalPort)
- value, err := RsrcMgr.KVStore.List(ctx, path)
+
+ value, err := rsrcMgr.KVStore.List(ctx, path)
if err != nil {
logger.Errorf(ctx, "failed-to-read-value-from-path-%s", path)
return errors.New("failed-to-read-value-from-path-" + path)
@@ -1308,12 +1079,16 @@
//remove them one by one
for key := range value {
// Formulate the right key path suffix ti be delete
- stringToBeReplaced := fmt.Sprintf(BasePathKvStore, RsrcMgr.KVStore.PathPrefix, RsrcMgr.DeviceID) + "/"
+ stringToBeReplaced := fmt.Sprintf(BasePathKvStore, rsrcMgr.KVStore.PathPrefix, rsrcMgr.DeviceID) + "/"
replacedWith := ""
key = strings.Replace(key, stringToBeReplaced, replacedWith, 1)
+ // update cache
+ rsrcMgr.gemPortForPacketInInfoLock.Lock()
+ delete(rsrcMgr.gemPortForPacketInInfo, key)
+ rsrcMgr.gemPortForPacketInInfoLock.Unlock()
logger.Debugf(ctx, "removing-key-%s", key)
- if err := RsrcMgr.KVStore.Delete(ctx, key); err != nil {
+ if err := rsrcMgr.KVStore.Delete(ctx, key); err != nil {
logger.Errorf(ctx, "failed-to-remove-resource-%s", key)
return err
}
@@ -1322,222 +1097,156 @@
return nil
}
-// DelOnuGemInfoForIntf deletes the onugem info from kvstore per interface
-func (RsrcMgr *OpenOltResourceMgr) DelOnuGemInfoForIntf(ctx context.Context, intfID uint32) error {
- if err := RsrcMgr.ResourceMgrs[intfID].DelOnuGemInfoForIntf(ctx, intfID); err != nil {
- logger.Errorw(ctx, "failed to delete onu gem info for", log.Fields{"intfid": intfID})
- return err
+//GetFlowIDsForGem gets the list of FlowIDs for the given gemport
+func (rsrcMgr *OpenOltResourceMgr) GetFlowIDsForGem(ctx context.Context, intf uint32, gem uint32) ([]uint64, error) {
+ path := fmt.Sprintf(FlowIDsForGem, intf, gem)
+
+ // get from cache
+ rsrcMgr.flowIDsForGemLock.RLock()
+ flowIDs, ok := rsrcMgr.flowIDsForGem[gem]
+ rsrcMgr.flowIDsForGemLock.RUnlock()
+ if ok {
+ return flowIDs, nil
}
- return nil
-}
-//GetNNIFromKVStore gets NNi intfids from kvstore. path being per device
-func (RsrcMgr *OpenOltResourceMgr) GetNNIFromKVStore(ctx context.Context) ([]uint32, error) {
-
- var nni []uint32
- var Val []byte
-
- path := NnniIntfID
- value, err := RsrcMgr.KVStore.Get(ctx, path)
+ value, err := rsrcMgr.KVStore.Get(ctx, path)
if err != nil {
- logger.Error(ctx, "failed to get data from kv store")
+ logger.Errorw(ctx, "Failed to get from kv store", log.Fields{"path": path})
+ return nil, err
+ } else if value == nil {
+ logger.Debug(ctx, "no flow-ids found", log.Fields{"path": path})
+ return nil, nil
+ }
+ Val, err := kvstore.ToByte(value.Value)
+ if err != nil {
+ logger.Error(ctx, "Failed to convert to byte array")
return nil, err
}
- if value != nil {
- if Val, err = kvstore.ToByte(value.Value); err != nil {
- logger.Error(ctx, "Failed to convert to byte array")
- return nil, err
- }
- if err = json.Unmarshal(Val, &nni); err != nil {
- logger.Error(ctx, "Failed to unmarshall")
- return nil, err
- }
- }
- return nni, err
-}
-// AddNNIToKVStore adds Nni interfaces to kvstore, path being per device.
-func (RsrcMgr *OpenOltResourceMgr) AddNNIToKVStore(ctx context.Context, nniIntf uint32) error {
- var Value []byte
-
- nni, err := RsrcMgr.GetNNIFromKVStore(ctx)
- if err != nil {
- logger.Error(ctx, "failed to fetch nni interfaces from kv store")
- return err
+ if err = json.Unmarshal(Val, &flowIDs); err != nil {
+ logger.Error(ctx, "Failed to unmarshall")
+ return nil, err
}
- path := NnniIntfID
- nni = append(nni, nniIntf)
- Value, err = json.Marshal(nni)
- if err != nil {
- logger.Error(ctx, "Failed to marshal data")
- }
- if err = RsrcMgr.KVStore.Put(ctx, path, Value); err != nil {
- logger.Errorw(ctx, "Failed to put to kvstore", log.Fields{"path": path, "value": Value})
- return err
- }
- logger.Debugw(ctx, "added nni to kv successfully", log.Fields{"path": path, "nni": nniIntf})
- return nil
-}
+ // update cache
+ rsrcMgr.flowIDsForGemLock.Lock()
+ rsrcMgr.flowIDsForGem[gem] = flowIDs
+ rsrcMgr.flowIDsForGemLock.Unlock()
-// DelNNiFromKVStore deletes nni interface list from kv store.
-func (RsrcMgr *OpenOltResourceMgr) DelNNiFromKVStore(ctx context.Context) error {
-
- path := NnniIntfID
-
- if err := RsrcMgr.KVStore.Delete(ctx, path); err != nil {
- logger.Errorw(ctx, "Failed to delete nni interfaces from kv store", log.Fields{"path": path})
- return err
- }
- return nil
+ return flowIDs, nil
}
//UpdateFlowIDsForGem updates flow id per gemport
-func (RsrcMgr *OpenOltResourceMgr) UpdateFlowIDsForGem(ctx context.Context, intf uint32, gem uint32, flowIDs []uint64) error {
+func (rsrcMgr *OpenOltResourceMgr) UpdateFlowIDsForGem(ctx context.Context, intf uint32, gem uint32, flowIDs []uint64) error {
var val []byte
- path := fmt.Sprintf(FlowIDsForGem, intf)
+ path := fmt.Sprintf(FlowIDsForGem, intf, gem)
- flowsForGem, err := RsrcMgr.GetFlowIDsGemMapForInterface(ctx, intf)
- if err != nil {
- logger.Error(ctx, "Failed to ger flowids for interface", log.Fields{"error": err, "intf": intf})
- return err
+ // update cache
+ rsrcMgr.flowIDsForGemLock.Lock()
+ rsrcMgr.flowIDsForGem[gem] = flowIDs
+ rsrcMgr.flowIDsForGemLock.Unlock()
+
+ if flowIDs == nil {
+ return nil
}
- if flowsForGem == nil {
- flowsForGem = make(map[uint32][]uint64)
- }
- flowsForGem[gem] = flowIDs
- val, err = json.Marshal(flowsForGem)
+ val, err := json.Marshal(flowIDs)
if err != nil {
- logger.Error(ctx, "Failed to marshal data", log.Fields{"error": err})
+ logger.Error(ctx, "Failed to marshal data", log.Fields{"err": err})
return err
}
- if err = RsrcMgr.KVStore.Put(ctx, path, val); err != nil {
- logger.Errorw(ctx, "Failed to put to kvstore", log.Fields{"error": err, "path": path, "value": val})
+ if err = rsrcMgr.KVStore.Put(ctx, path, val); err != nil {
+ logger.Errorw(ctx, "Failed to put to kvstore", log.Fields{"err": err, "path": path, "value": val})
return err
}
- logger.Debugw(ctx, "added flowid list for gem to kv successfully", log.Fields{"path": path, "flowidlist": flowsForGem[gem]})
+ logger.Debugw(ctx, "added flowid list for gem to kv successfully", log.Fields{"path": path, "flowidlist": flowIDs})
return nil
}
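Flow IDs are now written under one key per (interface, gem) pair rather than a single per-interface map, so updating or deleting one gem's flow list becomes a single KV operation instead of a read-modify-write of the whole map. A tiny sketch of the difference, using an illustrative path layout rather than the actual FlowIDsForGem format string:

package main

import "fmt"

func main() {
	intf, gem := uint32(0), uint32(1024)

	// Old layout (sketch): every gem's flows lived under one interface-wide key,
	// so a change to one gem meant fetching, mutating and rewriting the whole map.
	oldKey := fmt.Sprintf("%d/flow_ids_for_gem", intf)

	// New layout (sketch): one key per gem; delete the key and the entry is gone.
	newKey := fmt.Sprintf("%d/flow_ids_for_gem/%d", intf, gem)

	fmt.Println(oldKey, "->", newKey)
}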
//DeleteFlowIDsForGem deletes the flowID list entry per gem from kvstore.
-func (RsrcMgr *OpenOltResourceMgr) DeleteFlowIDsForGem(ctx context.Context, intf uint32, gem uint32) {
- path := fmt.Sprintf(FlowIDsForGem, intf)
- var val []byte
-
- flowsForGem, err := RsrcMgr.GetFlowIDsGemMapForInterface(ctx, intf)
- if err != nil {
- logger.Error(ctx, "Failed to ger flowids for interface", log.Fields{"error": err, "intf": intf})
- return
+func (rsrcMgr *OpenOltResourceMgr) DeleteFlowIDsForGem(ctx context.Context, intf uint32, gem uint32) {
+ path := fmt.Sprintf(FlowIDsForGem, intf, gem)
+ // update cache
+ rsrcMgr.flowIDsForGemLock.Lock()
+ delete(rsrcMgr.flowIDsForGem, gem)
+ rsrcMgr.flowIDsForGemLock.Unlock()
+ if err := rsrcMgr.KVStore.Delete(ctx, path); err != nil {
+ logger.Errorw(ctx, "Failed to delete from kvstore", log.Fields{"err": err, "path": path})
}
- if flowsForGem == nil {
- logger.Error(ctx, "No flowids found ", log.Fields{"intf": intf, "gemport": gem})
- return
- }
- // once we get the flows per gem map from kv , just delete the gem entry from the map
- delete(flowsForGem, gem)
- // once gem entry is deleted update the kv store.
- val, err = json.Marshal(flowsForGem)
- if err != nil {
- logger.Error(ctx, "Failed to marshal data", log.Fields{"error": err})
- return
- }
-
- if err = RsrcMgr.KVStore.Put(ctx, path, val); err != nil {
- logger.Errorw(ctx, "Failed to put to kvstore", log.Fields{"error": err, "path": path, "value": val})
- }
-}
-
-//GetFlowIDsGemMapForInterface gets flowids per gemport and interface
-func (RsrcMgr *OpenOltResourceMgr) GetFlowIDsGemMapForInterface(ctx context.Context, intf uint32) (map[uint32][]uint64, error) {
- path := fmt.Sprintf(FlowIDsForGem, intf)
- var flowsForGem map[uint32][]uint64
- var val []byte
- value, err := RsrcMgr.KVStore.Get(ctx, path)
- if err != nil {
- logger.Error(ctx, "failed to get data from kv store")
- return nil, err
- }
- if value != nil && value.Value != nil {
- if val, err = kvstore.ToByte(value.Value); err != nil {
- logger.Error(ctx, "Failed to convert to byte array ", log.Fields{"error": err})
- return nil, err
- }
- if err = json.Unmarshal(val, &flowsForGem); err != nil {
- logger.Error(ctx, "Failed to unmarshall", log.Fields{"error": err})
- return nil, err
- }
- }
- return flowsForGem, nil
-}
-
-//DeleteIntfIDGempMapPath deletes the intf id path used to store flow ids per gem to kvstore.
-func (RsrcMgr *OpenOltResourceMgr) DeleteIntfIDGempMapPath(ctx context.Context, intf uint32) {
- path := fmt.Sprintf(FlowIDsForGem, intf)
-
- if err := RsrcMgr.KVStore.Delete(ctx, path); err != nil {
- logger.Errorw(ctx, "Failed to delete nni interfaces from kv store", log.Fields{"path": path})
- }
-}
-
-// RemoveResourceMap Clear resource map associated with (intfid, onuid, uniid) tuple.
-func (RsrcMgr *OpenOltResourceMgr) RemoveResourceMap(ctx context.Context, intfID uint32, onuID int32, uniID int32) {
- IntfOnuIDUniID := fmt.Sprintf("%d,%d,%d", intfID, onuID, uniID)
- RsrcMgr.ResourceMgrs[intfID].RemoveResourceMap(ctx, IntfOnuIDUniID)
}
//GetMcastQueuePerInterfaceMap gets multicast queue info per pon interface
-func (RsrcMgr *OpenOltResourceMgr) GetMcastQueuePerInterfaceMap(ctx context.Context) (map[uint32][]uint32, error) {
+func (rsrcMgr *OpenOltResourceMgr) GetMcastQueuePerInterfaceMap(ctx context.Context) (map[uint32][]uint32, error) {
path := McastQueuesForIntf
- var mcastQueueToIntfMap map[uint32][]uint32
var val []byte
- kvPair, err := RsrcMgr.KVStore.Get(ctx, path)
+ rsrcMgr.mcastQueueForIntfLock.RLock()
+ if rsrcMgr.mcastQueueForIntfLoadedFromKvStore {
+ rsrcMgr.mcastQueueForIntfLock.RUnlock()
+ return rsrcMgr.mcastQueueForIntf, nil
+ }
+ rsrcMgr.mcastQueueForIntfLock.RUnlock()
+
+ kvPair, err := rsrcMgr.KVStore.Get(ctx, path)
if err != nil {
logger.Error(ctx, "failed to get data from kv store")
return nil, err
}
if kvPair != nil && kvPair.Value != nil {
if val, err = kvstore.ToByte(kvPair.Value); err != nil {
- logger.Error(ctx, "Failed to convert to byte array ", log.Fields{"error": err})
+ logger.Error(ctx, "Failed to convert to byte array ", log.Fields{"err": err})
return nil, err
}
- if err = json.Unmarshal(val, &mcastQueueToIntfMap); err != nil {
- logger.Error(ctx, "Failed to unmarshall ", log.Fields{"error": err})
+ rsrcMgr.mcastQueueForIntfLock.Lock()
+ defer rsrcMgr.mcastQueueForIntfLock.Unlock()
+ if err = json.Unmarshal(val, &rsrcMgr.mcastQueueForIntf); err != nil {
+ logger.Error(ctx, "Failed to unmarshall ", log.Fields{"err": err})
return nil, err
}
+ rsrcMgr.mcastQueueForIntfLoadedFromKvStore = true
}
- return mcastQueueToIntfMap, nil
+ return rsrcMgr.mcastQueueForIntf, nil
}
//AddMcastQueueForIntf adds multicast queue for pon interface
-func (RsrcMgr *OpenOltResourceMgr) AddMcastQueueForIntf(ctx context.Context, intf uint32, gem uint32, servicePriority uint32) error {
+func (rsrcMgr *OpenOltResourceMgr) AddMcastQueueForIntf(ctx context.Context, intf uint32, gem uint32, servicePriority uint32) error {
var val []byte
path := McastQueuesForIntf
- mcastQueues, err := RsrcMgr.GetMcastQueuePerInterfaceMap(ctx)
+ // Load local cache from kv store the first time
+ rsrcMgr.mcastQueueForIntfLock.RLock()
+ if !rsrcMgr.mcastQueueForIntfLoadedFromKvStore {
+ rsrcMgr.mcastQueueForIntfLock.RUnlock()
+ _, err := rsrcMgr.GetMcastQueuePerInterfaceMap(ctx)
+ if err != nil {
+ logger.Errorw(ctx, "Failed to get multicast queue info for interface", log.Fields{"err": err, "intf": intf})
+ return err
+ }
+ } else {
+ rsrcMgr.mcastQueueForIntfLock.RUnlock()
+ }
+
+ // Update KV store
+ rsrcMgr.mcastQueueForIntfLock.Lock()
+ rsrcMgr.mcastQueueForIntf[intf] = []uint32{gem, servicePriority}
+ val, err := json.Marshal(rsrcMgr.mcastQueueForIntf)
if err != nil {
- logger.Errorw(ctx, "Failed to get multicast queue info for interface", log.Fields{"error": err, "intf": intf})
+ rsrcMgr.mcastQueueForIntfLock.Unlock()
+ logger.Errorw(ctx, "Failed to marshal data", log.Fields{"err": err})
return err
}
- if mcastQueues == nil {
- mcastQueues = make(map[uint32][]uint32)
- }
- mcastQueues[intf] = []uint32{gem, servicePriority}
- if val, err = json.Marshal(mcastQueues); err != nil {
- logger.Errorw(ctx, "Failed to marshal data", log.Fields{"error": err})
+ rsrcMgr.mcastQueueForIntfLock.Unlock()
+
+ if err = rsrcMgr.KVStore.Put(ctx, path, val); err != nil {
+ logger.Errorw(ctx, "Failed to put to kvstore", log.Fields{"err": err, "path": path, "value": val})
return err
}
- if err = RsrcMgr.KVStore.Put(ctx, path, val); err != nil {
- logger.Errorw(ctx, "Failed to put to kvstore", log.Fields{"error": err, "path": path, "value": val})
- return err
- }
- logger.Debugw(ctx, "added multicast queue info to KV store successfully", log.Fields{"path": path, "mcastQueueInfo": mcastQueues[intf], "interfaceId": intf})
+ logger.Debugw(ctx, "added multicast queue info to KV store successfully", log.Fields{"path": path, "interfaceId": intf, "gem": gem, "svcPrior": servicePriority})
return nil
}
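The multicast-queue map is handled differently from the per-key caches above: it is loaded from etcd once, flagged as loaded, and then served and updated from memory with write-through on changes. A compact sketch of that load-once guard, using illustrative types (the adapter tracks this state with mcastQueueForIntfLoadedFromKvStore):

package main

import (
	"fmt"
	"sync"
)

type mcastCache struct {
	mu     sync.RWMutex
	loaded bool
	queues map[uint32][]uint32 // intf -> {gem, servicePriority}
}

// get serves from memory once loaded; the first caller pays for the KV read.
func (c *mcastCache) get(loadFromKV func() map[uint32][]uint32) map[uint32][]uint32 {
	c.mu.RLock()
	if c.loaded {
		defer c.mu.RUnlock()
		return c.queues
	}
	c.mu.RUnlock()

	data := loadFromKV() // stand-in for the etcd Get + unmarshal
	c.mu.Lock()
	c.queues = data
	c.loaded = true
	c.mu.Unlock()
	return data
}

func main() {
	c := &mcastCache{}
	fromKV := func() map[uint32][]uint32 { return map[uint32][]uint32{0: {4069, 0}} }
	fmt.Println(c.get(fromKV))
	fmt.Println(c.get(fromKV)) // second call is served from memory
}

The flag-plus-RWMutex form keeps readers cheap after the first load while still allowing AddMcastQueueForIntf to take the write lock when the map changes.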
//AddFlowGroupToKVStore adds flow group into KV store
-func (RsrcMgr *OpenOltResourceMgr) AddFlowGroupToKVStore(ctx context.Context, groupEntry *ofp.OfpGroupEntry, cached bool) error {
+func (rsrcMgr *OpenOltResourceMgr) AddFlowGroupToKVStore(ctx context.Context, groupEntry *ofp.OfpGroupEntry, cached bool) error {
var Value []byte
var err error
var path string
@@ -1560,6 +1269,10 @@
OutPorts: outPorts,
}
+ rsrcMgr.groupInfoLock.Lock()
+ rsrcMgr.groupInfo[path] = &groupInfo
+ rsrcMgr.groupInfoLock.Unlock()
+
Value, err = json.Marshal(groupInfo)
if err != nil {
@@ -1567,7 +1280,7 @@
return err
}
- if err = RsrcMgr.KVStore.Put(ctx, path, Value); err != nil {
+ if err = rsrcMgr.KVStore.Put(ctx, path, Value); err != nil {
logger.Errorf(ctx, "Failed to update resource %s", path)
return err
}
@@ -1575,14 +1288,18 @@
}
//RemoveFlowGroupFromKVStore removes flow group from KV store
-func (RsrcMgr *OpenOltResourceMgr) RemoveFlowGroupFromKVStore(ctx context.Context, groupID uint32, cached bool) error {
+func (rsrcMgr *OpenOltResourceMgr) RemoveFlowGroupFromKVStore(ctx context.Context, groupID uint32, cached bool) error {
var path string
if cached {
path = fmt.Sprintf(FlowGroupCached, groupID)
} else {
path = fmt.Sprintf(FlowGroup, groupID)
}
- if err := RsrcMgr.KVStore.Delete(ctx, path); err != nil {
+ rsrcMgr.groupInfoLock.Lock()
+ delete(rsrcMgr.groupInfo, path)
+ rsrcMgr.groupInfoLock.Unlock()
+
+ if err := rsrcMgr.KVStore.Delete(ctx, path); err != nil {
logger.Errorf(ctx, "Failed to remove resource %s due to %s", path, err)
return err
}
@@ -1592,7 +1309,7 @@
//GetFlowGroupFromKVStore fetches flow group from the KV store. Returns (false, {} error) if any problem occurs during
//fetching the data. Returns (true, groupInfo, nil) if the group is fetched successfully.
// Returns (false, {}, nil) if the group does not exists in the KV store.
-func (RsrcMgr *OpenOltResourceMgr) GetFlowGroupFromKVStore(ctx context.Context, groupID uint32, cached bool) (bool, GroupInfo, error) {
+func (rsrcMgr *OpenOltResourceMgr) GetFlowGroupFromKVStore(ctx context.Context, groupID uint32, cached bool) (bool, GroupInfo, error) {
var groupInfo GroupInfo
var path string
if cached {
@@ -1600,20 +1317,34 @@
} else {
path = fmt.Sprintf(FlowGroup, groupID)
}
- kvPair, err := RsrcMgr.KVStore.Get(ctx, path)
+
+ // read from cache
+ rsrcMgr.groupInfoLock.RLock()
+ gi, ok := rsrcMgr.groupInfo[path]
+ rsrcMgr.groupInfoLock.RUnlock()
+ if ok {
+ return true, *gi, nil
+ }
+
+ kvPair, err := rsrcMgr.KVStore.Get(ctx, path)
if err != nil {
return false, groupInfo, err
}
if kvPair != nil && kvPair.Value != nil {
Val, err := kvstore.ToByte(kvPair.Value)
if err != nil {
- logger.Errorw(ctx, "Failed to convert flow group into byte array", log.Fields{"error": err})
+ logger.Errorw(ctx, "Failed to convert flow group into byte array", log.Fields{"err": err})
return false, groupInfo, err
}
if err = json.Unmarshal(Val, &groupInfo); err != nil {
- logger.Errorw(ctx, "Failed to unmarshal", log.Fields{"error": err})
+ logger.Errorw(ctx, "Failed to unmarshal", log.Fields{"err": err})
return false, groupInfo, err
}
+ // update cache
+ rsrcMgr.groupInfoLock.Lock()
+ rsrcMgr.groupInfo[path] = &groupInfo
+ rsrcMgr.groupInfoLock.Unlock()
+
return true, groupInfo, nil
}
return false, groupInfo, nil
@@ -1631,19 +1362,3 @@
return nil, fmt.Errorf("unexpected-type-%T", t)
}
}
-
-func checkForFlowIDInList(FlowIDList []uint64, FlowID uint64) (bool, uint64) {
- /*
- Check for a flow id in a given list of flow IDs.
- :param FLowIDList: List of Flow IDs
- :param FlowID: Flowd to check in the list
- : return true and the index if present false otherwise.
- */
-
- for idx := range FlowIDList {
- if FlowID == FlowIDList[idx] {
- return true, uint64(idx)
- }
- }
- return false, 0
-}
diff --git a/internal/pkg/resourcemanager/resourcemanager_test.go b/internal/pkg/resourcemanager/resourcemanager_test.go
index 53f8898..443f418 100644
--- a/internal/pkg/resourcemanager/resourcemanager_test.go
+++ b/internal/pkg/resourcemanager/resourcemanager_test.go
@@ -27,19 +27,18 @@
"context"
"encoding/json"
"errors"
- tp "github.com/opencord/voltha-lib-go/v4/pkg/techprofile"
+ "github.com/opencord/voltha-openolt-adapter/pkg/mocks"
"reflect"
"strconv"
"strings"
- "sync"
"testing"
"time"
- "github.com/opencord/voltha-lib-go/v4/pkg/db"
- "github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
- fu "github.com/opencord/voltha-lib-go/v4/pkg/flows"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
- ponrmgr "github.com/opencord/voltha-lib-go/v4/pkg/ponresourcemanager"
+ "github.com/opencord/voltha-lib-go/v5/pkg/db"
+ "github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
+ fu "github.com/opencord/voltha-lib-go/v5/pkg/flows"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
+ ponrmgr "github.com/opencord/voltha-lib-go/v5/pkg/ponresourcemanager"
ofp "github.com/opencord/voltha-protos/v4/go/openflow_13"
"github.com/opencord/voltha-protos/v4/go/openolt"
)
@@ -77,7 +76,7 @@
KVStore *db.Backend
DeviceType string
DevInfo *openolt.DeviceInfo
- ResourceMgrs map[uint32]*ponrmgr.PONResourceManager
+ PonRsrMgr *ponrmgr.PONResourceManager
NumOfPonPorts uint32
}
@@ -87,12 +86,11 @@
// getResMgr mocks OpenOltResourceMgr struct.
func getResMgr() *fields {
- ctx := context.TODO()
var resMgr fields
resMgr.KVStore = &db.Backend{
Client: &MockResKVClient{},
}
- resMgr.ResourceMgrs = make(map[uint32]*ponrmgr.PONResourceManager)
+ resMgr.PonRsrMgr = &ponrmgr.PONResourceManager{}
ranges := make(map[string]interface{})
sharedIdxByType := make(map[string]string)
sharedIdxByType["ALLOC_ID"] = "ALLOC_ID"
@@ -108,25 +106,22 @@
ranges["gemport_id_shared"] = uint32(0)
ranges["flow_id_shared"] = uint32(0)
resMgr.NumOfPonPorts = 16
- ponMgr := &ponrmgr.PONResourceManager{}
- tpMgr, err := tp.NewTechProfile(ctx, ponMgr, "etcd", "127.0.0.1", "/")
- if err != nil {
- logger.Fatal(ctx, err.Error())
- }
-
- ponMgr.DeviceID = "onu-1"
- ponMgr.IntfIDs = []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
- ponMgr.KVStore = &db.Backend{
+ resMgr.PonRsrMgr.DeviceID = "onu-1"
+ resMgr.PonRsrMgr.IntfIDs = []uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+ resMgr.PonRsrMgr.KVStore = &db.Backend{
Client: &MockResKVClient{},
}
- ponMgr.PonResourceRanges = ranges
- ponMgr.SharedIdxByType = sharedIdxByType
- ponMgr.TechProfileMgr = tpMgr
+ resMgr.PonRsrMgr.Technology = "XGS-PON"
+ resMgr.PonRsrMgr.PonResourceRanges = ranges
+ resMgr.PonRsrMgr.SharedIdxByType = sharedIdxByType
+ /*
+ tpMgr, err := tp.NewTechProfile(ctx, resMgr.PonRsrMgr, "etcd", "127.0.0.1", "/")
+ if err != nil {
+ logger.Fatal(ctx, err.Error())
+ }
+ */
+ resMgr.PonRsrMgr.TechProfileMgr = &mocks.MockTechProfile{TpID: 64}
- var ponIntf uint32
- for ponIntf = 0; ponIntf < resMgr.NumOfPonPorts; ponIntf++ {
- resMgr.ResourceMgrs[ponIntf] = ponMgr
- }
return &resMgr
}
@@ -265,18 +260,15 @@
// testResMgrObject maps fields type to OpenOltResourceMgr type.
func testResMgrObject(testResMgr *fields) *OpenOltResourceMgr {
var rsrMgr = OpenOltResourceMgr{
- DeviceID: testResMgr.DeviceID,
- Args: testResMgr.Args,
- KVStore: testResMgr.KVStore,
- DeviceType: testResMgr.DeviceType,
- Address: testResMgr.Address,
- DevInfo: testResMgr.DevInfo,
- ResourceMgrs: testResMgr.ResourceMgrs,
+ DeviceID: testResMgr.DeviceID,
+ Args: testResMgr.Args,
+ KVStore: testResMgr.KVStore,
+ DeviceType: testResMgr.DeviceType,
+ Address: testResMgr.Address,
+ DevInfo: testResMgr.DevInfo,
+ PonRsrMgr: testResMgr.PonRsrMgr,
}
-
- rsrMgr.AllocIDMgmtLock = make([]sync.RWMutex, testResMgr.NumOfPonPorts)
- rsrMgr.GemPortIDMgmtLock = make([]sync.RWMutex, testResMgr.NumOfPonPorts)
- rsrMgr.OnuIDMgmtLock = make([]sync.RWMutex, testResMgr.NumOfPonPorts)
+ rsrMgr.InitLocalCache()
return &rsrMgr
}
@@ -284,6 +276,7 @@
func TestNewResourceMgr(t *testing.T) {
type args struct {
deviceID string
+ intfID uint32
KVStoreAddress string
kvStoreType string
deviceType string
@@ -295,14 +288,14 @@
args args
want *OpenOltResourceMgr
}{
- {"NewResourceMgr-2", args{"olt1", "1:2", "etcd",
+ {"NewResourceMgr-2", args{"olt1", 0, "1:2", "etcd",
"onu", &openolt.DeviceInfo{OnuIdStart: 1, OnuIdEnd: 1}, "service/voltha"}, &OpenOltResourceMgr{}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
- if got := NewResourceMgr(ctx, tt.args.deviceID, tt.args.KVStoreAddress, tt.args.kvStoreType, tt.args.deviceType, tt.args.devInfo, tt.args.kvStorePrefix); reflect.TypeOf(got) != reflect.TypeOf(tt.want) {
+ if got := NewResourceMgr(ctx, tt.args.intfID, tt.args.deviceID, tt.args.KVStoreAddress, tt.args.kvStoreType, tt.args.deviceType, tt.args.devInfo, tt.args.kvStorePrefix); reflect.TypeOf(got) != reflect.TypeOf(tt.want) {
t.Errorf("NewResourceMgr() = %v, want %v", got, tt.want)
}
})
@@ -310,19 +303,23 @@
}
func TestOpenOltResourceMgr_Delete(t *testing.T) {
+ type args struct {
+ intfID uint32
+ }
tests := []struct {
name string
fields *fields
wantErr error
+ args args
}{
- {"Delete-1", getResMgr(), errors.New("failed to clear device resource pool")},
+ {"Delete-1", getResMgr(), errors.New("failed to clear device resource pool"), args{intfID: 0}},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
RsrcMgr := testResMgrObject(tt.fields)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
- if err := RsrcMgr.Delete(ctx); (err != nil) && reflect.TypeOf(err) != reflect.TypeOf(tt.wantErr) {
+ if err := RsrcMgr.Delete(ctx, tt.args.intfID); (err != nil) && reflect.TypeOf(err) != reflect.TypeOf(tt.wantErr) {
t.Errorf("Delete() error = %v, wantErr %v", err, tt.wantErr)
}
})
@@ -374,33 +371,6 @@
}
}
-func TestOpenOltResourceMgr_GetAllocID(t *testing.T) {
-
- type args struct {
- intfID uint32
- onuID uint32
- uniID uint32
- }
- tests := []struct {
- name string
- fields *fields
- args args
- want uint32
- }{
- {"GetAllocID-1", getResMgr(), args{1, 2, 2}, 0},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- RsrcMgr := testResMgrObject(tt.fields)
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
- if got := RsrcMgr.GetAllocID(ctx, tt.args.intfID, tt.args.onuID, tt.args.uniID); reflect.TypeOf(got) != reflect.TypeOf(tt.want) {
- t.Errorf("GetAllocID() = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
func TestOpenOltResourceMgr_GetCurrentAllocIDForOnu(t *testing.T) {
type args struct {
intfID uint32
@@ -420,8 +390,16 @@
RsrcMgr := testResMgrObject(tt.fields)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
- if got := RsrcMgr.GetCurrentAllocIDsForOnu(ctx, tt.args.intfID, tt.args.onuID, tt.args.uniID); !reflect.DeepEqual(got, tt.want) {
+ got := RsrcMgr.GetCurrentAllocIDsForOnu(ctx, tt.args.intfID, tt.args.onuID, tt.args.uniID)
+ if len(got) != len(tt.want) {
t.Errorf("GetCurrentAllocIDsForOnu() = %v, want %v", got, tt.want)
+ } else {
+ for i := range tt.want {
+ if got[i] != tt.want[i] {
+ t.Errorf("GetCurrentAllocIDsForOnu() = %v, want %v", got, tt.want)
+ break
+ }
+ }
}
})
}
@@ -484,40 +462,6 @@
}
}
-func TestOpenOltResourceMgr_GetGEMPortID(t *testing.T) {
- type args struct {
- ponPort uint32
- onuID uint32
- uniID uint32
- NumOfPorts uint32
- }
- tests := []struct {
- name string
- fields *fields
- args args
- want []uint32
- wantErr error
- }{
- {"GetGEMPortID-1", getResMgr(), args{1, 2, 2, 2}, []uint32{},
- errors.New("failed to get gem port")},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- RsrcMgr := testResMgrObject(tt.fields)
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
- got, err := RsrcMgr.GetGEMPortID(ctx, tt.args.ponPort, tt.args.onuID, tt.args.uniID, tt.args.NumOfPorts)
- if reflect.TypeOf(err) != reflect.TypeOf(tt.wantErr) && err != nil {
- t.Errorf("GetGEMPortID() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if reflect.TypeOf(got) != reflect.TypeOf(tt.want) {
- t.Errorf("GetGEMPortID() got = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
func TestOpenOltResourceMgr_GetMeterInfoForOnu(t *testing.T) {
type args struct {
Direction string
@@ -693,34 +637,6 @@
}
}
-func TestOpenOltResourceMgr_UpdateFlowIDInfo(t *testing.T) {
- type args struct {
- ponIntfID int32
- onuID int32
- uniID int32
- flowID uint64
- flowData FlowInfo
- }
- tests := []struct {
- name string
- fields *fields
- args args
- wantErr error
- }{
- {"UpdateFlowIDInfo-1", getResMgr(), args{1, 2, 2, 2, FlowInfo{}}, errors.New("")},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- RsrcMgr := testResMgrObject(tt.fields)
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
- if err := RsrcMgr.UpdateFlowIDInfo(ctx, uint32(tt.args.ponIntfID), tt.args.onuID, tt.args.uniID, tt.args.flowID, tt.args.flowData); err != nil && reflect.TypeOf(err) != reflect.TypeOf(tt.wantErr) {
- t.Errorf("UpdateFlowIDInfo() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
-
func TestOpenOltResourceMgr_UpdateGEMPortIDsForOnu(t *testing.T) {
type args struct {
@@ -750,35 +666,6 @@
}
}
-func TestOpenOltResourceMgr_UpdateGEMportsPonportToOnuMapOnKVStore(t *testing.T) {
- type args struct {
- gemPorts []uint32
- PonPort uint32
- onuID uint32
- uniID uint32
- }
- tests := []struct {
- name string
- fields *fields
- args args
- wantErr error
- }{
- {"UpdateGEMportsPonportToOnuMapOnKVStore-1", getResMgr(), args{[]uint32{1, 2},
- 1, 2, 2}, errors.New("failed to update resource")},
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- RsrcMgr := testResMgrObject(tt.fields)
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
- if err := RsrcMgr.UpdateGEMportsPonportToOnuMapOnKVStore(ctx, tt.args.gemPorts, tt.args.PonPort,
- tt.args.onuID, tt.args.uniID); err != nil && reflect.TypeOf(err) != reflect.TypeOf(tt.wantErr) {
- t.Errorf("UpdateGEMportsPonportToOnuMapOnKVStore() error = %v, wantErr %v", err, tt.wantErr)
- }
- })
- }
-}
-
func TestOpenOltResourceMgr_UpdateMeterIDForOnu(t *testing.T) {
type args struct {
Direction string
diff --git a/pkg/mocks/common.go b/pkg/mocks/common.go
index 7a67acf..360007a 100644
--- a/pkg/mocks/common.go
+++ b/pkg/mocks/common.go
@@ -18,7 +18,7 @@
package mocks
import (
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
)
var logger log.CLogger
diff --git a/pkg/mocks/mockAdapterProxy.go b/pkg/mocks/mockAdapterProxy.go
index c410b48..20ea24d 100644
--- a/pkg/mocks/mockAdapterProxy.go
+++ b/pkg/mocks/mockAdapterProxy.go
@@ -43,3 +43,16 @@
}
return nil
}
+
+// TechProfileInstanceRequest mocks TechProfileInstanceRequest function
+func (ma *MockAdapterProxy) TechProfileInstanceRequest(ctx context.Context,
+ tpPath string,
+ parentPonPort uint32,
+ onuID uint32,
+ uniID uint32,
+ fromAdapter string,
+ toAdapter string,
+ toDeviceID string,
+ proxyDeviceID string) (*inter_container.InterAdapterTechProfileDownloadMessage, error) {
+ return nil, nil
+}
diff --git a/pkg/mocks/mockCoreProxy.go b/pkg/mocks/mockCoreProxy.go
index e143fbd..f572f09 100644
--- a/pkg/mocks/mockCoreProxy.go
+++ b/pkg/mocks/mockCoreProxy.go
@@ -22,7 +22,7 @@
"errors"
"fmt"
- "github.com/opencord/voltha-lib-go/v4/pkg/kafka"
+ "github.com/opencord/voltha-lib-go/v5/pkg/kafka"
"github.com/opencord/voltha-protos/v4/go/voltha"
)
@@ -132,6 +132,11 @@
if parentdeviceID == "" {
return nil, errors.New("no deviceID")
}
+ for k, v := range mcp.Devices {
+ if k == "olt" {
+ return v, nil
+ }
+ }
return nil, nil
}
diff --git a/pkg/mocks/mockKVClient.go b/pkg/mocks/mockKVClient.go
index 164e896..6884fc7 100644
--- a/pkg/mocks/mockKVClient.go
+++ b/pkg/mocks/mockKVClient.go
@@ -25,12 +25,9 @@
"strings"
"time"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
- "github.com/opencord/voltha-openolt-adapter/internal/pkg/resourcemanager"
-
- "github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
+ "github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
ofp "github.com/opencord/voltha-protos/v4/go/openflow_13"
- openolt "github.com/opencord/voltha-protos/v4/go/openolt"
)
const (
@@ -62,6 +59,21 @@
type MockKVClient struct {
}
+// OnuGemInfo holds onu information along with gem port list and uni port list
+type OnuGemInfo struct {
+ OnuID uint32
+ SerialNumber string
+ IntfID uint32
+ GemPorts []uint32
+ UniPorts []uint32
+}
+
+// GroupInfo holds group information
+type GroupInfo struct {
+ GroupID uint32
+ OutPorts []uint32
+}
+
// List mock function implementation for KVClient
func (kvclient *MockKVClient) List(ctx context.Context, key string) (map[string]*kvstore.KVPair, error) {
if key != "" {
@@ -136,24 +148,7 @@
str, _ := json.Marshal(data)
return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
}
- if strings.Contains(key, "/{olt}/{0,-1,-1}/flow_id_info/") {
- //multicast flow
- data := resourcemanager.FlowInfo{
- Flow: &openolt.Flow{FlowId: 1, OnuId: 0, UniId: 0, GemportId: 4000},
- }
- logger.Debug(ctx, "Error Error Error Key:", FlowIDs)
- str, _ := json.Marshal(data)
- return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
- }
- if strings.Contains(key, FlowIDInfo) {
- data := resourcemanager.FlowInfo{
- Flow: &openolt.Flow{FlowId: 1, OnuId: 1, UniId: 1, GemportId: 1},
- }
- logger.Debug(ctx, "Error Error Error Key:", FlowIDs)
- str, _ := json.Marshal(data)
- return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
- }
if strings.Contains(key, GemportIDs) {
logger.Debug(ctx, "Error Error Error Key:", GemportIDs)
data := []uint32{1}
@@ -168,7 +163,7 @@
}
if strings.Contains(key, FlowGroup) || strings.Contains(key, FlowGroupCached) {
logger.Debug(ctx, "Error Error Error Key:", FlowGroup)
- groupInfo := resourcemanager.GroupInfo{
+ groupInfo := GroupInfo{
GroupID: 2,
OutPorts: []uint32{1},
}
@@ -178,11 +173,13 @@
if strings.Contains(key, OnuPacketIn) {
return getPacketInGemPort(key)
}
+
if strings.Contains(key, OnuGemInfoPath) {
- var data []resourcemanager.OnuGemInfo
+ var data []OnuGemInfo
str, _ := json.Marshal(data)
return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
}
+
//Interface, GEM port path
if strings.Contains(key, "0,255") {
//return onuID, uniID associated with the given interface and GEM port
diff --git a/pkg/mocks/mockTechprofile.go b/pkg/mocks/mockTechprofile.go
index e51f44a..e06d016 100644
--- a/pkg/mocks/mockTechprofile.go
+++ b/pkg/mocks/mockTechprofile.go
@@ -20,8 +20,7 @@
import (
"context"
- "github.com/opencord/voltha-lib-go/v4/pkg/db"
- tp "github.com/opencord/voltha-lib-go/v4/pkg/techprofile"
+ "github.com/opencord/voltha-lib-go/v5/pkg/db"
tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
)
@@ -35,103 +34,98 @@
return &db.Backend{Client: &MockKVClient{}}
}
-// GetTechProfileInstanceKVPath to mock techprofile GetTechProfileInstanceKVPath method
-func (m MockTechProfile) GetTechProfileInstanceKVPath(ctx context.Context, techProfiletblID uint32, uniPortName string) string {
- return ""
-
-}
-
-// GetTPInstanceFromKVStore to mock techprofile GetTPInstanceFromKVStore method
-func (m MockTechProfile) GetTPInstanceFromKVStore(ctx context.Context, techProfiletblID uint32, path string) (interface{}, error) {
+// GetTPInstance to mock techprofile GetTPInstance method
+func (m MockTechProfile) GetTPInstance(ctx context.Context, path string) (interface{}, error) {
logger.Debug(ctx, "GetTPInstance")
- if techProfiletblID == 64 {
- return &tp.TechProfile{
- Name: "mock-tech-profile",
- SubscriberIdentifier: "257",
- ProfileType: "mock",
- Version: 0,
- NumGemPorts: 1,
- UsScheduler: tp.IScheduler{
- AllocID: 1,
- Direction: "upstream",
- AdditionalBw: "None",
- Priority: 0,
- Weight: 0,
- QSchedPolicy: "",
- },
- DsScheduler: tp.IScheduler{
- AllocID: 1,
- Direction: "downstream",
- AdditionalBw: "None",
- Priority: 0,
- Weight: 0,
- QSchedPolicy: "",
- },
- UpstreamGemPortAttributeList: []tp.IGemPortAttribute{{
- GemportID: 1,
- PbitMap: "0b11111111",
- },
- },
- DownstreamGemPortAttributeList: []tp.IGemPortAttribute{{
- GemportID: 1,
- PbitMap: "0b11111111",
- },
- },
- }, nil
- } else if techProfiletblID == 65 {
- return &tp.EponProfile{
- Name: "mock-epon-profile",
- SubscriberIdentifier: "257",
- ProfileType: "mock",
- Version: 0,
- NumGemPorts: 2,
- UpstreamQueueAttributeList: nil,
- DownstreamQueueAttributeList: nil,
- }, nil
- } else {
- return nil, nil
- }
+ var usGemPortAttributeList []*tp_pb.GemPortAttributes
+ var dsGemPortAttributeList []*tp_pb.GemPortAttributes
+ usGemPortAttributeList = append(usGemPortAttributeList, &tp_pb.GemPortAttributes{
+ GemportId: 1,
+ PbitMap: "0b11111111",
+ })
+ dsGemPortAttributeList = append(dsGemPortAttributeList, &tp_pb.GemPortAttributes{
+ GemportId: 1,
+ PbitMap: "0b11111111",
+ })
+ return &tp_pb.TechProfileInstance{
+ Name: "mock-tech-profile",
+ SubscriberIdentifier: "257",
+ ProfileType: "mock",
+ Version: 0,
+ NumGemPorts: 1,
+ InstanceControl: &tp_pb.InstanceControl{
+ Onu: "multi-instance",
+ Uni: "single-instance",
+ MaxGemPayloadSize: "",
+ },
+ UsScheduler: &tp_pb.SchedulerAttributes{
+ AllocId: 1,
+ Direction: tp_pb.Direction_UPSTREAM,
+ AdditionalBw: tp_pb.AdditionalBW_AdditionalBW_None,
+ Priority: 0,
+ Weight: 0,
+ QSchedPolicy: tp_pb.SchedulingPolicy_WRR,
+ },
+ DsScheduler: &tp_pb.SchedulerAttributes{
+ AllocId: 1,
+ Direction: tp_pb.Direction_DOWNSTREAM,
+ AdditionalBw: tp_pb.AdditionalBW_AdditionalBW_None,
+ Priority: 0,
+ Weight: 0,
+ QSchedPolicy: tp_pb.SchedulingPolicy_WRR,
+ },
+ UpstreamGemPortAttributeList: usGemPortAttributeList,
+ DownstreamGemPortAttributeList: dsGemPortAttributeList,
+ }, nil
+
}
-// CreateTechProfInstance to mock techprofile CreateTechProfInstance method
-func (m MockTechProfile) CreateTechProfInstance(ctx context.Context, techProfiletblID uint32, uniPortName string, intfID uint32) (interface{}, error) {
+// CreateTechProfileInstance to mock techprofile CreateTechProfileInstance method
+func (m MockTechProfile) CreateTechProfileInstance(ctx context.Context, techProfiletblID uint32, uniPortName string, intfID uint32) (interface{}, error) {
+ var usGemPortAttributeList []*tp_pb.GemPortAttributes
+ var dsGemPortAttributeList []*tp_pb.GemPortAttributes
if techProfiletblID == 64 {
- return &tp.TechProfile{
+ usGemPortAttributeList = append(usGemPortAttributeList, &tp_pb.GemPortAttributes{
+ GemportId: 1,
+ PbitMap: "0b11111111",
+ })
+ dsGemPortAttributeList = append(dsGemPortAttributeList, &tp_pb.GemPortAttributes{
+ GemportId: 1,
+ PbitMap: "0b11111111",
+ })
+ return &tp_pb.TechProfileInstance{
Name: "mock-tech-profile",
SubscriberIdentifier: "257",
ProfileType: "mock",
Version: 0,
NumGemPorts: 1,
- UsScheduler: tp.IScheduler{
- AllocID: 1,
- Direction: "upstream",
- AdditionalBw: "None",
+ InstanceControl: &tp_pb.InstanceControl{
+ Onu: "multi-instance",
+ Uni: "single-instance",
+ MaxGemPayloadSize: "",
+ },
+ UsScheduler: &tp_pb.SchedulerAttributes{
+ AllocId: 1,
+ Direction: tp_pb.Direction_UPSTREAM,
+ AdditionalBw: tp_pb.AdditionalBW_AdditionalBW_None,
Priority: 0,
Weight: 0,
- QSchedPolicy: "",
+ QSchedPolicy: tp_pb.SchedulingPolicy_WRR,
},
- DsScheduler: tp.IScheduler{
- AllocID: 1,
- Direction: "downstream",
- AdditionalBw: "None",
+ DsScheduler: &tp_pb.SchedulerAttributes{
+ AllocId: 1,
+ Direction: tp_pb.Direction_DOWNSTREAM,
+ AdditionalBw: tp_pb.AdditionalBW_AdditionalBW_None,
Priority: 0,
Weight: 0,
- QSchedPolicy: "",
+ QSchedPolicy: tp_pb.SchedulingPolicy_WRR,
},
- UpstreamGemPortAttributeList: []tp.IGemPortAttribute{{
- GemportID: 1,
- PbitMap: "0b11111111",
- },
- },
- DownstreamGemPortAttributeList: []tp.IGemPortAttribute{{
- GemportID: 1,
- PbitMap: "0b11111111",
- },
- },
+ UpstreamGemPortAttributeList: usGemPortAttributeList,
+ DownstreamGemPortAttributeList: dsGemPortAttributeList,
}, nil
} else if techProfiletblID == 65 {
- return &tp.EponProfile{
+ return &tp_pb.EponTechProfileInstance{
Name: "mock-epon-profile",
SubscriberIdentifier: "257",
ProfileType: "mock",
@@ -143,7 +137,6 @@
} else {
return nil, nil
}
-
}
// DeleteTechProfileInstance to mock techprofile DeleteTechProfileInstance method
@@ -158,37 +151,37 @@
}
// GetUsScheduler to mock techprofile GetUsScheduler method
-func (m MockTechProfile) GetUsScheduler(ctx context.Context, tpInstance *tp.TechProfile) (*tp_pb.SchedulerConfig, error) {
- return &tp_pb.SchedulerConfig{}, nil
+func (m MockTechProfile) GetUsScheduler(tpInstance *tp_pb.TechProfileInstance) *tp_pb.SchedulerConfig {
+ return &tp_pb.SchedulerConfig{}
}
// GetDsScheduler to mock techprofile GetDsScheduler method
-func (m MockTechProfile) GetDsScheduler(ctx context.Context, tpInstance *tp.TechProfile) (*tp_pb.SchedulerConfig, error) {
- return &tp_pb.SchedulerConfig{}, nil
+func (m MockTechProfile) GetDsScheduler(tpInstance *tp_pb.TechProfileInstance) *tp_pb.SchedulerConfig {
+ return &tp_pb.SchedulerConfig{}
}
// GetTrafficScheduler to mock techprofile GetTrafficScheduler method
-func (m MockTechProfile) GetTrafficScheduler(tpInstance *tp.TechProfile, SchedCfg *tp_pb.SchedulerConfig,
+func (m MockTechProfile) GetTrafficScheduler(tpInstance *tp_pb.TechProfileInstance, SchedCfg *tp_pb.SchedulerConfig,
ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler {
return &tp_pb.TrafficScheduler{}
}
// GetTrafficQueues to mock techprofile GetTrafficQueues method
-func (m MockTechProfile) GetTrafficQueues(ctx context.Context, tp *tp.TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error) {
+func (m MockTechProfile) GetTrafficQueues(ctx context.Context, tp *tp_pb.TechProfileInstance, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error) {
return []*tp_pb.TrafficQueue{{}}, nil
}
// GetMulticastTrafficQueues to mock techprofile GetMulticastTrafficQueues method
-func (m MockTechProfile) GetMulticastTrafficQueues(ctx context.Context, tp *tp.TechProfile) []*tp_pb.TrafficQueue {
+func (m MockTechProfile) GetMulticastTrafficQueues(ctx context.Context, tp *tp_pb.TechProfileInstance) []*tp_pb.TrafficQueue {
return []*tp_pb.TrafficQueue{{}}
}
// GetGemportForPbit to mock techprofile GetGemportForPbit method
func (m MockTechProfile) GetGemportForPbit(ctx context.Context, tpInst interface{}, Dir tp_pb.Direction, pbit uint32) interface{} {
- return tp.IGemPortAttribute{
- GemportID: 1,
+ return &tp_pb.GemPortAttributes{
+ GemportId: 1,
PbitMap: "0b11111111",
AesEncryption: "false",
}
@@ -196,7 +189,7 @@
// FindAllTpInstances to mock techprofile FindAllTpInstances method
func (m MockTechProfile) FindAllTpInstances(ctx context.Context, oltDeviceID string, tpID uint32, ponIntf uint32, onuID uint32) interface{} {
- return []tp.TechProfile{}
+ return []tp_pb.TechProfileInstance{}
}
// GetResourceID to mock techprofile GetResourceID method
@@ -208,3 +201,8 @@
func (m MockTechProfile) FreeResourceID(ctx context.Context, IntfID uint32, ResourceType string, ReleaseContent []uint32) error {
return nil
}
+
+// GetTechProfileInstanceKey to mock techprofile GetTechProfileInstanceKey method
+func (m MockTechProfile) GetTechProfileInstanceKey(ctx context.Context, tpID uint32, uniPortName string) string {
+ return ""
+}
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
new file mode 100644
index 0000000..e9cc202
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
@@ -0,0 +1,1284 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2015 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON.
+It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json.
+
+This package produces a different output than the standard "encoding/json" package,
+which does not operate correctly on protocol buffers.
+*/
+package jsonpb
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ stpb "github.com/golang/protobuf/ptypes/struct"
+)
+
+const secondInNanos = int64(time.Second / time.Nanosecond)
+const maxSecondsInDuration = 315576000000
+
+// Marshaler is a configurable object for converting between
+// protocol buffer objects and a JSON representation for them.
+type Marshaler struct {
+ // Whether to render enum values as integers, as opposed to string values.
+ EnumsAsInts bool
+
+ // Whether to render fields with zero values.
+ EmitDefaults bool
+
+ // A string to indent each level by. The presence of this field will
+ // also cause a space to appear between the field separator and
+ // value, and for newlines to appear between fields and array
+ // elements.
+ Indent string
+
+ // Whether to use the original (.proto) name for fields.
+ OrigName bool
+
+ // A custom URL resolver to use when marshaling Any messages to JSON.
+ // If unset, the default resolution strategy is to extract the
+ // fully-qualified type name from the type URL and pass that to
+ // proto.MessageType(string).
+ AnyResolver AnyResolver
+}
+
+// AnyResolver takes a type URL, present in an Any message, and resolves it into
+// an instance of the associated message.
+type AnyResolver interface {
+ Resolve(typeUrl string) (proto.Message, error)
+}
+
+func defaultResolveAny(typeUrl string) (proto.Message, error) {
+ // Only the part of typeUrl after the last slash is relevant.
+ mname := typeUrl
+ if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+ mname = mname[slash+1:]
+ }
+ mt := proto.MessageType(mname)
+ if mt == nil {
+ return nil, fmt.Errorf("unknown message type %q", mname)
+ }
+ return reflect.New(mt.Elem()).Interface().(proto.Message), nil
+}
+
+// JSONPBMarshaler is implemented by protobuf messages that customize the
+// way they are marshaled to JSON. Messages that implement this should
+// also implement JSONPBUnmarshaler so that the custom format can be
+// parsed.
+//
+// The JSON marshaling must follow the proto to JSON specification:
+// https://developers.google.com/protocol-buffers/docs/proto3#json
+type JSONPBMarshaler interface {
+ MarshalJSONPB(*Marshaler) ([]byte, error)
+}
+
+// JSONPBUnmarshaler is implemented by protobuf messages that customize
+// the way they are unmarshaled from JSON. Messages that implement this
+// should also implement JSONPBMarshaler so that the custom format can be
+// produced.
+//
+// The JSON unmarshaling must follow the JSON to proto specification:
+// https://developers.google.com/protocol-buffers/docs/proto3#json
+type JSONPBUnmarshaler interface {
+ UnmarshalJSONPB(*Unmarshaler, []byte) error
+}
+
+// Marshal marshals a protocol buffer into JSON.
+func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error {
+ v := reflect.ValueOf(pb)
+ if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
+ return errors.New("Marshal called with nil")
+ }
+ // Check for unset required fields first.
+ if err := checkRequiredFields(pb); err != nil {
+ return err
+ }
+ writer := &errWriter{writer: out}
+ return m.marshalObject(writer, pb, "", "")
+}
+
+// MarshalToString converts a protocol buffer object to JSON string.
+func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) {
+ var buf bytes.Buffer
+ if err := m.Marshal(&buf, pb); err != nil {
+ return "", err
+ }
+ return buf.String(), nil
+}
+
+type int32Slice []int32
+
+var nonFinite = map[string]float64{
+ `"NaN"`: math.NaN(),
+ `"Infinity"`: math.Inf(1),
+ `"-Infinity"`: math.Inf(-1),
+}
+
+// For sorting extensions ids to ensure stable output.
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+type wkt interface {
+ XXX_WellKnownType() string
+}
+
+// marshalObject writes a struct to the Writer.
+func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error {
+ if jsm, ok := v.(JSONPBMarshaler); ok {
+ b, err := jsm.MarshalJSONPB(m)
+ if err != nil {
+ return err
+ }
+ if typeURL != "" {
+ // we are marshaling this object to an Any type
+ var js map[string]*json.RawMessage
+ if err = json.Unmarshal(b, &js); err != nil {
+ return fmt.Errorf("type %T produced invalid JSON: %v", v, err)
+ }
+ turl, err := json.Marshal(typeURL)
+ if err != nil {
+ return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
+ }
+ js["@type"] = (*json.RawMessage)(&turl)
+ if m.Indent != "" {
+ b, err = json.MarshalIndent(js, indent, m.Indent)
+ } else {
+ b, err = json.Marshal(js)
+ }
+ if err != nil {
+ return err
+ }
+ }
+
+ out.write(string(b))
+ return out.err
+ }
+
+ s := reflect.ValueOf(v).Elem()
+
+ // Handle well-known types.
+ if wkt, ok := v.(wkt); ok {
+ switch wkt.XXX_WellKnownType() {
+ case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
+ "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
+ // "Wrappers use the same representation in JSON
+ // as the wrapped primitive type, ..."
+ sprop := proto.GetProperties(s.Type())
+ return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent)
+ case "Any":
+ // Any is a bit more involved.
+ return m.marshalAny(out, v, indent)
+ case "Duration":
+ s, ns := s.Field(0).Int(), s.Field(1).Int()
+ if s < -maxSecondsInDuration || s > maxSecondsInDuration {
+ return fmt.Errorf("seconds out of range %v", s)
+ }
+ if ns <= -secondInNanos || ns >= secondInNanos {
+ return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
+ }
+ if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
+ return errors.New("signs of seconds and nanos do not match")
+ }
+ // Generated output always contains 0, 3, 6, or 9 fractional digits,
+ // depending on required precision, followed by the suffix "s".
+ f := "%d.%09d"
+ if ns < 0 {
+ ns = -ns
+ if s == 0 {
+ f = "-%d.%09d"
+ }
+ }
+ x := fmt.Sprintf(f, s, ns)
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, ".000")
+ out.write(`"`)
+ out.write(x)
+ out.write(`s"`)
+ return out.err
+ case "Struct", "ListValue":
+ // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice.
+ // TODO: pass the correct Properties if needed.
+ return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent)
+ case "Timestamp":
+ // "RFC 3339, where generated output will always be Z-normalized
+ // and uses 0, 3, 6 or 9 fractional digits."
+ s, ns := s.Field(0).Int(), s.Field(1).Int()
+ if ns < 0 || ns >= secondInNanos {
+ return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
+ }
+ t := time.Unix(s, ns).UTC()
+ // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
+ x := t.Format("2006-01-02T15:04:05.000000000")
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, ".000")
+ out.write(`"`)
+ out.write(x)
+ out.write(`Z"`)
+ return out.err
+ case "Value":
+ // Value has a single oneof.
+ kind := s.Field(0)
+ if kind.IsNil() {
+ // "absence of any variant indicates an error"
+ return errors.New("nil Value")
+ }
+ // oneof -> *T -> T -> T.F
+ x := kind.Elem().Elem().Field(0)
+ // TODO: pass the correct Properties if needed.
+ return m.marshalValue(out, &proto.Properties{}, x, indent)
+ }
+ }
+
+ out.write("{")
+ if m.Indent != "" {
+ out.write("\n")
+ }
+
+ firstField := true
+
+ if typeURL != "" {
+ if err := m.marshalTypeURL(out, indent, typeURL); err != nil {
+ return err
+ }
+ firstField = false
+ }
+
+ for i := 0; i < s.NumField(); i++ {
+ value := s.Field(i)
+ valueField := s.Type().Field(i)
+ if strings.HasPrefix(valueField.Name, "XXX_") {
+ continue
+ }
+
+ // IsNil will panic on most value kinds.
+ switch value.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface:
+ if value.IsNil() {
+ continue
+ }
+ }
+
+ if !m.EmitDefaults {
+ switch value.Kind() {
+ case reflect.Bool:
+ if !value.Bool() {
+ continue
+ }
+ case reflect.Int32, reflect.Int64:
+ if value.Int() == 0 {
+ continue
+ }
+ case reflect.Uint32, reflect.Uint64:
+ if value.Uint() == 0 {
+ continue
+ }
+ case reflect.Float32, reflect.Float64:
+ if value.Float() == 0 {
+ continue
+ }
+ case reflect.String:
+ if value.Len() == 0 {
+ continue
+ }
+ case reflect.Map, reflect.Ptr, reflect.Slice:
+ if value.IsNil() {
+ continue
+ }
+ }
+ }
+
+ // Oneof fields need special handling.
+ if valueField.Tag.Get("protobuf_oneof") != "" {
+ // value is an interface containing &T{real_value}.
+ sv := value.Elem().Elem() // interface -> *T -> T
+ value = sv.Field(0)
+ valueField = sv.Type().Field(0)
+ }
+ prop := jsonProperties(valueField, m.OrigName)
+ if !firstField {
+ m.writeSep(out)
+ }
+ if err := m.marshalField(out, prop, value, indent); err != nil {
+ return err
+ }
+ firstField = false
+ }
+
+ // Handle proto2 extensions.
+ if ep, ok := v.(proto.Message); ok {
+ extensions := proto.RegisteredExtensions(v)
+ // Sort extensions for stable output.
+ ids := make([]int32, 0, len(extensions))
+ for id, desc := range extensions {
+ if !proto.HasExtension(ep, desc) {
+ continue
+ }
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+ for _, id := range ids {
+ desc := extensions[id]
+ if desc == nil {
+ // unknown extension
+ continue
+ }
+ ext, extErr := proto.GetExtension(ep, desc)
+ if extErr != nil {
+ return extErr
+ }
+ value := reflect.ValueOf(ext)
+ var prop proto.Properties
+ prop.Parse(desc.Tag)
+ prop.JSONName = fmt.Sprintf("[%s]", desc.Name)
+ if !firstField {
+ m.writeSep(out)
+ }
+ if err := m.marshalField(out, &prop, value, indent); err != nil {
+ return err
+ }
+ firstField = false
+ }
+
+ }
+
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ }
+ out.write("}")
+ return out.err
+}
+
+func (m *Marshaler) writeSep(out *errWriter) {
+ if m.Indent != "" {
+ out.write(",\n")
+ } else {
+ out.write(",")
+ }
+}
+
+func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error {
+ // "If the Any contains a value that has a special JSON mapping,
+ // it will be converted as follows: {"@type": xxx, "value": yyy}.
+ // Otherwise, the value will be converted into a JSON object,
+ // and the "@type" field will be inserted to indicate the actual data type."
+ v := reflect.ValueOf(any).Elem()
+ turl := v.Field(0).String()
+ val := v.Field(1).Bytes()
+
+ var msg proto.Message
+ var err error
+ if m.AnyResolver != nil {
+ msg, err = m.AnyResolver.Resolve(turl)
+ } else {
+ msg, err = defaultResolveAny(turl)
+ }
+ if err != nil {
+ return err
+ }
+
+ if err := proto.Unmarshal(val, msg); err != nil {
+ return err
+ }
+
+ if _, ok := msg.(wkt); ok {
+ out.write("{")
+ if m.Indent != "" {
+ out.write("\n")
+ }
+ if err := m.marshalTypeURL(out, indent, turl); err != nil {
+ return err
+ }
+ m.writeSep(out)
+ if m.Indent != "" {
+ out.write(indent)
+ out.write(m.Indent)
+ out.write(`"value": `)
+ } else {
+ out.write(`"value":`)
+ }
+ if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil {
+ return err
+ }
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ }
+ out.write("}")
+ return out.err
+ }
+
+ return m.marshalObject(out, msg, indent, turl)
+}
+
+func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error {
+ if m.Indent != "" {
+ out.write(indent)
+ out.write(m.Indent)
+ }
+ out.write(`"@type":`)
+ if m.Indent != "" {
+ out.write(" ")
+ }
+ b, err := json.Marshal(typeURL)
+ if err != nil {
+ return err
+ }
+ out.write(string(b))
+ return out.err
+}
+
+// marshalField writes field description and value to the Writer.
+func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
+ if m.Indent != "" {
+ out.write(indent)
+ out.write(m.Indent)
+ }
+ out.write(`"`)
+ out.write(prop.JSONName)
+ out.write(`":`)
+ if m.Indent != "" {
+ out.write(" ")
+ }
+ if err := m.marshalValue(out, prop, v, indent); err != nil {
+ return err
+ }
+ return nil
+}
+
+// marshalValue writes the value to the Writer.
+func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
+ var err error
+ v = reflect.Indirect(v)
+
+ // Handle nil pointer
+ if v.Kind() == reflect.Invalid {
+ out.write("null")
+ return out.err
+ }
+
+ // Handle repeated elements.
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+ out.write("[")
+ comma := ""
+ for i := 0; i < v.Len(); i++ {
+ sliceVal := v.Index(i)
+ out.write(comma)
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ out.write(m.Indent)
+ out.write(m.Indent)
+ }
+ if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil {
+ return err
+ }
+ comma = ","
+ }
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ out.write(m.Indent)
+ }
+ out.write("]")
+ return out.err
+ }
+
+ // Handle well-known types.
+ // Most are handled up in marshalObject (because 99% are messages).
+ if wkt, ok := v.Interface().(wkt); ok {
+ switch wkt.XXX_WellKnownType() {
+ case "NullValue":
+ out.write("null")
+ return out.err
+ }
+ }
+
+ // Handle enumerations.
+ if !m.EnumsAsInts && prop.Enum != "" {
+ // Unknown enum values are stringified by the proto library as their
+ // value. Such values should _not_ be quoted or they will be interpreted
+ // as an enum string instead of their value.
+ enumStr := v.Interface().(fmt.Stringer).String()
+ var valStr string
+ if v.Kind() == reflect.Ptr {
+ valStr = strconv.Itoa(int(v.Elem().Int()))
+ } else {
+ valStr = strconv.Itoa(int(v.Int()))
+ }
+ isKnownEnum := enumStr != valStr
+ if isKnownEnum {
+ out.write(`"`)
+ }
+ out.write(enumStr)
+ if isKnownEnum {
+ out.write(`"`)
+ }
+ return out.err
+ }
+
+ // Handle nested messages.
+ if v.Kind() == reflect.Struct {
+ return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "")
+ }
+
+ // Handle maps.
+ // Since Go randomizes map iteration, we sort keys for stable output.
+ if v.Kind() == reflect.Map {
+ out.write(`{`)
+ keys := v.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for i, k := range keys {
+ if i > 0 {
+ out.write(`,`)
+ }
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ out.write(m.Indent)
+ out.write(m.Indent)
+ }
+
+ // TODO handle map key prop properly
+ b, err := json.Marshal(k.Interface())
+ if err != nil {
+ return err
+ }
+ s := string(b)
+
+ // If the JSON is not a string value, encode it again to make it one.
+ if !strings.HasPrefix(s, `"`) {
+ b, err := json.Marshal(s)
+ if err != nil {
+ return err
+ }
+ s = string(b)
+ }
+
+ out.write(s)
+ out.write(`:`)
+ if m.Indent != "" {
+ out.write(` `)
+ }
+
+ vprop := prop
+ if prop != nil && prop.MapValProp != nil {
+ vprop = prop.MapValProp
+ }
+ if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil {
+ return err
+ }
+ }
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ out.write(m.Indent)
+ }
+ out.write(`}`)
+ return out.err
+ }
+
+ // Handle non-finite floats, e.g. NaN, Infinity and -Infinity.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ f := v.Float()
+ var sval string
+ switch {
+ case math.IsInf(f, 1):
+ sval = `"Infinity"`
+ case math.IsInf(f, -1):
+ sval = `"-Infinity"`
+ case math.IsNaN(f):
+ sval = `"NaN"`
+ }
+ if sval != "" {
+ out.write(sval)
+ return out.err
+ }
+ }
+
+ // Default handling defers to the encoding/json library.
+ b, err := json.Marshal(v.Interface())
+ if err != nil {
+ return err
+ }
+ needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64)
+ if needToQuote {
+ out.write(`"`)
+ }
+ out.write(string(b))
+ if needToQuote {
+ out.write(`"`)
+ }
+ return out.err
+}
+
+// Unmarshaler is a configurable object for converting from a JSON
+// representation to a protocol buffer object.
+type Unmarshaler struct {
+ // Whether to allow messages to contain unknown fields, as opposed to
+ // failing to unmarshal.
+ AllowUnknownFields bool
+
+ // A custom URL resolver to use when unmarshaling Any messages from JSON.
+ // If unset, the default resolution strategy is to extract the
+ // fully-qualified type name from the type URL and pass that to
+ // proto.MessageType(string).
+ AnyResolver AnyResolver
+}
+
+// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
+// This function is lenient and will decode any options permutations of the
+// related Marshaler.
+func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
+ inputValue := json.RawMessage{}
+ if err := dec.Decode(&inputValue); err != nil {
+ return err
+ }
+ if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil {
+ return err
+ }
+ return checkRequiredFields(pb)
+}
+
+// Unmarshal unmarshals a JSON object stream into a protocol
+// buffer. This function is lenient and will decode any options
+// permutations of the related Marshaler.
+func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error {
+ dec := json.NewDecoder(r)
+ return u.UnmarshalNext(dec, pb)
+}
+
+// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
+// This function is lenient and will decode any options permutations of the
+// related Marshaler.
+func UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
+ return new(Unmarshaler).UnmarshalNext(dec, pb)
+}
+
+// Unmarshal unmarshals a JSON object stream into a protocol
+// buffer. This function is lenient and will decode any options
+// permutations of the related Marshaler.
+func Unmarshal(r io.Reader, pb proto.Message) error {
+ return new(Unmarshaler).Unmarshal(r, pb)
+}
+
+// UnmarshalString will populate the fields of a protocol buffer based
+// on a JSON string. This function is lenient and will decode any options
+// permutations of the related Marshaler.
+func UnmarshalString(str string, pb proto.Message) error {
+ return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb)
+}
+
+// unmarshalValue converts/copies a value into the target.
+// prop may be nil.
+func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error {
+ targetType := target.Type()
+
+ // Allocate memory for pointer fields.
+ if targetType.Kind() == reflect.Ptr {
+ // If input value is "null" and target is a pointer type, then the field should be treated as not set
+ // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue.
+ _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler)
+ if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler {
+ return nil
+ }
+ target.Set(reflect.New(targetType.Elem()))
+
+ return u.unmarshalValue(target.Elem(), inputValue, prop)
+ }
+
+ if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok {
+ return jsu.UnmarshalJSONPB(u, []byte(inputValue))
+ }
+
+ // Handle well-known types that are not pointers.
+ if w, ok := target.Addr().Interface().(wkt); ok {
+ switch w.XXX_WellKnownType() {
+ case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
+ "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
+ return u.unmarshalValue(target.Field(0), inputValue, prop)
+ case "Any":
+ // Use json.RawMessage pointer type instead of value to support pre-1.8 version.
+ // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see
+ // https://github.com/golang/go/issues/14493
+ var jsonFields map[string]*json.RawMessage
+ if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
+ return err
+ }
+
+ val, ok := jsonFields["@type"]
+ if !ok || val == nil {
+ return errors.New("Any JSON doesn't have '@type'")
+ }
+
+ var turl string
+ if err := json.Unmarshal([]byte(*val), &turl); err != nil {
+ return fmt.Errorf("can't unmarshal Any's '@type': %q", *val)
+ }
+ target.Field(0).SetString(turl)
+
+ var m proto.Message
+ var err error
+ if u.AnyResolver != nil {
+ m, err = u.AnyResolver.Resolve(turl)
+ } else {
+ m, err = defaultResolveAny(turl)
+ }
+ if err != nil {
+ return err
+ }
+
+ if _, ok := m.(wkt); ok {
+ val, ok := jsonFields["value"]
+ if !ok {
+ return errors.New("Any JSON doesn't have 'value'")
+ }
+
+ if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil {
+ return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
+ }
+ } else {
+ delete(jsonFields, "@type")
+ nestedProto, err := json.Marshal(jsonFields)
+ if err != nil {
+ return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
+ }
+
+ if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil {
+ return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
+ }
+ }
+
+ b, err := proto.Marshal(m)
+ if err != nil {
+ return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err)
+ }
+ target.Field(1).SetBytes(b)
+
+ return nil
+ case "Duration":
+ unq, err := unquote(string(inputValue))
+ if err != nil {
+ return err
+ }
+
+ d, err := time.ParseDuration(unq)
+ if err != nil {
+ return fmt.Errorf("bad Duration: %v", err)
+ }
+
+ ns := d.Nanoseconds()
+ s := ns / 1e9
+ ns %= 1e9
+ target.Field(0).SetInt(s)
+ target.Field(1).SetInt(ns)
+ return nil
+ case "Timestamp":
+ unq, err := unquote(string(inputValue))
+ if err != nil {
+ return err
+ }
+
+ t, err := time.Parse(time.RFC3339Nano, unq)
+ if err != nil {
+ return fmt.Errorf("bad Timestamp: %v", err)
+ }
+
+ target.Field(0).SetInt(t.Unix())
+ target.Field(1).SetInt(int64(t.Nanosecond()))
+ return nil
+ case "Struct":
+ var m map[string]json.RawMessage
+ if err := json.Unmarshal(inputValue, &m); err != nil {
+ return fmt.Errorf("bad StructValue: %v", err)
+ }
+
+ target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{}))
+ for k, jv := range m {
+ pv := &stpb.Value{}
+ if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil {
+ return fmt.Errorf("bad value in StructValue for key %q: %v", k, err)
+ }
+ target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv))
+ }
+ return nil
+ case "ListValue":
+ var s []json.RawMessage
+ if err := json.Unmarshal(inputValue, &s); err != nil {
+ return fmt.Errorf("bad ListValue: %v", err)
+ }
+
+ target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s))))
+ for i, sv := range s {
+ if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil {
+ return err
+ }
+ }
+ return nil
+ case "Value":
+ ivStr := string(inputValue)
+ if ivStr == "null" {
+ target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{}))
+ } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil {
+ target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v}))
+ } else if v, err := unquote(ivStr); err == nil {
+ target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v}))
+ } else if v, err := strconv.ParseBool(ivStr); err == nil {
+ target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v}))
+ } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil {
+ lv := &stpb.ListValue{}
+ target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv}))
+ return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop)
+ } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil {
+ sv := &stpb.Struct{}
+ target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv}))
+ return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop)
+ } else {
+ return fmt.Errorf("unrecognized type for Value %q", ivStr)
+ }
+ return nil
+ }
+ }
+
+ // Handle enums, which have an underlying type of int32,
+ // and may appear as strings.
+ // The case of an enum appearing as a number is handled
+ // at the bottom of this function.
+ if inputValue[0] == '"' && prop != nil && prop.Enum != "" {
+ vmap := proto.EnumValueMap(prop.Enum)
+ // Don't need to do unquoting; valid enum names
+ // are from a limited character set.
+ s := inputValue[1 : len(inputValue)-1]
+ n, ok := vmap[string(s)]
+ if !ok {
+ return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum)
+ }
+ if target.Kind() == reflect.Ptr { // proto2
+ target.Set(reflect.New(targetType.Elem()))
+ target = target.Elem()
+ }
+ if targetType.Kind() != reflect.Int32 {
+ return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum)
+ }
+ target.SetInt(int64(n))
+ return nil
+ }
+
+ // Handle nested messages.
+ if targetType.Kind() == reflect.Struct {
+ var jsonFields map[string]json.RawMessage
+ if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
+ return err
+ }
+
+ consumeField := func(prop *proto.Properties) (json.RawMessage, bool) {
+ // Be liberal in what names we accept; both orig_name and camelName are okay.
+ fieldNames := acceptedJSONFieldNames(prop)
+
+ vOrig, okOrig := jsonFields[fieldNames.orig]
+ vCamel, okCamel := jsonFields[fieldNames.camel]
+ if !okOrig && !okCamel {
+ return nil, false
+ }
+ // If, for some reason, both are present in the data, favour the camelName.
+ var raw json.RawMessage
+ if okOrig {
+ raw = vOrig
+ delete(jsonFields, fieldNames.orig)
+ }
+ if okCamel {
+ raw = vCamel
+ delete(jsonFields, fieldNames.camel)
+ }
+ return raw, true
+ }
+
+ sprops := proto.GetProperties(targetType)
+ for i := 0; i < target.NumField(); i++ {
+ ft := target.Type().Field(i)
+ if strings.HasPrefix(ft.Name, "XXX_") {
+ continue
+ }
+
+ valueForField, ok := consumeField(sprops.Prop[i])
+ if !ok {
+ continue
+ }
+
+ if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil {
+ return err
+ }
+ }
+ // Check for any oneof fields.
+ if len(jsonFields) > 0 {
+ for _, oop := range sprops.OneofTypes {
+ raw, ok := consumeField(oop.Prop)
+ if !ok {
+ continue
+ }
+ nv := reflect.New(oop.Type.Elem())
+ target.Field(oop.Field).Set(nv)
+ if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil {
+ return err
+ }
+ }
+ }
+ // Handle proto2 extensions.
+ if len(jsonFields) > 0 {
+ if ep, ok := target.Addr().Interface().(proto.Message); ok {
+ for _, ext := range proto.RegisteredExtensions(ep) {
+ name := fmt.Sprintf("[%s]", ext.Name)
+ raw, ok := jsonFields[name]
+ if !ok {
+ continue
+ }
+ delete(jsonFields, name)
+ nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem())
+ if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil {
+ return err
+ }
+ if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ if !u.AllowUnknownFields && len(jsonFields) > 0 {
+ // Pick any field to be the scapegoat.
+ var f string
+ for fname := range jsonFields {
+ f = fname
+ break
+ }
+ return fmt.Errorf("unknown field %q in %v", f, targetType)
+ }
+ return nil
+ }
+
+ // Handle arrays (which aren't encoded bytes)
+ if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 {
+ var slc []json.RawMessage
+ if err := json.Unmarshal(inputValue, &slc); err != nil {
+ return err
+ }
+ if slc != nil {
+ l := len(slc)
+ target.Set(reflect.MakeSlice(targetType, l, l))
+ for i := 0; i < l; i++ {
+ if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+ }
+
+ // Handle maps (whose keys are always strings)
+ if targetType.Kind() == reflect.Map {
+ var mp map[string]json.RawMessage
+ if err := json.Unmarshal(inputValue, &mp); err != nil {
+ return err
+ }
+ if mp != nil {
+ target.Set(reflect.MakeMap(targetType))
+ for ks, raw := range mp {
+ // Unmarshal map key. The core json library already decoded the key into a
+ // string, so we handle that specially. Other types were quoted post-serialization.
+ var k reflect.Value
+ if targetType.Key().Kind() == reflect.String {
+ k = reflect.ValueOf(ks)
+ } else {
+ k = reflect.New(targetType.Key()).Elem()
+ var kprop *proto.Properties
+ if prop != nil && prop.MapKeyProp != nil {
+ kprop = prop.MapKeyProp
+ }
+ if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil {
+ return err
+ }
+ }
+
+ // Unmarshal map value.
+ v := reflect.New(targetType.Elem()).Elem()
+ var vprop *proto.Properties
+ if prop != nil && prop.MapValProp != nil {
+ vprop = prop.MapValProp
+ }
+ if err := u.unmarshalValue(v, raw, vprop); err != nil {
+ return err
+ }
+ target.SetMapIndex(k, v)
+ }
+ }
+ return nil
+ }
+
+ // Non-finite numbers can be encoded as strings.
+ isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
+ if isFloat {
+ if num, ok := nonFinite[string(inputValue)]; ok {
+ target.SetFloat(num)
+ return nil
+ }
+ }
+
+ // integers & floats can be encoded as strings. In this case we drop
+ // the quotes and proceed as normal.
+ isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 ||
+ targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 ||
+ targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
+ if isNum && strings.HasPrefix(string(inputValue), `"`) {
+ inputValue = inputValue[1 : len(inputValue)-1]
+ }
+
+ // Use the encoding/json for parsing other value types.
+ return json.Unmarshal(inputValue, target.Addr().Interface())
+}
+
+func unquote(s string) (string, error) {
+ var ret string
+ err := json.Unmarshal([]byte(s), &ret)
+ return ret, err
+}
+
+// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute.
+func jsonProperties(f reflect.StructField, origName bool) *proto.Properties {
+ var prop proto.Properties
+ prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f)
+ if origName || prop.JSONName == "" {
+ prop.JSONName = prop.OrigName
+ }
+ return &prop
+}
+
+type fieldNames struct {
+ orig, camel string
+}
+
+func acceptedJSONFieldNames(prop *proto.Properties) fieldNames {
+ opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName}
+ if prop.JSONName != "" {
+ opts.camel = prop.JSONName
+ }
+ return opts
+}
+
+// Writer wrapper inspired by https://blog.golang.org/errors-are-values
+type errWriter struct {
+ writer io.Writer
+ err error
+}
+
+func (w *errWriter) write(str string) {
+ if w.err != nil {
+ return
+ }
+ _, w.err = w.writer.Write([]byte(str))
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+//
+// Numeric keys are sorted in numeric order per
+// https://developers.google.com/protocol-buffers/docs/proto#maps.
+type mapKeys []reflect.Value
+
+func (s mapKeys) Len() int { return len(s) }
+func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s mapKeys) Less(i, j int) bool {
+ if k := s[i].Kind(); k == s[j].Kind() {
+ switch k {
+ case reflect.String:
+ return s[i].String() < s[j].String()
+ case reflect.Int32, reflect.Int64:
+ return s[i].Int() < s[j].Int()
+ case reflect.Uint32, reflect.Uint64:
+ return s[i].Uint() < s[j].Uint()
+ }
+ }
+ return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
+}
+
+// checkRequiredFields returns an error if any required field in the given proto message is not set.
+// This function is used by both Marshal and Unmarshal. While required fields only exist in a
+// proto2 message, a proto3 message can contain proto2 message(s).
+func checkRequiredFields(pb proto.Message) error {
+ // Most well-known type messages do not contain required fields. The "Any" type may contain
+ // a message that has required fields.
+ //
+ // When an Any message is being marshaled, the code will invoke proto.Unmarshal on Any.Value
+ // field in order to transform that into JSON, and that should have returned an error if a
+ // required field is not set in the embedded message.
+ //
+ // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the
+ // embedded message to store the serialized message in Any.Value field, and that should have
+ // returned an error if a required field is not set.
+ if _, ok := pb.(wkt); ok {
+ return nil
+ }
+
+ v := reflect.ValueOf(pb)
+ // Skip message if it is not a struct pointer.
+ if v.Kind() != reflect.Ptr {
+ return nil
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return nil
+ }
+
+ for i := 0; i < v.NumField(); i++ {
+ field := v.Field(i)
+ sfield := v.Type().Field(i)
+
+ if sfield.PkgPath != "" {
+ // blank PkgPath means the field is exported; skip if not exported
+ continue
+ }
+
+ if strings.HasPrefix(sfield.Name, "XXX_") {
+ continue
+ }
+
+ // Oneof field is an interface implemented by wrapper structs containing the actual oneof
+ // field, i.e. an interface containing &T{real_value}.
+ if sfield.Tag.Get("protobuf_oneof") != "" {
+ if field.Kind() != reflect.Interface {
+ continue
+ }
+ v := field.Elem()
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ continue
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct || v.NumField() < 1 {
+ continue
+ }
+ field = v.Field(0)
+ sfield = v.Type().Field(0)
+ }
+
+ protoTag := sfield.Tag.Get("protobuf")
+ if protoTag == "" {
+ continue
+ }
+ var prop proto.Properties
+ prop.Init(sfield.Type, sfield.Name, protoTag, &sfield)
+
+ switch field.Kind() {
+ case reflect.Map:
+ if field.IsNil() {
+ continue
+ }
+ // Check each map value.
+ keys := field.MapKeys()
+ for _, k := range keys {
+ v := field.MapIndex(k)
+ if err := checkRequiredFieldsInValue(v); err != nil {
+ return err
+ }
+ }
+ case reflect.Slice:
+ // Handle non-repeated type, e.g. bytes.
+ if !prop.Repeated {
+ if prop.Required && field.IsNil() {
+ return fmt.Errorf("required field %q is not set", prop.Name)
+ }
+ continue
+ }
+
+ // Handle repeated type.
+ if field.IsNil() {
+ continue
+ }
+ // Check each slice item.
+ for i := 0; i < field.Len(); i++ {
+ v := field.Index(i)
+ if err := checkRequiredFieldsInValue(v); err != nil {
+ return err
+ }
+ }
+ case reflect.Ptr:
+ if field.IsNil() {
+ if prop.Required {
+ return fmt.Errorf("required field %q is not set", prop.Name)
+ }
+ continue
+ }
+ if err := checkRequiredFieldsInValue(field); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Handle proto2 extensions.
+ for _, ext := range proto.RegisteredExtensions(pb) {
+ if !proto.HasExtension(pb, ext) {
+ continue
+ }
+ ep, err := proto.GetExtension(pb, ext)
+ if err != nil {
+ return err
+ }
+ err = checkRequiredFieldsInValue(reflect.ValueOf(ep))
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func checkRequiredFieldsInValue(v reflect.Value) error {
+ if pm, ok := v.Interface().(proto.Message); ok {
+ return checkRequiredFields(pm)
+ }
+ return nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
new file mode 100644
index 0000000..33daa73
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
@@ -0,0 +1,336 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/struct.proto
+
+package structpb
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+type NullValue int32
+
+const (
+ // Null value.
+ NullValue_NULL_VALUE NullValue = 0
+)
+
+var NullValue_name = map[int32]string{
+ 0: "NULL_VALUE",
+}
+
+var NullValue_value = map[string]int32{
+ "NULL_VALUE": 0,
+}
+
+func (x NullValue) String() string {
+ return proto.EnumName(NullValue_name, int32(x))
+}
+
+func (NullValue) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_df322afd6c9fb402, []int{0}
+}
+
+func (NullValue) XXX_WellKnownType() string { return "NullValue" }
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+type Struct struct {
+ // Unordered map of dynamically typed values.
+ Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Struct) Reset() { *m = Struct{} }
+func (m *Struct) String() string { return proto.CompactTextString(m) }
+func (*Struct) ProtoMessage() {}
+func (*Struct) Descriptor() ([]byte, []int) {
+ return fileDescriptor_df322afd6c9fb402, []int{0}
+}
+
+func (*Struct) XXX_WellKnownType() string { return "Struct" }
+
+func (m *Struct) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Struct.Unmarshal(m, b)
+}
+func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Struct.Marshal(b, m, deterministic)
+}
+func (m *Struct) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Struct.Merge(m, src)
+}
+func (m *Struct) XXX_Size() int {
+ return xxx_messageInfo_Struct.Size(m)
+}
+func (m *Struct) XXX_DiscardUnknown() {
+ xxx_messageInfo_Struct.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Struct proto.InternalMessageInfo
+
+func (m *Struct) GetFields() map[string]*Value {
+ if m != nil {
+ return m.Fields
+ }
+ return nil
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of that
+// variants, absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+type Value struct {
+ // The kind of value.
+ //
+ // Types that are valid to be assigned to Kind:
+ // *Value_NullValue
+ // *Value_NumberValue
+ // *Value_StringValue
+ // *Value_BoolValue
+ // *Value_StructValue
+ // *Value_ListValue
+ Kind isValue_Kind `protobuf_oneof:"kind"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Value) Reset() { *m = Value{} }
+func (m *Value) String() string { return proto.CompactTextString(m) }
+func (*Value) ProtoMessage() {}
+func (*Value) Descriptor() ([]byte, []int) {
+ return fileDescriptor_df322afd6c9fb402, []int{1}
+}
+
+func (*Value) XXX_WellKnownType() string { return "Value" }
+
+func (m *Value) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Value.Unmarshal(m, b)
+}
+func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Value.Marshal(b, m, deterministic)
+}
+func (m *Value) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Value.Merge(m, src)
+}
+func (m *Value) XXX_Size() int {
+ return xxx_messageInfo_Value.Size(m)
+}
+func (m *Value) XXX_DiscardUnknown() {
+ xxx_messageInfo_Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Value proto.InternalMessageInfo
+
+type isValue_Kind interface {
+ isValue_Kind()
+}
+
+type Value_NullValue struct {
+ NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Value_NumberValue struct {
+ NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
+}
+
+type Value_StringValue struct {
+ StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Value_BoolValue struct {
+ BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Value_StructValue struct {
+ StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"`
+}
+
+type Value_ListValue struct {
+ ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+
+func (*Value_NumberValue) isValue_Kind() {}
+
+func (*Value_StringValue) isValue_Kind() {}
+
+func (*Value_BoolValue) isValue_Kind() {}
+
+func (*Value_StructValue) isValue_Kind() {}
+
+func (*Value_ListValue) isValue_Kind() {}
+
+func (m *Value) GetKind() isValue_Kind {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (m *Value) GetNullValue() NullValue {
+ if x, ok := m.GetKind().(*Value_NullValue); ok {
+ return x.NullValue
+ }
+ return NullValue_NULL_VALUE
+}
+
+func (m *Value) GetNumberValue() float64 {
+ if x, ok := m.GetKind().(*Value_NumberValue); ok {
+ return x.NumberValue
+ }
+ return 0
+}
+
+func (m *Value) GetStringValue() string {
+ if x, ok := m.GetKind().(*Value_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (m *Value) GetBoolValue() bool {
+ if x, ok := m.GetKind().(*Value_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (m *Value) GetStructValue() *Struct {
+ if x, ok := m.GetKind().(*Value_StructValue); ok {
+ return x.StructValue
+ }
+ return nil
+}
+
+func (m *Value) GetListValue() *ListValue {
+ if x, ok := m.GetKind().(*Value_ListValue); ok {
+ return x.ListValue
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Value) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*Value_NullValue)(nil),
+ (*Value_NumberValue)(nil),
+ (*Value_StringValue)(nil),
+ (*Value_BoolValue)(nil),
+ (*Value_StructValue)(nil),
+ (*Value_ListValue)(nil),
+ }
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+type ListValue struct {
+ // Repeated field of dynamically typed values.
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ListValue) Reset() { *m = ListValue{} }
+func (m *ListValue) String() string { return proto.CompactTextString(m) }
+func (*ListValue) ProtoMessage() {}
+func (*ListValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_df322afd6c9fb402, []int{2}
+}
+
+func (*ListValue) XXX_WellKnownType() string { return "ListValue" }
+
+func (m *ListValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListValue.Unmarshal(m, b)
+}
+func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListValue.Marshal(b, m, deterministic)
+}
+func (m *ListValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListValue.Merge(m, src)
+}
+func (m *ListValue) XXX_Size() int {
+ return xxx_messageInfo_ListValue.Size(m)
+}
+func (m *ListValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListValue proto.InternalMessageInfo
+
+func (m *ListValue) GetValues() []*Value {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
+ proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
+ proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry")
+ proto.RegisterType((*Value)(nil), "google.protobuf.Value")
+ proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
+}
+
+func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) }
+
+var fileDescriptor_df322afd6c9fb402 = []byte{
+ // 417 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40,
+ 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09,
+ 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94,
+ 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa,
+ 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff,
+ 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc,
+ 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15,
+ 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d,
+ 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce,
+ 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39,
+ 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab,
+ 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84,
+ 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48,
+ 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f,
+ 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59,
+ 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a,
+ 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64,
+ 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92,
+ 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25,
+ 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37,
+ 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6,
+ 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4,
+ 0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda,
+ 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9,
+ 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53,
+ 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00,
+ 0x00,
+}
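
Editor's note: a minimal usage sketch of the generated structpb types added above; it is illustrative only (field names and values are made up) and relies solely on the types and getters shown in struct.pb.go.

// --- illustrative example, not part of the diff ---
package main

import (
	"fmt"

	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	// A Struct is a map of field name to dynamically typed Value; each
	// Value carries exactly one member of the "kind" oneof.
	s := &structpb.Struct{
		Fields: map[string]*structpb.Value{
			"name":    {Kind: &structpb.Value_StringValue{StringValue: "pon-0"}},
			"enabled": {Kind: &structpb.Value_BoolValue{BoolValue: true}},
			"ports": {Kind: &structpb.Value_ListValue{ListValue: &structpb.ListValue{
				Values: []*structpb.Value{
					{Kind: &structpb.Value_NumberValue{NumberValue: 1}},
					{Kind: &structpb.Value_NumberValue{NumberValue: 2}},
				},
			}}},
		},
	}

	// The generated getters return the zero value when a different oneof
	// member is set, so they are safe to call unconditionally.
	fmt.Println(s.Fields["name"].GetStringValue())                 // "pon-0"
	fmt.Println(s.Fields["enabled"].GetBoolValue())                // true
	fmt.Println(len(s.Fields["ports"].GetListValue().GetValues())) // 2
}
// --- end illustrative example ---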
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
new file mode 100644
index 0000000..7d7808e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
@@ -0,0 +1,96 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "StructProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+message Struct {
+ // Unordered map of dynamically typed values.
+ map<string, Value> fields = 1;
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of that
+// variants, absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+message Value {
+ // The kind of value.
+ oneof kind {
+ // Represents a null value.
+ NullValue null_value = 1;
+ // Represents a double value.
+ double number_value = 2;
+ // Represents a string value.
+ string string_value = 3;
+ // Represents a boolean value.
+ bool bool_value = 4;
+ // Represents a structured value.
+ Struct struct_value = 5;
+ // Represents a repeated `Value`.
+ ListValue list_value = 6;
+ }
+}
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+enum NullValue {
+ // Null value.
+ NULL_VALUE = 0;
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+message ListValue {
+ // Repeated field of dynamically typed values.
+ repeated Value values = 1;
+}
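
Editor's note: the comments above describe a direct JSON mapping (Struct to JSON object, Value to any JSON value, NullValue to null, ListValue to array). The round-trip sketch below assumes the companion github.com/golang/protobuf/jsonpb package is also available in the build; the document keys and values are illustrative.

// --- illustrative example, not part of the diff ---
package main

import (
	"fmt"

	"github.com/golang/protobuf/jsonpb"
	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	const doc = `{"tech": "XGS-PON", "tp_id": 64, "enabled": true, "extra": null}`

	// jsonpb maps a JSON object onto Struct and each JSON
	// string/number/bool/null onto the matching Value kind.
	var s structpb.Struct
	if err := jsonpb.UnmarshalString(doc, &s); err != nil {
		panic(err)
	}
	fmt.Println(s.Fields["tech"].GetStringValue())  // "XGS-PON"
	fmt.Println(s.Fields["tp_id"].GetNumberValue()) // 64

	// Marshalling goes back to the plain JSON object form.
	out, err := (&jsonpb.Marshaler{}).MarshalToString(&s)
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}
// --- end illustrative example ---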
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/adapter_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/adapter_proxy.go
deleted file mode 100644
index 9ade0d1..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/adapter_proxy.go
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package common
-
-import (
- "context"
- "github.com/opencord/voltha-lib-go/v4/pkg/db"
-
- "github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes"
- "github.com/golang/protobuf/ptypes/any"
- "github.com/google/uuid"
- "github.com/opencord/voltha-lib-go/v4/pkg/kafka"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
- ic "github.com/opencord/voltha-protos/v4/go/inter_container"
-)
-
-type AdapterProxy struct {
- kafkaICProxy kafka.InterContainerProxy
- coreTopic string
- endpointMgr kafka.EndpointManager
-}
-
-func NewAdapterProxy(ctx context.Context, kafkaProxy kafka.InterContainerProxy, coreTopic string, backend *db.Backend) *AdapterProxy {
- proxy := AdapterProxy{
- kafkaICProxy: kafkaProxy,
- coreTopic: coreTopic,
- endpointMgr: kafka.NewEndpointManager(backend),
- }
- logger.Debugw(ctx, "topics", log.Fields{"core": proxy.coreTopic})
- return &proxy
-}
-
-func (ap *AdapterProxy) SendInterAdapterMessage(ctx context.Context,
- msg proto.Message,
- msgType ic.InterAdapterMessageType_Types,
- fromAdapter string,
- toAdapter string,
- toDeviceId string,
- proxyDeviceId string,
- messageId string) error {
- logger.Debugw(ctx, "sending-inter-adapter-message", log.Fields{"type": msgType, "from": fromAdapter,
- "to": toAdapter, "toDevice": toDeviceId, "proxyDevice": proxyDeviceId})
-
- //Marshal the message
- var marshalledMsg *any.Any
- var err error
- if marshalledMsg, err = ptypes.MarshalAny(msg); err != nil {
- logger.Warnw(ctx, "cannot-marshal-msg", log.Fields{"error": err})
- return err
- }
-
- // Set up the required rpc arguments
- endpoint, err := ap.endpointMgr.GetEndpoint(ctx, toDeviceId, toAdapter)
- if err != nil {
- return err
- }
-
- //Build the inter adapter message
- header := &ic.InterAdapterHeader{
- Type: msgType,
- FromTopic: fromAdapter,
- ToTopic: string(endpoint),
- ToDeviceId: toDeviceId,
- ProxyDeviceId: proxyDeviceId,
- }
- if messageId != "" {
- header.Id = messageId
- } else {
- header.Id = uuid.New().String()
- }
- header.Timestamp = ptypes.TimestampNow()
- iaMsg := &ic.InterAdapterMessage{
- Header: header,
- Body: marshalledMsg,
- }
- args := make([]*kafka.KVArg, 1)
- args[0] = &kafka.KVArg{
- Key: "msg",
- Value: iaMsg,
- }
-
- topic := kafka.Topic{Name: string(endpoint)}
- replyToTopic := kafka.Topic{Name: fromAdapter}
- rpc := "process_inter_adapter_message"
-
- // Add a indication in context to differentiate this Inter Adapter message during Span processing in Kafka IC proxy
- ctx = context.WithValue(ctx, "inter-adapter-msg-type", msgType)
- success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, proxyDeviceId, args...)
- logger.Debugw(ctx, "inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
- return unPackResponse(ctx, rpc, "", success, result)
-}
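
Editor's note: the deleted proxy above marshals the typed payload into a google.protobuf.Any before invoking the Kafka RPC. The sketch below reduces that wrapping step to plain golang/protobuf calls on a self-contained message; the Struct body and its field name are stand-ins, not VOLTHA types.

// --- illustrative example, not part of the diff ---
package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	// Any concrete proto.Message can stand in for the inter-adapter body;
	// a Struct is used here only because it is vendored by this change.
	body := &structpb.Struct{
		Fields: map[string]*structpb.Value{
			"onu_id": {Kind: &structpb.Value_NumberValue{NumberValue: 1}},
		},
	}

	// Wrap the typed message into a google.protobuf.Any, as the deleted
	// SendInterAdapterMessage did before handing the message to Kafka.
	anyMsg, err := ptypes.MarshalAny(body)
	if err != nil {
		panic(err)
	}
	fmt.Println(anyMsg.TypeUrl) // type.googleapis.com/google.protobuf.Struct

	// The receiving side unwraps it back into the concrete type.
	var decoded structpb.Struct
	if err := ptypes.UnmarshalAny(anyMsg, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Fields["onu_id"].GetNumberValue()) // 1
}
// --- end illustrative example ---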
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/etcdclient.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/etcdclient.go
deleted file mode 100644
index 98f0559..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/etcdclient.go
+++ /dev/null
@@ -1,506 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package kvstore
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
- "time"
-
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
- v3Client "go.etcd.io/etcd/clientv3"
-
- v3Concurrency "go.etcd.io/etcd/clientv3/concurrency"
- v3rpcTypes "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
-)
-
-// EtcdClient represents the Etcd KV store client
-type EtcdClient struct {
- ectdAPI *v3Client.Client
- keyReservations map[string]*v3Client.LeaseID
- watchedChannels sync.Map
- keyReservationsLock sync.RWMutex
- lockToMutexMap map[string]*v3Concurrency.Mutex
- lockToSessionMap map[string]*v3Concurrency.Session
- lockToMutexLock sync.Mutex
-}
-
-// NewEtcdCustomClient returns a new client for the Etcd KV store allowing
-// the called to specify etcd client configuration
-func NewEtcdCustomClient(ctx context.Context, config *v3Client.Config) (*EtcdClient, error) {
- c, err := v3Client.New(*config)
- if err != nil {
- logger.Error(ctx, err)
- return nil, err
- }
-
- reservations := make(map[string]*v3Client.LeaseID)
- lockMutexMap := make(map[string]*v3Concurrency.Mutex)
- lockSessionMap := make(map[string]*v3Concurrency.Session)
-
- return &EtcdClient{ectdAPI: c, keyReservations: reservations, lockToMutexMap: lockMutexMap,
- lockToSessionMap: lockSessionMap}, nil
-}
-
-// NewEtcdClient returns a new client for the Etcd KV store
-func NewEtcdClient(ctx context.Context, addr string, timeout time.Duration, level log.LogLevel) (*EtcdClient, error) {
- logconfig := log.ConstructZapConfig(log.JSON, level, log.Fields{})
-
- return NewEtcdCustomClient(
- ctx,
- &v3Client.Config{
- Endpoints: []string{addr},
- DialTimeout: timeout,
- LogConfig: &logconfig})
-}
-
-// IsConnectionUp returns whether the connection to the Etcd KV store is up. If a timeout occurs then
-// it is assumed the connection is down or unreachable.
-func (c *EtcdClient) IsConnectionUp(ctx context.Context) bool {
- // Let's try to get a non existent key. If the connection is up then there will be no error returned.
- if _, err := c.Get(ctx, "non-existent-key"); err != nil {
- return false
- }
- //cancel()
- return true
-}
-
-// List returns an array of key-value pairs with key as a prefix. Timeout defines how long the function will
-// wait for a response
-func (c *EtcdClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
- resp, err := c.ectdAPI.Get(ctx, key, v3Client.WithPrefix())
- if err != nil {
- logger.Error(ctx, err)
- return nil, err
- }
- m := make(map[string]*KVPair)
- for _, ev := range resp.Kvs {
- m[string(ev.Key)] = NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version)
- }
- return m, nil
-}
-
-// Get returns a key-value pair for a given key. Timeout defines how long the function will
-// wait for a response
-func (c *EtcdClient) Get(ctx context.Context, key string) (*KVPair, error) {
-
- resp, err := c.ectdAPI.Get(ctx, key)
-
- if err != nil {
- logger.Error(ctx, err)
- return nil, err
- }
- for _, ev := range resp.Kvs {
- // Only one value is returned
- return NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version), nil
- }
- return nil, nil
-}
-
-// Put writes a key-value pair to the KV store. Value can only be a string or []byte since the etcd API
-// accepts only a string as a value for a put operation. Timeout defines how long the function will
-// wait for a response
-func (c *EtcdClient) Put(ctx context.Context, key string, value interface{}) error {
-
- // Validate that we can convert value to a string as etcd API expects a string
- var val string
- var er error
- if val, er = ToString(value); er != nil {
- return fmt.Errorf("unexpected-type-%T", value)
- }
-
- var err error
- // Check if there is already a lease for this key - if there is then use it, otherwise a PUT will make
- // that KV key permanent instead of automatically removing it after a lease expiration
- c.keyReservationsLock.RLock()
- leaseID, ok := c.keyReservations[key]
- c.keyReservationsLock.RUnlock()
- if ok {
- _, err = c.ectdAPI.Put(ctx, key, val, v3Client.WithLease(*leaseID))
- } else {
- _, err = c.ectdAPI.Put(ctx, key, val)
- }
-
- if err != nil {
- switch err {
- case context.Canceled:
- logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
- case context.DeadlineExceeded:
- logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err})
- case v3rpcTypes.ErrEmptyKey:
- logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
- default:
- logger.Warnw(ctx, "bad-endpoints", log.Fields{"error": err})
- }
- return err
- }
- return nil
-}
-
-// Delete removes a key from the KV store. Timeout defines how long the function will
-// wait for a response
-func (c *EtcdClient) Delete(ctx context.Context, key string) error {
-
- // delete the key
- if _, err := c.ectdAPI.Delete(ctx, key); err != nil {
- logger.Errorw(ctx, "failed-to-delete-key", log.Fields{"key": key, "error": err})
- return err
- }
- logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": key})
- return nil
-}
-
-func (c *EtcdClient) DeleteWithPrefix(ctx context.Context, prefixKey string) error {
-
- //delete the prefix
- if _, err := c.ectdAPI.Delete(ctx, prefixKey, v3Client.WithPrefix()); err != nil {
- logger.Errorw(ctx, "failed-to-delete-prefix-key", log.Fields{"key": prefixKey, "error": err})
- return err
- }
- logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": prefixKey})
- return nil
-}
-
-// Reserve is invoked to acquire a key and set it to a given value. Value can only be a string or []byte since
-// the etcd API accepts only a string. Timeout defines how long the function will wait for a response. TTL
-// defines how long that reservation is valid. When TTL expires the key is unreserved by the KV store itself.
-// If the key is acquired then the value returned will be the value passed in. If the key is already acquired
-// then the value assigned to that key will be returned.
-func (c *EtcdClient) Reserve(ctx context.Context, key string, value interface{}, ttl time.Duration) (interface{}, error) {
- // Validate that we can convert value to a string as etcd API expects a string
- var val string
- var er error
- if val, er = ToString(value); er != nil {
- return nil, fmt.Errorf("unexpected-type%T", value)
- }
-
- resp, err := c.ectdAPI.Grant(ctx, int64(ttl.Seconds()))
- if err != nil {
- logger.Error(ctx, err)
- return nil, err
- }
- // Register the lease id
- c.keyReservationsLock.Lock()
- c.keyReservations[key] = &resp.ID
- c.keyReservationsLock.Unlock()
-
- // Revoke lease if reservation is not successful
- reservationSuccessful := false
- defer func() {
- if !reservationSuccessful {
- if err = c.ReleaseReservation(context.Background(), key); err != nil {
- logger.Error(ctx, "cannot-release-lease")
- }
- }
- }()
-
- // Try to grap the Key with the above lease
- c.ectdAPI.Txn(context.Background())
- txn := c.ectdAPI.Txn(context.Background())
- txn = txn.If(v3Client.Compare(v3Client.Version(key), "=", 0))
- txn = txn.Then(v3Client.OpPut(key, val, v3Client.WithLease(resp.ID)))
- txn = txn.Else(v3Client.OpGet(key))
- result, er := txn.Commit()
- if er != nil {
- return nil, er
- }
-
- if !result.Succeeded {
- // Verify whether we are already the owner of that Key
- if len(result.Responses) > 0 &&
- len(result.Responses[0].GetResponseRange().Kvs) > 0 {
- kv := result.Responses[0].GetResponseRange().Kvs[0]
- if string(kv.Value) == val {
- reservationSuccessful = true
- return value, nil
- }
- return kv.Value, nil
- }
- } else {
- // Read the Key to ensure this is our Key
- m, err := c.Get(ctx, key)
- if err != nil {
- return nil, err
- }
- if m != nil {
- if m.Key == key && isEqual(m.Value, value) {
- // My reservation is successful - register it. For now, support is only for 1 reservation per key
- // per session.
- reservationSuccessful = true
- return value, nil
- }
- // My reservation has failed. Return the owner of that key
- return m.Value, nil
- }
- }
- return nil, nil
-}
-
-// ReleaseAllReservations releases all key reservations previously made (using Reserve API)
-func (c *EtcdClient) ReleaseAllReservations(ctx context.Context) error {
- c.keyReservationsLock.Lock()
- defer c.keyReservationsLock.Unlock()
-
- for key, leaseID := range c.keyReservations {
- _, err := c.ectdAPI.Revoke(ctx, *leaseID)
- if err != nil {
- logger.Errorw(ctx, "cannot-release-reservation", log.Fields{"key": key, "error": err})
- return err
- }
- delete(c.keyReservations, key)
- }
- return nil
-}
-
-// ReleaseReservation releases reservation for a specific key.
-func (c *EtcdClient) ReleaseReservation(ctx context.Context, key string) error {
- // Get the leaseid using the key
- logger.Debugw(ctx, "Release-reservation", log.Fields{"key": key})
- var ok bool
- var leaseID *v3Client.LeaseID
- c.keyReservationsLock.Lock()
- defer c.keyReservationsLock.Unlock()
- if leaseID, ok = c.keyReservations[key]; !ok {
- return nil
- }
-
- if leaseID != nil {
- _, err := c.ectdAPI.Revoke(ctx, *leaseID)
- if err != nil {
- logger.Error(ctx, err)
- return err
- }
- delete(c.keyReservations, key)
- }
- return nil
-}
-
-// RenewReservation renews a reservation. A reservation will go stale after the specified TTL (Time To Live)
-// period specified when reserving the key
-func (c *EtcdClient) RenewReservation(ctx context.Context, key string) error {
- // Get the leaseid using the key
- var ok bool
- var leaseID *v3Client.LeaseID
- c.keyReservationsLock.RLock()
- leaseID, ok = c.keyReservations[key]
- c.keyReservationsLock.RUnlock()
-
- if !ok {
- return errors.New("key-not-reserved")
- }
-
- if leaseID != nil {
- _, err := c.ectdAPI.KeepAliveOnce(ctx, *leaseID)
- if err != nil {
- logger.Errorw(ctx, "lease-may-have-expired", log.Fields{"error": err})
- return err
- }
- } else {
- return errors.New("lease-expired")
- }
- return nil
-}
-
-// Watch provides the watch capability on a given key. It returns a channel onto which the callee needs to
-// listen to receive Events.
-func (c *EtcdClient) Watch(ctx context.Context, key string, withPrefix bool) chan *Event {
- w := v3Client.NewWatcher(c.ectdAPI)
- ctx, cancel := context.WithCancel(ctx)
- var channel v3Client.WatchChan
- if withPrefix {
- channel = w.Watch(ctx, key, v3Client.WithPrefix())
- } else {
- channel = w.Watch(ctx, key)
- }
-
- // Create a new channel
- ch := make(chan *Event, maxClientChannelBufferSize)
-
- // Keep track of the created channels so they can be closed when required
- channelMap := make(map[chan *Event]v3Client.Watcher)
- channelMap[ch] = w
-
- channelMaps := c.addChannelMap(key, channelMap)
-
- // Changing the log field (from channelMaps) as the underlying logger cannot format the map of channels into a
- // json format.
- logger.Debugw(ctx, "watched-channels", log.Fields{"len": len(channelMaps)})
- // Launch a go routine to listen for updates
- go c.listenForKeyChange(ctx, channel, ch, cancel)
-
- return ch
-
-}
-
-func (c *EtcdClient) addChannelMap(key string, channelMap map[chan *Event]v3Client.Watcher) []map[chan *Event]v3Client.Watcher {
- var channels interface{}
- var exists bool
-
- if channels, exists = c.watchedChannels.Load(key); exists {
- channels = append(channels.([]map[chan *Event]v3Client.Watcher), channelMap)
- } else {
- channels = []map[chan *Event]v3Client.Watcher{channelMap}
- }
- c.watchedChannels.Store(key, channels)
-
- return channels.([]map[chan *Event]v3Client.Watcher)
-}
-
-func (c *EtcdClient) removeChannelMap(key string, pos int) []map[chan *Event]v3Client.Watcher {
- var channels interface{}
- var exists bool
-
- if channels, exists = c.watchedChannels.Load(key); exists {
- channels = append(channels.([]map[chan *Event]v3Client.Watcher)[:pos], channels.([]map[chan *Event]v3Client.Watcher)[pos+1:]...)
- c.watchedChannels.Store(key, channels)
- }
-
- return channels.([]map[chan *Event]v3Client.Watcher)
-}
-
-func (c *EtcdClient) getChannelMaps(key string) ([]map[chan *Event]v3Client.Watcher, bool) {
- var channels interface{}
- var exists bool
-
- channels, exists = c.watchedChannels.Load(key)
-
- if channels == nil {
- return nil, exists
- }
-
- return channels.([]map[chan *Event]v3Client.Watcher), exists
-}
-
-// CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
-// may be multiple listeners on the same key. The previously created channel serves as a key
-func (c *EtcdClient) CloseWatch(ctx context.Context, key string, ch chan *Event) {
- // Get the array of channels mapping
- var watchedChannels []map[chan *Event]v3Client.Watcher
- var ok bool
-
- if watchedChannels, ok = c.getChannelMaps(key); !ok {
- logger.Warnw(ctx, "key-has-no-watched-channels", log.Fields{"key": key})
- return
- }
- // Look for the channels
- var pos = -1
- for i, chMap := range watchedChannels {
- if t, ok := chMap[ch]; ok {
- logger.Debug(ctx, "channel-found")
- // Close the etcd watcher before the client channel. This should close the etcd channel as well
- if err := t.Close(); err != nil {
- logger.Errorw(ctx, "watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
- }
- pos = i
- break
- }
- }
-
- channelMaps, _ := c.getChannelMaps(key)
- // Remove that entry if present
- if pos >= 0 {
- channelMaps = c.removeChannelMap(key, pos)
- }
- logger.Infow(ctx, "watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
-}
-
-func (c *EtcdClient) listenForKeyChange(ctx context.Context, channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
- logger.Debug(ctx, "start-listening-on-channel ...")
- defer cancel()
- defer close(ch)
- for resp := range channel {
- for _, ev := range resp.Events {
- ch <- NewEvent(getEventType(ev), ev.Kv.Key, ev.Kv.Value, ev.Kv.Version)
- }
- }
- logger.Debug(ctx, "stop-listening-on-channel ...")
-}
-
-func getEventType(event *v3Client.Event) int {
- switch event.Type {
- case v3Client.EventTypePut:
- return PUT
- case v3Client.EventTypeDelete:
- return DELETE
- }
- return UNKNOWN
-}
-
-// Close closes the KV store client
-func (c *EtcdClient) Close(ctx context.Context) {
- if err := c.ectdAPI.Close(); err != nil {
- logger.Errorw(ctx, "error-closing-client", log.Fields{"error": err})
- }
-}
-
-func (c *EtcdClient) addLockName(lockName string, lock *v3Concurrency.Mutex, session *v3Concurrency.Session) {
- c.lockToMutexLock.Lock()
- defer c.lockToMutexLock.Unlock()
- c.lockToMutexMap[lockName] = lock
- c.lockToSessionMap[lockName] = session
-}
-
-func (c *EtcdClient) deleteLockName(lockName string) {
- c.lockToMutexLock.Lock()
- defer c.lockToMutexLock.Unlock()
- delete(c.lockToMutexMap, lockName)
- delete(c.lockToSessionMap, lockName)
-}
-
-func (c *EtcdClient) getLock(lockName string) (*v3Concurrency.Mutex, *v3Concurrency.Session) {
- c.lockToMutexLock.Lock()
- defer c.lockToMutexLock.Unlock()
- var lock *v3Concurrency.Mutex
- var session *v3Concurrency.Session
- if l, exist := c.lockToMutexMap[lockName]; exist {
- lock = l
- }
- if s, exist := c.lockToSessionMap[lockName]; exist {
- session = s
- }
- return lock, session
-}
-
-func (c *EtcdClient) AcquireLock(ctx context.Context, lockName string, timeout time.Duration) error {
- session, _ := v3Concurrency.NewSession(c.ectdAPI, v3Concurrency.WithContext(ctx))
- mu := v3Concurrency.NewMutex(session, "/devicelock_"+lockName)
- if err := mu.Lock(context.Background()); err != nil {
- //cancel()
- return err
- }
- c.addLockName(lockName, mu, session)
- return nil
-}
-
-func (c *EtcdClient) ReleaseLock(lockName string) error {
- lock, session := c.getLock(lockName)
- var err error
- if lock != nil {
- if e := lock.Unlock(context.Background()); e != nil {
- err = e
- }
- }
- if session != nil {
- if e := session.Close(); e != nil {
- err = e
- }
- }
- c.deleteLockName(lockName)
-
- return err
-}
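
Editor's note: the Reserve method in the deleted client above grants a lease and then writes the key inside a transaction guarded by Version(key) == 0, so the reservation fails cleanly when another owner already holds the key and expires automatically with the lease. Below is a stand-alone sketch of that pattern against go.etcd.io/etcd/clientv3; the endpoint, key and TTL are illustrative assumptions.

// --- illustrative example, not part of the diff ---
package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Grant a lease; the reserved key disappears when the TTL expires.
	lease, err := cli.Grant(ctx, 10)
	if err != nil {
		panic(err)
	}

	const key = "service/voltha/reservations/onu-1" // illustrative key

	// Put the key only if its version is 0 (it does not exist yet);
	// otherwise read back the current owner's value.
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.Version(key), "=", 0)).
		Then(clientv3.OpPut(key, "adapter-a", clientv3.WithLease(lease.ID))).
		Else(clientv3.OpGet(key)).
		Commit()
	if err != nil {
		panic(err)
	}

	if resp.Succeeded {
		fmt.Println("reservation acquired")
	} else if kvs := resp.Responses[0].GetResponseRange().Kvs; len(kvs) > 0 {
		fmt.Printf("already reserved by %q\n", kvs[0].Value)
	}
}
// --- end illustrative example ---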
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/tech_profile.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/tech_profile.go
deleted file mode 100644
index 2d2332d..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/tech_profile.go
+++ /dev/null
@@ -1,1455 +0,0 @@
-/*
- * Copyright 2019-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package techprofile
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "regexp"
- "strconv"
- "sync"
- "time"
-
- "github.com/opencord/voltha-lib-go/v4/pkg/db"
-
- "github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
- tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
-)
-
-// Interface to pon resource manager APIs
-type iPonResourceMgr interface {
- GetResourceID(ctx context.Context, IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error)
- FreeResourceID(ctx context.Context, IntfID uint32, ResourceType string, ReleaseContent []uint32) error
- GetResourceTypeAllocID() string
- GetResourceTypeGemPortID() string
- GetResourceTypeOnuID() string
- GetTechnology() string
-}
-
-type Direction int32
-
-const (
- Direction_UPSTREAM Direction = 0
- Direction_DOWNSTREAM Direction = 1
- Direction_BIDIRECTIONAL Direction = 2
-)
-
-var Direction_name = map[Direction]string{
- 0: "UPSTREAM",
- 1: "DOWNSTREAM",
- 2: "BIDIRECTIONAL",
-}
-
-type SchedulingPolicy int32
-
-const (
- SchedulingPolicy_WRR SchedulingPolicy = 0
- SchedulingPolicy_StrictPriority SchedulingPolicy = 1
- SchedulingPolicy_Hybrid SchedulingPolicy = 2
-)
-
-var SchedulingPolicy_name = map[SchedulingPolicy]string{
- 0: "WRR",
- 1: "StrictPriority",
- 2: "Hybrid",
-}
-
-type AdditionalBW int32
-
-const (
- AdditionalBW_AdditionalBW_None AdditionalBW = 0
- AdditionalBW_AdditionalBW_NA AdditionalBW = 1
- AdditionalBW_AdditionalBW_BestEffort AdditionalBW = 2
- AdditionalBW_AdditionalBW_Auto AdditionalBW = 3
-)
-
-var AdditionalBW_name = map[AdditionalBW]string{
- 0: "AdditionalBW_None",
- 1: "AdditionalBW_NA",
- 2: "AdditionalBW_BestEffort",
- 3: "AdditionalBW_Auto",
-}
-
-type DiscardPolicy int32
-
-const (
- DiscardPolicy_TailDrop DiscardPolicy = 0
- DiscardPolicy_WTailDrop DiscardPolicy = 1
- DiscardPolicy_Red DiscardPolicy = 2
- DiscardPolicy_WRed DiscardPolicy = 3
-)
-
-var DiscardPolicy_name = map[DiscardPolicy]string{
- 0: "TailDrop",
- 1: "WTailDrop",
- 2: "Red",
- 3: "WRed",
-}
-
-// Required uniPortName format
-var uniPortNameFormat = regexp.MustCompile(`^olt-{[a-z0-9\-]+}/pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}$`)
-
-/*
- type InferredAdditionBWIndication int32
-
- const (
- InferredAdditionBWIndication_InferredAdditionBWIndication_None InferredAdditionBWIndication = 0
- InferredAdditionBWIndication_InferredAdditionBWIndication_Assured InferredAdditionBWIndication = 1
- InferredAdditionBWIndication_InferredAdditionBWIndication_BestEffort InferredAdditionBWIndication = 2
- )
-
- var InferredAdditionBWIndication_name = map[int32]string{
- 0: "InferredAdditionBWIndication_None",
- 1: "InferredAdditionBWIndication_Assured",
- 2: "InferredAdditionBWIndication_BestEffort",
- }
-*/
-// instance control defaults
-const (
- defaultOnuInstance = "multi-instance"
- defaultUniInstance = "single-instance"
- defaultGemPayloadSize = "auto"
-)
-
-const MAX_GEM_PAYLOAD = "max_gem_payload_size"
-
-type InstanceControl struct {
- Onu string `json:"ONU"`
- Uni string `json:"uni"`
- MaxGemPayloadSize string `json:"max_gem_payload_size"`
-}
-
-// default discard config constants
-const (
- defaultMinThreshold = 0
- defaultMaxThreshold = 0
- defaultMaxProbability = 0
-)
-
-type DiscardConfig struct {
- MinThreshold int `json:"min_threshold"`
- MaxThreshold int `json:"max_threshold"`
- MaxProbability int `json:"max_probability"`
-}
-
-// default scheduler contants
-const (
- defaultAdditionalBw = AdditionalBW_AdditionalBW_BestEffort
- defaultPriority = 0
- defaultWeight = 0
- defaultQueueSchedPolicy = SchedulingPolicy_Hybrid
-)
-
-type Scheduler struct {
- Direction string `json:"direction"`
- AdditionalBw string `json:"additional_bw"`
- Priority uint32 `json:"priority"`
- Weight uint32 `json:"weight"`
- QSchedPolicy string `json:"q_sched_policy"`
-}
-
-// default GEM attribute constants
-const (
- defaultAESEncryption = "True"
- defaultPriorityQueue = 0
- defaultQueueWeight = 0
- defaultMaxQueueSize = "auto"
- defaultdropPolicy = DiscardPolicy_TailDrop
- defaultSchedulePolicy = SchedulingPolicy_WRR
- defaultIsMulticast = "False"
- defaultAccessControlList = "224.0.0.0-239.255.255.255"
- defaultMcastGemID = 4069
-)
-
-type GemPortAttribute struct {
- MaxQueueSize string `json:"max_q_size"`
- PbitMap string `json:"pbit_map"`
- AesEncryption string `json:"aes_encryption"`
- SchedulingPolicy string `json:"scheduling_policy"`
- PriorityQueue uint32 `json:"priority_q"`
- Weight uint32 `json:"weight"`
- DiscardPolicy string `json:"discard_policy"`
- DiscardConfig DiscardConfig `json:"discard_config"`
- IsMulticast string `json:"is_multicast"`
- DControlList string `json:"dynamic_access_control_list"`
- SControlList string `json:"static_access_control_list"`
- McastGemID uint32 `json:"multicast_gem_id"`
-}
-
-// Instance of Scheduler
-type IScheduler struct {
- AllocID uint32 `json:"alloc_id"`
- Direction string `json:"direction"`
- AdditionalBw string `json:"additional_bw"`
- Priority uint32 `json:"priority"`
- Weight uint32 `json:"weight"`
- QSchedPolicy string `json:"q_sched_policy"`
-}
-
-// Instance of GemPortAttribute
-type IGemPortAttribute struct {
- GemportID uint32 `json:"gemport_id"`
- MaxQueueSize string `json:"max_q_size"`
- PbitMap string `json:"pbit_map"`
- AesEncryption string `json:"aes_encryption"`
- SchedulingPolicy string `json:"scheduling_policy"`
- PriorityQueue uint32 `json:"priority_q"`
- Weight uint32 `json:"weight"`
- DiscardPolicy string `json:"discard_policy"`
- DiscardConfig DiscardConfig `json:"discard_config"`
- IsMulticast string `json:"is_multicast"`
- DControlList string `json:"dynamic_access_control_list"`
- SControlList string `json:"static_access_control_list"`
- McastGemID uint32 `json:"multicast_gem_id"`
-}
-
-type TechProfileMgr struct {
- config *TechProfileFlags
- resourceMgr iPonResourceMgr
- OnuIDMgmtLock sync.RWMutex
- GemPortIDMgmtLock sync.RWMutex
- AllocIDMgmtLock sync.RWMutex
-}
-type DefaultTechProfile struct {
- Name string `json:"name"`
- ProfileType string `json:"profile_type"`
- Version int `json:"version"`
- NumGemPorts uint32 `json:"num_gem_ports"`
- InstanceCtrl InstanceControl `json:"instance_control"`
- UsScheduler Scheduler `json:"us_scheduler"`
- DsScheduler Scheduler `json:"ds_scheduler"`
- UpstreamGemPortAttributeList []GemPortAttribute `json:"upstream_gem_port_attribute_list"`
- DownstreamGemPortAttributeList []GemPortAttribute `json:"downstream_gem_port_attribute_list"`
-}
-type TechProfile struct {
- Name string `json:"name"`
- SubscriberIdentifier string `json:"subscriber_identifier"`
- ProfileType string `json:"profile_type"`
- Version int `json:"version"`
- NumGemPorts uint32 `json:"num_gem_ports"`
- InstanceCtrl InstanceControl `json:"instance_control"`
- UsScheduler IScheduler `json:"us_scheduler"`
- DsScheduler IScheduler `json:"ds_scheduler"`
- UpstreamGemPortAttributeList []IGemPortAttribute `json:"upstream_gem_port_attribute_list"`
- DownstreamGemPortAttributeList []IGemPortAttribute `json:"downstream_gem_port_attribute_list"`
-}
-
-// QThresholds struct for EPON
-type QThresholds struct {
- QThreshold1 uint32 `json:"q_threshold1"`
- QThreshold2 uint32 `json:"q_threshold2"`
- QThreshold3 uint32 `json:"q_threshold3"`
- QThreshold4 uint32 `json:"q_threshold4"`
- QThreshold5 uint32 `json:"q_threshold5"`
- QThreshold6 uint32 `json:"q_threshold6"`
- QThreshold7 uint32 `json:"q_threshold7"`
-}
-
-// UpstreamQueueAttribute struct for EPON
-type UpstreamQueueAttribute struct {
- MaxQueueSize string `json:"max_q_size"`
- PbitMap string `json:"pbit_map"`
- AesEncryption string `json:"aes_encryption"`
- TrafficType string `json:"traffic_type"`
- UnsolicitedGrantSize uint32 `json:"unsolicited_grant_size"`
- NominalInterval uint32 `json:"nominal_interval"`
- ToleratedPollJitter uint32 `json:"tolerated_poll_jitter"`
- RequestTransmissionPolicy uint32 `json:"request_transmission_policy"`
- NumQueueSet uint32 `json:"num_q_sets"`
- QThresholds QThresholds `json:"q_thresholds"`
- SchedulingPolicy string `json:"scheduling_policy"`
- PriorityQueue uint32 `json:"priority_q"`
- Weight uint32 `json:"weight"`
- DiscardPolicy string `json:"discard_policy"`
- DiscardConfig DiscardConfig `json:"discard_config"`
-}
-
-// Default EPON constants
-const (
- defaultPakageType = "B"
-)
-const (
- defaultTrafficType = "BE"
- defaultUnsolicitedGrantSize = 0
- defaultNominalInterval = 0
- defaultToleratedPollJitter = 0
- defaultRequestTransmissionPolicy = 0
- defaultNumQueueSet = 2
-)
-const (
- defaultQThreshold1 = 5500
- defaultQThreshold2 = 0
- defaultQThreshold3 = 0
- defaultQThreshold4 = 0
- defaultQThreshold5 = 0
- defaultQThreshold6 = 0
- defaultQThreshold7 = 0
-)
-
-// DownstreamQueueAttribute struct for EPON
-type DownstreamQueueAttribute struct {
- MaxQueueSize string `json:"max_q_size"`
- PbitMap string `json:"pbit_map"`
- AesEncryption string `json:"aes_encryption"`
- SchedulingPolicy string `json:"scheduling_policy"`
- PriorityQueue uint32 `json:"priority_q"`
- Weight uint32 `json:"weight"`
- DiscardPolicy string `json:"discard_policy"`
- DiscardConfig DiscardConfig `json:"discard_config"`
-}
-
-// iUpstreamQueueAttribute struct for EPON
-type iUpstreamQueueAttribute struct {
- GemportID uint32 `json:"q_id"`
- MaxQueueSize string `json:"max_q_size"`
- PbitMap string `json:"pbit_map"`
- AesEncryption string `json:"aes_encryption"`
- TrafficType string `json:"traffic_type"`
- UnsolicitedGrantSize uint32 `json:"unsolicited_grant_size"`
- NominalInterval uint32 `json:"nominal_interval"`
- ToleratedPollJitter uint32 `json:"tolerated_poll_jitter"`
- RequestTransmissionPolicy uint32 `json:"request_transmission_policy"`
- NumQueueSet uint32 `json:"num_q_sets"`
- QThresholds QThresholds `json:"q_thresholds"`
- SchedulingPolicy string `json:"scheduling_policy"`
- PriorityQueue uint32 `json:"priority_q"`
- Weight uint32 `json:"weight"`
- DiscardPolicy string `json:"discard_policy"`
- DiscardConfig DiscardConfig `json:"discard_config"`
-}
-
-// iDownstreamQueueAttribute struct for EPON
-type iDownstreamQueueAttribute struct {
- GemportID uint32 `json:"q_id"`
- MaxQueueSize string `json:"max_q_size"`
- PbitMap string `json:"pbit_map"`
- AesEncryption string `json:"aes_encryption"`
- SchedulingPolicy string `json:"scheduling_policy"`
- PriorityQueue uint32 `json:"priority_q"`
- Weight uint32 `json:"weight"`
- DiscardPolicy string `json:"discard_policy"`
- DiscardConfig DiscardConfig `json:"discard_config"`
-}
-
-// EponAttribute struct for EPON
-type EponAttribute struct {
- PackageType string `json:"pakage_type"`
-}
-
-// DefaultTechProfile struct for EPON
-type DefaultEponProfile struct {
- Name string `json:"name"`
- ProfileType string `json:"profile_type"`
- Version int `json:"version"`
- NumGemPorts uint32 `json:"num_gem_ports"`
- InstanceCtrl InstanceControl `json:"instance_control"`
- EponAttribute EponAttribute `json:"epon_attribute"`
- UpstreamQueueAttributeList []UpstreamQueueAttribute `json:"upstream_queue_attribute_list"`
- DownstreamQueueAttributeList []DownstreamQueueAttribute `json:"downstream_queue_attribute_list"`
-}
-
-// TechProfile struct for EPON
-type EponProfile struct {
- Name string `json:"name"`
- SubscriberIdentifier string `json:"subscriber_identifier"`
- ProfileType string `json:"profile_type"`
- Version int `json:"version"`
- NumGemPorts uint32 `json:"num_gem_ports"`
- InstanceCtrl InstanceControl `json:"instance_control"`
- EponAttribute EponAttribute `json:"epon_attribute"`
- AllocID uint32 `json:"llid"`
- UpstreamQueueAttributeList []iUpstreamQueueAttribute `json:"upstream_queue_attribute_list"`
- DownstreamQueueAttributeList []iDownstreamQueueAttribute `json:"downstream_queue_attribute_list"`
-}
-
-const (
- xgspon = "XGS-PON"
- gpon = "GPON"
- epon = "EPON"
-)
-
-func (t *TechProfileMgr) SetKVClient(ctx context.Context, pathPrefix string) *db.Backend {
- kvClient, err := newKVClient(ctx, t.config.KVStoreType, t.config.KVStoreAddress, t.config.KVStoreTimeout)
- if err != nil {
- logger.Errorw(ctx, "failed-to-create-kv-client",
- log.Fields{
- "type": t.config.KVStoreType, "address": t.config.KVStoreAddress,
- "timeout": t.config.KVStoreTimeout, "prefix": pathPrefix,
- "error": err.Error(),
- })
- return nil
- }
- return &db.Backend{
- Client: kvClient,
- StoreType: t.config.KVStoreType,
- Address: t.config.KVStoreAddress,
- Timeout: t.config.KVStoreTimeout,
- PathPrefix: pathPrefix}
-
- /* TODO : Make sure direct call to NewBackend is working fine with backend , currently there is some
- issue between kv store and backend , core is not calling NewBackend directly
- kv := model.NewBackend(t.config.KVStoreType, t.config.KVStoreHost, t.config.KVStorePort,
- t.config.KVStoreTimeout, kvStoreTechProfilePathPrefix)
- */
-}
-
-func newKVClient(ctx context.Context, storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
-
- logger.Infow(ctx, "kv-store", log.Fields{"storeType": storeType, "address": address})
- switch storeType {
- case "etcd":
- return kvstore.NewEtcdClient(ctx, address, timeout, log.WarnLevel)
- }
- return nil, errors.New("unsupported-kv-store")
-}
-
-func NewTechProfile(ctx context.Context, resourceMgr iPonResourceMgr, KVStoreType string, KVStoreAddress string, basePathKvStore string) (*TechProfileMgr, error) {
- var techprofileObj TechProfileMgr
- logger.Debug(ctx, "Initializing techprofile Manager")
- techprofileObj.config = NewTechProfileFlags(KVStoreType, KVStoreAddress, basePathKvStore)
- techprofileObj.config.KVBackend = techprofileObj.SetKVClient(ctx, techprofileObj.config.TPKVPathPrefix)
- techprofileObj.config.DefaultTpKVBackend = techprofileObj.SetKVClient(ctx, techprofileObj.config.defaultTpKvPathPrefix)
- if techprofileObj.config.KVBackend == nil {
- logger.Error(ctx, "Failed to initialize KV backend\n")
- return nil, errors.New("KV backend init failed")
- }
- techprofileObj.resourceMgr = resourceMgr
- logger.Debug(ctx, "Initializing techprofile object instance success")
- return &techprofileObj, nil
-}
-
-func (t *TechProfileMgr) GetTechProfileInstanceKVPath(ctx context.Context, techProfiletblID uint32, uniPortName string) string {
- logger.Debugw(ctx, "get-tp-instance-kv-path", log.Fields{
- "uniPortName": uniPortName,
- "tpId": techProfiletblID,
- })
- return fmt.Sprintf(t.config.TPInstanceKVPath, t.resourceMgr.GetTechnology(), techProfiletblID, uniPortName)
-}
-
-func (t *TechProfileMgr) GetTPInstanceFromKVStore(ctx context.Context, techProfiletblID uint32, path string) (interface{}, error) {
- var err error
- var kvResult *kvstore.KVPair
- var KvTpIns TechProfile
- var KvEponIns EponProfile
- var resPtr interface{}
- // For example:
- // tpInstPath like "XGS-PON/64/uni_port_name"
- // is broken into ["XGS-PON" "64" ...]
- pathSlice := regexp.MustCompile(`/`).Split(path, -1)
- switch pathSlice[0] {
- case xgspon, gpon:
- resPtr = &KvTpIns
- case epon:
- resPtr = &KvEponIns
- default:
- logger.Errorw(ctx, "unknown-tech", log.Fields{"tech": pathSlice[0]})
- return nil, fmt.Errorf("unknown-tech-%s", pathSlice[0])
- }
-
- kvResult, _ = t.config.KVBackend.Get(ctx, path)
- if kvResult == nil {
- logger.Infow(ctx, "tp-instance-not-found-on-kv", log.Fields{"key": path})
- return nil, nil
- } else {
- if value, err := kvstore.ToByte(kvResult.Value); err == nil {
- if err = json.Unmarshal(value, resPtr); err != nil {
- logger.Errorw(ctx, "error-unmarshal-kv-result", log.Fields{"key": path, "value": value})
- return nil, errors.New("error-unmarshal-kv-result")
- } else {
- return resPtr, nil
- }
- }
- }
- return nil, err
-}
-
-func (t *TechProfileMgr) addTechProfInstanceToKVStore(ctx context.Context, techProfiletblID uint32, uniPortName string, tpInstance *TechProfile) error {
- path := t.GetTechProfileInstanceKVPath(ctx, techProfiletblID, uniPortName)
- logger.Debugw(ctx, "Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
- tpInstanceJson, err := json.Marshal(*tpInstance)
- if err == nil {
- // Backend will convert JSON byte array into string format
- logger.Debugw(ctx, "Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
- err = t.config.KVBackend.Put(ctx, path, tpInstanceJson)
- } else {
- logger.Errorw(ctx, "Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
- }
- return err
-}
-
-func (t *TechProfileMgr) addEponProfInstanceToKVStore(ctx context.Context, techProfiletblID uint32, uniPortName string, tpInstance *EponProfile) error {
- path := t.GetTechProfileInstanceKVPath(ctx, techProfiletblID, uniPortName)
- logger.Debugw(ctx, "Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
- tpInstanceJson, err := json.Marshal(*tpInstance)
- if err == nil {
- // Backend will convert JSON byte array into string format
- logger.Debugw(ctx, "Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
- err = t.config.KVBackend.Put(ctx, path, tpInstanceJson)
- } else {
- logger.Errorw(ctx, "Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
- }
- return err
-}
-
-func (t *TechProfileMgr) getTPFromKVStore(ctx context.Context, techProfiletblID uint32) *DefaultTechProfile {
- var kvtechprofile DefaultTechProfile
- key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), techProfiletblID)
- logger.Debugw(ctx, "Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
- kvresult, err := t.config.DefaultTpKVBackend.Get(ctx, key)
- if err != nil {
- logger.Errorw(ctx, "Error while fetching value from KV store", log.Fields{"key": key})
- return nil
- }
- if kvresult != nil {
- /* Backend will return Value in string format,needs to be converted to []byte before unmarshal*/
- if value, err := kvstore.ToByte(kvresult.Value); err == nil {
- if err = json.Unmarshal(value, &kvtechprofile); err != nil {
- logger.Errorw(ctx, "Error unmarshaling techprofile fetched from KV store", log.Fields{"techProfiletblID": techProfiletblID, "error": err, "techprofile_json": value})
- return nil
- }
-
- logger.Debugw(ctx, "Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
- return &kvtechprofile
- }
- }
- return nil
-}
-
-func (t *TechProfileMgr) getEponTPFromKVStore(ctx context.Context, techProfiletblID uint32) *DefaultEponProfile {
- var kvtechprofile DefaultEponProfile
- key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), techProfiletblID)
- logger.Debugw(ctx, "Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
- kvresult, err := t.config.DefaultTpKVBackend.Get(ctx, key)
- if err != nil {
- logger.Errorw(ctx, "Error while fetching value from KV store", log.Fields{"key": key})
- return nil
- }
- if kvresult != nil {
- /* Backend will return Value in string format,needs to be converted to []byte before unmarshal*/
- if value, err := kvstore.ToByte(kvresult.Value); err == nil {
- if err = json.Unmarshal(value, &kvtechprofile); err != nil {
- logger.Errorw(ctx, "Error unmarshaling techprofile fetched from KV store", log.Fields{"techProfiletblID": techProfiletblID, "error": err, "techprofile_json": value})
- return nil
- }
-
- logger.Debugw(ctx, "Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
- return &kvtechprofile
- }
- }
- return nil
-}
-
-func (t *TechProfileMgr) CreateTechProfInstance(ctx context.Context, techProfiletblID uint32, uniPortName string, intfId uint32) (interface{}, error) {
- var tpInstance *TechProfile
- var tpEponInstance *EponProfile
-
- logger.Infow(ctx, "creating-tp-instance", log.Fields{"tableid": techProfiletblID, "uni": uniPortName, "intId": intfId})
-
- // Make sure the uniPortName is as per format pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}
- if !uniPortNameFormat.Match([]byte(uniPortName)) {
- logger.Errorw(ctx, "uni-port-name-not-confirming-to-format", log.Fields{"uniPortName": uniPortName})
- return nil, errors.New("uni-port-name-not-confirming-to-format")
- }
- tpInstancePath := t.GetTechProfileInstanceKVPath(ctx, techProfiletblID, uniPortName)
- // For example:
- // tpInstPath like "XGS-PON/64/uni_port_name"
- // is broken into ["XGS-PON" "64" ...]
- pathSlice := regexp.MustCompile(`/`).Split(tpInstancePath, -1)
- if pathSlice[0] == epon {
- tp := t.getEponTPFromKVStore(ctx, techProfiletblID)
- if tp != nil {
- if err := t.validateInstanceControlAttr(ctx, tp.InstanceCtrl); err != nil {
- logger.Error(ctx, "invalid-instance-ctrl-attr--using-default-tp")
- tp = t.getDefaultEponProfile(ctx)
- } else {
- logger.Infow(ctx, "using-specified-tp-from-kv-store", log.Fields{"tpid": techProfiletblID})
- }
- } else {
- logger.Info(ctx, "tp-not-found-on-kv--creating-default-tp")
- tp = t.getDefaultEponProfile(ctx)
- }
-
- if tpEponInstance = t.allocateEponTPInstance(ctx, uniPortName, tp, intfId, tpInstancePath); tpEponInstance == nil {
- logger.Error(ctx, "tp-intance-allocation-failed")
- return nil, errors.New("tp-intance-allocation-failed")
- }
- if err := t.addEponProfInstanceToKVStore(ctx, techProfiletblID, uniPortName, tpEponInstance); err != nil {
- logger.Errorw(ctx, "error-adding-tp-to-kv-store", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
- return nil, errors.New("error-adding-tp-to-kv-store")
- }
- logger.Infow(ctx, "tp-added-to-kv-store-successfully",
- log.Fields{"tpid": techProfiletblID, "uni": uniPortName, "intfId": intfId})
- return tpEponInstance, nil
- } else {
- tp := t.getTPFromKVStore(ctx, techProfiletblID)
- if tp != nil {
- if err := t.validateInstanceControlAttr(ctx, tp.InstanceCtrl); err != nil {
- logger.Error(ctx, "invalid-instance-ctrl-attr--using-default-tp")
- tp = t.getDefaultTechProfile(ctx)
- } else {
- logger.Infow(ctx, "using-specified-tp-from-kv-store", log.Fields{"tpid": techProfiletblID})
- }
- } else {
- logger.Info(ctx, "tp-not-found-on-kv--creating-default-tp")
- tp = t.getDefaultTechProfile(ctx)
- }
-
- if tpInstance = t.allocateTPInstance(ctx, uniPortName, tp, intfId, tpInstancePath); tpInstance == nil {
- logger.Error(ctx, "tp-intance-allocation-failed")
- return nil, errors.New("tp-intance-allocation-failed")
- }
- if err := t.addTechProfInstanceToKVStore(ctx, techProfiletblID, uniPortName, tpInstance); err != nil {
- logger.Errorw(ctx, "error-adding-tp-to-kv-store", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
- return nil, errors.New("error-adding-tp-to-kv-store")
- }
- logger.Infow(ctx, "tp-added-to-kv-store-successfully",
- log.Fields{"tpid": techProfiletblID, "uni": uniPortName, "intfId": intfId})
- return tpInstance, nil
- }
-}
-
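CreateTechProfInstance returns an interface{} whose concrete type depends on the technology: *TechProfile for GPON/XGS-PON and *EponProfile for EPON. A minimal caller sketch, assuming a *TechProfileMgr named tpMgr and a context ctx; the TP ID 64 and the uni port name below are illustrative only:

    tpInst, err := tpMgr.CreateTechProfInstance(ctx, 64, "pon-{0}/onu-{1}/uni-{0}", 0)
    if err != nil {
        return err
    }
    switch inst := tpInst.(type) {
    case *techprofile.TechProfile:
        // GPON/XGS-PON: scheduler alloc-id and GEM port attribute lists are populated.
        _ = inst.UsScheduler.AllocID
    case *techprofile.EponProfile:
        // EPON: a single alloc-id plus upstream/downstream queue attribute lists.
        _ = inst.AllocID
    }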
-func (t *TechProfileMgr) DeleteTechProfileInstance(ctx context.Context, techProfiletblID uint32, uniPortName string) error {
- path := t.GetTechProfileInstanceKVPath(ctx, techProfiletblID, uniPortName)
- return t.config.KVBackend.Delete(ctx, path)
-}
-
-func (t *TechProfileMgr) validateInstanceControlAttr(ctx context.Context, instCtl InstanceControl) error {
- if instCtl.Onu != "single-instance" && instCtl.Onu != "multi-instance" {
- logger.Errorw(ctx, "invalid-onu-instance-control-attribute", log.Fields{"onu-inst": instCtl.Onu})
- return errors.New("invalid-onu-instance-ctl-attr")
- }
-
- if instCtl.Uni != "single-instance" && instCtl.Uni != "multi-instance" {
- logger.Errorw(ctx, "invalid-uni-instance-control-attribute", log.Fields{"uni-inst": instCtl.Uni})
- return errors.New("invalid-uni-instance-ctl-attr")
- }
-
- if instCtl.Uni == "multi-instance" {
- logger.Error(ctx, "uni-multi-instance-tp-not-supported")
- return errors.New("uni-multi-instance-tp-not-supported")
- }
-
- return nil
-}
-
-func (t *TechProfileMgr) allocateTPInstance(ctx context.Context, uniPortName string, tp *DefaultTechProfile, intfId uint32, tpInstPath string) *TechProfile {
-
- var usGemPortAttributeList []IGemPortAttribute
- var dsGemPortAttributeList []IGemPortAttribute
- var dsMulticastGemAttributeList []IGemPortAttribute
- var dsUnicastGemAttributeList []IGemPortAttribute
- var tcontIDs []uint32
- var gemPorts []uint32
- var err error
-
- logger.Infow(ctx, "Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numGem": tp.NumGemPorts})
-
- if tp.InstanceCtrl.Onu == "multi-instance" {
- tcontIDs, err = t.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeAllocID(), 1)
- if err != nil {
- logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
- return nil
- }
- } else { // "single-instance"
- if tpInst, err := t.getSingleInstanceTp(ctx, tpInstPath); err != nil {
- logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
- return nil
- } else if tpInst == nil {
- // No "single-instance" tp found on one any uni port for the given TP ID
- // Allocate a new TcontID or AllocID
- tcontIDs, err = t.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeAllocID(), 1)
- if err != nil {
- logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
- return nil
- }
- } else {
- // Use the alloc-id from the existing TpInstance
- tcontIDs = append(tcontIDs, tpInst.UsScheduler.AllocID)
- }
- }
- logger.Debugw(ctx, "Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
- gemPorts, err = t.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts)
- if err != nil {
- logger.Errorw(ctx, "Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
- return nil
- }
- logger.Infow(ctx, "Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
- for index := 0; index < int(tp.NumGemPorts); index++ {
- usGemPortAttributeList = append(usGemPortAttributeList,
- IGemPortAttribute{GemportID: gemPorts[index],
- MaxQueueSize: tp.UpstreamGemPortAttributeList[index].MaxQueueSize,
- PbitMap: tp.UpstreamGemPortAttributeList[index].PbitMap,
- AesEncryption: tp.UpstreamGemPortAttributeList[index].AesEncryption,
- SchedulingPolicy: tp.UpstreamGemPortAttributeList[index].SchedulingPolicy,
- PriorityQueue: tp.UpstreamGemPortAttributeList[index].PriorityQueue,
- Weight: tp.UpstreamGemPortAttributeList[index].Weight,
- DiscardPolicy: tp.UpstreamGemPortAttributeList[index].DiscardPolicy,
- DiscardConfig: tp.UpstreamGemPortAttributeList[index].DiscardConfig})
- }
-
- logger.Info(ctx, "length of DownstreamGemPortAttributeList", len(tp.DownstreamGemPortAttributeList))
- //put multicast and unicast downstream GEM port attributes in different lists first
- for index := 0; index < int(len(tp.DownstreamGemPortAttributeList)); index++ {
- if isMulticastGem(tp.DownstreamGemPortAttributeList[index].IsMulticast) {
- dsMulticastGemAttributeList = append(dsMulticastGemAttributeList,
- IGemPortAttribute{
- McastGemID: tp.DownstreamGemPortAttributeList[index].McastGemID,
- MaxQueueSize: tp.DownstreamGemPortAttributeList[index].MaxQueueSize,
- PbitMap: tp.DownstreamGemPortAttributeList[index].PbitMap,
- AesEncryption: tp.DownstreamGemPortAttributeList[index].AesEncryption,
- SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
- PriorityQueue: tp.DownstreamGemPortAttributeList[index].PriorityQueue,
- Weight: tp.DownstreamGemPortAttributeList[index].Weight,
- DiscardPolicy: tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
- DiscardConfig: tp.DownstreamGemPortAttributeList[index].DiscardConfig,
- IsMulticast: tp.DownstreamGemPortAttributeList[index].IsMulticast,
- DControlList: tp.DownstreamGemPortAttributeList[index].DControlList,
- SControlList: tp.DownstreamGemPortAttributeList[index].SControlList})
- } else {
- dsUnicastGemAttributeList = append(dsUnicastGemAttributeList,
- IGemPortAttribute{
- MaxQueueSize: tp.DownstreamGemPortAttributeList[index].MaxQueueSize,
- PbitMap: tp.DownstreamGemPortAttributeList[index].PbitMap,
- AesEncryption: tp.DownstreamGemPortAttributeList[index].AesEncryption,
- SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
- PriorityQueue: tp.DownstreamGemPortAttributeList[index].PriorityQueue,
- Weight: tp.DownstreamGemPortAttributeList[index].Weight,
- DiscardPolicy: tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
- DiscardConfig: tp.DownstreamGemPortAttributeList[index].DiscardConfig})
- }
- }
- //add unicast downstream GEM ports to dsGemPortAttributeList
- for index := 0; index < int(tp.NumGemPorts); index++ {
- dsGemPortAttributeList = append(dsGemPortAttributeList,
- IGemPortAttribute{GemportID: gemPorts[index],
- MaxQueueSize: dsUnicastGemAttributeList[index].MaxQueueSize,
- PbitMap: dsUnicastGemAttributeList[index].PbitMap,
- AesEncryption: dsUnicastGemAttributeList[index].AesEncryption,
- SchedulingPolicy: dsUnicastGemAttributeList[index].SchedulingPolicy,
- PriorityQueue: dsUnicastGemAttributeList[index].PriorityQueue,
- Weight: dsUnicastGemAttributeList[index].Weight,
- DiscardPolicy: dsUnicastGemAttributeList[index].DiscardPolicy,
- DiscardConfig: dsUnicastGemAttributeList[index].DiscardConfig})
- }
- //add multicast GEM ports to dsGemPortAttributeList afterwards
- for k := range dsMulticastGemAttributeList {
- dsGemPortAttributeList = append(dsGemPortAttributeList, dsMulticastGemAttributeList[k])
- }
-
- return &TechProfile{
- SubscriberIdentifier: uniPortName,
- Name: tp.Name,
- ProfileType: tp.ProfileType,
- Version: tp.Version,
- NumGemPorts: tp.NumGemPorts,
- InstanceCtrl: tp.InstanceCtrl,
- UsScheduler: IScheduler{
- AllocID: tcontIDs[0],
- Direction: tp.UsScheduler.Direction,
- AdditionalBw: tp.UsScheduler.AdditionalBw,
- Priority: tp.UsScheduler.Priority,
- Weight: tp.UsScheduler.Weight,
- QSchedPolicy: tp.UsScheduler.QSchedPolicy},
- DsScheduler: IScheduler{
- AllocID: tcontIDs[0],
- Direction: tp.DsScheduler.Direction,
- AdditionalBw: tp.DsScheduler.AdditionalBw,
- Priority: tp.DsScheduler.Priority,
- Weight: tp.DsScheduler.Weight,
- QSchedPolicy: tp.DsScheduler.QSchedPolicy},
- UpstreamGemPortAttributeList: usGemPortAttributeList,
- DownstreamGemPortAttributeList: dsGemPortAttributeList}
-}
-
-// allocateTPInstance function for EPON
-func (t *TechProfileMgr) allocateEponTPInstance(ctx context.Context, uniPortName string, tp *DefaultEponProfile, intfId uint32, tpInstPath string) *EponProfile {
-
- var usQueueAttributeList []iUpstreamQueueAttribute
- var dsQueueAttributeList []iDownstreamQueueAttribute
- var tcontIDs []uint32
- var gemPorts []uint32
- var err error
-
- logger.Infow(ctx, "Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numGem": tp.NumGemPorts})
-
- if tp.InstanceCtrl.Onu == "multi-instance" {
- if tcontIDs, err = t.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeAllocID(), 1); err != nil {
- logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
- return nil
- }
- } else { // "single-instance"
- if tpInst, err := t.getSingleInstanceEponTp(ctx, tpInstPath); err != nil {
- logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
- return nil
- } else if tpInst == nil {
- // No "single-instance" tp found on one any uni port for the given TP ID
- // Allocate a new TcontID or AllocID
- if tcontIDs, err = t.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeAllocID(), 1); err != nil {
- logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
- return nil
- }
- } else {
- // Use the alloc-id from the existing TpInstance
- tcontIDs = append(tcontIDs, tpInst.AllocID)
- }
- }
- logger.Debugw(ctx, "Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
- if gemPorts, err = t.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts); err != nil {
- logger.Errorw(ctx, "Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
- return nil
- }
- logger.Infow(ctx, "Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
- for index := 0; index < int(tp.NumGemPorts); index++ {
- usQueueAttributeList = append(usQueueAttributeList,
- iUpstreamQueueAttribute{GemportID: gemPorts[index],
- MaxQueueSize: tp.UpstreamQueueAttributeList[index].MaxQueueSize,
- PbitMap: tp.UpstreamQueueAttributeList[index].PbitMap,
- AesEncryption: tp.UpstreamQueueAttributeList[index].AesEncryption,
- TrafficType: tp.UpstreamQueueAttributeList[index].TrafficType,
- UnsolicitedGrantSize: tp.UpstreamQueueAttributeList[index].UnsolicitedGrantSize,
- NominalInterval: tp.UpstreamQueueAttributeList[index].NominalInterval,
- ToleratedPollJitter: tp.UpstreamQueueAttributeList[index].ToleratedPollJitter,
- RequestTransmissionPolicy: tp.UpstreamQueueAttributeList[index].RequestTransmissionPolicy,
- NumQueueSet: tp.UpstreamQueueAttributeList[index].NumQueueSet,
- QThresholds: tp.UpstreamQueueAttributeList[index].QThresholds,
- SchedulingPolicy: tp.UpstreamQueueAttributeList[index].SchedulingPolicy,
- PriorityQueue: tp.UpstreamQueueAttributeList[index].PriorityQueue,
- Weight: tp.UpstreamQueueAttributeList[index].Weight,
- DiscardPolicy: tp.UpstreamQueueAttributeList[index].DiscardPolicy,
- DiscardConfig: tp.UpstreamQueueAttributeList[index].DiscardConfig})
- }
-
- logger.Info(ctx, "length of DownstreamGemPortAttributeList", len(tp.DownstreamQueueAttributeList))
- for index := 0; index < int(tp.NumGemPorts); index++ {
- dsQueueAttributeList = append(dsQueueAttributeList,
- iDownstreamQueueAttribute{GemportID: gemPorts[index],
- MaxQueueSize: tp.DownstreamQueueAttributeList[index].MaxQueueSize,
- PbitMap: tp.DownstreamQueueAttributeList[index].PbitMap,
- AesEncryption: tp.DownstreamQueueAttributeList[index].AesEncryption,
- SchedulingPolicy: tp.DownstreamQueueAttributeList[index].SchedulingPolicy,
- PriorityQueue: tp.DownstreamQueueAttributeList[index].PriorityQueue,
- Weight: tp.DownstreamQueueAttributeList[index].Weight,
- DiscardPolicy: tp.DownstreamQueueAttributeList[index].DiscardPolicy,
- DiscardConfig: tp.DownstreamQueueAttributeList[index].DiscardConfig})
- }
-
- return &EponProfile{
- SubscriberIdentifier: uniPortName,
- Name: tp.Name,
- ProfileType: tp.ProfileType,
- Version: tp.Version,
- NumGemPorts: tp.NumGemPorts,
- InstanceCtrl: tp.InstanceCtrl,
- EponAttribute: tp.EponAttribute,
- AllocID: tcontIDs[0],
- UpstreamQueueAttributeList: usQueueAttributeList,
- DownstreamQueueAttributeList: dsQueueAttributeList}
-}
-
-// getSingleInstanceTp returns another TpInstance for an ONU on a different
-// uni port for the same TP ID, if it finds one, else nil.
-func (t *TechProfileMgr) getSingleInstanceTp(ctx context.Context, tpPath string) (*TechProfile, error) {
- var tpInst TechProfile
-
- // For example:
- // tpPath like "service/voltha/technology_profiles/xgspon/64/pon-{0}/onu-{1}/uni-{1}"
- // is broken into ["service/voltha/technology_profiles/xgspon/64/pon-{0}/onu-{1}" ""]
- uniPathSlice := regexp.MustCompile(`/uni-{[0-9]+}$`).Split(tpPath, 2)
- kvPairs, _ := t.config.KVBackend.List(ctx, uniPathSlice[0])
-
- // Find a valid TP Instance among all the UNIs of that ONU for the given TP ID
- for keyPath, kvPair := range kvPairs {
- if value, err := kvstore.ToByte(kvPair.Value); err == nil {
- if err = json.Unmarshal(value, &tpInst); err != nil {
- logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"keyPath": keyPath, "value": value})
- return nil, errors.New("error-unmarshal-kv-pair")
- } else {
- logger.Debugw(ctx, "found-valid-tp-instance-on-another-uni", log.Fields{"keyPath": keyPath})
- return &tpInst, nil
- }
- }
- }
- return nil, nil
-}
-
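The single-instance lookup trims the trailing "/uni-{x}" component from the instance path and lists every key under the remaining ONU-level prefix, so any UNI of the same ONU that already holds an instance for this TP ID is found. A standalone sketch of that split, using the example path from the comment above:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        tpPath := "service/voltha/technology_profiles/xgspon/64/pon-{0}/onu-{1}/uni-{1}"
        // Drop the trailing "/uni-{...}" so the prefix covers all UNIs of the ONU.
        uniPathSlice := regexp.MustCompile(`/uni-{[0-9]+}$`).Split(tpPath, 2)
        fmt.Println(uniPathSlice[0])
        // prints: service/voltha/technology_profiles/xgspon/64/pon-{0}/onu-{1}
    }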
-func (t *TechProfileMgr) getSingleInstanceEponTp(ctx context.Context, tpPath string) (*EponProfile, error) {
- var tpInst EponProfile
-
- // For example:
- // tpPath like "service/voltha/technology_profiles/xgspon/64/pon-{0}/onu-{1}/uni-{1}"
- // is broken into ["service/voltha/technology_profiles/xgspon/64/pon-{0}/onu-{1}" ""]
- uniPathSlice := regexp.MustCompile(`/uni-{[0-9]+}$`).Split(tpPath, 2)
- kvPairs, _ := t.config.KVBackend.List(ctx, uniPathSlice[0])
-
- // Find a valid TP Instance among all the UNIs of that ONU for the given TP ID
- for keyPath, kvPair := range kvPairs {
- if value, err := kvstore.ToByte(kvPair.Value); err == nil {
- if err = json.Unmarshal(value, &tpInst); err != nil {
- logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"keyPath": keyPath, "value": value})
- return nil, errors.New("error-unmarshal-kv-pair")
- } else {
- logger.Debugw(ctx, "found-valid-tp-instance-on-another-uni", log.Fields{"keyPath": keyPath})
- return &tpInst, nil
- }
- }
- }
- return nil, nil
-}
-
-func (t *TechProfileMgr) getDefaultTechProfile(ctx context.Context) *DefaultTechProfile {
- var usGemPortAttributeList []GemPortAttribute
- var dsGemPortAttributeList []GemPortAttribute
-
- for _, pbit := range t.config.DefaultPbits {
- logger.Debugw(ctx, "Creating GEM port", log.Fields{"pbit": pbit})
- usGemPortAttributeList = append(usGemPortAttributeList,
- GemPortAttribute{
- MaxQueueSize: defaultMaxQueueSize,
- PbitMap: pbit,
- AesEncryption: defaultAESEncryption,
- SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
- PriorityQueue: defaultPriorityQueue,
- Weight: defaultQueueWeight,
- DiscardPolicy: DiscardPolicy_name[defaultdropPolicy],
- DiscardConfig: DiscardConfig{
- MinThreshold: defaultMinThreshold,
- MaxThreshold: defaultMaxThreshold,
- MaxProbability: defaultMaxProbability}})
- dsGemPortAttributeList = append(dsGemPortAttributeList,
- GemPortAttribute{
- MaxQueueSize: defaultMaxQueueSize,
- PbitMap: pbit,
- AesEncryption: defaultAESEncryption,
- SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
- PriorityQueue: defaultPriorityQueue,
- Weight: defaultQueueWeight,
- DiscardPolicy: DiscardPolicy_name[defaultdropPolicy],
- DiscardConfig: DiscardConfig{
- MinThreshold: defaultMinThreshold,
- MaxThreshold: defaultMaxThreshold,
- MaxProbability: defaultMaxProbability},
- IsMulticast: defaultIsMulticast,
- DControlList: defaultAccessControlList,
- SControlList: defaultAccessControlList,
- McastGemID: defaultMcastGemID})
- }
- return &DefaultTechProfile{
- Name: t.config.DefaultTPName,
- ProfileType: t.resourceMgr.GetTechnology(),
- Version: t.config.TPVersion,
- NumGemPorts: uint32(len(usGemPortAttributeList)),
- InstanceCtrl: InstanceControl{
- Onu: defaultOnuInstance,
- Uni: defaultUniInstance,
- MaxGemPayloadSize: defaultGemPayloadSize},
- UsScheduler: Scheduler{
- Direction: Direction_name[Direction_UPSTREAM],
- AdditionalBw: AdditionalBW_name[defaultAdditionalBw],
- Priority: defaultPriority,
- Weight: defaultWeight,
- QSchedPolicy: SchedulingPolicy_name[defaultQueueSchedPolicy]},
- DsScheduler: Scheduler{
- Direction: Direction_name[Direction_DOWNSTREAM],
- AdditionalBw: AdditionalBW_name[defaultAdditionalBw],
- Priority: defaultPriority,
- Weight: defaultWeight,
- QSchedPolicy: SchedulingPolicy_name[defaultQueueSchedPolicy]},
- UpstreamGemPortAttributeList: usGemPortAttributeList,
- DownstreamGemPortAttributeList: dsGemPortAttributeList}
-}
-
-// getDefaultTechProfile function for EPON
-func (t *TechProfileMgr) getDefaultEponProfile(ctx context.Context) *DefaultEponProfile {
-
- var usQueueAttributeList []UpstreamQueueAttribute
- var dsQueueAttributeList []DownstreamQueueAttribute
-
- for _, pbit := range t.config.DefaultPbits {
- logger.Debugw(ctx, "Creating Queue", log.Fields{"pbit": pbit})
- usQueueAttributeList = append(usQueueAttributeList,
- UpstreamQueueAttribute{
- MaxQueueSize: defaultMaxQueueSize,
- PbitMap: pbit,
- AesEncryption: defaultAESEncryption,
- TrafficType: defaultTrafficType,
- UnsolicitedGrantSize: defaultUnsolicitedGrantSize,
- NominalInterval: defaultNominalInterval,
- ToleratedPollJitter: defaultToleratedPollJitter,
- RequestTransmissionPolicy: defaultRequestTransmissionPolicy,
- NumQueueSet: defaultNumQueueSet,
- QThresholds: QThresholds{
- QThreshold1: defaultQThreshold1,
- QThreshold2: defaultQThreshold2,
- QThreshold3: defaultQThreshold3,
- QThreshold4: defaultQThreshold4,
- QThreshold5: defaultQThreshold5,
- QThreshold6: defaultQThreshold6,
- QThreshold7: defaultQThreshold7},
- SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
- PriorityQueue: defaultPriorityQueue,
- Weight: defaultQueueWeight,
- DiscardPolicy: DiscardPolicy_name[defaultdropPolicy],
- DiscardConfig: DiscardConfig{
- MinThreshold: defaultMinThreshold,
- MaxThreshold: defaultMaxThreshold,
- MaxProbability: defaultMaxProbability}})
- dsQueueAttributeList = append(dsQueueAttributeList,
- DownstreamQueueAttribute{
- MaxQueueSize: defaultMaxQueueSize,
- PbitMap: pbit,
- AesEncryption: defaultAESEncryption,
- SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
- PriorityQueue: defaultPriorityQueue,
- Weight: defaultQueueWeight,
- DiscardPolicy: DiscardPolicy_name[defaultdropPolicy],
- DiscardConfig: DiscardConfig{
- MinThreshold: defaultMinThreshold,
- MaxThreshold: defaultMaxThreshold,
- MaxProbability: defaultMaxProbability}})
- }
- return &DefaultEponProfile{
- Name: t.config.DefaultTPName,
- ProfileType: t.resourceMgr.GetTechnology(),
- Version: t.config.TPVersion,
- NumGemPorts: uint32(len(usQueueAttributeList)),
- InstanceCtrl: InstanceControl{
- Onu: defaultOnuInstance,
- Uni: defaultUniInstance,
- MaxGemPayloadSize: defaultGemPayloadSize},
- EponAttribute: EponAttribute{
- PackageType: defaultPakageType},
- UpstreamQueueAttributeList: usQueueAttributeList,
- DownstreamQueueAttributeList: dsQueueAttributeList}
-}
-
-func (t *TechProfileMgr) GetprotoBufParamValue(ctx context.Context, paramType string, paramKey string) int32 {
- var result int32 = -1
-
- if paramType == "direction" {
- for key, val := range tp_pb.Direction_value {
- if key == paramKey {
- result = val
- }
- }
- } else if paramType == "discard_policy" {
- for key, val := range tp_pb.DiscardPolicy_value {
- if key == paramKey {
- result = val
- }
- }
- } else if paramType == "sched_policy" {
- for key, val := range tp_pb.SchedulingPolicy_value {
- if key == paramKey {
- logger.Debugw(ctx, "Got value in proto", log.Fields{"key": key, "value": val})
- result = val
- }
- }
- } else if paramType == "additional_bw" {
- for key, val := range tp_pb.AdditionalBW_value {
- if key == paramKey {
- result = val
- }
- }
- } else {
- logger.Error(ctx, "Could not find proto parameter", log.Fields{"paramType": paramType, "key": paramKey})
- return -1
- }
- logger.Debugw(ctx, "Got value in proto", log.Fields{"key": paramKey, "value": result})
- return result
-}
-
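GetprotoBufParamValue is a reverse lookup over the generated *_value maps in voltha-protos; it returns -1 when the string stored in the TP instance does not name a valid enum member. A hedged usage sketch, assuming a *TechProfileMgr t:

    dirVal := t.GetprotoBufParamValue(ctx, "direction", "UPSTREAM")
    if dirVal == -1 {
        // the supplied string did not match any tp_pb.Direction enum name
    }
    dir := tp_pb.Direction(dirVal)
    _ = dir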
-func (t *TechProfileMgr) GetUsScheduler(ctx context.Context, tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
- dir := tp_pb.Direction(t.GetprotoBufParamValue(ctx, "direction", tpInstance.UsScheduler.Direction))
- if dir == -1 {
- logger.Errorf(ctx, "Error in getting proto id for direction %s for upstream scheduler", tpInstance.UsScheduler.Direction)
- return nil, fmt.Errorf("unable to get proto id for direction %s for upstream scheduler", tpInstance.UsScheduler.Direction)
- }
-
- bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue(ctx, "additional_bw", tpInstance.UsScheduler.AdditionalBw))
- if bw == -1 {
- logger.Errorf(ctx, "Error in getting proto id for bandwidth %s for upstream scheduler", tpInstance.UsScheduler.AdditionalBw)
- return nil, fmt.Errorf("unable to get proto id for bandwidth %s for upstream scheduler", tpInstance.UsScheduler.AdditionalBw)
- }
-
- policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue(ctx, "sched_policy", tpInstance.UsScheduler.QSchedPolicy))
- if policy == -1 {
- logger.Errorf(ctx, "Error in getting proto id for scheduling policy %s for upstream scheduler", tpInstance.UsScheduler.QSchedPolicy)
- return nil, fmt.Errorf("unable to get proto id for scheduling policy %s for upstream scheduler", tpInstance.UsScheduler.QSchedPolicy)
- }
-
- return &tp_pb.SchedulerConfig{
- Direction: dir,
- AdditionalBw: bw,
- Priority: tpInstance.UsScheduler.Priority,
- Weight: tpInstance.UsScheduler.Weight,
- SchedPolicy: policy}, nil
-}
-
-func (t *TechProfileMgr) GetDsScheduler(ctx context.Context, tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
-
- dir := tp_pb.Direction(t.GetprotoBufParamValue(ctx, "direction", tpInstance.DsScheduler.Direction))
- if dir == -1 {
- logger.Errorf(ctx, "Error in getting proto id for direction %s for downstream scheduler", tpInstance.DsScheduler.Direction)
- return nil, fmt.Errorf("unable to get proto id for direction %s for downstream scheduler", tpInstance.DsScheduler.Direction)
- }
-
- bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue(ctx, "additional_bw", tpInstance.DsScheduler.AdditionalBw))
- if bw == -1 {
- logger.Errorf(ctx, "Error in getting proto id for bandwidth %s for downstream scheduler", tpInstance.DsScheduler.AdditionalBw)
- return nil, fmt.Errorf("unable to get proto id for bandwidth %s for downstream scheduler", tpInstance.DsScheduler.AdditionalBw)
- }
-
- policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue(ctx, "sched_policy", tpInstance.DsScheduler.QSchedPolicy))
- if policy == -1 {
- logger.Errorf(ctx, "Error in getting proto id for scheduling policy %s for downstream scheduler", tpInstance.DsScheduler.QSchedPolicy)
- return nil, fmt.Errorf("unable to get proto id for scheduling policy %s for downstream scheduler", tpInstance.DsScheduler.QSchedPolicy)
- }
-
- return &tp_pb.SchedulerConfig{
- Direction: dir,
- AdditionalBw: bw,
- Priority: tpInstance.DsScheduler.Priority,
- Weight: tpInstance.DsScheduler.Weight,
- SchedPolicy: policy}, nil
-}
-
-func (t *TechProfileMgr) GetTrafficScheduler(tpInstance *TechProfile, SchedCfg *tp_pb.SchedulerConfig,
- ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler {
-
- tSched := &tp_pb.TrafficScheduler{
- Direction: SchedCfg.Direction,
- AllocId: tpInstance.UsScheduler.AllocID,
- TrafficShapingInfo: ShapingCfg,
- Scheduler: SchedCfg}
-
- return tSched
-}
-
-func (tpm *TechProfileMgr) GetTrafficQueues(ctx context.Context, tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error) {
-
- var encryp bool
- if Dir == tp_pb.Direction_UPSTREAM {
- // upstream GEM ports
- NumGemPorts := len(tp.UpstreamGemPortAttributeList)
- GemPorts := make([]*tp_pb.TrafficQueue, 0)
- for Count := 0; Count < NumGemPorts; Count++ {
- if tp.UpstreamGemPortAttributeList[Count].AesEncryption == "True" {
- encryp = true
- } else {
- encryp = false
- }
-
- schedPolicy := tpm.GetprotoBufParamValue(ctx, "sched_policy", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)
- if schedPolicy == -1 {
- logger.Errorf(ctx, "Error in getting Proto Id for scheduling policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
- return nil, fmt.Errorf("upstream gem port traffic queue creation failed due to unrecognized scheduling policy %s", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)
- }
-
- discardPolicy := tpm.GetprotoBufParamValue(ctx, "discard_policy", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)
- if discardPolicy == -1 {
- logger.Errorf(ctx, "Error in getting Proto Id for discard policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy, Count)
- return nil, fmt.Errorf("upstream gem port traffic queue creation failed due to unrecognized discard policy %s", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)
- }
-
- GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
- Direction: tp_pb.Direction(tpm.GetprotoBufParamValue(ctx, "direction", tp.UsScheduler.Direction)),
- GemportId: tp.UpstreamGemPortAttributeList[Count].GemportID,
- PbitMap: tp.UpstreamGemPortAttributeList[Count].PbitMap,
- AesEncryption: encryp,
- SchedPolicy: tp_pb.SchedulingPolicy(schedPolicy),
- Priority: tp.UpstreamGemPortAttributeList[Count].PriorityQueue,
- Weight: tp.UpstreamGemPortAttributeList[Count].Weight,
- DiscardPolicy: tp_pb.DiscardPolicy(discardPolicy),
- })
- }
- logger.Debugw(ctx, "Upstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
- return GemPorts, nil
- } else if Dir == tp_pb.Direction_DOWNSTREAM {
- //downstream GEM ports
- NumGemPorts := len(tp.DownstreamGemPortAttributeList)
- GemPorts := make([]*tp_pb.TrafficQueue, 0)
- for Count := 0; Count < NumGemPorts; Count++ {
- if isMulticastGem(tp.DownstreamGemPortAttributeList[Count].IsMulticast) {
- //do not take multicast GEM ports. They are handled separately.
- continue
- }
- if tp.DownstreamGemPortAttributeList[Count].AesEncryption == "True" {
- encryp = true
- } else {
- encryp = false
- }
-
- schedPolicy := tpm.GetprotoBufParamValue(ctx, "sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)
- if schedPolicy == -1 {
- logger.Errorf(ctx, "Error in getting Proto Id for scheduling policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
- return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unrecognized scheduling policy %s", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)
- }
-
- discardPolicy := tpm.GetprotoBufParamValue(ctx, "discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)
- if discardPolicy == -1 {
- logger.Errorf(ctx, "Error in getting Proto Id for discard policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy, Count)
- return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unrecognized discard policy %s", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)
- }
-
- GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
- Direction: tp_pb.Direction(tpm.GetprotoBufParamValue(ctx, "direction", tp.DsScheduler.Direction)),
- GemportId: tp.DownstreamGemPortAttributeList[Count].GemportID,
- PbitMap: tp.DownstreamGemPortAttributeList[Count].PbitMap,
- AesEncryption: encryp,
- SchedPolicy: tp_pb.SchedulingPolicy(schedPolicy),
- Priority: tp.DownstreamGemPortAttributeList[Count].PriorityQueue,
- Weight: tp.DownstreamGemPortAttributeList[Count].Weight,
- DiscardPolicy: tp_pb.DiscardPolicy(discardPolicy),
- })
- }
- logger.Debugw(ctx, "Downstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
- return GemPorts, nil
- }
-
- logger.Errorf(ctx, "Unsupported direction %s used for generating Traffic Queue list", Dir)
- return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unsupported direction %s", Dir)
-}
-
-//isMulticastGem returns true if isMulticast attribute value of a GEM port is true; false otherwise
-func isMulticastGem(isMulticastAttrValue string) bool {
- return isMulticastAttrValue != "" &&
- (isMulticastAttrValue == "True" || isMulticastAttrValue == "true" || isMulticastAttrValue == "TRUE")
-}
-
-func (tpm *TechProfileMgr) GetMulticastTrafficQueues(ctx context.Context, tp *TechProfile) []*tp_pb.TrafficQueue {
- var encryp bool
- NumGemPorts := len(tp.DownstreamGemPortAttributeList)
- mcastTrafficQueues := make([]*tp_pb.TrafficQueue, 0)
- for Count := 0; Count < NumGemPorts; Count++ {
- if !isMulticastGem(tp.DownstreamGemPortAttributeList[Count].IsMulticast) {
- continue
- }
- if tp.DownstreamGemPortAttributeList[Count].AesEncryption == "True" {
- encryp = true
- } else {
- encryp = false
- }
- mcastTrafficQueues = append(mcastTrafficQueues, &tp_pb.TrafficQueue{
- Direction: tp_pb.Direction(tpm.GetprotoBufParamValue(ctx, "direction", tp.DsScheduler.Direction)),
- GemportId: tp.DownstreamGemPortAttributeList[Count].McastGemID,
- PbitMap: tp.DownstreamGemPortAttributeList[Count].PbitMap,
- AesEncryption: encryp,
- SchedPolicy: tp_pb.SchedulingPolicy(tpm.GetprotoBufParamValue(ctx, "sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)),
- Priority: tp.DownstreamGemPortAttributeList[Count].PriorityQueue,
- Weight: tp.DownstreamGemPortAttributeList[Count].Weight,
- DiscardPolicy: tp_pb.DiscardPolicy(tpm.GetprotoBufParamValue(ctx, "discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)),
- })
- }
- logger.Debugw(ctx, "Downstream Multicast Traffic queue list ", log.Fields{"queuelist": mcastTrafficQueues})
- return mcastTrafficQueues
-}
-
-func (tpm *TechProfileMgr) GetUsTrafficScheduler(ctx context.Context, tp *TechProfile) *tp_pb.TrafficScheduler {
- UsScheduler, _ := tpm.GetUsScheduler(ctx, tp)
-
- return &tp_pb.TrafficScheduler{Direction: UsScheduler.Direction,
- AllocId: tp.UsScheduler.AllocID,
- Scheduler: UsScheduler}
-}
-
-func (t *TechProfileMgr) GetGemportForPbit(ctx context.Context, tp interface{}, dir tp_pb.Direction, pbit uint32) interface{} {
- /*
- Function to get the Gemport mapped to a pbit.
- */
- switch tp := tp.(type) {
- case *TechProfile:
- if dir == tp_pb.Direction_UPSTREAM {
- // upstream GEM ports
- numGemPorts := len(tp.UpstreamGemPortAttributeList)
- for gemCnt := 0; gemCnt < numGemPorts; gemCnt++ {
- lenOfPbitMap := len(tp.UpstreamGemPortAttributeList[gemCnt].PbitMap)
- for pbitMapIdx := 2; pbitMapIdx < lenOfPbitMap; pbitMapIdx++ {
- // Given a sample pbit map string "0b00000001", lenOfPbitMap is 10
- // "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
- if p, err := strconv.Atoi(string(tp.UpstreamGemPortAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
- if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
- logger.Debugw(ctx, "Found-US-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.UpstreamGemPortAttributeList[gemCnt].GemportID})
- return tp.UpstreamGemPortAttributeList[gemCnt]
- }
- }
- }
- }
- } else if dir == tp_pb.Direction_DOWNSTREAM {
- //downstream GEM ports
- numGemPorts := len(tp.DownstreamGemPortAttributeList)
- for gemCnt := 0; gemCnt < numGemPorts; gemCnt++ {
- lenOfPbitMap := len(tp.DownstreamGemPortAttributeList[gemCnt].PbitMap)
- for pbitMapIdx := 2; pbitMapIdx < lenOfPbitMap; pbitMapIdx++ {
- // Given a sample pbit map string "0b00000001", lenOfPbitMap is 10
- // "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
- if p, err := strconv.Atoi(string(tp.DownstreamGemPortAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
- if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
- logger.Debugw(ctx, "Found-DS-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.DownstreamGemPortAttributeList[gemCnt].GemportID})
- return tp.DownstreamGemPortAttributeList[gemCnt]
- }
- }
- }
- }
- }
- logger.Errorw(ctx, "No-GemportId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
- case *EponProfile:
- if dir == tp_pb.Direction_UPSTREAM {
- // upstream GEM ports
- numGemPorts := len(tp.UpstreamQueueAttributeList)
- for gemCnt := 0; gemCnt < numGemPorts; gemCnt++ {
- lenOfPbitMap := len(tp.UpstreamQueueAttributeList[gemCnt].PbitMap)
- for pbitMapIdx := 2; pbitMapIdx < lenOfPbitMap; pbitMapIdx++ {
- // Given a sample pbit map string "0b00000001", lenOfPbitMap is 10
- // "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
- if p, err := strconv.Atoi(string(tp.UpstreamQueueAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
- if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
- logger.Debugw(ctx, "Found-US-Queue-for-Pcp", log.Fields{"pbit": pbit, "Queue": tp.UpstreamQueueAttributeList[gemCnt].GemportID})
- return tp.UpstreamQueueAttributeList[gemCnt]
- }
- }
- }
- }
- } else if dir == tp_pb.Direction_DOWNSTREAM {
- //downstream GEM ports
- numGemPorts := len(tp.DownstreamQueueAttributeList)
- for gemCnt := 0; gemCnt < numGemPorts; gemCnt++ {
- lenOfPbitMap := len(tp.DownstreamQueueAttributeList[gemCnt].PbitMap)
- for pbitMapIdx := 2; pbitMapIdx < lenOfPbitMap; pbitMapIdx++ {
- // Given a sample pbit map string "0b00000001", lenOfPbitMap is 10
- // "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
- if p, err := strconv.Atoi(string(tp.DownstreamQueueAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
- if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
- logger.Debugw(ctx, "Found-DS-Queue-for-Pcp", log.Fields{"pbit": pbit, "Queue": tp.DownstreamQueueAttributeList[gemCnt].GemportID})
- return tp.DownstreamQueueAttributeList[gemCnt]
- }
- }
- }
- }
- }
- logger.Errorw(ctx, "No-QueueId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
- default:
- logger.Errorw(ctx, "unknown-tech", log.Fields{"tp": tp})
- }
- return nil
-}
-
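GetGemportForPbit treats the PbitMap string "0bXXXXXXXX" as eight bits with pbit 0 at the right-most character; the index arithmetic lenOfPbitMap-pbitMapIdx+1 walks the string from that LSB end. A standalone sketch of the same decoding, with an illustrative map:

    package main

    import (
        "fmt"
        "strconv"
    )

    func main() {
        pbitMap := "0b00101000" // illustrative: pbits 3 and 5 set
        lenOfPbitMap := len(pbitMap)
        for pbitMapIdx := 2; pbitMapIdx < lenOfPbitMap; pbitMapIdx++ {
            // lenOfPbitMap-pbitMapIdx+1 picks the character for pbit (pbitMapIdx-2),
            // counting from the right-most (LSB) character of the map.
            if p, err := strconv.Atoi(string(pbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil && p == 1 {
                fmt.Printf("pbit %d is set\n", pbitMapIdx-2)
            }
        }
    }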
-// FindAllTpInstances returns all TechProfile instances for a given TechProfile table-id, pon interface ID and onu ID.
-func (t *TechProfileMgr) FindAllTpInstances(ctx context.Context, oltDeviceID string, tpID uint32, ponIntf uint32, onuID uint32) interface{} {
- var tpTech TechProfile
- var tpEpon EponProfile
-
- onuTpInstancePath := fmt.Sprintf("%s/%d/olt-{%s}/pon-{%d}/onu-{%d}", t.resourceMgr.GetTechnology(), tpID, oltDeviceID, ponIntf, onuID)
-
- if kvPairs, _ := t.config.KVBackend.List(ctx, onuTpInstancePath); kvPairs != nil {
- tech := t.resourceMgr.GetTechnology()
- tpInstancesTech := make([]TechProfile, 0, len(kvPairs))
- tpInstancesEpon := make([]EponProfile, 0, len(kvPairs))
-
- for kvPath, kvPair := range kvPairs {
- if value, err := kvstore.ToByte(kvPair.Value); err == nil {
- if tech == xgspon || tech == gpon {
- if err = json.Unmarshal(value, &tpTech); err != nil {
- logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"kvPath": kvPath, "value": value})
- continue
- } else {
- tpInstancesTech = append(tpInstancesTech, tpTech)
- }
- } else if tech == epon {
- if err = json.Unmarshal(value, &tpEpon); err != nil {
- logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"kvPath": kvPath, "value": value})
- continue
- } else {
- tpInstancesEpon = append(tpInstancesEpon, tpEpon)
- }
- }
- }
- }
-
- switch tech {
- case xgspon, gpon:
- return tpInstancesTech
- case epon:
- return tpInstancesEpon
- default:
- logger.Errorw(ctx, "unknown-technology", log.Fields{"tech": tech})
- return nil
- }
- }
- return nil
-}
-
-func (t *TechProfileMgr) GetResourceID(ctx context.Context, IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error) {
- logger.Debugw(ctx, "getting-resource-id", log.Fields{
- "intf-id": IntfID,
- "resource-type": ResourceType,
- "num": NumIDs,
- })
- var err error
- var ids []uint32
- switch ResourceType {
- case t.resourceMgr.GetResourceTypeAllocID():
- t.AllocIDMgmtLock.Lock()
- ids, err = t.resourceMgr.GetResourceID(ctx, IntfID, ResourceType, NumIDs)
- t.AllocIDMgmtLock.Unlock()
- case t.resourceMgr.GetResourceTypeGemPortID():
- t.GemPortIDMgmtLock.Lock()
- ids, err = t.resourceMgr.GetResourceID(ctx, IntfID, ResourceType, NumIDs)
- t.GemPortIDMgmtLock.Unlock()
- case t.resourceMgr.GetResourceTypeOnuID():
- t.OnuIDMgmtLock.Lock()
- ids, err = t.resourceMgr.GetResourceID(ctx, IntfID, ResourceType, NumIDs)
- t.OnuIDMgmtLock.Unlock()
- default:
- return nil, fmt.Errorf("ResourceType %s not supported", ResourceType)
- }
- if err != nil {
- return nil, err
- }
- return ids, nil
-}
-
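Alloc-IDs, GEM port IDs and ONU IDs are each serialized behind their own mutex, so allocation of one resource type does not block the others. A hedged in-package caller sketch; PON interface 0 and the counts are illustrative:

    allocIDs, err := t.GetResourceID(ctx, 0, t.resourceMgr.GetResourceTypeAllocID(), 1)
    if err != nil {
        return err
    }
    gemIDs, err := t.GetResourceID(ctx, 0, t.resourceMgr.GetResourceTypeGemPortID(), 4)
    if err != nil {
        // release the alloc-id already taken so it is not leaked
        _ = t.FreeResourceID(ctx, 0, t.resourceMgr.GetResourceTypeAllocID(), allocIDs)
        return err
    }
    _ = gemIDs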
-func (t *TechProfileMgr) FreeResourceID(ctx context.Context, IntfID uint32, ResourceType string, ReleaseContent []uint32) error {
- logger.Debugw(ctx, "freeing-resource-id", log.Fields{
- "intf-id": IntfID,
- "resource-type": ResourceType,
- "release-content": ReleaseContent,
- })
- var err error
- switch ResourceType {
- case t.resourceMgr.GetResourceTypeAllocID():
- t.AllocIDMgmtLock.Lock()
- err = t.resourceMgr.FreeResourceID(ctx, IntfID, ResourceType, ReleaseContent)
- t.AllocIDMgmtLock.Unlock()
- case t.resourceMgr.GetResourceTypeGemPortID():
- t.GemPortIDMgmtLock.Lock()
- err = t.resourceMgr.FreeResourceID(ctx, IntfID, ResourceType, ReleaseContent)
- t.GemPortIDMgmtLock.Unlock()
- case t.resourceMgr.GetResourceTypeOnuID():
- t.OnuIDMgmtLock.Lock()
- err = t.resourceMgr.FreeResourceID(ctx, IntfID, ResourceType, ReleaseContent)
- t.OnuIDMgmtLock.Unlock()
- default:
- return fmt.Errorf("ResourceType %s not supported", ResourceType)
- }
- if err != nil {
- return err
- }
- return nil
-}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/tech_profile_if.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/tech_profile_if.go
deleted file mode 100644
index 9aa3cbe..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/tech_profile_if.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright 2019-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package techprofile
-
-import (
- "context"
-
- "github.com/opencord/voltha-lib-go/v4/pkg/db"
- tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
-)
-
-type TechProfileIf interface {
- SetKVClient(ctx context.Context, pathPrefix string) *db.Backend
- GetTechProfileInstanceKVPath(ctx context.Context, techProfiletblID uint32, uniPortName string) string
- GetTPInstanceFromKVStore(ctx context.Context, techProfiletblID uint32, path string) (interface{}, error)
- CreateTechProfInstance(ctx context.Context, techProfiletblID uint32, uniPortName string, intfId uint32) (interface{}, error)
- DeleteTechProfileInstance(ctx context.Context, techProfiletblID uint32, uniPortName string) error
- GetprotoBufParamValue(ctx context.Context, paramType string, paramKey string) int32
- GetUsScheduler(ctx context.Context, tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error)
- GetDsScheduler(ctx context.Context, tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error)
- GetTrafficScheduler(tpInstance *TechProfile, SchedCfg *tp_pb.SchedulerConfig,
- ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler
- GetTrafficQueues(ctx context.Context, tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error)
- GetMulticastTrafficQueues(ctx context.Context, tp *TechProfile) []*tp_pb.TrafficQueue
- GetGemportForPbit(ctx context.Context, tp interface{}, Dir tp_pb.Direction, pbit uint32) interface{}
- FindAllTpInstances(ctx context.Context, oltDeviceID string, tpID uint32, ponIntf uint32, onuID uint32) interface{}
- GetResourceID(ctx context.Context, IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error)
- FreeResourceID(ctx context.Context, IntfID uint32, ResourceType string, ReleaseContent []uint32) error
-}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif/adapter_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/adapter_proxy_if.go
similarity index 80%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif/adapter_proxy_if.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/adapter_proxy_if.go
index 30fcead..c514d6d 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif/adapter_proxy_if.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/adapter_proxy_if.go
@@ -33,4 +33,13 @@
toDeviceID string,
proxyDeviceID string,
messageID string) error
+ TechProfileInstanceRequest(ctx context.Context,
+ tpPath string,
+ ponIntfID uint32,
+ onuID uint32,
+ uniID uint32,
+ fromAdapter string,
+ toAdapter string,
+ toDeviceID string,
+ proxyDeviceID string) (*ic.InterAdapterTechProfileDownloadMessage, error)
}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif/core_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/core_proxy_if.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif/core_proxy_if.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/core_proxy_if.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/adapter_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/adapter_proxy.go
new file mode 100644
index 0000000..fc31041
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/adapter_proxy.go
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+ "context"
+ "github.com/opencord/voltha-lib-go/v5/pkg/db"
+ "google.golang.org/grpc/status"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes"
+ "github.com/golang/protobuf/ptypes/any"
+ "github.com/google/uuid"
+ "github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
+ ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+)
+
+type AdapterProxy struct {
+ kafkaICProxy kafka.InterContainerProxy
+ coreTopic string
+ endpointMgr kafka.EndpointManager
+}
+
+func NewAdapterProxy(ctx context.Context, kafkaProxy kafka.InterContainerProxy, coreTopic string, backend *db.Backend) *AdapterProxy {
+ proxy := AdapterProxy{
+ kafkaICProxy: kafkaProxy,
+ coreTopic: coreTopic,
+ endpointMgr: kafka.NewEndpointManager(backend),
+ }
+ logger.Debugw(ctx, "topics", log.Fields{"core": proxy.coreTopic})
+ return &proxy
+}
+
+func (ap *AdapterProxy) SendInterAdapterMessage(ctx context.Context,
+ msg proto.Message,
+ msgType ic.InterAdapterMessageType_Types,
+ fromAdapter string,
+ toAdapter string,
+ toDeviceId string,
+ proxyDeviceId string,
+ messageId string) error {
+ logger.Debugw(ctx, "sending-inter-adapter-message", log.Fields{"type": msgType, "from": fromAdapter,
+ "to": toAdapter, "toDevice": toDeviceId, "proxyDevice": proxyDeviceId})
+
+ //Marshal the message
+ var marshalledMsg *any.Any
+ var err error
+ if marshalledMsg, err = ptypes.MarshalAny(msg); err != nil {
+ logger.Warnw(ctx, "cannot-marshal-msg", log.Fields{"error": err})
+ return err
+ }
+
+ // Set up the required rpc arguments
+ endpoint, err := ap.endpointMgr.GetEndpoint(ctx, toDeviceId, toAdapter)
+ if err != nil {
+ return err
+ }
+
+ //Build the inter adapter message
+ header := &ic.InterAdapterHeader{
+ Type: msgType,
+ FromTopic: fromAdapter,
+ ToTopic: string(endpoint),
+ ToDeviceId: toDeviceId,
+ ProxyDeviceId: proxyDeviceId,
+ }
+ if messageId != "" {
+ header.Id = messageId
+ } else {
+ header.Id = uuid.New().String()
+ }
+ header.Timestamp = ptypes.TimestampNow()
+ iaMsg := &ic.InterAdapterMessage{
+ Header: header,
+ Body: marshalledMsg,
+ }
+ args := make([]*kafka.KVArg, 1)
+ args[0] = &kafka.KVArg{
+ Key: "msg",
+ Value: iaMsg,
+ }
+
+ topic := kafka.Topic{Name: string(endpoint)}
+ replyToTopic := kafka.Topic{Name: fromAdapter}
+ rpc := "process_inter_adapter_message"
+
+ // Add an indication in context to differentiate this Inter Adapter message during Span processing in Kafka IC proxy
+ ctx = context.WithValue(ctx, "inter-adapter-msg-type", msgType)
+ success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, proxyDeviceId, args...)
+ logger.Debugw(ctx, "inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+ return unPackResponse(ctx, rpc, "", success, result)
+}
+
+func (ap *AdapterProxy) TechProfileInstanceRequest(ctx context.Context,
+ tpPath string,
+ parentPonPort uint32,
+ onuID uint32,
+ uniID uint32,
+ fromAdapter string,
+ toAdapter string,
+ toDeviceId string,
+ proxyDeviceId string) (*ic.InterAdapterTechProfileDownloadMessage, error) {
+ logger.Debugw(ctx, "sending-tech-profile-instance-request-message", log.Fields{"from": fromAdapter,
+ "to": toAdapter, "toDevice": toDeviceId, "proxyDevice": proxyDeviceId})
+
+ // Set up the required rpc arguments
+ endpoint, err := ap.endpointMgr.GetEndpoint(ctx, toDeviceId, toAdapter)
+ if err != nil {
+ return nil, err
+ }
+
+ //Build the inter adapter message
+ tpReqMsg := &ic.InterAdapterTechProfileInstanceRequestMessage{
+ TpInstancePath: tpPath,
+ ParentDeviceId: toDeviceId,
+ ParentPonPort: parentPonPort,
+ OnuId: onuID,
+ UniId: uniID,
+ }
+
+ args := make([]*kafka.KVArg, 1)
+ args[0] = &kafka.KVArg{
+ Key: "msg",
+ Value: tpReqMsg,
+ }
+
+ topic := kafka.Topic{Name: string(endpoint)}
+ replyToTopic := kafka.Topic{Name: fromAdapter}
+ rpc := "process_tech_profile_instance_request"
+
+ ctx = context.WithValue(ctx, "inter-adapter-tp-req-msg", tpPath)
+ success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, proxyDeviceId, args...)
+ logger.Debugw(ctx, "inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+ if success {
+ tpDwnldMsg := &ic.InterAdapterTechProfileDownloadMessage{}
+ if err := ptypes.UnmarshalAny(result, tpDwnldMsg); err != nil {
+ logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+ return nil, err
+ }
+ return tpDwnldMsg, nil
+ } else {
+ unpackResult := &ic.Error{}
+ var err error
+ if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+ logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+ }
+ logger.Debugw(ctx, "TechProfileInstanceRequest-return", log.Fields{"tpPath": tpPath, "success": success, "error": err})
+
+ return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+ }
+}
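TechProfileInstanceRequest is the RPC that lets an ONU-side adapter ask the adapter owning the parent device for a tech profile instance over Kafka, rather than reading it from etcd itself. A hedged caller sketch; the adapter topics, path and IDs below are illustrative, not taken from this change:

    tpDownloadMsg, err := adapterProxy.TechProfileInstanceRequest(ctx,
        "XGS-PON/64/olt-{oltID}/pon-{0}/onu-{1}/uni-{0}", // tpPath
        0,                   // parent PON port
        1,                   // onu id
        0,                   // uni id
        "brcm_openomci_onu", // fromAdapter (requesting adapter's topic)
        "openolt",           // toAdapter (adapter owning the TP instance)
        parentDeviceID,      // toDeviceID
        parentDeviceID)      // proxyDeviceID
    if err != nil {
        return err
    }
    // tpDownloadMsg carries the downloaded TP instance payload for the requested UNI.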
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/common.go
index 5d7d7f8..98085bb 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/common.go
@@ -16,7 +16,7 @@
package common
import (
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
)
var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/core_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/core_proxy.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/core_proxy.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/core_proxy.go
index 1077226..589d951 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/core_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/core_proxy.go
@@ -21,8 +21,8 @@
"github.com/golang/protobuf/ptypes"
a "github.com/golang/protobuf/ptypes/any"
- "github.com/opencord/voltha-lib-go/v4/pkg/kafka"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
ic "github.com/opencord/voltha-protos/v4/go/inter_container"
"github.com/opencord/voltha-protos/v4/go/voltha"
"google.golang.org/grpc/codes"
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/performance_metrics.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/performance_metrics.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/performance_metrics.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/performance_metrics.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/request_handler.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/request_handler.go
similarity index 95%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/request_handler.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/request_handler.go
index b6cf1c0..90f575b 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/request_handler.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/request_handler.go
@@ -21,10 +21,10 @@
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/empty"
- "github.com/opencord/voltha-lib-go/v4/pkg/adapters"
- "github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif"
- "github.com/opencord/voltha-lib-go/v4/pkg/kafka"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/adapters"
+ "github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif"
+ "github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
"github.com/opencord/voltha-protos/v4/go/extension"
ic "github.com/opencord/voltha-protos/v4/go/inter_container"
"github.com/opencord/voltha-protos/v4/go/openflow_13"
@@ -559,6 +559,37 @@
return new(empty.Empty), nil
}
+func (rhp *RequestHandlerProxy) Process_tech_profile_instance_request(ctx context.Context, args []*ic.Argument) (*ic.InterAdapterTechProfileDownloadMessage, error) {
+ if len(args) < 2 {
+ logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+ err := errors.New("invalid-number-of-args")
+ return nil, err
+ }
+ iaTpReqMsg := &ic.InterAdapterTechProfileInstanceRequestMessage{}
+ transactionID := &ic.StrType{}
+ for _, arg := range args {
+ switch arg.Key {
+ case "msg":
+ if err := ptypes.UnmarshalAny(arg.Value, iaTpReqMsg); err != nil {
+ logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+ return nil, err
+ }
+ case kafka.TransactionKey:
+ if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+ logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+ return nil, err
+ }
+ }
+ }
+
+ logger.Debugw(ctx, "Process_tech_profile_instance_request", log.Fields{"tpPath": iaTpReqMsg.TpInstancePath})
+
+ //Invoke the tech profile instance request
+ tpInst := rhp.adapter.Process_tech_profile_instance_request(ctx, iaTpReqMsg)
+
+ return tpInst, nil
+}
+
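On the receiving side this handler delegates to the adapter's Process_tech_profile_instance_request (see the iAdapter addition below). A hedged sketch of the shape such an adapter method takes; it is illustrative only, not the actual openolt implementation, and resolveTechProfileInstance is a hypothetical helper:

    func (oo *OpenOLT) Process_tech_profile_instance_request(ctx context.Context,
        request *ic.InterAdapterTechProfileInstanceRequestMessage) *ic.InterAdapterTechProfileDownloadMessage {
        // Look up (or build) the TP instance addressed by request.TpInstancePath for
        // request.ParentPonPort/request.OnuId/request.UniId and return it to the requester.
        return oo.resolveTechProfileInstance(ctx, request) // hypothetical helper
    }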
func (rhp *RequestHandlerProxy) Download_image(ctx context.Context, args []*ic.Argument) (*voltha.ImageDownload, error) {
device, image, err := unMarshalImageDowload(args, ctx)
if err != nil {
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/utils.go
similarity index 97%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/utils.go
index 65b432c..35f227e 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/utils.go
@@ -18,7 +18,7 @@
import (
"context"
"fmt"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
ic "github.com/opencord/voltha-protos/v4/go/inter_container"
"google.golang.org/grpc/codes"
"math/rand"
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/iAdapter.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/iAdapter.go
similarity index 96%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/iAdapter.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/iAdapter.go
index fbf2b5d..aca4271 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/iAdapter.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/iAdapter.go
@@ -46,6 +46,7 @@
Unsuppress_event(ctx context.Context, filter *voltha.EventFilter) error
Get_ofp_device_info(ctx context.Context, device *voltha.Device) (*ic.SwitchCapability, error)
Process_inter_adapter_message(ctx context.Context, msg *ic.InterAdapterMessage) error
+ Process_tech_profile_instance_request(ctx context.Context, msg *ic.InterAdapterTechProfileInstanceRequestMessage) *ic.InterAdapterTechProfileDownloadMessage
Download_image(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
Get_image_download_status(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
Cancel_image_download(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/common.go
index 294a4bd..606d18c 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/common.go
@@ -16,7 +16,7 @@
package config
import (
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
)
var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/configmanager.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/configmanager.go
similarity index 98%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/configmanager.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/configmanager.go
index 8350225..f5efa36 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/configmanager.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/configmanager.go
@@ -22,9 +22,9 @@
"strings"
"time"
- "github.com/opencord/voltha-lib-go/v4/pkg/db"
- "github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/db"
+ "github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
)
const (
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/logcontroller.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logcontroller.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/logcontroller.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logcontroller.go
index 8187edc..68bfb32 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/logcontroller.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logcontroller.go
@@ -26,7 +26,7 @@
"crypto/md5"
"encoding/json"
"errors"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
"os"
"sort"
"strings"
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/logfeaturescontroller.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logfeaturescontroller.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/logfeaturescontroller.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logfeaturescontroller.go
index 353ae5c..95c5bde 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/logfeaturescontroller.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logfeaturescontroller.go
@@ -19,7 +19,7 @@
import (
"context"
"errors"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
"os"
"strings"
)
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/backend.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/backend.go
similarity index 98%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/backend.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/backend.go
index bf30a48..ff0b5b7 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/backend.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/backend.go
@@ -23,8 +23,8 @@
"sync"
"time"
- "github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/common.go
index 25cddf5..4bc92b1 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/common.go
@@ -16,7 +16,7 @@
package db
import (
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
)
var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/client.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/client.go
similarity index 95%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/client.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/client.go
index b35f1f3..e4b1fff 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/client.go
@@ -79,14 +79,17 @@
Put(ctx context.Context, key string, value interface{}) error
Delete(ctx context.Context, key string) error
DeleteWithPrefix(ctx context.Context, prefixKey string) error
+ Watch(ctx context.Context, key string, withPrefix bool) chan *Event
+ IsConnectionUp(ctx context.Context) bool // timeout in seconds
+ CloseWatch(ctx context.Context, key string, ch chan *Event)
+ Close(ctx context.Context)
+
+ // The APIs below are unused. They will be removed in the VOLTHA 2.9 release;
+ // they are kept for now to limit the scope of changes across components.
Reserve(ctx context.Context, key string, value interface{}, ttl time.Duration) (interface{}, error)
ReleaseReservation(ctx context.Context, key string) error
ReleaseAllReservations(ctx context.Context) error
RenewReservation(ctx context.Context, key string) error
- Watch(ctx context.Context, key string, withPrefix bool) chan *Event
AcquireLock(ctx context.Context, lockName string, timeout time.Duration) error
ReleaseLock(lockName string) error
- IsConnectionUp(ctx context.Context) bool // timeout in second
- CloseWatch(ctx context.Context, key string, ch chan *Event)
- Close(ctx context.Context)
}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/common.go
index 99c603d..b8509db 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/common.go
@@ -16,7 +16,7 @@
package kvstore
import (
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
)
var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdclient.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdclient.go
new file mode 100644
index 0000000..96ffc2f
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdclient.go
@@ -0,0 +1,473 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
+ v3Client "go.etcd.io/etcd/clientv3"
+ v3rpcTypes "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+ "os"
+ "strconv"
+ "sync"
+ "time"
+)
+
+const (
+ poolCapacityEnvName = "VOLTHA_ETCD_CLIENT_POOL_CAPACITY"
+ maxUsageEnvName = "VOLTHA_ETCD_CLIENT_MAX_USAGE"
+)
+
+const (
+ defaultMaxPoolCapacity = 1000 // Default size of an Etcd Client pool
+ defaultMaxPoolUsage = 100 // Maximum concurrent requests an Etcd Client is allowed to process
+)
+
+// EtcdClient represents the Etcd KV store client
+type EtcdClient struct {
+ pool EtcdClientAllocator
+ watchedChannels sync.Map
+ watchedClients map[string]*v3Client.Client
+ watchedClientsLock sync.RWMutex
+}
+
+// NewEtcdCustomClient returns a new client for the Etcd KV store, allowing
+// the caller to specify the etcd client configuration
+func NewEtcdCustomClient(ctx context.Context, addr string, timeout time.Duration, level log.LogLevel) (*EtcdClient, error) {
+ // Get the capacity and max usage from the environment
+ capacity := defaultMaxPoolCapacity
+ maxUsage := defaultMaxPoolUsage
+ if capacityStr, present := os.LookupEnv(poolCapacityEnvName); present {
+ if val, err := strconv.Atoi(capacityStr); err == nil {
+ capacity = val
+ logger.Infow(ctx, "env-variable-set", log.Fields{"pool-capacity": capacity})
+ } else {
+ logger.Warnw(ctx, "invalid-capacity-value", log.Fields{"error": err, "capacity": capacityStr})
+ }
+ }
+ if maxUsageStr, present := os.LookupEnv(maxUsageEnvName); present {
+ if val, err := strconv.Atoi(maxUsageStr); err == nil {
+ maxUsage = val
+ logger.Infow(ctx, "env-variable-set", log.Fields{"max-usage": maxUsage})
+ } else {
+ logger.Warnw(ctx, "invalid-max-usage-value", log.Fields{"error": err, "max-usage": maxUsageStr})
+ }
+ }
+
+ var err error
+
+ pool, err := NewRoundRobinEtcdClientAllocator([]string{addr}, timeout, capacity, maxUsage, level)
+ if err != nil {
+ logger.Errorw(ctx, "failed-to-create-rr-client", log.Fields{
+ "error": err,
+ })
+ }
+
+ logger.Infow(ctx, "etcd-pool-created", log.Fields{"capacity": capacity, "max-usage": maxUsage})
+
+ return &EtcdClient{pool: pool,
+ watchedClients: make(map[string]*v3Client.Client),
+ }, nil
+}
+
+// NewEtcdClient returns a new client for the Etcd KV store
+func NewEtcdClient(ctx context.Context, addr string, timeout time.Duration, level log.LogLevel) (*EtcdClient, error) {
+ return NewEtcdCustomClient(ctx, addr, timeout, level)
+}
+
+// IsConnectionUp returns whether the connection to the Etcd KV store is up. If a timeout occurs then
+// it is assumed the connection is down or unreachable.
+func (c *EtcdClient) IsConnectionUp(ctx context.Context) bool {
+ // Try to get a non-existent key. If the connection is up, no error is returned.
+ if _, err := c.Get(ctx, "non-existent-key"); err != nil {
+ return false
+ }
+ return true
+}
+
+// List returns an array of key-value pairs with key as a prefix. Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
+ client, err := c.pool.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer c.pool.Put(client)
+ resp, err := client.Get(ctx, key, v3Client.WithPrefix())
+
+ if err != nil {
+ logger.Error(ctx, err)
+ return nil, err
+ }
+ m := make(map[string]*KVPair)
+ for _, ev := range resp.Kvs {
+ m[string(ev.Key)] = NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version)
+ }
+ return m, nil
+}
+
+// Get returns a key-value pair for a given key. Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) Get(ctx context.Context, key string) (*KVPair, error) {
+ client, err := c.pool.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer c.pool.Put(client)
+
+ attempt := 0
+
+startLoop:
+ for {
+ resp, err := client.Get(ctx, key)
+ if err != nil {
+ switch err {
+ case context.Canceled:
+ logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
+ case context.DeadlineExceeded:
+ logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+ case v3rpcTypes.ErrEmptyKey:
+ logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+ case v3rpcTypes.ErrLeaderChanged,
+ v3rpcTypes.ErrGRPCNoLeader,
+ v3rpcTypes.ErrTimeout,
+ v3rpcTypes.ErrTimeoutDueToLeaderFail,
+ v3rpcTypes.ErrTimeoutDueToConnectionLost:
+ // Retry for these server errors
+ attempt += 1
+ if er := backoff(ctx, attempt); er != nil {
+ logger.Warnw(ctx, "get-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+ return nil, err
+ }
+ logger.Warnw(ctx, "retrying-get", log.Fields{"key": key, "error": err, "attempt": attempt})
+ goto startLoop
+ default:
+ logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+ }
+ return nil, err
+ }
+
+ for _, ev := range resp.Kvs {
+ // Only one value is returned
+ return NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version), nil
+ }
+ return nil, nil
+ }
+}
+
+// Put writes a key-value pair to the KV store. Value can only be a string or []byte since the etcd API
+// accepts only a string as a value for a put operation. Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) Put(ctx context.Context, key string, value interface{}) error {
+
+ // Validate that we can convert value to a string as etcd API expects a string
+ var val string
+ var err error
+ if val, err = ToString(value); err != nil {
+ return fmt.Errorf("unexpected-type-%T", value)
+ }
+
+ client, err := c.pool.Get(ctx)
+ if err != nil {
+ return err
+ }
+ defer c.pool.Put(client)
+
+ attempt := 0
+startLoop:
+ for {
+ _, err = client.Put(ctx, key, val)
+ if err != nil {
+ switch err {
+ case context.Canceled:
+ logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
+ case context.DeadlineExceeded:
+ logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+ case v3rpcTypes.ErrEmptyKey:
+ logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+ case v3rpcTypes.ErrLeaderChanged,
+ v3rpcTypes.ErrGRPCNoLeader,
+ v3rpcTypes.ErrTimeout,
+ v3rpcTypes.ErrTimeoutDueToLeaderFail,
+ v3rpcTypes.ErrTimeoutDueToConnectionLost:
+ // Retry for these server errors
+ attempt += 1
+ if er := backoff(ctx, attempt); er != nil {
+ logger.Warnw(ctx, "put-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+ return err
+ }
+ logger.Warnw(ctx, "retrying-put", log.Fields{"key": key, "error": err, "attempt": attempt})
+ goto startLoop
+ default:
+ logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+ }
+ return err
+ }
+ return nil
+ }
+}
+
+// Delete removes a key from the KV store. Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) Delete(ctx context.Context, key string) error {
+ client, err := c.pool.Get(ctx)
+ if err != nil {
+ return err
+ }
+ defer c.pool.Put(client)
+
+ attempt := 0
+startLoop:
+ for {
+ _, err = client.Delete(ctx, key)
+ if err != nil {
+ switch err {
+ case context.Canceled:
+ logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
+ case context.DeadlineExceeded:
+ logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+ case v3rpcTypes.ErrEmptyKey:
+ logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+ case v3rpcTypes.ErrLeaderChanged,
+ v3rpcTypes.ErrGRPCNoLeader,
+ v3rpcTypes.ErrTimeout,
+ v3rpcTypes.ErrTimeoutDueToLeaderFail,
+ v3rpcTypes.ErrTimeoutDueToConnectionLost:
+ // Retry for these server errors
+ attempt += 1
+ if er := backoff(ctx, attempt); er != nil {
+ logger.Warnw(ctx, "delete-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+ return err
+ }
+ logger.Warnw(ctx, "retrying-delete", log.Fields{"key": key, "error": err, "attempt": attempt})
+ goto startLoop
+ default:
+ logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+ }
+ return err
+ }
+ logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": key})
+ return nil
+ }
+}
+
+func (c *EtcdClient) DeleteWithPrefix(ctx context.Context, prefixKey string) error {
+
+ client, err := c.pool.Get(ctx)
+ if err != nil {
+ return err
+ }
+ defer c.pool.Put(client)
+
+ //delete the prefix
+ if _, err := client.Delete(ctx, prefixKey, v3Client.WithPrefix()); err != nil {
+ logger.Errorw(ctx, "failed-to-delete-prefix-key", log.Fields{"key": prefixKey, "error": err})
+ return err
+ }
+ logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": prefixKey})
+ return nil
+}
+
+// Watch provides the watch capability on a given key. It returns a channel onto which the caller needs to
+// listen to receive Events.
+func (c *EtcdClient) Watch(ctx context.Context, key string, withPrefix bool) chan *Event {
+ var err error
+ // Reuse the Etcd client when multiple callees are watching the same key.
+ c.watchedClientsLock.Lock()
+ client, exist := c.watchedClients[key]
+ if !exist {
+ client, err = c.pool.Get(ctx)
+ if err != nil {
+ logger.Errorw(ctx, "failed-to-an-etcd-client", log.Fields{"key": key, "error": err})
+ c.watchedClientsLock.Unlock()
+ return nil
+ }
+ c.watchedClients[key] = client
+ }
+ c.watchedClientsLock.Unlock()
+
+ w := v3Client.NewWatcher(client)
+ ctx, cancel := context.WithCancel(ctx)
+ var channel v3Client.WatchChan
+ if withPrefix {
+ channel = w.Watch(ctx, key, v3Client.WithPrefix())
+ } else {
+ channel = w.Watch(ctx, key)
+ }
+
+ // Create a new channel
+ ch := make(chan *Event, maxClientChannelBufferSize)
+
+ // Keep track of the created channels so they can be closed when required
+ channelMap := make(map[chan *Event]v3Client.Watcher)
+ channelMap[ch] = w
+ channelMaps := c.addChannelMap(key, channelMap)
+
+ // Changing the log field (from channelMaps) as the underlying logger cannot format the map of channels into a
+ // json format.
+ logger.Debugw(ctx, "watched-channels", log.Fields{"len": len(channelMaps)})
+ // Launch a go routine to listen for updates
+ go c.listenForKeyChange(ctx, channel, ch, cancel)
+
+ return ch
+
+}
+
+func (c *EtcdClient) addChannelMap(key string, channelMap map[chan *Event]v3Client.Watcher) []map[chan *Event]v3Client.Watcher {
+ var channels interface{}
+ var exists bool
+
+ if channels, exists = c.watchedChannels.Load(key); exists {
+ channels = append(channels.([]map[chan *Event]v3Client.Watcher), channelMap)
+ } else {
+ channels = []map[chan *Event]v3Client.Watcher{channelMap}
+ }
+ c.watchedChannels.Store(key, channels)
+
+ return channels.([]map[chan *Event]v3Client.Watcher)
+}
+
+func (c *EtcdClient) removeChannelMap(key string, pos int) []map[chan *Event]v3Client.Watcher {
+ var channels interface{}
+ var exists bool
+
+ if channels, exists = c.watchedChannels.Load(key); exists {
+ channels = append(channels.([]map[chan *Event]v3Client.Watcher)[:pos], channels.([]map[chan *Event]v3Client.Watcher)[pos+1:]...)
+ c.watchedChannels.Store(key, channels)
+ }
+
+ return channels.([]map[chan *Event]v3Client.Watcher)
+}
+
+func (c *EtcdClient) getChannelMaps(key string) ([]map[chan *Event]v3Client.Watcher, bool) {
+ var channels interface{}
+ var exists bool
+
+ channels, exists = c.watchedChannels.Load(key)
+
+ if channels == nil {
+ return nil, exists
+ }
+
+ return channels.([]map[chan *Event]v3Client.Watcher), exists
+}
+
+// CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
+// may be multiple listeners on the same key. The previously created channel serves as a key
+func (c *EtcdClient) CloseWatch(ctx context.Context, key string, ch chan *Event) {
+ // Get the array of channels mapping
+ var watchedChannels []map[chan *Event]v3Client.Watcher
+ var ok bool
+
+ if watchedChannels, ok = c.getChannelMaps(key); !ok {
+ logger.Warnw(ctx, "key-has-no-watched-channels", log.Fields{"key": key})
+ return
+ }
+ // Look for the channels
+ var pos = -1
+ for i, chMap := range watchedChannels {
+ if t, ok := chMap[ch]; ok {
+ logger.Debug(ctx, "channel-found")
+ // Close the etcd watcher before the client channel. This should close the etcd channel as well
+ if err := t.Close(); err != nil {
+ logger.Errorw(ctx, "watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
+ }
+ pos = i
+ break
+ }
+ }
+
+ channelMaps, _ := c.getChannelMaps(key)
+ // Remove that entry if present
+ if pos >= 0 {
+ channelMaps = c.removeChannelMap(key, pos)
+ }
+
+ // If we don't have any keys being watched then return the Etcd client to the pool
+ if len(channelMaps) == 0 {
+ c.watchedClientsLock.Lock()
+ // Sanity
+ if client, ok := c.watchedClients[key]; ok {
+ c.pool.Put(client)
+ delete(c.watchedClients, key)
+ }
+ c.watchedClientsLock.Unlock()
+ }
+ logger.Infow(ctx, "watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
+}
+
+func (c *EtcdClient) listenForKeyChange(ctx context.Context, channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
+ logger.Debug(ctx, "start-listening-on-channel ...")
+ defer cancel()
+ defer close(ch)
+ for resp := range channel {
+ for _, ev := range resp.Events {
+ ch <- NewEvent(getEventType(ev), ev.Kv.Key, ev.Kv.Value, ev.Kv.Version)
+ }
+ }
+ logger.Debug(ctx, "stop-listening-on-channel ...")
+}
+
+func getEventType(event *v3Client.Event) int {
+ switch event.Type {
+ case v3Client.EventTypePut:
+ return PUT
+ case v3Client.EventTypeDelete:
+ return DELETE
+ }
+ return UNKNOWN
+}
+
+// Close closes all the connections held by the client pool
+func (c *EtcdClient) Close(ctx context.Context) {
+ logger.Debug(ctx, "closing-etcd-pool")
+ c.pool.Close(ctx)
+}
+
+// The APIs below are not used
+var errUnimplemented = errors.New("deprecated")
+
+// Reserve is deprecated
+func (c *EtcdClient) Reserve(ctx context.Context, key string, value interface{}, ttl time.Duration) (interface{}, error) {
+ return nil, errUnimplemented
+}
+
+// ReleaseAllReservations is deprecated
+func (c *EtcdClient) ReleaseAllReservations(ctx context.Context) error {
+ return errUnimplemented
+}
+
+// ReleaseReservation is deprecated
+func (c *EtcdClient) ReleaseReservation(ctx context.Context, key string) error {
+ return errUnimplemented
+}
+
+// RenewReservation is deprecated
+func (c *EtcdClient) RenewReservation(ctx context.Context, key string) error {
+ return errUnimplemented
+}
+
+// AcquireLock is deprecated
+func (c *EtcdClient) AcquireLock(ctx context.Context, lockName string, timeout time.Duration) error {
+ return errUnimplemented
+}
+
+// ReleaseLock is deprecated
+func (c *EtcdClient) ReleaseLock(lockName string) error {
+ return errUnimplemented
+}
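For reference, a minimal sketch (not part of this change) of how an adapter might exercise the pooled etcd client added above; the endpoint, key names, and timeout below are illustrative assumptions:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
	"github.com/opencord/voltha-lib-go/v5/pkg/log"
)

func main() {
	ctx := context.Background()

	// NewEtcdClient builds the round-robin client pool internally.
	client, err := kvstore.NewEtcdClient(ctx, "etcd:2379", 5*time.Second, log.WarnLevel)
	if err != nil {
		panic(err)
	}
	defer client.Close(ctx)

	// Put/Get borrow a client from the pool and return it when done.
	if err := client.Put(ctx, "service/voltha/demo-key", "demo-value"); err != nil {
		fmt.Println("put failed:", err)
	}
	if kv, err := client.Get(ctx, "service/voltha/demo-key"); err == nil && kv != nil {
		fmt.Printf("got %s -> %v\n", kv.Key, kv.Value)
	}

	// Watch reuses one pooled client per watched key; CloseWatch releases it
	// back to the pool once the last watcher on that key is closed.
	events := client.Watch(ctx, "service/voltha/demo-key", true)
	go func() {
		for ev := range events {
			fmt.Printf("watch event: %+v\n", ev)
		}
	}()
	time.Sleep(time.Second)
	client.CloseWatch(ctx, "service/voltha/demo-key", events)
}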
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdpool.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdpool.go
new file mode 100644
index 0000000..6af7d3d
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdpool.go
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2021-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+ "container/list"
+ "context"
+ "errors"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
+ "go.etcd.io/etcd/clientv3"
+ "sync"
+ "time"
+)
+
+// EtcdClientAllocator represents a generic interface to allocate an Etcd Client
+type EtcdClientAllocator interface {
+ Get(context.Context) (*clientv3.Client, error)
+ Put(*clientv3.Client)
+ Close(ctx context.Context)
+}
+
+// NewRoundRobinEtcdClientAllocator creates a new ETCD Client Allocator using a Round Robin scheme
+func NewRoundRobinEtcdClientAllocator(endpoints []string, timeout time.Duration, capacity, maxUsage int, level log.LogLevel) (EtcdClientAllocator, error) {
+ return &roundRobin{
+ all: make(map[*clientv3.Client]*rrEntry),
+ full: make(map[*clientv3.Client]*rrEntry),
+ waitList: list.New(),
+ max: maxUsage,
+ capacity: capacity,
+ timeout: timeout,
+ endpoints: endpoints,
+ logLevel: level,
+ closingCh: make(chan struct{}, capacity*maxUsage),
+ stopCh: make(chan struct{}),
+ }, nil
+}
+
+type rrEntry struct {
+ client *clientv3.Client
+ count int
+ age time.Time
+}
+
+type roundRobin struct {
+ //block chan struct{}
+ sync.Mutex
+ available []*rrEntry
+ all map[*clientv3.Client]*rrEntry
+ full map[*clientv3.Client]*rrEntry
+ waitList *list.List
+ max int
+ capacity int
+ timeout time.Duration
+ //ageOut time.Duration
+ endpoints []string
+ size int
+ logLevel log.LogLevel
+ closing bool
+ closingCh chan struct{}
+ stopCh chan struct{}
+}
+
+// Get returns an Etcd client. If none is available, it creates one,
+// up to the maximum allowed capacity. If the maximum capacity has been
+// reached, it waits until a used one is freed.
+func (r *roundRobin) Get(ctx context.Context) (*clientv3.Client, error) {
+ r.Lock()
+
+ if r.closing {
+ r.Unlock()
+ return nil, errors.New("pool-is-closing")
+ }
+
+ // first determine if we need to block, which would mean the
+ // available queue is empty and we are at capacity
+ if len(r.available) == 0 && r.size >= r.capacity {
+
+ // create a channel on which to wait and
+ // add it to the list
+ ch := make(chan struct{})
+ element := r.waitList.PushBack(ch)
+ r.Unlock()
+
+ // block until it is our turn or context
+ // expires or is canceled
+ select {
+ case <-r.stopCh:
+ logger.Info(ctx, "stop-waiting-pool-is-closing")
+ r.waitList.Remove(element)
+ return nil, errors.New("stop-waiting-pool-is-closing")
+ case <-ch:
+ r.waitList.Remove(element)
+ case <-ctx.Done():
+ r.waitList.Remove(element)
+ return nil, ctx.Err()
+ }
+ r.Lock()
+ }
+
+ defer r.Unlock()
+ if len(r.available) > 0 {
+ // pull off back end as it is operationally quicker
+ last := len(r.available) - 1
+ entry := r.available[last]
+ entry.count++
+ if entry.count >= r.max {
+ r.available = r.available[:last]
+ r.full[entry.client] = entry
+ }
+ entry.age = time.Now()
+ return entry.client, nil
+ }
+
+ logConfig := log.ConstructZapConfig(log.JSON, r.logLevel, log.Fields{})
+ // increase capacity
+ client, err := clientv3.New(clientv3.Config{
+ Endpoints: r.endpoints,
+ DialTimeout: r.timeout,
+ LogConfig: &logConfig,
+ })
+ if err != nil {
+ return nil, err
+ }
+ entry := &rrEntry{
+ client: client,
+ count: 1,
+ }
+ r.all[entry.client] = entry
+
+ if r.max > 1 {
+ r.available = append(r.available, entry)
+ } else {
+ r.full[entry.client] = entry
+ }
+ r.size++
+ return client, nil
+}
+
+// Put returns the Etcd Client back to the pool
+func (r *roundRobin) Put(client *clientv3.Client) {
+ r.Lock()
+
+ entry := r.all[client]
+ entry.count--
+
+ if r.closing {
+ // Close client if count is 0
+ if entry.count == 0 {
+ if err := entry.client.Close(); err != nil {
+ logger.Warnw(context.Background(), "error-closing-client", log.Fields{"error": err})
+ }
+ delete(r.all, entry.client)
+ }
+ // Notify Close function that a client was returned to the pool
+ r.closingCh <- struct{}{}
+ r.Unlock()
+ return
+ }
+
+ // This entry is now available for use, so
+ // if in full map add it to available and
+ // remove from full
+ if _, ok := r.full[client]; ok {
+ r.available = append(r.available, entry)
+ delete(r.full, client)
+ }
+
+ front := r.waitList.Front()
+ if front != nil {
+ ch := r.waitList.Remove(front)
+ r.Unlock()
+ // need to unblock if someone is waiting
+ ch.(chan struct{}) <- struct{}{}
+ return
+ }
+ r.Unlock()
+}
+
+func (r *roundRobin) Close(ctx context.Context) {
+ r.Lock()
+ r.closing = true
+
+ // Notify anyone waiting for a client to stop waiting
+ close(r.stopCh)
+
+ // Clean-up unused clients
+ for i := 0; i < len(r.available); i++ {
+ // Count 0 means no one is using that client
+ if r.available[i].count == 0 {
+ if err := r.available[i].client.Close(); err != nil {
+ logger.Warnw(ctx, "failure-closing-client", log.Fields{"client": r.available[i].client, "error": err})
+ }
+ // Remove the client from the 'all' map
+ delete(r.all, r.available[i].client)
+ }
+ }
+
+ // Figure out how many clients are in use
+ numberInUse := 0
+ for _, rrEntry := range r.all {
+ numberInUse += rrEntry.count
+ }
+ r.Unlock()
+
+ if numberInUse == 0 {
+ logger.Info(ctx, "no-connection-in-use")
+ return
+ }
+
+ logger.Infow(ctx, "waiting-for-clients-return", log.Fields{"count": numberInUse})
+
+ // Wait for notifications when a client is returned to the pool
+ for {
+ select {
+ case <-r.closingCh:
+ numberInUse--
+ if numberInUse == 0 {
+ logger.Info(ctx, "all-connections-closed")
+ return
+ }
+ case <-ctx.Done():
+ logger.Warnw(ctx, "context-done", log.Fields{"error": ctx.Err()})
+ return
+ }
+ }
+}
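As a usage note (illustrative only, assuming a reachable etcd endpoint), the allocator above can also be driven directly: Get blocks when every pooled client has reached its max usage, Put unblocks the oldest waiter, and Close waits until all borrowed clients are returned:

package main

import (
	"context"
	"time"

	"github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
	"github.com/opencord/voltha-lib-go/v5/pkg/log"
)

func main() {
	ctx := context.Background()

	// Capacity of 10 clients, each allowed 5 concurrent users (assumed values).
	pool, err := kvstore.NewRoundRobinEtcdClientAllocator(
		[]string{"etcd:2379"}, 5*time.Second, 10, 5, log.WarnLevel)
	if err != nil {
		panic(err)
	}

	cli, err := pool.Get(ctx) // may block if all clients are at max usage
	if err == nil {
		_, _ = cli.Get(ctx, "some-key") // plain clientv3 call on the borrowed client
		pool.Put(cli)                   // return it so any waiter can proceed
	}

	pool.Close(ctx) // waits for in-flight clients to be returned, then closes them
}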
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/kvutils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/kvutils.go
similarity index 65%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/kvutils.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/kvutils.go
index 70bd977..ca57542 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/kvutils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/kvutils.go
@@ -16,8 +16,18 @@
package kvstore
import (
- "bytes"
+ "context"
"fmt"
+ "math"
+ "math/rand"
+ "time"
+)
+
+const (
+ minRetryInterval = 100
+ maxRetryInterval = 5000
+ incrementalFactor = 1.2
+ jitter = 0.2
)
// ToString converts an interface value to a string. The interface should either be of
@@ -46,13 +56,23 @@
}
}
-// Helper function to verify mostly whether the content of two interface types are the same. Focus is []byte and
-// string types
-func isEqual(val1 interface{}, val2 interface{}) bool {
- b1, err := ToByte(val1)
- b2, er := ToByte(val2)
- if err == nil && er == nil {
- return bytes.Equal(b1, b2)
+// backoff waits an amount of time proportional to the attempt number. The wait time is bounded by
+// minRetryInterval and maxRetryInterval.
+func backoff(ctx context.Context, attempt int) error {
+ if attempt == 0 {
+ return nil
}
- return val1 == val2
+ backoff := int(minRetryInterval + incrementalFactor*math.Exp(float64(attempt)))
+ backoff *= 1 + int(jitter*(rand.Float64()*2-1))
+ if backoff > maxRetryInterval {
+ backoff = maxRetryInterval
+ }
+ ticker := time.NewTicker(time.Duration(backoff) * time.Millisecond)
+ defer ticker.Stop()
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-ticker.C:
+ }
+ return nil
}
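To make the retry cadence concrete, here is a small standalone illustration (not part of the library; it re-states the formula above without the jitter term) showing roughly how long each retry attempt waits:

package main

import (
	"fmt"
	"math"
)

func main() {
	const (
		minRetryInterval  = 100  // ms
		maxRetryInterval  = 5000 // ms
		incrementalFactor = 1.2
	)
	// Without jitter, the wait grows exponentially with the attempt number
	// and is capped at maxRetryInterval.
	for attempt := 1; attempt <= 10; attempt++ {
		wait := int(minRetryInterval + incrementalFactor*math.Exp(float64(attempt)))
		if wait > maxRetryInterval {
			wait = maxRetryInterval
		}
		fmt.Printf("attempt %2d -> wait ~%d ms\n", attempt, wait)
	}
}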
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/events/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/events/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/common.go
index 489a493..df3e839 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/events/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/common.go
@@ -16,7 +16,7 @@
package events
import (
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
)
var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/events/eventif/events_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/eventif/events_proxy_if.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/events/eventif/events_proxy_if.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/eventif/events_proxy_if.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/events/events_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/events_proxy.go
similarity index 98%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/events/events_proxy.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/events_proxy.go
index 910fec3..19a4f26 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/events/events_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/events_proxy.go
@@ -27,9 +27,9 @@
"time"
"github.com/golang/protobuf/ptypes"
- "github.com/opencord/voltha-lib-go/v4/pkg/events/eventif"
- "github.com/opencord/voltha-lib-go/v4/pkg/kafka"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/events/eventif"
+ "github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
"github.com/opencord/voltha-protos/v4/go/voltha"
)
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/events/utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/utils.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/events/utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/utils.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/flows/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/flows/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/common.go
index fdc93bd..beb0574 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/flows/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/common.go
@@ -16,7 +16,7 @@
package flows
import (
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
)
var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/flows/flow_utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/flow_utils.go
similarity index 97%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/flows/flow_utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/flow_utils.go
index 98fad49..ff6aaf0 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/flows/flow_utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/flow_utils.go
@@ -26,7 +26,7 @@
"github.com/cevaris/ordered_map"
"github.com/gogo/protobuf/proto"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
ofp "github.com/opencord/voltha-protos/v4/go/openflow_13"
)
@@ -1576,3 +1576,33 @@
}
return b.Bytes()
}
+
+func GetMeterIdFromWriteMetadata(ctx context.Context, flow *ofp.OfpFlowStats) uint32 {
+ /*
+ Write metadata instruction value (metadata) is 8 bytes:
+ MS 2 bytes: C Tag
+ Next 2 bytes: Technology Profile Id
+ Next 4 bytes: Port number (uni or nni) or MeterId
+ This is set in the ONOS OltPipeline as a write metadata instruction
+ */
+ var meterID uint32 = 0
+ md := GetMetadataFromWriteMetadataAction(ctx, flow)
+ logger.Debugw(ctx, "found-metadata-for-egress/uni-port", log.Fields{"metadata": md})
+ if md != 0 {
+ meterID = uint32(md & 0xFFFFFFFF)
+ logger.Debugw(ctx, "found-meterID-in-write-metadata-action", log.Fields{"meterID": meterID})
+ }
+ return meterID
+}
+
+func SetMeterIdToFlow(flow *ofp.OfpFlowStats, meterId uint32) {
+ if flow != nil {
+ for _, instruction := range flow.Instructions {
+ if instruction.Type == uint32(METER_ACTION) {
+ if meterInst := instruction.GetMeter(); meterInst != nil {
+ meterInst.MeterId = meterId
+ }
+ }
+ }
+ }
+}
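The write-metadata layout described in the comment above can be illustrated with a short, self-contained sketch (the metadata value below is hypothetical):

package main

import "fmt"

func main() {
	// Hypothetical 8-byte write-metadata value, laid out as described above:
	// [ C-Tag (16 bits) | Tech Profile ID (16 bits) | meter ID or port (32 bits) ]
	var md uint64 = (uint64(257) << 48) | (uint64(64) << 32) | uint64(0x00010001)

	cTag := uint16(md >> 48)
	tpID := uint16(md >> 32)
	meterID := uint32(md & 0xFFFFFFFF) // same mask used by GetMeterIdFromWriteMetadata

	fmt.Printf("c-tag=%d tp-id=%d meter-id=%d\n", cTag, tpID, meterID)
}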
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/client.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/client.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/client.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/client.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/common.go
index 5db364d..f4d7661 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/common.go
@@ -16,7 +16,7 @@
package kafka
import (
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
)
var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/endpoint_manager.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/endpoint_manager.go
similarity index 98%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/endpoint_manager.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/endpoint_manager.go
index 796eb72..962b932 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/endpoint_manager.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/endpoint_manager.go
@@ -21,8 +21,8 @@
"github.com/buraksezer/consistent"
"github.com/cespare/xxhash"
"github.com/golang/protobuf/proto"
- "github.com/opencord/voltha-lib-go/v4/pkg/db"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/db"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
"github.com/opencord/voltha-protos/v4/go/voltha"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/kafka_inter_container_library.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/kafka_inter_container_library.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/kafka_inter_container_library.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/kafka_inter_container_library.go
index 3af35d7..b149e7d 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/kafka_inter_container_library.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/kafka_inter_container_library.go
@@ -31,7 +31,7 @@
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
"github.com/google/uuid"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
ic "github.com/opencord/voltha-protos/v4/go/inter_container"
"github.com/opentracing/opentracing-go"
)
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/sarama_client.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/sarama_client.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/sarama_client.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/sarama_client.go
index cd6d27b..3273470 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/sarama_client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/sarama_client.go
@@ -29,7 +29,7 @@
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/google/uuid"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
ic "github.com/opencord/voltha-protos/v4/go/inter_container"
)
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/utils.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/utils.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/common.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/common.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/log.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/log.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/log.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/log.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/utils.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/utils.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/meters/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/meters/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/meters/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/meters/common.go
index 0a171f6..e058e48 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/meters/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/meters/common.go
@@ -16,7 +16,7 @@
package meters
import (
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
)
var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/meters/meter_utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/meters/meter_utils.go
similarity index 98%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/meters/meter_utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/meters/meter_utils.go
index 38f35b9..d220c0b 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/meters/meter_utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/meters/meter_utils.go
@@ -18,7 +18,7 @@
import (
"context"
"fmt"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
ofp "github.com/opencord/voltha-protos/v4/go/openflow_13"
tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
)
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/pmmetrics/performance_metrics.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/pmmetrics/performance_metrics.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/pmmetrics/performance_metrics.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/pmmetrics/performance_metrics.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/ponresourcemanager/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/ponresourcemanager/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/ponresourcemanager/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/ponresourcemanager/common.go
index 1c9a5b1..76207a0 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/ponresourcemanager/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/ponresourcemanager/common.go
@@ -16,7 +16,7 @@
package ponresourcemanager
import (
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
)
var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/ponresourcemanager/ponresourcemanager.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/ponresourcemanager/ponresourcemanager.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/ponresourcemanager/ponresourcemanager.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/ponresourcemanager/ponresourcemanager.go
index 70ed8e6..804a6f3 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/ponresourcemanager/ponresourcemanager.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/ponresourcemanager/ponresourcemanager.go
@@ -25,10 +25,10 @@
"time"
bitmap "github.com/boljen/go-bitmap"
- "github.com/opencord/voltha-lib-go/v4/pkg/db"
- "github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
- tp "github.com/opencord/voltha-lib-go/v4/pkg/techprofile"
+ "github.com/opencord/voltha-lib-go/v5/pkg/db"
+ "github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
+ tp "github.com/opencord/voltha-lib-go/v5/pkg/techprofile"
)
const (
@@ -118,10 +118,6 @@
//Format: <device_id>/flow_id_info/<(pon_intf_id, onu_id)><flow_id>
FLOW_ID_INFO_PATH = FLOW_ID_INFO_PATH_PREFIX + "/{%s}/{%d}"
- //path on the kvstore to store onugem info map
- //format: <device-id>/onu_gem_info/<intfid>
- ONU_GEM_INFO_PATH = "{%s}/onu_gem_info/{%d}" // onu_gem/<(intfid)>
-
//Constants for internal usage.
PON_INTF_ID = "pon_intf_id"
START_IDX = "start_idx"
@@ -428,6 +424,43 @@
return err
}
+func (PONRMgr *PONResourceManager) InitDeviceResourcePoolForIntf(ctx context.Context, intfID uint32) error {
+
+ logger.Debug(ctx, "Init resource ranges for intf %d", intfID)
+
+ var err error
+
+ if err = PONRMgr.InitResourceIDPool(ctx, intfID, ONU_ID,
+ PONRMgr.PonResourceRanges[ONU_ID_START_IDX].(uint32),
+ PONRMgr.PonResourceRanges[ONU_ID_END_IDX].(uint32)); err != nil {
+ logger.Error(ctx, "Failed to init ONU ID resource pool")
+ return err
+ }
+
+ if err = PONRMgr.InitResourceIDPool(ctx, intfID, ALLOC_ID,
+ PONRMgr.PonResourceRanges[ALLOC_ID_START_IDX].(uint32),
+ PONRMgr.PonResourceRanges[ALLOC_ID_END_IDX].(uint32)); err != nil {
+ logger.Error(ctx, "Failed to init ALLOC ID resource pool ")
+ return err
+ }
+
+ if err = PONRMgr.InitResourceIDPool(ctx, intfID, GEMPORT_ID,
+ PONRMgr.PonResourceRanges[GEMPORT_ID_START_IDX].(uint32),
+ PONRMgr.PonResourceRanges[GEMPORT_ID_END_IDX].(uint32)); err != nil {
+ logger.Error(ctx, "Failed to init GEMPORT ID resource pool")
+ return err
+ }
+
+ if err = PONRMgr.InitResourceIDPool(ctx, intfID, FLOW_ID,
+ PONRMgr.PonResourceRanges[FLOW_ID_START_IDX].(uint32),
+ PONRMgr.PonResourceRanges[FLOW_ID_END_IDX].(uint32)); err != nil {
+ logger.Error(ctx, "Failed to init FLOW ID resource pool")
+ return err
+ }
+
+ return nil
+}
+
func (PONRMgr *PONResourceManager) ClearDeviceResourcePool(ctx context.Context) error {
//Clear resource pool for all PON ports.
@@ -491,6 +524,33 @@
return nil
}
+func (PONRMgr *PONResourceManager) ClearDeviceResourcePoolForIntf(ctx context.Context, intfID uint32) error {
+
+ logger.Debugf(ctx, "Clear resource ranges for intf %d", intfID)
+
+ if status := PONRMgr.ClearResourceIDPool(ctx, intfID, ONU_ID); !status {
+ logger.Error(ctx, "Failed to clear ONU ID resource pool")
+ return errors.New("Failed to clear ONU ID resource pool")
+ }
+
+ if status := PONRMgr.ClearResourceIDPool(ctx, intfID, ALLOC_ID); !status {
+ logger.Error(ctx, "Failed to clear ALLOC ID resource pool ")
+ return errors.New("Failed to clear ALLOC ID resource pool")
+ }
+
+ if status := PONRMgr.ClearResourceIDPool(ctx, intfID, GEMPORT_ID); !status {
+ logger.Error(ctx, "Failed to clear GEMPORT ID resource pool")
+ return errors.New("Failed to clear GEMPORT ID resource pool")
+ }
+
+ if status := PONRMgr.ClearResourceIDPool(ctx, intfID, FLOW_ID); !status {
+ logger.Error(ctx, "Failed to clear FLOW ID resource pool")
+ return errors.New("Failed to clear FLOW ID resource pool")
+ }
+
+ return nil
+}
+
func (PONRMgr *PONResourceManager) InitResourceIDPool(ctx context.Context, Intf uint32, ResourceType string, StartID uint32, EndID uint32) error {
/*Initialize Resource ID pool for a given Resource Type on a given PON Port
@@ -1305,69 +1365,3 @@
return "", fmt.Errorf("unexpected-type-%T", t)
}
}
-
-func (PONRMgr *PONResourceManager) AddOnuGemInfo(ctx context.Context, intfID uint32, onuGemData interface{}) error {
- /*
- Update onugem info map,
- :param pon_intf_id: reference of PON interface id
- :param onuegmdata: onugem info map
- */
- var Value []byte
- var err error
- Path := fmt.Sprintf(ONU_GEM_INFO_PATH, PONRMgr.DeviceID, intfID)
- Value, err = json.Marshal(onuGemData)
- if err != nil {
- logger.Error(ctx, "failed to Marshal")
- return err
- }
-
- if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
- logger.Errorf(ctx, "Failed to update resource %s", Path)
- return err
- }
- return err
-}
-
-func (PONRMgr *PONResourceManager) GetOnuGemInfo(ctx context.Context, IntfId uint32, onuGemInfo interface{}) error {
- /*
- Get onugeminfo map from kvstore
- :param intfid: refremce pon intfid
- :param onuGemInfo: onugem info to return from kv strore.
- */
- var Val []byte
-
- path := fmt.Sprintf(ONU_GEM_INFO_PATH, PONRMgr.DeviceID, IntfId)
- value, err := PONRMgr.KVStore.Get(ctx, path)
- if err != nil {
- logger.Errorw(ctx, "Failed to get from kv store", log.Fields{"path": path})
- return err
- } else if value == nil {
- logger.Debug(ctx, "No onuinfo for path", log.Fields{"path": path})
- return nil // returning nil as this could happen if there are no onus for the interface yet
- }
- if Val, err = kvstore.ToByte(value.Value); err != nil {
- logger.Error(ctx, "Failed to convert to byte array")
- return err
- }
-
- if err = json.Unmarshal(Val, &onuGemInfo); err != nil {
- logger.Error(ctx, "Failed to unmarshall")
- return err
- }
- logger.Debugw(ctx, "found onuinfo from path", log.Fields{"path": path, "onuinfo": onuGemInfo})
- return err
-}
-
-func (PONRMgr *PONResourceManager) DelOnuGemInfoForIntf(ctx context.Context, intfId uint32) error {
- /*
- delete onugem info for an interface from kvstore
- :param intfid: refremce pon intfid
- */
-
- path := fmt.Sprintf(ONU_GEM_INFO_PATH, PONRMgr.DeviceID, intfId)
- if err := PONRMgr.KVStore.Delete(ctx, path); err != nil {
- logger.Errorf(ctx, "Falied to remove resource %s", path)
- return err
- }
- return nil
-}
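A hedged sketch of how the new per-interface pool APIs above might be driven from an adapter (assuming an already-constructed *PONResourceManager per PON technology and an illustrative PON port count):

package adapter // illustrative package name

import (
	"context"

	ponresourcemanager "github.com/opencord/voltha-lib-go/v5/pkg/ponresourcemanager"
)

// initPerIntfPools is an illustrative helper, not part of this change.
func initPerIntfPools(ctx context.Context, ponRMgr *ponresourcemanager.PONResourceManager, numPonPorts uint32) error {
	for intfID := uint32(0); intfID < numPonPorts; intfID++ {
		// Each PON port gets its own ONU/ALLOC/GEMPORT/FLOW ID pools,
		// which localizes lock contention to that interface.
		if err := ponRMgr.InitDeviceResourcePoolForIntf(ctx, intfID); err != nil {
			return err
		}
	}
	return nil
}

// On device delete, the pools can be cleared one interface at a time.
func clearPerIntfPools(ctx context.Context, ponRMgr *ponresourcemanager.PONResourceManager, numPonPorts uint32) error {
	for intfID := uint32(0); intfID < numPonPorts; intfID++ {
		if err := ponRMgr.ClearDeviceResourcePoolForIntf(ctx, intfID); err != nil {
			return err
		}
	}
	return nil
}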
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/probe/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/probe/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/common.go
index d9739af..119d78e 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/probe/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/common.go
@@ -16,7 +16,7 @@
package probe
import (
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
)
var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/probe/probe.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/probe.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/probe/probe.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/probe.go
index f13f257..b66f398 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/probe/probe.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/probe.go
@@ -18,7 +18,7 @@
import (
"context"
"fmt"
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
"net/http"
"sync"
)
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/4QueueHybridProfileMap1.json b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/4QueueHybridProfileMap1.json
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/4QueueHybridProfileMap1.json
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/4QueueHybridProfileMap1.json
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/README.md b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/README.md
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/README.md
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/README.md
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/SingleQueueEponProfile.json b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/SingleQueueEponProfile.json
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/SingleQueueEponProfile.json
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/SingleQueueEponProfile.json
index 00476a2..4015251 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/SingleQueueEponProfile.json
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/SingleQueueEponProfile.json
@@ -58,4 +58,4 @@
}
}
]
-}
\ No newline at end of file
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/common.go
index 544c780..1e89822 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/common.go
@@ -16,7 +16,7 @@
package techprofile
import (
- "github.com/opencord/voltha-lib-go/v4/pkg/log"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
)
var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/config.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/config.go
similarity index 69%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/config.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/config.go
index 438ea4a..d13a876 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/config.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/config.go
@@ -17,7 +17,7 @@
import (
"fmt"
- "github.com/opencord/voltha-lib-go/v4/pkg/db"
+ "github.com/opencord/voltha-lib-go/v5/pkg/db"
"time"
)
@@ -39,12 +39,12 @@
// Tech profile path prefix in kv store (for TP instances)
defaultKVPathPrefix = "%s/technology_profiles"
+ // Resource instance path prefix in KV store (for Resource Instances)
+ defaultResourceInstancePathPrefix = "%s/resource_instances"
+
// Tech profile path in kv store
defaultTechProfileKVPath = "%s/%d" // <technology>/<tech_profile_tableID>
- // Tech profile instance path in kv store
- // Format: <technology>/<tech_profile_tableID>/<uni_port_name>
- defaultTPInstanceKVPath = "%s/%d/%s"
)
//Tech-Profile JSON String Keys
@@ -98,41 +98,42 @@
// TechprofileFlags represents the set of configurations used
type TechProfileFlags struct {
- KVStoreAddress string
- KVStoreType string
- KVStoreTimeout time.Duration
- KVBackend *db.Backend // this is the backend used to store TP instances
- DefaultTpKVBackend *db.Backend // this is the backend used to read the TP profile
- TPKVPathPrefix string
- defaultTpKvPathPrefix string
- TPFileKVPath string
- TPInstanceKVPath string
- DefaultTPName string
- TPVersion int
- NumGemPorts uint32
- DefaultPbits []string
- LogLevel int
- DefaultTechProfileID uint32
- DefaultNumGemPorts uint32
+ KVStoreAddress string
+ KVStoreType string
+ KVStoreTimeout time.Duration
+ KVBackend *db.Backend // this is the backend used to store TP instances
+ DefaultTpKVBackend *db.Backend // this is the backend used to read the TP profile
+ ResourceInstanceKVBacked *db.Backend // this is the backend used to read/write Resource Instances
+ TPKVPathPrefix string
+ defaultTpKvPathPrefix string
+ TPFileKVPath string
+ ResourceInstanceKVPathPrefix string
+ DefaultTPName string
+ TPVersion uint32
+ NumGemPorts uint32
+ DefaultPbits []string
+ LogLevel int
+ DefaultTechProfileID uint32
+ DefaultNumGemPorts uint32
}
func NewTechProfileFlags(KVStoreType string, KVStoreAddress string, basePathKvStore string) *TechProfileFlags {
// initialize with default values
var techProfileFlags = TechProfileFlags{
- KVBackend: nil,
- KVStoreAddress: KVStoreAddress,
- KVStoreType: KVStoreType,
- KVStoreTimeout: defaultKVStoreTimeout,
- DefaultTPName: defaultTechProfileName,
- TPKVPathPrefix: fmt.Sprintf(defaultKVPathPrefix, basePathKvStore),
- defaultTpKvPathPrefix: defaultTpKvPathPrefix,
- TPVersion: defaultVersion,
- TPFileKVPath: defaultTechProfileKVPath,
- TPInstanceKVPath: defaultTPInstanceKVPath,
- DefaultTechProfileID: DEFAULT_TECH_PROFILE_TABLE_ID,
- DefaultNumGemPorts: defaultGemportsCount,
- DefaultPbits: []string{defaultPbits},
- LogLevel: defaultLogLevel,
+ KVBackend: nil,
+ KVStoreAddress: KVStoreAddress,
+ KVStoreType: KVStoreType,
+ KVStoreTimeout: defaultKVStoreTimeout,
+ DefaultTPName: defaultTechProfileName,
+ TPKVPathPrefix: fmt.Sprintf(defaultKVPathPrefix, basePathKvStore),
+ defaultTpKvPathPrefix: defaultTpKvPathPrefix,
+ TPVersion: defaultVersion,
+ TPFileKVPath: defaultTechProfileKVPath,
+ ResourceInstanceKVPathPrefix: fmt.Sprintf(defaultResourceInstancePathPrefix, basePathKvStore),
+ DefaultTechProfileID: DEFAULT_TECH_PROFILE_TABLE_ID,
+ DefaultNumGemPorts: defaultGemportsCount,
+ DefaultPbits: []string{defaultPbits},
+ LogLevel: defaultLogLevel,
}
return &techProfileFlags
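For illustration (assuming an etcd store and a base path of "service/voltha"), the new resource-instance prefix resolves alongside the existing tech-profile prefix like this:

package main

import (
	"fmt"

	"github.com/opencord/voltha-lib-go/v5/pkg/techprofile"
)

func main() {
	flags := techprofile.NewTechProfileFlags("etcd", "etcd:2379", "service/voltha")

	// Existing prefix for tech-profile templates and the new prefix under which
	// per-UNI resource instances are persisted.
	fmt.Println(flags.TPKVPathPrefix)               // service/voltha/technology_profiles
	fmt.Println(flags.ResourceInstanceKVPathPrefix) // service/voltha/resource_instances
}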
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/tech_profile.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/tech_profile.go
new file mode 100644
index 0000000..757118a
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/tech_profile.go
@@ -0,0 +1,1499 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package techprofile
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "github.com/gogo/protobuf/proto"
+ "github.com/golang/protobuf/jsonpb"
+ "github.com/opencord/voltha-protos/v4/go/openolt"
+ "regexp"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/opencord/voltha-lib-go/v5/pkg/db"
+
+ "github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
+ "github.com/opencord/voltha-lib-go/v5/pkg/log"
+ tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
+)
+
+// Interface to pon resource manager APIs
+type iPonResourceMgr interface {
+ GetResourceID(ctx context.Context, intfID uint32, resourceType string, numIDs uint32) ([]uint32, error)
+ FreeResourceID(ctx context.Context, intfID uint32, resourceType string, ReleaseContent []uint32) error
+ GetResourceTypeAllocID() string
+ GetResourceTypeGemPortID() string
+ GetResourceTypeOnuID() string
+ GetTechnology() string
+}
+
+type SchedulingPolicy int32
+
+const (
+ SchedulingPolicy_WRR SchedulingPolicy = 0
+ SchedulingPolicy_StrictPriority SchedulingPolicy = 1
+ SchedulingPolicy_Hybrid SchedulingPolicy = 2
+)
+
+type AdditionalBW int32
+
+const (
+ AdditionalBW_AdditionalBW_None AdditionalBW = 0
+ AdditionalBW_AdditionalBW_NA AdditionalBW = 1
+ AdditionalBW_AdditionalBW_BestEffort AdditionalBW = 2
+ AdditionalBW_AdditionalBW_Auto AdditionalBW = 3
+)
+
+type DiscardPolicy int32
+
+const (
+ DiscardPolicy_TailDrop DiscardPolicy = 0
+ DiscardPolicy_WTailDrop DiscardPolicy = 1
+ DiscardPolicy_Red DiscardPolicy = 2
+ DiscardPolicy_WRed DiscardPolicy = 3
+)
+
+// Required uniPortName format
+var uniPortNameFormatRegexp = regexp.MustCompile(`^olt-{[a-z0-9\-]+}/pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}$`)
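+ // For illustration (hypothetical identifiers), a conforming name looks like
+ // "olt-{deadbeef-1234}/pon-{0}/onu-{1}/uni-{0}".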
+
+// instance control defaults
+const (
+ defaultOnuInstance = "multi-instance"
+ defaultUniInstance = "single-instance"
+ defaultGemPayloadSize = "auto"
+)
+
+// default discard config constants
+const (
+ defaultMinThreshold = 0
+ defaultMaxThreshold = 0
+ defaultMaxProbability = 0
+)
+
+ // default scheduler constants
+const (
+ defaultPriority = 0
+ defaultWeight = 0
+)
+
+// default GEM attribute constants
+const (
+ defaultAESEncryption = "True"
+ defaultPriorityQueue = 0
+ defaultQueueWeight = 0
+ defaultMaxQueueSize = "auto"
+ defaultIsMulticast = "False"
+ defaultAccessControlList = "224.0.0.0-239.255.255.255"
+ defaultMcastGemID = 4069
+)
+
+// Default EPON constants
+const (
+ defaultPakageType = "B"
+)
+const (
+ defaultTrafficType = "BE"
+ defaultUnsolicitedGrantSize = 0
+ defaultNominalInterval = 0
+ defaultToleratedPollJitter = 0
+ defaultRequestTransmissionPolicy = 0
+ defaultNumQueueSet = 2
+)
+const (
+ defaultQThreshold1 = 5500
+ defaultQThreshold2 = 0
+ defaultQThreshold3 = 0
+ defaultQThreshold4 = 0
+ defaultQThreshold5 = 0
+ defaultQThreshold6 = 0
+ defaultQThreshold7 = 0
+)
+
+const (
+ xgspon = "XGS-PON"
+ xgpon = "XGPON"
+ gpon = "GPON"
+ epon = "EPON"
+)
+
+const (
+ MaxUniPortPerOnu = 16 // TODO: Adapter uses its own constant for MaxUniPort. How to synchronize this and have a single source of truth?
+)
+
+type TechProfileMgr struct {
+ config *TechProfileFlags
+ resourceMgr iPonResourceMgr
+ OnuIDMgmtLock sync.RWMutex
+ GemPortIDMgmtLock sync.RWMutex
+ AllocIDMgmtLock sync.RWMutex
+ tpInstanceMap map[string]*tp_pb.TechProfileInstance // Map of tp path to tp instance
+ tpInstanceMapLock sync.RWMutex
+ eponTpInstanceMap map[string]*tp_pb.EponTechProfileInstance // Map of tp path to epon tp instance
+ epontpInstanceMapLock sync.RWMutex
+ tpMap map[uint32]*tp_pb.TechProfile // Map of tp id to tp
+ tpMapLock sync.RWMutex
+ eponTpMap map[uint32]*tp_pb.EponTechProfile // map of tp id to epon tp
+ eponTpMapLock sync.RWMutex
+}
+
+func (t *TechProfileMgr) SetKVClient(ctx context.Context, pathPrefix string) *db.Backend {
+ kvClient, err := newKVClient(ctx, t.config.KVStoreType, t.config.KVStoreAddress, t.config.KVStoreTimeout)
+ if err != nil {
+ logger.Errorw(ctx, "failed-to-create-kv-client",
+ log.Fields{
+ "type": t.config.KVStoreType, "address": t.config.KVStoreAddress,
+ "timeout": t.config.KVStoreTimeout, "prefix": pathPrefix,
+ "error": err.Error(),
+ })
+ return nil
+ }
+ return &db.Backend{
+ Client: kvClient,
+ StoreType: t.config.KVStoreType,
+ Address: t.config.KVStoreAddress,
+ Timeout: t.config.KVStoreTimeout,
+ PathPrefix: pathPrefix}
+
+ /* TODO : Make sure a direct call to NewBackend works fine with the backend; currently there is some
+ issue between the kv store and the backend, and core is not calling NewBackend directly
+ kv := model.NewBackend(t.config.kvStoreType, t.config.KVStoreHost, t.config.KVStorePort,
+ t.config.KVStoreTimeout, kvStoreTechProfilePathPrefix)
+ */
+}
+
+func NewTechProfile(ctx context.Context, resourceMgr iPonResourceMgr, kvStoreType string, kvStoreAddress string, basePathKvStore string) (*TechProfileMgr, error) {
+ var techprofileObj TechProfileMgr
+ logger.Debug(ctx, "initializing-techprofile-mananger")
+ techprofileObj.config = NewTechProfileFlags(kvStoreType, kvStoreAddress, basePathKvStore)
+ techprofileObj.config.KVBackend = techprofileObj.SetKVClient(ctx, techprofileObj.config.TPKVPathPrefix)
+ techprofileObj.config.DefaultTpKVBackend = techprofileObj.SetKVClient(ctx, techprofileObj.config.defaultTpKvPathPrefix)
+ if techprofileObj.config.KVBackend == nil {
+ logger.Error(ctx, "failed-to-initialize-backend")
+ return nil, errors.New("kv-backend-init-failed")
+ }
+ techprofileObj.config.ResourceInstanceKVBacked = techprofileObj.SetKVClient(ctx, techprofileObj.config.ResourceInstanceKVPathPrefix)
+ if techprofileObj.config.ResourceInstanceKVBacked == nil {
+ logger.Error(ctx, "failed-to-initialize-resource-instance-kv-backend")
+ return nil, errors.New("resource-instance-kv-backend-init-failed")
+ }
+ techprofileObj.resourceMgr = resourceMgr
+ techprofileObj.tpInstanceMap = make(map[string]*tp_pb.TechProfileInstance)
+ techprofileObj.eponTpInstanceMap = make(map[string]*tp_pb.EponTechProfileInstance)
+ techprofileObj.tpMap = make(map[uint32]*tp_pb.TechProfile)
+ techprofileObj.eponTpMap = make(map[uint32]*tp_pb.EponTechProfile)
+ logger.Debug(ctx, "reconcile-tp-instance-cache-start")
+ if err := techprofileObj.reconcileTpInstancesToCache(ctx); err != nil {
+ logger.Errorw(ctx, "failed-to-reconcile-tp-instances", log.Fields{"err": err})
+ return nil, err
+ }
+ logger.Debug(ctx, "reconcile-tp-instance-cache-end")
+ logger.Debug(ctx, "initializing-tech-profile-manager-object-success")
+ return &techprofileObj, nil
+}
+
+// GetTechProfileInstanceKey returns the tp instance key that is used to reference TP Instance Map
+func (t *TechProfileMgr) GetTechProfileInstanceKey(ctx context.Context, tpID uint32, uniPortName string) string {
+ logger.Debugw(ctx, "get-tp-instance-kv-key", log.Fields{
+ "uniPortName": uniPortName,
+ "tpId": tpID,
+ })
+ // Make sure the uniPortName is as per format olt-{[a-z0-9\-]+}/pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}
+ if !uniPortNameFormatRegexp.Match([]byte(uniPortName)) {
+ logger.Warnw(ctx, "uni-port-name-not-confirming-to-format", log.Fields{"uniPortName": uniPortName})
+ }
+ // The key path prefix (like service/voltha/technology_profiles or service/voltha_voltha/technology_profiles)
+ // is expected to be attached by the components that use this path as part of the KVBackend configuration.
+ resourceInstanceKvPathSuffix := "%s/%d/%s" // <technology>/<tpID>/<uni-port-name>
+ // <uni-port-name> must be of the format pon-{\d+}/onu-{\d+}/uni-{\d+}
+ return fmt.Sprintf(resourceInstanceKvPathSuffix, t.resourceMgr.GetTechnology(), tpID, uniPortName)
+}
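+ // For illustration (hypothetical values): with technology "XGS-PON", tpID 64 and
+ // uniPortName "olt-{deadbeef-1234}/pon-{0}/onu-{1}/uni-{0}", the returned key is
+ // "XGS-PON/64/olt-{deadbeef-1234}/pon-{0}/onu-{1}/uni-{0}".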
+
+// GetTPInstance gets TP instance from cache if found
+func (t *TechProfileMgr) GetTPInstance(ctx context.Context, path string) (interface{}, error) {
+ tech := t.resourceMgr.GetTechnology()
+ switch tech {
+ case xgspon, xgpon, gpon:
+ t.tpInstanceMapLock.RLock()
+ defer t.tpInstanceMapLock.RUnlock()
+ tpInst, ok := t.tpInstanceMap[path]
+ if !ok {
+ return nil, fmt.Errorf("tp-instance-not-found-tp-path-%v", path)
+ }
+ return tpInst, nil
+ case epon:
+ t.epontpInstanceMapLock.RLock()
+ defer t.epontpInstanceMapLock.RUnlock()
+ tpInst, ok := t.eponTpInstanceMap[path]
+ if !ok {
+ return nil, fmt.Errorf("tp-instance-not-found-tp-path-%v", path)
+ }
+ return tpInst, nil
+ default:
+ logger.Errorw(ctx, "unknown-tech", log.Fields{"tech": tech})
+ return nil, fmt.Errorf("unknown-tech-%s-tp-path-%v", tech, path)
+ }
+}
+
+// CreateTechProfileInstance creates a new TP instance.
+func (t *TechProfileMgr) CreateTechProfileInstance(ctx context.Context, tpID uint32, uniPortName string, intfID uint32) (interface{}, error) {
+ var tpInstance *tp_pb.TechProfileInstance
+ var eponTpInstance *tp_pb.EponTechProfileInstance
+
+ logger.Infow(ctx, "creating-tp-instance", log.Fields{"tpID": tpID, "uni": uniPortName, "intId": intfID})
+
+ // Make sure the uniPortName is as per format olt-{[a-z0-9\-]+}/pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}
+ if !uniPortNameFormatRegexp.Match([]byte(uniPortName)) {
+ logger.Errorw(ctx, "uni-port-name-not-confirming-to-format", log.Fields{"uniPortName": uniPortName})
+ return nil, fmt.Errorf("uni-port-name-not-confirming-to-format-%s", uniPortName)
+ }
+ tpInstancePathSuffix := t.GetTechProfileInstanceKey(ctx, tpID, uniPortName)
+
+ if t.resourceMgr.GetTechnology() == epon {
+ tp := t.getEponTPFromKVStore(ctx, tpID)
+ if tp != nil {
+ if err := t.validateInstanceControlAttr(ctx, *tp.InstanceControl); err != nil {
+ logger.Error(ctx, "invalid-instance-ctrl-attr-using-default-tp")
+ tp = t.getDefaultEponProfile(ctx)
+ } else {
+ logger.Infow(ctx, "using-specified-tp-from-kv-store", log.Fields{"tpID": tpID})
+ }
+ } else {
+ logger.Info(ctx, "tp-not-found-on-kv--creating-default-tp")
+ tp = t.getDefaultEponProfile(ctx)
+ }
+ // Store TP in cache
+ t.eponTpMapLock.Lock()
+ t.eponTpMap[tpID] = tp
+ t.eponTpMapLock.Unlock()
+
+ if eponTpInstance = t.allocateEponTPInstance(ctx, uniPortName, tp, intfID, tpInstancePathSuffix); eponTpInstance == nil {
+ logger.Error(ctx, "tp-instance-allocation-failed")
+ return nil, errors.New("tp-instance-allocation-failed")
+ }
+ t.epontpInstanceMapLock.Lock()
+ t.eponTpInstanceMap[tpInstancePathSuffix] = eponTpInstance
+ t.epontpInstanceMapLock.Unlock()
+ resInst := tp_pb.ResourceInstance{
+ TpId: tpID,
+ ProfileType: eponTpInstance.ProfileType,
+ SubscriberIdentifier: eponTpInstance.SubscriberIdentifier,
+ AllocId: eponTpInstance.AllocId,
+ }
+ for _, usQAttr := range eponTpInstance.UpstreamQueueAttributeList {
+ resInst.GemportIds = append(resInst.GemportIds, usQAttr.GemportId)
+ }
+
+ logger.Infow(ctx, "epon-tp-instance-created-successfully",
+ log.Fields{"tpID": tpID, "uni": uniPortName, "intfID": intfID})
+ if err := t.addResourceInstanceToKVStore(ctx, tpID, uniPortName, resInst); err != nil {
+ logger.Errorw(ctx, "failed-to-update-resource-instance-to-kv-store--freeing-up-resources", log.Fields{"err": err, "tpID": tpID, "uniPortName": uniPortName})
+ allocIDs := make([]uint32, 0)
+ allocIDs = append(allocIDs, resInst.AllocId)
+ errList := make([]error, 0)
+ errList = append(errList, t.FreeResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeAllocID(), allocIDs))
+ errList = append(errList, t.FreeResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeGemPortID(), resInst.GemportIds))
+ if len(errList) > 0 {
+ logger.Errorw(ctx, "failed-to-free-up-resources-on-kv-store--system-behavior-has-become-erratic", log.Fields{"tpID": tpID, "uniPortName": uniPortName, "errList": errList})
+ }
+ return nil, err
+ }
+ return eponTpInstance, nil
+ } else {
+ tp := t.getTPFromKVStore(ctx, tpID)
+ if tp != nil {
+ if err := t.validateInstanceControlAttr(ctx, *tp.InstanceControl); err != nil {
+ logger.Error(ctx, "invalid-instance-ctrl-attr--using-default-tp")
+ tp = t.getDefaultTechProfile(ctx)
+ } else {
+ logger.Infow(ctx, "using-specified-tp-from-kv-store", log.Fields{"tpID": tpID})
+ }
+ } else {
+ logger.Info(ctx, "tp-not-found-on-kv--creating-default-tp")
+ tp = t.getDefaultTechProfile(ctx)
+ }
+ // Store TP in cache
+ t.tpMapLock.Lock()
+ t.tpMap[tpID] = tp
+ t.tpMapLock.Unlock()
+
+ if tpInstance = t.allocateTPInstance(ctx, uniPortName, tp, intfID, tpInstancePathSuffix); tpInstance == nil {
+ logger.Error(ctx, "tp-instance-allocation-failed")
+ return nil, errors.New("tp-instance-allocation-failed")
+ }
+ t.tpInstanceMapLock.Lock()
+ t.tpInstanceMap[tpInstancePathSuffix] = tpInstance
+ t.tpInstanceMapLock.Unlock()
+
+ resInst := tp_pb.ResourceInstance{
+ TpId: tpID,
+ ProfileType: tpInstance.ProfileType,
+ SubscriberIdentifier: tpInstance.SubscriberIdentifier,
+ AllocId: tpInstance.UsScheduler.AllocId,
+ }
+ for _, usQAttr := range tpInstance.UpstreamGemPortAttributeList {
+ resInst.GemportIds = append(resInst.GemportIds, usQAttr.GemportId)
+ }
+
+ logger.Infow(ctx, "tp-instance-created-successfully",
+ log.Fields{"tpID": tpID, "uni": uniPortName, "intfID": intfID})
+ if err := t.addResourceInstanceToKVStore(ctx, tpID, uniPortName, resInst); err != nil {
+ logger.Errorw(ctx, "failed-to-update-resource-instance-to-kv-store--freeing-up-resources", log.Fields{"err": err, "tpID": tpID, "uniPortName": uniPortName})
+ allocIDs := make([]uint32, 0)
+ allocIDs = append(allocIDs, resInst.AllocId)
+ errList := make([]error, 0)
+ errList = append(errList, t.FreeResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeAllocID(), allocIDs))
+ errList = append(errList, t.FreeResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeGemPortID(), resInst.GemportIds))
+ if len(errList) > 0 {
+ logger.Fatalw(ctx, "failed-to-free-up-resources-on-kv-store--system-behavior-has-become-erratic", log.Fields{"err": err, "tpID": tpID, "uniPortName": uniPortName})
+ }
+ return nil, err
+ }
+
+ logger.Infow(ctx, "resource-instance-added-to-kv-store-successfully",
+ log.Fields{"tpID": tpID, "uni": uniPortName, "intfID": intfID})
+ return tpInstance, nil
+ }
+}
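+ // Illustrative caller sketch (hypothetical identifiers, not part of this library):
+ //   uni := "olt-{deadbeef-1234}/pon-{0}/onu-{1}/uni-{0}"
+ //   inst, err := tpMgr.CreateTechProfileInstance(ctx, 64, uni, 0)
+ // On success the full TP instance lives only in the in-memory cache, while the slimmer
+ // ResourceInstance (tp id, profile type, subscriber id, alloc id, gemport ids) is what gets
+ // persisted to the KV store; a failed KV write frees the allocated alloc/gemport IDs again.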
+
+// DeleteTechProfileInstance deletes the TP instance from the local cache as well as deletes the corresponding
+// resource instance from the KV store.
+func (t *TechProfileMgr) DeleteTechProfileInstance(ctx context.Context, tpID uint32, uniPortName string) error {
+ // Make sure the uniPortName is as per format olt-{[a-z0-9\-]+}/pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}
+ if !uniPortNameFormatRegexp.Match([]byte(uniPortName)) {
+ logger.Errorw(ctx, "uni-port-name-not-confirming-to-format", log.Fields{"uniPortName": uniPortName})
+ return fmt.Errorf("uni-port-name-not-confirming-to-format--%s", uniPortName)
+ }
+ path := t.GetTechProfileInstanceKey(ctx, tpID, uniPortName)
+ logger.Infow(ctx, "delete-tp-instance-from-cache", log.Fields{"key": path})
+ t.tpInstanceMapLock.Lock()
+ delete(t.tpInstanceMap, path)
+ t.tpInstanceMapLock.Unlock()
+ if err := t.removeResourceInstanceFromKVStore(ctx, tpID, uniPortName); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (t *TechProfileMgr) GetMulticastTrafficQueues(ctx context.Context, tp *tp_pb.TechProfileInstance) []*tp_pb.TrafficQueue {
+ var encryp bool
+ NumGemPorts := len(tp.DownstreamGemPortAttributeList)
+ mcastTrafficQueues := make([]*tp_pb.TrafficQueue, 0)
+ for Count := 0; Count < NumGemPorts; Count++ {
+ if !isMulticastGem(tp.DownstreamGemPortAttributeList[Count].IsMulticast) {
+ continue
+ }
+ if tp.DownstreamGemPortAttributeList[Count].AesEncryption == "True" {
+ encryp = true
+ } else {
+ encryp = false
+ }
+ mcastTrafficQueues = append(mcastTrafficQueues, &tp_pb.TrafficQueue{
+ Direction: tp_pb.Direction_DOWNSTREAM,
+ GemportId: tp.DownstreamGemPortAttributeList[Count].MulticastGemId,
+ PbitMap: tp.DownstreamGemPortAttributeList[Count].PbitMap,
+ AesEncryption: encryp,
+ SchedPolicy: tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy,
+ Priority: tp.DownstreamGemPortAttributeList[Count].PriorityQ,
+ Weight: tp.DownstreamGemPortAttributeList[Count].Weight,
+ DiscardPolicy: tp.DownstreamGemPortAttributeList[Count].DiscardPolicy,
+ })
+ }
+ logger.Debugw(ctx, "Downstream Multicast Traffic queue list ", log.Fields{"queuelist": mcastTrafficQueues})
+ return mcastTrafficQueues
+}
+
+func (t *TechProfileMgr) GetGemportForPbit(ctx context.Context, tp interface{}, dir tp_pb.Direction, pbit uint32) interface{} {
+ /*
+ Function to get the Gemport mapped to a pbit.
+ */
+ switch tp := tp.(type) {
+ case *tp_pb.TechProfileInstance:
+ if dir == tp_pb.Direction_UPSTREAM {
+ // upstream GEM ports
+ numGemPorts := len(tp.UpstreamGemPortAttributeList)
+ for gemCnt := 0; gemCnt < numGemPorts; gemCnt++ {
+ lenOfPbitMap := len(tp.UpstreamGemPortAttributeList[gemCnt].PbitMap)
+ for pbitMapIdx := 2; pbitMapIdx < lenOfPbitMap; pbitMapIdx++ {
+ // Given a sample pbit map string "0b00000001", lenOfPbitMap is 10
+ // "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
+ if p, err := strconv.Atoi(string(tp.UpstreamGemPortAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
+ if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
+ logger.Debugw(ctx, "Found-US-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.UpstreamGemPortAttributeList[gemCnt].GemportId})
+ return tp.UpstreamGemPortAttributeList[gemCnt]
+ }
+ }
+ }
+ }
+ } else if dir == tp_pb.Direction_DOWNSTREAM {
+ //downstream GEM ports
+ numGemPorts := len(tp.DownstreamGemPortAttributeList)
+ for gemCnt := 0; gemCnt < numGemPorts; gemCnt++ {
+ lenOfPbitMap := len(tp.DownstreamGemPortAttributeList[gemCnt].PbitMap)
+ for pbitMapIdx := 2; pbitMapIdx < lenOfPbitMap; pbitMapIdx++ {
+ // Given a sample pbit map string "0b00000001", lenOfPbitMap is 10
+ // "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
+ if p, err := strconv.Atoi(string(tp.DownstreamGemPortAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
+ if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
+ logger.Debugw(ctx, "Found-DS-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.DownstreamGemPortAttributeList[gemCnt].GemportId})
+ return tp.DownstreamGemPortAttributeList[gemCnt]
+ }
+ }
+ }
+ }
+ }
+ logger.Errorw(ctx, "No-GemportId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
+ case *openolt.EponTechProfileInstance:
+ if dir == tp_pb.Direction_UPSTREAM {
+ // upstream GEM ports
+ numGemPorts := len(tp.UpstreamQueueAttributeList)
+ for gemCnt := 0; gemCnt < numGemPorts; gemCnt++ {
+ lenOfPbitMap := len(tp.UpstreamQueueAttributeList[gemCnt].PbitMap)
+ for pbitMapIdx := 2; pbitMapIdx < lenOfPbitMap; pbitMapIdx++ {
+ // Given a sample pbit map string "0b00000001", lenOfPbitMap is 10
+ // "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
+ if p, err := strconv.Atoi(string(tp.UpstreamQueueAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
+ if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
+ logger.Debugw(ctx, "Found-US-Queue-for-Pcp", log.Fields{"pbit": pbit, "Queue": tp.UpstreamQueueAttributeList[gemCnt].GemportId})
+ return tp.UpstreamQueueAttributeList[gemCnt]
+ }
+ }
+ }
+ }
+ } else if dir == tp_pb.Direction_DOWNSTREAM {
+ //downstream GEM ports
+ numGemPorts := len(tp.DownstreamQueueAttributeList)
+ for gemCnt := 0; gemCnt < numGemPorts; gemCnt++ {
+ lenOfPbitMap := len(tp.DownstreamQueueAttributeList[gemCnt].PbitMap)
+ for pbitMapIdx := 2; pbitMapIdx < lenOfPbitMap; pbitMapIdx++ {
+ // Given a sample pbit map string "0b00000001", lenOfPbitMap is 10
+ // "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
+ if p, err := strconv.Atoi(string(tp.DownstreamQueueAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
+ if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
+ logger.Debugw(ctx, "Found-DS-Queue-for-Pcp", log.Fields{"pbit": pbit, "Queue": tp.DownstreamQueueAttributeList[gemCnt].GemportId})
+ return tp.DownstreamQueueAttributeList[gemCnt]
+ }
+ }
+ }
+ }
+ }
+ logger.Errorw(ctx, "No-QueueId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
+ default:
+ logger.Errorw(ctx, "unknown-tech", log.Fields{"tp": tp})
+ }
+ return nil
+}
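+ // Worked example (illustrative): for a pbit map "0b00001000" (lenOfPbitMap = 10) and pbit 3,
+ // pbitMapIdx = pbit + 2 = 5 and the character inspected is PbitMap[lenOfPbitMap-pbitMapIdx+1] = PbitMap[6],
+ // which is '1' here, so the GEM port (or EPON queue) carrying this pbit map is returned for pbit 3.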
+
+// FindAllTpInstances returns all TechProfile instances for a given TechProfile table-id, pon interface ID and onu ID.
+func (t *TechProfileMgr) FindAllTpInstances(ctx context.Context, oltDeviceID string, tpID uint32, intfID uint32, onuID uint32) interface{} {
+ onuTpInstancePathSuffix := fmt.Sprintf("%s/%d/olt-{%s}/pon-{%d}/onu-{%d}", t.resourceMgr.GetTechnology(), tpID, oltDeviceID, intfID, onuID)
+ tech := t.resourceMgr.GetTechnology()
+ if tech == xgspon || tech == xgpon || tech == gpon {
+ t.tpInstanceMapLock.RLock()
+ defer t.tpInstanceMapLock.RUnlock()
+ tpInstancesTech := make([]tp_pb.TechProfileInstance, 0)
+ for i := 0; i < MaxUniPortPerOnu; i++ {
+ key := onuTpInstancePathSuffix + fmt.Sprintf("/uni-{%d}", i)
+ if tpInst, ok := t.tpInstanceMap[key]; ok {
+ tpInstancesTech = append(tpInstancesTech, *tpInst)
+ }
+ }
+ return tpInstancesTech
+ } else if tech == epon {
+ t.epontpInstanceMapLock.RLock()
+ defer t.epontpInstanceMapLock.RUnlock()
+ tpInstancesTech := make([]tp_pb.EponTechProfileInstance, 0)
+ for i := 0; i < MaxUniPortPerOnu; i++ {
+ key := onuTpInstancePathSuffix + fmt.Sprintf("/uni-{%d}", i)
+ if tpInst, ok := t.eponTpInstanceMap[key]; ok {
+ tpInstancesTech = append(tpInstancesTech, *tpInst)
+ }
+ }
+ return tpInstancesTech
+ } else {
+ logger.Errorw(ctx, "unknown-tech", log.Fields{"tech": tech, "tpID": tpID, "onuID": onuID, "intfID": intfID})
+ }
+ return nil
+}
+
+func (t *TechProfileMgr) GetResourceID(ctx context.Context, intfID uint32, resourceType string, numIDs uint32) ([]uint32, error) {
+ logger.Debugw(ctx, "getting-resource-id", log.Fields{
+ "intf-id": intfID,
+ "resource-type": resourceType,
+ "num": numIDs,
+ })
+ var err error
+ var ids []uint32
+ switch resourceType {
+ case t.resourceMgr.GetResourceTypeAllocID():
+ t.AllocIDMgmtLock.Lock()
+ ids, err = t.resourceMgr.GetResourceID(ctx, intfID, resourceType, numIDs)
+ t.AllocIDMgmtLock.Unlock()
+ case t.resourceMgr.GetResourceTypeGemPortID():
+ t.GemPortIDMgmtLock.Lock()
+ ids, err = t.resourceMgr.GetResourceID(ctx, intfID, resourceType, numIDs)
+ t.GemPortIDMgmtLock.Unlock()
+ case t.resourceMgr.GetResourceTypeOnuID():
+ t.OnuIDMgmtLock.Lock()
+ ids, err = t.resourceMgr.GetResourceID(ctx, intfID, resourceType, numIDs)
+ t.OnuIDMgmtLock.Unlock()
+ default:
+ return nil, fmt.Errorf("resourceType %s not supported", resourceType)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return ids, nil
+}
+
+func (t *TechProfileMgr) FreeResourceID(ctx context.Context, intfID uint32, resourceType string, ReleaseContent []uint32) error {
+ logger.Debugw(ctx, "freeing-resource-id", log.Fields{
+ "intf-id": intfID,
+ "resource-type": resourceType,
+ "release-content": ReleaseContent,
+ })
+ var err error
+ switch resourceType {
+ case t.resourceMgr.GetResourceTypeAllocID():
+ t.AllocIDMgmtLock.Lock()
+ err = t.resourceMgr.FreeResourceID(ctx, intfID, resourceType, ReleaseContent)
+ t.AllocIDMgmtLock.Unlock()
+ case t.resourceMgr.GetResourceTypeGemPortID():
+ t.GemPortIDMgmtLock.Lock()
+ err = t.resourceMgr.FreeResourceID(ctx, intfID, resourceType, ReleaseContent)
+ t.GemPortIDMgmtLock.Unlock()
+ case t.resourceMgr.GetResourceTypeOnuID():
+ t.OnuIDMgmtLock.Lock()
+ err = t.resourceMgr.FreeResourceID(ctx, intfID, resourceType, ReleaseContent)
+ t.OnuIDMgmtLock.Unlock()
+ default:
+ return fmt.Errorf("resourceType %s not supported", resourceType)
+ }
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (t *TechProfileMgr) GetUsScheduler(tpInstance *tp_pb.TechProfileInstance) *tp_pb.SchedulerConfig {
+ return &tp_pb.SchedulerConfig{
+ Direction: tpInstance.UsScheduler.Direction,
+ AdditionalBw: tpInstance.UsScheduler.AdditionalBw,
+ Priority: tpInstance.UsScheduler.Priority,
+ Weight: tpInstance.UsScheduler.Weight,
+ SchedPolicy: tpInstance.UsScheduler.QSchedPolicy}
+}
+
+func (t *TechProfileMgr) GetDsScheduler(tpInstance *tp_pb.TechProfileInstance) *tp_pb.SchedulerConfig {
+ return &tp_pb.SchedulerConfig{
+ Direction: tpInstance.DsScheduler.Direction,
+ AdditionalBw: tpInstance.DsScheduler.AdditionalBw,
+ Priority: tpInstance.DsScheduler.Priority,
+ Weight: tpInstance.DsScheduler.Weight,
+ SchedPolicy: tpInstance.DsScheduler.QSchedPolicy}
+}
+
+func (t *TechProfileMgr) GetTrafficScheduler(tpInstance *tp_pb.TechProfileInstance, SchedCfg *tp_pb.SchedulerConfig,
+ ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler {
+
+ tSched := &tp_pb.TrafficScheduler{
+ Direction: SchedCfg.Direction,
+ AllocId: tpInstance.UsScheduler.AllocId,
+ TrafficShapingInfo: ShapingCfg,
+ Scheduler: SchedCfg}
+
+ return tSched
+}
+
+func (t *TechProfileMgr) GetTrafficQueues(ctx context.Context, tp *tp_pb.TechProfileInstance, direction tp_pb.Direction) ([]*tp_pb.TrafficQueue, error) {
+
+ var encryp bool
+ if direction == tp_pb.Direction_UPSTREAM {
+ // upstream GEM ports
+ NumGemPorts := len(tp.UpstreamGemPortAttributeList)
+ GemPorts := make([]*tp_pb.TrafficQueue, 0)
+ for Count := 0; Count < NumGemPorts; Count++ {
+ if tp.UpstreamGemPortAttributeList[Count].AesEncryption == "True" {
+ encryp = true
+ } else {
+ encryp = false
+ }
+
+ GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
+ Direction: direction,
+ GemportId: tp.UpstreamGemPortAttributeList[Count].GemportId,
+ PbitMap: tp.UpstreamGemPortAttributeList[Count].PbitMap,
+ AesEncryption: encryp,
+ SchedPolicy: tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy,
+ Priority: tp.UpstreamGemPortAttributeList[Count].PriorityQ,
+ Weight: tp.UpstreamGemPortAttributeList[Count].Weight,
+ DiscardPolicy: tp.UpstreamGemPortAttributeList[Count].DiscardPolicy,
+ })
+ }
+ logger.Debugw(ctx, "Upstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
+ return GemPorts, nil
+ } else if direction == tp_pb.Direction_DOWNSTREAM {
+ //downstream GEM ports
+ NumGemPorts := len(tp.DownstreamGemPortAttributeList)
+ GemPorts := make([]*tp_pb.TrafficQueue, 0)
+ for Count := 0; Count < NumGemPorts; Count++ {
+ if isMulticastGem(tp.DownstreamGemPortAttributeList[Count].IsMulticast) {
+ //do not take multicast GEM ports. They are handled separately.
+ continue
+ }
+ if tp.DownstreamGemPortAttributeList[Count].AesEncryption == "True" {
+ encryp = true
+ } else {
+ encryp = false
+ }
+
+ GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
+ Direction: direction,
+ GemportId: tp.DownstreamGemPortAttributeList[Count].GemportId,
+ PbitMap: tp.DownstreamGemPortAttributeList[Count].PbitMap,
+ AesEncryption: encryp,
+ SchedPolicy: tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy,
+ Priority: tp.DownstreamGemPortAttributeList[Count].PriorityQ,
+ Weight: tp.DownstreamGemPortAttributeList[Count].Weight,
+ DiscardPolicy: tp.DownstreamGemPortAttributeList[Count].DiscardPolicy,
+ })
+ }
+ logger.Debugw(ctx, "Downstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
+ return GemPorts, nil
+ }
+
+ logger.Errorf(ctx, "Unsupported direction %s used for generating Traffic Queue list", direction)
+ return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unsupported direction %s", direction)
+}
+
+func (t *TechProfileMgr) validateInstanceControlAttr(ctx context.Context, instCtl tp_pb.InstanceControl) error {
+ if instCtl.Onu != "single-instance" && instCtl.Onu != "multi-instance" {
+ logger.Errorw(ctx, "invalid-onu-instance-control-attribute", log.Fields{"onu-inst": instCtl.Onu})
+ return errors.New("invalid-onu-instance-ctl-attr")
+ }
+
+ if instCtl.Uni != "single-instance" && instCtl.Uni != "multi-instance" {
+ logger.Errorw(ctx, "invalid-uni-instance-control-attribute", log.Fields{"uni-inst": instCtl.Uni})
+ return errors.New("invalid-uni-instance-ctl-attr")
+ }
+
+ if instCtl.Uni == "multi-instance" {
+ logger.Error(ctx, "uni-multi-instance-tp-not-supported")
+ return errors.New("uni-multi-instance-tp-not-supported")
+ }
+
+ return nil
+}
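+ // Example (illustrative):
+ //   t.validateInstanceControlAttr(ctx, tp_pb.InstanceControl{Onu: "multi-instance", Uni: "single-instance"}) // nil
+ //   t.validateInstanceControlAttr(ctx, tp_pb.InstanceControl{Onu: "multi-instance", Uni: "multi-instance"})  // uni-multi-instance-tp-not-supported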
+
+// allocateTPInstance for GPON, XGPON and XGS-PON technology
+func (t *TechProfileMgr) allocateTPInstance(ctx context.Context, uniPortName string, tp *tp_pb.TechProfile, intfID uint32, tpInstPathSuffix string) *tp_pb.TechProfileInstance {
+
+ var usGemPortAttributeList []*tp_pb.GemPortAttributes
+ var dsGemPortAttributeList []*tp_pb.GemPortAttributes
+ var dsMulticastGemAttributeList []*tp_pb.GemPortAttributes
+ var dsUnicastGemAttributeList []*tp_pb.GemPortAttributes
+ var tcontIDs []uint32
+ var gemPorts []uint32
+ var err error
+
+ logger.Infow(ctx, "Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfID": intfID, "numGem": tp.NumGemPorts})
+
+ if tp.InstanceControl.Onu == "multi-instance" {
+ tcontIDs, err = t.GetResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeAllocID(), 1)
+ if err != nil {
+ logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"err": err, "intfID": intfID})
+ return nil
+ }
+ } else { // "single-instance"
+ if tpInst := t.getSingleInstanceTp(ctx, tpInstPathSuffix); tpInst == nil {
+ // No "single-instance" tp found on one any uni port for the given TP ID
+ // Allocate a new TcontID or AllocID
+ tcontIDs, err = t.GetResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeAllocID(), 1)
+ if err != nil {
+ logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"err": err, "intfID": intfID})
+ return nil
+ }
+ } else {
+ // Use the alloc-id from the existing TpInstance
+ tcontIDs = append(tcontIDs, tpInst.UsScheduler.AllocId)
+ }
+ }
+ logger.Debugw(ctx, "Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
+ gemPorts, err = t.GetResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts)
+ if err != nil {
+ logger.Errorw(ctx, "Error getting gemport ids from rsrcrMgr", log.Fields{"err": err, "intfID": intfID, "numGemports": tp.NumGemPorts})
+ return nil
+ }
+ logger.Infow(ctx, "Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
+ for index := 0; index < int(tp.NumGemPorts); index++ {
+ usGemPortAttributeList = append(usGemPortAttributeList,
+ &tp_pb.GemPortAttributes{GemportId: gemPorts[index],
+ MaxQSize: tp.UpstreamGemPortAttributeList[index].MaxQSize,
+ PbitMap: tp.UpstreamGemPortAttributeList[index].PbitMap,
+ AesEncryption: tp.UpstreamGemPortAttributeList[index].AesEncryption,
+ SchedulingPolicy: tp.UpstreamGemPortAttributeList[index].SchedulingPolicy,
+ PriorityQ: tp.UpstreamGemPortAttributeList[index].PriorityQ,
+ Weight: tp.UpstreamGemPortAttributeList[index].Weight,
+ DiscardPolicy: tp.UpstreamGemPortAttributeList[index].DiscardPolicy,
+ DiscardConfig: tp.UpstreamGemPortAttributeList[index].DiscardConfig})
+ }
+
+ logger.Info(ctx, "length of DownstreamGemPortAttributeList", len(tp.DownstreamGemPortAttributeList))
+ //put multicast and unicast downstream GEM port attributes in different lists first
+ for index := 0; index < len(tp.DownstreamGemPortAttributeList); index++ {
+ if isMulticastGem(tp.DownstreamGemPortAttributeList[index].IsMulticast) {
+ dsMulticastGemAttributeList = append(dsMulticastGemAttributeList,
+ &tp_pb.GemPortAttributes{
+ MulticastGemId: tp.DownstreamGemPortAttributeList[index].MulticastGemId,
+ MaxQSize: tp.DownstreamGemPortAttributeList[index].MaxQSize,
+ PbitMap: tp.DownstreamGemPortAttributeList[index].PbitMap,
+ AesEncryption: tp.DownstreamGemPortAttributeList[index].AesEncryption,
+ SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
+ PriorityQ: tp.DownstreamGemPortAttributeList[index].PriorityQ,
+ Weight: tp.DownstreamGemPortAttributeList[index].Weight,
+ DiscardPolicy: tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
+ DiscardConfig: tp.DownstreamGemPortAttributeList[index].DiscardConfig,
+ IsMulticast: tp.DownstreamGemPortAttributeList[index].IsMulticast,
+ DynamicAccessControlList: tp.DownstreamGemPortAttributeList[index].DynamicAccessControlList,
+ StaticAccessControlList: tp.DownstreamGemPortAttributeList[index].StaticAccessControlList})
+ } else {
+ dsUnicastGemAttributeList = append(dsUnicastGemAttributeList,
+ &tp_pb.GemPortAttributes{
+ MaxQSize: tp.DownstreamGemPortAttributeList[index].MaxQSize,
+ PbitMap: tp.DownstreamGemPortAttributeList[index].PbitMap,
+ AesEncryption: tp.DownstreamGemPortAttributeList[index].AesEncryption,
+ SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
+ PriorityQ: tp.DownstreamGemPortAttributeList[index].PriorityQ,
+ Weight: tp.DownstreamGemPortAttributeList[index].Weight,
+ DiscardPolicy: tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
+ DiscardConfig: tp.DownstreamGemPortAttributeList[index].DiscardConfig})
+ }
+ }
+ //add unicast downstream GEM ports to dsGemPortAttributeList
+ if dsUnicastGemAttributeList != nil {
+ for index := 0; index < int(tp.NumGemPorts); index++ {
+ dsGemPortAttributeList = append(dsGemPortAttributeList,
+ &tp_pb.GemPortAttributes{GemportId: gemPorts[index],
+ MaxQSize: dsUnicastGemAttributeList[index].MaxQSize,
+ PbitMap: dsUnicastGemAttributeList[index].PbitMap,
+ AesEncryption: dsUnicastGemAttributeList[index].AesEncryption,
+ SchedulingPolicy: dsUnicastGemAttributeList[index].SchedulingPolicy,
+ PriorityQ: dsUnicastGemAttributeList[index].PriorityQ,
+ Weight: dsUnicastGemAttributeList[index].Weight,
+ DiscardPolicy: dsUnicastGemAttributeList[index].DiscardPolicy,
+ DiscardConfig: dsUnicastGemAttributeList[index].DiscardConfig})
+ }
+ }
+ //add multicast GEM ports to dsGemPortAttributeList afterwards
+ for k := range dsMulticastGemAttributeList {
+ dsGemPortAttributeList = append(dsGemPortAttributeList, dsMulticastGemAttributeList[k])
+ }
+
+ return &tp_pb.TechProfileInstance{
+ SubscriberIdentifier: uniPortName,
+ Name: tp.Name,
+ ProfileType: tp.ProfileType,
+ Version: tp.Version,
+ NumGemPorts: tp.NumGemPorts,
+ InstanceControl: tp.InstanceControl,
+ UsScheduler: &tp_pb.SchedulerAttributes{
+ AllocId: tcontIDs[0],
+ Direction: tp.UsScheduler.Direction,
+ AdditionalBw: tp.UsScheduler.AdditionalBw,
+ Priority: tp.UsScheduler.Priority,
+ Weight: tp.UsScheduler.Weight,
+ QSchedPolicy: tp.UsScheduler.QSchedPolicy},
+ DsScheduler: &tp_pb.SchedulerAttributes{
+ AllocId: tcontIDs[0],
+ Direction: tp.DsScheduler.Direction,
+ AdditionalBw: tp.DsScheduler.AdditionalBw,
+ Priority: tp.DsScheduler.Priority,
+ Weight: tp.DsScheduler.Weight,
+ QSchedPolicy: tp.DsScheduler.QSchedPolicy},
+ UpstreamGemPortAttributeList: usGemPortAttributeList,
+ DownstreamGemPortAttributeList: dsGemPortAttributeList}
+}
+
+// allocateTPInstance function for EPON
+func (t *TechProfileMgr) allocateEponTPInstance(ctx context.Context, uniPortName string, tp *tp_pb.EponTechProfile, intfID uint32, tpInstPath string) *tp_pb.EponTechProfileInstance {
+
+ var usQueueAttributeList []*tp_pb.EPONQueueAttributes
+ var dsQueueAttributeList []*tp_pb.EPONQueueAttributes
+ var tcontIDs []uint32
+ var gemPorts []uint32
+ var err error
+
+ logger.Infow(ctx, "allocating-tp-instance-from-tp-template", log.Fields{"uniPortName": uniPortName, "intfID": intfID, "numGem": tp.NumGemPorts})
+
+ if tp.InstanceControl.Onu == "multi-instance" {
+ if tcontIDs, err = t.GetResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeAllocID(), 1); err != nil {
+ logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"err": err, "intfID": intfID})
+ return nil
+ }
+ } else { // "single-instance"
+ if tpInst := t.getSingleInstanceEponTp(ctx, tpInstPath); tpInst == nil {
+ // No "single-instance" tp found on one any uni port for the given TP ID
+ // Allocate a new TcontID or AllocID
+ if tcontIDs, err = t.GetResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeAllocID(), 1); err != nil {
+ logger.Errorw(ctx, "error-getting-alloc-id-from-resource-mgr", log.Fields{"err": err, "intfID": intfID})
+ return nil
+ }
+ } else {
+ // Use the alloc-id from the existing TpInstance
+ tcontIDs = append(tcontIDs, tpInst.AllocId)
+ }
+ }
+ logger.Debugw(ctx, "Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
+ if gemPorts, err = t.GetResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts); err != nil {
+ logger.Errorw(ctx, "error-getting-gemport-id-from-resource-mgr", log.Fields{"err": err, "intfID": intfID, "numGemports": tp.NumGemPorts})
+ return nil
+ }
+ logger.Infow(ctx, "allocated-alloc-id-and-gemport-successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
+ for index := 0; index < int(tp.NumGemPorts); index++ {
+ usQueueAttributeList = append(usQueueAttributeList,
+ &tp_pb.EPONQueueAttributes{GemportId: gemPorts[index],
+ MaxQSize: tp.UpstreamQueueAttributeList[index].MaxQSize,
+ PbitMap: tp.UpstreamQueueAttributeList[index].PbitMap,
+ AesEncryption: tp.UpstreamQueueAttributeList[index].AesEncryption,
+ TrafficType: tp.UpstreamQueueAttributeList[index].TrafficType,
+ UnsolicitedGrantSize: tp.UpstreamQueueAttributeList[index].UnsolicitedGrantSize,
+ NominalInterval: tp.UpstreamQueueAttributeList[index].NominalInterval,
+ ToleratedPollJitter: tp.UpstreamQueueAttributeList[index].ToleratedPollJitter,
+ RequestTransmissionPolicy: tp.UpstreamQueueAttributeList[index].RequestTransmissionPolicy,
+ NumQSets: tp.UpstreamQueueAttributeList[index].NumQSets,
+ QThresholds: tp.UpstreamQueueAttributeList[index].QThresholds,
+ SchedulingPolicy: tp.UpstreamQueueAttributeList[index].SchedulingPolicy,
+ PriorityQ: tp.UpstreamQueueAttributeList[index].PriorityQ,
+ Weight: tp.UpstreamQueueAttributeList[index].Weight,
+ DiscardPolicy: tp.UpstreamQueueAttributeList[index].DiscardPolicy,
+ DiscardConfig: tp.UpstreamQueueAttributeList[index].DiscardConfig})
+ }
+
+ logger.Info(ctx, "length-of-downstream-gemport-attribute-list", len(tp.DownstreamQueueAttributeList))
+ for index := 0; index < int(tp.NumGemPorts); index++ {
+ dsQueueAttributeList = append(dsQueueAttributeList,
+ &tp_pb.EPONQueueAttributes{GemportId: gemPorts[index],
+ MaxQSize: tp.DownstreamQueueAttributeList[index].MaxQSize,
+ PbitMap: tp.DownstreamQueueAttributeList[index].PbitMap,
+ AesEncryption: tp.DownstreamQueueAttributeList[index].AesEncryption,
+ SchedulingPolicy: tp.DownstreamQueueAttributeList[index].SchedulingPolicy,
+ PriorityQ: tp.DownstreamQueueAttributeList[index].PriorityQ,
+ Weight: tp.DownstreamQueueAttributeList[index].Weight,
+ DiscardPolicy: tp.DownstreamQueueAttributeList[index].DiscardPolicy,
+ DiscardConfig: tp.DownstreamQueueAttributeList[index].DiscardConfig})
+ }
+
+ return &tp_pb.EponTechProfileInstance{
+ SubscriberIdentifier: uniPortName,
+ Name: tp.Name,
+ ProfileType: tp.ProfileType,
+ Version: tp.Version,
+ NumGemPorts: tp.NumGemPorts,
+ InstanceControl: tp.InstanceControl,
+ PackageType: tp.PackageType,
+ AllocId: tcontIDs[0],
+ UpstreamQueueAttributeList: usQueueAttributeList,
+ DownstreamQueueAttributeList: dsQueueAttributeList}
+}
+
+// getSingleInstanceTp returns another TpInstance (GPON, XGPON, XGS-PON) for an ONU on a different
+// uni port for the same TP ID, if it finds one, else nil.
+func (t *TechProfileMgr) getSingleInstanceTp(ctx context.Context, tpPathSuffix string) *tp_pb.TechProfileInstance {
+
+ // For example:
+ // tpPathSuffix like "XGS-PON/64/olt-{1234}/pon-{0}/onu-{1}/uni-{1}"
+ // is broken into ["XGS-PON/64/olt-{1234}/pon-{0}/onu-{1}" ""]
+ uniPathSlice := regexp.MustCompile(`/uni-{[0-9]+}$`).Split(tpPathSuffix, 2)
+
+ t.tpInstanceMapLock.RLock()
+ defer t.tpInstanceMapLock.RUnlock()
+ for i := 0; i < MaxUniPortPerOnu; i++ {
+ key := fmt.Sprintf(uniPathSlice[0]+"/uni-{%d}", i)
+ if tpInst, ok := t.tpInstanceMap[key]; ok {
+ logger.Debugw(ctx, "found-single-instance-tp", log.Fields{"key": key})
+ return tpInst
+ }
+ }
+ return nil
+}
+
+ // getSingleInstanceEponTp returns another TpInstance (EPON) for an ONU on a different
+// uni port for the same TP ID, if it finds one, else nil.
+func (t *TechProfileMgr) getSingleInstanceEponTp(ctx context.Context, tpPathSuffix string) *tp_pb.EponTechProfileInstance {
+ // For example:
+ // tpPathSuffix like "EPON/64/olt-{1234}/pon-{0}/onu-{1}/uni-{1}"
+ // is broken into ["EPON/64/-{1234}/pon-{0}/onu-{1}" ""]
+ uniPathSlice := regexp.MustCompile(`/uni-{[0-9]+}$`).Split(tpPathSuffix, 2)
+
+ t.epontpInstanceMapLock.RLock()
+ defer t.epontpInstanceMapLock.RUnlock()
+ for i := 0; i < MaxUniPortPerOnu; i++ {
+ key := fmt.Sprintf(uniPathSlice[0]+"/uni-{%d}", i)
+ if tpInst, ok := t.eponTpInstanceMap[key]; ok {
+ logger.Debugw(ctx, "found-single-instance-tp", log.Fields{"key": key})
+ return tpInst
+ }
+ }
+ return nil
+}
+
+// getDefaultTechProfile returns a default TechProfile for GPON, XGPON, XGS-PON
+func (t *TechProfileMgr) getDefaultTechProfile(ctx context.Context) *tp_pb.TechProfile {
+ var usGemPortAttributeList []*tp_pb.GemPortAttributes
+ var dsGemPortAttributeList []*tp_pb.GemPortAttributes
+
+ for _, pbit := range t.config.DefaultPbits {
+ logger.Debugw(ctx, "creating-gem-port-profile-profile", log.Fields{"pbit": pbit})
+ usGemPortAttributeList = append(usGemPortAttributeList,
+ &tp_pb.GemPortAttributes{
+ MaxQSize: defaultMaxQueueSize,
+ PbitMap: pbit,
+ AesEncryption: defaultAESEncryption,
+ SchedulingPolicy: tp_pb.SchedulingPolicy_WRR,
+ PriorityQ: defaultPriorityQueue,
+ Weight: defaultQueueWeight,
+ DiscardPolicy: tp_pb.DiscardPolicy_TailDrop,
+ DiscardConfigV2: &tp_pb.DiscardConfig{
+ DiscardPolicy: tp_pb.DiscardPolicy_Red,
+ DiscardConfig: &tp_pb.DiscardConfig_RedDiscardConfig{
+ RedDiscardConfig: &tp_pb.RedDiscardConfig{
+ MinThreshold: defaultMinThreshold,
+ MaxThreshold: defaultMaxThreshold,
+ MaxProbability: defaultMaxProbability,
+ },
+ },
+ },
+ DiscardConfig: &tp_pb.RedDiscardConfig{
+ MinThreshold: defaultMinThreshold,
+ MaxThreshold: defaultMaxThreshold,
+ MaxProbability: defaultMaxProbability,
+ },
+ })
+ dsGemPortAttributeList = append(dsGemPortAttributeList,
+ &tp_pb.GemPortAttributes{
+ MaxQSize: defaultMaxQueueSize,
+ PbitMap: pbit,
+ AesEncryption: defaultAESEncryption,
+ SchedulingPolicy: tp_pb.SchedulingPolicy_WRR,
+ PriorityQ: defaultPriorityQueue,
+ Weight: defaultQueueWeight,
+ DiscardPolicy: tp_pb.DiscardPolicy_TailDrop,
+ DiscardConfigV2: &tp_pb.DiscardConfig{
+ DiscardPolicy: tp_pb.DiscardPolicy_Red,
+ DiscardConfig: &tp_pb.DiscardConfig_RedDiscardConfig{
+ RedDiscardConfig: &tp_pb.RedDiscardConfig{
+ MinThreshold: defaultMinThreshold,
+ MaxThreshold: defaultMaxThreshold,
+ MaxProbability: defaultMaxProbability,
+ },
+ },
+ },
+ DiscardConfig: &tp_pb.RedDiscardConfig{
+ MinThreshold: defaultMinThreshold,
+ MaxThreshold: defaultMaxThreshold,
+ MaxProbability: defaultMaxProbability,
+ },
+ IsMulticast: defaultIsMulticast,
+ DynamicAccessControlList: defaultAccessControlList,
+ StaticAccessControlList: defaultAccessControlList,
+ MulticastGemId: defaultMcastGemID})
+ }
+ return &tp_pb.TechProfile{
+ Name: t.config.DefaultTPName,
+ ProfileType: t.resourceMgr.GetTechnology(),
+ Version: t.config.TPVersion,
+ NumGemPorts: uint32(len(usGemPortAttributeList)),
+ InstanceControl: &tp_pb.InstanceControl{
+ Onu: defaultOnuInstance,
+ Uni: defaultUniInstance,
+ MaxGemPayloadSize: defaultGemPayloadSize},
+ UsScheduler: &tp_pb.SchedulerAttributes{
+ Direction: tp_pb.Direction_UPSTREAM,
+ AdditionalBw: tp_pb.AdditionalBW_AdditionalBW_BestEffort,
+ Priority: defaultPriority,
+ Weight: defaultWeight,
+ QSchedPolicy: tp_pb.SchedulingPolicy_Hybrid},
+ DsScheduler: &tp_pb.SchedulerAttributes{
+ Direction: tp_pb.Direction_DOWNSTREAM,
+ AdditionalBw: tp_pb.AdditionalBW_AdditionalBW_BestEffort,
+ Priority: defaultPriority,
+ Weight: defaultWeight,
+ QSchedPolicy: tp_pb.SchedulingPolicy_Hybrid},
+ UpstreamGemPortAttributeList: usGemPortAttributeList,
+ DownstreamGemPortAttributeList: dsGemPortAttributeList}
+}
+
+// getDefaultEponProfile returns a default TechProfile for EPON
+func (t *TechProfileMgr) getDefaultEponProfile(ctx context.Context) *tp_pb.EponTechProfile {
+
+ var usQueueAttributeList []*tp_pb.EPONQueueAttributes
+ var dsQueueAttributeList []*tp_pb.EPONQueueAttributes
+
+ for _, pbit := range t.config.DefaultPbits {
+ logger.Debugw(ctx, "Creating Queue", log.Fields{"pbit": pbit})
+ usQueueAttributeList = append(usQueueAttributeList,
+ &tp_pb.EPONQueueAttributes{
+ MaxQSize: defaultMaxQueueSize,
+ PbitMap: pbit,
+ AesEncryption: defaultAESEncryption,
+ TrafficType: defaultTrafficType,
+ UnsolicitedGrantSize: defaultUnsolicitedGrantSize,
+ NominalInterval: defaultNominalInterval,
+ ToleratedPollJitter: defaultToleratedPollJitter,
+ RequestTransmissionPolicy: defaultRequestTransmissionPolicy,
+ NumQSets: defaultNumQueueSet,
+ QThresholds: &tp_pb.QThresholds{
+ QThreshold1: defaultQThreshold1,
+ QThreshold2: defaultQThreshold2,
+ QThreshold3: defaultQThreshold3,
+ QThreshold4: defaultQThreshold4,
+ QThreshold5: defaultQThreshold5,
+ QThreshold6: defaultQThreshold6,
+ QThreshold7: defaultQThreshold7},
+ SchedulingPolicy: tp_pb.SchedulingPolicy_WRR,
+ PriorityQ: defaultPriorityQueue,
+ Weight: defaultQueueWeight,
+ DiscardPolicy: tp_pb.DiscardPolicy_TailDrop,
+ DiscardConfigV2: &tp_pb.DiscardConfig{
+ DiscardPolicy: tp_pb.DiscardPolicy_Red,
+ DiscardConfig: &tp_pb.DiscardConfig_RedDiscardConfig{
+ RedDiscardConfig: &tp_pb.RedDiscardConfig{
+ MinThreshold: defaultMinThreshold,
+ MaxThreshold: defaultMaxThreshold,
+ MaxProbability: defaultMaxProbability,
+ },
+ },
+ },
+ DiscardConfig: &tp_pb.RedDiscardConfig{
+ MinThreshold: defaultMinThreshold,
+ MaxThreshold: defaultMaxThreshold,
+ MaxProbability: defaultMaxProbability,
+ }})
+ dsQueueAttributeList = append(dsQueueAttributeList,
+ &tp_pb.EPONQueueAttributes{
+ MaxQSize: defaultMaxQueueSize,
+ PbitMap: pbit,
+ AesEncryption: defaultAESEncryption,
+ SchedulingPolicy: tp_pb.SchedulingPolicy_WRR,
+ PriorityQ: defaultPriorityQueue,
+ Weight: defaultQueueWeight,
+ DiscardPolicy: tp_pb.DiscardPolicy_TailDrop,
+ DiscardConfigV2: &tp_pb.DiscardConfig{
+ DiscardPolicy: tp_pb.DiscardPolicy_Red,
+ DiscardConfig: &tp_pb.DiscardConfig_RedDiscardConfig{
+ RedDiscardConfig: &tp_pb.RedDiscardConfig{
+ MinThreshold: defaultMinThreshold,
+ MaxThreshold: defaultMaxThreshold,
+ MaxProbability: defaultMaxProbability,
+ },
+ },
+ },
+ DiscardConfig: &tp_pb.RedDiscardConfig{
+ MinThreshold: defaultMinThreshold,
+ MaxThreshold: defaultMaxThreshold,
+ MaxProbability: defaultMaxProbability,
+ }})
+ }
+ return &tp_pb.EponTechProfile{
+ Name: t.config.DefaultTPName,
+ ProfileType: t.resourceMgr.GetTechnology(),
+ Version: t.config.TPVersion,
+ NumGemPorts: uint32(len(usQueueAttributeList)),
+ InstanceControl: &tp_pb.InstanceControl{
+ Onu: defaultOnuInstance,
+ Uni: defaultUniInstance,
+ MaxGemPayloadSize: defaultGemPayloadSize},
+ PackageType: defaultPakageType,
+ UpstreamQueueAttributeList: usQueueAttributeList,
+ DownstreamQueueAttributeList: dsQueueAttributeList}
+}
+
+//isMulticastGem returns true if isMulticast attribute value of a GEM port is true; false otherwise
+func isMulticastGem(isMulticastAttrValue string) bool {
+ return isMulticastAttrValue != "" &&
+ (isMulticastAttrValue == "True" || isMulticastAttrValue == "true" || isMulticastAttrValue == "TRUE")
+}
+
+func (t *TechProfileMgr) addResourceInstanceToKVStore(ctx context.Context, tpID uint32, uniPortName string, resInst tp_pb.ResourceInstance) error {
+ logger.Debugw(ctx, "adding-resource-instance-to-kv-store", log.Fields{"tpID": tpID, "uniPortName": uniPortName, "resInst": resInst})
+ val, err := proto.Marshal(&resInst)
+ if err != nil {
+ logger.Errorw(ctx, "failed-to-marshall-resource-instance", log.Fields{"err": err, "tpID": tpID, "uniPortName": uniPortName, "resInst": resInst})
+ return err
+ }
+ err = t.config.ResourceInstanceKVBacked.Put(ctx, fmt.Sprintf("%s/%d/%s", t.resourceMgr.GetTechnology(), tpID, uniPortName), val)
+ return err
+}
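+ // For illustration (hypothetical values): a Put for tech "XGS-PON", tpID 64 and
+ // uniPortName "olt-{deadbeef-1234}/pon-{0}/onu-{1}/uni-{0}" stores the proto-marshalled
+ // ResourceInstance under "<ResourceInstanceKVPathPrefix>/XGS-PON/64/olt-{deadbeef-1234}/pon-{0}/onu-{1}/uni-{0}",
+ // where the prefix is the one configured on the resource-instance backend in NewTechProfile.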
+
+func (t *TechProfileMgr) removeResourceInstanceFromKVStore(ctx context.Context, tpID uint32, uniPortName string) error {
+ logger.Debugw(ctx, "removing-resource-instance-to-kv-store", log.Fields{"tpID": tpID, "uniPortName": uniPortName})
+ if err := t.config.ResourceInstanceKVBacked.Delete(ctx, fmt.Sprintf("%s/%d/%s", t.resourceMgr.GetTechnology(), tpID, uniPortName)); err != nil {
+ logger.Errorw(ctx, "error-removing-resource-instance-to-kv-store", log.Fields{"err": err, "tpID": tpID, "uniPortName": uniPortName})
+ return err
+ }
+ return nil
+}
+
+func (t *TechProfileMgr) getTPFromKVStore(ctx context.Context, tpID uint32) *tp_pb.TechProfile {
+ var tp *tp_pb.TechProfile
+ t.tpMapLock.RLock()
+ tp, ok := t.tpMap[tpID]
+ t.tpMapLock.RUnlock()
+ if ok {
+ logger.Debugw(ctx, "found-tp-in-cache", log.Fields{"tpID": tpID})
+ return tp
+ }
+ key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), tpID)
+ logger.Debugw(ctx, "getting-tp-from-kv-store", log.Fields{"tpID": tpID, "Key": key})
+ kvresult, err := t.config.DefaultTpKVBackend.Get(ctx, key)
+ if err != nil {
+ logger.Errorw(ctx, "error-fetching-from-kv-store", log.Fields{"err": err, "key": key})
+ return nil
+ }
+ if kvresult != nil {
+ /* Backend will return Value in string format, needs to be converted to []byte before unmarshal */
+ if value, err := kvstore.ToByte(kvresult.Value); err == nil {
+ lTp := &tp_pb.TechProfile{}
+ reader := bytes.NewReader(value)
+ if err = jsonpb.Unmarshal(reader, lTp); err != nil {
+ logger.Errorw(ctx, "error-unmarshalling-tp-from-kv-store", log.Fields{"err": err, "tpID": tpID, "error": err})
+ return nil
+ }
+
+ logger.Debugw(ctx, "success-fetched-tp-from-kv-store", log.Fields{"tpID": tpID, "value": *lTp})
+ return lTp
+ } else {
+ logger.Errorw(ctx, "error-decoding-tp", log.Fields{"err": err, "tpID": tpID})
+ // We create a default profile in this case.
+ }
+ }
+
+ return nil
+}
+
+func (t *TechProfileMgr) getEponTPFromKVStore(ctx context.Context, tpID uint32) *tp_pb.EponTechProfile {
+ var eponTp *tp_pb.EponTechProfile
+ t.eponTpMapLock.RLock()
+ eponTp, ok := t.eponTpMap[tpID]
+ t.eponTpMapLock.RUnlock()
+ if ok {
+ logger.Debugw(ctx, "found-tp-in-cache", log.Fields{"tpID": tpID})
+ return eponTp
+ }
+ key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), tpID)
+ logger.Debugw(ctx, "getting-epon-tp-from-kv-store", log.Fields{"tpID": tpID, "Key": key})
+ kvresult, err := t.config.DefaultTpKVBackend.Get(ctx, key)
+ if err != nil {
+ logger.Errorw(ctx, "error-fetching-from-kv-store", log.Fields{"err": err, "key": key})
+ return nil
+ }
+ if kvresult != nil {
+ /* Backend will return Value in string format, needs to be converted to []byte before unmarshal */
+ if value, err := kvstore.ToByte(kvresult.Value); err == nil {
+ lEponTp := &tp_pb.EponTechProfile{}
+ reader := bytes.NewReader(value)
+ if err = jsonpb.Unmarshal(reader, lEponTp); err != nil {
+ logger.Errorw(ctx, "error-unmarshalling-epon-tp-from-kv-store", log.Fields{"err": err, "tpID": tpID, "error": err})
+ return nil
+ }
+
+ logger.Debugw(ctx, "success-fetching-epon-tp-from-kv-store", log.Fields{"tpID": tpID, "value": *lEponTp})
+ return lEponTp
+ }
+ }
+ return nil
+}
+
+func newKVClient(ctx context.Context, storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
+
+ logger.Infow(ctx, "kv-store", log.Fields{"storeType": storeType, "address": address})
+ switch storeType {
+ case "etcd":
+ return kvstore.NewEtcdClient(ctx, address, timeout, log.WarnLevel)
+ }
+ return nil, errors.New("unsupported-kv-store")
+}
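+ // Example (illustrative): newKVClient(ctx, "etcd", "etcd-host:2379", 5*time.Second) returns an
+ // etcd-backed kvstore.Client; any other store type yields the "unsupported-kv-store" error.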
+
+// buildTpInstanceFromResourceInstance for GPON, XGPON and XGS-PON technology - build TpInstance from TechProfile template and ResourceInstance
+func (t *TechProfileMgr) buildTpInstanceFromResourceInstance(ctx context.Context, tp *tp_pb.TechProfile, resInst *tp_pb.ResourceInstance) *tp_pb.TechProfileInstance {
+
+ var usGemPortAttributeList []*tp_pb.GemPortAttributes
+ var dsGemPortAttributeList []*tp_pb.GemPortAttributes
+ var dsMulticastGemAttributeList []*tp_pb.GemPortAttributes
+ var dsUnicastGemAttributeList []*tp_pb.GemPortAttributes
+
+ if len(resInst.GemportIds) != int(tp.NumGemPorts) {
+ logger.Errorw(ctx, "mismatch-in-number-of-gemports-between-template-and-resource-instance",
+ log.Fields{"tpID": resInst.TpId, "totalResInstGemPortIDs": len(resInst.GemportIds), "totalTpTemplateGemPorts": tp.NumGemPorts})
+ return nil
+ }
+ for index := 0; index < int(tp.NumGemPorts); index++ {
+ usGemPortAttributeList = append(usGemPortAttributeList,
+ &tp_pb.GemPortAttributes{GemportId: resInst.GemportIds[index],
+ MaxQSize: tp.UpstreamGemPortAttributeList[index].MaxQSize,
+ PbitMap: tp.UpstreamGemPortAttributeList[index].PbitMap,
+ AesEncryption: tp.UpstreamGemPortAttributeList[index].AesEncryption,
+ SchedulingPolicy: tp.UpstreamGemPortAttributeList[index].SchedulingPolicy,
+ PriorityQ: tp.UpstreamGemPortAttributeList[index].PriorityQ,
+ Weight: tp.UpstreamGemPortAttributeList[index].Weight,
+ DiscardPolicy: tp.UpstreamGemPortAttributeList[index].DiscardPolicy,
+ DiscardConfig: tp.UpstreamGemPortAttributeList[index].DiscardConfig})
+ }
+
+ //put multicast and unicast downstream GEM port attributes in different lists first
+ for index := 0; index < len(tp.DownstreamGemPortAttributeList); index++ {
+ if isMulticastGem(tp.DownstreamGemPortAttributeList[index].IsMulticast) {
+ dsMulticastGemAttributeList = append(dsMulticastGemAttributeList,
+ &tp_pb.GemPortAttributes{
+ MulticastGemId: tp.DownstreamGemPortAttributeList[index].MulticastGemId,
+ MaxQSize: tp.DownstreamGemPortAttributeList[index].MaxQSize,
+ PbitMap: tp.DownstreamGemPortAttributeList[index].PbitMap,
+ AesEncryption: tp.DownstreamGemPortAttributeList[index].AesEncryption,
+ SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
+ PriorityQ: tp.DownstreamGemPortAttributeList[index].PriorityQ,
+ Weight: tp.DownstreamGemPortAttributeList[index].Weight,
+ DiscardPolicy: tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
+ DiscardConfig: tp.DownstreamGemPortAttributeList[index].DiscardConfig,
+ IsMulticast: tp.DownstreamGemPortAttributeList[index].IsMulticast,
+ DynamicAccessControlList: tp.DownstreamGemPortAttributeList[index].DynamicAccessControlList,
+ StaticAccessControlList: tp.DownstreamGemPortAttributeList[index].StaticAccessControlList})
+ } else {
+ dsUnicastGemAttributeList = append(dsUnicastGemAttributeList,
+ &tp_pb.GemPortAttributes{
+ MaxQSize: tp.DownstreamGemPortAttributeList[index].MaxQSize,
+ PbitMap: tp.DownstreamGemPortAttributeList[index].PbitMap,
+ AesEncryption: tp.DownstreamGemPortAttributeList[index].AesEncryption,
+ SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
+ PriorityQ: tp.DownstreamGemPortAttributeList[index].PriorityQ,
+ Weight: tp.DownstreamGemPortAttributeList[index].Weight,
+ DiscardPolicy: tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
+ DiscardConfig: tp.DownstreamGemPortAttributeList[index].DiscardConfig})
+ }
+ }
+ //add unicast downstream GEM ports to dsGemPortAttributeList
+ if dsUnicastGemAttributeList != nil {
+ for index := 0; index < int(tp.NumGemPorts); index++ {
+ dsGemPortAttributeList = append(dsGemPortAttributeList,
+ &tp_pb.GemPortAttributes{GemportId: resInst.GemportIds[index],
+ MaxQSize: dsUnicastGemAttributeList[index].MaxQSize,
+ PbitMap: dsUnicastGemAttributeList[index].PbitMap,
+ AesEncryption: dsUnicastGemAttributeList[index].AesEncryption,
+ SchedulingPolicy: dsUnicastGemAttributeList[index].SchedulingPolicy,
+ PriorityQ: dsUnicastGemAttributeList[index].PriorityQ,
+ Weight: dsUnicastGemAttributeList[index].Weight,
+ DiscardPolicy: dsUnicastGemAttributeList[index].DiscardPolicy,
+ DiscardConfig: dsUnicastGemAttributeList[index].DiscardConfig})
+ }
+ }
+ //add multicast GEM ports to dsGemPortAttributeList afterwards
+ for k := range dsMulticastGemAttributeList {
+ dsGemPortAttributeList = append(dsGemPortAttributeList, dsMulticastGemAttributeList[k])
+ }
+
+ return &tp_pb.TechProfileInstance{
+ SubscriberIdentifier: resInst.SubscriberIdentifier,
+ Name: tp.Name,
+ ProfileType: tp.ProfileType,
+ Version: tp.Version,
+ NumGemPorts: tp.NumGemPorts,
+ InstanceControl: tp.InstanceControl,
+ UsScheduler: &tp_pb.SchedulerAttributes{
+ AllocId: resInst.AllocId,
+ Direction: tp.UsScheduler.Direction,
+ AdditionalBw: tp.UsScheduler.AdditionalBw,
+ Priority: tp.UsScheduler.Priority,
+ Weight: tp.UsScheduler.Weight,
+ QSchedPolicy: tp.UsScheduler.QSchedPolicy},
+ DsScheduler: &tp_pb.SchedulerAttributes{
+ AllocId: resInst.AllocId,
+ Direction: tp.DsScheduler.Direction,
+ AdditionalBw: tp.DsScheduler.AdditionalBw,
+ Priority: tp.DsScheduler.Priority,
+ Weight: tp.DsScheduler.Weight,
+ QSchedPolicy: tp.DsScheduler.QSchedPolicy},
+ UpstreamGemPortAttributeList: usGemPortAttributeList,
+ DownstreamGemPortAttributeList: dsGemPortAttributeList}
+}
+
+// buildEponTpInstanceFromResourceInstance for EPON technology - build EponTpInstance from EponTechProfile template and ResourceInstance
+func (t *TechProfileMgr) buildEponTpInstanceFromResourceInstance(ctx context.Context, tp *tp_pb.EponTechProfile, resInst *tp_pb.ResourceInstance) *tp_pb.EponTechProfileInstance {
+
+ var usQueueAttributeList []*tp_pb.EPONQueueAttributes
+ var dsQueueAttributeList []*tp_pb.EPONQueueAttributes
+
+ if len(resInst.GemportIds) != int(tp.NumGemPorts) {
+ logger.Errorw(ctx, "mismatch-in-number-of-gemports-between-epon-tp-template-and-resource-instance",
+ log.Fields{"tpID": resInst.TpId, "totalResInstGemPortIDs": len(resInst.GemportIds), "totalTpTemplateGemPorts": tp.NumGemPorts})
+ return nil
+ }
+
+ for index := 0; index < int(tp.NumGemPorts); index++ {
+ usQueueAttributeList = append(usQueueAttributeList,
+ &tp_pb.EPONQueueAttributes{GemportId: resInst.GemportIds[index],
+ MaxQSize: tp.UpstreamQueueAttributeList[index].MaxQSize,
+ PbitMap: tp.UpstreamQueueAttributeList[index].PbitMap,
+ AesEncryption: tp.UpstreamQueueAttributeList[index].AesEncryption,
+ TrafficType: tp.UpstreamQueueAttributeList[index].TrafficType,
+ UnsolicitedGrantSize: tp.UpstreamQueueAttributeList[index].UnsolicitedGrantSize,
+ NominalInterval: tp.UpstreamQueueAttributeList[index].NominalInterval,
+ ToleratedPollJitter: tp.UpstreamQueueAttributeList[index].ToleratedPollJitter,
+ RequestTransmissionPolicy: tp.UpstreamQueueAttributeList[index].RequestTransmissionPolicy,
+ NumQSets: tp.UpstreamQueueAttributeList[index].NumQSets,
+ QThresholds: tp.UpstreamQueueAttributeList[index].QThresholds,
+ SchedulingPolicy: tp.UpstreamQueueAttributeList[index].SchedulingPolicy,
+ PriorityQ: tp.UpstreamQueueAttributeList[index].PriorityQ,
+ Weight: tp.UpstreamQueueAttributeList[index].Weight,
+ DiscardPolicy: tp.UpstreamQueueAttributeList[index].DiscardPolicy,
+ DiscardConfig: tp.UpstreamQueueAttributeList[index].DiscardConfig})
+ }
+
+ for index := 0; index < int(tp.NumGemPorts); index++ {
+ dsQueueAttributeList = append(dsQueueAttributeList,
+ &tp_pb.EPONQueueAttributes{GemportId: resInst.GemportIds[index],
+ MaxQSize: tp.DownstreamQueueAttributeList[index].MaxQSize,
+ PbitMap: tp.DownstreamQueueAttributeList[index].PbitMap,
+ AesEncryption: tp.DownstreamQueueAttributeList[index].AesEncryption,
+ SchedulingPolicy: tp.DownstreamQueueAttributeList[index].SchedulingPolicy,
+ PriorityQ: tp.DownstreamQueueAttributeList[index].PriorityQ,
+ Weight: tp.DownstreamQueueAttributeList[index].Weight,
+ DiscardPolicy: tp.DownstreamQueueAttributeList[index].DiscardPolicy,
+ DiscardConfig: tp.DownstreamQueueAttributeList[index].DiscardConfig})
+ }
+
+ return &tp_pb.EponTechProfileInstance{
+ SubscriberIdentifier: resInst.SubscriberIdentifier,
+ Name: tp.Name,
+ ProfileType: tp.ProfileType,
+ Version: tp.Version,
+ NumGemPorts: tp.NumGemPorts,
+ InstanceControl: tp.InstanceControl,
+ PackageType: tp.PackageType,
+ AllocId: resInst.AllocId,
+ UpstreamQueueAttributeList: usQueueAttributeList,
+ DownstreamQueueAttributeList: dsQueueAttributeList}
+}
+
+func (t *TechProfileMgr) getTpInstanceFromResourceInstance(ctx context.Context, resInst *tp_pb.ResourceInstance) *tp_pb.TechProfileInstance {
+ if resInst == nil {
+ logger.Error(ctx, "resource-instance-nil")
+ return nil
+ }
+ tp := t.getTPFromKVStore(ctx, resInst.TpId)
+ if tp == nil {
+ logger.Warnw(ctx, "tp-not-found-on-kv--creating-default-tp", log.Fields{"tpID": resInst.TpId})
+ tp = t.getDefaultTechProfile(ctx)
+ }
+ return t.buildTpInstanceFromResourceInstance(ctx, tp, resInst)
+}
+
+func (t *TechProfileMgr) getEponTpInstanceFromResourceInstance(ctx context.Context, resInst *tp_pb.ResourceInstance) *tp_pb.EponTechProfileInstance {
+ if resInst == nil {
+ logger.Error(ctx, "resource-instance-nil")
+ return nil
+ }
+ eponTp := t.getEponTPFromKVStore(ctx, resInst.TpId)
+ if eponTp == nil {
+ logger.Warnw(ctx, "tp-not-found-on-kv--creating-default-tp", log.Fields{"tpID": resInst.TpId})
+ eponTp = t.getDefaultEponProfile(ctx)
+ }
+ return t.buildEponTpInstanceFromResourceInstance(ctx, eponTp, resInst)
+}
+
+func (t *TechProfileMgr) reconcileTpInstancesToCache(ctx context.Context) error {
+
+ tech := t.resourceMgr.GetTechnology()
+ newCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
+ defer cancel()
+ kvPairs, _ := t.config.ResourceInstanceKVBacked.List(newCtx, tech)
+
+ if tech == xgspon || tech == xgpon || tech == gpon {
+ for keyPath, kvPair := range kvPairs {
+ logger.Debugw(ctx, "attempting-to-reconcile-tp-instance-from-resource-instance", log.Fields{"resourceInstPath": keyPath})
+ if value, err := kvstore.ToByte(kvPair.Value); err == nil {
+ var resInst tp_pb.ResourceInstance
+ if err = proto.Unmarshal(value, &resInst); err != nil {
+ logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"err": err, "keyPath": keyPath, "value": value})
+ continue
+ } else {
+ if tpInst := t.getTpInstanceFromResourceInstance(ctx, &resInst); tpInst != nil {
+ // Trim the kv path by removing the default prefix part and get only the suffix part to reference the internal cache
+ keySuffixSlice := regexp.MustCompile(t.config.ResourceInstanceKVPathPrefix+"/").Split(keyPath, 2)
+ if len(keySuffixSlice) == 2 {
+ keySuffixFormatRegexp := regexp.MustCompile(`^[a-zA-Z\-]+/[0-9]+/olt-{[a-z0-9\-]+}/pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}$`)
+ // Make sure the key suffix conforms to the format [a-zA-Z\-]+/[0-9]+/olt-{[a-z0-9\-]+}/pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}
+ if !keySuffixFormatRegexp.Match([]byte(keySuffixSlice[1])) {
+ logger.Errorw(ctx, "kv-path-not-conforming-to-format", log.Fields{"kvPath": keySuffixSlice[1]})
+ continue
+ }
+ } else {
+ logger.Errorw(ctx, "kv-instance-key-path-not-in-the-expected-format", log.Fields{"kvPath": keyPath})
+ continue
+ }
+ t.tpInstanceMapLock.Lock()
+ t.tpInstanceMap[keySuffixSlice[1]] = tpInst
+ t.tpInstanceMapLock.Unlock()
+ logger.Debugw(ctx, "reconciled-tp-success", log.Fields{"keyPath": keyPath})
+ }
+ }
+ } else {
+ logger.Errorw(ctx, "error-converting-kv-pair-value-to-byte", log.Fields{"err": err})
+ }
+ }
+ } else if tech == epon {
+ for keyPath, kvPair := range kvPairs {
+ logger.Debugw(ctx, "attempting-to-reconcile-epon-tp-instance", log.Fields{"keyPath": keyPath})
+ if value, err := kvstore.ToByte(kvPair.Value); err == nil {
+ var resInst tp_pb.ResourceInstance
+ if err = proto.Unmarshal(value, &resInst); err != nil {
+ logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"keyPath": keyPath, "value": value})
+ continue
+ } else {
+ if eponTpInst := t.getEponTpInstanceFromResourceInstance(ctx, &resInst); eponTpInst != nil {
+ // Trim the kv path by removing the default prefix part and get only the suffix part to reference the internal cache
+ keySuffixSlice := regexp.MustCompile(t.config.ResourceInstanceKVPathPrefix+"/").Split(keyPath, 2)
+ if len(keySuffixSlice) == 2 {
+ keySuffixFormatRegexp := regexp.MustCompile(`^[a-zA-Z\-]+/[0-9]+/olt-{[a-z0-9\-]+}/pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}$`)
+ // Make sure the key suffix conforms to the format [a-zA-Z\-]+/[0-9]+/olt-{[a-z0-9\-]+}/pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}
+ if !keySuffixFormatRegexp.Match([]byte(keySuffixSlice[1])) {
+ logger.Errorw(ctx, "kv-path-not-conforming-to-format", log.Fields{"kvPath": keySuffixSlice[1]})
+ continue
+ }
+ } else {
+ logger.Errorw(ctx, "kv-instance-key-path-not-in-the-expected-format", log.Fields{"kvPath": keyPath})
+ continue
+ }
+ t.epontpInstanceMapLock.Lock()
+ t.eponTpInstanceMap[keySuffixSlice[1]] = eponTpInst
+ t.epontpInstanceMapLock.Unlock()
+ logger.Debugw(ctx, "reconciled-epon-tp-success", log.Fields{"keyPath": keyPath})
+ }
+ }
+ } else {
+ logger.Errorw(ctx, "error-converting-kv-pair-value-to-byte", log.Fields{"err": err})
+ }
+ }
+ } else {
+ logger.Errorw(ctx, "unknown-tech", log.Fields{"tech": tech})
+ return fmt.Errorf("unknown-tech-%v", tech)
+ }
+
+ return nil
+}
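
Editor's note: the reconcile flow above is the heart of this change. Only the compact ResourceInstance is persisted on etcd; the full tech profile instance is rebuilt from the TP template on adapter restart and held in an in-memory write-through cache. The following self-contained Go sketch illustrates that pattern in isolation; the resourceInstance, tpInstance, cache, and reconcile names are illustrative only and are not the voltha-lib-go API.

package main

import (
	"fmt"
	"sync"
)

// resourceInstance is a minimal stand-in for the compact record kept on etcd:
// only the IDs that cannot be re-derived from the TP template are stored.
type resourceInstance struct {
	TpID       uint32
	AllocID    uint32
	GemportIDs []uint32
}

// tpInstance is a stand-in for the full, rebuilt tech profile instance.
type tpInstance struct {
	TpID       uint32
	AllocID    uint32
	GemportIDs []uint32
	NumGem     uint32 // taken from the template, not from etcd
}

// cache is a write-through cache keyed by the KV path suffix.
type cache struct {
	mu   sync.RWMutex
	inst map[string]*tpInstance
}

// reconcile rebuilds full instances from the persisted compact records plus a
// template value (here just NumGem) and repopulates the in-memory cache.
func (c *cache) reconcile(persisted map[string]resourceInstance, numGemFromTemplate uint32) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for path, ri := range persisted {
		if len(ri.GemportIDs) != int(numGemFromTemplate) {
			continue // template/instance mismatch, same guard as the library code
		}
		c.inst[path] = &tpInstance{TpID: ri.TpID, AllocID: ri.AllocID, GemportIDs: ri.GemportIDs, NumGem: numGemFromTemplate}
	}
}

func main() {
	c := &cache{inst: map[string]*tpInstance{}}
	persisted := map[string]resourceInstance{
		"XGS-PON/64/olt-{abc}/pon-{0}/onu-{1}/uni-{0}": {TpID: 64, AllocID: 1024, GemportIDs: []uint32{1024, 1025, 1026, 1027}},
	}
	c.reconcile(persisted, 4)
	fmt.Println(len(c.inst), "tech profile instance(s) reconciled to cache")
}
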
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/tech_profile_if.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/tech_profile_if.go
new file mode 100644
index 0000000..5622345
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/techprofile/tech_profile_if.go
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package techprofile
+
+import (
+ "context"
+ "github.com/opencord/voltha-lib-go/v5/pkg/db"
+ tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
+)
+
+type TechProfileIf interface {
+ SetKVClient(ctx context.Context, pathPrefix string) *db.Backend
+ GetTechProfileInstanceKey(ctx context.Context, tpID uint32, uniPortName string) string
+ GetTPInstance(ctx context.Context, path string) (interface{}, error)
+ CreateTechProfileInstance(ctx context.Context, tpID uint32, uniPortName string, intfID uint32) (interface{}, error)
+ DeleteTechProfileInstance(ctx context.Context, tpID uint32, uniPortName string) error
+ GetUsScheduler(tpInstance *tp_pb.TechProfileInstance) *tp_pb.SchedulerConfig
+ GetDsScheduler(tpInstance *tp_pb.TechProfileInstance) *tp_pb.SchedulerConfig
+ GetTrafficScheduler(tpInstance *tp_pb.TechProfileInstance, SchedCfg *tp_pb.SchedulerConfig, ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler
+ GetTrafficQueues(ctx context.Context, tp *tp_pb.TechProfileInstance, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error)
+ GetMulticastTrafficQueues(ctx context.Context, tp *tp_pb.TechProfileInstance) []*tp_pb.TrafficQueue
+ GetGemportForPbit(ctx context.Context, tp interface{}, Dir tp_pb.Direction, pbit uint32) interface{}
+ FindAllTpInstances(ctx context.Context, oltDeviceID string, tpID uint32, ponIntf uint32, onuID uint32) interface{}
+ GetResourceID(ctx context.Context, IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error)
+ FreeResourceID(ctx context.Context, IntfID uint32, ResourceType string, ReleaseContent []uint32) error
+}
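
Editor's note: GetTPInstance and CreateTechProfileInstance deliberately return interface{}, because the concrete instance type depends on the PON technology. Callers therefore have to type-switch on the result; a minimal sketch of that pattern follows. The handleTpInstance helper is illustrative and not part of the library; it relies only on fields confirmed by the generated types in this patch.

package example

import (
	"fmt"

	tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
)

// handleTpInstance shows how a caller of TechProfileIf.GetTPInstance or
// CreateTechProfileInstance can recover the concrete instance type.
func handleTpInstance(inst interface{}) error {
	switch tpInst := inst.(type) {
	case *tp_pb.TechProfileInstance:
		// GPON/XGPON/XGS-PON: scheduler and GEM port attribute lists are populated.
		fmt.Printf("xPON tech profile instance: %d GEM ports\n", tpInst.NumGemPorts)
	case *tp_pb.EponTechProfileInstance:
		// EPON: a single alloc-id plus per-direction queue attribute lists.
		fmt.Printf("EPON tech profile instance: %d queues, alloc-id %d\n", tpInst.NumGemPorts, tpInst.AllocId)
	default:
		return fmt.Errorf("unexpected tech profile instance type %T", inst)
	}
	return nil
}
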
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/version/version.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/version/version.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/version/version.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/version/version.go
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/common/common.pb.go b/vendor/github.com/opencord/voltha-protos/v4/go/common/common.pb.go
index a370497..b57e775 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/common/common.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v4/go/common/common.pb.go
@@ -101,6 +101,8 @@
OperStatus_FAILED OperStatus_Types = 5
// The device is reconciling
OperStatus_RECONCILING OperStatus_Types = 6
+ // The device reconciliation has failed
+ OperStatus_RECONCILING_FAILED OperStatus_Types = 7
)
var OperStatus_Types_name = map[int32]string{
@@ -111,16 +113,18 @@
4: "ACTIVE",
5: "FAILED",
6: "RECONCILING",
+ 7: "RECONCILING_FAILED",
}
var OperStatus_Types_value = map[string]int32{
- "UNKNOWN": 0,
- "DISCOVERED": 1,
- "ACTIVATING": 2,
- "TESTING": 3,
- "ACTIVE": 4,
- "FAILED": 5,
- "RECONCILING": 6,
+ "UNKNOWN": 0,
+ "DISCOVERED": 1,
+ "ACTIVATING": 2,
+ "TESTING": 3,
+ "ACTIVE": 4,
+ "FAILED": 5,
+ "RECONCILING": 6,
+ "RECONCILING_FAILED": 7,
}
func (x OperStatus_Types) String() string {
@@ -603,44 +607,45 @@
func init() { proto.RegisterFile("voltha_protos/common.proto", fileDescriptor_c2e3fd231961e826) }
var fileDescriptor_c2e3fd231961e826 = []byte{
- // 619 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x5f, 0x4f, 0xdb, 0x3e,
- 0x14, 0x6d, 0x9b, 0xb6, 0x3f, 0x7a, 0x4b, 0x43, 0x7e, 0x06, 0xa6, 0x0e, 0x4d, 0x5a, 0x95, 0x17,
- 0xd8, 0xc4, 0x5a, 0x89, 0xf1, 0xba, 0x87, 0x90, 0x78, 0x9d, 0x05, 0x38, 0x91, 0x93, 0x14, 0x8d,
- 0x97, 0x2a, 0x34, 0x06, 0x32, 0xd1, 0x38, 0x4a, 0x5c, 0x34, 0xbe, 0xf6, 0x3e, 0xc1, 0x64, 0xa7,
- 0xfc, 0x9b, 0x78, 0x49, 0x7c, 0xee, 0x39, 0xb9, 0x47, 0xe7, 0x3a, 0x17, 0xf6, 0xee, 0xc5, 0x9d,
- 0xbc, 0x4d, 0xe6, 0x45, 0x29, 0xa4, 0xa8, 0x26, 0x0b, 0xb1, 0x5c, 0x8a, 0x7c, 0xac, 0x11, 0xea,
- 0xd6, 0xc8, 0xde, 0x81, 0x16, 0xf1, 0x90, 0x09, 0xad, 0x2c, 0x1d, 0x36, 0x47, 0xcd, 0x83, 0x1e,
- 0x6b, 0x65, 0xa9, 0xbd, 0x0f, 0x06, 0xf1, 0x2a, 0x34, 0x82, 0x4e, 0x26, 0xf9, 0xb2, 0x1a, 0x36,
- 0x47, 0xc6, 0x41, 0xff, 0x08, 0xc6, 0xeb, 0x16, 0xc4, 0x63, 0x35, 0x61, 0xdf, 0x02, 0x38, 0xe9,
- 0x32, 0xcb, 0x43, 0x99, 0x48, 0x6e, 0x5f, 0x42, 0x27, 0x7a, 0x28, 0x78, 0x85, 0xfa, 0xf0, 0x5f,
- 0x4c, 0x4f, 0xa9, 0x7f, 0x41, 0xad, 0x06, 0x42, 0x60, 0x06, 0x0c, 0x07, 0xcc, 0x9f, 0x91, 0x90,
- 0xf8, 0x14, 0x7b, 0x56, 0x53, 0x09, 0x30, 0x75, 0x4e, 0xce, 0xb0, 0x67, 0xb5, 0xd0, 0x26, 0x6c,
- 0x78, 0x24, 0xac, 0x91, 0x81, 0x76, 0xe1, 0x7f, 0xcf, 0xbf, 0xa0, 0x67, 0xbe, 0xe3, 0x11, 0x3a,
- 0x9d, 0x93, 0x73, 0x67, 0x8a, 0xad, 0xb6, 0xfd, 0x1b, 0xc0, 0x2f, 0x78, 0xa9, 0x8c, 0x56, 0x95,
- 0xfd, 0xeb, 0x4d, 0x27, 0x13, 0xc0, 0x23, 0xa1, 0xeb, 0xcf, 0x30, 0xd3, 0x2e, 0x26, 0x80, 0xe3,
- 0x46, 0x64, 0xe6, 0x44, 0x84, 0x4e, 0xad, 0x96, 0x12, 0x47, 0x38, 0xd4, 0xc0, 0x40, 0x00, 0x5d,
- 0x4d, 0x62, 0xab, 0xad, 0xce, 0xdf, 0x1d, 0xa2, 0xfc, 0x3b, 0x68, 0x0b, 0xfa, 0x0c, 0xbb, 0x3e,
- 0x75, 0xc9, 0x99, 0x12, 0x76, 0x6d, 0x0c, 0x03, 0x57, 0xe4, 0x39, 0x5f, 0xc8, 0xb5, 0xf9, 0xf1,
- 0x9b, 0xe6, 0x5b, 0xd0, 0x8f, 0x29, 0xc3, 0x8e, 0xfb, 0x43, 0x25, 0xb1, 0x9a, 0x68, 0x00, 0xbd,
- 0x67, 0xd8, 0xb2, 0xff, 0x34, 0x61, 0xa0, 0x12, 0x24, 0x32, 0x13, 0x39, 0xe3, 0x55, 0x81, 0xbe,
- 0x41, 0x7b, 0x21, 0x52, 0xae, 0xe7, 0x6e, 0x1e, 0x7d, 0x7a, 0x9c, 0xee, 0x2b, 0xd1, 0x4b, 0x24,
- 0x57, 0x65, 0xee, 0x8a, 0x94, 0x33, 0xfd, 0x19, 0xda, 0x87, 0xad, 0x24, 0x4d, 0x33, 0xc5, 0x25,
- 0x77, 0xf3, 0x2c, 0xbf, 0x16, 0xc3, 0x96, 0xbe, 0x41, 0xf3, 0xb9, 0x4c, 0xf2, 0x6b, 0x61, 0x3f,
- 0xc0, 0xf6, 0x1b, 0x5d, 0xd4, 0xa0, 0xfd, 0x00, 0x33, 0x27, 0x22, 0x3e, 0x9d, 0x87, 0xb1, 0xeb,
- 0xe2, 0x30, 0xb4, 0x1a, 0xaf, 0xcb, 0x6a, 0x2a, 0x31, 0x53, 0x69, 0xde, 0xc3, 0xee, 0x73, 0x39,
- 0xa6, 0x61, 0x1c, 0x04, 0x3e, 0x8b, 0xf4, 0xfd, 0xbd, 0xa2, 0x08, 0x9d, 0x07, 0xcc, 0x9f, 0x32,
- 0xd5, 0xcc, 0xb0, 0x0f, 0xa1, 0x37, 0x4b, 0xee, 0x56, 0x5c, 0xcd, 0xcb, 0xfe, 0x08, 0x6d, 0xf5,
- 0x46, 0x3d, 0xe8, 0xe0, 0xf3, 0x20, 0xfa, 0x69, 0x35, 0xd6, 0x57, 0x1f, 0x39, 0xd4, 0xc5, 0x56,
- 0xd3, 0xa6, 0x60, 0x6a, 0x75, 0x58, 0xf0, 0x45, 0x76, 0x9d, 0xf1, 0xf2, 0xdf, 0x1f, 0x13, 0x1d,
- 0x42, 0xe7, 0x5e, 0x29, 0x74, 0x52, 0xf3, 0xe8, 0xdd, 0xe3, 0xcc, 0x9e, 0x4c, 0xc6, 0xea, 0xc1,
- 0x6a, 0x91, 0x2d, 0x61, 0xb3, 0xce, 0xab, 0xe9, 0x0a, 0x59, 0x60, 0x84, 0x5c, 0xea, 0x76, 0x03,
- 0xa6, 0x8e, 0x68, 0x04, 0xfd, 0x38, 0xaf, 0x56, 0x45, 0x21, 0x4a, 0xc9, 0x53, 0xdd, 0x75, 0xc0,
- 0x5e, 0x96, 0xd0, 0x0e, 0x74, 0x70, 0x59, 0x8a, 0x72, 0x68, 0x68, 0xae, 0x06, 0x68, 0x0f, 0x36,
- 0xbc, 0xac, 0x92, 0x49, 0xbe, 0xe0, 0xc3, 0xb6, 0x26, 0x9e, 0xf0, 0xe7, 0x0f, 0xb0, 0x19, 0xf1,
- 0x4a, 0x9e, 0x8b, 0x94, 0x9f, 0xf2, 0x87, 0x4a, 0x65, 0x4c, 0x8a, 0x6c, 0x2e, 0x79, 0x25, 0xad,
- 0xc6, 0x09, 0x86, 0x6d, 0x51, 0xde, 0x8c, 0x45, 0xc1, 0xf3, 0x85, 0x28, 0xd3, 0x71, 0xbd, 0xa3,
- 0x97, 0xe3, 0x9b, 0x4c, 0xde, 0xae, 0xae, 0x54, 0x9e, 0xc9, 0x23, 0x37, 0xa9, 0xb9, 0x2f, 0xeb,
- 0xfd, 0xbd, 0x3f, 0x9e, 0xdc, 0x88, 0xf5, 0x16, 0x5f, 0x75, 0x75, 0xf1, 0xeb, 0xdf, 0x00, 0x00,
- 0x00, 0xff, 0xff, 0x4d, 0x6f, 0x2b, 0x79, 0xe4, 0x03, 0x00, 0x00,
+ // 634 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x5d, 0x4f, 0xe3, 0x3a,
+ 0x10, 0x6d, 0xfa, 0x05, 0x9d, 0xd2, 0x90, 0x6b, 0x3e, 0xd4, 0x8b, 0xae, 0x74, 0xab, 0xbc, 0xc0,
+ 0xbd, 0x62, 0x5b, 0x89, 0xe5, 0x75, 0x1f, 0x42, 0xe2, 0xed, 0x5a, 0x80, 0x53, 0x39, 0x49, 0xd1,
+ 0xf2, 0x12, 0x85, 0xc6, 0x40, 0x24, 0x1a, 0x47, 0x89, 0x8b, 0xc4, 0xeb, 0xfe, 0x83, 0xfd, 0xab,
+ 0xfb, 0x0b, 0x56, 0x76, 0xca, 0xd7, 0x8a, 0x97, 0xd6, 0x67, 0xce, 0xc9, 0x8c, 0xcf, 0x8c, 0x07,
+ 0x0e, 0x1e, 0xc5, 0x83, 0xbc, 0x4f, 0xe2, 0xa2, 0x14, 0x52, 0x54, 0x93, 0x85, 0x58, 0x2e, 0x45,
+ 0x3e, 0xd6, 0x08, 0x75, 0x6b, 0x64, 0xef, 0x42, 0x93, 0x78, 0xc8, 0x84, 0x66, 0x96, 0x0e, 0x8d,
+ 0x91, 0x71, 0xd4, 0x63, 0xcd, 0x2c, 0xb5, 0x0f, 0xa1, 0x45, 0xbc, 0x0a, 0x8d, 0xa0, 0x93, 0x49,
+ 0xbe, 0xac, 0x86, 0xc6, 0xa8, 0x75, 0xd4, 0x3f, 0x81, 0xf1, 0x3a, 0x05, 0xf1, 0x58, 0x4d, 0xd8,
+ 0xf7, 0x00, 0x4e, 0xba, 0xcc, 0xf2, 0x40, 0x26, 0x92, 0xdb, 0xd7, 0xd0, 0x09, 0x9f, 0x0a, 0x5e,
+ 0xa1, 0x3e, 0x6c, 0x44, 0xf4, 0x9c, 0xfa, 0x57, 0xd4, 0x6a, 0x20, 0x04, 0xe6, 0x8c, 0xe1, 0x19,
+ 0xf3, 0xe7, 0x24, 0x20, 0x3e, 0xc5, 0x9e, 0x65, 0x28, 0x01, 0xa6, 0xce, 0xd9, 0x05, 0xf6, 0xac,
+ 0x26, 0xda, 0x82, 0x4d, 0x8f, 0x04, 0x35, 0x6a, 0xa1, 0x3d, 0xf8, 0xcb, 0xf3, 0xaf, 0xe8, 0x85,
+ 0xef, 0x78, 0x84, 0x4e, 0x63, 0x72, 0xe9, 0x4c, 0xb1, 0xd5, 0xb6, 0x7f, 0x1a, 0x00, 0x7e, 0xc1,
+ 0x4b, 0x55, 0x69, 0x55, 0xd9, 0x3f, 0x8c, 0x0f, 0x6b, 0x99, 0x00, 0x1e, 0x09, 0x5c, 0x7f, 0x8e,
+ 0x99, 0xae, 0x63, 0x02, 0x38, 0x6e, 0x48, 0xe6, 0x4e, 0x48, 0xe8, 0xd4, 0x6a, 0x2a, 0x71, 0x88,
+ 0x03, 0x0d, 0x5a, 0x08, 0xa0, 0xab, 0x49, 0x6c, 0xb5, 0xd5, 0xf9, 0xab, 0x43, 0xd4, 0x0d, 0x3a,
+ 0x68, 0x1b, 0xfa, 0x0c, 0xbb, 0x3e, 0x75, 0xc9, 0x85, 0x12, 0x76, 0xd1, 0x3e, 0xa0, 0x37, 0x81,
+ 0x78, 0x2d, 0xdc, 0xb0, 0x31, 0x0c, 0x5c, 0x91, 0xe7, 0x7c, 0x21, 0xd7, 0xb7, 0x3a, 0xfd, 0xf0,
+ 0x52, 0xdb, 0xd0, 0x8f, 0x28, 0xc3, 0x8e, 0xfb, 0x4d, 0x79, 0xb4, 0x0c, 0x34, 0x80, 0xde, 0x2b,
+ 0x6c, 0xda, 0xbf, 0x0c, 0x18, 0x28, 0x6b, 0x89, 0xcc, 0x44, 0xce, 0x78, 0x55, 0xa0, 0x2f, 0xd0,
+ 0x5e, 0x88, 0x94, 0xeb, 0x89, 0x98, 0x27, 0xff, 0x3d, 0xf7, 0xfd, 0x9d, 0xe8, 0x2d, 0x92, 0xab,
+ 0x32, 0x77, 0x45, 0xca, 0x99, 0xfe, 0x0c, 0x1d, 0xc2, 0x76, 0x92, 0xa6, 0x99, 0xe2, 0x92, 0x87,
+ 0x38, 0xcb, 0x6f, 0xc5, 0xb0, 0xa9, 0x67, 0x6b, 0xbe, 0x86, 0x49, 0x7e, 0x2b, 0xec, 0x27, 0xd8,
+ 0xf9, 0x20, 0x8b, 0x1a, 0x81, 0x3f, 0xc3, 0xcc, 0x09, 0x89, 0x4f, 0xe3, 0x20, 0x72, 0x5d, 0x1c,
+ 0x04, 0x56, 0xe3, 0x7d, 0x58, 0x35, 0x21, 0x62, 0xca, 0xcd, 0xdf, 0xb0, 0xf7, 0x1a, 0x8e, 0x68,
+ 0x10, 0xcd, 0x66, 0x3e, 0x0b, 0xf5, 0x64, 0xdf, 0x51, 0x84, 0xc6, 0x33, 0xe6, 0x4f, 0x99, 0x4a,
+ 0xd6, 0xb2, 0x8f, 0xa1, 0x37, 0x4f, 0x1e, 0x56, 0x5c, 0xf5, 0xcb, 0xfe, 0x17, 0xda, 0xea, 0x1f,
+ 0xf5, 0xa0, 0x83, 0x2f, 0x67, 0xe1, 0x77, 0xab, 0xb1, 0x7e, 0x14, 0xa1, 0x43, 0x5d, 0x6c, 0x19,
+ 0x36, 0x05, 0x53, 0xab, 0x83, 0x82, 0x2f, 0xb2, 0xdb, 0x8c, 0x97, 0x7f, 0x3e, 0x59, 0x74, 0x0c,
+ 0x9d, 0x47, 0xa5, 0xd0, 0x4e, 0xcd, 0x93, 0xfd, 0xe7, 0x9e, 0xbd, 0x14, 0x19, 0xab, 0x1f, 0x56,
+ 0x8b, 0x6c, 0x09, 0x5b, 0xb5, 0x5f, 0x4d, 0x57, 0xc8, 0x82, 0x56, 0xc0, 0xa5, 0x4e, 0x37, 0x60,
+ 0xea, 0x88, 0x46, 0xd0, 0x8f, 0xf2, 0x6a, 0x55, 0x14, 0xa2, 0x94, 0x3c, 0xd5, 0x59, 0x07, 0xec,
+ 0x6d, 0x08, 0xed, 0x42, 0x07, 0x97, 0xa5, 0x28, 0x87, 0x2d, 0xcd, 0xd5, 0x00, 0x1d, 0xc0, 0xa6,
+ 0x97, 0x55, 0x32, 0xc9, 0x17, 0x7c, 0xd8, 0xd6, 0xc4, 0x0b, 0xfe, 0xff, 0x1f, 0xd8, 0x0a, 0x79,
+ 0x25, 0x2f, 0x45, 0xca, 0xcf, 0xf9, 0x53, 0xa5, 0x3c, 0x26, 0x45, 0x16, 0x4b, 0x5e, 0x49, 0xab,
+ 0x71, 0x86, 0x61, 0x47, 0x94, 0x77, 0x63, 0x51, 0xf0, 0x7c, 0x21, 0xca, 0x74, 0x5c, 0x6f, 0xef,
+ 0xf5, 0xf8, 0x2e, 0x93, 0xf7, 0xab, 0x1b, 0xe5, 0x67, 0xf2, 0xcc, 0x4d, 0x6a, 0xee, 0xd3, 0x7a,
+ 0xb3, 0x1f, 0x4f, 0x27, 0x77, 0x62, 0xbd, 0xdf, 0x37, 0x5d, 0x1d, 0xfc, 0xfc, 0x3b, 0x00, 0x00,
+ 0xff, 0xff, 0x29, 0xd3, 0x39, 0x3c, 0xfe, 0x03, 0x00, 0x00,
}
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/inter_container/inter_container.pb.go b/vendor/github.com/opencord/voltha-protos/v4/go/inter_container/inter_container.pb.go
index 04f2b14..403d9e6 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/inter_container/inter_container.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v4/go/inter_container/inter_container.pb.go
@@ -10,6 +10,7 @@
timestamp "github.com/golang/protobuf/ptypes/timestamp"
common "github.com/opencord/voltha-protos/v4/go/common"
openflow_13 "github.com/opencord/voltha-protos/v4/go/openflow_13"
+ tech_profile "github.com/opencord/voltha-protos/v4/go/tech_profile"
voltha "github.com/opencord/voltha-protos/v4/go/voltha"
math "math"
)
@@ -85,6 +86,7 @@
const OperStatus_ACTIVE = OperStatus_Types(common.OperStatus_ACTIVE)
const OperStatus_FAILED = OperStatus_Types(common.OperStatus_FAILED)
const OperStatus_RECONCILING = OperStatus_Types(common.OperStatus_RECONCILING)
+const OperStatus_RECONCILING_FAILED = OperStatus_Types(common.OperStatus_RECONCILING_FAILED)
// ConnectStatus_Types from public import voltha_protos/common.proto
type ConnectStatus_Types = common.ConnectStatus_Types
@@ -1130,21 +1132,100 @@
return nil
}
-type InterAdapterTechProfileDownloadMessage struct {
- UniId uint32 `protobuf:"varint,1,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
- Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
+type InterAdapterTechProfileInstanceRequestMessage struct {
+ TpInstancePath string `protobuf:"bytes,1,opt,name=tp_instance_path,json=tpInstancePath,proto3" json:"tp_instance_path,omitempty"`
+ ParentDeviceId string `protobuf:"bytes,2,opt,name=parent_device_id,json=parentDeviceId,proto3" json:"parent_device_id,omitempty"`
+ ParentPonPort uint32 `protobuf:"varint,3,opt,name=parent_pon_port,json=parentPonPort,proto3" json:"parent_pon_port,omitempty"`
+ OnuId uint32 `protobuf:"varint,4,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+ UniId uint32 `protobuf:"varint,5,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
+func (m *InterAdapterTechProfileInstanceRequestMessage) Reset() {
+ *m = InterAdapterTechProfileInstanceRequestMessage{}
+}
+func (m *InterAdapterTechProfileInstanceRequestMessage) String() string {
+ return proto.CompactTextString(m)
+}
+func (*InterAdapterTechProfileInstanceRequestMessage) ProtoMessage() {}
+func (*InterAdapterTechProfileInstanceRequestMessage) Descriptor() ([]byte, []int) {
+ return fileDescriptor_941f0031a549667f, []int{16}
+}
+
+func (m *InterAdapterTechProfileInstanceRequestMessage) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_InterAdapterTechProfileInstanceRequestMessage.Unmarshal(m, b)
+}
+func (m *InterAdapterTechProfileInstanceRequestMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_InterAdapterTechProfileInstanceRequestMessage.Marshal(b, m, deterministic)
+}
+func (m *InterAdapterTechProfileInstanceRequestMessage) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_InterAdapterTechProfileInstanceRequestMessage.Merge(m, src)
+}
+func (m *InterAdapterTechProfileInstanceRequestMessage) XXX_Size() int {
+ return xxx_messageInfo_InterAdapterTechProfileInstanceRequestMessage.Size(m)
+}
+func (m *InterAdapterTechProfileInstanceRequestMessage) XXX_DiscardUnknown() {
+ xxx_messageInfo_InterAdapterTechProfileInstanceRequestMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InterAdapterTechProfileInstanceRequestMessage proto.InternalMessageInfo
+
+func (m *InterAdapterTechProfileInstanceRequestMessage) GetTpInstancePath() string {
+ if m != nil {
+ return m.TpInstancePath
+ }
+ return ""
+}
+
+func (m *InterAdapterTechProfileInstanceRequestMessage) GetParentDeviceId() string {
+ if m != nil {
+ return m.ParentDeviceId
+ }
+ return ""
+}
+
+func (m *InterAdapterTechProfileInstanceRequestMessage) GetParentPonPort() uint32 {
+ if m != nil {
+ return m.ParentPonPort
+ }
+ return 0
+}
+
+func (m *InterAdapterTechProfileInstanceRequestMessage) GetOnuId() uint32 {
+ if m != nil {
+ return m.OnuId
+ }
+ return 0
+}
+
+func (m *InterAdapterTechProfileInstanceRequestMessage) GetUniId() uint32 {
+ if m != nil {
+ return m.UniId
+ }
+ return 0
+}
+
+type InterAdapterTechProfileDownloadMessage struct {
+ UniId uint32 `protobuf:"varint,1,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
+ TpInstancePath string `protobuf:"bytes,2,opt,name=tp_instance_path,json=tpInstancePath,proto3" json:"tp_instance_path,omitempty"`
+ // Types that are valid to be assigned to TechTpInstance:
+ // *InterAdapterTechProfileDownloadMessage_TpInstance
+ // *InterAdapterTechProfileDownloadMessage_EponTpInstance
+ TechTpInstance isInterAdapterTechProfileDownloadMessage_TechTpInstance `protobuf_oneof:"tech_tp_instance"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
func (m *InterAdapterTechProfileDownloadMessage) Reset() {
*m = InterAdapterTechProfileDownloadMessage{}
}
func (m *InterAdapterTechProfileDownloadMessage) String() string { return proto.CompactTextString(m) }
func (*InterAdapterTechProfileDownloadMessage) ProtoMessage() {}
func (*InterAdapterTechProfileDownloadMessage) Descriptor() ([]byte, []int) {
- return fileDescriptor_941f0031a549667f, []int{16}
+ return fileDescriptor_941f0031a549667f, []int{17}
}
func (m *InterAdapterTechProfileDownloadMessage) XXX_Unmarshal(b []byte) error {
@@ -1172,16 +1253,63 @@
return 0
}
-func (m *InterAdapterTechProfileDownloadMessage) GetPath() string {
+func (m *InterAdapterTechProfileDownloadMessage) GetTpInstancePath() string {
if m != nil {
- return m.Path
+ return m.TpInstancePath
}
return ""
}
+type isInterAdapterTechProfileDownloadMessage_TechTpInstance interface {
+ isInterAdapterTechProfileDownloadMessage_TechTpInstance()
+}
+
+type InterAdapterTechProfileDownloadMessage_TpInstance struct {
+ TpInstance *tech_profile.TechProfileInstance `protobuf:"bytes,3,opt,name=tp_instance,json=tpInstance,proto3,oneof"`
+}
+
+type InterAdapterTechProfileDownloadMessage_EponTpInstance struct {
+ EponTpInstance *tech_profile.EponTechProfileInstance `protobuf:"bytes,4,opt,name=epon_tp_instance,json=eponTpInstance,proto3,oneof"`
+}
+
+func (*InterAdapterTechProfileDownloadMessage_TpInstance) isInterAdapterTechProfileDownloadMessage_TechTpInstance() {
+}
+
+func (*InterAdapterTechProfileDownloadMessage_EponTpInstance) isInterAdapterTechProfileDownloadMessage_TechTpInstance() {
+}
+
+func (m *InterAdapterTechProfileDownloadMessage) GetTechTpInstance() isInterAdapterTechProfileDownloadMessage_TechTpInstance {
+ if m != nil {
+ return m.TechTpInstance
+ }
+ return nil
+}
+
+func (m *InterAdapterTechProfileDownloadMessage) GetTpInstance() *tech_profile.TechProfileInstance {
+ if x, ok := m.GetTechTpInstance().(*InterAdapterTechProfileDownloadMessage_TpInstance); ok {
+ return x.TpInstance
+ }
+ return nil
+}
+
+func (m *InterAdapterTechProfileDownloadMessage) GetEponTpInstance() *tech_profile.EponTechProfileInstance {
+ if x, ok := m.GetTechTpInstance().(*InterAdapterTechProfileDownloadMessage_EponTpInstance); ok {
+ return x.EponTpInstance
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*InterAdapterTechProfileDownloadMessage) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*InterAdapterTechProfileDownloadMessage_TpInstance)(nil),
+ (*InterAdapterTechProfileDownloadMessage_EponTpInstance)(nil),
+ }
+}
+
type InterAdapterDeleteGemPortMessage struct {
UniId uint32 `protobuf:"varint,1,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
- TpPath string `protobuf:"bytes,2,opt,name=tp_path,json=tpPath,proto3" json:"tp_path,omitempty"`
+ TpInstancePath string `protobuf:"bytes,2,opt,name=tp_instance_path,json=tpInstancePath,proto3" json:"tp_instance_path,omitempty"`
GemPortId uint32 `protobuf:"varint,3,opt,name=gem_port_id,json=gemPortId,proto3" json:"gem_port_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@@ -1192,7 +1320,7 @@
func (m *InterAdapterDeleteGemPortMessage) String() string { return proto.CompactTextString(m) }
func (*InterAdapterDeleteGemPortMessage) ProtoMessage() {}
func (*InterAdapterDeleteGemPortMessage) Descriptor() ([]byte, []int) {
- return fileDescriptor_941f0031a549667f, []int{17}
+ return fileDescriptor_941f0031a549667f, []int{18}
}
func (m *InterAdapterDeleteGemPortMessage) XXX_Unmarshal(b []byte) error {
@@ -1220,9 +1348,9 @@
return 0
}
-func (m *InterAdapterDeleteGemPortMessage) GetTpPath() string {
+func (m *InterAdapterDeleteGemPortMessage) GetTpInstancePath() string {
if m != nil {
- return m.TpPath
+ return m.TpInstancePath
}
return ""
}
@@ -1236,7 +1364,7 @@
type InterAdapterDeleteTcontMessage struct {
UniId uint32 `protobuf:"varint,1,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
- TpPath string `protobuf:"bytes,2,opt,name=tp_path,json=tpPath,proto3" json:"tp_path,omitempty"`
+ TpInstancePath string `protobuf:"bytes,2,opt,name=tp_instance_path,json=tpInstancePath,proto3" json:"tp_instance_path,omitempty"`
AllocId uint32 `protobuf:"varint,3,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@@ -1247,7 +1375,7 @@
func (m *InterAdapterDeleteTcontMessage) String() string { return proto.CompactTextString(m) }
func (*InterAdapterDeleteTcontMessage) ProtoMessage() {}
func (*InterAdapterDeleteTcontMessage) Descriptor() ([]byte, []int) {
- return fileDescriptor_941f0031a549667f, []int{18}
+ return fileDescriptor_941f0031a549667f, []int{19}
}
func (m *InterAdapterDeleteTcontMessage) XXX_Unmarshal(b []byte) error {
@@ -1275,9 +1403,9 @@
return 0
}
-func (m *InterAdapterDeleteTcontMessage) GetTpPath() string {
+func (m *InterAdapterDeleteTcontMessage) GetTpInstancePath() string {
if m != nil {
- return m.TpPath
+ return m.TpInstancePath
}
return ""
}
@@ -1304,7 +1432,7 @@
func (m *InterAdapterResponseBody) String() string { return proto.CompactTextString(m) }
func (*InterAdapterResponseBody) ProtoMessage() {}
func (*InterAdapterResponseBody) Descriptor() ([]byte, []int) {
- return fileDescriptor_941f0031a549667f, []int{19}
+ return fileDescriptor_941f0031a549667f, []int{20}
}
func (m *InterAdapterResponseBody) XXX_Unmarshal(b []byte) error {
@@ -1389,7 +1517,7 @@
func (m *InterAdapterMessage) String() string { return proto.CompactTextString(m) }
func (*InterAdapterMessage) ProtoMessage() {}
func (*InterAdapterMessage) Descriptor() ([]byte, []int) {
- return fileDescriptor_941f0031a549667f, []int{20}
+ return fileDescriptor_941f0031a549667f, []int{21}
}
func (m *InterAdapterMessage) XXX_Unmarshal(b []byte) error {
@@ -1444,6 +1572,7 @@
proto.RegisterType((*InterAdapterMessageType)(nil), "voltha.InterAdapterMessageType")
proto.RegisterType((*InterAdapterHeader)(nil), "voltha.InterAdapterHeader")
proto.RegisterType((*InterAdapterOmciMessage)(nil), "voltha.InterAdapterOmciMessage")
+ proto.RegisterType((*InterAdapterTechProfileInstanceRequestMessage)(nil), "voltha.InterAdapterTechProfileInstanceRequestMessage")
proto.RegisterType((*InterAdapterTechProfileDownloadMessage)(nil), "voltha.InterAdapterTechProfileDownloadMessage")
proto.RegisterType((*InterAdapterDeleteGemPortMessage)(nil), "voltha.InterAdapterDeleteGemPortMessage")
proto.RegisterType((*InterAdapterDeleteTcontMessage)(nil), "voltha.InterAdapterDeleteTcontMessage")
@@ -1456,88 +1585,97 @@
}
var fileDescriptor_941f0031a549667f = []byte{
- // 1328 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x72, 0xdb, 0x44,
- 0x14, 0xae, 0xff, 0xed, 0xe3, 0xc4, 0x75, 0x37, 0x4d, 0xe3, 0x24, 0xfd, 0x09, 0xa2, 0x94, 0xd0,
- 0x82, 0x33, 0xb8, 0x30, 0xc0, 0x15, 0x38, 0xb6, 0xda, 0x68, 0xc6, 0xb1, 0x8d, 0xec, 0xb4, 0x0c,
- 0xc3, 0x8c, 0x46, 0x91, 0x36, 0xb6, 0x26, 0xb2, 0x56, 0x5d, 0xad, 0x53, 0x74, 0xc3, 0x0c, 0x77,
- 0xbc, 0x04, 0x33, 0x5c, 0xf1, 0x0e, 0xbc, 0x06, 0x4f, 0xc4, 0xec, 0x8f, 0x6c, 0xd9, 0x6d, 0xe8,
- 0x50, 0xee, 0x74, 0xce, 0xf7, 0xed, 0x39, 0xda, 0x73, 0xf6, 0x7c, 0xbb, 0xf0, 0xe1, 0x15, 0xf1,
- 0xd9, 0xd4, 0xb6, 0x42, 0x4a, 0x18, 0x89, 0x8e, 0xbc, 0x80, 0x61, 0x6a, 0x39, 0x24, 0x60, 0xb6,
- 0x17, 0x60, 0xda, 0x14, 0x6e, 0x54, 0x94, 0xa4, 0xbd, 0xbd, 0x55, 0xb2, 0x43, 0x66, 0x33, 0x12,
- 0x48, 0xce, 0x3a, 0x26, 0x2d, 0x85, 0xed, 0x4e, 0x08, 0x99, 0xf8, 0xf8, 0x48, 0x58, 0xe7, 0xf3,
- 0x8b, 0x23, 0x3b, 0x88, 0x15, 0xf4, 0x60, 0x75, 0x19, 0x09, 0x71, 0x70, 0xe1, 0x93, 0xd7, 0xd6,
- 0xe7, 0x4f, 0x15, 0x41, 0x5b, 0x25, 0xf8, 0x64, 0xe2, 0x39, 0xb6, 0x6f, 0xb9, 0xf8, 0xca, 0x73,
- 0x70, 0x12, 0x64, 0x3d, 0x3e, 0xf3, 0x66, 0x38, 0x62, 0xf6, 0x2c, 0x94, 0x04, 0x6d, 0x1f, 0x4a,
- 0x23, 0x46, 0xc7, 0x71, 0x88, 0x51, 0x1d, 0x72, 0x57, 0xb6, 0xdf, 0xc8, 0x1c, 0x64, 0x0e, 0x2b,
- 0x26, 0xff, 0xe4, 0xa0, 0x11, 0xb0, 0x75, 0x30, 0x27, 0xc1, 0xbb, 0x50, 0x3e, 0x26, 0xc4, 0x5f,
- 0x47, 0xcb, 0x12, 0xd5, 0xa0, 0x38, 0xb4, 0x9d, 0x4b, 0xcc, 0x50, 0x03, 0x4a, 0xa1, 0x1d, 0xfb,
- 0xc4, 0x76, 0x05, 0xbe, 0x61, 0x26, 0xa6, 0xf6, 0x13, 0x54, 0x74, 0x4a, 0x09, 0xed, 0x10, 0x17,
- 0x6b, 0x03, 0x28, 0x38, 0xc4, 0xc5, 0x11, 0xda, 0x81, 0xad, 0xb3, 0xfe, 0xe8, 0x6c, 0x38, 0x1c,
- 0x98, 0x63, 0xbd, 0x6b, 0x99, 0xfa, 0xf7, 0x67, 0xfa, 0x68, 0x5c, 0xbf, 0x81, 0xee, 0x00, 0x32,
- 0xfa, 0x2f, 0xda, 0x3d, 0xa3, 0x6b, 0x0d, 0xdb, 0x66, 0xfb, 0x54, 0x1f, 0xeb, 0xe6, 0xa8, 0x9e,
- 0x41, 0xdb, 0x70, 0xab, 0xab, 0xb7, 0xbb, 0x3d, 0xa3, 0xaf, 0x5b, 0xfa, 0x0f, 0x1d, 0x5d, 0xef,
- 0xea, 0xdd, 0x7a, 0x56, 0xeb, 0x41, 0x41, 0x44, 0x47, 0x4f, 0x20, 0xcf, 0x23, 0x8b, 0xec, 0xb5,
- 0xd6, 0x4e, 0x53, 0x35, 0x60, 0x91, 0xba, 0x29, 0xf2, 0x9a, 0x82, 0x84, 0xee, 0x40, 0x91, 0x62,
- 0x3b, 0x22, 0x41, 0x23, 0x2b, 0xea, 0xa0, 0x2c, 0xed, 0xef, 0x0c, 0x14, 0x4f, 0xb0, 0xed, 0x62,
- 0x8a, 0x6a, 0x90, 0xf5, 0x5c, 0x55, 0xa6, 0xac, 0xe7, 0xa2, 0x8f, 0x21, 0xcf, 0xe2, 0x10, 0x8b,
- 0x05, 0xb5, 0xd6, 0x56, 0x12, 0xff, 0x14, 0x47, 0x91, 0x3d, 0xc1, 0xbc, 0x3e, 0xa6, 0x20, 0xa0,
- 0x7b, 0x00, 0x17, 0x94, 0xcc, 0x2c, 0x46, 0x42, 0xcf, 0x69, 0xe4, 0x44, 0x80, 0x0a, 0xf7, 0x8c,
- 0xb9, 0x03, 0xed, 0x42, 0x99, 0x11, 0x05, 0xe6, 0x05, 0x58, 0x62, 0x44, 0x42, 0xfb, 0x50, 0xb9,
- 0xc4, 0xb1, 0xc2, 0x0a, 0x02, 0x2b, 0x5f, 0xe2, 0x58, 0x82, 0x5f, 0x43, 0x65, 0xd1, 0xd5, 0x46,
- 0xf1, 0x20, 0x73, 0x58, 0x6d, 0xed, 0x35, 0x65, 0xdf, 0x9b, 0x49, 0xdf, 0x9b, 0xe3, 0x84, 0x61,
- 0x2e, 0xc9, 0xda, 0x09, 0x94, 0xdb, 0x74, 0x32, 0x9f, 0xe1, 0x80, 0xf1, 0x16, 0x5e, 0xe2, 0x38,
- 0xe9, 0xfe, 0x25, 0x8e, 0xd1, 0x63, 0x28, 0x5c, 0xd9, 0xfe, 0x5c, 0x6e, 0xac, 0xda, 0xba, 0xfd,
- 0x46, 0xcc, 0x76, 0x10, 0x9b, 0x92, 0xa2, 0x79, 0xb0, 0x6d, 0xf0, 0x01, 0xe9, 0x24, 0xf3, 0xa1,
- 0x76, 0x8f, 0x1e, 0x41, 0x71, 0x2a, 0xca, 0x26, 0x22, 0x57, 0x5b, 0xb5, 0xa4, 0x3c, 0xb2, 0x98,
- 0xa6, 0x42, 0xd1, 0x21, 0xe4, 0xcf, 0x89, 0x1b, 0xff, 0x6b, 0x2e, 0xc1, 0xd0, 0xfe, 0xcc, 0xc0,
- 0xee, 0x6a, 0x2e, 0x13, 0xbf, 0x9a, 0xe3, 0x88, 0x1d, 0x13, 0x37, 0xe6, 0xdb, 0xa0, 0xa1, 0xa3,
- 0x9a, 0xc7, 0x3f, 0xd1, 0x43, 0xc8, 0xdb, 0x74, 0x12, 0x35, 0x72, 0x07, 0xb9, 0xc3, 0x6a, 0xab,
- 0x9e, 0xe4, 0x4f, 0x36, 0x6e, 0x0a, 0x14, 0x3d, 0x81, 0x5b, 0x14, 0x47, 0x21, 0x09, 0x22, 0x6c,
- 0x51, 0xfc, 0x6a, 0xee, 0x51, 0xec, 0x8a, 0x2e, 0x94, 0xcd, 0x7a, 0x02, 0x98, 0xca, 0x8f, 0x1e,
- 0x42, 0x8d, 0xe2, 0xd0, 0xe7, 0x0d, 0x59, 0xe9, 0xc9, 0x86, 0xf0, 0x8e, 0x65, 0xd3, 0x34, 0x17,
- 0xf6, 0xd6, 0xff, 0x53, 0xc6, 0x11, 0x3f, 0xda, 0x80, 0x52, 0x34, 0x77, 0x1c, 0x1c, 0x45, 0x6a,
- 0x6c, 0x12, 0x13, 0x7d, 0xca, 0x8f, 0x60, 0x34, 0xf7, 0x99, 0x38, 0x22, 0xd7, 0x15, 0x43, 0x71,
- 0xb4, 0xdf, 0x32, 0x50, 0x1f, 0xbd, 0xf6, 0x98, 0x33, 0xed, 0xd8, 0xa1, 0x7d, 0xee, 0xf9, 0x1e,
- 0x8b, 0xd1, 0x27, 0x90, 0x77, 0x71, 0xe4, 0xa8, 0x9a, 0x6f, 0x37, 0xd3, 0xe2, 0x41, 0x2e, 0x42,
- 0x8b, 0x83, 0xa6, 0xa0, 0x20, 0x03, 0x6e, 0x46, 0x62, 0xb9, 0x75, 0x81, 0x6d, 0x36, 0xa7, 0x38,
- 0x52, 0x3d, 0x38, 0x78, 0x63, 0xd5, 0x1a, 0xcf, 0xac, 0x49, 0xc7, 0x33, 0x65, 0x6b, 0xbf, 0x40,
- 0xbd, 0x2b, 0xc4, 0xa7, 0xeb, 0x45, 0x0e, 0xb9, 0xc2, 0xbc, 0x54, 0xeb, 0xc3, 0xb2, 0x0f, 0x95,
- 0xd0, 0xa6, 0x38, 0x60, 0x96, 0xe7, 0xaa, 0x2e, 0x95, 0xa5, 0xc3, 0x70, 0xd1, 0x03, 0xa8, 0x4a,
- 0xf5, 0xb2, 0xc4, 0x40, 0xc9, 0x09, 0x01, 0xe9, 0x12, 0x3a, 0x73, 0x17, 0x2a, 0xe1, 0xfc, 0xdc,
- 0xf7, 0xa2, 0x29, 0xa6, 0x6a, 0x46, 0x96, 0x0e, 0xed, 0xf7, 0x2c, 0xec, 0x88, 0x8a, 0xb7, 0x5d,
- 0x3b, 0x64, 0x8b, 0x33, 0xc8, 0x57, 0x6a, 0xbf, 0x66, 0xa1, 0xc0, 0x3f, 0x22, 0x54, 0x87, 0x8d,
- 0x67, 0xbd, 0xc1, 0xcb, 0x94, 0xb0, 0xdc, 0x82, 0x4d, 0xe5, 0x19, 0x0d, 0x07, 0xfd, 0x91, 0x5e,
- 0xcf, 0x70, 0xd2, 0xe0, 0xb4, 0x63, 0x2c, 0x48, 0x59, 0x4e, 0x52, 0x1e, 0x45, 0xca, 0xa1, 0x2d,
- 0xb8, 0x79, 0xaa, 0x8f, 0x4d, 0xa3, 0x33, 0x5a, 0xf0, 0xf2, 0xe8, 0x36, 0xd4, 0x97, 0x4e, 0x45,
- 0x2d, 0x70, 0xea, 0xa0, 0x7f, 0x66, 0x19, 0xfd, 0xa5, 0xa0, 0x15, 0x39, 0x75, 0xe9, 0x54, 0xd4,
- 0x12, 0xfa, 0x00, 0xee, 0x8d, 0xf5, 0xce, 0x89, 0x35, 0x34, 0x07, 0xcf, 0x8c, 0x9e, 0x6e, 0x75,
- 0x07, 0x2f, 0xfb, 0xbd, 0x41, 0x7b, 0xb9, 0xb0, 0x8c, 0xf6, 0x61, 0xa7, 0xab, 0xf7, 0xf4, 0xb1,
- 0x6e, 0x3d, 0xd7, 0x4f, 0x2d, 0x2e, 0x94, 0x0b, 0xb0, 0x82, 0x1a, 0x70, 0x5b, 0x81, 0xe3, 0xce,
- 0xa0, 0xbf, 0x44, 0x80, 0xd7, 0x07, 0xa5, 0xeb, 0x73, 0x8d, 0x9e, 0x7d, 0xb3, 0xa2, 0x67, 0x1f,
- 0x25, 0x03, 0x73, 0x4d, 0x65, 0x9b, 0xa2, 0xaa, 0xff, 0x5b, 0xe1, 0x0e, 0x60, 0x83, 0x11, 0x75,
- 0x77, 0xf1, 0xa3, 0x21, 0x07, 0x0a, 0x18, 0x91, 0x27, 0xca, 0x70, 0xd1, 0x23, 0xb8, 0x19, 0x52,
- 0xf2, 0x73, 0x9c, 0x22, 0x15, 0x05, 0x69, 0x53, 0xb8, 0x17, 0xbc, 0x15, 0x39, 0x2c, 0xfd, 0x17,
- 0x39, 0xfc, 0x2b, 0xb3, 0x7a, 0x7e, 0x06, 0x33, 0xc7, 0x4b, 0x74, 0xac, 0x01, 0xa5, 0x99, 0xfc,
- 0x4c, 0x6e, 0x31, 0x65, 0xa2, 0x63, 0xa8, 0x39, 0x24, 0x08, 0xb0, 0xc3, 0xac, 0x88, 0xd9, 0x6c,
- 0x1e, 0xa9, 0xc2, 0xed, 0x37, 0xd5, 0x2b, 0xa0, 0x23, 0xd1, 0x91, 0x00, 0x55, 0xb9, 0x36, 0x9d,
- 0xb4, 0x13, 0x7d, 0x07, 0x72, 0x13, 0x96, 0xed, 0xba, 0x94, 0x4b, 0x82, 0x9c, 0xfc, 0xfd, 0xa4,
- 0xf6, 0x72, 0x73, 0xcd, 0x21, 0xe7, 0xb4, 0x25, 0xc5, 0xdc, 0x08, 0x53, 0x96, 0x36, 0x82, 0x47,
- 0xe9, 0x5f, 0x1f, 0x63, 0x67, 0x3a, 0xa4, 0xe4, 0xc2, 0xf3, 0x71, 0x97, 0xbc, 0x0e, 0xf8, 0x75,
- 0x9b, 0xec, 0x64, 0x1b, 0x8a, 0xf3, 0xc0, 0xb3, 0x54, 0xcb, 0x37, 0xcd, 0xc2, 0x3c, 0xf0, 0x0c,
- 0x17, 0x21, 0xc8, 0x87, 0x36, 0x9b, 0xaa, 0x99, 0x14, 0xdf, 0x1a, 0x85, 0x83, 0x74, 0xd0, 0x2e,
- 0xf6, 0x31, 0xc3, 0xcf, 0xf1, 0x6c, 0x48, 0x28, 0x7b, 0x47, 0xb8, 0x1d, 0x28, 0xb1, 0xd0, 0x4a,
- 0x45, 0x2c, 0xb2, 0x70, 0x68, 0xb3, 0x29, 0xba, 0x0f, 0xd5, 0x09, 0x9e, 0x59, 0x21, 0xa1, 0x42,
- 0x02, 0x72, 0x62, 0x51, 0x65, 0x22, 0x83, 0x1a, 0xae, 0x76, 0x09, 0xf7, 0xdf, 0xcc, 0x39, 0xe6,
- 0xef, 0xae, 0xf7, 0xcd, 0xb8, 0x0b, 0x65, 0xdb, 0xf7, 0x89, 0xb3, 0x4c, 0x57, 0x12, 0xb6, 0xe1,
- 0x6a, 0x7f, 0x64, 0xa0, 0x91, 0xce, 0xb6, 0xa2, 0xd0, 0x77, 0xa0, 0xa8, 0x1a, 0x2a, 0x05, 0x5a,
- 0x59, 0xe8, 0xf1, 0xbb, 0xaf, 0xaa, 0x93, 0x1b, 0xf2, 0xb2, 0x42, 0x5f, 0x42, 0x9e, 0xcc, 0x1c,
- 0x4f, 0xf5, 0xf3, 0xc1, 0xdb, 0x66, 0x29, 0x75, 0xca, 0xf8, 0x32, 0x4e, 0x3f, 0xae, 0x2c, 0xde,
- 0x4c, 0x5a, 0x04, 0x5b, 0x6f, 0x99, 0x3c, 0xd4, 0x5a, 0xbb, 0x57, 0xf7, 0xde, 0x16, 0xfa, 0x7d,
- 0xef, 0xd8, 0xc7, 0xdf, 0x42, 0x35, 0x35, 0xe2, 0xa8, 0x0a, 0xa5, 0xa5, 0x5a, 0x6e, 0x40, 0x39,
- 0x25, 0x94, 0xe2, 0xf1, 0xf5, 0xc2, 0xe8, 0xe8, 0x56, 0xd7, 0x18, 0x75, 0x06, 0x2f, 0x74, 0x93,
- 0x3f, 0xbe, 0x8e, 0xfb, 0xb0, 0x45, 0xe8, 0x44, 0xdc, 0x20, 0x0e, 0xa1, 0xae, 0xfa, 0xb9, 0x1f,
- 0xbf, 0x9a, 0x78, 0x6c, 0x3a, 0x3f, 0xe7, 0x93, 0x71, 0x94, 0x60, 0xea, 0x41, 0xfc, 0x59, 0xf2,
- 0x3c, 0xfe, 0xe2, 0x68, 0x42, 0xd6, 0x5f, 0xdb, 0xc3, 0x1b, 0xc3, 0xcc, 0x30, 0x7f, 0x5e, 0x14,
- 0x9c, 0xa7, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x63, 0xbd, 0xf4, 0x0f, 0x9b, 0x0b, 0x00, 0x00,
+ // 1468 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xdd, 0x72, 0xdb, 0x44,
+ 0x14, 0x8e, 0x1d, 0xff, 0x1e, 0x27, 0xae, 0xbb, 0x69, 0x1a, 0x27, 0xe9, 0x4f, 0x2a, 0xda, 0x12,
+ 0x5a, 0xea, 0x0c, 0x29, 0x0c, 0x70, 0x05, 0x8e, 0xad, 0x36, 0x9a, 0x49, 0x6c, 0x55, 0x56, 0x5a,
+ 0x86, 0x61, 0x46, 0xa3, 0x48, 0x1b, 0x5b, 0x13, 0x59, 0xab, 0x4a, 0xab, 0x14, 0xcf, 0x30, 0xcc,
+ 0x30, 0xdc, 0xf0, 0x04, 0xdc, 0x31, 0xc3, 0x15, 0xef, 0xc0, 0x6b, 0x70, 0xc7, 0xdb, 0x30, 0xfb,
+ 0x23, 0x5b, 0x76, 0x13, 0x18, 0xa0, 0x77, 0xda, 0xf3, 0x7d, 0xfb, 0xed, 0xee, 0x39, 0x7b, 0xce,
+ 0x1e, 0xc1, 0x7b, 0x17, 0xc4, 0xa7, 0x23, 0xdb, 0x0a, 0x23, 0x42, 0x49, 0xbc, 0xe7, 0x05, 0x14,
+ 0x47, 0x96, 0x43, 0x02, 0x6a, 0x7b, 0x01, 0x8e, 0x5a, 0xdc, 0x8c, 0x4a, 0x82, 0xb4, 0xb5, 0x35,
+ 0x4f, 0x76, 0xc8, 0x78, 0x4c, 0x02, 0xc1, 0x59, 0xc4, 0xc4, 0x48, 0x62, 0x9b, 0x43, 0x42, 0x86,
+ 0x3e, 0xde, 0xe3, 0xa3, 0xd3, 0xe4, 0x6c, 0xcf, 0x0e, 0x26, 0x12, 0xba, 0x3b, 0x3f, 0x8d, 0x84,
+ 0x38, 0x38, 0xf3, 0xc9, 0x1b, 0xeb, 0xa3, 0xa7, 0x92, 0xa0, 0xcc, 0x13, 0x7c, 0x32, 0xf4, 0x1c,
+ 0xdb, 0xb7, 0x5c, 0x7c, 0xe1, 0x39, 0x38, 0x15, 0x59, 0xd4, 0xa7, 0xde, 0x18, 0xc7, 0xd4, 0x1e,
+ 0x87, 0x92, 0xb0, 0x33, 0x2f, 0x42, 0xb1, 0x33, 0x62, 0xdf, 0x67, 0x9e, 0x2f, 0x25, 0x94, 0x6d,
+ 0x28, 0x0f, 0x68, 0x64, 0x4e, 0x42, 0x8c, 0x1a, 0xb0, 0x7c, 0x61, 0xfb, 0xcd, 0xdc, 0x4e, 0x6e,
+ 0xb7, 0x6a, 0xb0, 0x4f, 0x06, 0x6a, 0x01, 0x5d, 0x04, 0x97, 0x05, 0x78, 0x0b, 0x2a, 0x07, 0x84,
+ 0xf8, 0x8b, 0x68, 0x45, 0xa0, 0x0a, 0x94, 0x74, 0xdb, 0x39, 0xc7, 0x14, 0x35, 0xa1, 0x1c, 0xda,
+ 0x13, 0x9f, 0xd8, 0x2e, 0xc7, 0x57, 0x8c, 0x74, 0xa8, 0x7c, 0x03, 0x55, 0x35, 0x8a, 0x48, 0xd4,
+ 0x21, 0x2e, 0x56, 0xfa, 0x50, 0x74, 0x88, 0x8b, 0x63, 0xb4, 0x01, 0x6b, 0x27, 0xbd, 0xc1, 0x89,
+ 0xae, 0xf7, 0x0d, 0x53, 0xed, 0x5a, 0x86, 0xfa, 0xe2, 0x44, 0x1d, 0x98, 0x8d, 0x25, 0x74, 0x13,
+ 0x90, 0xd6, 0x7b, 0xd9, 0x3e, 0xd2, 0xba, 0x96, 0xde, 0x36, 0xda, 0xc7, 0xaa, 0xa9, 0x1a, 0x83,
+ 0x46, 0x0e, 0xad, 0xc3, 0xf5, 0xae, 0xda, 0xee, 0x1e, 0x69, 0x3d, 0xd5, 0x52, 0xbf, 0xea, 0xa8,
+ 0x6a, 0x57, 0xed, 0x36, 0xf2, 0xca, 0x11, 0x14, 0xb9, 0x3a, 0x7a, 0x0c, 0x05, 0xa6, 0xcc, 0x57,
+ 0xaf, 0xef, 0x6f, 0xb4, 0x64, 0x88, 0xa6, 0x4b, 0xb7, 0xf8, 0xba, 0x06, 0x27, 0xa1, 0x9b, 0x50,
+ 0x8a, 0xb0, 0x1d, 0x93, 0xa0, 0x99, 0xe7, 0x7e, 0x90, 0x23, 0xe5, 0x8f, 0x1c, 0x94, 0x0e, 0xb1,
+ 0xed, 0xe2, 0x08, 0xd5, 0x21, 0xef, 0xb9, 0xd2, 0x4d, 0x79, 0xcf, 0x45, 0xef, 0x43, 0x81, 0x4e,
+ 0x42, 0xcc, 0x27, 0xd4, 0xf7, 0xd7, 0x52, 0xfd, 0x63, 0x1c, 0xc7, 0xf6, 0x10, 0x33, 0xff, 0x18,
+ 0x9c, 0x80, 0x6e, 0x03, 0x9c, 0x45, 0x64, 0x6c, 0x51, 0x12, 0x7a, 0x4e, 0x73, 0x99, 0x0b, 0x54,
+ 0x99, 0xc5, 0x64, 0x06, 0xb4, 0x09, 0x15, 0x4a, 0x24, 0x58, 0xe0, 0x60, 0x99, 0x12, 0x01, 0x6d,
+ 0x43, 0xf5, 0x1c, 0x4f, 0x24, 0x56, 0xe4, 0x58, 0xe5, 0x1c, 0x4f, 0x04, 0xf8, 0x19, 0x54, 0xa7,
+ 0x71, 0x6f, 0x96, 0x76, 0x72, 0xbb, 0xb5, 0xfd, 0xad, 0x96, 0xb8, 0x19, 0xad, 0xf4, 0x66, 0xb4,
+ 0xcc, 0x94, 0x61, 0xcc, 0xc8, 0xca, 0x21, 0x54, 0xda, 0xd1, 0x30, 0x19, 0xe3, 0x80, 0xb2, 0x10,
+ 0x9e, 0xe3, 0x49, 0x1a, 0xfd, 0x73, 0x3c, 0x41, 0x8f, 0xa0, 0x78, 0x61, 0xfb, 0x89, 0x38, 0x58,
+ 0x6d, 0xff, 0xc6, 0x5b, 0x9a, 0xed, 0x60, 0x62, 0x08, 0x8a, 0xe2, 0xc1, 0xba, 0xc6, 0x52, 0xa8,
+ 0x93, 0x66, 0x90, 0x3c, 0x3d, 0x7a, 0x08, 0xa5, 0x11, 0x77, 0x1b, 0x57, 0xae, 0xed, 0xd7, 0x53,
+ 0xf7, 0x08, 0x67, 0x1a, 0x12, 0x45, 0xbb, 0x50, 0x38, 0x25, 0xee, 0xe4, 0x6f, 0xd7, 0xe2, 0x0c,
+ 0xe5, 0xb7, 0x1c, 0x6c, 0xce, 0xaf, 0x65, 0xe0, 0xd7, 0x09, 0x8e, 0xe9, 0x01, 0x71, 0x27, 0xec,
+ 0x18, 0x51, 0xe8, 0xc8, 0xe0, 0xb1, 0x4f, 0x74, 0x1f, 0x0a, 0x76, 0x34, 0x8c, 0x9b, 0xcb, 0x3b,
+ 0xcb, 0xbb, 0xb5, 0xfd, 0x46, 0xba, 0x7e, 0x7a, 0x70, 0x83, 0xa3, 0xe8, 0x31, 0x5c, 0x8f, 0x70,
+ 0x1c, 0x92, 0x20, 0xc6, 0x56, 0x84, 0x5f, 0x27, 0x5e, 0x84, 0x5d, 0x1e, 0x85, 0x8a, 0xd1, 0x48,
+ 0x01, 0x43, 0xda, 0xd1, 0x7d, 0xa8, 0x47, 0x38, 0xf4, 0x59, 0x40, 0xe6, 0x62, 0xb2, 0xc2, 0xad,
+ 0xa6, 0x08, 0x9a, 0xe2, 0xc2, 0xd6, 0xe2, 0x3e, 0x85, 0x0e, 0xdf, 0x68, 0x13, 0xca, 0x71, 0xe2,
+ 0x38, 0x38, 0x8e, 0x65, 0xda, 0xa4, 0x43, 0xf4, 0x21, 0xbb, 0x82, 0x71, 0xe2, 0x53, 0x7e, 0x45,
+ 0xae, 0x72, 0x86, 0xe4, 0x28, 0x3f, 0xe5, 0xa0, 0x31, 0x78, 0xe3, 0x51, 0x67, 0xd4, 0xb1, 0x43,
+ 0xfb, 0xd4, 0xf3, 0x3d, 0x3a, 0x41, 0x1f, 0x40, 0xc1, 0xc5, 0xb1, 0x23, 0x7d, 0xbe, 0xde, 0xca,
+ 0x96, 0x17, 0x72, 0x16, 0x5a, 0x0c, 0x34, 0x38, 0x05, 0x69, 0x70, 0x2d, 0xe6, 0xd3, 0xad, 0x33,
+ 0x6c, 0xd3, 0x24, 0xc2, 0xb1, 0x8c, 0xc1, 0xce, 0x5b, 0xb3, 0x16, 0x78, 0x46, 0x5d, 0x18, 0x9e,
+ 0xc9, 0xb1, 0xf2, 0x3d, 0x34, 0xba, 0xbc, 0x3c, 0x75, 0xbd, 0xd8, 0x21, 0x17, 0x98, 0xb9, 0x6a,
+ 0x31, 0x59, 0xb6, 0xa1, 0x1a, 0xda, 0x11, 0x0e, 0xa8, 0xe5, 0xb9, 0x32, 0x4a, 0x15, 0x61, 0xd0,
+ 0x5c, 0x74, 0x17, 0x6a, 0xa2, 0xbe, 0x59, 0x3c, 0xa1, 0x44, 0x86, 0x80, 0x30, 0xf1, 0x3a, 0x73,
+ 0x0b, 0xaa, 0x61, 0x72, 0xea, 0x7b, 0xf1, 0x08, 0x47, 0x32, 0x47, 0x66, 0x06, 0xe5, 0x97, 0x3c,
+ 0x6c, 0x70, 0x8f, 0xb7, 0x5d, 0x3b, 0xa4, 0xd3, 0x3b, 0xc8, 0x66, 0x2a, 0x3f, 0xe4, 0xa1, 0xc8,
+ 0x3e, 0x62, 0xd4, 0x80, 0x95, 0x67, 0x47, 0xfd, 0x57, 0x99, 0xc2, 0x72, 0x1d, 0x56, 0xa5, 0x65,
+ 0xa0, 0xf7, 0x7b, 0x03, 0xb5, 0x91, 0x63, 0xa4, 0xfe, 0x71, 0x47, 0x9b, 0x92, 0xf2, 0x8c, 0x24,
+ 0x2d, 0x92, 0xb4, 0x8c, 0xd6, 0xe0, 0xda, 0xb1, 0x6a, 0x1a, 0x5a, 0x67, 0x30, 0xe5, 0x15, 0xd0,
+ 0x0d, 0x68, 0xcc, 0x8c, 0x92, 0x5a, 0x64, 0xd4, 0x7e, 0xef, 0xc4, 0xd2, 0x7a, 0xb3, 0x82, 0x56,
+ 0x62, 0xd4, 0x99, 0x51, 0x52, 0xcb, 0xe8, 0x1e, 0xdc, 0x36, 0xd5, 0xce, 0xa1, 0xa5, 0x1b, 0xfd,
+ 0x67, 0xda, 0x91, 0x6a, 0x75, 0xfb, 0xaf, 0x7a, 0x47, 0xfd, 0xf6, 0x6c, 0x62, 0x05, 0x6d, 0xc3,
+ 0x46, 0x57, 0x3d, 0x52, 0x4d, 0xd5, 0x7a, 0xae, 0x1e, 0x5b, 0xac, 0x50, 0x4e, 0xc1, 0x2a, 0x6a,
+ 0xc2, 0x0d, 0x09, 0x9a, 0x9d, 0x7e, 0x6f, 0x86, 0x00, 0xf3, 0x0f, 0xca, 0xfa, 0xe7, 0x8a, 0x7a,
+ 0xf6, 0xf9, 0x5c, 0x3d, 0x7b, 0x90, 0x26, 0xcc, 0x15, 0x9e, 0x6d, 0x71, 0xaf, 0xfe, 0xef, 0x0a,
+ 0xb7, 0x03, 0x2b, 0x94, 0xc8, 0xd7, 0x8d, 0x5d, 0x0d, 0x91, 0x50, 0x40, 0x89, 0xb8, 0x51, 0x9a,
+ 0x8b, 0x1e, 0xc2, 0xb5, 0x30, 0x22, 0xdf, 0x4e, 0x32, 0xa4, 0x12, 0x27, 0xad, 0x72, 0xf3, 0x94,
+ 0x37, 0x57, 0x0e, 0xcb, 0xff, 0xa6, 0x1c, 0xfe, 0x9e, 0x9b, 0xbf, 0x3f, 0xfd, 0xb1, 0xe3, 0xa5,
+ 0x75, 0xac, 0x09, 0xe5, 0xb1, 0xf8, 0x4c, 0x5f, 0x31, 0x39, 0x44, 0x07, 0x50, 0x77, 0x48, 0x10,
+ 0x60, 0x87, 0x5a, 0x31, 0xb5, 0x69, 0x12, 0x4b, 0xc7, 0x6d, 0xb7, 0x64, 0x9f, 0xd0, 0x11, 0xe8,
+ 0x80, 0x83, 0xd2, 0x5d, 0xab, 0x4e, 0xd6, 0x88, 0xbe, 0x04, 0x71, 0x08, 0xcb, 0x76, 0xdd, 0x88,
+ 0x95, 0x04, 0x91, 0xf9, 0xdb, 0xa9, 0xef, 0xc5, 0xe1, 0x5a, 0x3a, 0xe3, 0xb4, 0x05, 0xc5, 0x58,
+ 0x09, 0x33, 0x23, 0xe5, 0xcf, 0x1c, 0x3c, 0xc9, 0xee, 0xdd, 0xc4, 0xce, 0x48, 0x17, 0x2f, 0xbd,
+ 0x16, 0xc4, 0xd4, 0x0e, 0x1c, 0x2c, 0xcb, 0x64, 0x7a, 0xa2, 0x5d, 0x68, 0xd0, 0xd0, 0xf2, 0x24,
+ 0x68, 0x85, 0x36, 0x1d, 0xc9, 0x4b, 0x50, 0xa7, 0x61, 0x3a, 0x47, 0xb7, 0xe9, 0x88, 0x31, 0x65,
+ 0xce, 0xce, 0x5c, 0x2f, 0x52, 0xb7, 0x2e, 0xec, 0x73, 0x31, 0x12, 0xcc, 0x90, 0x04, 0x56, 0x48,
+ 0x22, 0x51, 0xc3, 0x56, 0x8d, 0x55, 0x61, 0xd6, 0x49, 0xa0, 0x93, 0x88, 0xa2, 0x75, 0x28, 0x91,
+ 0x20, 0x61, 0x3a, 0x05, 0x0e, 0x17, 0x49, 0x90, 0x68, 0x2e, 0x33, 0x27, 0x81, 0x97, 0x86, 0x7f,
+ 0xd5, 0x28, 0x26, 0x81, 0xa7, 0xb9, 0xca, 0xcf, 0x79, 0x78, 0x78, 0xc5, 0xd9, 0xba, 0xe4, 0x4d,
+ 0xc0, 0x7a, 0x89, 0xf4, 0x50, 0x33, 0x85, 0x5c, 0x46, 0xe1, 0xd2, 0xb3, 0xe6, 0x2f, 0x3d, 0x6b,
+ 0x17, 0x6a, 0x19, 0xa6, 0x8c, 0xc3, 0xbd, 0xd6, 0x5c, 0xe7, 0x74, 0x89, 0x6f, 0x0f, 0x97, 0x0c,
+ 0x98, 0x29, 0xa1, 0x17, 0xd0, 0xc0, 0xcc, 0x03, 0x59, 0xa9, 0x02, 0x97, 0x7a, 0x30, 0x2f, 0xa5,
+ 0x86, 0x24, 0xb8, 0x5c, 0xae, 0xce, 0x04, 0xcc, 0xa9, 0xe4, 0x01, 0x82, 0x06, 0x9f, 0x99, 0x91,
+ 0x54, 0x7e, 0xcc, 0xc1, 0x4e, 0xd6, 0x31, 0x5d, 0xec, 0x63, 0x8a, 0x9f, 0xe3, 0x31, 0x73, 0xf2,
+ 0x3b, 0x73, 0xc9, 0x1d, 0xa8, 0x0d, 0xf1, 0x98, 0x47, 0x93, 0xa9, 0x88, 0x80, 0x56, 0x87, 0x62,
+ 0x15, 0xcd, 0x55, 0xbe, 0x83, 0x3b, 0x6f, 0x6f, 0xc2, 0x64, 0xbd, 0xf4, 0x3b, 0xdb, 0xc2, 0x26,
+ 0x54, 0x6c, 0xdf, 0x27, 0xce, 0x6c, 0xfd, 0x32, 0x1f, 0x6b, 0xae, 0xf2, 0x6b, 0x0e, 0x9a, 0xd9,
+ 0xe5, 0xe7, 0x1e, 0xd9, 0x9b, 0x50, 0x92, 0x39, 0x29, 0xde, 0x58, 0x39, 0x42, 0x8f, 0xfe, 0xb9,
+ 0xdb, 0x38, 0x5c, 0x12, 0xfd, 0x06, 0xfa, 0x04, 0x0a, 0x64, 0xec, 0x78, 0xf2, 0x2a, 0xdc, 0xbd,
+ 0xac, 0x1c, 0x66, 0x0a, 0x05, 0x9b, 0xc6, 0xe8, 0x07, 0xd5, 0x69, 0xdb, 0xab, 0xc4, 0xb0, 0x76,
+ 0x49, 0xf1, 0x44, 0xfb, 0x0b, 0xad, 0xd1, 0xd6, 0x65, 0xd2, 0xff, 0xb5, 0x4d, 0x7a, 0xf4, 0x05,
+ 0xd4, 0x32, 0x55, 0x1a, 0xd5, 0xa0, 0x3c, 0x7b, 0xf0, 0x56, 0xa0, 0x92, 0x79, 0xeb, 0x78, 0xff,
+ 0xfc, 0x52, 0xeb, 0xa8, 0x56, 0x57, 0x1b, 0x74, 0xfa, 0x2f, 0x55, 0x83, 0xf5, 0xcf, 0x07, 0x3d,
+ 0x58, 0x23, 0xd1, 0x90, 0x37, 0x01, 0x0e, 0x89, 0x5c, 0xb9, 0xb9, 0xaf, 0x3f, 0x1d, 0x7a, 0x74,
+ 0x94, 0x9c, 0xb2, 0xe2, 0xb6, 0x97, 0x62, 0xf2, 0xaf, 0xe7, 0x49, 0xfa, 0x0f, 0xf4, 0xf1, 0xde,
+ 0x90, 0x2c, 0xfe, 0x52, 0xe9, 0x4b, 0x7a, 0x4e, 0x2f, 0x9c, 0x96, 0x38, 0xe7, 0xe9, 0x5f, 0x01,
+ 0x00, 0x00, 0xff, 0xff, 0xb3, 0x9d, 0x9e, 0x3e, 0x80, 0x0d, 0x00, 0x00,
}
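
Editor's note: the new inter-container messages above replace the old path-only tech profile download with a request/response pair that carries the full instance in a oneof. The sketch below shows how the two sides might use them; buildTpRequest and extractInstance are illustrative helpers, not part of voltha-protos, and only use fields and getters defined in this patch.

package example

import (
	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
)

// buildTpRequest populates the new request message an ONU adapter could send
// when it needs a tech profile instance from the OLT adapter.
func buildTpRequest(tpPath, oltDeviceID string, ponPort, onuID, uniID uint32) *ic.InterAdapterTechProfileInstanceRequestMessage {
	return &ic.InterAdapterTechProfileInstanceRequestMessage{
		TpInstancePath: tpPath,
		ParentDeviceId: oltDeviceID,
		ParentPonPort:  ponPort,
		OnuId:          onuID,
		UniId:          uniID,
	}
}

// extractInstance picks the technology-specific instance out of the oneof
// carried by the download message, returning false if neither variant is set.
func extractInstance(msg *ic.InterAdapterTechProfileDownloadMessage) (interface{}, bool) {
	if tpInst := msg.GetTpInstance(); tpInst != nil {
		return tpInst, true
	}
	if eponInst := msg.GetEponTpInstance(); eponInst != nil {
		return eponInst, true
	}
	return nil, false
}
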
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/openolt/openolt.pb.go b/vendor/github.com/opencord/voltha-protos/v4/go/openolt/openolt.pb.go
index a7d17e9..2c8af68 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/openolt/openolt.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v4/go/openolt/openolt.pb.go
@@ -61,6 +61,36 @@
// TrafficQueues from public import voltha_protos/tech_profile.proto
type TrafficQueues = tech_profile.TrafficQueues
+// InstanceControl from public import voltha_protos/tech_profile.proto
+type InstanceControl = tech_profile.InstanceControl
+
+// QThresholds from public import voltha_protos/tech_profile.proto
+type QThresholds = tech_profile.QThresholds
+
+// GemPortAttributes from public import voltha_protos/tech_profile.proto
+type GemPortAttributes = tech_profile.GemPortAttributes
+
+// SchedulerAttributes from public import voltha_protos/tech_profile.proto
+type SchedulerAttributes = tech_profile.SchedulerAttributes
+
+// EPONQueueAttributes from public import voltha_protos/tech_profile.proto
+type EPONQueueAttributes = tech_profile.EPONQueueAttributes
+
+// TechProfile from public import voltha_protos/tech_profile.proto
+type TechProfile = tech_profile.TechProfile
+
+// EponTechProfile from public import voltha_protos/tech_profile.proto
+type EponTechProfile = tech_profile.EponTechProfile
+
+// TechProfileInstance from public import voltha_protos/tech_profile.proto
+type TechProfileInstance = tech_profile.TechProfileInstance
+
+// EponTechProfileInstance from public import voltha_protos/tech_profile.proto
+type EponTechProfileInstance = tech_profile.EponTechProfileInstance
+
+// ResourceInstance from public import voltha_protos/tech_profile.proto
+type ResourceInstance = tech_profile.ResourceInstance
+
// Direction from public import voltha_protos/tech_profile.proto
type Direction = tech_profile.Direction
@@ -173,6 +203,7 @@
const OperStatus_ACTIVE = OperStatus_Types(common.OperStatus_ACTIVE)
const OperStatus_FAILED = OperStatus_Types(common.OperStatus_FAILED)
const OperStatus_RECONCILING = OperStatus_Types(common.OperStatus_RECONCILING)
+const OperStatus_RECONCILING_FAILED = OperStatus_Types(common.OperStatus_RECONCILING_FAILED)
// ConnectStatus_Types from public import voltha_protos/common.proto
type ConnectStatus_Types = common.ConnectStatus_Types
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/tech_profile/tech_profile.pb.go b/vendor/github.com/opencord/voltha-protos/v4/go/tech_profile/tech_profile.pb.go
index d60ed83..fc70ea8 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/tech_profile/tech_profile.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v4/go/tech_profile/tech_profile.pb.go
@@ -893,6 +893,1033 @@
return 0
}
+type InstanceControl struct {
+ Onu string `protobuf:"bytes,1,opt,name=onu,proto3" json:"onu,omitempty"`
+ Uni string `protobuf:"bytes,2,opt,name=uni,proto3" json:"uni,omitempty"`
+ MaxGemPayloadSize string `protobuf:"bytes,3,opt,name=max_gem_payload_size,json=maxGemPayloadSize,proto3" json:"max_gem_payload_size,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *InstanceControl) Reset() { *m = InstanceControl{} }
+func (m *InstanceControl) String() string { return proto.CompactTextString(m) }
+func (*InstanceControl) ProtoMessage() {}
+func (*InstanceControl) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d019a68bffe14cae, []int{10}
+}
+
+func (m *InstanceControl) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_InstanceControl.Unmarshal(m, b)
+}
+func (m *InstanceControl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_InstanceControl.Marshal(b, m, deterministic)
+}
+func (m *InstanceControl) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_InstanceControl.Merge(m, src)
+}
+func (m *InstanceControl) XXX_Size() int {
+ return xxx_messageInfo_InstanceControl.Size(m)
+}
+func (m *InstanceControl) XXX_DiscardUnknown() {
+ xxx_messageInfo_InstanceControl.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InstanceControl proto.InternalMessageInfo
+
+func (m *InstanceControl) GetOnu() string {
+ if m != nil {
+ return m.Onu
+ }
+ return ""
+}
+
+func (m *InstanceControl) GetUni() string {
+ if m != nil {
+ return m.Uni
+ }
+ return ""
+}
+
+func (m *InstanceControl) GetMaxGemPayloadSize() string {
+ if m != nil {
+ return m.MaxGemPayloadSize
+ }
+ return ""
+}
+
+type QThresholds struct {
+ QThreshold1 uint32 `protobuf:"varint,1,opt,name=q_threshold1,json=qThreshold1,proto3" json:"q_threshold1,omitempty"`
+ QThreshold2 uint32 `protobuf:"varint,2,opt,name=q_threshold2,json=qThreshold2,proto3" json:"q_threshold2,omitempty"`
+ QThreshold3 uint32 `protobuf:"varint,3,opt,name=q_threshold3,json=qThreshold3,proto3" json:"q_threshold3,omitempty"`
+ QThreshold4 uint32 `protobuf:"varint,4,opt,name=q_threshold4,json=qThreshold4,proto3" json:"q_threshold4,omitempty"`
+ QThreshold5 uint32 `protobuf:"varint,5,opt,name=q_threshold5,json=qThreshold5,proto3" json:"q_threshold5,omitempty"`
+ QThreshold6 uint32 `protobuf:"varint,6,opt,name=q_threshold6,json=qThreshold6,proto3" json:"q_threshold6,omitempty"`
+ QThreshold7 uint32 `protobuf:"varint,7,opt,name=q_threshold7,json=qThreshold7,proto3" json:"q_threshold7,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *QThresholds) Reset() { *m = QThresholds{} }
+func (m *QThresholds) String() string { return proto.CompactTextString(m) }
+func (*QThresholds) ProtoMessage() {}
+func (*QThresholds) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d019a68bffe14cae, []int{11}
+}
+
+func (m *QThresholds) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_QThresholds.Unmarshal(m, b)
+}
+func (m *QThresholds) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_QThresholds.Marshal(b, m, deterministic)
+}
+func (m *QThresholds) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_QThresholds.Merge(m, src)
+}
+func (m *QThresholds) XXX_Size() int {
+ return xxx_messageInfo_QThresholds.Size(m)
+}
+func (m *QThresholds) XXX_DiscardUnknown() {
+ xxx_messageInfo_QThresholds.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QThresholds proto.InternalMessageInfo
+
+func (m *QThresholds) GetQThreshold1() uint32 {
+ if m != nil {
+ return m.QThreshold1
+ }
+ return 0
+}
+
+func (m *QThresholds) GetQThreshold2() uint32 {
+ if m != nil {
+ return m.QThreshold2
+ }
+ return 0
+}
+
+func (m *QThresholds) GetQThreshold3() uint32 {
+ if m != nil {
+ return m.QThreshold3
+ }
+ return 0
+}
+
+func (m *QThresholds) GetQThreshold4() uint32 {
+ if m != nil {
+ return m.QThreshold4
+ }
+ return 0
+}
+
+func (m *QThresholds) GetQThreshold5() uint32 {
+ if m != nil {
+ return m.QThreshold5
+ }
+ return 0
+}
+
+func (m *QThresholds) GetQThreshold6() uint32 {
+ if m != nil {
+ return m.QThreshold6
+ }
+ return 0
+}
+
+func (m *QThresholds) GetQThreshold7() uint32 {
+ if m != nil {
+ return m.QThreshold7
+ }
+ return 0
+}
+
+type GemPortAttributes struct {
+ GemportId uint32 `protobuf:"fixed32,1,opt,name=gemport_id,json=gemportId,proto3" json:"gemport_id,omitempty"`
+ MaxQSize string `protobuf:"bytes,2,opt,name=max_q_size,json=maxQSize,proto3" json:"max_q_size,omitempty"`
+ PbitMap string `protobuf:"bytes,3,opt,name=pbit_map,json=pbitMap,proto3" json:"pbit_map,omitempty"`
+ AesEncryption string `protobuf:"bytes,4,opt,name=aes_encryption,json=aesEncryption,proto3" json:"aes_encryption,omitempty"`
+ SchedulingPolicy SchedulingPolicy `protobuf:"varint,5,opt,name=scheduling_policy,json=schedulingPolicy,proto3,enum=tech_profile.SchedulingPolicy" json:"scheduling_policy,omitempty"`
+ PriorityQ uint32 `protobuf:"fixed32,6,opt,name=priority_q,json=priorityQ,proto3" json:"priority_q,omitempty"`
+ Weight uint32 `protobuf:"fixed32,7,opt,name=weight,proto3" json:"weight,omitempty"`
+ DiscardPolicy DiscardPolicy `protobuf:"varint,8,opt,name=discard_policy,json=discardPolicy,proto3,enum=tech_profile.DiscardPolicy" json:"discard_policy,omitempty"`
+ DiscardConfig *RedDiscardConfig `protobuf:"bytes,9,opt,name=discard_config,json=discardConfig,proto3" json:"discard_config,omitempty"`
+ DiscardConfigV2 *DiscardConfig `protobuf:"bytes,14,opt,name=discard_config_v2,json=discardConfigV2,proto3" json:"discard_config_v2,omitempty"`
+ IsMulticast string `protobuf:"bytes,10,opt,name=is_multicast,json=isMulticast,proto3" json:"is_multicast,omitempty"`
+ MulticastGemId uint32 `protobuf:"fixed32,11,opt,name=multicast_gem_id,json=multicastGemId,proto3" json:"multicast_gem_id,omitempty"`
+ DynamicAccessControlList string `protobuf:"bytes,12,opt,name=dynamic_access_control_list,json=dynamicAccessControlList,proto3" json:"dynamic_access_control_list,omitempty"`
+ StaticAccessControlList string `protobuf:"bytes,13,opt,name=static_access_control_list,json=staticAccessControlList,proto3" json:"static_access_control_list,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GemPortAttributes) Reset() { *m = GemPortAttributes{} }
+func (m *GemPortAttributes) String() string { return proto.CompactTextString(m) }
+func (*GemPortAttributes) ProtoMessage() {}
+func (*GemPortAttributes) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d019a68bffe14cae, []int{12}
+}
+
+func (m *GemPortAttributes) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_GemPortAttributes.Unmarshal(m, b)
+}
+func (m *GemPortAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_GemPortAttributes.Marshal(b, m, deterministic)
+}
+func (m *GemPortAttributes) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_GemPortAttributes.Merge(m, src)
+}
+func (m *GemPortAttributes) XXX_Size() int {
+ return xxx_messageInfo_GemPortAttributes.Size(m)
+}
+func (m *GemPortAttributes) XXX_DiscardUnknown() {
+ xxx_messageInfo_GemPortAttributes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GemPortAttributes proto.InternalMessageInfo
+
+func (m *GemPortAttributes) GetGemportId() uint32 {
+ if m != nil {
+ return m.GemportId
+ }
+ return 0
+}
+
+func (m *GemPortAttributes) GetMaxQSize() string {
+ if m != nil {
+ return m.MaxQSize
+ }
+ return ""
+}
+
+func (m *GemPortAttributes) GetPbitMap() string {
+ if m != nil {
+ return m.PbitMap
+ }
+ return ""
+}
+
+func (m *GemPortAttributes) GetAesEncryption() string {
+ if m != nil {
+ return m.AesEncryption
+ }
+ return ""
+}
+
+func (m *GemPortAttributes) GetSchedulingPolicy() SchedulingPolicy {
+ if m != nil {
+ return m.SchedulingPolicy
+ }
+ return SchedulingPolicy_WRR
+}
+
+func (m *GemPortAttributes) GetPriorityQ() uint32 {
+ if m != nil {
+ return m.PriorityQ
+ }
+ return 0
+}
+
+func (m *GemPortAttributes) GetWeight() uint32 {
+ if m != nil {
+ return m.Weight
+ }
+ return 0
+}
+
+func (m *GemPortAttributes) GetDiscardPolicy() DiscardPolicy {
+ if m != nil {
+ return m.DiscardPolicy
+ }
+ return DiscardPolicy_TailDrop
+}
+
+func (m *GemPortAttributes) GetDiscardConfig() *RedDiscardConfig {
+ if m != nil {
+ return m.DiscardConfig
+ }
+ return nil
+}
+
+func (m *GemPortAttributes) GetDiscardConfigV2() *DiscardConfig {
+ if m != nil {
+ return m.DiscardConfigV2
+ }
+ return nil
+}
+
+func (m *GemPortAttributes) GetIsMulticast() string {
+ if m != nil {
+ return m.IsMulticast
+ }
+ return ""
+}
+
+func (m *GemPortAttributes) GetMulticastGemId() uint32 {
+ if m != nil {
+ return m.MulticastGemId
+ }
+ return 0
+}
+
+func (m *GemPortAttributes) GetDynamicAccessControlList() string {
+ if m != nil {
+ return m.DynamicAccessControlList
+ }
+ return ""
+}
+
+func (m *GemPortAttributes) GetStaticAccessControlList() string {
+ if m != nil {
+ return m.StaticAccessControlList
+ }
+ return ""
+}
+
+type SchedulerAttributes struct {
+ Direction Direction `protobuf:"varint,1,opt,name=direction,proto3,enum=tech_profile.Direction" json:"direction,omitempty"`
+ AllocId uint32 `protobuf:"varint,2,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"`
+ AdditionalBw AdditionalBW `protobuf:"varint,3,opt,name=additional_bw,json=additionalBw,proto3,enum=tech_profile.AdditionalBW" json:"additional_bw,omitempty"`
+ Priority uint32 `protobuf:"fixed32,4,opt,name=priority,proto3" json:"priority,omitempty"`
+ Weight uint32 `protobuf:"fixed32,5,opt,name=weight,proto3" json:"weight,omitempty"`
+ QSchedPolicy SchedulingPolicy `protobuf:"varint,6,opt,name=q_sched_policy,json=qSchedPolicy,proto3,enum=tech_profile.SchedulingPolicy" json:"q_sched_policy,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SchedulerAttributes) Reset() { *m = SchedulerAttributes{} }
+func (m *SchedulerAttributes) String() string { return proto.CompactTextString(m) }
+func (*SchedulerAttributes) ProtoMessage() {}
+func (*SchedulerAttributes) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d019a68bffe14cae, []int{13}
+}
+
+func (m *SchedulerAttributes) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SchedulerAttributes.Unmarshal(m, b)
+}
+func (m *SchedulerAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SchedulerAttributes.Marshal(b, m, deterministic)
+}
+func (m *SchedulerAttributes) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SchedulerAttributes.Merge(m, src)
+}
+func (m *SchedulerAttributes) XXX_Size() int {
+ return xxx_messageInfo_SchedulerAttributes.Size(m)
+}
+func (m *SchedulerAttributes) XXX_DiscardUnknown() {
+ xxx_messageInfo_SchedulerAttributes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SchedulerAttributes proto.InternalMessageInfo
+
+func (m *SchedulerAttributes) GetDirection() Direction {
+ if m != nil {
+ return m.Direction
+ }
+ return Direction_UPSTREAM
+}
+
+func (m *SchedulerAttributes) GetAllocId() uint32 {
+ if m != nil {
+ return m.AllocId
+ }
+ return 0
+}
+
+func (m *SchedulerAttributes) GetAdditionalBw() AdditionalBW {
+ if m != nil {
+ return m.AdditionalBw
+ }
+ return AdditionalBW_AdditionalBW_None
+}
+
+func (m *SchedulerAttributes) GetPriority() uint32 {
+ if m != nil {
+ return m.Priority
+ }
+ return 0
+}
+
+func (m *SchedulerAttributes) GetWeight() uint32 {
+ if m != nil {
+ return m.Weight
+ }
+ return 0
+}
+
+func (m *SchedulerAttributes) GetQSchedPolicy() SchedulingPolicy {
+ if m != nil {
+ return m.QSchedPolicy
+ }
+ return SchedulingPolicy_WRR
+}
+
+type EPONQueueAttributes struct {
+ MaxQSize string `protobuf:"bytes,1,opt,name=max_q_size,json=maxQSize,proto3" json:"max_q_size,omitempty"`
+ PbitMap string `protobuf:"bytes,2,opt,name=pbit_map,json=pbitMap,proto3" json:"pbit_map,omitempty"`
+ GemportId uint32 `protobuf:"varint,3,opt,name=gemport_id,json=gemportId,proto3" json:"gemport_id,omitempty"`
+ AesEncryption string `protobuf:"bytes,4,opt,name=aes_encryption,json=aesEncryption,proto3" json:"aes_encryption,omitempty"`
+ TrafficType string `protobuf:"bytes,5,opt,name=traffic_type,json=trafficType,proto3" json:"traffic_type,omitempty"`
+ UnsolicitedGrantSize uint32 `protobuf:"varint,6,opt,name=unsolicited_grant_size,json=unsolicitedGrantSize,proto3" json:"unsolicited_grant_size,omitempty"`
+ NominalInterval uint32 `protobuf:"varint,7,opt,name=nominal_interval,json=nominalInterval,proto3" json:"nominal_interval,omitempty"`
+ ToleratedPollJitter uint32 `protobuf:"varint,8,opt,name=tolerated_poll_jitter,json=toleratedPollJitter,proto3" json:"tolerated_poll_jitter,omitempty"`
+ RequestTransmissionPolicy uint32 `protobuf:"varint,9,opt,name=request_transmission_policy,json=requestTransmissionPolicy,proto3" json:"request_transmission_policy,omitempty"`
+ NumQSets uint32 `protobuf:"varint,10,opt,name=num_q_sets,json=numQSets,proto3" json:"num_q_sets,omitempty"`
+ QThresholds *QThresholds `protobuf:"bytes,11,opt,name=q_thresholds,json=qThresholds,proto3" json:"q_thresholds,omitempty"`
+ SchedulingPolicy SchedulingPolicy `protobuf:"varint,12,opt,name=scheduling_policy,json=schedulingPolicy,proto3,enum=tech_profile.SchedulingPolicy" json:"scheduling_policy,omitempty"`
+ PriorityQ uint32 `protobuf:"varint,13,opt,name=priority_q,json=priorityQ,proto3" json:"priority_q,omitempty"`
+ Weight uint32 `protobuf:"varint,14,opt,name=weight,proto3" json:"weight,omitempty"`
+ DiscardPolicy DiscardPolicy `protobuf:"varint,15,opt,name=discard_policy,json=discardPolicy,proto3,enum=tech_profile.DiscardPolicy" json:"discard_policy,omitempty"`
+ DiscardConfig *RedDiscardConfig `protobuf:"bytes,16,opt,name=discard_config,json=discardConfig,proto3" json:"discard_config,omitempty"`
+ DiscardConfigV2 *DiscardConfig `protobuf:"bytes,17,opt,name=discard_config_v2,json=discardConfigV2,proto3" json:"discard_config_v2,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *EPONQueueAttributes) Reset() { *m = EPONQueueAttributes{} }
+func (m *EPONQueueAttributes) String() string { return proto.CompactTextString(m) }
+func (*EPONQueueAttributes) ProtoMessage() {}
+func (*EPONQueueAttributes) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d019a68bffe14cae, []int{14}
+}
+
+func (m *EPONQueueAttributes) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_EPONQueueAttributes.Unmarshal(m, b)
+}
+func (m *EPONQueueAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_EPONQueueAttributes.Marshal(b, m, deterministic)
+}
+func (m *EPONQueueAttributes) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EPONQueueAttributes.Merge(m, src)
+}
+func (m *EPONQueueAttributes) XXX_Size() int {
+ return xxx_messageInfo_EPONQueueAttributes.Size(m)
+}
+func (m *EPONQueueAttributes) XXX_DiscardUnknown() {
+ xxx_messageInfo_EPONQueueAttributes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EPONQueueAttributes proto.InternalMessageInfo
+
+func (m *EPONQueueAttributes) GetMaxQSize() string {
+ if m != nil {
+ return m.MaxQSize
+ }
+ return ""
+}
+
+func (m *EPONQueueAttributes) GetPbitMap() string {
+ if m != nil {
+ return m.PbitMap
+ }
+ return ""
+}
+
+func (m *EPONQueueAttributes) GetGemportId() uint32 {
+ if m != nil {
+ return m.GemportId
+ }
+ return 0
+}
+
+func (m *EPONQueueAttributes) GetAesEncryption() string {
+ if m != nil {
+ return m.AesEncryption
+ }
+ return ""
+}
+
+func (m *EPONQueueAttributes) GetTrafficType() string {
+ if m != nil {
+ return m.TrafficType
+ }
+ return ""
+}
+
+func (m *EPONQueueAttributes) GetUnsolicitedGrantSize() uint32 {
+ if m != nil {
+ return m.UnsolicitedGrantSize
+ }
+ return 0
+}
+
+func (m *EPONQueueAttributes) GetNominalInterval() uint32 {
+ if m != nil {
+ return m.NominalInterval
+ }
+ return 0
+}
+
+func (m *EPONQueueAttributes) GetToleratedPollJitter() uint32 {
+ if m != nil {
+ return m.ToleratedPollJitter
+ }
+ return 0
+}
+
+func (m *EPONQueueAttributes) GetRequestTransmissionPolicy() uint32 {
+ if m != nil {
+ return m.RequestTransmissionPolicy
+ }
+ return 0
+}
+
+func (m *EPONQueueAttributes) GetNumQSets() uint32 {
+ if m != nil {
+ return m.NumQSets
+ }
+ return 0
+}
+
+func (m *EPONQueueAttributes) GetQThresholds() *QThresholds {
+ if m != nil {
+ return m.QThresholds
+ }
+ return nil
+}
+
+func (m *EPONQueueAttributes) GetSchedulingPolicy() SchedulingPolicy {
+ if m != nil {
+ return m.SchedulingPolicy
+ }
+ return SchedulingPolicy_WRR
+}
+
+func (m *EPONQueueAttributes) GetPriorityQ() uint32 {
+ if m != nil {
+ return m.PriorityQ
+ }
+ return 0
+}
+
+func (m *EPONQueueAttributes) GetWeight() uint32 {
+ if m != nil {
+ return m.Weight
+ }
+ return 0
+}
+
+func (m *EPONQueueAttributes) GetDiscardPolicy() DiscardPolicy {
+ if m != nil {
+ return m.DiscardPolicy
+ }
+ return DiscardPolicy_TailDrop
+}
+
+func (m *EPONQueueAttributes) GetDiscardConfig() *RedDiscardConfig {
+ if m != nil {
+ return m.DiscardConfig
+ }
+ return nil
+}
+
+func (m *EPONQueueAttributes) GetDiscardConfigV2() *DiscardConfig {
+ if m != nil {
+ return m.DiscardConfigV2
+ }
+ return nil
+}
+
+// TechProfile definition (relevant for GPON, XGPON and XGS-PON technologies)
+type TechProfile struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
+ ProfileType string `protobuf:"bytes,3,opt,name=profile_type,json=profileType,proto3" json:"profile_type,omitempty"`
+ NumGemPorts uint32 `protobuf:"varint,4,opt,name=num_gem_ports,json=numGemPorts,proto3" json:"num_gem_ports,omitempty"`
+ InstanceControl *InstanceControl `protobuf:"bytes,5,opt,name=instance_control,json=instanceControl,proto3" json:"instance_control,omitempty"`
+ UsScheduler *SchedulerAttributes `protobuf:"bytes,6,opt,name=us_scheduler,json=usScheduler,proto3" json:"us_scheduler,omitempty"`
+ DsScheduler *SchedulerAttributes `protobuf:"bytes,7,opt,name=ds_scheduler,json=dsScheduler,proto3" json:"ds_scheduler,omitempty"`
+ UpstreamGemPortAttributeList []*GemPortAttributes `protobuf:"bytes,8,rep,name=upstream_gem_port_attribute_list,json=upstreamGemPortAttributeList,proto3" json:"upstream_gem_port_attribute_list,omitempty"`
+ DownstreamGemPortAttributeList []*GemPortAttributes `protobuf:"bytes,9,rep,name=downstream_gem_port_attribute_list,json=downstreamGemPortAttributeList,proto3" json:"downstream_gem_port_attribute_list,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *TechProfile) Reset() { *m = TechProfile{} }
+func (m *TechProfile) String() string { return proto.CompactTextString(m) }
+func (*TechProfile) ProtoMessage() {}
+func (*TechProfile) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d019a68bffe14cae, []int{15}
+}
+
+func (m *TechProfile) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TechProfile.Unmarshal(m, b)
+}
+func (m *TechProfile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TechProfile.Marshal(b, m, deterministic)
+}
+func (m *TechProfile) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TechProfile.Merge(m, src)
+}
+func (m *TechProfile) XXX_Size() int {
+ return xxx_messageInfo_TechProfile.Size(m)
+}
+func (m *TechProfile) XXX_DiscardUnknown() {
+ xxx_messageInfo_TechProfile.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TechProfile proto.InternalMessageInfo
+
+func (m *TechProfile) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *TechProfile) GetVersion() uint32 {
+ if m != nil {
+ return m.Version
+ }
+ return 0
+}
+
+func (m *TechProfile) GetProfileType() string {
+ if m != nil {
+ return m.ProfileType
+ }
+ return ""
+}
+
+func (m *TechProfile) GetNumGemPorts() uint32 {
+ if m != nil {
+ return m.NumGemPorts
+ }
+ return 0
+}
+
+func (m *TechProfile) GetInstanceControl() *InstanceControl {
+ if m != nil {
+ return m.InstanceControl
+ }
+ return nil
+}
+
+func (m *TechProfile) GetUsScheduler() *SchedulerAttributes {
+ if m != nil {
+ return m.UsScheduler
+ }
+ return nil
+}
+
+func (m *TechProfile) GetDsScheduler() *SchedulerAttributes {
+ if m != nil {
+ return m.DsScheduler
+ }
+ return nil
+}
+
+func (m *TechProfile) GetUpstreamGemPortAttributeList() []*GemPortAttributes {
+ if m != nil {
+ return m.UpstreamGemPortAttributeList
+ }
+ return nil
+}
+
+func (m *TechProfile) GetDownstreamGemPortAttributeList() []*GemPortAttributes {
+ if m != nil {
+ return m.DownstreamGemPortAttributeList
+ }
+ return nil
+}
+
+// EPON TechProfile definition
+type EponTechProfile struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
+ ProfileType string `protobuf:"bytes,3,opt,name=profile_type,json=profileType,proto3" json:"profile_type,omitempty"`
+ NumGemPorts uint32 `protobuf:"varint,4,opt,name=num_gem_ports,json=numGemPorts,proto3" json:"num_gem_ports,omitempty"`
+ InstanceControl *InstanceControl `protobuf:"bytes,5,opt,name=instance_control,json=instanceControl,proto3" json:"instance_control,omitempty"`
+ PackageType string `protobuf:"bytes,6,opt,name=package_type,json=packageType,proto3" json:"package_type,omitempty"`
+ UpstreamQueueAttributeList []*EPONQueueAttributes `protobuf:"bytes,7,rep,name=upstream_queue_attribute_list,json=upstreamQueueAttributeList,proto3" json:"upstream_queue_attribute_list,omitempty"`
+ DownstreamQueueAttributeList []*EPONQueueAttributes `protobuf:"bytes,8,rep,name=downstream_queue_attribute_list,json=downstreamQueueAttributeList,proto3" json:"downstream_queue_attribute_list,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *EponTechProfile) Reset() { *m = EponTechProfile{} }
+func (m *EponTechProfile) String() string { return proto.CompactTextString(m) }
+func (*EponTechProfile) ProtoMessage() {}
+func (*EponTechProfile) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d019a68bffe14cae, []int{16}
+}
+
+func (m *EponTechProfile) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_EponTechProfile.Unmarshal(m, b)
+}
+func (m *EponTechProfile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_EponTechProfile.Marshal(b, m, deterministic)
+}
+func (m *EponTechProfile) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EponTechProfile.Merge(m, src)
+}
+func (m *EponTechProfile) XXX_Size() int {
+ return xxx_messageInfo_EponTechProfile.Size(m)
+}
+func (m *EponTechProfile) XXX_DiscardUnknown() {
+ xxx_messageInfo_EponTechProfile.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EponTechProfile proto.InternalMessageInfo
+
+func (m *EponTechProfile) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *EponTechProfile) GetVersion() uint32 {
+ if m != nil {
+ return m.Version
+ }
+ return 0
+}
+
+func (m *EponTechProfile) GetProfileType() string {
+ if m != nil {
+ return m.ProfileType
+ }
+ return ""
+}
+
+func (m *EponTechProfile) GetNumGemPorts() uint32 {
+ if m != nil {
+ return m.NumGemPorts
+ }
+ return 0
+}
+
+func (m *EponTechProfile) GetInstanceControl() *InstanceControl {
+ if m != nil {
+ return m.InstanceControl
+ }
+ return nil
+}
+
+func (m *EponTechProfile) GetPackageType() string {
+ if m != nil {
+ return m.PackageType
+ }
+ return ""
+}
+
+func (m *EponTechProfile) GetUpstreamQueueAttributeList() []*EPONQueueAttributes {
+ if m != nil {
+ return m.UpstreamQueueAttributeList
+ }
+ return nil
+}
+
+func (m *EponTechProfile) GetDownstreamQueueAttributeList() []*EPONQueueAttributes {
+ if m != nil {
+ return m.DownstreamQueueAttributeList
+ }
+ return nil
+}
+
+// TechProfile Instance definition (relevant for GPON, XGPON and XGS-PON technologies)
+type TechProfileInstance struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
+ SubscriberIdentifier string `protobuf:"bytes,3,opt,name=subscriber_identifier,json=subscriberIdentifier,proto3" json:"subscriber_identifier,omitempty"`
+ ProfileType string `protobuf:"bytes,4,opt,name=profile_type,json=profileType,proto3" json:"profile_type,omitempty"`
+ NumGemPorts uint32 `protobuf:"varint,5,opt,name=num_gem_ports,json=numGemPorts,proto3" json:"num_gem_ports,omitempty"`
+ InstanceControl *InstanceControl `protobuf:"bytes,6,opt,name=instance_control,json=instanceControl,proto3" json:"instance_control,omitempty"`
+ UsScheduler *SchedulerAttributes `protobuf:"bytes,7,opt,name=us_scheduler,json=usScheduler,proto3" json:"us_scheduler,omitempty"`
+ DsScheduler *SchedulerAttributes `protobuf:"bytes,8,opt,name=ds_scheduler,json=dsScheduler,proto3" json:"ds_scheduler,omitempty"`
+ UpstreamGemPortAttributeList []*GemPortAttributes `protobuf:"bytes,9,rep,name=upstream_gem_port_attribute_list,json=upstreamGemPortAttributeList,proto3" json:"upstream_gem_port_attribute_list,omitempty"`
+ DownstreamGemPortAttributeList []*GemPortAttributes `protobuf:"bytes,10,rep,name=downstream_gem_port_attribute_list,json=downstreamGemPortAttributeList,proto3" json:"downstream_gem_port_attribute_list,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *TechProfileInstance) Reset() { *m = TechProfileInstance{} }
+func (m *TechProfileInstance) String() string { return proto.CompactTextString(m) }
+func (*TechProfileInstance) ProtoMessage() {}
+func (*TechProfileInstance) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d019a68bffe14cae, []int{17}
+}
+
+func (m *TechProfileInstance) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TechProfileInstance.Unmarshal(m, b)
+}
+func (m *TechProfileInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TechProfileInstance.Marshal(b, m, deterministic)
+}
+func (m *TechProfileInstance) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TechProfileInstance.Merge(m, src)
+}
+func (m *TechProfileInstance) XXX_Size() int {
+ return xxx_messageInfo_TechProfileInstance.Size(m)
+}
+func (m *TechProfileInstance) XXX_DiscardUnknown() {
+ xxx_messageInfo_TechProfileInstance.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TechProfileInstance proto.InternalMessageInfo
+
+func (m *TechProfileInstance) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *TechProfileInstance) GetVersion() uint32 {
+ if m != nil {
+ return m.Version
+ }
+ return 0
+}
+
+func (m *TechProfileInstance) GetSubscriberIdentifier() string {
+ if m != nil {
+ return m.SubscriberIdentifier
+ }
+ return ""
+}
+
+func (m *TechProfileInstance) GetProfileType() string {
+ if m != nil {
+ return m.ProfileType
+ }
+ return ""
+}
+
+func (m *TechProfileInstance) GetNumGemPorts() uint32 {
+ if m != nil {
+ return m.NumGemPorts
+ }
+ return 0
+}
+
+func (m *TechProfileInstance) GetInstanceControl() *InstanceControl {
+ if m != nil {
+ return m.InstanceControl
+ }
+ return nil
+}
+
+func (m *TechProfileInstance) GetUsScheduler() *SchedulerAttributes {
+ if m != nil {
+ return m.UsScheduler
+ }
+ return nil
+}
+
+func (m *TechProfileInstance) GetDsScheduler() *SchedulerAttributes {
+ if m != nil {
+ return m.DsScheduler
+ }
+ return nil
+}
+
+func (m *TechProfileInstance) GetUpstreamGemPortAttributeList() []*GemPortAttributes {
+ if m != nil {
+ return m.UpstreamGemPortAttributeList
+ }
+ return nil
+}
+
+func (m *TechProfileInstance) GetDownstreamGemPortAttributeList() []*GemPortAttributes {
+ if m != nil {
+ return m.DownstreamGemPortAttributeList
+ }
+ return nil
+}
+
+// EPON TechProfile Instance definition.
+type EponTechProfileInstance struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Version uint32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
+ SubscriberIdentifier string `protobuf:"bytes,3,opt,name=subscriber_identifier,json=subscriberIdentifier,proto3" json:"subscriber_identifier,omitempty"`
+ ProfileType string `protobuf:"bytes,4,opt,name=profile_type,json=profileType,proto3" json:"profile_type,omitempty"`
+ NumGemPorts uint32 `protobuf:"varint,5,opt,name=num_gem_ports,json=numGemPorts,proto3" json:"num_gem_ports,omitempty"`
+ AllocId uint32 `protobuf:"varint,6,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"`
+ InstanceControl *InstanceControl `protobuf:"bytes,7,opt,name=instance_control,json=instanceControl,proto3" json:"instance_control,omitempty"`
+ PackageType string `protobuf:"bytes,8,opt,name=package_type,json=packageType,proto3" json:"package_type,omitempty"`
+ UpstreamQueueAttributeList []*EPONQueueAttributes `protobuf:"bytes,9,rep,name=upstream_queue_attribute_list,json=upstreamQueueAttributeList,proto3" json:"upstream_queue_attribute_list,omitempty"`
+ DownstreamQueueAttributeList []*EPONQueueAttributes `protobuf:"bytes,10,rep,name=downstream_queue_attribute_list,json=downstreamQueueAttributeList,proto3" json:"downstream_queue_attribute_list,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *EponTechProfileInstance) Reset() { *m = EponTechProfileInstance{} }
+func (m *EponTechProfileInstance) String() string { return proto.CompactTextString(m) }
+func (*EponTechProfileInstance) ProtoMessage() {}
+func (*EponTechProfileInstance) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d019a68bffe14cae, []int{18}
+}
+
+func (m *EponTechProfileInstance) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_EponTechProfileInstance.Unmarshal(m, b)
+}
+func (m *EponTechProfileInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_EponTechProfileInstance.Marshal(b, m, deterministic)
+}
+func (m *EponTechProfileInstance) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EponTechProfileInstance.Merge(m, src)
+}
+func (m *EponTechProfileInstance) XXX_Size() int {
+ return xxx_messageInfo_EponTechProfileInstance.Size(m)
+}
+func (m *EponTechProfileInstance) XXX_DiscardUnknown() {
+ xxx_messageInfo_EponTechProfileInstance.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EponTechProfileInstance proto.InternalMessageInfo
+
+func (m *EponTechProfileInstance) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *EponTechProfileInstance) GetVersion() uint32 {
+ if m != nil {
+ return m.Version
+ }
+ return 0
+}
+
+func (m *EponTechProfileInstance) GetSubscriberIdentifier() string {
+ if m != nil {
+ return m.SubscriberIdentifier
+ }
+ return ""
+}
+
+func (m *EponTechProfileInstance) GetProfileType() string {
+ if m != nil {
+ return m.ProfileType
+ }
+ return ""
+}
+
+func (m *EponTechProfileInstance) GetNumGemPorts() uint32 {
+ if m != nil {
+ return m.NumGemPorts
+ }
+ return 0
+}
+
+func (m *EponTechProfileInstance) GetAllocId() uint32 {
+ if m != nil {
+ return m.AllocId
+ }
+ return 0
+}
+
+func (m *EponTechProfileInstance) GetInstanceControl() *InstanceControl {
+ if m != nil {
+ return m.InstanceControl
+ }
+ return nil
+}
+
+func (m *EponTechProfileInstance) GetPackageType() string {
+ if m != nil {
+ return m.PackageType
+ }
+ return ""
+}
+
+func (m *EponTechProfileInstance) GetUpstreamQueueAttributeList() []*EPONQueueAttributes {
+ if m != nil {
+ return m.UpstreamQueueAttributeList
+ }
+ return nil
+}
+
+func (m *EponTechProfileInstance) GetDownstreamQueueAttributeList() []*EPONQueueAttributes {
+ if m != nil {
+ return m.DownstreamQueueAttributeList
+ }
+ return nil
+}
+
+// Resource Instance definition
+type ResourceInstance struct {
+ TpId uint32 `protobuf:"varint,1,opt,name=tp_id,json=tpId,proto3" json:"tp_id,omitempty"`
+ ProfileType string `protobuf:"bytes,2,opt,name=profile_type,json=profileType,proto3" json:"profile_type,omitempty"`
+ SubscriberIdentifier string `protobuf:"bytes,3,opt,name=subscriber_identifier,json=subscriberIdentifier,proto3" json:"subscriber_identifier,omitempty"`
+ AllocId uint32 `protobuf:"varint,4,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"`
+ GemportIds []uint32 `protobuf:"varint,5,rep,packed,name=gemport_ids,json=gemportIds,proto3" json:"gemport_ids,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ResourceInstance) Reset() { *m = ResourceInstance{} }
+func (m *ResourceInstance) String() string { return proto.CompactTextString(m) }
+func (*ResourceInstance) ProtoMessage() {}
+func (*ResourceInstance) Descriptor() ([]byte, []int) {
+ return fileDescriptor_d019a68bffe14cae, []int{19}
+}
+
+func (m *ResourceInstance) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ResourceInstance.Unmarshal(m, b)
+}
+func (m *ResourceInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ResourceInstance.Marshal(b, m, deterministic)
+}
+func (m *ResourceInstance) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResourceInstance.Merge(m, src)
+}
+func (m *ResourceInstance) XXX_Size() int {
+ return xxx_messageInfo_ResourceInstance.Size(m)
+}
+func (m *ResourceInstance) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResourceInstance.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceInstance proto.InternalMessageInfo
+
+func (m *ResourceInstance) GetTpId() uint32 {
+ if m != nil {
+ return m.TpId
+ }
+ return 0
+}
+
+func (m *ResourceInstance) GetProfileType() string {
+ if m != nil {
+ return m.ProfileType
+ }
+ return ""
+}
+
+func (m *ResourceInstance) GetSubscriberIdentifier() string {
+ if m != nil {
+ return m.SubscriberIdentifier
+ }
+ return ""
+}
+
+func (m *ResourceInstance) GetAllocId() uint32 {
+ if m != nil {
+ return m.AllocId
+ }
+ return 0
+}
+
+func (m *ResourceInstance) GetGemportIds() []uint32 {
+ if m != nil {
+ return m.GemportIds
+ }
+ return nil
+}
+
func init() {
proto.RegisterEnum("tech_profile.Direction", Direction_name, Direction_value)
proto.RegisterEnum("tech_profile.SchedulingPolicy", SchedulingPolicy_name, SchedulingPolicy_value)
@@ -909,82 +1936,154 @@
proto.RegisterType((*DiscardConfig)(nil), "tech_profile.DiscardConfig")
proto.RegisterType((*TrafficQueue)(nil), "tech_profile.TrafficQueue")
proto.RegisterType((*TrafficQueues)(nil), "tech_profile.TrafficQueues")
+ proto.RegisterType((*InstanceControl)(nil), "tech_profile.InstanceControl")
+ proto.RegisterType((*QThresholds)(nil), "tech_profile.QThresholds")
+ proto.RegisterType((*GemPortAttributes)(nil), "tech_profile.GemPortAttributes")
+ proto.RegisterType((*SchedulerAttributes)(nil), "tech_profile.SchedulerAttributes")
+ proto.RegisterType((*EPONQueueAttributes)(nil), "tech_profile.EPONQueueAttributes")
+ proto.RegisterType((*TechProfile)(nil), "tech_profile.TechProfile")
+ proto.RegisterType((*EponTechProfile)(nil), "tech_profile.EponTechProfile")
+ proto.RegisterType((*TechProfileInstance)(nil), "tech_profile.TechProfileInstance")
+ proto.RegisterType((*EponTechProfileInstance)(nil), "tech_profile.EponTechProfileInstance")
+ proto.RegisterType((*ResourceInstance)(nil), "tech_profile.ResourceInstance")
}
func init() { proto.RegisterFile("voltha_protos/tech_profile.proto", fileDescriptor_d019a68bffe14cae) }
var fileDescriptor_d019a68bffe14cae = []byte{
- // 1139 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xdd, 0x6e, 0x1b, 0x45,
- 0x14, 0xf6, 0xda, 0x8d, 0x7f, 0x4e, 0x6c, 0x67, 0x33, 0x25, 0xc4, 0xa4, 0x0d, 0x18, 0x97, 0xd2,
- 0xc8, 0x88, 0x18, 0x85, 0xd0, 0x9b, 0x22, 0x55, 0x76, 0x13, 0x29, 0x2b, 0xd1, 0x34, 0xdd, 0x04,
- 0x19, 0x71, 0xc1, 0x6a, 0xbd, 0x33, 0x5e, 0x8f, 0xb4, 0x9e, 0x59, 0x66, 0xc7, 0x75, 0xd2, 0x2b,
- 0x6e, 0x78, 0x0b, 0xb8, 0xe4, 0x09, 0xb8, 0x41, 0x3c, 0x0a, 0x4f, 0xc0, 0x63, 0xa0, 0x99, 0xdd,
- 0xb5, 0xbd, 0xb6, 0x49, 0xa1, 0x82, 0xbb, 0x39, 0xdf, 0x7e, 0xe7, 0xcc, 0xf9, 0x9f, 0x85, 0xe6,
- 0x2b, 0x1e, 0xc8, 0x91, 0xeb, 0x84, 0x82, 0x4b, 0x1e, 0x75, 0x24, 0xf1, 0x46, 0xea, 0x3c, 0xa4,
- 0x01, 0x39, 0xd4, 0x18, 0xaa, 0x2e, 0x62, 0x7b, 0xf7, 0x7d, 0xce, 0xfd, 0x80, 0x74, 0xdc, 0x90,
- 0x76, 0x5c, 0xc6, 0xb8, 0x74, 0x25, 0xe5, 0x2c, 0x8a, 0xb9, 0xad, 0x1f, 0xf2, 0xb0, 0x75, 0xe9,
- 0x8d, 0x08, 0x9e, 0x04, 0x44, 0x3c, 0xe3, 0x6c, 0x48, 0x7d, 0xf4, 0x05, 0x54, 0x30, 0x15, 0xc4,
- 0x53, 0xbc, 0x86, 0xd1, 0x34, 0x0e, 0xea, 0x47, 0xbb, 0x87, 0x99, 0x7b, 0x4e, 0xd2, 0xcf, 0xf6,
- 0x9c, 0x89, 0x9e, 0x42, 0xcd, 0xc5, 0x98, 0xaa, 0xb3, 0x1b, 0x38, 0x83, 0x69, 0x23, 0xaf, 0x55,
- 0xf7, 0xb2, 0xaa, 0xdd, 0x19, 0xa5, 0xd7, 0xb7, 0xab, 0x73, 0x85, 0xde, 0x14, 0xed, 0x41, 0x39,
- 0x14, 0x94, 0x0b, 0x2a, 0x6f, 0x1a, 0x85, 0xa6, 0x71, 0x50, 0xb2, 0x67, 0x32, 0x7a, 0x17, 0x8a,
- 0x53, 0x42, 0xfd, 0x91, 0x6c, 0xdc, 0xd1, 0x5f, 0x12, 0x09, 0x75, 0xa1, 0x1a, 0x29, 0xf7, 0x9d,
- 0x90, 0x07, 0xd4, 0xbb, 0x69, 0x6c, 0xe8, 0x3b, 0xdf, 0xcf, 0xde, 0x99, 0x04, 0x48, 0x99, 0x7f,
- 0xa1, 0x59, 0xf6, 0xa6, 0xd6, 0x89, 0x85, 0xd6, 0x6f, 0x06, 0xa0, 0x2b, 0xe1, 0x0e, 0x87, 0xd4,
- 0xbb, 0x1c, 0xb9, 0x21, 0x65, 0xbe, 0xc5, 0x86, 0x1c, 0x99, 0x50, 0xf0, 0xa8, 0xd0, 0xf1, 0x97,
- 0x6c, 0x75, 0xd4, 0xc8, 0x20, 0xd2, 0x61, 0x29, 0x64, 0x10, 0x29, 0x24, 0xa4, 0x22, 0x71, 0x56,
- 0x1d, 0x35, 0x32, 0x88, 0x12, 0x27, 0xd5, 0x51, 0x21, 0x3e, 0x15, 0xda, 0xb1, 0x92, 0xad, 0x8e,
- 0xe8, 0x0c, 0xc0, 0xc5, 0xd8, 0x19, 0x4c, 0x1d, 0xca, 0x70, 0xa3, 0xa8, 0x3d, 0x6e, 0x67, 0x3d,
- 0xb6, 0xd8, 0x90, 0x08, 0x41, 0x70, 0x9a, 0xad, 0x5e, 0xdf, 0x62, 0x98, 0x7a, 0xba, 0x74, 0x76,
- 0xd9, 0xc5, 0xb8, 0x37, 0xb5, 0x18, 0x6e, 0xfd, 0x9c, 0x07, 0x33, 0x75, 0x3d, 0x2d, 0xe2, 0xdb,
- 0x96, 0xef, 0x3d, 0x28, 0xbb, 0x41, 0xc0, 0x3d, 0x87, 0xe2, 0x24, 0xc4, 0x92, 0x96, 0x2d, 0x8c,
- 0x9e, 0x40, 0x25, 0x4a, 0xcd, 0xeb, 0x60, 0x37, 0x8f, 0xf6, 0xd7, 0x66, 0x38, 0x6d, 0x21, 0x7b,
- 0xce, 0x47, 0x36, 0xbc, 0x23, 0x63, 0x17, 0x9d, 0x28, 0x4e, 0xaf, 0x43, 0xd9, 0x90, 0xeb, 0x14,
- 0x6d, 0x1e, 0x35, 0xb3, 0x76, 0x56, 0xeb, 0x60, 0x23, 0xb9, 0x5a, 0x9b, 0x8f, 0x61, 0x6b, 0x51,
- 0x4d, 0xb9, 0x1c, 0xe7, 0xb7, 0xa6, 0xe0, 0x8b, 0x18, 0xb5, 0x70, 0xeb, 0x77, 0x03, 0xb6, 0x97,
- 0xf3, 0x13, 0xa1, 0x5d, 0x28, 0x51, 0x26, 0x87, 0x4a, 0x2b, 0xae, 0x6e, 0x51, 0x89, 0x16, 0x46,
- 0x3b, 0x50, 0xe4, 0x6c, 0x32, 0x4f, 0xc0, 0x06, 0x67, 0x93, 0x18, 0x9e, 0x30, 0xaa, 0xe0, 0xb8,
- 0xac, 0x1b, 0x13, 0x46, 0x2d, 0xac, 0xcc, 0x84, 0x5c, 0x48, 0x87, 0xf1, 0xe4, 0xf2, 0xa2, 0x12,
- 0xcf, 0x39, 0x3a, 0x85, 0xfa, 0x2c, 0x62, 0x75, 0x6b, 0xd4, 0x28, 0x34, 0x0b, 0x07, 0x9b, 0xcb,
- 0x5d, 0xb9, 0xec, 0x98, 0x5d, 0x93, 0x0b, 0x48, 0xd4, 0x7a, 0x0c, 0x3b, 0x57, 0x2e, 0x0d, 0x4e,
- 0x04, 0x0f, 0x4f, 0x68, 0xe4, 0xb9, 0x02, 0x27, 0xf3, 0xb9, 0x0f, 0xf0, 0xfd, 0x84, 0x4c, 0x88,
- 0x13, 0xd1, 0xd7, 0x24, 0x09, 0xa1, 0xa2, 0x91, 0x4b, 0xfa, 0x9a, 0xb4, 0x7e, 0x34, 0xc0, 0xb4,
- 0x09, 0xce, 0xea, 0x3c, 0x80, 0xda, 0x98, 0x32, 0x47, 0x8e, 0x04, 0x89, 0x46, 0x3c, 0x48, 0x23,
- 0xaf, 0x8e, 0x29, 0xbb, 0x4a, 0x31, 0x4d, 0x72, 0xaf, 0x17, 0x48, 0xf9, 0x84, 0xe4, 0x5e, 0xcf,
- 0x49, 0x8f, 0x60, 0x4b, 0x91, 0x42, 0xc1, 0x07, 0xee, 0x80, 0x06, 0xf3, 0x61, 0xad, 0x8f, 0xdd,
- 0xeb, 0x8b, 0x39, 0xda, 0xfa, 0xd5, 0x80, 0xed, 0xfe, 0x8a, 0x23, 0xc7, 0xb0, 0xe1, 0x0b, 0x42,
- 0xe2, 0xce, 0x5c, 0xc9, 0xc9, 0x32, 0xdd, 0x8e, 0xc9, 0xe8, 0x31, 0x14, 0x6f, 0x48, 0x10, 0xf0,
- 0x78, 0xa9, 0xbc, 0x59, 0x2d, 0x61, 0xa3, 0xcf, 0xa0, 0x20, 0x08, 0x4e, 0x7a, 0xf6, 0x4d, 0x4a,
- 0x8a, 0xda, 0xfa, 0x33, 0x0f, 0xb5, 0xac, 0xc7, 0x3d, 0xa8, 0xe3, 0x18, 0x48, 0x97, 0x4c, 0x3c,
- 0x54, 0xf7, 0x96, 0x87, 0x4a, 0x73, 0x92, 0x0d, 0x53, 0xc3, 0x8b, 0x22, 0xfa, 0x0e, 0x1a, 0xd2,
- 0xa5, 0x81, 0x83, 0x05, 0x0f, 0x9d, 0xd4, 0x9a, 0xa7, 0xed, 0x27, 0x11, 0x3d, 0x58, 0x6a, 0x8e,
- 0x75, 0x95, 0x3f, 0xcb, 0xd9, 0x3b, 0x72, 0x6d, 0x4b, 0x9c, 0x03, 0x12, 0x04, 0x2f, 0x5b, 0xfe,
- 0x47, 0x61, 0x9f, 0xe5, 0x6c, 0x53, 0x2c, 0x57, 0xe9, 0x25, 0xdc, 0x9d, 0xae, 0x31, 0x18, 0xcf,
- 0xec, 0x07, 0x59, 0x83, 0xfd, 0x35, 0x16, 0xb7, 0xa7, 0xcb, 0x26, 0x7b, 0xe6, 0x3c, 0x8d, 0xb1,
- 0xb5, 0xd6, 0x2f, 0x05, 0xa8, 0x26, 0x43, 0xf0, 0x52, 0x75, 0xef, 0xdb, 0x6e, 0xae, 0x7d, 0x00,
- 0x9f, 0x8c, 0xf5, 0x2c, 0xce, 0x46, 0xb7, 0x92, 0x20, 0x16, 0x56, 0x8b, 0x2d, 0x1c, 0x50, 0xe9,
- 0x8c, 0xdd, 0x50, 0x67, 0xa4, 0x62, 0x97, 0x94, 0xfc, 0xdc, 0x0d, 0xd1, 0x43, 0xa8, 0xbb, 0x24,
- 0x72, 0x08, 0xf3, 0xc4, 0x4d, 0xa8, 0x6f, 0x55, 0x11, 0x96, 0xed, 0x9a, 0x4b, 0xa2, 0xd3, 0x19,
- 0xf8, 0x1f, 0x3c, 0x32, 0x99, 0xb7, 0xad, 0xf8, 0xb7, 0x6f, 0x5b, 0x29, 0xf3, 0xb6, 0xad, 0x36,
- 0x5e, 0xf9, 0x5f, 0x37, 0x5e, 0x6f, 0x39, 0xeb, 0x8d, 0x8a, 0xae, 0xe1, 0x7a, 0x1b, 0xc9, 0x20,
- 0xa4, 0x36, 0x62, 0xb1, 0xf5, 0x87, 0x01, 0xb5, 0xc5, 0x3a, 0xfd, 0xff, 0x1b, 0xb4, 0x3b, 0xdf,
- 0xa0, 0x7a, 0xaf, 0x45, 0x8d, 0xa2, 0xde, 0xa0, 0x7b, 0x6b, 0x37, 0xa8, 0x76, 0x6a, 0xb6, 0x3d,
- 0x13, 0x17, 0xd7, 0x3c, 0x11, 0xa5, 0x35, 0x4f, 0x44, 0xfb, 0x4b, 0xa8, 0xcc, 0x9a, 0x0a, 0x55,
- 0xa1, 0xfc, 0xf5, 0xc5, 0xe5, 0x95, 0x7d, 0xda, 0x7d, 0x6e, 0xe6, 0x50, 0x1d, 0xe0, 0xe4, 0x45,
- 0xff, 0x3c, 0x91, 0x0d, 0xb4, 0x0d, 0xb5, 0x9e, 0x75, 0x62, 0xd9, 0xa7, 0xcf, 0xae, 0xac, 0x17,
- 0xe7, 0xdd, 0xaf, 0xcc, 0x7c, 0xfb, 0x09, 0x98, 0xcb, 0x75, 0x47, 0x25, 0x28, 0xf4, 0x6d, 0xdb,
- 0xcc, 0x21, 0x04, 0xf5, 0x4b, 0x29, 0xa8, 0x27, 0x2f, 0x92, 0x4a, 0x9b, 0x06, 0x02, 0x28, 0x9e,
- 0xdd, 0x0c, 0x04, 0xc5, 0x66, 0xbe, 0xcd, 0xa0, 0xba, 0xf8, 0x37, 0x84, 0x76, 0x60, 0x7b, 0x51,
- 0x76, 0xce, 0x39, 0x23, 0x66, 0x0e, 0xdd, 0x85, 0xad, 0x2c, 0xdc, 0x35, 0x0d, 0x74, 0x0f, 0x76,
- 0x33, 0x60, 0x8f, 0x44, 0xf2, 0x74, 0x38, 0xe4, 0x42, 0x9a, 0xf9, 0x15, 0x43, 0xdd, 0x89, 0xe4,
- 0x66, 0xa1, 0xfd, 0x74, 0xb6, 0xd9, 0x12, 0x4f, 0xab, 0x50, 0x4e, 0xf7, 0x8c, 0x99, 0x43, 0x35,
- 0xa8, 0xf4, 0x67, 0xa2, 0xa1, 0xc2, 0xb0, 0x09, 0x36, 0xf3, 0xa8, 0x0c, 0x77, 0xd4, 0x88, 0x9b,
- 0x85, 0xf6, 0x4f, 0x06, 0xdc, 0xbf, 0xed, 0xcf, 0x04, 0x3d, 0x84, 0x0f, 0x6f, 0xfb, 0x9e, 0x46,
- 0x74, 0x00, 0x1f, 0xdd, 0x4a, 0xeb, 0x46, 0xd1, 0x44, 0x10, 0x6c, 0x1a, 0xe8, 0x13, 0x78, 0x74,
- 0x2b, 0x73, 0x31, 0xec, 0xde, 0x37, 0xd0, 0xe4, 0xc2, 0x3f, 0xe4, 0x21, 0x61, 0x1e, 0x17, 0xf8,
- 0x30, 0xfe, 0x51, 0xce, 0xb4, 0xcc, 0xb7, 0xc7, 0x3e, 0x95, 0xa3, 0xc9, 0xe0, 0xd0, 0xe3, 0xe3,
- 0x4e, 0x4a, 0xec, 0xc4, 0xc4, 0x4f, 0x93, 0x3f, 0xea, 0x57, 0xc7, 0x1d, 0x9f, 0x67, 0xfe, 0xab,
- 0x07, 0x45, 0xfd, 0xe9, 0xf3, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0x1e, 0x89, 0x67, 0x3b, 0x7c,
- 0x0b, 0x00, 0x00,
+ // 2138 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0x4d, 0x6f, 0x1b, 0xb9,
+ 0xf9, 0xb7, 0x24, 0x5b, 0x2f, 0x8f, 0x24, 0x7b, 0x4c, 0xc7, 0x1b, 0xc5, 0x71, 0x36, 0x8e, 0xf6,
+ 0xbf, 0xff, 0x75, 0x5d, 0x34, 0xee, 0x3a, 0x4e, 0xf6, 0x90, 0x6d, 0x17, 0x52, 0x6c, 0x24, 0x6a,
+ 0x37, 0x8e, 0x3d, 0x76, 0xeb, 0xa2, 0x87, 0x0e, 0x46, 0x33, 0x94, 0xcc, 0xee, 0x88, 0x1c, 0x93,
+ 0x9c, 0x38, 0xde, 0x53, 0x51, 0xa0, 0x9f, 0xa2, 0xed, 0xa5, 0x40, 0xaf, 0xbd, 0xec, 0xa5, 0x68,
+ 0x2f, 0x05, 0xfa, 0x2d, 0xfa, 0x09, 0x0a, 0xf4, 0x4b, 0x14, 0xe4, 0xcc, 0x68, 0x5e, 0xa4, 0x24,
+ 0x76, 0xaa, 0x2c, 0xd0, 0xde, 0xc8, 0x87, 0x3f, 0x3e, 0x7c, 0xf8, 0xbc, 0xfc, 0x48, 0xce, 0xc0,
+ 0xc6, 0x4b, 0xe6, 0xc9, 0x33, 0xdb, 0xf2, 0x39, 0x93, 0x4c, 0x6c, 0x4b, 0xec, 0x9c, 0xa9, 0xf6,
+ 0x80, 0x78, 0xf8, 0xbe, 0x96, 0xa1, 0x46, 0x5a, 0xb6, 0xb6, 0x3e, 0x64, 0x6c, 0xe8, 0xe1, 0x6d,
+ 0xdb, 0x27, 0xdb, 0x36, 0xa5, 0x4c, 0xda, 0x92, 0x30, 0x2a, 0x42, 0x6c, 0xfb, 0x57, 0x45, 0x58,
+ 0x3a, 0x76, 0xce, 0xb0, 0x1b, 0x78, 0x98, 0x3f, 0x61, 0x74, 0x40, 0x86, 0xe8, 0x21, 0xd4, 0x5c,
+ 0xc2, 0xb1, 0xa3, 0x70, 0xad, 0xc2, 0x46, 0x61, 0x73, 0x71, 0xe7, 0xe6, 0xfd, 0xcc, 0x3a, 0x7b,
+ 0xf1, 0xb0, 0x99, 0x20, 0xd1, 0x17, 0xd0, 0xb4, 0x5d, 0x97, 0xa8, 0xb6, 0xed, 0x59, 0xfd, 0x8b,
+ 0x56, 0x51, 0x4f, 0x5d, 0xcb, 0x4e, 0xed, 0x8c, 0x21, 0xdd, 0x53, 0xb3, 0x91, 0x4c, 0xe8, 0x5e,
+ 0xa0, 0x35, 0xa8, 0xfa, 0x9c, 0x30, 0x4e, 0xe4, 0x65, 0xab, 0xb4, 0x51, 0xd8, 0xac, 0x98, 0xe3,
+ 0x3e, 0xfa, 0x00, 0xca, 0x17, 0x98, 0x0c, 0xcf, 0x64, 0x6b, 0x5e, 0x8f, 0x44, 0x3d, 0xd4, 0x81,
+ 0x86, 0x50, 0xe6, 0x5b, 0x3e, 0xf3, 0x88, 0x73, 0xd9, 0x5a, 0xd0, 0x6b, 0x7e, 0x98, 0x5d, 0x33,
+ 0xda, 0x20, 0xa1, 0xc3, 0x43, 0x8d, 0x32, 0xeb, 0x7a, 0x4e, 0xd8, 0x69, 0xff, 0xb9, 0x00, 0xe8,
+ 0x84, 0xdb, 0x83, 0x01, 0x71, 0x8e, 0xcf, 0x6c, 0x9f, 0xd0, 0x61, 0x8f, 0x0e, 0x18, 0x32, 0xa0,
+ 0xe4, 0x10, 0xae, 0xf7, 0x5f, 0x31, 0x55, 0x53, 0x4b, 0xfa, 0x42, 0x6f, 0x4b, 0x49, 0xfa, 0x42,
+ 0x49, 0x7c, 0xc2, 0x23, 0x63, 0x55, 0x53, 0x4b, 0xfa, 0x22, 0x32, 0x52, 0x35, 0x95, 0x64, 0x48,
+ 0xb8, 0x36, 0xac, 0x62, 0xaa, 0x26, 0x7a, 0x06, 0x60, 0xbb, 0xae, 0xd5, 0xbf, 0xb0, 0x08, 0x75,
+ 0x5b, 0x65, 0x6d, 0xf1, 0x56, 0xd6, 0xe2, 0x1e, 0x1d, 0x60, 0xce, 0xb1, 0x1b, 0x7b, 0xab, 0x7b,
+ 0xda, 0xa3, 0x2e, 0x71, 0x74, 0xe8, 0xcc, 0xaa, 0xed, 0xba, 0xdd, 0x8b, 0x1e, 0x75, 0xdb, 0xbf,
+ 0x2f, 0x82, 0x11, 0x9b, 0x1e, 0x07, 0xf1, 0x5d, 0xc3, 0x77, 0x0b, 0xaa, 0xb6, 0xe7, 0x31, 0xc7,
+ 0x22, 0x6e, 0xb4, 0xc5, 0x8a, 0xee, 0xf7, 0x5c, 0xf4, 0x18, 0x6a, 0x22, 0x56, 0xaf, 0x37, 0x5b,
+ 0xdf, 0xb9, 0x33, 0xd5, 0xc3, 0x71, 0x0a, 0x99, 0x09, 0x1e, 0x99, 0x70, 0x43, 0x86, 0x26, 0x5a,
+ 0x22, 0x74, 0xaf, 0x45, 0xe8, 0x80, 0x69, 0x17, 0xd5, 0x77, 0x36, 0xb2, 0x7a, 0x26, 0xe3, 0x60,
+ 0x22, 0x39, 0x19, 0x9b, 0xff, 0x87, 0xa5, 0xf4, 0x34, 0x65, 0x72, 0xe8, 0xdf, 0xa6, 0x12, 0x1f,
+ 0x86, 0xd2, 0x9e, 0xdb, 0xfe, 0x4b, 0x01, 0x96, 0xf3, 0xfe, 0x11, 0xe8, 0x26, 0x54, 0x08, 0x95,
+ 0x03, 0x35, 0x2b, 0x8c, 0x6e, 0x59, 0x75, 0x7b, 0x2e, 0x5a, 0x85, 0x32, 0xa3, 0x41, 0xe2, 0x80,
+ 0x05, 0x46, 0x83, 0x50, 0x1c, 0x50, 0xa2, 0xc4, 0x61, 0x58, 0x17, 0x02, 0x4a, 0x7a, 0xae, 0x52,
+ 0xe3, 0x33, 0x2e, 0x2d, 0xca, 0xa2, 0xc5, 0xcb, 0xaa, 0x7b, 0xc0, 0xd0, 0x3e, 0x2c, 0x8e, 0x77,
+ 0xac, 0x56, 0x15, 0xad, 0xd2, 0x46, 0x69, 0xb3, 0x9e, 0xcf, 0xca, 0xbc, 0x61, 0x66, 0x53, 0xa6,
+ 0x24, 0xa2, 0xfd, 0x08, 0x56, 0x4f, 0x6c, 0xe2, 0xed, 0x71, 0xe6, 0xef, 0x11, 0xe1, 0xd8, 0xdc,
+ 0x8d, 0xea, 0xf3, 0x0e, 0xc0, 0x79, 0x80, 0x03, 0x6c, 0x09, 0xf2, 0x35, 0x8e, 0xb6, 0x50, 0xd3,
+ 0x92, 0x63, 0xf2, 0x35, 0x6e, 0xff, 0xa6, 0x00, 0x86, 0x89, 0xdd, 0xec, 0x9c, 0x8f, 0xa0, 0x39,
+ 0x22, 0xd4, 0x92, 0x67, 0x1c, 0x8b, 0x33, 0xe6, 0xc5, 0x3b, 0x6f, 0x8c, 0x08, 0x3d, 0x89, 0x65,
+ 0x1a, 0x64, 0xbf, 0x4a, 0x81, 0x8a, 0x11, 0xc8, 0x7e, 0x95, 0x80, 0x3e, 0x81, 0x25, 0x05, 0xf2,
+ 0x39, 0xeb, 0xdb, 0x7d, 0xe2, 0x25, 0xc5, 0xba, 0x38, 0xb2, 0x5f, 0x1d, 0x26, 0xd2, 0xf6, 0x37,
+ 0x05, 0x58, 0x3e, 0x9d, 0x30, 0x64, 0x17, 0x16, 0x86, 0x1c, 0xe3, 0x30, 0x33, 0x27, 0x7c, 0x92,
+ 0x87, 0x9b, 0x21, 0x18, 0x3d, 0x82, 0xf2, 0x25, 0xf6, 0x3c, 0x16, 0x92, 0xca, 0xdb, 0xa7, 0x45,
+ 0x68, 0xf4, 0x7d, 0x28, 0x71, 0xec, 0x46, 0x39, 0xfb, 0xb6, 0x49, 0x0a, 0xda, 0xfe, 0x67, 0x11,
+ 0x9a, 0x59, 0x8b, 0xbb, 0xb0, 0xe8, 0x86, 0x82, 0x98, 0x64, 0xc2, 0xa2, 0xba, 0x9d, 0x2f, 0x2a,
+ 0x8d, 0x89, 0x18, 0xa6, 0xe9, 0xa6, 0xbb, 0xe8, 0x17, 0xd0, 0x92, 0x36, 0xf1, 0x2c, 0x97, 0x33,
+ 0xdf, 0x8a, 0xb5, 0x39, 0x5a, 0x7f, 0xb4, 0xa3, 0x8f, 0x72, 0xc9, 0x31, 0x2d, 0xf2, 0xcf, 0xe6,
+ 0xcc, 0x55, 0x39, 0x35, 0x25, 0x0e, 0x00, 0x71, 0xec, 0xe6, 0x35, 0x5f, 0x69, 0xdb, 0xcf, 0xe6,
+ 0x4c, 0x83, 0xe7, 0xa3, 0x74, 0x04, 0x2b, 0x17, 0x53, 0x14, 0x86, 0x35, 0x7b, 0x37, 0xab, 0xf0,
+ 0x74, 0x8a, 0xc6, 0xe5, 0x8b, 0xbc, 0xca, 0xae, 0x91, 0xb8, 0x31, 0xd4, 0xd6, 0xfe, 0x63, 0x09,
+ 0x1a, 0x51, 0x11, 0x1c, 0xa9, 0xec, 0x7d, 0x57, 0xe6, 0xba, 0x03, 0x30, 0xc4, 0x23, 0x5d, 0x8b,
+ 0xe3, 0xd2, 0xad, 0x45, 0x92, 0x9e, 0xab, 0x88, 0xcd, 0xef, 0x13, 0x69, 0x8d, 0x6c, 0x5f, 0x7b,
+ 0xa4, 0x66, 0x56, 0x54, 0xff, 0xb9, 0xed, 0xa3, 0x8f, 0x61, 0xd1, 0xc6, 0xc2, 0xc2, 0xd4, 0xe1,
+ 0x97, 0xbe, 0x5e, 0x55, 0xed, 0xb0, 0x6a, 0x36, 0x6d, 0x2c, 0xf6, 0xc7, 0xc2, 0x19, 0x1c, 0x32,
+ 0x99, 0xb3, 0xad, 0xfc, 0xda, 0xb3, 0xad, 0x92, 0x39, 0xdb, 0x26, 0x13, 0xaf, 0x7a, 0xed, 0xc4,
+ 0xeb, 0xe6, 0xbd, 0xde, 0xaa, 0xe9, 0x18, 0x4e, 0xd7, 0x11, 0x15, 0x42, 0xac, 0x23, 0xec, 0xb6,
+ 0xff, 0x51, 0x80, 0x66, 0x3a, 0x4e, 0xef, 0x9f, 0x41, 0x3b, 0x09, 0x83, 0x6a, 0x5e, 0x13, 0xad,
+ 0xb2, 0x66, 0xd0, 0xb5, 0xa9, 0x0c, 0xaa, 0x8d, 0x1a, 0xb3, 0x67, 0x64, 0xe2, 0x94, 0x23, 0xa2,
+ 0x32, 0xed, 0x88, 0x18, 0xc0, 0x52, 0x8f, 0x0a, 0x69, 0x53, 0x07, 0x3f, 0x61, 0x54, 0x72, 0xe6,
+ 0xa9, 0x13, 0x9b, 0xd1, 0x40, 0xef, 0xac, 0x66, 0xaa, 0xa6, 0x92, 0x04, 0x94, 0xe8, 0x3d, 0xd5,
+ 0x4c, 0xd5, 0x44, 0xdb, 0x70, 0x43, 0xb1, 0xe0, 0x10, 0x8f, 0x2c, 0xdf, 0xbe, 0xf4, 0x98, 0xed,
+ 0x86, 0x6c, 0x1c, 0x26, 0xd8, 0xf2, 0xc8, 0x7e, 0xf5, 0x14, 0x8f, 0x0e, 0xc3, 0x11, 0xcd, 0xca,
+ 0xbf, 0x2e, 0x42, 0xfd, 0x68, 0xcc, 0xa2, 0x02, 0xdd, 0x83, 0xc6, 0x79, 0xc2, 0xb4, 0x9f, 0xea,
+ 0xd5, 0x9a, 0x66, 0xfd, 0x7c, 0x0c, 0xf9, 0x34, 0x07, 0xd9, 0xd1, 0xcb, 0x67, 0x20, 0x3b, 0x39,
+ 0xc8, 0x03, 0xbd, 0x7c, 0x06, 0xf2, 0x20, 0x07, 0xd9, 0xd5, 0x11, 0xc8, 0x40, 0x76, 0x73, 0x90,
+ 0x87, 0x3a, 0x18, 0x19, 0xc8, 0xc3, 0x1c, 0xe4, 0x91, 0xce, 0xe1, 0x0c, 0xe4, 0x51, 0x0e, 0xf2,
+ 0x99, 0x76, 0x77, 0x06, 0xf2, 0x59, 0xfb, 0x9b, 0x05, 0x58, 0x56, 0x7e, 0x61, 0x5c, 0x76, 0xa4,
+ 0xe4, 0xa4, 0x1f, 0x48, 0x2c, 0x72, 0xf5, 0x5b, 0xc8, 0xd7, 0xef, 0x3a, 0x80, 0x72, 0xf5, 0x79,
+ 0xe8, 0xe0, 0x30, 0x06, 0xd5, 0x91, 0xfd, 0xea, 0x48, 0xf9, 0xf5, 0xfa, 0xd5, 0x5d, 0xcb, 0x57,
+ 0xf7, 0x8f, 0x61, 0x59, 0x8c, 0x6b, 0xf7, 0x7a, 0x25, 0x6e, 0x88, 0x9c, 0x44, 0xed, 0x25, 0xae,
+ 0x6b, 0xeb, 0x3c, 0xaa, 0xf4, 0x5a, 0x2c, 0x39, 0x7a, 0xaf, 0xa5, 0xbe, 0xff, 0x9a, 0x52, 0x7f,
+ 0xdb, 0xb1, 0x97, 0xad, 0x76, 0xf4, 0x14, 0x96, 0xb3, 0x6a, 0xac, 0x97, 0x3b, 0xad, 0xc5, 0xb7,
+ 0x93, 0xc6, 0x52, 0x46, 0xcd, 0x4f, 0x75, 0x6e, 0x12, 0x61, 0x8d, 0x02, 0x4f, 0x12, 0xc7, 0x16,
+ 0xb2, 0x05, 0xda, 0xf9, 0x75, 0x22, 0x9e, 0xc7, 0x22, 0xb4, 0x09, 0xc6, 0x78, 0x5c, 0xd7, 0x12,
+ 0x71, 0x5b, 0xf5, 0xe8, 0x32, 0x11, 0xcb, 0x9f, 0xe2, 0x51, 0xcf, 0x45, 0x3f, 0x80, 0xdb, 0xee,
+ 0x25, 0xb5, 0x47, 0xc4, 0xb1, 0x6c, 0xc7, 0xc1, 0x42, 0x28, 0xe3, 0x54, 0xb5, 0x5a, 0x1e, 0x11,
+ 0xb2, 0xd5, 0xd0, 0xba, 0x5b, 0x11, 0xa4, 0xa3, 0x11, 0x51, 0x39, 0x7f, 0x49, 0x84, 0x44, 0x8f,
+ 0x61, 0x4d, 0xa8, 0x87, 0xcf, 0xf4, 0xd9, 0x4d, 0x3d, 0xfb, 0x66, 0x88, 0x98, 0x98, 0xdc, 0xfe,
+ 0x43, 0x11, 0x56, 0xc6, 0xb7, 0xb4, 0x54, 0xde, 0xce, 0xe8, 0xa2, 0xdd, 0x4c, 0x2e, 0xda, 0x13,
+ 0x4f, 0xa8, 0xd2, 0x7f, 0xf0, 0x84, 0x9a, 0x7f, 0xed, 0x31, 0xb3, 0x90, 0xc9, 0xbd, 0x3d, 0x58,
+ 0x3c, 0xb7, 0x32, 0xe7, 0x5b, 0xf9, 0x4a, 0xc9, 0xdf, 0x38, 0x3f, 0x4e, 0xbd, 0xa2, 0xfe, 0x56,
+ 0x86, 0x95, 0xfd, 0xc3, 0x17, 0x07, 0x9a, 0x7e, 0x53, 0x4e, 0xca, 0x56, 0x6f, 0xe1, 0x0d, 0xd5,
+ 0x5b, 0xcc, 0x56, 0x6f, 0x96, 0x15, 0x42, 0x62, 0x4b, 0xb1, 0xc2, 0x15, 0x8b, 0xfb, 0x1e, 0x34,
+ 0xe2, 0x93, 0x44, 0x5e, 0xfa, 0x58, 0x6f, 0xbd, 0x66, 0xd6, 0x23, 0xd9, 0xc9, 0xa5, 0x8f, 0xd1,
+ 0x2e, 0x7c, 0x10, 0x50, 0xa1, 0x76, 0x41, 0x24, 0x76, 0xad, 0x21, 0xb7, 0xa9, 0x0c, 0xad, 0x0d,
+ 0x49, 0xee, 0x46, 0x6a, 0xf4, 0xa9, 0x1a, 0xd4, 0x96, 0x7f, 0x07, 0x0c, 0xca, 0x46, 0x44, 0xc5,
+ 0x89, 0x50, 0x89, 0xf9, 0x4b, 0xdb, 0x8b, 0x18, 0x6f, 0x29, 0x92, 0xf7, 0x22, 0x31, 0xda, 0x81,
+ 0x55, 0xc9, 0x3c, 0xcc, 0x6d, 0x19, 0xba, 0xd8, 0xb3, 0x7e, 0x49, 0xa4, 0xc4, 0x5c, 0xd7, 0x78,
+ 0xd3, 0x5c, 0x19, 0x0f, 0x1e, 0x32, 0xcf, 0xfb, 0x91, 0x1e, 0x42, 0x3f, 0x84, 0xdb, 0x1c, 0x9f,
+ 0x07, 0x58, 0x48, 0x4b, 0x72, 0x9b, 0x8a, 0x11, 0x11, 0x82, 0x30, 0x1a, 0x47, 0xa8, 0xa6, 0x67,
+ 0xde, 0x8a, 0x20, 0x27, 0x29, 0x44, 0x44, 0x06, 0xeb, 0x00, 0x34, 0x18, 0x29, 0xb7, 0x63, 0x29,
+ 0x74, 0xe9, 0x35, 0xcd, 0x2a, 0x0d, 0x46, 0x47, 0xc7, 0x58, 0x0a, 0xf4, 0x79, 0x86, 0xaa, 0x85,
+ 0xae, 0xb9, 0xfa, 0xce, 0xad, 0x6c, 0xc0, 0x53, 0xa7, 0x55, 0x9a, 0xc5, 0xc5, 0x74, 0xc2, 0x6c,
+ 0xcc, 0x84, 0x30, 0x9b, 0x61, 0x98, 0xa7, 0x11, 0xe6, 0xa2, 0x1e, 0x7a, 0x3d, 0x61, 0x2e, 0xcd,
+ 0x80, 0x30, 0x8d, 0x99, 0x11, 0xe6, 0xf2, 0xf5, 0x09, 0xb3, 0xfd, 0xa7, 0x79, 0xa8, 0x9f, 0x24,
+ 0x97, 0x13, 0x84, 0x60, 0x9e, 0xda, 0xa3, 0xb8, 0x68, 0x74, 0x1b, 0xb5, 0xa0, 0xf2, 0x12, 0x73,
+ 0x15, 0xe8, 0x98, 0x3b, 0xa2, 0xae, 0xca, 0xf4, 0xf8, 0xae, 0xa3, 0x33, 0x3d, 0x3c, 0x0c, 0xeb,
+ 0x91, 0x4c, 0x67, 0x7a, 0x1b, 0x9a, 0x2a, 0x29, 0xf4, 0xa5, 0x85, 0x71, 0x29, 0xe2, 0xbb, 0x00,
+ 0x0d, 0x46, 0xd1, 0xa9, 0x2c, 0xd0, 0x33, 0x30, 0x48, 0x74, 0x1f, 0x8a, 0x49, 0x52, 0x17, 0xcd,
+ 0xc4, 0x93, 0x3f, 0x77, 0x6b, 0x32, 0x97, 0x48, 0xee, 0x1a, 0xb5, 0x07, 0x8d, 0x40, 0x58, 0xc9,
+ 0x87, 0x83, 0xb2, 0xd6, 0x72, 0xef, 0x35, 0x1f, 0x0e, 0x12, 0xca, 0x30, 0xeb, 0x81, 0x48, 0xbe,
+ 0x66, 0xec, 0x41, 0xc3, 0x4d, 0x6b, 0xa9, 0x5c, 0x59, 0x8b, 0x9b, 0xd2, 0x32, 0x84, 0x8d, 0xc0,
+ 0x17, 0x92, 0x63, 0x3b, 0xd9, 0xbe, 0x65, 0xc7, 0xe0, 0xf0, 0x14, 0xa8, 0xea, 0x2b, 0x66, 0xee,
+ 0x71, 0x33, 0x71, 0x5b, 0x31, 0xd7, 0x63, 0x45, 0xf9, 0x21, 0x7d, 0xd0, 0x7c, 0x05, 0x6d, 0x97,
+ 0x5d, 0xd0, 0xb7, 0x2c, 0x55, 0xbb, 0xda, 0x52, 0x1f, 0x26, 0xaa, 0xa6, 0x2d, 0xd6, 0xfe, 0x7b,
+ 0x09, 0x96, 0xf6, 0x7d, 0x46, 0xff, 0x87, 0x92, 0x46, 0x19, 0x64, 0x3b, 0x5f, 0xd9, 0xc3, 0xc8,
+ 0xa0, 0x72, 0x64, 0x50, 0x28, 0xd3, 0x06, 0xb9, 0x70, 0x67, 0x1c, 0xcb, 0xf0, 0x3b, 0x48, 0xce,
+ 0xbb, 0x15, 0xed, 0xdd, 0x5c, 0x8a, 0x4c, 0x39, 0x9b, 0xcc, 0xb5, 0x58, 0x4f, 0x76, 0x40, 0x07,
+ 0xf2, 0x0c, 0xee, 0xa6, 0x02, 0x39, 0x75, 0x9d, 0xea, 0x55, 0xd7, 0x59, 0x4f, 0x34, 0x4d, 0xae,
+ 0xd4, 0xfe, 0xd7, 0x3c, 0xac, 0xa4, 0x22, 0x18, 0xbb, 0xe8, 0x9a, 0x91, 0x7c, 0x00, 0xab, 0x22,
+ 0xe8, 0x0b, 0x87, 0x93, 0x3e, 0xe6, 0x16, 0x71, 0x31, 0x95, 0x64, 0x40, 0xa2, 0xef, 0x75, 0x35,
+ 0xf3, 0x46, 0x32, 0xd8, 0x1b, 0x8f, 0x4d, 0x84, 0x7f, 0xfe, 0x0a, 0xe1, 0x5f, 0xb8, 0x5a, 0xf8,
+ 0xcb, 0x33, 0xe1, 0x8c, 0xca, 0x4c, 0x38, 0xa3, 0xfa, 0xde, 0x38, 0xa3, 0xf6, 0xed, 0x71, 0x06,
+ 0xcc, 0x86, 0x33, 0x7e, 0x3b, 0x0f, 0x37, 0x73, 0x9c, 0xf1, 0x5f, 0x98, 0x71, 0xe9, 0x3b, 0x74,
+ 0x39, 0x7b, 0x87, 0x9e, 0x96, 0x8c, 0x95, 0x99, 0x70, 0x51, 0xf5, 0x1d, 0xb8, 0xa8, 0xf6, 0x2d,
+ 0x71, 0x11, 0xcc, 0x86, 0x8b, 0xfe, 0xaa, 0xbf, 0x1d, 0x0b, 0x16, 0x70, 0x27, 0x49, 0x8b, 0x15,
+ 0x58, 0x90, 0x7e, 0xfc, 0x34, 0x6f, 0x9a, 0xf3, 0xd2, 0xef, 0xb9, 0x13, 0x81, 0x2c, 0x4e, 0x06,
+ 0xf2, 0x9d, 0x12, 0x24, 0x1d, 0xd9, 0xf9, 0x6c, 0x64, 0xef, 0x42, 0x3d, 0x79, 0x11, 0xa8, 0xb4,
+ 0x28, 0x6d, 0x36, 0x4d, 0x18, 0x3f, 0x09, 0xc4, 0xd6, 0xe7, 0x50, 0x1b, 0xbf, 0xb8, 0x50, 0x03,
+ 0xaa, 0x3f, 0x39, 0x3c, 0x3e, 0x31, 0xf7, 0x3b, 0xcf, 0x8d, 0x39, 0xb4, 0x08, 0xb0, 0xf7, 0xe2,
+ 0xf4, 0x20, 0xea, 0x17, 0xd0, 0x32, 0x34, 0xbb, 0xbd, 0xbd, 0x9e, 0xb9, 0xff, 0xe4, 0xa4, 0xf7,
+ 0xe2, 0xa0, 0xf3, 0xa5, 0x51, 0xdc, 0x7a, 0x0c, 0x46, 0xfe, 0xbe, 0x8a, 0x2a, 0x50, 0x3a, 0x35,
+ 0x4d, 0x63, 0x0e, 0x21, 0x58, 0x3c, 0x96, 0x9c, 0x38, 0xf2, 0x30, 0xba, 0x9a, 0x1a, 0x05, 0x04,
+ 0x50, 0x7e, 0x76, 0xd9, 0xe7, 0xc4, 0x35, 0x8a, 0x5b, 0x14, 0x1a, 0xe9, 0x67, 0x19, 0x5a, 0x85,
+ 0xe5, 0x74, 0xdf, 0x3a, 0x60, 0x14, 0x1b, 0x73, 0x68, 0x05, 0x96, 0xb2, 0xe2, 0x8e, 0x51, 0x40,
+ 0xb7, 0xe1, 0x66, 0x46, 0xd8, 0xc5, 0x42, 0xee, 0x0f, 0x06, 0x8c, 0x4b, 0xa3, 0x38, 0xa1, 0xa8,
+ 0x13, 0x48, 0x66, 0x94, 0xb6, 0xbe, 0x18, 0x7f, 0xa5, 0x8e, 0x2c, 0x6d, 0x40, 0x35, 0xfe, 0x66,
+ 0x6c, 0xcc, 0xa1, 0x26, 0xd4, 0x4e, 0xc7, 0xdd, 0x82, 0xda, 0x86, 0x89, 0x5d, 0xa3, 0x88, 0xaa,
+ 0x30, 0x7f, 0xaa, 0x5a, 0xa5, 0xad, 0xdf, 0x15, 0x60, 0xfd, 0x4d, 0x7f, 0x99, 0xd0, 0xc7, 0x70,
+ 0xef, 0x4d, 0xe3, 0xf1, 0x8e, 0x36, 0xe1, 0xff, 0xde, 0x08, 0xeb, 0x08, 0x11, 0x70, 0xec, 0x1a,
+ 0x05, 0xf4, 0x5d, 0xf8, 0xe4, 0x8d, 0xc8, 0xf4, 0xb6, 0xbb, 0x3f, 0x83, 0x0d, 0xc6, 0x87, 0xf7,
+ 0x99, 0x8f, 0xa9, 0xc3, 0xb8, 0x7b, 0x3f, 0xfc, 0xe9, 0x99, 0x49, 0xef, 0x9f, 0xef, 0x0e, 0x89,
+ 0x3c, 0x0b, 0xfa, 0xf7, 0x1d, 0x36, 0xda, 0x8e, 0x81, 0xdb, 0x21, 0xf0, 0x7b, 0xd1, 0xdf, 0xd1,
+ 0x97, 0xbb, 0xdb, 0x43, 0x96, 0xf9, 0x47, 0xda, 0x2f, 0xeb, 0xa1, 0x07, 0xff, 0x0e, 0x00, 0x00,
+ 0xff, 0xff, 0xc7, 0x6b, 0x83, 0x2e, 0x48, 0x1d, 0x00, 0x00,
}
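
The messages generated above (InstanceControl, QThresholds, GemPortAttributes, SchedulerAttributes, EPONQueueAttributes, TechProfile, EponTechProfile, TechProfileInstance, EponTechProfileInstance and ResourceInstance) give the adapter a protobuf representation of tech-profile data. As a minimal sketch outside this change, the compact ResourceInstance could be serialized before being written to a KV store such as etcd; the subscriber key and field values below are assumptions for illustration only, not part of the patch.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
)

func main() {
	// Keep only the identifiers needed to rebuild state on restart,
	// rather than the full TechProfileInstance document.
	ri := &tp_pb.ResourceInstance{
		TpId:                 64,
		ProfileType:          "XGS-PON",
		SubscriberIdentifier: "olt-1/pon-0/onu-1/uni-0", // hypothetical subscriber key
		AllocId:              1024,
		GemportIds:           []uint32{1025, 1026, 1027, 1028},
	}
	blob, err := proto.Marshal(ri) // binary proto keeps the stored footprint small
	if err != nil {
		panic(err)
	}
	fmt.Printf("resource instance serialized to %d bytes\n", len(blob))
}
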
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/voltha.pb.go b/vendor/github.com/opencord/voltha-protos/v4/go/voltha/voltha.pb.go
index 7e9a2a5..cc7717b 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/voltha.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v4/go/voltha/voltha.pb.go
@@ -107,6 +107,7 @@
const OperStatus_ACTIVE = OperStatus_Types(common.OperStatus_ACTIVE)
const OperStatus_FAILED = OperStatus_Types(common.OperStatus_FAILED)
const OperStatus_RECONCILING = OperStatus_Types(common.OperStatus_RECONCILING)
+const OperStatus_RECONCILING_FAILED = OperStatus_Types(common.OperStatus_RECONCILING_FAILED)
// ConnectStatus_Types from public import voltha_protos/common.proto
type ConnectStatus_Types = common.ConnectStatus_Types
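
The new OperStatus_RECONCILING_FAILED alias exposes the constant added to voltha_protos/common.proto through the voltha package. A minimal sketch, with a hypothetical reconcile helper that is not part of this change, shows how an adapter could choose between the two reconcile-related states.

package main

import (
	"fmt"

	"github.com/opencord/voltha-protos/v4/go/common"
)

// reconcileDevice is a placeholder; a real implementation would rebuild
// device state from the OLT after an adapter restart.
func reconcileDevice(deviceID string) error {
	return fmt.Errorf("device %s not reachable", deviceID)
}

func main() {
	status := common.OperStatus_RECONCILING
	if err := reconcileDevice("olt-1"); err != nil {
		status = common.OperStatus_RECONCILING_FAILED
	}
	fmt.Println("oper status:", status)
}
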
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/doc.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/doc.go
deleted file mode 100644
index dcdbf51..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/concurrency/doc.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package concurrency implements concurrency operations on top of
-// etcd such as distributed locks, barriers, and elections.
-package concurrency
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/election.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/election.go
deleted file mode 100644
index 2521db6..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/concurrency/election.go
+++ /dev/null
@@ -1,254 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
- "context"
- "errors"
- "fmt"
-
- v3 "go.etcd.io/etcd/clientv3"
- pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
- "go.etcd.io/etcd/mvcc/mvccpb"
-)
-
-var (
- ErrElectionNotLeader = errors.New("election: not leader")
- ErrElectionNoLeader = errors.New("election: no leader")
-)
-
-type Election struct {
- session *Session
-
- keyPrefix string
-
- leaderKey string
- leaderRev int64
- leaderSession *Session
- hdr *pb.ResponseHeader
-}
-
-// NewElection returns a new election on a given key prefix.
-func NewElection(s *Session, pfx string) *Election {
- return &Election{session: s, keyPrefix: pfx + "/"}
-}
-
-// ResumeElection initializes an election with a known leader.
-func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
- return &Election{
- keyPrefix: pfx,
- session: s,
- leaderKey: leaderKey,
- leaderRev: leaderRev,
- leaderSession: s,
- }
-}
-
-// Campaign puts a value as eligible for the election on the prefix
-// key.
-// Multiple sessions can participate in the election for the
-// same prefix, but only one can be the leader at a time.
-//
-// If the context is 'context.TODO()/context.Background()', the Campaign
-// will continue to be blocked for other keys to be deleted, unless server
-// returns a non-recoverable error (e.g. ErrCompacted).
-// Otherwise, until the context is not cancelled or timed-out, Campaign will
-// continue to be blocked until it becomes the leader.
-func (e *Election) Campaign(ctx context.Context, val string) error {
- s := e.session
- client := e.session.Client()
-
- k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
- txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
- txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
- txn = txn.Else(v3.OpGet(k))
- resp, err := txn.Commit()
- if err != nil {
- return err
- }
- e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
- if !resp.Succeeded {
- kv := resp.Responses[0].GetResponseRange().Kvs[0]
- e.leaderRev = kv.CreateRevision
- if string(kv.Value) != val {
- if err = e.Proclaim(ctx, val); err != nil {
- e.Resign(ctx)
- return err
- }
- }
- }
-
- _, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
- if err != nil {
- // clean up in case of context cancel
- select {
- case <-ctx.Done():
- e.Resign(client.Ctx())
- default:
- e.leaderSession = nil
- }
- return err
- }
- e.hdr = resp.Header
-
- return nil
-}
-
-// Proclaim lets the leader announce a new value without another election.
-func (e *Election) Proclaim(ctx context.Context, val string) error {
- if e.leaderSession == nil {
- return ErrElectionNotLeader
- }
- client := e.session.Client()
- cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
- txn := client.Txn(ctx).If(cmp)
- txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
- tresp, terr := txn.Commit()
- if terr != nil {
- return terr
- }
- if !tresp.Succeeded {
- e.leaderKey = ""
- return ErrElectionNotLeader
- }
-
- e.hdr = tresp.Header
- return nil
-}
-
-// Resign lets a leader start a new election.
-func (e *Election) Resign(ctx context.Context) (err error) {
- if e.leaderSession == nil {
- return nil
- }
- client := e.session.Client()
- cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
- resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
- if err == nil {
- e.hdr = resp.Header
- }
- e.leaderKey = ""
- e.leaderSession = nil
- return err
-}
-
-// Leader returns the leader value for the current election.
-func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) {
- client := e.session.Client()
- resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
- if err != nil {
- return nil, err
- } else if len(resp.Kvs) == 0 {
- // no leader currently elected
- return nil, ErrElectionNoLeader
- }
- return resp, nil
-}
-
-// Observe returns a channel that reliably observes ordered leader proposals
-// as GetResponse values on every current elected leader key. It will not
-// necessarily fetch all historical leader updates, but will always post the
-// most recent leader value.
-//
-// The channel closes when the context is canceled or the underlying watcher
-// is otherwise disrupted.
-func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
- retc := make(chan v3.GetResponse)
- go e.observe(ctx, retc)
- return retc
-}
-
-func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
- client := e.session.Client()
-
- defer close(ch)
- for {
- resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
- if err != nil {
- return
- }
-
- var kv *mvccpb.KeyValue
- var hdr *pb.ResponseHeader
-
- if len(resp.Kvs) == 0 {
- cctx, cancel := context.WithCancel(ctx)
- // wait for first key put on prefix
- opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
- wch := client.Watch(cctx, e.keyPrefix, opts...)
- for kv == nil {
- wr, ok := <-wch
- if !ok || wr.Err() != nil {
- cancel()
- return
- }
- // only accept puts; a delete will make observe() spin
- for _, ev := range wr.Events {
- if ev.Type == mvccpb.PUT {
- hdr, kv = &wr.Header, ev.Kv
- // may have multiple revs; hdr.rev = the last rev
- // set to kv's rev in case batch has multiple Puts
- hdr.Revision = kv.ModRevision
- break
- }
- }
- }
- cancel()
- } else {
- hdr, kv = resp.Header, resp.Kvs[0]
- }
-
- select {
- case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
- case <-ctx.Done():
- return
- }
-
- cctx, cancel := context.WithCancel(ctx)
- wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1))
- keyDeleted := false
- for !keyDeleted {
- wr, ok := <-wch
- if !ok {
- cancel()
- return
- }
- for _, ev := range wr.Events {
- if ev.Type == mvccpb.DELETE {
- keyDeleted = true
- break
- }
- resp.Header = &wr.Header
- resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
- select {
- case ch <- *resp:
- case <-cctx.Done():
- cancel()
- return
- }
- }
- }
- cancel()
- }
-}
-
-// Key returns the leader key if elected, empty string otherwise.
-func (e *Election) Key() string { return e.leaderKey }
-
-// Rev returns the leader key's creation revision, if elected.
-func (e *Election) Rev() int64 { return e.leaderRev }
-
-// Header is the response header from the last successful election proposal.
-func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
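The election.go file removed above implements etcd-backed leader election on top of a lease-scoped key prefix. Below is a minimal, hypothetical usage sketch of that upstream API; it is not adapter code, the endpoint, election prefix, and candidate values are assumptions, and NewElection/Campaign belong to the same upstream package even though they are not visible in this hunk.

package main

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

func main() {
	// Assumed local etcd endpoint; adjust for a real deployment.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// The session keeps a lease alive; the election key is bound to it.
	sess, err := concurrency.NewSession(cli, concurrency.WithTTL(10))
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	e := concurrency.NewElection(sess, "/demo-election/")
	ctx := context.Background()

	// Campaign blocks until this candidate becomes leader.
	if err := e.Campaign(ctx, "candidate-1"); err != nil {
		panic(err)
	}
	fmt.Println("leader key:", e.Key())

	// Proclaim announces a new value without a fresh election;
	// Resign deletes the leader key so another candidate can win.
	_ = e.Proclaim(ctx, "candidate-1-v2")
	_ = e.Resign(ctx)
}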
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/key.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/key.go
deleted file mode 100644
index e4cf775..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/concurrency/key.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
- "context"
- "fmt"
-
- v3 "go.etcd.io/etcd/clientv3"
- pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
- "go.etcd.io/etcd/mvcc/mvccpb"
-)
-
-func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
- cctx, cancel := context.WithCancel(ctx)
- defer cancel()
-
- var wr v3.WatchResponse
- wch := client.Watch(cctx, key, v3.WithRev(rev))
- for wr = range wch {
- for _, ev := range wr.Events {
- if ev.Type == mvccpb.DELETE {
- return nil
- }
- }
- }
- if err := wr.Err(); err != nil {
- return err
- }
- if err := ctx.Err(); err != nil {
- return err
- }
- return fmt.Errorf("lost watcher waiting for delete")
-}
-
-// waitDeletes efficiently waits until all keys matching the prefix and with a
-// create revision no greater than maxCreateRev are deleted.
-func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
- getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
- for {
- resp, err := client.Get(ctx, pfx, getOpts...)
- if err != nil {
- return nil, err
- }
- if len(resp.Kvs) == 0 {
- return resp.Header, nil
- }
- lastKey := string(resp.Kvs[0].Key)
- if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
- return nil, err
- }
- }
-}
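The waitDelete/waitDeletes helpers above give the mutex its FIFO behaviour: each waiter watches the key created just before its own and blocks until that key is deleted. The following self-contained sketch shows that watch-until-deleted pattern with the plain clientv3 API; it is illustrative only, and the endpoint and key name are assumptions.

package main

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/mvcc/mvccpb"
)

// waitForDelete blocks until key is deleted, the watch fails, or ctx ends.
func waitForDelete(ctx context.Context, cli *clientv3.Client, key string, rev int64) error {
	wch := cli.Watch(ctx, key, clientv3.WithRev(rev))
	for wr := range wch {
		if err := wr.Err(); err != nil {
			return err
		}
		for _, ev := range wr.Events {
			if ev.Type == mvccpb.DELETE {
				return nil
			}
		}
	}
	if err := ctx.Err(); err != nil {
		return err
	}
	return fmt.Errorf("watch channel closed before key was deleted")
}

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // assumed endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Blocks until some other client deletes /demo/predecessor.
	if err := waitForDelete(ctx, cli, "/demo/predecessor", 0); err != nil {
		fmt.Println("wait ended:", err)
		return
	}
	fmt.Println("predecessor released")
}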
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go
deleted file mode 100644
index 306470b..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
-
- v3 "go.etcd.io/etcd/clientv3"
- pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
-)
-
-// ErrLocked is returned by TryLock when Mutex is already locked by another session.
-var ErrLocked = errors.New("mutex: Locked by another session")
-
-// Mutex implements the sync Locker interface with etcd
-type Mutex struct {
- s *Session
-
- pfx string
- myKey string
- myRev int64
- hdr *pb.ResponseHeader
-}
-
-func NewMutex(s *Session, pfx string) *Mutex {
- return &Mutex{s, pfx + "/", "", -1, nil}
-}
-
-// TryLock locks the mutex if not already locked by another session.
-// If the lock is held by another session, it returns immediately after attempting necessary cleanup.
-// The ctx argument is used for the sending/receiving Txn RPC.
-func (m *Mutex) TryLock(ctx context.Context) error {
- resp, err := m.tryAcquire(ctx)
- if err != nil {
- return err
- }
-	// if no key exists under the prefix, or our key has the minimum create revision, we already hold the lock
- ownerKey := resp.Responses[1].GetResponseRange().Kvs
- if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
- m.hdr = resp.Header
- return nil
- }
- client := m.s.Client()
- // Cannot lock, so delete the key
- if _, err := client.Delete(ctx, m.myKey); err != nil {
- return err
- }
- m.myKey = "\x00"
- m.myRev = -1
- return ErrLocked
-}
-
-// Lock locks the mutex with a cancelable context. If the context is canceled
-// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
-func (m *Mutex) Lock(ctx context.Context) error {
- resp, err := m.tryAcquire(ctx)
- if err != nil {
- return err
- }
-	// if no key exists under the prefix, or our key has the minimum create revision, we already hold the lock
- ownerKey := resp.Responses[1].GetResponseRange().Kvs
- if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
- m.hdr = resp.Header
- return nil
- }
- client := m.s.Client()
- // wait for deletion revisions prior to myKey
- hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
- // release lock key if wait failed
- if werr != nil {
- m.Unlock(client.Ctx())
- } else {
- m.hdr = hdr
- }
- return werr
-}
-
-func (m *Mutex) tryAcquire(ctx context.Context) (*v3.TxnResponse, error) {
- s := m.s
- client := m.s.Client()
-
- m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
- cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
- // put self in lock waiters via myKey; oldest waiter holds lock
- put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
- // reuse key in case this session already holds the lock
- get := v3.OpGet(m.myKey)
- // fetch current holder to complete uncontended path with only one RPC
- getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
- resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
- if err != nil {
- return nil, err
- }
- m.myRev = resp.Header.Revision
- if !resp.Succeeded {
- m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
- }
- return resp, nil
-}
-
-func (m *Mutex) Unlock(ctx context.Context) error {
- client := m.s.Client()
- if _, err := client.Delete(ctx, m.myKey); err != nil {
- return err
- }
- m.myKey = "\x00"
- m.myRev = -1
- return nil
-}
-
-func (m *Mutex) IsOwner() v3.Cmp {
- return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
-}
-
-func (m *Mutex) Key() string { return m.myKey }
-
-// Header is the response header received from etcd on acquiring the lock.
-func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr }
-
-type lockerMutex struct{ *Mutex }
-
-func (lm *lockerMutex) Lock() {
- client := lm.s.Client()
- if err := lm.Mutex.Lock(client.Ctx()); err != nil {
- panic(err)
- }
-}
-func (lm *lockerMutex) Unlock() {
- client := lm.s.Client()
- if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
- panic(err)
- }
-}
-
-// NewLocker creates a sync.Locker backed by an etcd mutex.
-func NewLocker(s *Session, pfx string) sync.Locker {
- return &lockerMutex{NewMutex(s, pfx)}
-}
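mutex.go above is the etcd-backed distributed lock: acquisition writes a lease-scoped key under the lock prefix and then waits for every earlier key to be deleted. A minimal, hypothetical usage sketch follows; the endpoint and lock prefix are assumptions, not adapter code.

package main

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // assumed endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	sess, err := concurrency.NewSession(cli, concurrency.WithTTL(10))
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	m := concurrency.NewMutex(sess, "/demo-lock")
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Lock blocks until every earlier waiter under the prefix has released.
	if err := m.Lock(ctx); err != nil {
		panic(err)
	}
	fmt.Println("holding lock key", m.Key())

	// ... critical section ...

	// Unlock deletes this session's key so the next waiter proceeds.
	if err := m.Unlock(ctx); err != nil {
		panic(err)
	}
}

For a non-blocking attempt, TryLock returns ErrLocked immediately when another session already holds the lock.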
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go
deleted file mode 100644
index 97eb763..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
- "context"
- "time"
-
- v3 "go.etcd.io/etcd/clientv3"
-)
-
-const defaultSessionTTL = 60
-
-// Session represents a lease kept alive for the lifetime of a client.
-// Fault-tolerant applications may use sessions to reason about liveness.
-type Session struct {
- client *v3.Client
- opts *sessionOptions
- id v3.LeaseID
-
- cancel context.CancelFunc
- donec <-chan struct{}
-}
-
-// NewSession gets the leased session for a client.
-func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
- ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
- for _, opt := range opts {
- opt(ops)
- }
-
- id := ops.leaseID
- if id == v3.NoLease {
- resp, err := client.Grant(ops.ctx, int64(ops.ttl))
- if err != nil {
- return nil, err
- }
- id = resp.ID
- }
-
- ctx, cancel := context.WithCancel(ops.ctx)
- keepAlive, err := client.KeepAlive(ctx, id)
- if err != nil || keepAlive == nil {
- cancel()
- return nil, err
- }
-
- donec := make(chan struct{})
- s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec}
-
- // keep the lease alive until client error or cancelled context
- go func() {
- defer close(donec)
- for range keepAlive {
- // eat messages until keep alive channel closes
- }
- }()
-
- return s, nil
-}
-
-// Client is the etcd client that is attached to the session.
-func (s *Session) Client() *v3.Client {
- return s.client
-}
-
-// Lease is the lease ID for keys bound to the session.
-func (s *Session) Lease() v3.LeaseID { return s.id }
-
-// Done returns a channel that closes when the lease is orphaned, expires, or
-// is otherwise no longer being refreshed.
-func (s *Session) Done() <-chan struct{} { return s.donec }
-
-// Orphan ends the refresh for the session lease. This is useful
-// in case the state of the client connection is indeterminate (revoke
-// would fail) or when transferring lease ownership.
-func (s *Session) Orphan() {
- s.cancel()
- <-s.donec
-}
-
-// Close orphans the session and revokes the session lease.
-func (s *Session) Close() error {
- s.Orphan()
- // if revoke takes longer than the ttl, lease is expired anyway
- ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second)
- _, err := s.client.Revoke(ctx, s.id)
- cancel()
- return err
-}
-
-type sessionOptions struct {
- ttl int
- leaseID v3.LeaseID
- ctx context.Context
-}
-
-// SessionOption configures Session.
-type SessionOption func(*sessionOptions)
-
-// WithTTL configures the session's TTL in seconds.
-// If TTL is <= 0, the default 60 seconds TTL will be used.
-func WithTTL(ttl int) SessionOption {
- return func(so *sessionOptions) {
- if ttl > 0 {
- so.ttl = ttl
- }
- }
-}
-
-// WithLease specifies the existing leaseID to be used for the session.
-// This is useful in process restart scenario, for example, to reclaim
-// leadership from an election prior to restart.
-func WithLease(leaseID v3.LeaseID) SessionOption {
- return func(so *sessionOptions) {
- so.leaseID = leaseID
- }
-}
-
-// WithContext assigns a context to the session instead of defaulting to
-// using the client context. This is useful for canceling NewSession and
-// Close operations immediately without having to close the client. If the
-// context is canceled before Close() completes, the session's lease will be
-// abandoned and left to expire instead of being revoked.
-func WithContext(ctx context.Context) SessionOption {
- return func(so *sessionOptions) {
- so.ctx = ctx
- }
-}
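session.go above ties all of these primitives to a single lease with a background keep-alive, so mutex and election keys disappear automatically if their owner dies. A small, hypothetical sketch of creating a session and reacting to its loss; the TTL and endpoint are assumptions.

package main

import (
	"fmt"
	"time"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // assumed endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// 5-second TTL: the lease expires roughly 5s after keep-alives stop.
	sess, err := concurrency.NewSession(cli, concurrency.WithTTL(5))
	if err != nil {
		panic(err)
	}
	fmt.Println("session lease:", sess.Lease())

	go func() {
		// Done() closes when the lease is orphaned, expires, or the
		// keep-alive loop otherwise stops.
		<-sess.Done()
		fmt.Println("session lost; any lease-bound keys will expire")
	}()

	// Close revokes the lease, releasing every key bound to it at once.
	if err := sess.Close(); err != nil {
		fmt.Println("revoke failed:", err)
	}
	time.Sleep(100 * time.Millisecond) // give the goroutine a moment to print
}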
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/stm.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/stm.go
deleted file mode 100644
index ee11510..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/concurrency/stm.go
+++ /dev/null
@@ -1,387 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
- "context"
- "math"
-
- v3 "go.etcd.io/etcd/clientv3"
-)
-
-// STM is an interface for software transactional memory.
-type STM interface {
- // Get returns the value for a key and inserts the key in the txn's read set.
- // If Get fails, it aborts the transaction with an error, never returning.
- Get(key ...string) string
- // Put adds a value for a key to the write set.
- Put(key, val string, opts ...v3.OpOption)
- // Rev returns the revision of a key in the read set.
- Rev(key string) int64
- // Del deletes a key.
- Del(key string)
-
- // commit attempts to apply the txn's changes to the server.
- commit() *v3.TxnResponse
- reset()
-}
-
-// Isolation is an enumeration of transactional isolation levels which
-// describes how transactions should interfere and conflict.
-type Isolation int
-
-const (
- // SerializableSnapshot provides serializable isolation and also checks
- // for write conflicts.
- SerializableSnapshot Isolation = iota
-	// Serializable reads within the same transaction attempt return data
-	// from the revision of the first read.
- Serializable
- // RepeatableReads reads within the same transaction attempt always
- // return the same data.
- RepeatableReads
- // ReadCommitted reads keys from any committed revision.
- ReadCommitted
-)
-
-// stmError safely passes STM errors through panic to the STM error channel.
-type stmError struct{ err error }
-
-type stmOptions struct {
- iso Isolation
- ctx context.Context
- prefetch []string
-}
-
-type stmOption func(*stmOptions)
-
-// WithIsolation specifies the transaction isolation level.
-func WithIsolation(lvl Isolation) stmOption {
- return func(so *stmOptions) { so.iso = lvl }
-}
-
-// WithAbortContext specifies the context for permanently aborting the transaction.
-func WithAbortContext(ctx context.Context) stmOption {
- return func(so *stmOptions) { so.ctx = ctx }
-}
-
-// WithPrefetch is a hint to prefetch a list of keys before trying to apply.
-// If an STM transaction will unconditionally fetch a set of keys, prefetching
-// those keys will save the round-trip cost from requesting each key one by one
-// with Get().
-func WithPrefetch(keys ...string) stmOption {
- return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
-}
-
-// NewSTM initiates a new STM instance, using serializable snapshot isolation by default.
-func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
- opts := &stmOptions{ctx: c.Ctx()}
- for _, f := range so {
- f(opts)
- }
- if len(opts.prefetch) != 0 {
- f := apply
- apply = func(s STM) error {
- s.Get(opts.prefetch...)
- return f(s)
- }
- }
- return runSTM(mkSTM(c, opts), apply)
-}
-
-func mkSTM(c *v3.Client, opts *stmOptions) STM {
- switch opts.iso {
- case SerializableSnapshot:
- s := &stmSerializable{
- stm: stm{client: c, ctx: opts.ctx},
- prefetch: make(map[string]*v3.GetResponse),
- }
- s.conflicts = func() []v3.Cmp {
- return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...)
- }
- return s
- case Serializable:
- s := &stmSerializable{
- stm: stm{client: c, ctx: opts.ctx},
- prefetch: make(map[string]*v3.GetResponse),
- }
- s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
- return s
- case RepeatableReads:
- s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
- s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
- return s
- case ReadCommitted:
- s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
- s.conflicts = func() []v3.Cmp { return nil }
- return s
- default:
- panic("unsupported stm")
- }
-}
-
-type stmResponse struct {
- resp *v3.TxnResponse
- err error
-}
-
-func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) {
- outc := make(chan stmResponse, 1)
- go func() {
- defer func() {
- if r := recover(); r != nil {
- e, ok := r.(stmError)
- if !ok {
- // client apply panicked
- panic(r)
- }
- outc <- stmResponse{nil, e.err}
- }
- }()
- var out stmResponse
- for {
- s.reset()
- if out.err = apply(s); out.err != nil {
- break
- }
- if out.resp = s.commit(); out.resp != nil {
- break
- }
- }
- outc <- out
- }()
- r := <-outc
- return r.resp, r.err
-}
-
-// stm implements repeatable-read software transactional memory over etcd
-type stm struct {
- client *v3.Client
- ctx context.Context
- // rset holds read key values and revisions
- rset readSet
- // wset holds overwritten keys and their values
- wset writeSet
- // getOpts are the opts used for gets
- getOpts []v3.OpOption
- // conflicts computes the current conflicts on the txn
- conflicts func() []v3.Cmp
-}
-
-type stmPut struct {
- val string
- op v3.Op
-}
-
-type readSet map[string]*v3.GetResponse
-
-func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) {
- for i, resp := range txnresp.Responses {
- rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange())
- }
-}
-
-// first returns the store revision from the first fetch
-func (rs readSet) first() int64 {
- ret := int64(math.MaxInt64 - 1)
- for _, resp := range rs {
- if rev := resp.Header.Revision; rev < ret {
- ret = rev
- }
- }
- return ret
-}
-
-// cmps guards the txn from updates to read set
-func (rs readSet) cmps() []v3.Cmp {
- cmps := make([]v3.Cmp, 0, len(rs))
- for k, rk := range rs {
- cmps = append(cmps, isKeyCurrent(k, rk))
- }
- return cmps
-}
-
-type writeSet map[string]stmPut
-
-func (ws writeSet) get(keys ...string) *stmPut {
- for _, key := range keys {
- if wv, ok := ws[key]; ok {
- return &wv
- }
- }
- return nil
-}
-
-// cmps returns a cmp list testing no writes have happened past rev
-func (ws writeSet) cmps(rev int64) []v3.Cmp {
- cmps := make([]v3.Cmp, 0, len(ws))
- for key := range ws {
- cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev))
- }
- return cmps
-}
-
-// puts is the list of ops for all pending writes
-func (ws writeSet) puts() []v3.Op {
- puts := make([]v3.Op, 0, len(ws))
- for _, v := range ws {
- puts = append(puts, v.op)
- }
- return puts
-}
-
-func (s *stm) Get(keys ...string) string {
- if wv := s.wset.get(keys...); wv != nil {
- return wv.val
- }
- return respToValue(s.fetch(keys...))
-}
-
-func (s *stm) Put(key, val string, opts ...v3.OpOption) {
- s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)}
-}
-
-func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} }
-
-func (s *stm) Rev(key string) int64 {
- if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 {
- return resp.Kvs[0].ModRevision
- }
- return 0
-}
-
-func (s *stm) commit() *v3.TxnResponse {
- txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit()
- if err != nil {
- panic(stmError{err})
- }
- if txnresp.Succeeded {
- return txnresp
- }
- return nil
-}
-
-func (s *stm) fetch(keys ...string) *v3.GetResponse {
- if len(keys) == 0 {
- return nil
- }
- ops := make([]v3.Op, len(keys))
- for i, key := range keys {
- if resp, ok := s.rset[key]; ok {
- return resp
- }
- ops[i] = v3.OpGet(key, s.getOpts...)
- }
- txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit()
- if err != nil {
- panic(stmError{err})
- }
- s.rset.add(keys, txnresp)
- return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange())
-}
-
-func (s *stm) reset() {
- s.rset = make(map[string]*v3.GetResponse)
- s.wset = make(map[string]stmPut)
-}
-
-type stmSerializable struct {
- stm
- prefetch map[string]*v3.GetResponse
-}
-
-func (s *stmSerializable) Get(keys ...string) string {
- if wv := s.wset.get(keys...); wv != nil {
- return wv.val
- }
- firstRead := len(s.rset) == 0
- for _, key := range keys {
- if resp, ok := s.prefetch[key]; ok {
- delete(s.prefetch, key)
- s.rset[key] = resp
- }
- }
- resp := s.stm.fetch(keys...)
- if firstRead {
- // txn's base revision is defined by the first read
- s.getOpts = []v3.OpOption{
- v3.WithRev(resp.Header.Revision),
- v3.WithSerializable(),
- }
- }
- return respToValue(resp)
-}
-
-func (s *stmSerializable) Rev(key string) int64 {
- s.Get(key)
- return s.stm.Rev(key)
-}
-
-func (s *stmSerializable) gets() ([]string, []v3.Op) {
- keys := make([]string, 0, len(s.rset))
- ops := make([]v3.Op, 0, len(s.rset))
- for k := range s.rset {
- keys = append(keys, k)
- ops = append(ops, v3.OpGet(k))
- }
- return keys, ops
-}
-
-func (s *stmSerializable) commit() *v3.TxnResponse {
- keys, getops := s.gets()
- txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...)
- // use Else to prefetch keys in case of conflict to save a round trip
- txnresp, err := txn.Else(getops...).Commit()
- if err != nil {
- panic(stmError{err})
- }
- if txnresp.Succeeded {
- return txnresp
- }
- // load prefetch with Else data
- s.rset.add(keys, txnresp)
- s.prefetch = s.rset
- s.getOpts = nil
- return nil
-}
-
-func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp {
- if len(r.Kvs) != 0 {
- return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision)
- }
- return v3.Compare(v3.ModRevision(k), "=", 0)
-}
-
-func respToValue(resp *v3.GetResponse) string {
- if resp == nil || len(resp.Kvs) == 0 {
- return ""
- }
- return string(resp.Kvs[0].Value)
-}
-
-// NewSTMRepeatable is deprecated.
-func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
- return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads))
-}
-
-// NewSTMSerializable is deprecated.
-func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
- return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable))
-}
-
-// NewSTMReadCommitted is deprecated.
-func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
- return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted))
-}
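stm.go above retries an apply function until it commits with no read/write conflicts, which makes multi-key read-modify-write operations safe without explicit locking. A hypothetical sketch that moves one unit between two counters; the endpoint and key names are assumptions.

package main

import (
	"fmt"
	"strconv"
	"time"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // assumed endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Move 1 unit from /demo/src to /demo/dst; the apply func is retried
	// automatically if another writer touches either key mid-transaction.
	transfer := func(s concurrency.STM) error {
		src, _ := strconv.Atoi(s.Get("/demo/src"))
		dst, _ := strconv.Atoi(s.Get("/demo/dst"))
		if src <= 0 {
			return fmt.Errorf("insufficient balance: %d", src)
		}
		s.Put("/demo/src", strconv.Itoa(src-1))
		s.Put("/demo/dst", strconv.Itoa(dst+1))
		return nil
	}

	if _, err := concurrency.NewSTM(cli, transfer,
		concurrency.WithIsolation(concurrency.SerializableSnapshot)); err != nil {
		fmt.Println("transfer failed:", err)
	}
}

Returning an error from the apply function aborts the transaction; only a clean return is committed.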
diff --git a/vendor/modules.txt b/vendor/modules.txt
index f037a24..eecb938 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -34,12 +34,14 @@
github.com/gogo/protobuf/protoc-gen-gogo/descriptor
# github.com/golang/protobuf v1.3.2
## explicit
+github.com/golang/protobuf/jsonpb
github.com/golang/protobuf/proto
github.com/golang/protobuf/protoc-gen-go/descriptor
github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any
github.com/golang/protobuf/ptypes/duration
github.com/golang/protobuf/ptypes/empty
+github.com/golang/protobuf/ptypes/struct
github.com/golang/protobuf/ptypes/timestamp
# github.com/golang/snappy v0.0.1
github.com/golang/snappy
@@ -56,26 +58,26 @@
# github.com/jcmturner/gofork v1.0.0
github.com/jcmturner/gofork/encoding/asn1
github.com/jcmturner/gofork/x/crypto/pbkdf2
-# github.com/opencord/voltha-lib-go/v4 v4.3.5
+# github.com/opencord/voltha-lib-go/v5 v5.0.2
## explicit
-github.com/opencord/voltha-lib-go/v4/pkg/adapters
-github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif
-github.com/opencord/voltha-lib-go/v4/pkg/adapters/common
-github.com/opencord/voltha-lib-go/v4/pkg/config
-github.com/opencord/voltha-lib-go/v4/pkg/db
-github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore
-github.com/opencord/voltha-lib-go/v4/pkg/events
-github.com/opencord/voltha-lib-go/v4/pkg/events/eventif
-github.com/opencord/voltha-lib-go/v4/pkg/flows
-github.com/opencord/voltha-lib-go/v4/pkg/kafka
-github.com/opencord/voltha-lib-go/v4/pkg/log
-github.com/opencord/voltha-lib-go/v4/pkg/meters
-github.com/opencord/voltha-lib-go/v4/pkg/pmmetrics
-github.com/opencord/voltha-lib-go/v4/pkg/ponresourcemanager
-github.com/opencord/voltha-lib-go/v4/pkg/probe
-github.com/opencord/voltha-lib-go/v4/pkg/techprofile
-github.com/opencord/voltha-lib-go/v4/pkg/version
-# github.com/opencord/voltha-protos/v4 v4.1.9
+github.com/opencord/voltha-lib-go/v5/pkg/adapters
+github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif
+github.com/opencord/voltha-lib-go/v5/pkg/adapters/common
+github.com/opencord/voltha-lib-go/v5/pkg/config
+github.com/opencord/voltha-lib-go/v5/pkg/db
+github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore
+github.com/opencord/voltha-lib-go/v5/pkg/events
+github.com/opencord/voltha-lib-go/v5/pkg/events/eventif
+github.com/opencord/voltha-lib-go/v5/pkg/flows
+github.com/opencord/voltha-lib-go/v5/pkg/kafka
+github.com/opencord/voltha-lib-go/v5/pkg/log
+github.com/opencord/voltha-lib-go/v5/pkg/meters
+github.com/opencord/voltha-lib-go/v5/pkg/pmmetrics
+github.com/opencord/voltha-lib-go/v5/pkg/ponresourcemanager
+github.com/opencord/voltha-lib-go/v5/pkg/probe
+github.com/opencord/voltha-lib-go/v5/pkg/techprofile
+github.com/opencord/voltha-lib-go/v5/pkg/version
+# github.com/opencord/voltha-protos/v4 v4.2.0
## explicit
github.com/opencord/voltha-protos/v4/go/common
github.com/opencord/voltha-protos/v4/go/ext/config
@@ -126,7 +128,6 @@
go.etcd.io/etcd/clientv3/balancer/connectivity
go.etcd.io/etcd/clientv3/balancer/picker
go.etcd.io/etcd/clientv3/balancer/resolver/endpoint
-go.etcd.io/etcd/clientv3/concurrency
go.etcd.io/etcd/clientv3/credentials
go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes
go.etcd.io/etcd/etcdserver/etcdserverpb