VOL-2191: Implement the correct interpretation of the instance-control attribute
from the tech-profile
- When using a "single-instance" type TP, make sure no gem-port on any other
UNI port is referencing the same alloc-id before releasing the alloc-id
(a brief sketch of this check follows this message).
- When deleting tech-profile instances for an ONU, log any error encountered
while deleting a TP instance, but do not break out of the loop, so that the
remaining TP instances can still be freed.
- Use voltha-lib-go version 2.2.17
Change-Id: I13901e6e3f21b02af076c4c022d4caafc10d6491
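
For reviewers, a minimal sketch of the intent behind the "single-instance" handling
(hypothetical types and names, not the adapter's actual structures): the alloc-id is
shared by the TP instances on every UNI of the ONU, so it may only be freed once no
gem-port on any other UNI still references it.

    package main

    import "fmt"

    // tpInstance stands in for one tech-profile instance on a UNI port
    // (illustrative type only).
    type tpInstance struct {
        uniID    uint32
        allocID  uint32
        gemPorts []uint32
    }

    // canReleaseAllocID reports whether allocID may be freed: it must not be
    // referenced by any gem-port of a remaining instance on another UNI.
    func canReleaseAllocID(allocID uint32, remaining []tpInstance) bool {
        for _, inst := range remaining {
            if inst.allocID == allocID && len(inst.gemPorts) > 0 {
                return false
            }
        }
        return true
    }

    func main() {
        others := []tpInstance{{uniID: 1, allocID: 1024, gemPorts: []uint32{1025}}}
        fmt.Println(canReleaseAllocID(1024, others)) // false: uni-1 still uses alloc-id 1024
    }
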
diff --git a/adaptercore/openolt_flowmgr.go b/adaptercore/openolt_flowmgr.go
index d391616..8ee2a11 100644
--- a/adaptercore/openolt_flowmgr.go
+++ b/adaptercore/openolt_flowmgr.go
@@ -448,20 +448,17 @@
allgemPortIDs = f.resourceMgr.GetCurrentGEMPortIDsForOnu(intfID, onuID, uniID)
tpPath := f.getTPpath(intfID, uni, TpID)
+
+ log.Infow("creating-new-tcont-and-gem", log.Fields{"pon": intfID, "onu": onuID, "uni": uniID})
+
// Check tech profile instance already exists for derived port name
- techProfileInstance, err := f.techprofile[intfID].GetTPInstanceFromKVStore(TpID, tpPath)
- if err != nil { // This should not happen, something wrong in KV backend transaction
- log.Errorw("Error in fetching tech profile instance from KV store", log.Fields{"tpID": TpID, "path": tpPath})
- return 0, nil, nil
- }
-
- log.Debug("Creating New TConts and Gem ports", log.Fields{"pon": intfID, "onu": onuID, "uni": uniID})
-
+ techProfileInstance, _ := f.techprofile[intfID].GetTPInstanceFromKVStore(TpID, tpPath)
if techProfileInstance == nil {
- log.Info("Creating tech profile instance", log.Fields{"path": tpPath})
+ log.Infow("tp-instance-not-found--creating-new", log.Fields{"path": tpPath})
techProfileInstance = f.techprofile[intfID].CreateTechProfInstance(TpID, uni, intfID)
if techProfileInstance == nil {
- log.Error("Tech-profile-instance-creation-failed")
+ // This should not happen, something wrong in KV backend transaction
+ log.Error("tp-instance-create-failed")
return 0, nil, nil
}
f.resourceMgr.UpdateTechProfileIDForOnu(intfID, onuID, uniID, TpID)
@@ -944,7 +941,8 @@
for _, tpID := range tpIDList {
if err := f.DeleteTechProfileInstance(intfID, onuID, uniID, uniPortName, tpID); err != nil {
log.Debugw("Failed-to-delete-tp-instance-from-kv-store", log.Fields{"tp-id": tpID, "uni-port-name": uniPortName})
- return err
+ // return err
+ // We should continue to delete tech-profile instances for other TP IDs
}
}
return nil
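
Editor's note: the change above intentionally keeps iterating after a failed delete so
the remaining TP instances are still freed. If the individual failures ever need to be
surfaced to the caller rather than only logged, one possible (hypothetical) variant
would be to collect them and return an aggregate error:

    // Sketch only (assumes "fmt" is imported); "del" stands in for DeleteTechProfileInstance.
    func deleteAllTPInstances(tpIDs []uint32, del func(uint32) error) error {
        var failed []uint32
        for _, tpID := range tpIDs {
            if err := del(tpID); err != nil {
                failed = append(failed, tpID) // keep going; free the rest
            }
        }
        if len(failed) > 0 {
            return fmt.Errorf("failed to delete TP instances %v", failed)
        }
        return nil
    }
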
@@ -1278,7 +1276,7 @@
f.resourceMgr.FreeGemPortID(Intf, uint32(onuID), uint32(uniID), uint32(gemPortID))
f.onuIdsLock.Unlock()
- ok, _ := f.isTechProfileUsedByAnotherGem(Intf, uint32(onuID), uint32(uniID), techprofileInst, uint32(gemPortID))
+ ok, _ := f.isTechProfileUsedByAnotherGem(Intf, uint32(onuID), uint32(uniID), tpID, techprofileInst, uint32(gemPortID))
if !ok {
f.resourceMgr.RemoveTechProfileIDForOnu(Intf, uint32(onuID), uint32(uniID), tpID)
f.RemoveSchedulerQueues(schedQueue{direction: tp_pb.Direction_UPSTREAM, intfID: Intf, onuID: uint32(onuID), uniID: uint32(uniID), tpID: tpID, uniPort: portNum, tpInst: techprofileInst})
@@ -1809,7 +1807,7 @@
return false
}
-func (f *OpenOltFlowMgr) isTechProfileUsedByAnotherGem(ponIntf uint32, onuID uint32, uniID uint32, tpInst *tp.TechProfile, gemPortID uint32) (bool, uint32) {
+func (f *OpenOltFlowMgr) isTechProfileUsedByAnotherGem(ponIntf uint32, onuID uint32, uniID uint32, tpID uint32, tpInst *tp.TechProfile, gemPortID uint32) (bool, uint32) {
currentGemPorts := f.resourceMgr.GetCurrentGEMPortIDsForOnu(ponIntf, onuID, uniID)
tpGemPorts := tpInst.UpstreamGemPortAttributeList
for _, currentGemPort := range currentGemPorts {
@@ -1819,6 +1817,30 @@
}
}
}
+ if tpInst.InstanceCtrl.Onu == "single-instance" {
+ // The TP information for the given TP ID, PON ID, ONU ID, UNI ID should be removed.
+ f.resourceMgr.RemoveTechProfileIDForOnu(ponIntf, uint32(onuID), uint32(uniID), tpID)
+ f.DeleteTechProfileInstance(ponIntf, uint32(onuID), uint32(uniID), "", tpID)
+
+ // Although we cleaned up TP Instance for the given (PON ID, ONU ID, UNI ID), the TP might
+ // still be used on other uni ports.
+ // So, we need to check and make sure that no other gem port is referring to the given TP ID
+ // on any other uni port.
+ tpInstances := f.techprofile[ponIntf].FindAllTpInstances(tpID, ponIntf, onuID)
+ for i := 0; i < len(tpInstances); i++ {
+ tpI := tpInstances[i]
+ tpGemPorts := tpI.UpstreamGemPortAttributeList
+ for _, currentGemPort := range currentGemPorts {
+ for _, tpGemPort := range tpGemPorts {
+ if (currentGemPort == tpGemPort.GemportID) && (currentGemPort != gemPortID) {
+ log.Debugw("tech-profile-is-in-use-by-gem", log.Fields{"gemPort": currentGemPort})
+ return true, currentGemPort
+ }
+ }
+ }
+ }
+ }
+ log.Debug("tech-profile-is-not-in-use-by-any-gem")
return false, 0
}
diff --git a/adaptercore/openolt_flowmgr_test.go b/adaptercore/openolt_flowmgr_test.go
index 57d922b..e6de810 100644
--- a/adaptercore/openolt_flowmgr_test.go
+++ b/adaptercore/openolt_flowmgr_test.go
@@ -95,7 +95,7 @@
// flowMgr := newMockFlowmgr()
tprofile := &tp.TechProfile{Name: "tp1", SubscriberIdentifier: "subscriber1",
- ProfileType: "pt1", NumGemPorts: 1, NumTconts: 1, Version: 1,
+ ProfileType: "pt1", NumGemPorts: 1, Version: 1,
InstanceCtrl: tp.InstanceControl{Onu: "1", Uni: "1", MaxGemPayloadSize: "1"},
}
tprofile.UsScheduler.Direction = "UPSTREAM"
@@ -157,7 +157,7 @@
// flowMgr := newMockFlowmgr()
tprofile := &tp.TechProfile{Name: "tp1", SubscriberIdentifier: "subscriber1",
- ProfileType: "pt1", NumGemPorts: 1, NumTconts: 1, Version: 1,
+ ProfileType: "pt1", NumGemPorts: 1, Version: 1,
InstanceCtrl: tp.InstanceControl{Onu: "1", Uni: "1", MaxGemPayloadSize: "1"},
}
tprofile.UsScheduler.Direction = "UPSTREAM"
@@ -739,7 +739,6 @@
ProfileType: "Mock",
Version: 1,
NumGemPorts: 4,
- NumTconts: 1,
InstanceCtrl: tp.InstanceControl{
Onu: "1",
Uni: "16",
diff --git a/go.mod b/go.mod
index 80430d5..cedfb5c 100644
--- a/go.mod
+++ b/go.mod
@@ -7,9 +7,8 @@
github.com/golang/protobuf v1.3.2
github.com/onsi/ginkgo v1.8.0 // indirect
github.com/onsi/gomega v1.5.0 // indirect
- github.com/opencord/voltha-lib-go/v2 v2.2.13
+ github.com/opencord/voltha-lib-go/v2 v2.2.17
github.com/opencord/voltha-protos/v2 v2.0.1
go.etcd.io/etcd v0.0.0-20190930204107-236ac2a90522
- google.golang.org/appengine v1.4.0 // indirect
google.golang.org/grpc v1.24.0
)
diff --git a/go.sum b/go.sum
index 3cfac10..b3e4feb 100644
--- a/go.sum
+++ b/go.sum
@@ -190,13 +190,15 @@
github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/opencord/voltha-lib-go/v2 v2.2.13 h1:8TxMjhqOL2vcDxO5uIaAd6Lj2Ahq/gAFiNNoUkM90cQ=
-github.com/opencord/voltha-lib-go/v2 v2.2.13/go.mod h1:g8WH4WTOJ0f40ZoqYFR4nyOLIAC84dOKDXsbT1ZErY4=
+github.com/opencord/voltha-lib-go/v2 v2.2.17 h1:if2mGx376oUO8+wFI7BZ7KMLElewoeSBj0zWi7Xl/Fk=
+github.com/opencord/voltha-lib-go/v2 v2.2.17/go.mod h1:1NOSy3uX2DcAIJyZZXkbjCwokwliEQJPu0zF3Jo5OEA=
github.com/opencord/voltha-protos/v2 v2.0.1 h1:vcE0XxNVeu0SED0bW2lf2w24k/QMFrFqMexuedIyTEg=
github.com/opencord/voltha-protos/v2 v2.0.1/go.mod h1:6kOcfYi1CadWowFxI2SH5wLfHrsRECZLZlD2MFK6WDI=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc=
+github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.3.0+incompatible h1:CZzRn4Ut9GbUkHlQ7jqBXeZQV41ZSKWFc302ZU6lUTk=
github.com/pierrec/lz4 v2.3.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
diff --git a/mocks/mockTechprofile.go b/mocks/mockTechprofile.go
index 7f4a0f9..7bbea66 100644
--- a/mocks/mockTechprofile.go
+++ b/mocks/mockTechprofile.go
@@ -29,25 +29,25 @@
TpID uint32
}
-// SetKVClient to mock tefhprofile SetKVClient method
+// SetKVClient to mock techprofile SetKVClient method
func (m MockTechProfile) SetKVClient() *db.Backend {
return &db.Backend{Client: &MockKVClient{}}
}
-// GetTechProfileInstanceKVPath to mock tefhprofile GetTechProfileInstanceKVPath method
+// GetTechProfileInstanceKVPath to mock techprofile GetTechProfileInstanceKVPath method
func (m MockTechProfile) GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string {
return ""
}
-// GetTPInstanceFromKVStore to mock tefhprofile GetTPInstanceFromKVStore method
+// GetTPInstanceFromKVStore to mock techprofile GetTPInstanceFromKVStore method
func (m MockTechProfile) GetTPInstanceFromKVStore(techProfiletblID uint32, path string) (*tp.TechProfile, error) {
log.Debug("Warning Warning Warning: GetTPInstanceFromKVStore")
return nil, nil
}
-// CreateTechProfInstance to mock tefhprofile CreateTechProfInstance method
+// CreateTechProfInstance to mock techprofile CreateTechProfInstance method
func (m MockTechProfile) CreateTechProfInstance(techProfiletblID uint32, uniPortName string, intfID uint32) *tp.TechProfile {
return &tp.TechProfile{
@@ -56,48 +56,52 @@
ProfileType: "mock",
Version: 0,
NumGemPorts: 2,
- NumTconts: 1,
UpstreamGemPortAttributeList: nil,
DownstreamGemPortAttributeList: nil,
}
}
-// DeleteTechProfileInstance to mock tefhprofile DeleteTechProfileInstance method
+// DeleteTechProfileInstance to mock techprofile DeleteTechProfileInstance method
func (m MockTechProfile) DeleteTechProfileInstance(techProfiletblID uint32, uniPortName string) error {
return nil
}
-// GetprotoBufParamValue to mock tefhprofile GetprotoBufParamValue method
+// GetprotoBufParamValue to mock techprofile GetprotoBufParamValue method
func (m MockTechProfile) GetprotoBufParamValue(paramType string, paramKey string) int32 {
return 0
}
-// GetUsScheduler to mock tefhprofile GetUsScheduler method
+// GetUsScheduler to mock techprofile GetUsScheduler method
func (m MockTechProfile) GetUsScheduler(tpInstance *tp.TechProfile) *tp_pb.SchedulerConfig {
return &tp_pb.SchedulerConfig{}
}
-// GetDsScheduler to mock tefhprofile GetDsScheduler method
+// GetDsScheduler to mock techprofile GetDsScheduler method
func (m MockTechProfile) GetDsScheduler(tpInstance *tp.TechProfile) *tp_pb.SchedulerConfig {
return &tp_pb.SchedulerConfig{}
}
-// GetTrafficScheduler to mock tefhprofile GetTrafficScheduler method
+// GetTrafficScheduler to mock techprofile GetTrafficScheduler method
func (m MockTechProfile) GetTrafficScheduler(tpInstance *tp.TechProfile, SchedCfg *tp_pb.SchedulerConfig,
ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler {
return &tp_pb.TrafficScheduler{}
}
-// GetTrafficQueues to mock tefhprofile GetTrafficQueues method
+// GetTrafficQueues to mock techprofile GetTrafficQueues method
func (m MockTechProfile) GetTrafficQueues(tp *tp.TechProfile, Dir tp_pb.Direction) []*tp_pb.TrafficQueue {
return []*tp_pb.TrafficQueue{{}}
}
-// GetGemportIDForPbit to mock tefhprofile GetGemportIDForPbit method
+// GetGemportIDForPbit to mock techprofile GetGemportIDForPbit method
func (m MockTechProfile) GetGemportIDForPbit(tp *tp.TechProfile, Dir tp_pb.Direction, pbit uint32) uint32 {
return 0
}
+
+// FindAllTpInstances to mock techprofile FindAllTpInstances method
+func (m MockTechProfile) FindAllTpInstances(techProfiletblID uint32, ponIntf uint32, onuID uint32) []tp.TechProfile {
+ return []tp.TechProfile{}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/backend.go b/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/backend.go
index c319d99..b2547c2 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/backend.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/backend.go
@@ -17,23 +17,36 @@
package db
import (
+ "context"
"errors"
"fmt"
"github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore"
"github.com/opencord/voltha-lib-go/v2/pkg/log"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
"strconv"
"sync"
+ "time"
+)
+
+const (
+ // Default Minimal Interval for posting alive state of backend kvstore on Liveness Channel
+ DefaultLivenessChannelInterval = time.Second * 30
)
// Backend structure holds details for accessing the kv store
type Backend struct {
sync.RWMutex
- Client kvstore.Client
- StoreType string
- Host string
- Port int
- Timeout int
- PathPrefix string
+ Client kvstore.Client
+ StoreType string
+ Host string
+ Port int
+ Timeout int
+ PathPrefix string
+ alive bool // Is this backend connection alive?
+ liveness chan bool // channel to post alive state
+ LivenessChannelInterval time.Duration // regularly push alive state beyond this interval
+ lastLivenessTime time.Time // Instant of last alive state push
}
// NewBackend creates a new instance of a Backend structure
@@ -41,11 +54,13 @@
var err error
b := &Backend{
- StoreType: storeType,
- Host: host,
- Port: port,
- Timeout: timeout,
- PathPrefix: pathPrefix,
+ StoreType: storeType,
+ Host: host,
+ Port: port,
+ Timeout: timeout,
+ LivenessChannelInterval: DefaultLivenessChannelInterval,
+ PathPrefix: pathPrefix,
+ alive: false, // connection considered down at start
}
address := host + ":" + strconv.Itoa(port)
@@ -76,6 +91,100 @@
return path
}
+func (b *Backend) updateLiveness(alive bool) {
+ // Periodically push stream of liveness data to the channel,
+ // so that in a live state, the core does not timeout and
+ // send a forced liveness message. Push alive state if the
+ // last push to channel was beyond livenessChannelInterval
+ if b.liveness != nil {
+
+ if b.alive != alive {
+ log.Debug("update-liveness-channel-reason-change")
+ b.liveness <- alive
+ b.lastLivenessTime = time.Now()
+ } else if time.Now().Sub(b.lastLivenessTime) > b.LivenessChannelInterval {
+ log.Debug("update-liveness-channel-reason-interval")
+ b.liveness <- alive
+ b.lastLivenessTime = time.Now()
+ }
+ }
+
+ // Emit log message only for alive state change
+ if b.alive != alive {
+ log.Debugw("change-kvstore-alive-status", log.Fields{"alive": alive})
+ b.alive = alive
+ }
+}
+
+// Perform a dummy Key Lookup on kvstore to test Connection Liveness and
+// post on Liveness channel
+func (b *Backend) PerformLivenessCheck(timeout int) bool {
+ alive := b.Client.IsConnectionUp(timeout)
+ log.Debugw("kvstore-liveness-check-result", log.Fields{"alive": alive})
+
+ b.updateLiveness(alive)
+ return alive
+}
+
+// Enable the liveness monitor channel. This channel will report
+// a "true" or "false" on every kvstore operation which indicates whether
+// or not the connection is still Live. This channel is then picked up
+// by the service (i.e. rw_core / ro_core) to update readiness status
+// and/or take other actions.
+func (b *Backend) EnableLivenessChannel() chan bool {
+ log.Debug("enable-kvstore-liveness-channel")
+
+ if b.liveness == nil {
+ log.Debug("create-kvstore-liveness-channel")
+
+ // Channel size of 10 to avoid any possibility of blocking in Load conditions
+ b.liveness = make(chan bool, 10)
+
+ // Post initial alive state
+ b.liveness <- b.alive
+ b.lastLivenessTime = time.Now()
+ }
+
+ return b.liveness
+}
+
+// Extract Alive status of Kvstore based on type of error
+func (b *Backend) isErrorIndicatingAliveKvstore(err error) bool {
+ // Alive unless observed an error indicating so
+ alive := true
+
+ if err != nil {
+
+ // timeout indicates kvstore not reachable/alive
+ if err == context.DeadlineExceeded {
+ alive = false
+ }
+
+ // Need to analyze client-specific errors based on backend type
+ if b.StoreType == "etcd" {
+
+ // For etcd backend, consider not-alive only for errors indicating
+ // timed-out request or unavailable/corrupted cluster. For all remaining
+ // error codes listed in https://godoc.org/google.golang.org/grpc/codes#Code,
+ // we would not infer a not-alive backend because such an error may also
+ // occur due to bad client requests or sequence of operations
+ switch status.Code(err) {
+ case codes.DeadlineExceeded:
+ fallthrough
+ case codes.Unavailable:
+ fallthrough
+ case codes.DataLoss:
+ alive = false
+ }
+
+ //} else {
+ // TODO: Implement for consul backend; would it be needed ever?
+ }
+ }
+
+ return alive
+}
+
// List retrieves one or more items that match the specified key
func (b *Backend) List(key string) (map[string]*kvstore.KVPair, error) {
b.Lock()
@@ -84,7 +193,11 @@
formattedPath := b.makePath(key)
log.Debugw("listing-key", log.Fields{"key": key, "path": formattedPath})
- return b.Client.List(formattedPath, b.Timeout)
+ pair, err := b.Client.List(formattedPath, b.Timeout)
+
+ b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+
+ return pair, err
}
// Get retrieves an item that matches the specified key
@@ -95,7 +208,11 @@
formattedPath := b.makePath(key)
log.Debugw("getting-key", log.Fields{"key": key, "path": formattedPath})
- return b.Client.Get(formattedPath, b.Timeout)
+ pair, err := b.Client.Get(formattedPath, b.Timeout)
+
+ b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+
+ return pair, err
}
// Put stores an item value under the specifed key
@@ -106,7 +223,11 @@
formattedPath := b.makePath(key)
log.Debugw("putting-key", log.Fields{"key": key, "value": string(value.([]byte)), "path": formattedPath})
- return b.Client.Put(formattedPath, value, b.Timeout)
+ err := b.Client.Put(formattedPath, value, b.Timeout)
+
+ b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+
+ return err
}
// Delete removes an item under the specified key
@@ -117,7 +238,11 @@
formattedPath := b.makePath(key)
log.Debugw("deleting-key", log.Fields{"key": key, "path": formattedPath})
- return b.Client.Delete(formattedPath, b.Timeout)
+ err := b.Client.Delete(formattedPath, b.Timeout)
+
+ b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+
+ return err
}
// CreateWatch starts watching events for the specified key
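
For context, a hedged sketch of how a service might consume the new liveness API
(the consumer side is not part of this change; function and variable names here are
illustrative, and the probe timeout is assumed to be in seconds):

    package sketch

    import (
        "fmt"
        "time"

        "github.com/opencord/voltha-lib-go/v2/pkg/db"
    )

    // watchKVLiveness subscribes to the backend's liveness channel and
    // additionally forces a periodic probe when there is no KV traffic.
    func watchKVLiveness(backend *db.Backend) {
        liveness := backend.EnableLivenessChannel()
        go func() {
            for alive := range liveness {
                fmt.Println("kvstore alive:", alive) // e.g. feed a readiness probe
            }
        }()

        go func() {
            ticker := time.NewTicker(30 * time.Second)
            defer ticker.Stop()
            for range ticker.C {
                backend.PerformLivenessCheck(2) // timeout assumed to be seconds
            }
        }()
    }
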
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/flows/flow_utils.go b/vendor/github.com/opencord/voltha-lib-go/v2/pkg/flows/flow_utils.go
index f7d1b43..02a4b0b 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/flows/flow_utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v2/pkg/flows/flow_utils.go
@@ -1330,3 +1330,35 @@
}
return matchFields
}
+
+//IsMulticastIp returns true if the ip starts with the bit sequence 1110;
+//false otherwise.
+func IsMulticastIp(ip uint32) bool {
+ return ip>>28 == 14
+}
+
+//ConvertToMulticastMacInt returns equivalent mac address of the given multicast ip address
+func ConvertToMulticastMacInt(ip uint32) uint64 {
+ //get last 23 bits of ip address by ip & 00000000011111111111111111111111
+ theLast23BitsOfIp := ip & 8388607
+ // perform OR with 0x1005E000000 to build mcast mac address
+ return 1101088686080 | uint64(theLast23BitsOfIp)
+}
+
+//ConvertToMulticastMacBytes returns equivalent mac address of the given multicast ip address
+func ConvertToMulticastMacBytes(ip uint32) []byte {
+ mac := ConvertToMulticastMacInt(ip)
+ var b bytes.Buffer
+ // catalyze (48 bits) in binary:111111110000000000000000000000000000000000000000
+ catalyze := uint64(280375465082880)
+ //convert each octet to decimal
+ for i := 0; i < 6; i++ {
+ if i != 0 {
+ catalyze = catalyze >> 8
+ }
+ octet := mac & catalyze
+ octetDecimal := octet >> uint8(40-i*8)
+ b.WriteByte(byte(octetDecimal))
+ }
+ return b.Bytes()
+}
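
A quick worked example of the new multicast helpers (editor's illustration; the
values follow directly from the bit operations above, assuming the package is
imported as "flows"):

    package main

    import (
        "fmt"

        "github.com/opencord/voltha-lib-go/v2/pkg/flows"
    )

    func main() {
        ip := uint32(0xE0000016)                                 // 224.0.0.22
        fmt.Println(flows.IsMulticastIp(ip))                     // true: top 4 bits are 1110
        fmt.Printf("%x\n", flows.ConvertToMulticastMacInt(ip))   // 1005e000016
        fmt.Printf("%x\n", flows.ConvertToMulticastMacBytes(ip)) // 01005e000016 -> 01:00:5e:00:00:16
    }
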
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/config.go b/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/config.go
index 9c64bd8..b1a8ac5 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/config.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/config.go
@@ -90,12 +90,10 @@
DefaultTPName string
TPVersion int
NumGemPorts uint32
- NumTconts uint32
DefaultPbits []string
LogLevel int
DefaultTechProfileID uint32
DefaultNumGemPorts uint32
- DefaultNumTconts uint32
}
func NewTechProfileFlags(KVStoreType string, KVStoreHost string, KVStorePort int) *TechProfileFlags {
@@ -113,7 +111,6 @@
TPInstanceKVPath: defaultTPInstanceKVPath,
DefaultTechProfileID: DEFAULT_TECH_PROFILE_TABLE_ID,
DefaultNumGemPorts: defaultGemportsCount,
- DefaultNumTconts: defaultNumTconts,
DefaultPbits: []string{defaultPbits},
LogLevel: defaultLogLevel,
}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile.go b/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile.go
index 92569f0..2c0edae 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile.go
@@ -20,6 +20,7 @@
"encoding/json"
"errors"
"fmt"
+ "regexp"
"strconv"
"github.com/opencord/voltha-lib-go/v2/pkg/db"
@@ -96,6 +97,9 @@
3: "WRed",
}
+// Required uniPortName format
+var uniPortNameFormat = regexp.MustCompile(`^pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}$`)
+
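Editor's note: the braces in the pattern are matched literally, so a conforming
uniPortName looks like the illustrative checks below.

    uniPortNameFormat.MatchString("pon-{0}/onu-{1}/uni-{0}") // true
    uniPortNameFormat.MatchString("pon-0/onu-1/uni-0")       // false: braces missing
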
/*
type InferredAdditionBWIndication int32
@@ -218,7 +222,6 @@
ProfileType string `json:"profile_type"`
Version int `json:"version"`
NumGemPorts uint32 `json:"num_gem_ports"`
- NumTconts uint32 `json:"num_of_tconts"`
InstanceCtrl InstanceControl `json:"instance_control"`
UsScheduler iScheduler `json:"us_scheduler"`
DsScheduler iScheduler `json:"ds_scheduler"`
@@ -287,24 +290,23 @@
var KvTpIns TechProfile
var resPtr *TechProfile = &KvTpIns
var err error
- /*path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)*/
- log.Infow("Getting tech profile instance from KV store", log.Fields{"path": path})
- kvresult, err := t.config.KVBackend.Get(path)
- if err != nil {
- log.Errorw("Error while fetching tech-profile instance from KV backend", log.Fields{"key": path})
- return nil, err
- }
- if kvresult == nil {
- log.Infow("Tech profile does not exist in KV store", log.Fields{"key": path})
- resPtr = nil
+ var kvResult *kvstore.KVPair
+
+ kvResult, _ = t.config.KVBackend.Get(path)
+ if kvResult == nil {
+ log.Infow("tp-instance-not-found-on-kv", log.Fields{"key": path})
+ return nil, nil
} else {
- if value, err := kvstore.ToByte(kvresult.Value); err == nil {
+ if value, err := kvstore.ToByte(kvResult.Value); err == nil {
if err = json.Unmarshal(value, resPtr); err != nil {
- log.Errorw("Error while unmarshal KV result", log.Fields{"key": path, "value": value})
+ log.Errorw("error-unmarshal-kv-result", log.Fields{"key": path, "value": value})
+ return nil, errors.New("error-unmarshal-kv-result")
+ } else {
+ return resPtr, nil
}
}
}
- return resPtr, err
+ return nil, err
}
func (t *TechProfileMgr) addTechProfInstanceToKVStore(techProfiletblID uint32, uniPortName string, tpInstance *TechProfile) error {
@@ -342,20 +344,36 @@
}
func (t *TechProfileMgr) CreateTechProfInstance(techProfiletblID uint32, uniPortName string, intfId uint32) *TechProfile {
var tpInstance *TechProfile
- log.Infow("Creating tech profile instance ", log.Fields{"tableid": techProfiletblID, "uni": uniPortName, "intId": intfId})
- tp := t.getTPFromKVStore(techProfiletblID)
- if tp != nil {
- log.Infow("Creating tech profile instance with profile from KV store", log.Fields{"tpid": techProfiletblID})
- } else {
- tp = t.getDefaultTechProfile()
- log.Infow("Creating tech profile instance with default values", log.Fields{"tpid": techProfiletblID})
- }
- tpInstance = t.allocateTPInstance(uniPortName, tp, intfId, t.config.DefaultNumTconts)
- if err := t.addTechProfInstanceToKVStore(techProfiletblID, uniPortName, tpInstance); err != nil {
- log.Errorw("Error in adding tech profile instance to KV ", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
+ log.Infow("creating-tp-instance", log.Fields{"tableid": techProfiletblID, "uni": uniPortName, "intId": intfId})
+
+ // Make sure the uniPortName is as per format pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}
+ if !uniPortNameFormat.Match([]byte(uniPortName)) {
+ log.Errorw("uni-port-name-not-confirming-to-format", log.Fields{"uniPortName": uniPortName})
return nil
}
- log.Infow("Added tech profile instance to KV store successfully ",
+
+ tp := t.getTPFromKVStore(techProfiletblID)
+ if tp != nil {
+ if err := t.validateInstanceControlAttr(tp.InstanceCtrl); err != nil {
+ log.Error("invalid-instance-ctrl-attr--using-default-tp")
+ tp = t.getDefaultTechProfile()
+ } else {
+ log.Infow("using-specified-tp-from-kv-store", log.Fields{"tpid": techProfiletblID})
+ }
+ } else {
+ log.Info("tp-not-found-on-kv--creating-default-tp")
+ tp = t.getDefaultTechProfile()
+ }
+ tpInstancePath := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
+ if tpInstance = t.allocateTPInstance(uniPortName, tp, intfId, tpInstancePath); tpInstance == nil {
+ log.Error("tp-intance-allocation-failed")
+ return nil
+ }
+ if err := t.addTechProfInstanceToKVStore(techProfiletblID, uniPortName, tpInstance); err != nil {
+ log.Errorw("error-adding-tp-to-kv-store ", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
+ return nil
+ }
+ log.Infow("tp-added-to-kv-store-successfully",
log.Fields{"tpid": techProfiletblID, "uni": uniPortName, "intfId": intfId})
return tpInstance
}
@@ -365,7 +383,26 @@
return t.config.KVBackend.Delete(path)
}
-func (t *TechProfileMgr) allocateTPInstance(uniPortName string, tp *DefaultTechProfile, intfId uint32, numOfTconts uint32) *TechProfile {
+func (t *TechProfileMgr) validateInstanceControlAttr(instCtl InstanceControl) error {
+ if instCtl.Onu != "single-instance" && instCtl.Onu != "multi-instance" {
+ log.Errorw("invalid-onu-instance-control-attribute", log.Fields{"onu-inst": instCtl.Onu})
+ return errors.New("invalid-onu-instance-ctl-attr")
+ }
+
+ if instCtl.Uni != "single-instance" && instCtl.Uni != "multi-instance" {
+ log.Errorw("invalid-uni-instance-control-attribute", log.Fields{"uni-inst": instCtl.Uni})
+ return errors.New("invalid-uni-instance-ctl-attr")
+ }
+
+ if instCtl.Uni == "multi-instance" {
+ log.Error("uni-multi-instance-tp-not-supported")
+ return errors.New("uni-multi-instance-tp-not-supported")
+ }
+
+ return nil
+}
+
+func (t *TechProfileMgr) allocateTPInstance(uniPortName string, tp *DefaultTechProfile, intfId uint32, tpInstPath string) *TechProfile {
var usGemPortAttributeList []iGemPortAttribute
var dsGemPortAttributeList []iGemPortAttribute
@@ -373,14 +410,26 @@
var gemPorts []uint32
var err error
- log.Infow("Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numOfTconts": numOfTconts, "numGem": tp.NumGemPorts})
- if numOfTconts > 1 {
- log.Errorw("Multiple Tconts not supported currently", log.Fields{"uniPortName": uniPortName, "intfId": intfId})
- return nil
- }
- if tcontIDs, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeAllocID(), numOfTconts); err != nil {
- log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId, "numTconts": numOfTconts})
- return nil
+ log.Infow("Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numGem": tp.NumGemPorts})
+
+ if tp.InstanceCtrl.Onu == "multi-instance" {
+ if tcontIDs, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeAllocID(), 1); err != nil {
+ log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+ return nil
+ }
+ } else { // "single-instance"
+ tpInst, err := t.getSingleInstanceTp(tpInstPath)
+ if tpInst == nil {
+ // No "single-instance" tp found on one any uni port for the given TP ID
+ // Allocate a new TcontID or AllocID
+ if tcontIDs, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeAllocID(), 1); err != nil {
+ log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+ return nil
+ }
+ } else {
+ // Use the alloc-id from the existing TpInstance
+ tcontIDs = append(tcontIDs, tpInst.UsScheduler.AllocID)
+ }
}
log.Debugw("Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
if gemPorts, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts); err != nil {
@@ -416,7 +465,6 @@
ProfileType: tp.ProfileType,
Version: tp.Version,
NumGemPorts: tp.NumGemPorts,
- NumTconts: numOfTconts,
InstanceCtrl: tp.InstanceCtrl,
UsScheduler: iScheduler{
AllocID: tcontIDs[0],
@@ -436,6 +484,32 @@
DownstreamGemPortAttributeList: dsGemPortAttributeList}
}
+// getSingleInstanceTp returns another TpInstance for an ONU on a different
+// uni port for the same TP ID, if it finds one, else nil.
+func (t *TechProfileMgr) getSingleInstanceTp(tpPath string) (*TechProfile, error) {
+ var tpInst TechProfile
+
+ // For example:
+ // tpPath like "service/voltha/technology_profiles/xgspon/64/pon-{0}/onu-{1}/uni-{1}"
+ // is broken into ["service/voltha/technology_profiles/xgspon/64/pon-{0}/onu-{1}" ""]
+ uniPathSlice := regexp.MustCompile(`/uni-{[0-9]+}$`).Split(tpPath, 2)
+ kvPairs, _ := t.config.KVBackend.List(uniPathSlice[0])
+
+ // Find a valid TP Instance among all the UNIs of that ONU for the given TP ID
+ for keyPath, kvPair := range kvPairs {
+ if value, err := kvstore.ToByte(kvPair.Value); err == nil {
+ if err = json.Unmarshal(value, &tpInst); err != nil {
+ log.Errorw("error-unmarshal-kv-pair", log.Fields{"keyPath": keyPath, "value": value})
+ return nil, errors.New("error-unmarshal-kv-pair")
+ } else {
+ log.Debugw("found-valid-tp-instance-on-another-uni", log.Fields{"keyPath": keyPath})
+ return &tpInst, nil
+ }
+ }
+ }
+ return nil, nil
+}
+
func (t *TechProfileMgr) getDefaultTechProfile() *DefaultTechProfile {
var usGemPortAttributeList []GemPortAttribute
@@ -690,3 +764,25 @@
log.Errorw("No-GemportId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
return 0
}
+
+// FindAllTpInstances returns all TechProfile instances for a given TechProfile table-id, pon interface ID and onu ID.
+func (t *TechProfileMgr) FindAllTpInstances(techProfiletblID uint32, ponIntf uint32, onuID uint32) []TechProfile {
+ var tp TechProfile
+ onuTpInstancePath := fmt.Sprintf("%d/%s/pon-{%d}/onu-{%d}", techProfiletblID, t.resourceMgr.GetTechnology(), ponIntf, onuID)
+
+ if kvPairs, _ := t.config.KVBackend.List(onuTpInstancePath); kvPairs != nil {
+ tpInstances := make([]TechProfile, 0, len(kvPairs))
+ for kvPath, kvPair := range kvPairs {
+ if value, err := kvstore.ToByte(kvPair.Value); err == nil {
+ if err = json.Unmarshal(value, &tp); err != nil {
+ log.Errorw("error-unmarshal-kv-pair", log.Fields{"kvPath": kvPath, "value": value})
+ continue
+ } else {
+ tpInstances = append(tpInstances, tp)
+ }
+ }
+ }
+ return tpInstances
+ }
+ return nil
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile_if.go b/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile_if.go
index 3267759..a77ea45 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile_if.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile_if.go
@@ -34,4 +34,5 @@
ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler
GetTrafficQueues(tp *TechProfile, Dir tp_pb.Direction) []*tp_pb.TrafficQueue
GetGemportIDForPbit(tp *TechProfile, Dir tp_pb.Direction, pbit uint32) uint32
+ FindAllTpInstances(techProfiletblID uint32, ponIntf uint32, onuID uint32) []TechProfile
}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index bac5483..1ce4e81 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -59,7 +59,7 @@
github.com/mitchellh/go-homedir
# github.com/mitchellh/mapstructure v1.1.2
github.com/mitchellh/mapstructure
-# github.com/opencord/voltha-lib-go/v2 v2.2.13
+# github.com/opencord/voltha-lib-go/v2 v2.2.17
github.com/opencord/voltha-lib-go/v2/pkg/adapters
github.com/opencord/voltha-lib-go/v2/pkg/adapters/adapterif
github.com/opencord/voltha-lib-go/v2/pkg/adapters/common