VOL-2348 Update voltha-openolt-adapter to latest voltha-lib-go;
I/O errors should trigger unreadiness;
Update tech profile mocks

Change-Id: If245ab38050962af5992e8f0b67823c39e9522e8
diff --git a/VERSION b/VERSION
index f963933..bda8fbe 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.2.6-dev
+2.2.6
diff --git a/go.mod b/go.mod
index 321c0db..a8d791e 100644
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,7 @@
 	github.com/cenkalti/backoff/v3 v3.1.1
 	github.com/gogo/protobuf v1.3.1
 	github.com/golang/protobuf v1.3.2
-	github.com/opencord/voltha-lib-go/v2 v2.2.23
+	github.com/opencord/voltha-lib-go/v2 v2.2.25
 	github.com/opencord/voltha-protos/v2 v2.1.2
 	go.etcd.io/etcd v0.0.0-20190930204107-236ac2a90522
 	google.golang.org/grpc v1.25.1
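The go.sum entries below, and the vendor/ and vendor/modules.txt changes further down, are the mechanical fallout of this one-line bump. With Go modules they are normally regenerated rather than hand-edited, e.g. with `go get github.com/opencord/voltha-lib-go/v2@v2.2.25` followed by `go mod tidy` and `go mod vendor` (the exact workflow is an assumption; this repo may drive it through its Makefile).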
diff --git a/go.sum b/go.sum
index 29e55c1..8fda01b 100644
--- a/go.sum
+++ b/go.sum
@@ -196,8 +196,8 @@
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I=
 github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/opencord/voltha-lib-go/v2 v2.2.23 h1:iDB6R0aAPeirqHaNAQDtGHtDyb9V/R73wAp2hrbVViQ=
-github.com/opencord/voltha-lib-go/v2 v2.2.23/go.mod h1:CoY2amUEsbO2grCbJRk7G+Fl1Xb7vQLw3/uGLbTz0Ms=
+github.com/opencord/voltha-lib-go/v2 v2.2.25 h1:8YZE2caHN7niAd0ojzegSQpjH+y7/83CkWaZJCojf/M=
+github.com/opencord/voltha-lib-go/v2 v2.2.25/go.mod h1:CoY2amUEsbO2grCbJRk7G+Fl1Xb7vQLw3/uGLbTz0Ms=
 github.com/opencord/voltha-protos/v2 v2.1.0/go.mod h1:6kOcfYi1CadWowFxI2SH5wLfHrsRECZLZlD2MFK6WDI=
 github.com/opencord/voltha-protos/v2 v2.1.2 h1:/eX+kXhANbzxTpBHgC6vjwBUGRKKvGUOQRDdDgROp9E=
 github.com/opencord/voltha-protos/v2 v2.1.2/go.mod h1:6kOcfYi1CadWowFxI2SH5wLfHrsRECZLZlD2MFK6WDI=
diff --git a/mocks/mockTechprofile.go b/mocks/mockTechprofile.go
index 2708592..6fa3c39 100644
--- a/mocks/mockTechprofile.go
+++ b/mocks/mockTechprofile.go
@@ -96,6 +96,11 @@
 	return []*tp_pb.TrafficQueue{{}}, nil
 }
 
+// GetMulticastTrafficQueues to mock techprofile GetMulticastTrafficQueues method
+func (m MockTechProfile) GetMulticastTrafficQueues(tp *tp.TechProfile) []*tp_pb.TrafficQueue {
+	return []*tp_pb.TrafficQueue{{}}
+}
+
 // GetGemportIDForPbit to mock techprofile GetGemportIDForPbit method
 func (m MockTechProfile) GetGemportIDForPbit(tp *tp.TechProfile, Dir tp_pb.Direction, pbit uint32) uint32 {
 	return 0
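As a minimal sketch of how this new mock method might be exercised in a test (the test file, test name, and import paths are assumptions, not part of this change):

```go
package mocks_test

import (
	"testing"

	tp "github.com/opencord/voltha-lib-go/v2/pkg/techprofile"
	"github.com/opencord/voltha-openolt-adapter/mocks"
)

// TestGetMulticastTrafficQueuesMock verifies the mock returns its single
// placeholder queue, mirroring the unicast GetTrafficQueues mock above.
func TestGetMulticastTrafficQueuesMock(t *testing.T) {
	m := mocks.MockTechProfile{}
	queues := m.GetMulticastTrafficQueues(&tp.TechProfile{})
	if len(queues) != 1 {
		t.Fatalf("expected one placeholder multicast queue, got %d", len(queues))
	}
}
```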
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/kafka/sarama_client.go b/vendor/github.com/opencord/voltha-lib-go/v2/pkg/kafka/sarama_client.go
index ca88dfd..c05df69 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/kafka/sarama_client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v2/pkg/kafka/sarama_client.go
@@ -532,6 +532,11 @@
 		return true
 	}
 
+	if strings.HasSuffix(err.Error(), "i/o timeout") { // "dial tcp 10.244.1.176:9092: i/o timeout"
+		log.Info("is-liveness-error-io-timeout")
+		return true
+	}
+
 	// Other errors shouldn't trigger a loss of liveness
 
 	log.Infow("is-liveness-error-ignored", log.Fields{"err": err})
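Matching on error-text suffixes is fragile, but it is sometimes the only handle the client exposes for transport-level failures. A self-contained sketch of the pattern added above, reduced to just the suffix check (the real function also recognizes several other error shapes before falling through):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// isIOTimeout reports whether an error's text carries the suffix that a
// failed dial produces, e.g. "dial tcp 10.244.1.176:9092: i/o timeout".
func isIOTimeout(err error) bool {
	return strings.HasSuffix(err.Error(), "i/o timeout")
}

func main() {
	fmt.Println(isIOTimeout(errors.New("dial tcp 10.244.1.176:9092: i/o timeout"))) // true
	fmt.Println(isIOTimeout(errors.New("kafka: client has run out of available brokers"))) // false
}
```

Treating such timeouts as liveness errors is what lets the adapter report unreadiness instead of retrying silently.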
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile.go b/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile.go
index 95ac08b..3588838 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile.go
@@ -162,12 +162,15 @@
 
 // default GEM attribute constants
 const (
-	defaultAESEncryption  = "True"
-	defaultPriorityQueue  = 0
-	defaultQueueWeight    = 0
-	defaultMaxQueueSize   = "auto"
-	defaultdropPolicy     = DiscardPolicy_TailDrop
-	defaultSchedulePolicy = SchedulingPolicy_WRR
+	defaultAESEncryption     = "True"
+	defaultPriorityQueue     = 0
+	defaultQueueWeight       = 0
+	defaultMaxQueueSize      = "auto"
+	defaultdropPolicy        = DiscardPolicy_TailDrop
+	defaultSchedulePolicy    = SchedulingPolicy_WRR
+	defaultIsMulticast       = "False"
+	defaultAccessControlList = "224.0.0.0-239.255.255.255"
+	defaultMcastGemID        = 4069
 )
 
 type GemPortAttribute struct {
@@ -179,6 +182,10 @@
 	Weight           uint32        `json:"weight"`
 	DiscardPolicy    string        `json:"discard_policy"`
 	DiscardConfig    DiscardConfig `json:"discard_config"`
+	IsMulticast      string        `json:"is_multicast"`
+	DControlList     string        `json:"dynamic_access_control_list"`
+	SControlList     string        `json:"static_access_control_list"`
+	McastGemID       uint32        `json:"multicast_gem_id"`
 }
 
 type iScheduler struct {
@@ -199,6 +206,10 @@
 	Weight           uint32        `json:"weight"`
 	DiscardPolicy    string        `json:"discard_policy"`
 	DiscardConfig    DiscardConfig `json:"discard_config"`
+	IsMulticast      string        `json:"is_multicast"`
+	DControlList     string        `json:"dynamic_access_control_list"`
+	SControlList     string        `json:"static_access_control_list"`
+	McastGemID       uint32        `json:"multicast_gem_id"`
 }
 
 type TechProfileMgr struct {
@@ -410,6 +421,8 @@
 
 	var usGemPortAttributeList []iGemPortAttribute
 	var dsGemPortAttributeList []iGemPortAttribute
+	var dsMulticastGemAttributeList []iGemPortAttribute
+	var dsUnicastGemAttributeList []iGemPortAttribute
 	var tcontIDs []uint32
 	var gemPorts []uint32
 	var err error
@@ -452,17 +465,57 @@
 				Weight:           tp.UpstreamGemPortAttributeList[index].Weight,
 				DiscardPolicy:    tp.UpstreamGemPortAttributeList[index].DiscardPolicy,
 				DiscardConfig:    tp.UpstreamGemPortAttributeList[index].DiscardConfig})
+	}
+
+	log.Infow("length-of-downstream-gem-port-attribute-list", log.Fields{"length": len(tp.DownstreamGemPortAttributeList)})
+	//put multicast and unicast downstream GEM port attributes in different lists first
+	for index := 0; index < len(tp.DownstreamGemPortAttributeList); index++ {
+		if isMulticastGem(tp.DownstreamGemPortAttributeList[index].IsMulticast) {
+			dsMulticastGemAttributeList = append(dsMulticastGemAttributeList,
+				iGemPortAttribute{
+					McastGemID:       tp.DownstreamGemPortAttributeList[index].McastGemID,
+					MaxQueueSize:     tp.DownstreamGemPortAttributeList[index].MaxQueueSize,
+					PbitMap:          tp.DownstreamGemPortAttributeList[index].PbitMap,
+					AesEncryption:    tp.DownstreamGemPortAttributeList[index].AesEncryption,
+					SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
+					PriorityQueue:    tp.DownstreamGemPortAttributeList[index].PriorityQueue,
+					Weight:           tp.DownstreamGemPortAttributeList[index].Weight,
+					DiscardPolicy:    tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
+					DiscardConfig:    tp.DownstreamGemPortAttributeList[index].DiscardConfig,
+					IsMulticast:      tp.DownstreamGemPortAttributeList[index].IsMulticast,
+					DControlList:     tp.DownstreamGemPortAttributeList[index].DControlList,
+					SControlList:     tp.DownstreamGemPortAttributeList[index].SControlList})
+		} else {
+			dsUnicastGemAttributeList = append(dsUnicastGemAttributeList,
+				iGemPortAttribute{
+					MaxQueueSize:     tp.DownstreamGemPortAttributeList[index].MaxQueueSize,
+					PbitMap:          tp.DownstreamGemPortAttributeList[index].PbitMap,
+					AesEncryption:    tp.DownstreamGemPortAttributeList[index].AesEncryption,
+					SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
+					PriorityQueue:    tp.DownstreamGemPortAttributeList[index].PriorityQueue,
+					Weight:           tp.DownstreamGemPortAttributeList[index].Weight,
+					DiscardPolicy:    tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
+					DiscardConfig:    tp.DownstreamGemPortAttributeList[index].DiscardConfig})
+		}
+	}
+	//add unicast downstream GEM ports to dsGemPortAttributeList
+	for index := 0; index < int(tp.NumGemPorts); index++ {
 		dsGemPortAttributeList = append(dsGemPortAttributeList,
 			iGemPortAttribute{GemportID: gemPorts[index],
-				MaxQueueSize:     tp.DownstreamGemPortAttributeList[index].MaxQueueSize,
-				PbitMap:          tp.DownstreamGemPortAttributeList[index].PbitMap,
-				AesEncryption:    tp.DownstreamGemPortAttributeList[index].AesEncryption,
-				SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
-				PriorityQueue:    tp.DownstreamGemPortAttributeList[index].PriorityQueue,
-				Weight:           tp.DownstreamGemPortAttributeList[index].Weight,
-				DiscardPolicy:    tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
-				DiscardConfig:    tp.DownstreamGemPortAttributeList[index].DiscardConfig})
+				MaxQueueSize:     dsUnicastGemAttributeList[index].MaxQueueSize,
+				PbitMap:          dsUnicastGemAttributeList[index].PbitMap,
+				AesEncryption:    dsUnicastGemAttributeList[index].AesEncryption,
+				SchedulingPolicy: dsUnicastGemAttributeList[index].SchedulingPolicy,
+				PriorityQueue:    dsUnicastGemAttributeList[index].PriorityQueue,
+				Weight:           dsUnicastGemAttributeList[index].Weight,
+				DiscardPolicy:    dsUnicastGemAttributeList[index].DiscardPolicy,
+				DiscardConfig:    dsUnicastGemAttributeList[index].DiscardConfig})
 	}
+	//add multicast GEM ports to dsGemPortAttributeList afterwards
+	for k := range dsMulticastGemAttributeList {
+		dsGemPortAttributeList = append(dsGemPortAttributeList, dsMulticastGemAttributeList[k])
+	}
+
 	return &TechProfile{
 		SubscriberIdentifier: uniPortName,
 		Name:                 tp.Name,
@@ -546,7 +599,11 @@
 				DiscardConfig: DiscardConfig{
 					MinThreshold:   defaultMinThreshold,
 					MaxThreshold:   defaultMaxThreshold,
-					MaxProbability: defaultMaxProbability}})
+					MaxProbability: defaultMaxProbability},
+				IsMulticast:  defaultIsMulticast,
+				DControlList: defaultAccessControlList,
+				SControlList: defaultAccessControlList,
+				McastGemID:   defaultMcastGemID})
 	}
 	return &DefaultTechProfile{
 		Name:        t.config.DefaultTPName,
@@ -720,6 +777,10 @@
 		NumGemPorts := len(tp.DownstreamGemPortAttributeList)
 		GemPorts := make([]*tp_pb.TrafficQueue, 0)
 		for Count := 0; Count < NumGemPorts; Count++ {
+			if isMulticastGem(tp.DownstreamGemPortAttributeList[Count].IsMulticast) {
+				// skip multicast GEM ports; they are handled separately
+				continue
+			}
 			if tp.DownstreamGemPortAttributeList[Count].AesEncryption == "True" {
 				encryp = true
 			} else {
@@ -757,6 +818,41 @@
 	return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unsupported direction %s", Dir)
 }
 
+// isMulticastGem returns true if the isMulticast attribute value of a GEM port is "True", "true", or "TRUE"; false otherwise.
+func isMulticastGem(isMulticastAttrValue string) bool {
+	return isMulticastAttrValue != "" &&
+		(isMulticastAttrValue == "True" || isMulticastAttrValue == "true" || isMulticastAttrValue == "TRUE")
+}
+
+// GetMulticastTrafficQueues returns downstream traffic queues for the multicast GEM ports of the given tech profile instance.
+func (tpm *TechProfileMgr) GetMulticastTrafficQueues(tp *TechProfile) []*tp_pb.TrafficQueue {
+	var encryp bool
+	NumGemPorts := len(tp.DownstreamGemPortAttributeList)
+	mcastTrafficQueues := make([]*tp_pb.TrafficQueue, 0)
+	for Count := 0; Count < NumGemPorts; Count++ {
+		if !isMulticastGem(tp.DownstreamGemPortAttributeList[Count].IsMulticast) {
+			continue
+		}
+		if tp.DownstreamGemPortAttributeList[Count].AesEncryption == "True" {
+			encryp = true
+		} else {
+			encryp = false
+		}
+		mcastTrafficQueues = append(mcastTrafficQueues, &tp_pb.TrafficQueue{
+			Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.DsScheduler.Direction)),
+			GemportId:     tp.DownstreamGemPortAttributeList[Count].McastGemID,
+			PbitMap:       tp.DownstreamGemPortAttributeList[Count].PbitMap,
+			AesEncryption: encryp,
+			SchedPolicy:   tp_pb.SchedulingPolicy(tpm.GetprotoBufParamValue("sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)),
+			Priority:      tp.DownstreamGemPortAttributeList[Count].PriorityQueue,
+			Weight:        tp.DownstreamGemPortAttributeList[Count].Weight,
+			DiscardPolicy: tp_pb.DiscardPolicy(tpm.GetprotoBufParamValue("discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)),
+		})
+	}
+	log.Debugw("Downstream Multicast Traffic queue list ", log.Fields{"queuelist": mcastTrafficQueues})
+	return mcastTrafficQueues
+}
+
 func (tpm *TechProfileMgr) GetUsTrafficScheduler(tp *TechProfile) *tp_pb.TrafficScheduler {
 	UsScheduler, _ := tpm.GetUsScheduler(tp)
 
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile_if.go b/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile_if.go
index 797edc8..cadca87 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile_if.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile_if.go
@@ -33,6 +33,7 @@
 	GetTrafficScheduler(tpInstance *TechProfile, SchedCfg *tp_pb.SchedulerConfig,
 		ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler
 	GetTrafficQueues(tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error)
+	GetMulticastTrafficQueues(tp *TechProfile) []*tp_pb.TrafficQueue
 	GetGemportIDForPbit(tp *TechProfile, Dir tp_pb.Direction, pbit uint32) uint32
 	FindAllTpInstances(techProfiletblID uint32, ponIntf uint32, onuID uint32) []TechProfile
 }
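Widening TechProfileIf is what forces the mock update earlier in this change. A compile-time assertion of the usual Go form catches such drift immediately; the assertion below is illustrative and not part of the commit:

```go
package mocks_test

import (
	tp "github.com/opencord/voltha-lib-go/v2/pkg/techprofile"
	"github.com/opencord/voltha-openolt-adapter/mocks"
)

// This declaration fails to compile whenever MockTechProfile falls behind
// the TechProfileIf interface, e.g. when GetMulticastTrafficQueues is added.
var _ tp.TechProfileIf = mocks.MockTechProfile{}
```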
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 0b40ae6..860b4a2 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -63,7 +63,7 @@
 github.com/mitchellh/go-homedir
 # github.com/mitchellh/mapstructure v1.1.2
 github.com/mitchellh/mapstructure
-# github.com/opencord/voltha-lib-go/v2 v2.2.23
+# github.com/opencord/voltha-lib-go/v2 v2.2.25
 github.com/opencord/voltha-lib-go/v2/pkg/adapters
 github.com/opencord/voltha-lib-go/v2/pkg/adapters/adapterif
 github.com/opencord/voltha-lib-go/v2/pkg/adapters/common