Updated the adapter to handle the DHCP trap on the NNI and packet-in/out, along with bug fixes.
Tested EAPOL/DHCP/HSIA functionality E2E with EdgeCore OLT and TWSH ONU KIT.

patch: the PON port number is now derived from the platform mapping and sent to the core, plus bug fixes

Retested the EAPOL/DHCP/HSIA use case end to end with the EdgeCore OLT and TWSH ONU KIT
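For illustration, a minimal sketch of the kind of platform mapping referred to above; the bit encoding and names below are assumptions for the example, not the adapter's actual constants:

package main

import "fmt"

// ponPortMarker is a hypothetical bit pattern used here only to illustrate
// how a PON interface ID can be encoded into a logical port number that the
// adapter reports to the core. The real adapter's encoding may differ.
const ponPortMarker uint32 = 0x2 << 28

// intfIDToPONPortNo derives a port number for a PON interface (illustrative).
func intfIDToPONPortNo(intfID uint32) uint32 {
	return ponPortMarker | intfID
}

func main() {
	fmt.Printf("PON intf 0 -> port 0x%x\n", intfIDToPONPortNo(0))
}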

Change-Id: I99df82fd7a1385c10878f6fe09ce0d30c48d8e99
diff --git a/vendor/github.com/opencord/voltha-go/common/ponresourcemanager/ponresourcemanager.go b/vendor/github.com/opencord/voltha-go/common/ponresourcemanager/ponresourcemanager.go
index c37307b..2873dbc 100755
--- a/vendor/github.com/opencord/voltha-go/common/ponresourcemanager/ponresourcemanager.go
+++ b/vendor/github.com/opencord/voltha-go/common/ponresourcemanager/ponresourcemanager.go
@@ -133,6 +133,7 @@
 	SharedResourceMgrs map[string]*PONResourceManager
 	SharedIdxByType    map[string]string
 	IntfIDs            []uint32 // list of pon interface IDs
+	Globalorlocal      string
 }
 
 func newKVClient(storeType string, address string, timeout int) (kvstore.Client, error) {
@@ -254,10 +255,14 @@
 	log.Debugf("update ranges for %s, %d", StartIDx, StartID)
 
 	if StartID != 0 {
-		PONRMgr.PonResourceRanges[StartIDx] = StartID
+		if (PONRMgr.PonResourceRanges[StartIDx] == nil) || (PONRMgr.PonResourceRanges[StartIDx].(uint32) < StartID) {
+			PONRMgr.PonResourceRanges[StartIDx] = StartID
+		}
 	}
 	if EndID != 0 {
-		PONRMgr.PonResourceRanges[EndIDx] = EndID
+		if (PONRMgr.PonResourceRanges[EndIDx] == nil) || (PONRMgr.PonResourceRanges[EndIDx].(uint32) > EndID) {
+			PONRMgr.PonResourceRanges[EndIDx] = EndID
+		}
 	}
 	//if SharedPoolID != 0 {
 	PONRMgr.PonResourceRanges[SharedIDx] = SharedPoolID
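The guarded assignments above mean repeated range updates can only tighten a pool: the highest start and the lowest end seen so far win. A toy reproduction of that behaviour (key names are illustrative, not the package's constants):

package main

import "fmt"

// updateRange mirrors the new conditional logic in UpdateRanges: overwrite
// the start only with a larger value, and the end only with a smaller one.
func updateRange(ranges map[string]interface{}, startIdx string, startID uint32, endIdx string, endID uint32) {
	if startID != 0 {
		if ranges[startIdx] == nil || ranges[startIdx].(uint32) < startID {
			ranges[startIdx] = startID
		}
	}
	if endID != 0 {
		if ranges[endIdx] == nil || ranges[endIdx].(uint32) > endID {
			ranges[endIdx] = endID
		}
	}
}

func main() {
	ranges := map[string]interface{}{}
	updateRange(ranges, "alloc_id_start", 1024, "alloc_id_end", 16383)
	updateRange(ranges, "alloc_id_start", 256, "alloc_id_end", 2047)
	// start stays at the higher 1024, end tightens to 2047
	fmt.Println(ranges["alloc_id_start"], ranges["alloc_id_end"])
}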
@@ -567,6 +572,7 @@
 	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
 		return SharedResourceMgr.GetResourceID(IntfID, ResourceType, NumIDs)
 	}
+	log.Debugf("Fetching resource from %s rsrc mgr for resource %s", PONRMgr.Globalorlocal, ResourceType)
 
 	Path := PONRMgr.GetPath(IntfID, ResourceType)
 	if Path == "" {
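For context, GetResourceID (partially shown above) hands the request to the shared manager when one is configured, and the new Globalorlocal tag only labels which manager served it in the debug log. A minimal sketch of that delegation pattern, with hypothetical names rather than the real PONResourceManager types:

package main

import "fmt"

// rsrcMgr is a toy model of the shared/local split: a local manager that has
// a shared (global) manager configured delegates allocation to it.
type rsrcMgr struct {
	scope  string // "global" or "local"
	shared *rsrcMgr
	nextID uint32
}

func (m *rsrcMgr) getResourceID(resourceType string) uint32 {
	if m.shared != nil && m.shared != m {
		// a shared pool is configured: delegate to the global manager
		return m.shared.getResourceID(resourceType)
	}
	fmt.Printf("fetching %s from %s rsrc mgr\n", resourceType, m.scope)
	m.nextID++
	return m.nextID
}

func main() {
	global := &rsrcMgr{scope: "global", nextID: 1023}
	local := &rsrcMgr{scope: "local", shared: global}
	fmt.Println(local.getResourceID("ALLOC_ID")) // served by the global pool
}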
diff --git a/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile.go b/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile.go
index 9f7bebf..2879e99 100644
--- a/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile.go
+++ b/vendor/github.com/opencord/voltha-go/common/techprofile/tech_profile.go
@@ -178,12 +178,23 @@
 }
 
 type iScheduler struct {
-	AllocID   uint32    `json:"alloc_id"`
-	Scheduler Scheduler `json:"scheduler"`
+	AllocID      uint32 `json:"alloc_id"`
+	Direction    string `json:"direction"`
+	AdditionalBw string `json:"additional_bw"`
+	Priority     uint32 `json:"priority"`
+	Weight       uint32 `json:"weight"`
+	QSchedPolicy string `json:"q_sched_policy"`
 }
 type iGemPortAttribute struct {
-	GemportID    uint32           `json:"gem_port_id"`
-	GemAttribute GemPortAttribute `json:"gem_attribute"`
+	GemportID        uint32        `json:"gemport_id"`
+	MaxQueueSize     string        `json:"max_q_size"`
+	PbitMap          string        `json:"pbit_map"`
+	AesEncryption    string        `json:"aes_encryption"`
+	SchedulingPolicy string        `json:"scheduling_policy"`
+	PriorityQueue    int           `json:"priority_q"`
+	Weight           int           `json:"weight"`
+	DiscardPolicy    string        `json:"discard_policy"`
+	DiscardConfig    DiscardConfig `json:"discard_config"`
 }
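The flattened iScheduler and iGemPortAttribute above now carry the tech-profile JSON keys directly instead of nesting the template structs. A small sketch of what the scheduler fragment (de)serializes to, with illustrative field values:

package main

import (
	"encoding/json"
	"fmt"
)

// iScheduler copied from the flattened shape above; the values used in main
// are examples only.
type iScheduler struct {
	AllocID      uint32 `json:"alloc_id"`
	Direction    string `json:"direction"`
	AdditionalBw string `json:"additional_bw"`
	Priority     uint32 `json:"priority"`
	Weight       uint32 `json:"weight"`
	QSchedPolicy string `json:"q_sched_policy"`
}

func main() {
	us := iScheduler{AllocID: 1024, Direction: "UPSTREAM",
		AdditionalBw: "AdditionalBW_BestEffort", QSchedPolicy: "WRR"}
	b, _ := json.Marshal(us)
	// prints flat keys such as "alloc_id", "direction", "q_sched_policy"
	fmt.Println(string(b))
}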
 
 type TechProfileMgr struct {
@@ -207,7 +218,7 @@
 	ProfileType                    string              `json:"profile_type"`
 	Version                        int                 `json:"version"`
 	NumGemPorts                    uint32              `json:"num_gem_ports"`
-	NumTconts                      uint32              `json:"num_tconts"`
+	NumTconts                      uint32              `json:"num_of_tconts"`
 	InstanceCtrl                   InstanceControl     `json:"instance_control"`
 	UsScheduler                    iScheduler          `json:"us_scheduler"`
 	DsScheduler                    iScheduler          `json:"ds_scheduler"`
@@ -371,7 +382,7 @@
 		log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId, "numTconts": numOfTconts})
 		return nil
 	}
-	fmt.Println("Num GEM ports in TP:", tp.NumGemPorts)
+	log.Debugw("Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
 	if gemPorts, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts); err != nil {
 		log.Errorw("Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
 		return nil
@@ -380,10 +391,24 @@
 	for index := 0; index < int(tp.NumGemPorts); index++ {
 		usGemPortAttributeList = append(usGemPortAttributeList,
 			iGemPortAttribute{GemportID: gemPorts[index],
-				GemAttribute: tp.UpstreamGemPortAttributeList[index]})
+				MaxQueueSize:     tp.UpstreamGemPortAttributeList[index].MaxQueueSize,
+				PbitMap:          tp.UpstreamGemPortAttributeList[index].PbitMap,
+				AesEncryption:    tp.UpstreamGemPortAttributeList[index].AesEncryption,
+				SchedulingPolicy: tp.UpstreamGemPortAttributeList[index].SchedulingPolicy,
+				PriorityQueue:    tp.UpstreamGemPortAttributeList[index].PriorityQueue,
+				Weight:           tp.UpstreamGemPortAttributeList[index].Weight,
+				DiscardPolicy:    tp.UpstreamGemPortAttributeList[index].DiscardPolicy,
+				DiscardConfig:    tp.UpstreamGemPortAttributeList[index].DiscardConfig})
 		dsGemPortAttributeList = append(dsGemPortAttributeList,
 			iGemPortAttribute{GemportID: gemPorts[index],
-				GemAttribute: tp.DownstreamGemPortAttributeList[index]})
+				MaxQueueSize:     tp.DownstreamGemPortAttributeList[index].MaxQueueSize,
+				PbitMap:          tp.DownstreamGemPortAttributeList[index].PbitMap,
+				AesEncryption:    tp.DownstreamGemPortAttributeList[index].AesEncryption,
+				SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
+				PriorityQueue:    tp.DownstreamGemPortAttributeList[index].PriorityQueue,
+				Weight:           tp.DownstreamGemPortAttributeList[index].Weight,
+				DiscardPolicy:    tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
+				DiscardConfig:    tp.DownstreamGemPortAttributeList[index].DiscardConfig})
 	}
 	return &TechProfile{
 		SubscriberIdentifier: uniPortName,
@@ -394,11 +419,19 @@
 		NumTconts:            numOfTconts,
 		InstanceCtrl:         tp.InstanceCtrl,
 		UsScheduler: iScheduler{
-			AllocID:   tcontIDs[0],
-			Scheduler: tp.UsScheduler},
+			AllocID:      tcontIDs[0],
+			Direction:    tp.UsScheduler.Direction,
+			AdditionalBw: tp.UsScheduler.AdditionalBw,
+			Priority:     tp.UsScheduler.Priority,
+			Weight:       tp.UsScheduler.Weight,
+			QSchedPolicy: tp.UsScheduler.QSchedPolicy},
 		DsScheduler: iScheduler{
-			AllocID:   tcontIDs[0],
-			Scheduler: tp.DsScheduler},
+			AllocID:      tcontIDs[0],
+			Direction:    tp.DsScheduler.Direction,
+			AdditionalBw: tp.DsScheduler.AdditionalBw,
+			Priority:     tp.DsScheduler.Priority,
+			Weight:       tp.DsScheduler.Weight,
+			QSchedPolicy: tp.DsScheduler.QSchedPolicy},
 		UpstreamGemPortAttributeList:   usGemPortAttributeList,
 		DownstreamGemPortAttributeList: dsGemPortAttributeList}
 }
@@ -498,17 +531,17 @@
 }
 
 func (t *TechProfileMgr) GetUsScheduler(tpInstance *TechProfile) *openolt_pb.Scheduler {
-	dir := openolt_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.UsScheduler.Scheduler.Direction))
+	dir := openolt_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.UsScheduler.Direction))
 	if dir == -1 {
 		log.Fatal("Error in getting Proto for direction for upstream scheduler")
 		return nil
 	}
-	bw := openolt_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.UsScheduler.Scheduler.AdditionalBw))
+	bw := openolt_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.UsScheduler.AdditionalBw))
 	if bw == -1 {
 		log.Fatal("Error in getting Proto for bandwidth for upstream scheduler")
 		return nil
 	}
-	policy := openolt_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.UsScheduler.Scheduler.QSchedPolicy))
+	policy := openolt_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.UsScheduler.QSchedPolicy))
 	if policy == -1 {
 		log.Fatal("Error in getting Proto for scheduling policy for upstream scheduler")
 		return nil
@@ -516,24 +549,24 @@
 	return &openolt_pb.Scheduler{
 		Direction:    dir,
 		AdditionalBw: bw,
-		Priority:     tpInstance.UsScheduler.Scheduler.Priority,
-		Weight:       tpInstance.UsScheduler.Scheduler.Weight,
+		Priority:     tpInstance.UsScheduler.Priority,
+		Weight:       tpInstance.UsScheduler.Weight,
 		SchedPolicy:  policy}
 }
 
 func (t *TechProfileMgr) GetDsScheduler(tpInstance *TechProfile) *openolt_pb.Scheduler {
 
-	dir := openolt_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.DsScheduler.Scheduler.Direction))
+	dir := openolt_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.DsScheduler.Direction))
 	if dir == -1 {
 		log.Fatal("Error in getting Proto for direction for downstream scheduler")
 		return nil
 	}
-	bw := openolt_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.DsScheduler.Scheduler.AdditionalBw))
+	bw := openolt_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.DsScheduler.AdditionalBw))
 	if bw == -1 {
 		log.Fatal("Error in getting Proto for bandwidth for downstream scheduler")
 		return nil
 	}
-	policy := openolt_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.DsScheduler.Scheduler.QSchedPolicy))
+	policy := openolt_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.DsScheduler.QSchedPolicy))
 	if policy == -1 {
 		log.Fatal("Error in getting Proto for scheduling policy for downstream scheduler")
 		return nil
@@ -542,8 +575,8 @@
 	return &openolt_pb.Scheduler{
 		Direction:    dir,
 		AdditionalBw: bw,
-		Priority:     tpInstance.DsScheduler.Scheduler.Priority,
-		Weight:       tpInstance.DsScheduler.Scheduler.Weight,
+		Priority:     tpInstance.DsScheduler.Priority,
+		Weight:       tpInstance.DsScheduler.Weight,
 		SchedPolicy:  policy}
 }
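With the schedulers flattened, GetUsScheduler and GetDsScheduler read the string fields directly and translate them to protobuf enum values, with -1 signalling an unknown name. A toy sketch of that lookup pattern; the tables and function below are illustrative, not the real GetprotoBufParamValue:

package main

import "fmt"

// Illustrative enum-name tables; the real mapping is driven by the openolt
// protobuf definitions inside the tech profile manager.
var protoParams = map[string]map[string]int32{
	"direction":    {"UPSTREAM": 0, "DOWNSTREAM": 1, "BIDIRECTIONAL": 2},
	"sched_policy": {"WRR": 0, "StrictPriority": 1, "Hybrid": 2},
}

// getProtoBufParamValue returns the enum value for a named parameter,
// or -1 when the string does not match any known value.
func getProtoBufParamValue(paramType, paramKey string) int32 {
	if vals, ok := protoParams[paramType]; ok {
		if v, ok := vals[paramKey]; ok {
			return v
		}
	}
	return -1
}

func main() {
	fmt.Println(getProtoBufParamValue("direction", "UPSTREAM")) // 0
	fmt.Println(getProtoBufParamValue("direction", "SIDEWAYS")) // -1
}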
 
@@ -561,7 +594,6 @@
 		}
 	}
 	tconts := []*openolt_pb.Tcont{}
-	// TODO: Fix me , UPSTREAM direction is not proper
 	// upstream scheduler
 	tcont_us := &openolt_pb.Tcont{
 		Direction: usSched.Direction,