VOL-1377: Bug fixes during tech profile download at ONU
Tested that the default tech profile downloaded successfully at the ONU and verified the same on a physical ONU device

patch: Ran gofmt and fixed "make lint" errors

Change-Id: I00b0795d35b851357dc9e1b6b1366ef6b5450f29
diff --git a/common/techprofile/tech_profile.go b/common/techprofile/tech_profile.go
index 9f7bebf..00cfdc1 100644
--- a/common/techprofile/tech_profile.go
+++ b/common/techprofile/tech_profile.go
@@ -178,12 +178,23 @@
 }
 
 type iScheduler struct {
-	AllocID   uint32    `json:"alloc_id"`
-	Scheduler Scheduler `json:"scheduler"`
+	AllocID      uint32 `json:"alloc_id"`
+	Direction    string `json:"direction"`
+	AdditionalBw string `json:"additional_bw"`
+	Priority     uint32 `json:"priority"`
+	Weight       uint32 `json:"weight"`
+	QSchedPolicy string `json:"q_sched_policy"`
 }
 type iGemPortAttribute struct {
-	GemportID    uint32           `json:"gem_port_id"`
-	GemAttribute GemPortAttribute `json:"gem_attribute"`
+	GemportID        uint32        `json:"gemport_id"`
+	MaxQueueSize     string        `json:"max_q_size"`
+	PbitMap          string        `json:"pbit_map"`
+	AesEncryption    string        `json:"aes_encryption"`
+	SchedulingPolicy string        `json:"scheduling_policy"`
+	PriorityQueue    int           `json:"priority_q"`
+	Weight           int           `json:"weight"`
+	DiscardPolicy    string        `json:"discard_policy"`
+	DiscardConfig    DiscardConfig `json:"discard_config"`
 }
 
 type TechProfileMgr struct {
@@ -207,7 +218,7 @@
 	ProfileType                    string              `json:"profile_type"`
 	Version                        int                 `json:"version"`
 	NumGemPorts                    uint32              `json:"num_gem_ports"`
-	NumTconts                      uint32              `json:"num_tconts"`
+	NumTconts                      uint32              `json:"num_of_tconts"`
 	InstanceCtrl                   InstanceControl     `json:"instance_control"`
 	UsScheduler                    iScheduler          `json:"us_scheduler"`
 	DsScheduler                    iScheduler          `json:"ds_scheduler"`
@@ -371,7 +382,6 @@
 		log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId, "numTconts": numOfTconts})
 		return nil
 	}
-	fmt.Println("Num GEM ports in TP:", tp.NumGemPorts)
 	if gemPorts, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts); err != nil {
 		log.Errorw("Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
 		return nil
@@ -380,10 +390,24 @@
 	for index := 0; index < int(tp.NumGemPorts); index++ {
 		usGemPortAttributeList = append(usGemPortAttributeList,
 			iGemPortAttribute{GemportID: gemPorts[index],
-				GemAttribute: tp.UpstreamGemPortAttributeList[index]})
+				MaxQueueSize:     tp.UpstreamGemPortAttributeList[index].MaxQueueSize,
+				PbitMap:          tp.UpstreamGemPortAttributeList[index].PbitMap,
+				AesEncryption:    tp.UpstreamGemPortAttributeList[index].AesEncryption,
+				SchedulingPolicy: tp.UpstreamGemPortAttributeList[index].SchedulingPolicy,
+				PriorityQueue:    tp.UpstreamGemPortAttributeList[index].PriorityQueue,
+				Weight:           tp.UpstreamGemPortAttributeList[index].Weight,
+				DiscardPolicy:    tp.UpstreamGemPortAttributeList[index].DiscardPolicy,
+				DiscardConfig:    tp.UpstreamGemPortAttributeList[index].DiscardConfig})
 		dsGemPortAttributeList = append(dsGemPortAttributeList,
 			iGemPortAttribute{GemportID: gemPorts[index],
-				GemAttribute: tp.DownstreamGemPortAttributeList[index]})
+				MaxQueueSize:     tp.DownstreamGemPortAttributeList[index].MaxQueueSize,
+				PbitMap:          tp.DownstreamGemPortAttributeList[index].PbitMap,
+				AesEncryption:    tp.DownstreamGemPortAttributeList[index].AesEncryption,
+				SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
+				PriorityQueue:    tp.DownstreamGemPortAttributeList[index].PriorityQueue,
+				Weight:           tp.DownstreamGemPortAttributeList[index].Weight,
+				DiscardPolicy:    tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
+				DiscardConfig:    tp.DownstreamGemPortAttributeList[index].DiscardConfig})
 	}
 	return &TechProfile{
 		SubscriberIdentifier: uniPortName,
@@ -394,11 +418,19 @@
 		NumTconts:            numOfTconts,
 		InstanceCtrl:         tp.InstanceCtrl,
 		UsScheduler: iScheduler{
-			AllocID:   tcontIDs[0],
-			Scheduler: tp.UsScheduler},
+			AllocID:      tcontIDs[0],
+			Direction:    tp.UsScheduler.Direction,
+			AdditionalBw: tp.UsScheduler.AdditionalBw,
+			Priority:     tp.UsScheduler.Priority,
+			Weight:       tp.UsScheduler.Weight,
+			QSchedPolicy: tp.UsScheduler.QSchedPolicy},
 		DsScheduler: iScheduler{
-			AllocID:   tcontIDs[0],
-			Scheduler: tp.DsScheduler},
+			AllocID:      tcontIDs[0],
+			Direction:    tp.DsScheduler.Direction,
+			AdditionalBw: tp.DsScheduler.AdditionalBw,
+			Priority:     tp.DsScheduler.Priority,
+			Weight:       tp.DsScheduler.Weight,
+			QSchedPolicy: tp.DsScheduler.QSchedPolicy},
 		UpstreamGemPortAttributeList:   usGemPortAttributeList,
 		DownstreamGemPortAttributeList: dsGemPortAttributeList}
 }
@@ -498,17 +530,17 @@
 }
 
 func (t *TechProfileMgr) GetUsScheduler(tpInstance *TechProfile) *openolt_pb.Scheduler {
-	dir := openolt_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.UsScheduler.Scheduler.Direction))
+	dir := openolt_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.UsScheduler.Direction))
 	if dir == -1 {
 		log.Fatal("Error in getting Proto for direction for upstream scheduler")
 		return nil
 	}
-	bw := openolt_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.UsScheduler.Scheduler.AdditionalBw))
+	bw := openolt_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.UsScheduler.AdditionalBw))
 	if bw == -1 {
 		log.Fatal("Error in getting Proto for bandwidth for upstream scheduler")
 		return nil
 	}
-	policy := openolt_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.UsScheduler.Scheduler.QSchedPolicy))
+	policy := openolt_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.UsScheduler.QSchedPolicy))
 	if policy == -1 {
 		log.Fatal("Error in getting Proto for scheduling policy for upstream scheduler")
 		return nil
@@ -516,24 +548,24 @@
 	return &openolt_pb.Scheduler{
 		Direction:    dir,
 		AdditionalBw: bw,
-		Priority:     tpInstance.UsScheduler.Scheduler.Priority,
-		Weight:       tpInstance.UsScheduler.Scheduler.Weight,
+		Priority:     tpInstance.UsScheduler.Priority,
+		Weight:       tpInstance.UsScheduler.Weight,
 		SchedPolicy:  policy}
 }
 
 func (t *TechProfileMgr) GetDsScheduler(tpInstance *TechProfile) *openolt_pb.Scheduler {
 
-	dir := openolt_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.DsScheduler.Scheduler.Direction))
+	dir := openolt_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.DsScheduler.Direction))
 	if dir == -1 {
 		log.Fatal("Error in getting Proto for direction for downstream scheduler")
 		return nil
 	}
-	bw := openolt_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.DsScheduler.Scheduler.AdditionalBw))
+	bw := openolt_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.DsScheduler.AdditionalBw))
 	if bw == -1 {
 		log.Fatal("Error in getting Proto for bandwidth for downstream scheduler")
 		return nil
 	}
-	policy := openolt_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.DsScheduler.Scheduler.QSchedPolicy))
+	policy := openolt_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.DsScheduler.QSchedPolicy))
 	if policy == -1 {
 		log.Fatal("Error in getting Proto for scheduling policy for downstream scheduler")
 		return nil
@@ -542,8 +574,8 @@
 	return &openolt_pb.Scheduler{
 		Direction:    dir,
 		AdditionalBw: bw,
-		Priority:     tpInstance.DsScheduler.Scheduler.Priority,
-		Weight:       tpInstance.DsScheduler.Scheduler.Weight,
+		Priority:     tpInstance.DsScheduler.Priority,
+		Weight:       tpInstance.DsScheduler.Weight,
 		SchedPolicy:  policy}
 }
 
diff --git a/rw_core/core/device_agent.go b/rw_core/core/device_agent.go
index fcac69c..9782fee 100755
--- a/rw_core/core/device_agent.go
+++ b/rw_core/core/device_agent.go
@@ -891,6 +891,14 @@
 			//	First port
 			log.Debugw("addLogicalPortToMap-first-port-to-add", log.Fields{"deviceId": agent.deviceId})
 			cloned.Ports = make([]*voltha.Port, 0)
+		} else {
+			for _, p := range cloned.Ports {
+				if p.Type == port.Type && p.PortNo == port.PortNo {
+					log.Debugw("port already exists", log.Fields{"port": *port})
+					return nil
+				}
+			}
+
 		}
 		cp := proto.Clone(port).(*voltha.Port)
 		// Set the admin state of the port to ENABLE if the operational state is ACTIVE
diff --git a/rw_core/core/logical_device_agent.go b/rw_core/core/logical_device_agent.go
index c07335e..2f85c45 100644
--- a/rw_core/core/logical_device_agent.go
+++ b/rw_core/core/logical_device_agent.go
@@ -473,7 +473,7 @@
 	if changed {
 		// Launch a routine to decompose the flows
 		if err := agent.decomposeAndSendFlows(&ofp.Flows{Items: updatedFlows}, lDevice.FlowGroups, agent.includeDefaultFlows); err != nil {
-			log.Errorf("decomposing-and-sending-flows", log.Fields{"logicalDeviceId": agent.logicalDeviceId})
+			log.Errorw("decomposing-and-sending-flows", log.Fields{"logicalDeviceId": agent.logicalDeviceId})
 			return err
 		}
 
@@ -1035,7 +1035,7 @@
 	}
 	// Get all the logical ports on that logical device
 	if lDevice, err := agent.getLogicalDeviceWithoutLock(); err != nil {
-		log.Errorf("unknown-logical-device", log.Fields{"error": err, "logicalDeviceId": agent.logicalDeviceId})
+		log.Errorw("unknown-logical-device", log.Fields{"error": err, "logicalDeviceId": agent.logicalDeviceId})
 		return err
 	} else {
 		//TODO:  Find a better way to refresh only missing routes