VOL-1623-meter support and handling techprofile and fix for flow delete, now migrated to onosproject/onos:1.13.9-rc4
Change in flowupdate API towards adapters
Remove meter_get API from adapter to core
Added dependent vendor library files downloaded by "dep-ensure -update"
Added techprofile changes in the single commit
Review comments are addressed
submitting patch for integration tests for meter changes and modifications in unit test for updated flow decomposer logic
- submitting on behalf of "Salman.Siddiqui@radisys.com"
Load test for meter updated and other flow management test cases with meter
- Performed load test for 1K meters serially and in parallel and added more TC in flow management
Rebased
Load test for meter updated and other flow management test cases with meter
- Performed load test for 1K meters serially and in parallel and added more TC in flow management
- submitting on behalf of "Salman.Siddiqui@radisys.com"
pulled latest protos
verified EAPOL/DHCP/HSIA data with Edgecore OLT & TW ONT kit for one subscriber
verified delete/re-add is working end to end for the same subscriber
Change-Id: Idb232b7a0f05dc0c7e68266ac885740a3adff317
diff --git a/rw_core/core/adapter_proxy.go b/rw_core/core/adapter_proxy.go
index 41f71a6..9511b9d 100755
--- a/rw_core/core/adapter_proxy.go
+++ b/rw_core/core/adapter_proxy.go
@@ -418,11 +418,11 @@
return unPackResponse(rpc, deviceId, success, result)
}
-func (ap *AdapterProxy) UpdateFlowsBulk(device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups) error {
+func (ap *AdapterProxy) UpdateFlowsBulk(device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups, flowMetadata *voltha.FlowMetadata) error {
log.Debugw("UpdateFlowsBulk", log.Fields{"deviceId": device.Id, "flowsInUpdate": len(flows.Items), "groupsToUpdate": len(groups.Items)})
toTopic := ap.getAdapterTopic(device.Adapter)
rpc := "update_flows_bulk"
- args := make([]*kafka.KVArg, 3)
+ args := make([]*kafka.KVArg, 4)
args[0] = &kafka.KVArg{
Key: "device",
Value: device,
@@ -435,6 +435,10 @@
Key: "groups",
Value: groups,
}
+ args[3] = &kafka.KVArg{
+ Key: "flow_metadata",
+ Value: flowMetadata,
+ }
// Use a device specific topic as we are the only core handling requests for this device
replyToTopic := ap.getCoreTopic()
@@ -443,7 +447,7 @@
return unPackResponse(rpc, device.Id, success, result)
}
-func (ap *AdapterProxy) UpdateFlowsIncremental(device *voltha.Device, flowChanges *openflow_13.FlowChanges, groupChanges *openflow_13.FlowGroupChanges) error {
+func (ap *AdapterProxy) UpdateFlowsIncremental(device *voltha.Device, flowChanges *openflow_13.FlowChanges, groupChanges *openflow_13.FlowGroupChanges, flowMetadata *voltha.FlowMetadata) error {
log.Debugw("UpdateFlowsIncremental",
log.Fields{
"deviceId": device.Id,
@@ -455,7 +459,7 @@
})
toTopic := ap.getAdapterTopic(device.Adapter)
rpc := "update_flows_incrementally"
- args := make([]*kafka.KVArg, 3)
+ args := make([]*kafka.KVArg, 4)
args[0] = &kafka.KVArg{
Key: "device",
Value: device,
@@ -469,6 +473,10 @@
Value: groupChanges,
}
+ args[3] = &kafka.KVArg{
+ Key: "flow_metadata",
+ Value: flowMetadata,
+ }
// Use a device specific topic as we are the only core handling requests for this device
replyToTopic := ap.getCoreTopic()
success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, device.Id, args...)