#
# Copyright 2018 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from twisted.internet import reactor
import grpc
from google.protobuf.json_format import MessageToDict
import hashlib
from simplejson import dumps
from twisted.internet.defer import inlineCallbacks, returnValue

from voltha_protos.openflow_13_pb2 import OFPXMC_OPENFLOW_BASIC, \
    ofp_flow_stats, OFPMT_OXM, Flows, FlowGroups, OFPXMT_OFB_IN_PORT, \
    OFPXMT_OFB_VLAN_VID
from voltha_protos.device_pb2 import Port
import pyvoltha.common.openflow.utils as fd
from voltha_protos import openolt_pb2
from voltha_protos.inter_container_pb2 import SwitchCapability, PortCapability, \
    InterAdapterMessageType, InterAdapterOmciMessage, InterAdapterTechProfileDownloadMessage

from pyvoltha.common.tech_profile.tech_profile import DEFAULT_TECH_PROFILE_TABLE_ID

# Flow categories
HSIA_FLOW = "HSIA_FLOW"

EAP_ETH_TYPE = 0x888e
LLDP_ETH_TYPE = 0x88cc

IGMP_PROTO = 2

# FIXME - see also BRDCM_DEFAULT_VLAN in broadcom_onu.py
DEFAULT_MGMT_VLAN = 4091

# Openolt Flow
UPSTREAM = "upstream"
DOWNSTREAM = "downstream"
PACKET_TAG_TYPE = "pkt_tag_type"
UNTAGGED = "untagged"
SINGLE_TAG = "single_tag"
DOUBLE_TAG = "double_tag"

# Classifier
ETH_TYPE = 'eth_type'
TPID = 'tpid'
IP_PROTO = 'ip_proto'
IN_PORT = 'in_port'
VLAN_VID = 'vlan_vid'
VLAN_PCP = 'vlan_pcp'
UDP_DST = 'udp_dst'
UDP_SRC = 'udp_src'
IPV4_DST = 'ipv4_dst'
IPV4_SRC = 'ipv4_src'
METADATA = 'metadata'
OUTPUT = 'output'
# Action
POP_VLAN = 'pop_vlan'
PUSH_VLAN = 'push_vlan'
TRAP_TO_HOST = 'trap_to_host'


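# OpenOltFlowMgr translates logical (OpenFlow) flows received from the VOLTHA
# core into OpenOLT agent flows over the gRPC stub, allocates the required
# TCONT/GEM port and flow-id resources through the resource manager, and
# mirrors the resulting flow state to the KV store.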
class OpenOltFlowMgr(object):

    def __init__(self, core_proxy, adapter_proxy, log, stub, device_id, logical_device_id,
                 platform, resource_mgr):
        self.core_proxy = core_proxy
        self.adapter_proxy = adapter_proxy
        self.log = log
        self.stub = stub
        self.device_id = device_id
        self.logical_device_id = logical_device_id
        self.nni_intf_id = None
        self.platform = platform
        self.resource_mgr = resource_mgr
        self.tech_profile = dict()
        self._populate_tech_profile_per_pon_port()
        self.retry_add_flow_list = []

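    # add_flow() walks the OpenFlow match fields and actions of the logical
    # flow, builds the classifier/action dictionaries used by the OpenOLT
    # agent, derives the access ports (PON/ONU/UNI) from the in/out ports,
    # and hands the result to divide_and_add_flow() for device programming.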
    def add_flow(self, flow):
        self.log.debug('add flow', flow=flow)
        classifier_info = dict()
        action_info = dict()

        for field in fd.get_ofb_fields(flow):
            if field.type == fd.ETH_TYPE:
                classifier_info[ETH_TYPE] = field.eth_type
                self.log.debug('field-type-eth-type',
                               eth_type=classifier_info[ETH_TYPE])
            elif field.type == fd.IP_PROTO:
                classifier_info[IP_PROTO] = field.ip_proto
                self.log.debug('field-type-ip-proto',
                               ip_proto=classifier_info[IP_PROTO])
            elif field.type == fd.IN_PORT:
                classifier_info[IN_PORT] = field.port
                self.log.debug('field-type-in-port',
                               in_port=classifier_info[IN_PORT])
            elif field.type == fd.VLAN_VID:
                classifier_info[VLAN_VID] = field.vlan_vid & 0xfff
                self.log.debug('field-type-vlan-vid',
                               vlan=classifier_info[VLAN_VID])
            elif field.type == fd.VLAN_PCP:
                classifier_info[VLAN_PCP] = field.vlan_pcp
                self.log.debug('field-type-vlan-pcp',
                               pcp=classifier_info[VLAN_PCP])
            elif field.type == fd.UDP_DST:
                classifier_info[UDP_DST] = field.udp_dst
                self.log.debug('field-type-udp-dst',
                               udp_dst=classifier_info[UDP_DST])
            elif field.type == fd.UDP_SRC:
                classifier_info[UDP_SRC] = field.udp_src
                self.log.debug('field-type-udp-src',
                               udp_src=classifier_info[UDP_SRC])
            elif field.type == fd.IPV4_DST:
                classifier_info[IPV4_DST] = field.ipv4_dst
                self.log.debug('field-type-ipv4-dst',
                               ipv4_dst=classifier_info[IPV4_DST])
            elif field.type == fd.IPV4_SRC:
                classifier_info[IPV4_SRC] = field.ipv4_src
                self.log.debug('field-type-ipv4-src',
                               ipv4_src=classifier_info[IPV4_SRC])
            elif field.type == fd.METADATA:
                classifier_info[METADATA] = field.table_metadata
                self.log.debug('field-type-metadata',
                               metadata=classifier_info[METADATA])
            else:
                raise NotImplementedError('field.type={}'.format(
                    field.type))

        for action in fd.get_actions(flow):
            if action.type == fd.OUTPUT:
                action_info[OUTPUT] = action.output.port
                self.log.debug('action-type-output',
                               output=action_info[OUTPUT],
                               in_port=classifier_info[IN_PORT])
            elif action.type == fd.POP_VLAN:
                if fd.get_goto_table_id(flow) is None:
                    self.log.debug('being taken care of by ONU', flow=flow)
                    return
                action_info[POP_VLAN] = True
                self.log.debug('action-type-pop-vlan',
                               in_port=classifier_info[IN_PORT])
            elif action.type == fd.PUSH_VLAN:
                action_info[PUSH_VLAN] = True
                action_info[TPID] = action.push.ethertype
                self.log.debug('action-type-push-vlan',
                               push_tpid=action_info[TPID], in_port=classifier_info[IN_PORT])
                if action.push.ethertype != 0x8100:
                    self.log.error('unhandled-tpid',
                                   ethertype=action.push.ethertype)
            elif action.type == fd.SET_FIELD:
                # action_info['action_type'] = 'set_field'
                _field = action.set_field.field.ofb_field
                assert (action.set_field.field.oxm_class ==
                        OFPXMC_OPENFLOW_BASIC)
                self.log.debug('action-type-set-field',
                               field=_field, in_port=classifier_info[IN_PORT])
                if _field.type == fd.VLAN_VID:
                    self.log.debug('set-field-type-vlan-vid',
                                   vlan_vid=_field.vlan_vid & 0xfff)
                    action_info[VLAN_VID] = (_field.vlan_vid & 0xfff)
                else:
                    self.log.error('unsupported-action-set-field-type',
                                   field_type=_field.type)
            else:
                self.log.error('unsupported-action-type',
                               action_type=action.type, in_port=classifier_info[IN_PORT])

        if fd.get_goto_table_id(flow) is not None and POP_VLAN not in action_info:
            self.log.debug('being taken care of by ONU', flow=flow)
            return

        if OUTPUT not in action_info and METADATA in classifier_info:
            # find flow in the next table
            next_flow = self.find_next_flow(flow)
            if next_flow is None:
                return
            action_info[OUTPUT] = fd.get_out_port(next_flow)
            for field in fd.get_ofb_fields(next_flow):
                if field.type == fd.VLAN_VID:
                    classifier_info[METADATA] = field.vlan_vid & 0xfff

        self.log.debug('flow-ports', classifier_inport=classifier_info[IN_PORT], action_output=action_info[OUTPUT])
        (port_no, intf_id, onu_id, uni_id) = self.platform.extract_access_from_flow(
            classifier_info[IN_PORT], action_info[OUTPUT])
        self.log.debug('extracted-flow-ports', port_no=port_no, intf_id=intf_id, onu_id=onu_id, uni_id=uni_id)

        self.divide_and_add_flow(intf_id, onu_id, uni_id, port_no, classifier_info,
                                 action_info, flow)

    def _is_uni_port(self, port_no):
        try:
            port = self.adapter_agent.get_logical_port(self.logical_device_id,
                                                       'uni-{}'.format(port_no))
            if port is not None:
                return (not port.root_port), port.device_id
            else:
                return False, None
        except Exception as e:
            self.log.error("error-retrieving-port", e=e)
            return False, None

    def _clear_flow_id_from_rm(self, flow, flow_id, flow_direction):
        uni_port_no = None
        child_device_id = None
        if flow_direction == UPSTREAM:
            for field in fd.get_ofb_fields(flow):
                if field.type == fd.IN_PORT:
                    is_uni, child_device_id = self._is_uni_port(field.port)
                    if is_uni:
                        uni_port_no = field.port
        elif flow_direction == DOWNSTREAM:
            for field in fd.get_ofb_fields(flow):
                if field.type == fd.METADATA:
                    uni_port = field.table_metadata & 0xFFFFFFFF
                    is_uni, child_device_id = self._is_uni_port(uni_port)
                    if is_uni:
                        uni_port_no = field.port

        if uni_port_no is None:
            for action in fd.get_actions(flow):
                if action.type == fd.OUTPUT:
                    is_uni, child_device_id = \
                        self._is_uni_port(action.output.port)
                    if is_uni:
                        uni_port_no = action.output.port

        if child_device_id:
            child_device = self.adapter_agent.get_device(child_device_id)
            pon_intf = child_device.proxy_address.channel_id
            onu_id = child_device.proxy_address.onu_id
            uni_id = self.platform.uni_id_from_port_num(uni_port_no) if uni_port_no is not None else None
            flows = self.resource_mgr.get_flow_id_info(pon_intf, onu_id, uni_id, flow_id)
            assert (isinstance(flows, list))
            self.log.debug("retrieved-flows", flows=flows)
            for idx in range(len(flows)):
                if flow_direction == flows[idx]['flow_type']:
                    flows.pop(idx)
                    self.update_flow_info_to_kv_store(pon_intf, onu_id, uni_id, flow_id, flows)
                    if len(flows) > 0:
                        # There are still flows referencing the same flow_id.
                        # So the flow should not be freed yet.
                        # For ex: Case of HSIA where same flow is shared
                        # between DS and US.
                        return

            self.resource_mgr.free_flow_id_for_uni(pon_intf, onu_id, uni_id, flow_id)
        else:
            self.log.error("invalid-info", uni_port_no=uni_port_no,
                           child_device_id=child_device_id)

    def retry_add_flow(self, flow):
        self.log.debug("retry-add-flow")
        if flow.id in self.retry_add_flow_list:
            self.retry_add_flow_list.remove(flow.id)
        self.add_flow(flow)

    def remove_flow(self, flow):
        self.log.debug('trying to remove flows from logical flow :',
                       logical_flow=flow)
        device_flows_to_remove = []
        device_flows = self.flows_proxy.get('/').items
        for f in device_flows:
            if f.cookie == flow.id:
                device_flows_to_remove.append(f)

        for f in device_flows_to_remove:
            (id, direction) = self.decode_stored_id(f.id)
            flow_to_remove = openolt_pb2.Flow(flow_id=id, flow_type=direction)
            try:
                self.stub.FlowRemove(flow_to_remove)
            except grpc.RpcError as grpc_e:
                if grpc_e.code() == grpc.StatusCode.NOT_FOUND:
                    self.log.debug('This flow does not exist on the switch, '
                                   'normal after an OLT reboot',
                                   flow=flow_to_remove)
                else:
                    raise grpc_e

            # once we have successfully deleted the flow on the device
            # release the flow_id on resource pool and also clear any
            # data associated with the flow_id on KV store.
            self._clear_flow_id_from_rm(f, id, direction)
            self.log.debug('flow removed from device', flow=f,
                           flow_key=flow_to_remove)

        if len(device_flows_to_remove) > 0:
            new_flows = []
            flows_ids_to_remove = [f.id for f in device_flows_to_remove]
            for f in device_flows:
                if f.id not in flows_ids_to_remove:
                    new_flows.append(f)

            self.flows_proxy.update('/', Flows(items=new_flows))
            self.log.debug('flows removed from the data store',
                           flow_ids_removed=flows_ids_to_remove,
                           number_of_flows_removed=(len(device_flows) - len(
                               new_flows)), expected_flows_removed=len(
                               device_flows_to_remove))
        else:
            self.log.debug('no device flow to remove for this flow (normal '
                           'for multi table flows)', flow=flow)

    def get_tp_path(self, intf_id, uni):
        # FIXME Should get Table id from the flow, as of now hardcoded to
        # DEFAULT_TECH_PROFILE_TABLE_ID (64)
        # 'tp_path' contains the suffix part of the tech_profile_instance path.
        # The prefix to the 'tp_path' should be set to \
        # TechProfile.KV_STORE_TECH_PROFILE_PATH_PREFIX by the ONU adapter.
        return self.tech_profile[intf_id]. \
            get_tp_path(DEFAULT_TECH_PROFILE_TABLE_ID,
                        uni)

    def delete_tech_profile_instance(self, intf_id, onu_id, uni_id):
        # Remove the TP instance associated with the ONU
        ofp_port_name = self._get_ofp_port_name(intf_id, onu_id, uni_id)
        tp_path = self.get_tp_path(intf_id, ofp_port_name)
        return self.tech_profile[intf_id].delete_tech_profile_instance(tp_path)

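    # divide_and_add_flow() fans a single logical flow out per gemport and
    # dispatches it by type: DHCP and EAPOL traps, LLDP trap on the NNI,
    # or upstream/downstream HSIA data flows. For EAPOL it also triggers
    # the tech-profile download towards the ONU adapter.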
    @inlineCallbacks
    def divide_and_add_flow(self, intf_id, onu_id, uni_id, port_no, classifier,
                            action, flow):

        self.log.debug('sorting flow', intf_id=intf_id, onu_id=onu_id, uni_id=uni_id, port_no=port_no,
                       classifier=classifier, action=action)

        uni = self.get_uni_port_path(intf_id, onu_id, uni_id)

        alloc_id, gem_ports = self.create_tcont_gemport(intf_id, onu_id, uni_id,
                                                        uni, port_no, flow.table_id)
        if alloc_id is None or gem_ports is None:
            self.log.error("alloc-id-gem-ports-unavailable", alloc_id=alloc_id,
                           gem_ports=gem_ports)
            return

        self.log.debug('Generated required alloc and gemport ids',
                       alloc_id=alloc_id, gemports=gem_ports)

        # Flows can't be added specific to gemport unless p-bits are received.
        # Hence adding flows for all gemports
        for gemport_id in gem_ports:
            if IP_PROTO in classifier:
                if classifier[IP_PROTO] == 17:
                    self.log.debug('dhcp flow add')
                    self.add_dhcp_trap(intf_id, onu_id, uni_id, port_no, classifier,
                                       action, flow, alloc_id, gemport_id)
                elif classifier[IP_PROTO] == 2:
                    self.log.warn('igmp flow add ignored, not implemented yet')
                else:
                    self.log.warn("Invalid-Classifier-to-handle",
                                  classifier=classifier,
                                  action=action)
            elif ETH_TYPE in classifier:
                if classifier[ETH_TYPE] == EAP_ETH_TYPE:
                    self.log.debug('eapol flow add')
                    self.add_eapol_flow(intf_id, onu_id, uni_id, port_no, flow, alloc_id,
                                        gemport_id)

                    # TODO NEW CORE: Skip trying to add subsequent eap capture for subscriber vlan
                    # (later attempts at re-eap)
                    # vlan_id = self.get_subscriber_vlan(fd.get_in_port(flow))
                    # if vlan_id is not None:
                    #     self.add_eapol_flow(
                    #         intf_id, onu_id, uni_id, port_no, flow, alloc_id, gemport_id,
                    #         vlan_id=vlan_id)
                    parent_port_no = self.platform.intf_id_to_port_no(intf_id, Port.PON_OLT)

                    self.log.debug('get-child-device', intf_id=intf_id, onu_id=onu_id,
                                   parent_port_no=parent_port_no, device_id=self.device_id)

                    onu_device = yield self.core_proxy.get_child_device(self.device_id,
                                                                        onu_id=int(onu_id),
                                                                        parent_port_no=int(parent_port_no))
                    tp_path = self.get_tp_path(intf_id, uni)

                    tech_msg = InterAdapterTechProfileDownloadMessage(uni_id=uni_id, path=tp_path)

                    self.log.debug('Load-tech-profile-request-to-brcm-handler',
                                   onu_device=onu_device, tp_path=tp_path, tech_msg=tech_msg)

                    # Send the tech profile event to the onu adapter
                    yield self.adapter_proxy.send_inter_adapter_message(
                        msg=tech_msg,
                        type=InterAdapterMessageType.TECH_PROFILE_DOWNLOAD_REQUEST,
                        from_adapter="openolt",
                        to_adapter=onu_device.type,
                        to_device_id=onu_device.id
                    )

                if classifier[ETH_TYPE] == LLDP_ETH_TYPE:
                    self.log.debug('lldp flow add')
                    nni_intf_id = yield self.get_nni_intf_id()
                    self.add_lldp_flow(flow, port_no, nni_intf_id)

            elif PUSH_VLAN in action:
                self.add_upstream_data_flow(intf_id, onu_id, uni_id, port_no, classifier,
                                            action, flow, alloc_id, gemport_id)
            elif POP_VLAN in action:
                self.add_downstream_data_flow(intf_id, onu_id, uni_id, port_no, classifier,
                                              action, flow, alloc_id, gemport_id)
            else:
                self.log.debug('Invalid-flow-type-to-handle',
                               classifier=classifier,
                               action=action, flow=flow)

    def get_uni_port_path(self, intf_id, onu_id, uni_id):
        value = 'pon-{}/onu-{}/uni-{}'.format(intf_id, onu_id, uni_id)
        return value

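    # create_tcont_gemport() returns the alloc_id and gemport ids for the
    # ONU/UNI, reusing any ids already recorded by the resource manager;
    # otherwise it builds (or fetches) the tech-profile instance, creates
    # the TCONTs on the OLT and persists the new ids to the KV store.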
    def create_tcont_gemport(self, intf_id, onu_id, uni_id, uni, port_no, table_id):
        alloc_id, gem_port_ids = None, None
        pon_intf_onu_id = (intf_id, onu_id)

        # If we have already allocated the alloc_id and gem_ports earlier, reuse them
        alloc_id = \
            self.resource_mgr.get_current_alloc_ids_for_onu(pon_intf_onu_id)
        gem_port_ids = \
            self.resource_mgr.get_current_gemport_ids_for_onu(pon_intf_onu_id)
        if alloc_id is not None and gem_port_ids is not None:
            return alloc_id, gem_port_ids

        try:
            # FIXME: If table id is <= 63, use 64 as the table id
            if table_id < DEFAULT_TECH_PROFILE_TABLE_ID:
                table_id = DEFAULT_TECH_PROFILE_TABLE_ID

            # Check whether a tech profile instance already exists for the derived port name
            tech_profile_instance = self.tech_profile[intf_id]. \
                get_tech_profile_instance(table_id, uni)
            self.log.debug('Get-tech-profile-instance-status', tech_profile_instance=tech_profile_instance)

            if tech_profile_instance is None:
                # create tech profile instance
                tech_profile_instance = self.tech_profile[intf_id]. \
                    create_tech_profile_instance(table_id, uni,
                                                 intf_id)
                if tech_profile_instance is None:
                    raise Exception('Tech-profile-instance-creation-failed')
            else:
                self.log.debug(
                    'Tech-profile-instance-already-exist-for-given port-name',
                    table_id=table_id, intf_id=intf_id, uni=uni)

            # upstream scheduler
            us_scheduler = self.tech_profile[intf_id].get_us_scheduler(
                tech_profile_instance)
            # downstream scheduler
            ds_scheduler = self.tech_profile[intf_id].get_ds_scheduler(
                tech_profile_instance)
            # create Tcont
            tconts = self.tech_profile[intf_id].get_tconts(tech_profile_instance,
                                                           us_scheduler,
                                                           ds_scheduler)

            self.stub.CreateTconts(openolt_pb2.Tconts(intf_id=intf_id,
                                                      onu_id=onu_id,
                                                      uni_id=uni_id,
                                                      port_no=port_no,
                                                      tconts=tconts))

            # Fetch alloc id and gemports from tech profile instance
            alloc_id = tech_profile_instance.us_scheduler.alloc_id
            gem_port_ids = []
            for i in range(len(
                    tech_profile_instance.upstream_gem_port_attribute_list)):
                gem_port_ids.append(
                    tech_profile_instance.upstream_gem_port_attribute_list[i].
                    gemport_id)
        except BaseException as e:
            self.log.exception(exception=e)

        # Update the allocated alloc_id and gem_port_id for the ONU/UNI to KV store
        pon_intf_onu_id = (intf_id, onu_id, uni_id)
        self.resource_mgr.resource_mgrs[intf_id].update_alloc_ids_for_onu(
            pon_intf_onu_id,
            list([alloc_id])
        )
        self.resource_mgr.resource_mgrs[intf_id].update_gemport_ids_for_onu(
            pon_intf_onu_id,
            gem_port_ids
        )

        self.resource_mgr.update_gemports_ponport_to_onu_map_on_kv_store(
            gem_port_ids, intf_id, onu_id, uni_id
        )

        return alloc_id, gem_port_ids

    def add_upstream_data_flow(self, intf_id, onu_id, uni_id, port_no, uplink_classifier,
                               uplink_action, logical_flow, alloc_id,
                               gemport_id):

        uplink_classifier[PACKET_TAG_TYPE] = SINGLE_TAG

        self.add_hsia_flow(intf_id, onu_id, uni_id, port_no, uplink_classifier,
                           uplink_action, UPSTREAM,
                           logical_flow, alloc_id, gemport_id)

        # Secondary EAP on the subscriber vlan
        (eap_active, eap_logical_flow) = self.is_eap_enabled(intf_id, onu_id, uni_id)
        if eap_active:
            self.add_eapol_flow(intf_id, onu_id, uni_id, port_no, eap_logical_flow, alloc_id,
                                gemport_id, vlan_id=uplink_classifier[VLAN_VID])

    def add_downstream_data_flow(self, intf_id, onu_id, uni_id, port_no, downlink_classifier,
                                 downlink_action, flow, alloc_id, gemport_id):
        downlink_classifier[PACKET_TAG_TYPE] = DOUBLE_TAG
        # Needed ???? It should be already there
        downlink_action[POP_VLAN] = True
        downlink_action[VLAN_VID] = downlink_classifier[VLAN_VID]

        self.add_hsia_flow(intf_id, onu_id, uni_id, port_no, downlink_classifier,
                           downlink_action, DOWNSTREAM,
                           flow, alloc_id, gemport_id)

    @inlineCallbacks
    def add_hsia_flow(self, intf_id, onu_id, uni_id, port_no, classifier, action,
                      direction, logical_flow, alloc_id, gemport_id):

        self.log.debug('add hsia flow', flow=logical_flow, port_no=port_no,
                       intf_id=intf_id, onu_id=onu_id, uni_id=uni_id, gemport_id=gemport_id,
                       alloc_id=alloc_id)

        flow_store_cookie = self._get_flow_store_cookie(classifier,
                                                        gemport_id)

        self.log.debug('flow-store-cookie-classifier-action', flow_store_cookie=flow_store_cookie, classifier=classifier,
                       action=action)

        # One of the OLT platforms (Broadcom BAL) requires that symmetric
        # flows use the same flow_id across UL and DL.
        # Since the HSIA flow is the only symmetric flow currently, we need to
        # re-use the flow_id across both directions. The 'flow_category'
        # takes priority over flow_cookie to find any available HSIA_FLOW
        # id for the ONU.
        flow_id = self.resource_mgr.get_flow_id(intf_id, onu_id, uni_id,
                                                flow_store_cookie,
                                                HSIA_FLOW)
        if flow_id is None:
            self.log.error("hsia-flow-unavailable")
            return

        self.log.debug('flow-id', flow_id=flow_id)

        network_intf_id = yield self.get_nni_intf_id()

        flow = openolt_pb2.Flow(
            access_intf_id=intf_id, onu_id=onu_id, uni_id=uni_id, flow_id=flow_id,
            flow_type=direction, alloc_id=alloc_id, network_intf_id=network_intf_id,
            gemport_id=gemport_id,
            classifier=self.mk_classifier(classifier),
            action=self.mk_action(action),
            priority=logical_flow.priority,
            port_no=port_no,
            cookie=logical_flow.cookie)

        self.log.debug('openolt-agent-flow', hsia_flow=flow)

        if self.add_flow_to_device(flow, logical_flow):
            self.log.debug('added-hsia-openolt-agent-flow', hsia_flow=flow, logical_flow=logical_flow)
            flow_info = self._get_flow_info_as_json_blob(flow,
                                                         flow_store_cookie,
                                                         HSIA_FLOW)
            self.update_flow_info_to_kv_store(flow.access_intf_id,
                                              flow.onu_id, flow.uni_id,
                                              flow.flow_id, flow_info)

    @inlineCallbacks
    def add_dhcp_trap(self, intf_id, onu_id, uni_id, port_no, classifier, action, logical_flow,
                      alloc_id, gemport_id):

        self.log.debug('add dhcp upstream trap', classifier=classifier,
                       intf_id=intf_id, onu_id=onu_id, uni_id=uni_id, action=action)

        action.clear()
        action[TRAP_TO_HOST] = True
        classifier[UDP_SRC] = 68
        classifier[UDP_DST] = 67
        classifier[PACKET_TAG_TYPE] = SINGLE_TAG
        classifier.pop(VLAN_VID, None)

        flow_store_cookie = self._get_flow_store_cookie(classifier,
                                                        gemport_id)

        self.log.debug('flow-store-cookie-classifier-action', flow_store_cookie=flow_store_cookie, classifier=classifier,
                       action=action)

        flow_id = self.resource_mgr.get_flow_id(
            intf_id, onu_id, uni_id, flow_store_cookie
        )

        self.log.debug('flow-id', flow_id=flow_id)

        network_intf_id = yield self.get_nni_intf_id()

        dhcp_flow = openolt_pb2.Flow(
            onu_id=onu_id, uni_id=uni_id, flow_id=flow_id, flow_type=UPSTREAM,
            access_intf_id=intf_id, gemport_id=gemport_id,
            alloc_id=alloc_id, network_intf_id=network_intf_id,
            priority=logical_flow.priority,
            classifier=self.mk_classifier(classifier),
            action=self.mk_action(action),
            port_no=port_no,
            cookie=logical_flow.cookie)

        self.log.debug('openolt-agent-flow', dhcp_flow=dhcp_flow)

        if self.add_flow_to_device(dhcp_flow, logical_flow):
            self.log.debug('added-dhcp-openolt-agent-flow', dhcp_flow=dhcp_flow, logical_flow=logical_flow)
            flow_info = self._get_flow_info_as_json_blob(dhcp_flow, flow_store_cookie)
            self.update_flow_info_to_kv_store(dhcp_flow.access_intf_id,
                                              dhcp_flow.onu_id,
                                              dhcp_flow.uni_id,
                                              dhcp_flow.flow_id,
                                              flow_info)

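    # add_eapol_flow() installs an upstream EAPOL trap on the given VLAN
    # (the management VLAN by default). For the default management VLAN it
    # also installs the matching downstream flow, using a per-ONU "special"
    # VLAN to work around the Broadcom BAL limitation described below.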
    @inlineCallbacks
    def add_eapol_flow(self, intf_id, onu_id, uni_id, port_no, logical_flow, alloc_id,
                       gemport_id, vlan_id=DEFAULT_MGMT_VLAN):

        self.log.debug('add eapol upstream trap', flow=logical_flow, port_no=port_no,
                       intf_id=intf_id, onu_id=onu_id, uni_id=uni_id, gemport_id=gemport_id,
                       alloc_id=alloc_id, vlan_id=vlan_id)

        uplink_classifier = dict()
        uplink_classifier[ETH_TYPE] = EAP_ETH_TYPE
        uplink_classifier[PACKET_TAG_TYPE] = SINGLE_TAG
        uplink_classifier[VLAN_VID] = vlan_id

        uplink_action = dict()
        uplink_action[TRAP_TO_HOST] = True

        flow_store_cookie = self._get_flow_store_cookie(uplink_classifier,
                                                        gemport_id)

        self.log.debug('flow-store-cookie-classifier-action', flow_store_cookie=flow_store_cookie, uplink_classifier=uplink_classifier,
                       uplink_action=uplink_action)

        # Add Upstream EAPOL Flow.
        uplink_flow_id = self.resource_mgr.get_flow_id(
            intf_id, onu_id, uni_id, flow_store_cookie
        )

        self.log.debug('flow-id', uplink_flow_id=uplink_flow_id)

        network_intf_id = yield self.get_nni_intf_id()

        upstream_flow = openolt_pb2.Flow(
            access_intf_id=intf_id, onu_id=onu_id, uni_id=uni_id, flow_id=uplink_flow_id,
            flow_type=UPSTREAM, alloc_id=alloc_id, network_intf_id=network_intf_id,
            gemport_id=gemport_id,
            classifier=self.mk_classifier(uplink_classifier),
            action=self.mk_action(uplink_action),
            priority=logical_flow.priority,
            port_no=port_no,
            cookie=logical_flow.cookie)

        self.log.debug('openolt-agent-flow', upstream_flow=upstream_flow)

        logical_flow = copy.deepcopy(logical_flow)
        logical_flow.match.oxm_fields.extend(fd.mk_oxm_fields([fd.vlan_vid(
            vlan_id | 0x1000)]))
        logical_flow.match.type = OFPMT_OXM

        if self.add_flow_to_device(upstream_flow, logical_flow):
            self.log.debug('added-eapol-openolt-agent-flow', upstream_flow=upstream_flow, logical_flow=logical_flow)
            flow_info = self._get_flow_info_as_json_blob(upstream_flow,
                                                         flow_store_cookie)
            self.update_flow_info_to_kv_store(upstream_flow.access_intf_id,
                                              upstream_flow.onu_id,
                                              upstream_flow.uni_id,
                                              upstream_flow.flow_id,
                                              flow_info)

        if vlan_id == DEFAULT_MGMT_VLAN:
            # Add Downstream EAPOL Flow, only for the first EAP flow (BAL
            # requirement)
            # On one of the platforms (Broadcom BAL), when the same DL classifier
            # vlan was used across multiple ONUs, the eapol flow re-add after a
            # flow delete (cases of onu reboot/disable) fails.
            # In order to generate a unique vlan, a combination of intf_id,
            # onu_id and uni_id is used.
            # uni_id defaults to 0, so add 1 to it.
            special_vlan_downstream_flow = 4090 - intf_id * onu_id * (uni_id + 1)
            # Assert that we do not generate invalid vlans under any condition
            assert (special_vlan_downstream_flow >= 2), 'invalid-vlan-generated'
            self.log.warn('generating-special-downstream-vlan-for-bal', special_vlan_downstream_flow=special_vlan_downstream_flow)

            downlink_classifier = dict()
            downlink_classifier[PACKET_TAG_TYPE] = SINGLE_TAG
            downlink_classifier[VLAN_VID] = special_vlan_downstream_flow

            downlink_action = dict()
            downlink_action[PUSH_VLAN] = True
            downlink_action[VLAN_VID] = vlan_id

            flow_store_cookie = self._get_flow_store_cookie(downlink_classifier,
                                                            gemport_id)

            self.log.debug('flow-store-cookie-classifier-action', flow_store_cookie=flow_store_cookie, downlink_classifier=downlink_classifier,
                           downlink_action=downlink_action)

            downlink_flow_id = self.resource_mgr.get_flow_id(
                intf_id, onu_id, uni_id, flow_store_cookie
            )

            self.log.debug('flow-id', downlink_flow_id=downlink_flow_id)

            downstream_flow = openolt_pb2.Flow(
                access_intf_id=intf_id, onu_id=onu_id, uni_id=uni_id, flow_id=downlink_flow_id,
                flow_type=DOWNSTREAM, alloc_id=alloc_id, network_intf_id=network_intf_id,
                gemport_id=gemport_id,
                classifier=self.mk_classifier(downlink_classifier),
                action=self.mk_action(downlink_action),
                priority=logical_flow.priority,
                port_no=port_no,
                cookie=logical_flow.cookie)

            self.log.debug('openolt-agent-flow', downstream_flow=downstream_flow)

            try:
                downstream_logical_flow = ofp_flow_stats(
                    id=logical_flow.id, cookie=logical_flow.cookie,
                    table_id=logical_flow.table_id, priority=logical_flow.priority,
                    flags=logical_flow.flags)

                downstream_logical_flow.match.oxm_fields.extend(fd.mk_oxm_fields([
                    fd.in_port(fd.get_out_port(logical_flow)),
                    fd.vlan_vid(special_vlan_downstream_flow | 0x1000)]))
                downstream_logical_flow.match.type = OFPMT_OXM

                downstream_logical_flow.instructions.extend(
                    fd.mk_instructions_from_actions([fd.output(
                        self.platform.mk_uni_port_num(intf_id, onu_id, uni_id))]))
            except Exception as e:
                self.log.exception("unexpected-error-building-downstream-logical-flow", intf_id=intf_id, onu_id=onu_id,
                                   uni_id=uni_id, e=e, downstream_flow=downstream_flow)

            if self.add_flow_to_device(downstream_flow, downstream_logical_flow):
                self.log.debug('added-eapol-openolt-agent-flow', downstream_flow=downstream_flow,
                               downstream_logical_flow=downstream_logical_flow)
                flow_info = self._get_flow_info_as_json_blob(downstream_flow,
                                                             flow_store_cookie)
                self.update_flow_info_to_kv_store(downstream_flow.access_intf_id,
                                                  downstream_flow.onu_id,
                                                  downstream_flow.uni_id,
                                                  downstream_flow.flow_id,
                                                  flow_info)

    def repush_all_different_flows(self):
        # Check if the device is supposed to have flows, if so add them
        # Recover static flows after a reboot
        logical_flows = self.logical_flows_proxy.get('/').items
        devices_flows = self.flows_proxy.get('/').items
        logical_flows_ids_provisioned = [f.cookie for f in devices_flows]
        for logical_flow in logical_flows:
            try:
                if logical_flow.id not in logical_flows_ids_provisioned:
                    self.add_flow(logical_flow)
            except Exception as e:
                self.log.exception('Problem reading this flow', e=e)

    def reset_flows(self):
        self.flows_proxy.update('/', Flows())

    """ Add a downstream LLDP trap flow on the NNI interface
    """

    def add_lldp_flow(self, logical_flow, port_no, network_intf_id=0):

        self.log.debug('add lldp trap flow', flow=logical_flow, port_no=port_no,
                       network_intf_id=network_intf_id)

        classifier = dict()
        classifier[ETH_TYPE] = LLDP_ETH_TYPE
        classifier[PACKET_TAG_TYPE] = UNTAGGED
        action = dict()
        action[TRAP_TO_HOST] = True

        # LLDP flow is installed to trap LLDP packets on the NNI port.
        # We manage flow_id resource pool on per PON port basis.
        # Since this situation is tricky, as a hack, we pass the NNI port
        # index (network_intf_id) as PON port Index for the flow_id resource
        # pool. Also, there is no ONU Id available for trapping LLDP packets
        # on NNI port, use onu_id as -1 (invalid)
        # ****************** CAVEAT *******************
        # This logic works if the NNI Port Id falls within the same valid
        # range of PON Port Ids. If this doesn't work for some OLT Vendor
        # we need to have a re-look at this.
        # *********************************************
        onu_id = -1
        uni_id = -1
        flow_store_cookie = self._get_flow_store_cookie(classifier)
        flow_id = self.resource_mgr.get_flow_id(network_intf_id, onu_id, uni_id,
                                                flow_store_cookie)

        self.log.debug('flow-store-cookie-classifier-action', flow_store_cookie=flow_store_cookie, classifier=classifier,
                       action=action)

        downstream_flow = openolt_pb2.Flow(
            access_intf_id=-1,  # access_intf_id not required
            onu_id=onu_id,  # onu_id not required
            uni_id=uni_id,  # uni_id not used
            flow_id=flow_id,
            flow_type=DOWNSTREAM,
            network_intf_id=network_intf_id,
            gemport_id=-1,  # gemport_id not required
            classifier=self.mk_classifier(classifier),
            action=self.mk_action(action),
            priority=logical_flow.priority,
            port_no=port_no,
            cookie=logical_flow.cookie)

        self.log.debug('openolt-agent-flow', downstream_flow=downstream_flow)

        if self.add_flow_to_device(downstream_flow, logical_flow):
            self.log.debug('added-lldp-openolt-agent-flow', downstream_flow=downstream_flow)
            self.update_flow_info_to_kv_store(network_intf_id, onu_id, uni_id,
                                              flow_id, downstream_flow)

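    # mk_classifier()/mk_action() translate the internal classifier and
    # action dictionaries into the openolt_pb2.Classifier and Action
    # protobuf messages expected by the OpenOLT agent.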
    def mk_classifier(self, classifier_info):

        classifier = openolt_pb2.Classifier()

        if ETH_TYPE in classifier_info:
            classifier.eth_type = classifier_info[ETH_TYPE]
        if IP_PROTO in classifier_info:
            classifier.ip_proto = classifier_info[IP_PROTO]
        if VLAN_VID in classifier_info:
            classifier.o_vid = classifier_info[VLAN_VID]
        if METADATA in classifier_info:
            classifier.i_vid = classifier_info[METADATA]
        if VLAN_PCP in classifier_info:
            classifier.o_pbits = classifier_info[VLAN_PCP]
        if UDP_SRC in classifier_info:
            classifier.src_port = classifier_info[UDP_SRC]
        if UDP_DST in classifier_info:
            classifier.dst_port = classifier_info[UDP_DST]
        if IPV4_DST in classifier_info:
            classifier.dst_ip = classifier_info[IPV4_DST]
        if IPV4_SRC in classifier_info:
            classifier.src_ip = classifier_info[IPV4_SRC]
        if PACKET_TAG_TYPE in classifier_info:
            if classifier_info[PACKET_TAG_TYPE] == SINGLE_TAG:
                classifier.pkt_tag_type = SINGLE_TAG
            elif classifier_info[PACKET_TAG_TYPE] == DOUBLE_TAG:
                classifier.pkt_tag_type = DOUBLE_TAG
            elif classifier_info[PACKET_TAG_TYPE] == UNTAGGED:
                classifier.pkt_tag_type = UNTAGGED
            else:
                classifier.pkt_tag_type = 'none'

        return classifier

    def mk_action(self, action_info):
        action = openolt_pb2.Action()

        if POP_VLAN in action_info:
            action.o_vid = action_info[VLAN_VID]
            action.cmd.remove_outer_tag = True
        elif PUSH_VLAN in action_info:
            action.o_vid = action_info[VLAN_VID]
            action.cmd.add_outer_tag = True
        elif TRAP_TO_HOST in action_info:
            action.cmd.trap_to_host = True
        else:
            self.log.info('Invalid-action-field', action_info=action_info)
            return
        return action

    def is_eap_enabled(self, intf_id, onu_id, uni_id):
        flows = self.logical_flows_proxy.get('/').items

        for flow in flows:
            eap_flow = False
            eap_intf_id = None
            eap_onu_id = None
            eap_uni_id = None
            for field in fd.get_ofb_fields(flow):
                if field.type == fd.ETH_TYPE:
                    if field.eth_type == EAP_ETH_TYPE:
                        eap_flow = True
                if field.type == fd.IN_PORT:
                    eap_intf_id = self.platform.intf_id_from_uni_port_num(
                        field.port)
                    eap_onu_id = self.platform.onu_id_from_port_num(field.port)
                    eap_uni_id = self.platform.uni_id_from_port_num(field.port)

            if eap_flow:
                self.log.debug('eap flow detected', onu_id=onu_id, uni_id=uni_id,
                               intf_id=intf_id, eap_intf_id=eap_intf_id,
                               eap_onu_id=eap_onu_id,
                               eap_uni_id=eap_uni_id)
            if eap_flow and intf_id == eap_intf_id and onu_id == eap_onu_id and uni_id == eap_uni_id:
                return True, flow

        return False, None

    def get_subscriber_vlan(self, port):
        self.log.debug('looking for subscriber flow for port', port=port)

        flows = self.logical_flows_proxy.get('/').items
        for flow in flows:
            in_port = fd.get_in_port(flow)
            out_port = fd.get_out_port(flow)
            if in_port == port and out_port is not None and \
                    self.platform.intf_id_to_port_type_name(out_port) \
                    == Port.ETHERNET_NNI:
                fields = fd.get_ofb_fields(flow)
                self.log.debug('subscriber flow found', fields=fields)
                for field in fields:
                    if field.type == OFPXMT_OFB_VLAN_VID:
                        self.log.debug('subscriber vlan found',
                                       vlan_id=field.vlan_vid)
                        return field.vlan_vid & 0x0fff
        self.log.debug('No subscriber flow found', port=port)
        return None

    def add_flow_to_device(self, flow, logical_flow):
        self.log.debug('pushing flow to device', flow=flow)
        try:
            self.stub.FlowAdd(flow)
        except grpc.RpcError as grpc_e:
            if grpc_e.code() == grpc.StatusCode.ALREADY_EXISTS:
                self.log.warn('flow already exists', e=grpc_e, flow=flow)
            else:
                self.log.error('failed to add flow',
                               logical_flow=logical_flow, flow=flow,
                               grpc_error=grpc_e)
            return False
        except Exception as f:
            self.log.exception("unexpected-openolt-agent-error", flow=flow, logical_flow=logical_flow, f=f)
        else:
            # TODO NEW CORE: Should not need. Core keeps track of logical flows. no need to keep track. verify, especially olt reboot!
            # self.register_flow(logical_flow, flow)
            return True

    def update_flow_info_to_kv_store(self, intf_id, onu_id, uni_id, flow_id, flow):
        self.log.debug("update-flow-info", intf_id=intf_id, onu_id=onu_id, uni_id=uni_id, flow_id=flow_id, flow=flow)
        self.resource_mgr.update_flow_id_info_for_uni(intf_id, onu_id, uni_id,
                                                      flow_id, flow)

    def register_flow(self, logical_flow, device_flow):
        self.log.debug('registering flow in device',
                       logical_flow=logical_flow, device_flow=device_flow)
        stored_flow = copy.deepcopy(logical_flow)
        stored_flow.id = self.generate_stored_id(device_flow.flow_id,
                                                 device_flow.flow_type)
        self.log.debug('generated device flow id', id=stored_flow.id,
                       flow_id=device_flow.flow_id,
                       direction=device_flow.flow_type)
        stored_flow.cookie = logical_flow.id
        flows = self.flows_proxy.get('/')
        flows.items.extend([stored_flow])
        self.flows_proxy.update('/', flows)

    def find_next_flow(self, flow):
        table_id = fd.get_goto_table_id(flow)
        metadata = 0
        # Prior to ONOS 1.13.5, Metadata contained the UNI output port number. In
        # 1.13.5 and later, the lower 32-bits is the output port number and the
        # upper 32-bits is the inner-vid we are looking for. Use just the lower 32
        # bits. Allows this code to work with pre- and post-1.13.5 ONOS OltPipeline

        for field in fd.get_ofb_fields(flow):
            if field.type == fd.METADATA:
                metadata = field.table_metadata & 0xFFFFFFFF
        if table_id is None:
            return None
        flows = self.logical_flows_proxy.get('/').items
        next_flows = []
        for f in flows:
            if f.table_id == table_id:
                # FIXME
                if fd.get_in_port(f) == fd.get_in_port(flow) and \
                        fd.get_out_port(f) == metadata:
                    next_flows.append(f)

        if len(next_flows) == 0:
            self.log.warning('no next flow found, it may be a timing issue',
                             flow=flow, number_of_flows=len(flows))
            if flow.id in self.retry_add_flow_list:
                self.log.debug('flow is already in retry list', flow_id=flow.id)
            else:
                self.retry_add_flow_list.append(flow.id)
                reactor.callLater(5, self.retry_add_flow, flow)
            return None

        next_flows.sort(key=lambda f: f.priority, reverse=True)

        return next_flows[0]

    def update_children_flows(self, device_rules_map):

        for device_id, (flows, groups) in device_rules_map.iteritems():
            if device_id != self.device_id:
                self.root_proxy.update('/devices/{}/flows'.format(device_id),
                                       Flows(items=flows.values()))
                self.root_proxy.update('/devices/{}/flow_groups'.format(
                    device_id), FlowGroups(items=groups.values()))

    def clear_flows_and_scheduler_for_logical_port(self, child_device, logical_port):
        ofp_port_name = logical_port.ofp_port.name
        port_no = logical_port.ofp_port.port_no
        pon_port = child_device.proxy_address.channel_id
        onu_id = child_device.proxy_address.onu_id
        uni_id = self.platform.uni_id_from_port_num(logical_port)

        # TODO: The DEFAULT_TECH_PROFILE_ID is assumed. The right way to do this
        # is probably to maintain a list of Tech-profile table IDs associated
        # with the UNI logical_port. This way, when the logical port is deleted,
        # all the tech-profile configuration associated with the UNI logical_port
        # can be cleared.
        tech_profile_instance = self.tech_profile[pon_port]. \
            get_tech_profile_instance(
                DEFAULT_TECH_PROFILE_TABLE_ID,
                ofp_port_name)
        flow_ids = self.resource_mgr.get_current_flow_ids_for_uni(pon_port, onu_id, uni_id)
        self.log.debug("outstanding-flows-to-be-cleared", flow_ids=flow_ids)
        for flow_id in flow_ids:
            flow_infos = self.resource_mgr.get_flow_id_info(pon_port, onu_id, uni_id, flow_id)
            for flow_info in flow_infos:
                direction = flow_info['flow_type']
                flow_to_remove = openolt_pb2.Flow(flow_id=flow_id,
                                                  flow_type=direction)
                try:
                    self.stub.FlowRemove(flow_to_remove)
                except grpc.RpcError as grpc_e:
                    if grpc_e.code() == grpc.StatusCode.NOT_FOUND:
                        self.log.debug('This flow does not exist on the switch, '
                                       'normal after an OLT reboot',
                                       flow=flow_to_remove)
                    else:
                        raise grpc_e

                self.resource_mgr.free_flow_id_for_uni(pon_port, onu_id, uni_id, flow_id)

        try:
            tconts = self.tech_profile[pon_port].get_tconts(tech_profile_instance)
            self.stub.RemoveTconts(openolt_pb2.Tconts(intf_id=pon_port,
                                                      onu_id=onu_id,
                                                      uni_id=uni_id,
                                                      port_no=port_no,
                                                      tconts=tconts))
        except grpc.RpcError as grpc_e:
            self.log.error('error-removing-tcont-scheduler-queues',
                           err=grpc_e)

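    # Device flow ids are stored in the flows proxy with the direction
    # encoded in bit 15: upstream ids have the bit set, downstream ids are
    # stored unchanged. decode_stored_id() reverses the encoding.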
    def generate_stored_id(self, flow_id, direction):
        if direction == UPSTREAM:
            self.log.debug('upstream flow, shifting id')
            return 0x1 << 15 | flow_id
        elif direction == DOWNSTREAM:
            self.log.debug('downstream flow, not shifting id')
            return flow_id
        else:
            self.log.warn('Unrecognized direction', direction=direction)
            return flow_id

    def decode_stored_id(self, id):
        if id >> 15 == 0x1:
            return id & 0x7fff, UPSTREAM
        else:
            return id, DOWNSTREAM

    def _populate_tech_profile_per_pon_port(self):
        for arange in self.resource_mgr.device_info.ranges:
            for intf_id in arange.intf_ids:
                self.tech_profile[intf_id] = \
                    self.resource_mgr.resource_mgrs[intf_id].tech_profile

        # Make sure we have as many tech_profiles as there are pon ports on
        # the device
        assert len(self.tech_profile) == self.resource_mgr.device_info.pon_ports

    def _get_flow_info_as_json_blob(self, flow, flow_store_cookie,
                                    flow_category=None):
        json_blob = MessageToDict(message=flow,
                                  preserving_proto_field_name=True)
        self.log.debug("flow-info", json_blob=json_blob)
        json_blob['flow_store_cookie'] = flow_store_cookie
        if flow_category is not None:
            json_blob['flow_category'] = flow_category
        flow_info = self.resource_mgr.get_flow_id_info(flow.access_intf_id,
                                                       flow.onu_id, flow.uni_id, flow.flow_id)

        if flow_info is None:
            flow_info = list()
            flow_info.append(json_blob)
        else:
            assert (isinstance(flow_info, list))
            flow_info.append(json_blob)

        return flow_info

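    # The flow store cookie is the first 12 hex digits of the MD5 of the
    # JSON-serialized classifier (keys sorted), optionally concatenated with
    # the gemport id so that per-gemport flows hash to distinct cookies.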
    @staticmethod
    def _get_flow_store_cookie(classifier, gem_port=None):
        assert isinstance(classifier, dict)
        # We need unique flows per gem_port
        if gem_port is not None:
            to_hash = dumps(classifier, sort_keys=True) + str(gem_port)
        else:
            to_hash = dumps(classifier, sort_keys=True)
        return hashlib.md5(to_hash).hexdigest()[:12]

    @inlineCallbacks
    def get_nni_intf_id(self):
        if self.nni_intf_id is not None:
            returnValue(self.nni_intf_id)

        port_list = yield self.core_proxy.get_ports(self.device_id, Port.ETHERNET_NNI)
        self.log.debug("nni-ports-list", port_list=port_list)

        # TODO: Hardcoded only first NNI
        port = port_list.items[0]

        self.log.debug("nni-port", port=port)
        self.nni_intf_id = self.platform.intf_id_from_nni_port_num(port.port_no)

        self.log.debug("nni-intf-id", port=port.port_no, nni_intf_id=self.nni_intf_id)
        returnValue(self.nni_intf_id)